From d77fee7103c2f3b9e252702ed5957ba4089e8d70 Mon Sep 17 00:00:00 2001 From: Daniel Date: Mon, 20 Jan 2025 18:00:37 +0100 Subject: [PATCH 1/6] Restructuring some code. --- .../include/nlohmann/json.hpp | 0 extras/ros/README.md | 20 ++ {ros => extras/ros}/docker-compose.yml | 12 +- {ros => extras/ros}/dockerfile | 14 +- .../ros}/pose2D_visualizer/package.xml | 0 .../pose2D_visualizer/__init__.py | 0 .../pose2D_visualizer/pose2D_visualizer.py | 6 +- .../resource/pose2D_visualizer | 0 .../ros}/pose2D_visualizer/setup.cfg | 0 .../ros}/pose2D_visualizer/setup.py | 0 .../pose2D_visualizer/test/test_copyright.py | 0 .../pose2D_visualizer/test/test_flake8.py | 0 .../pose2D_visualizer/test/test_pep257.py | 0 .../ros}/rpt2D_wrapper_cpp/CMakeLists.txt | 10 +- .../ros}/rpt2D_wrapper_cpp/package.xml | 0 .../rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp | 27 +- .../ros}/rpt2D_wrapper_py/package.xml | 0 .../resource/rpt2D_wrapper_py | 0 .../rpt2D_wrapper_py/__init__.py | 0 .../rpt2D_wrapper_py/rpt2D_wrapper.py | 0 .../ros}/rpt2D_wrapper_py/setup.cfg | 0 {ros => extras/ros}/rpt2D_wrapper_py/setup.py | 0 .../rpt2D_wrapper_py/test/test_copyright.py | 0 .../ros}/rpt2D_wrapper_py/test/test_flake8.py | 0 .../ros}/rpt2D_wrapper_py/test/test_pep257.py | 0 .../src/test_triangulate.hpp | 296 ---------------- scripts/test_skelda_dataset.py | 27 +- scripts/test_triangulate.py | 258 +------------- .../src => scripts}/utils_2d_pose.hpp | 0 scripts/utils_pipeline.hpp | 316 ++++++++++++++++++ scripts/utils_pipeline.py | 255 ++++++++++++++ .../tests => tests}/.gitignore | 0 {ros => tests}/README.md | 25 +- .../my_app.cpp => tests/test_utils2d.cpp | 2 +- 34 files changed, 660 insertions(+), 608 deletions(-) rename {ros/rpt2D_wrapper_cpp => extras}/include/nlohmann/json.hpp (100%) create mode 100644 extras/ros/README.md rename {ros => extras/ros}/docker-compose.yml (89%) rename {ros => extras/ros}/dockerfile (85%) rename {ros => extras/ros}/pose2D_visualizer/package.xml (100%) rename {ros => extras/ros}/pose2D_visualizer/pose2D_visualizer/__init__.py (100%) rename {ros => extras/ros}/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py (96%) rename {ros => extras/ros}/pose2D_visualizer/resource/pose2D_visualizer (100%) rename {ros => extras/ros}/pose2D_visualizer/setup.cfg (100%) rename {ros => extras/ros}/pose2D_visualizer/setup.py (100%) rename {ros => extras/ros}/pose2D_visualizer/test/test_copyright.py (100%) rename {ros => extras/ros}/pose2D_visualizer/test/test_flake8.py (100%) rename {ros => extras/ros}/pose2D_visualizer/test/test_pep257.py (100%) rename {ros => extras/ros}/rpt2D_wrapper_cpp/CMakeLists.txt (93%) rename {ros => extras/ros}/rpt2D_wrapper_cpp/package.xml (100%) rename {ros => extras/ros}/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp (89%) rename {ros => extras/ros}/rpt2D_wrapper_py/package.xml (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/resource/rpt2D_wrapper_py (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/setup.cfg (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/setup.py (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/test/test_copyright.py (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/test/test_flake8.py (100%) rename {ros => extras/ros}/rpt2D_wrapper_py/test/test_pep257.py (100%) delete mode 100644 ros/rpt2D_wrapper_cpp/src/test_triangulate.hpp rename {ros/rpt2D_wrapper_cpp/src => scripts}/utils_2d_pose.hpp (100%) create 
mode 100644 scripts/utils_pipeline.hpp create mode 100644 scripts/utils_pipeline.py rename {ros/rpt2D_wrapper_cpp/tests => tests}/.gitignore (100%) rename {ros => tests}/README.md (54%) rename ros/rpt2D_wrapper_cpp/tests/my_app.cpp => tests/test_utils2d.cpp (98%) diff --git a/ros/rpt2D_wrapper_cpp/include/nlohmann/json.hpp b/extras/include/nlohmann/json.hpp similarity index 100% rename from ros/rpt2D_wrapper_cpp/include/nlohmann/json.hpp rename to extras/include/nlohmann/json.hpp diff --git a/extras/ros/README.md b/extras/ros/README.md new file mode 100644 index 0000000..1c8a869 --- /dev/null +++ b/extras/ros/README.md @@ -0,0 +1,20 @@ +# ROS-Wrapper + +Run the pose estimator with ROS topics as inputs and publish the detected poses. +
+ +- Build container: + ```bash + docker build --progress=plain -t rapidposetriangulation_ros -f extras/ros/dockerfile . + ``` + +- Update or remove the `ROS_DOMAIN_ID` in `docker-compose.yml` to fit your environment + +- Run and test: + ```bash + xhost +; docker compose -f extras/ros/docker-compose.yml up + + docker exec -it ros-test_node-1 bash + export ROS_DOMAIN_ID=18 + ``` diff --git a/ros/docker-compose.yml b/extras/ros/docker-compose.yml similarity index 89% rename from ros/docker-compose.yml rename to extras/ros/docker-compose.yml index 1953e82..491c6b2 100644 --- a/ros/docker-compose.yml +++ b/extras/ros/docker-compose.yml @@ -10,8 +10,8 @@ services: runtime: nvidia privileged: true volumes: - - ../:/RapidPoseTriangulation/ - - ../skelda/:/skelda/ + - ../../:/RapidPoseTriangulation/ + - ../../skelda/:/skelda/ - /tmp/.X11-unix:/tmp/.X11-unix - /dev/shm:/dev/shm environment: @@ -27,8 +27,8 @@ services: runtime: nvidia privileged: true volumes: - - ../:/RapidPoseTriangulation/ - - ../skelda/:/skelda/ + - ../../:/RapidPoseTriangulation/ + - ../../skelda/:/skelda/ - /tmp/.X11-unix:/tmp/.X11-unix - /dev/shm:/dev/shm environment: @@ -45,8 +45,8 @@ services: runtime: nvidia privileged: true volumes: - - ../:/RapidPoseTriangulation/ - - ../skelda/:/skelda/ + - ../../:/RapidPoseTriangulation/ + - ../../skelda/:/skelda/ - /tmp/.X11-unix:/tmp/.X11-unix - /dev/shm:/dev/shm environment: diff --git a/ros/dockerfile b/extras/ros/dockerfile similarity index 85% rename from ros/dockerfile rename to extras/ros/dockerfile index f5367b3..ec65f90 100644 --- a/ros/dockerfile +++ b/extras/ros/dockerfile @@ -60,14 +60,16 @@ RUN echo "source /project/dev_ws/install/setup.bash" >> ~/.bashrc RUN pip3 install --no-cache-dir "setuptools<=58.2.0" # Copy modules -COPY ./ros/pose2D_visualizer /RapidPoseTriangulation/ros/pose2D_visualizer/ -COPY ./ros/rpt2D_wrapper_py /RapidPoseTriangulation/ros/rpt2D_wrapper_py/ -COPY ./ros/rpt2D_wrapper_cpp /RapidPoseTriangulation/ros/rpt2D_wrapper_cpp/ +COPY ./extras/include/ /RapidPoseTriangulation/extras/include/ +COPY ./scripts/ /RapidPoseTriangulation/scripts/ +COPY ./extras/ros/pose2D_visualizer/ /RapidPoseTriangulation/extras/ros/pose2D_visualizer/ +COPY ./extras/ros/rpt2D_wrapper_py/ /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_py/ +COPY ./extras/ros/rpt2D_wrapper_cpp/ /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_cpp/ # Link and build as ros package -RUN ln -s /RapidPoseTriangulation/ros/pose2D_visualizer/ /project/dev_ws/src/pose2D_visualizer -RUN ln -s /RapidPoseTriangulation/ros/rpt2D_wrapper_py/ /project/dev_ws/src/rpt2D_wrapper_py -RUN ln -s /RapidPoseTriangulation/ros/rpt2D_wrapper_cpp/ /project/dev_ws/src/rpt2D_wrapper_cpp +RUN ln -s /RapidPoseTriangulation/extras/ros/pose2D_visualizer/ /project/dev_ws/src/pose2D_visualizer +RUN ln -s /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_py/ /project/dev_ws/src/rpt2D_wrapper_py +RUN ln -s /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_cpp/ /project/dev_ws/src/rpt2D_wrapper_cpp RUN /bin/bash -i -c 'cd /project/dev_ws/; colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release' # Update ros packages -> autocompletion and check diff --git a/ros/pose2D_visualizer/package.xml b/extras/ros/pose2D_visualizer/package.xml similarity index 100% rename from ros/pose2D_visualizer/package.xml rename to extras/ros/pose2D_visualizer/package.xml diff --git a/ros/pose2D_visualizer/pose2D_visualizer/__init__.py b/extras/ros/pose2D_visualizer/pose2D_visualizer/__init__.py similarity index 100% rename from 
ros/pose2D_visualizer/pose2D_visualizer/__init__.py rename to extras/ros/pose2D_visualizer/pose2D_visualizer/__init__.py diff --git a/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py b/extras/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py similarity index 96% rename from ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py rename to extras/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py index 1b865db..98b65f5 100644 --- a/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py +++ b/extras/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py @@ -14,8 +14,8 @@ from sensor_msgs.msg import Image from std_msgs.msg import String filepath = os.path.dirname(os.path.realpath(__file__)) + "/" -sys.path.append(filepath + "../../../scripts/") -import test_triangulate +sys.path.append(filepath + "../../../../scripts/") +import utils_pipeline from skelda import utils_view @@ -42,7 +42,7 @@ def callback_images(image_data): # Convert into cv images from image string if image_data.encoding == "bayer_rggb8": bayer_image = bridge.imgmsg_to_cv2(image_data, "bayer_rggb8") - color_image = test_triangulate.bayer2rgb(bayer_image) + color_image = utils_pipeline.bayer2rgb(bayer_image) elif image_data.encoding == "mono8": gray_image = bridge.imgmsg_to_cv2(image_data, "mono8") color_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB) diff --git a/ros/pose2D_visualizer/resource/pose2D_visualizer b/extras/ros/pose2D_visualizer/resource/pose2D_visualizer similarity index 100% rename from ros/pose2D_visualizer/resource/pose2D_visualizer rename to extras/ros/pose2D_visualizer/resource/pose2D_visualizer diff --git a/ros/pose2D_visualizer/setup.cfg b/extras/ros/pose2D_visualizer/setup.cfg similarity index 100% rename from ros/pose2D_visualizer/setup.cfg rename to extras/ros/pose2D_visualizer/setup.cfg diff --git a/ros/pose2D_visualizer/setup.py b/extras/ros/pose2D_visualizer/setup.py similarity index 100% rename from ros/pose2D_visualizer/setup.py rename to extras/ros/pose2D_visualizer/setup.py diff --git a/ros/pose2D_visualizer/test/test_copyright.py b/extras/ros/pose2D_visualizer/test/test_copyright.py similarity index 100% rename from ros/pose2D_visualizer/test/test_copyright.py rename to extras/ros/pose2D_visualizer/test/test_copyright.py diff --git a/ros/pose2D_visualizer/test/test_flake8.py b/extras/ros/pose2D_visualizer/test/test_flake8.py similarity index 100% rename from ros/pose2D_visualizer/test/test_flake8.py rename to extras/ros/pose2D_visualizer/test/test_flake8.py diff --git a/ros/pose2D_visualizer/test/test_pep257.py b/extras/ros/pose2D_visualizer/test/test_pep257.py similarity index 100% rename from ros/pose2D_visualizer/test/test_pep257.py rename to extras/ros/pose2D_visualizer/test/test_pep257.py diff --git a/ros/rpt2D_wrapper_cpp/CMakeLists.txt b/extras/ros/rpt2D_wrapper_cpp/CMakeLists.txt similarity index 93% rename from ros/rpt2D_wrapper_cpp/CMakeLists.txt rename to extras/ros/rpt2D_wrapper_cpp/CMakeLists.txt index aa93999..987bcec 100644 --- a/ros/rpt2D_wrapper_cpp/CMakeLists.txt +++ b/extras/ros/rpt2D_wrapper_cpp/CMakeLists.txt @@ -23,11 +23,11 @@ find_package(sensor_msgs REQUIRED) find_package(cv_bridge REQUIRED) find_package(OpenCV REQUIRED) -### 3) ONNX Runtime -include_directories(/onnxruntime/include - /onnxruntime/include/onnxruntime/core/session - /onnxruntime/include/onnxruntime/core/providers/tensorrt) -link_directories(/onnxruntime/build/Linux/Release) +### 3) ONNX Runtime +include_directories(/onnxruntime/include/ + 
/onnxruntime/include/onnxruntime/core/session/ + /onnxruntime/include/onnxruntime/core/providers/tensorrt/) +link_directories(/onnxruntime/build/Linux/Release/) add_executable(rpt2D_wrapper src/rpt2D_wrapper.cpp) ament_target_dependencies(rpt2D_wrapper rclcpp std_msgs sensor_msgs cv_bridge) diff --git a/ros/rpt2D_wrapper_cpp/package.xml b/extras/ros/rpt2D_wrapper_cpp/package.xml similarity index 100% rename from ros/rpt2D_wrapper_cpp/package.xml rename to extras/ros/rpt2D_wrapper_cpp/package.xml diff --git a/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp b/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp similarity index 89% rename from ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp rename to extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp index dc2f3f6..f65f958 100644 --- a/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp +++ b/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp @@ -18,11 +18,11 @@ #include // JSON library -#include "nlohmann/json.hpp" +#include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp" using json = nlohmann::json; -#include "test_triangulate.hpp" -#include "utils_2d_pose.hpp" +#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp" +#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp" // ================================================================================================= @@ -34,6 +34,12 @@ static const float min_bbox_score = 0.4; static const float min_bbox_area = 0.1 * 0.1; static const bool batch_poses = true; +static const std::map<std::string, bool> whole_body = { + {"foots", true}, + {"face", true}, + {"hands", true}, +}; + // ================================================================================================= // ================================================================================================= @@ -61,9 +67,9 @@ public: pose_pub_ = this->create_publisher(pose_out_topic, qos_profile); // Load model - bool whole_body = test_triangulate::use_whole_body(); + bool use_wb = utils_pipeline::use_whole_body(whole_body); this->kpt_model = std::make_unique( - whole_body, min_bbox_score, min_bbox_area, batch_poses); + use_wb, min_bbox_score, min_bbox_area, batch_poses); RCLCPP_INFO(this->get_logger(), "Finished initialization of pose estimator."); @@ -86,6 +92,7 @@ private: // Pose model pointer std::unique_ptr kpt_model; + const std::vector<std::string> joint_names_2d = utils_pipeline::get_joint_names(whole_body); // Threading std::thread model_thread; @@ -138,7 +145,7 @@ void Rpt2DWrapperNode::callbackImages(const sensor_msgs::msg::Image::SharedPtr m { cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, "rgb8"); cv::Mat color_image = cv_ptr->image; - bayer_image = test_triangulate::rgb2bayer(color_image); + bayer_image = utils_pipeline::rgb2bayer(color_image); } else { @@ -185,14 +192,14 @@ void Rpt2DWrapperNode::callbackModel() } // Create image vector - cv::Mat rgb_image = test_triangulate::bayer2rgb(local_image); + cv::Mat rgb_image = utils_pipeline::bayer2rgb(local_image); std::vector<cv::Mat> images_2d; images_2d.push_back(rgb_image); // Predict 2D poses auto poses_2d_all = kpt_model->predict(images_2d); - auto poses_2d_upd = test_triangulate::update_keypoints( - poses_2d_all, test_triangulate::joint_names_2d); + auto poses_2d_upd = utils_pipeline::update_keypoints( + poses_2d_all, joint_names_2d, whole_body); auto &poses_2d = poses_2d_upd[0]; // Drop persons with no joints @@ -228,7 +235,7 @@ void Rpt2DWrapperNode::callbackModel() json poses_msg; poses_msg["bodies2D"] = valid_poses; // shape: persons x keypoints x 3 -
poses_msg["joints"] = test_triangulate::joint_names_2d; + poses_msg["joints"] = joint_names_2d; poses_msg["num_persons"] = valid_poses.size(); poses_msg["timestamps"] = { {"image", local_timestamp}, diff --git a/ros/rpt2D_wrapper_py/package.xml b/extras/ros/rpt2D_wrapper_py/package.xml similarity index 100% rename from ros/rpt2D_wrapper_py/package.xml rename to extras/ros/rpt2D_wrapper_py/package.xml diff --git a/ros/rpt2D_wrapper_py/resource/rpt2D_wrapper_py b/extras/ros/rpt2D_wrapper_py/resource/rpt2D_wrapper_py similarity index 100% rename from ros/rpt2D_wrapper_py/resource/rpt2D_wrapper_py rename to extras/ros/rpt2D_wrapper_py/resource/rpt2D_wrapper_py diff --git a/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py b/extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py similarity index 100% rename from ros/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py rename to extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py diff --git a/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py b/extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py similarity index 100% rename from ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py rename to extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py diff --git a/ros/rpt2D_wrapper_py/setup.cfg b/extras/ros/rpt2D_wrapper_py/setup.cfg similarity index 100% rename from ros/rpt2D_wrapper_py/setup.cfg rename to extras/ros/rpt2D_wrapper_py/setup.cfg diff --git a/ros/rpt2D_wrapper_py/setup.py b/extras/ros/rpt2D_wrapper_py/setup.py similarity index 100% rename from ros/rpt2D_wrapper_py/setup.py rename to extras/ros/rpt2D_wrapper_py/setup.py diff --git a/ros/rpt2D_wrapper_py/test/test_copyright.py b/extras/ros/rpt2D_wrapper_py/test/test_copyright.py similarity index 100% rename from ros/rpt2D_wrapper_py/test/test_copyright.py rename to extras/ros/rpt2D_wrapper_py/test/test_copyright.py diff --git a/ros/rpt2D_wrapper_py/test/test_flake8.py b/extras/ros/rpt2D_wrapper_py/test/test_flake8.py similarity index 100% rename from ros/rpt2D_wrapper_py/test/test_flake8.py rename to extras/ros/rpt2D_wrapper_py/test/test_flake8.py diff --git a/ros/rpt2D_wrapper_py/test/test_pep257.py b/extras/ros/rpt2D_wrapper_py/test/test_pep257.py similarity index 100% rename from ros/rpt2D_wrapper_py/test/test_pep257.py rename to extras/ros/rpt2D_wrapper_py/test/test_pep257.py diff --git a/ros/rpt2D_wrapper_cpp/src/test_triangulate.hpp b/ros/rpt2D_wrapper_cpp/src/test_triangulate.hpp deleted file mode 100644 index 95402b3..0000000 --- a/ros/rpt2D_wrapper_cpp/src/test_triangulate.hpp +++ /dev/null @@ -1,296 +0,0 @@ -#pragma once - -#include -#include - -#include - -// ================================================================================================= - -namespace test_triangulate -{ - // If any part shall be disabled, also update the joint names list - static const std::map whole_body = { - {"foots", true}, - {"face", true}, - {"hands", true}, - }; - - static const std::vector joint_names_2d = { - // coco - "nose", - "eye_left", - "eye_right", - "ear_left", - "ear_right", - "shoulder_left", - "shoulder_right", - "elbow_left", - "elbow_right", - "wrist_left", - "wrist_right", - "hip_left", - "hip_right", - "knee_left", - "knee_right", - "ankle_left", - "ankle_right", - // foot - "foot_toe_big_left", - "foot_toe_small_left", - "foot_heel_left", - "foot_toe_big_right", - "foot_toe_small_right", - "foot_heel_right", - // face - "face_jaw_right_1", - "face_jaw_right_2", - "face_jaw_right_3", - "face_jaw_right_4", - "face_jaw_right_5", - "face_jaw_right_6", 
- "face_jaw_right_7", - "face_jaw_right_8", - "face_jaw_middle", - "face_jaw_left_1", - "face_jaw_left_2", - "face_jaw_left_3", - "face_jaw_left_4", - "face_jaw_left_5", - "face_jaw_left_6", - "face_jaw_left_7", - "face_jaw_left_8", - "face_eyebrow_right_1", - "face_eyebrow_right_2", - "face_eyebrow_right_3", - "face_eyebrow_right_4", - "face_eyebrow_right_5", - "face_eyebrow_left_1", - "face_eyebrow_left_2", - "face_eyebrow_left_3", - "face_eyebrow_left_4", - "face_eyebrow_left_5", - "face_nose_1", - "face_nose_2", - "face_nose_3", - "face_nose_4", - "face_nose_5", - "face_nose_6", - "face_nose_7", - "face_nose_8", - "face_nose_9", - "face_eye_right_1", - "face_eye_right_2", - "face_eye_right_3", - "face_eye_right_4", - "face_eye_right_5", - "face_eye_right_6", - "face_eye_left_1", - "face_eye_left_2", - "face_eye_left_3", - "face_eye_left_4", - "face_eye_left_5", - "face_eye_left_6", - "face_mouth_1", - "face_mouth_2", - "face_mouth_3", - "face_mouth_4", - "face_mouth_5", - "face_mouth_6", - "face_mouth_7", - "face_mouth_8", - "face_mouth_9", - "face_mouth_10", - "face_mouth_11", - "face_mouth_12", - "face_mouth_13", - "face_mouth_14", - "face_mouth_15", - "face_mouth_16", - "face_mouth_17", - "face_mouth_18", - "face_mouth_19", - "face_mouth_20", - // hand - "hand_wrist_left", - "hand_finger_thumb_left_1", - "hand_finger_thumb_left_2", - "hand_finger_thumb_left_3", - "hand_finger_thumb_left_4", - "hand_finger_index_left_1", - "hand_finger_index_left_2", - "hand_finger_index_left_3", - "hand_finger_index_left_4", - "hand_finger_middle_left_1", - "hand_finger_middle_left_2", - "hand_finger_middle_left_3", - "hand_finger_middle_left_4", - "hand_finger_ring_left_1", - "hand_finger_ring_left_2", - "hand_finger_ring_left_3", - "hand_finger_ring_left_4", - "hand_finger_pinky_left_1", - "hand_finger_pinky_left_2", - "hand_finger_pinky_left_3", - "hand_finger_pinky_left_4", - "hand_wrist_right", - "hand_finger_thumb_right_1", - "hand_finger_thumb_right_2", - "hand_finger_thumb_right_3", - "hand_finger_thumb_right_4", - "hand_finger_index_right_1", - "hand_finger_index_right_2", - "hand_finger_index_right_3", - "hand_finger_index_right_4", - "hand_finger_middle_right_1", - "hand_finger_middle_right_2", - "hand_finger_middle_right_3", - "hand_finger_middle_right_4", - "hand_finger_ring_right_1", - "hand_finger_ring_right_2", - "hand_finger_ring_right_3", - "hand_finger_ring_right_4", - "hand_finger_pinky_right_1", - "hand_finger_pinky_right_2", - "hand_finger_pinky_right_3", - "hand_finger_pinky_right_4", - // extras - "hip_middle", - "shoulder_middle", - "head", - }; - - // ============================================================================================= - // ============================================================================================= - - [[maybe_unused]] static bool use_whole_body() - { - for (const auto &pair : whole_body) - { - if (pair.second) - { - return true; - } - } - return false; - } - - // ============================================================================================= - - cv::Mat bayer2rgb(const cv::Mat &bayer) - { - cv::Mat rgb; - cv::cvtColor(bayer, rgb, cv::COLOR_BayerBG2RGB); - return rgb; - } - - cv::Mat rgb2bayer(const cv::Mat &img) - { - CV_Assert(img.type() == CV_8UC3); - cv::Mat bayer(img.rows, img.cols, CV_8UC1); - - for (int r = 0; r < img.rows; ++r) - { - const uchar *imgData = img.ptr(r); - uchar *bayerRowPtr = bayer.ptr(r); - - for (int c = 0; c < img.cols; ++c) - { - int pixelIndex = 3 * c; - - // Use faster bit operation 
instead of modulo+if - // Even row, even col => R = 0 - // Even row, odd col => G = 1 - // Odd row, even col => G = 1 - // Odd row, odd col => B = 2 - int row_mod = r & 1; - int col_mod = c & 1; - int component = row_mod + col_mod; - - bayerRowPtr[c] = imgData[pixelIndex + component]; - } - } - - return bayer; - } - - // ============================================================================================= - - inline int find_index(const std::vector &vec, const std::string &key) - { - auto it = std::find(vec.begin(), vec.end(), key); - if (it == vec.end()) - { - throw std::runtime_error("Cannot find \"" + key + "\" in joint_names."); - } - return static_cast(std::distance(vec.begin(), it)); - } - - std::vector>>> update_keypoints( - const std::vector>>> &poses_2d, - const std::vector &joint_names) - { - std::vector>>> new_views; - new_views.reserve(poses_2d.size()); - - for (const auto &view : poses_2d) - { - // "view" is a list of bodies => each body is Nx3 - std::vector>> new_bodies; - new_bodies.reserve(view.size()); - - for (const auto &body : view) - { - // 1) Copy first 17 keypoints - std::vector> new_body; - new_body.insert(new_body.end(), body.begin(), body.begin() + 17); - - // 2) Optionally append extra keypoints - if (whole_body.at("foots")) - { - new_body.insert(new_body.end(), body.begin() + 17, body.begin() + 23); - } - if (whole_body.at("face")) - { - new_body.insert(new_body.end(), body.begin() + 23, body.begin() + 91); - } - if (whole_body.at("hands")) - { - new_body.insert(new_body.end(), body.begin() + 91, body.end()); - } - - // 3) Compute mid_hip - int hlid = find_index(joint_names, "hip_left"); - int hrid = find_index(joint_names, "hip_right"); - float mid_hip_x = 0.5 * (new_body[hlid][0] + new_body[hrid][0]); - float mid_hip_y = 0.5 * (new_body[hlid][1] + new_body[hrid][1]); - float mid_hip_c = std::min(new_body[hlid][2], new_body[hrid][2]); - new_body.push_back({mid_hip_x, mid_hip_y, mid_hip_c}); - - // 4) Compute mid_shoulder - int slid = find_index(joint_names, "shoulder_left"); - int srid = find_index(joint_names, "shoulder_right"); - float mid_shoulder_x = 0.5 * (new_body[slid][0] + new_body[srid][0]); - float mid_shoulder_y = 0.5 * (new_body[slid][1] + new_body[srid][1]); - float mid_shoulder_c = std::min(new_body[slid][2], new_body[srid][2]); - new_body.push_back({mid_shoulder_x, mid_shoulder_y, mid_shoulder_c}); - - // 5) Compute head - int elid = find_index(joint_names, "ear_left"); - int erid = find_index(joint_names, "ear_right"); - float head_x = 0.5 * (new_body[elid][0] + new_body[erid][0]); - float head_y = 0.5 * (new_body[elid][1] + new_body[erid][1]); - float head_c = std::min(new_body[elid][2], new_body[erid][2]); - new_body.push_back({head_x, head_y, head_c}); - - // Add this updated body into new_bodies - new_bodies.push_back(new_body); - } - - // Add all updated bodies for this view - new_views.push_back(new_bodies); - } - - return new_views; - } -} diff --git a/scripts/test_skelda_dataset.py b/scripts/test_skelda_dataset.py index 3dd3967..00e8102 100644 --- a/scripts/test_skelda_dataset.py +++ b/scripts/test_skelda_dataset.py @@ -8,8 +8,8 @@ import matplotlib import numpy as np import tqdm -import test_triangulate import utils_2d_pose +import utils_pipeline from skelda import evals sys.path.append("/RapidPoseTriangulation/swig/") @@ -17,6 +17,12 @@ import rpt # ================================================================================================== +whole_body = { + "foots": False, + "face": False, + "hands": False, +} + 
dataset_use = "human36m" # dataset_use = "panoptic" # dataset_use = "mvor" @@ -175,7 +181,7 @@ datasets = { }, } -joint_names_2d = test_triangulate.joint_names_2d +joint_names_2d = utils_pipeline.get_joint_names(whole_body) joint_names_3d = list(joint_names_2d) eval_joints = [ "head", @@ -197,7 +203,7 @@ if dataset_use == "human36m": if dataset_use == "panoptic": eval_joints[eval_joints.index("head")] = "nose" if dataset_use == "human36m_wb": - if any((test_triangulate.whole_body.values())): + if utils_pipeline.use_whole_body(whole_body): eval_joints = list(joint_names_2d) else: eval_joints[eval_joints.index("head")] = "nose" @@ -323,9 +329,8 @@ def main(): batch_poses = datasets[dataset_use].get("batch_poses", default_batch_poses) # Load 2D pose model - whole_body = test_triangulate.whole_body - if any((whole_body[k] for k in whole_body)): - kpt_model = utils_2d_pose.load_wb_model() + if utils_pipeline.use_whole_body(whole_body): + kpt_model = utils_2d_pose.load_wb_model(min_bbox_score, min_bbox_area, batch_poses) else: kpt_model = utils_2d_pose.load_model(min_bbox_score, min_bbox_area, batch_poses) @@ -354,7 +359,7 @@ def main(): try: for i in range(len(label["imgpaths"])): imgpath = label["imgpaths"][i] - img = test_triangulate.load_image(imgpath) + img = utils_pipeline.load_image(imgpath) except cv2.error: print("One of the paths not found:", label["imgpaths"]) continue @@ -370,7 +375,7 @@ def main(): try: for i in range(len(label["imgpaths"])): imgpath = label["imgpaths"][i] - img = test_triangulate.load_image(imgpath) + img = utils_pipeline.load_image(imgpath) images_2d.append(img) except cv2.error: print("One of the paths not found:", label["imgpaths"]) @@ -393,14 +398,14 @@ def main(): # This also resulted in notably better MPJPE results in most cases, presumbly since the # demosaicing algorithm from OpenCV is better than the default one from the cameras for i in range(len(images_2d)): - images_2d[i] = test_triangulate.rgb2bayer(images_2d[i]) + images_2d[i] = utils_pipeline.rgb2bayer(images_2d[i]) time_imgs = time.time() - start start = time.time() for i in range(len(images_2d)): - images_2d[i] = test_triangulate.bayer2rgb(images_2d[i]) + images_2d[i] = utils_pipeline.bayer2rgb(images_2d[i]) poses_2d = utils_2d_pose.get_2d_pose(kpt_model, images_2d) - poses_2d = test_triangulate.update_keypoints(poses_2d, joint_names_2d) + poses_2d = utils_pipeline.update_keypoints(poses_2d, joint_names_2d, whole_body) time_2d = time.time() - start all_poses_2d.append(poses_2d) diff --git a/scripts/test_triangulate.py b/scripts/test_triangulate.py index 5c25d44..10547e8 100644 --- a/scripts/test_triangulate.py +++ b/scripts/test_triangulate.py @@ -3,13 +3,12 @@ import json import os import sys import time -from typing import List -import cv2 import matplotlib import numpy as np import utils_2d_pose +import utils_pipeline from skelda import utils_pose, utils_view sys.path.append("/RapidPoseTriangulation/swig/") @@ -25,175 +24,9 @@ whole_body = { "hands": False, } -joint_names_2d = [ - "nose", - "eye_left", - "eye_right", - "ear_left", - "ear_right", - "shoulder_left", - "shoulder_right", - "elbow_left", - "elbow_right", - "wrist_left", - "wrist_right", - "hip_left", - "hip_right", - "knee_left", - "knee_right", - "ankle_left", - "ankle_right", -] -if whole_body["foots"]: - joint_names_2d.extend( - [ - "foot_toe_big_left", - "foot_toe_small_left", - "foot_heel_left", - "foot_toe_big_right", - "foot_toe_small_right", - "foot_heel_right", - ] - ) -if whole_body["face"]: - joint_names_2d.extend( - [ - 
"face_jaw_right_1", - "face_jaw_right_2", - "face_jaw_right_3", - "face_jaw_right_4", - "face_jaw_right_5", - "face_jaw_right_6", - "face_jaw_right_7", - "face_jaw_right_8", - "face_jaw_middle", - "face_jaw_left_1", - "face_jaw_left_2", - "face_jaw_left_3", - "face_jaw_left_4", - "face_jaw_left_5", - "face_jaw_left_6", - "face_jaw_left_7", - "face_jaw_left_8", - "face_eyebrow_right_1", - "face_eyebrow_right_2", - "face_eyebrow_right_3", - "face_eyebrow_right_4", - "face_eyebrow_right_5", - "face_eyebrow_left_1", - "face_eyebrow_left_2", - "face_eyebrow_left_3", - "face_eyebrow_left_4", - "face_eyebrow_left_5", - "face_nose_1", - "face_nose_2", - "face_nose_3", - "face_nose_4", - "face_nose_5", - "face_nose_6", - "face_nose_7", - "face_nose_8", - "face_nose_9", - "face_eye_right_1", - "face_eye_right_2", - "face_eye_right_3", - "face_eye_right_4", - "face_eye_right_5", - "face_eye_right_6", - "face_eye_left_1", - "face_eye_left_2", - "face_eye_left_3", - "face_eye_left_4", - "face_eye_left_5", - "face_eye_left_6", - "face_mouth_1", - "face_mouth_2", - "face_mouth_3", - "face_mouth_4", - "face_mouth_5", - "face_mouth_6", - "face_mouth_7", - "face_mouth_8", - "face_mouth_9", - "face_mouth_10", - "face_mouth_11", - "face_mouth_12", - "face_mouth_13", - "face_mouth_14", - "face_mouth_15", - "face_mouth_16", - "face_mouth_17", - "face_mouth_18", - "face_mouth_19", - "face_mouth_20", - ] - ) -if whole_body["hands"]: - joint_names_2d.extend( - [ - "hand_wrist_left", - "hand_finger_thumb_left_1", - "hand_finger_thumb_left_2", - "hand_finger_thumb_left_3", - "hand_finger_thumb_left_4", - "hand_finger_index_left_1", - "hand_finger_index_left_2", - "hand_finger_index_left_3", - "hand_finger_index_left_4", - "hand_finger_middle_left_1", - "hand_finger_middle_left_2", - "hand_finger_middle_left_3", - "hand_finger_middle_left_4", - "hand_finger_ring_left_1", - "hand_finger_ring_left_2", - "hand_finger_ring_left_3", - "hand_finger_ring_left_4", - "hand_finger_pinky_left_1", - "hand_finger_pinky_left_2", - "hand_finger_pinky_left_3", - "hand_finger_pinky_left_4", - "hand_wrist_right", - "hand_finger_thumb_right_1", - "hand_finger_thumb_right_2", - "hand_finger_thumb_right_3", - "hand_finger_thumb_right_4", - "hand_finger_index_right_1", - "hand_finger_index_right_2", - "hand_finger_index_right_3", - "hand_finger_index_right_4", - "hand_finger_middle_right_1", - "hand_finger_middle_right_2", - "hand_finger_middle_right_3", - "hand_finger_middle_right_4", - "hand_finger_ring_right_1", - "hand_finger_ring_right_2", - "hand_finger_ring_right_3", - "hand_finger_ring_right_4", - "hand_finger_pinky_right_1", - "hand_finger_pinky_right_2", - "hand_finger_pinky_right_3", - "hand_finger_pinky_right_4", - ] - ) -joint_names_2d.extend( - [ - "hip_middle", - "shoulder_middle", - "head", - ] -) +joint_names_2d = utils_pipeline.get_joint_names(whole_body) joint_names_3d = list(joint_names_2d) -main_limbs = [ - ("shoulder_left", "elbow_left"), - ("elbow_left", "wrist_left"), - ("shoulder_right", "elbow_right"), - ("elbow_right", "wrist_right"), - ("hip_left", "knee_left"), - ("knee_left", "ankle_left"), - ("hip_right", "knee_right"), - ("knee_right", "ankle_right"), -] # ================================================================================================== @@ -217,85 +50,6 @@ def update_sample(sample, new_dir=""): # ================================================================================================== -def load_image(path: str): - image = cv2.imread(path, 3) - image = cv2.cvtColor(image, 
cv2.COLOR_BGR2RGB) - image = np.asarray(image, dtype=np.uint8) - return image - - -# ================================================================================================== - - -def rgb2bayer(img): - bayer = np.zeros((img.shape[0], img.shape[1]), dtype=img.dtype) - bayer[0::2, 0::2] = img[0::2, 0::2, 0] - bayer[0::2, 1::2] = img[0::2, 1::2, 1] - bayer[1::2, 0::2] = img[1::2, 0::2, 1] - bayer[1::2, 1::2] = img[1::2, 1::2, 2] - return bayer - - -def bayer2rgb(bayer): - img = cv2.cvtColor(bayer, cv2.COLOR_BayerBG2RGB) - return img - - -# ================================================================================================== - - -def update_keypoints(poses_2d: list, joint_names: List[str]) -> list: - new_views = [] - for view in poses_2d: - new_bodies = [] - for body in view: - body = body.tolist() - - new_body = body[:17] - if whole_body["foots"]: - new_body.extend(body[17:23]) - if whole_body["face"]: - new_body.extend(body[23:91]) - if whole_body["hands"]: - new_body.extend(body[91:]) - body = new_body - - hlid = joint_names.index("hip_left") - hrid = joint_names.index("hip_right") - mid_hip = [ - float(((body[hlid][0] + body[hrid][0]) / 2.0)), - float(((body[hlid][1] + body[hrid][1]) / 2.0)), - min(body[hlid][2], body[hrid][2]), - ] - body.append(mid_hip) - - slid = joint_names.index("shoulder_left") - srid = joint_names.index("shoulder_right") - mid_shoulder = [ - float(((body[slid][0] + body[srid][0]) / 2.0)), - float(((body[slid][1] + body[srid][1]) / 2.0)), - min(body[slid][2], body[srid][2]), - ] - body.append(mid_shoulder) - - elid = joint_names.index("ear_left") - erid = joint_names.index("ear_right") - head = [ - float(((body[elid][0] + body[erid][0]) / 2.0)), - float(((body[elid][1] + body[erid][1]) / 2.0)), - min(body[elid][2], body[erid][2]), - ] - body.append(head) - - new_bodies.append(body) - new_views.append(new_bodies) - - return new_views - - -# ================================================================================================== - - def main(): if any((whole_body[k] for k in whole_body)): kpt_model = utils_2d_pose.load_wb_model() @@ -330,15 +84,15 @@ def main(): images_2d = [] for i in range(len(sample["cameras_color"])): imgpath = sample["imgpaths_color"][i] - img = load_image(imgpath) - img = rgb2bayer(img) - img = bayer2rgb(img) + img = utils_pipeline.load_image(imgpath) + img = utils_pipeline.rgb2bayer(img) + img = utils_pipeline.bayer2rgb(img) images_2d.append(img) # Get 2D poses stime = time.time() poses_2d = utils_2d_pose.get_2d_pose(kpt_model, images_2d) - poses_2d = update_keypoints(poses_2d, joint_names_2d) + poses_2d = utils_pipeline.update_keypoints(poses_2d, joint_names_2d, whole_body) print("2D time:", time.time() - stime) # print([np.array(p).round(6).tolist() for p in poses_2d]) diff --git a/ros/rpt2D_wrapper_cpp/src/utils_2d_pose.hpp b/scripts/utils_2d_pose.hpp similarity index 100% rename from ros/rpt2D_wrapper_cpp/src/utils_2d_pose.hpp rename to scripts/utils_2d_pose.hpp diff --git a/scripts/utils_pipeline.hpp b/scripts/utils_pipeline.hpp new file mode 100644 index 0000000..8744c2a --- /dev/null +++ b/scripts/utils_pipeline.hpp @@ -0,0 +1,316 @@ +#pragma once + +#include +#include + +#include + +// ================================================================================================= + +namespace utils_pipeline +{ + bool use_whole_body(const std::map<std::string, bool> &whole_body) + { + for (const auto &pair : whole_body) + { + if (pair.second) + { + return true; + } + } + return false; + } + + // ============================================================================================= + + std::vector<std::string> get_joint_names(const std::map<std::string, bool> &whole_body) + { + std::vector<std::string> joint_names_2d = { + "nose", + "eye_left", + "eye_right", + "ear_left", + "ear_right", + "shoulder_left", + "shoulder_right", + "elbow_left", + "elbow_right", + "wrist_left", + "wrist_right", + "hip_left", + "hip_right", + "knee_left", + "knee_right", + "ankle_left", + "ankle_right", + }; + + if (whole_body.at("foots")) + { + joint_names_2d.insert( + joint_names_2d.end(), + { + "foot_toe_big_left", + "foot_toe_small_left", + "foot_heel_left", + "foot_toe_big_right", + "foot_toe_small_right", + "foot_heel_right", + }); + } + if (whole_body.at("face")) + { + joint_names_2d.insert( + joint_names_2d.end(), + { + "face_jaw_right_1", + "face_jaw_right_2", + "face_jaw_right_3", + "face_jaw_right_4", + "face_jaw_right_5", + "face_jaw_right_6", + "face_jaw_right_7", + "face_jaw_right_8", + "face_jaw_middle", + "face_jaw_left_1", + "face_jaw_left_2", + "face_jaw_left_3", + "face_jaw_left_4", + "face_jaw_left_5", + "face_jaw_left_6", + "face_jaw_left_7", + "face_jaw_left_8", + "face_eyebrow_right_1", + "face_eyebrow_right_2", + "face_eyebrow_right_3", + "face_eyebrow_right_4", + "face_eyebrow_right_5", + "face_eyebrow_left_1", + "face_eyebrow_left_2", + "face_eyebrow_left_3", + "face_eyebrow_left_4", + "face_eyebrow_left_5", + "face_nose_1", + "face_nose_2", + "face_nose_3", + "face_nose_4", + "face_nose_5", + "face_nose_6", + "face_nose_7", + "face_nose_8", + "face_nose_9", + "face_eye_right_1", + "face_eye_right_2", + "face_eye_right_3", + "face_eye_right_4", + "face_eye_right_5", + "face_eye_right_6", + "face_eye_left_1", + "face_eye_left_2", + "face_eye_left_3", + "face_eye_left_4", + "face_eye_left_5", + "face_eye_left_6", + "face_mouth_1", + "face_mouth_2", + "face_mouth_3", + "face_mouth_4", + "face_mouth_5", + "face_mouth_6", + "face_mouth_7", + "face_mouth_8", + "face_mouth_9", + "face_mouth_10", + "face_mouth_11", + "face_mouth_12", + "face_mouth_13", + "face_mouth_14", + "face_mouth_15", + "face_mouth_16", + "face_mouth_17", + "face_mouth_18", + "face_mouth_19", + "face_mouth_20", + }); + } + if (whole_body.at("hands")) + { + joint_names_2d.insert( + joint_names_2d.end(), + { + "hand_wrist_left", + "hand_finger_thumb_left_1", + "hand_finger_thumb_left_2", + "hand_finger_thumb_left_3", + "hand_finger_thumb_left_4", + "hand_finger_index_left_1", + "hand_finger_index_left_2", + "hand_finger_index_left_3", + "hand_finger_index_left_4", + "hand_finger_middle_left_1", + "hand_finger_middle_left_2", + "hand_finger_middle_left_3", + "hand_finger_middle_left_4", + "hand_finger_ring_left_1", + "hand_finger_ring_left_2", + "hand_finger_ring_left_3", + "hand_finger_ring_left_4", + "hand_finger_pinky_left_1", + "hand_finger_pinky_left_2", + "hand_finger_pinky_left_3", + "hand_finger_pinky_left_4", + "hand_wrist_right", + "hand_finger_thumb_right_1", + "hand_finger_thumb_right_2", + "hand_finger_thumb_right_3", + "hand_finger_thumb_right_4", + "hand_finger_index_right_1", + "hand_finger_index_right_2", + "hand_finger_index_right_3", + "hand_finger_index_right_4", + "hand_finger_middle_right_1", + "hand_finger_middle_right_2", + "hand_finger_middle_right_3", + "hand_finger_middle_right_4", + "hand_finger_ring_right_1", + "hand_finger_ring_right_2", + "hand_finger_ring_right_3", + "hand_finger_ring_right_4", + "hand_finger_pinky_right_1", + "hand_finger_pinky_right_2", + "hand_finger_pinky_right_3", + "hand_finger_pinky_right_4", + }); + } + + joint_names_2d.insert( + joint_names_2d.end(), + { + "hip_middle", + "shoulder_middle", + "head", + }); + + return joint_names_2d; + } + + // ============================================================================================= + + cv::Mat bayer2rgb(const cv::Mat &bayer) + { + cv::Mat rgb; + cv::cvtColor(bayer, rgb, cv::COLOR_BayerBG2RGB); + return rgb; + } + + cv::Mat rgb2bayer(const cv::Mat &img) + { + CV_Assert(img.type() == CV_8UC3); + cv::Mat bayer(img.rows, img.cols, CV_8UC1); + + for (int r = 0; r < img.rows; ++r) + { + const uchar *imgData = img.ptr(r); + uchar *bayerRowPtr = bayer.ptr(r); + + for (int c = 0; c < img.cols; ++c) + { + int pixelIndex = 3 * c; + + // Use faster bit operation instead of modulo+if + // Even row, even col => R = 0 + // Even row, odd col => G = 1 + // Odd row, even col => G = 1 + // Odd row, odd col => B = 2 + int row_mod = r & 1; + int col_mod = c & 1; + int component = row_mod + col_mod; + + bayerRowPtr[c] = imgData[pixelIndex + component]; + } + } + + return bayer; + } + + // ============================================================================================= + + inline int find_index(const std::vector<std::string> &vec, const std::string &key) + { + auto it = std::find(vec.begin(), vec.end(), key); + if (it == vec.end()) + { + throw std::runtime_error("Cannot find \"" + key + "\" in joint_names."); + } + return static_cast<int>(std::distance(vec.begin(), it)); + } + + std::vector<std::vector<std::vector<std::array<float, 3>>>> update_keypoints( + const std::vector<std::vector<std::vector<std::array<float, 3>>>> &poses_2d, + const std::vector<std::string> &joint_names, + const std::map<std::string, bool> &whole_body) + { + std::vector<std::vector<std::vector<std::array<float, 3>>>> new_views; + new_views.reserve(poses_2d.size()); + + for (const auto &view : poses_2d) + { + // "view" is a list of bodies => each body is Nx3 + std::vector<std::vector<std::array<float, 3>>> new_bodies; + new_bodies.reserve(view.size()); + + for (const auto &body : view) + { + // 1) Copy first 17 keypoints + std::vector<std::array<float, 3>> new_body; + new_body.insert(new_body.end(), body.begin(), body.begin() + 17); + + // 2) Optionally append extra keypoints + if (whole_body.at("foots")) + { + new_body.insert(new_body.end(), body.begin() + 17, body.begin() + 23); + } + if (whole_body.at("face")) + { + new_body.insert(new_body.end(), body.begin() + 23, body.begin() + 91); + } + if (whole_body.at("hands")) + { + new_body.insert(new_body.end(), body.begin() + 91, body.end()); + } + + // 3) Compute mid_hip + int hlid = find_index(joint_names, "hip_left"); + int hrid = find_index(joint_names, "hip_right"); + float mid_hip_x = 0.5 * (new_body[hlid][0] + new_body[hrid][0]); + float mid_hip_y = 0.5 * (new_body[hlid][1] + new_body[hrid][1]); + float mid_hip_c = std::min(new_body[hlid][2], new_body[hrid][2]); + new_body.push_back({mid_hip_x, mid_hip_y, mid_hip_c}); + + // 4) Compute mid_shoulder + int slid = find_index(joint_names, "shoulder_left"); + int srid = find_index(joint_names, "shoulder_right"); + float mid_shoulder_x = 0.5 * (new_body[slid][0] + new_body[srid][0]); + float mid_shoulder_y = 0.5 * (new_body[slid][1] + new_body[srid][1]); + float mid_shoulder_c = std::min(new_body[slid][2], new_body[srid][2]); + new_body.push_back({mid_shoulder_x, mid_shoulder_y, mid_shoulder_c}); + + // 5) Compute head + int elid = find_index(joint_names, "ear_left"); + int erid = find_index(joint_names, "ear_right"); + float head_x = 0.5 * (new_body[elid][0] + new_body[erid][0]); + float head_y = 0.5 * (new_body[elid][1] + new_body[erid][1]); + float head_c = std::min(new_body[elid][2], new_body[erid][2]); + new_body.push_back({head_x, head_y, head_c}); + + // Add this updated body into new_bodies
+ new_bodies.push_back(new_body); + } + + // Add all updated bodies for this view + new_views.push_back(new_bodies); + } + + return new_views; + } +} diff --git a/scripts/utils_pipeline.py b/scripts/utils_pipeline.py new file mode 100644 index 0000000..b586617 --- /dev/null +++ b/scripts/utils_pipeline.py @@ -0,0 +1,255 @@ +from typing import List + +import cv2 +import numpy as np + +# ================================================================================================== + +def use_whole_body(whole_body: dict) -> bool: + return any((whole_body[k] for k in whole_body)) + +# ================================================================================================== + + +def get_joint_names(whole_body: dict): + + joint_names_2d = [ + "nose", + "eye_left", + "eye_right", + "ear_left", + "ear_right", + "shoulder_left", + "shoulder_right", + "elbow_left", + "elbow_right", + "wrist_left", + "wrist_right", + "hip_left", + "hip_right", + "knee_left", + "knee_right", + "ankle_left", + "ankle_right", + ] + + if whole_body["foots"]: + joint_names_2d.extend( + [ + "foot_toe_big_left", + "foot_toe_small_left", + "foot_heel_left", + "foot_toe_big_right", + "foot_toe_small_right", + "foot_heel_right", + ] + ) + if whole_body["face"]: + joint_names_2d.extend( + [ + "face_jaw_right_1", + "face_jaw_right_2", + "face_jaw_right_3", + "face_jaw_right_4", + "face_jaw_right_5", + "face_jaw_right_6", + "face_jaw_right_7", + "face_jaw_right_8", + "face_jaw_middle", + "face_jaw_left_1", + "face_jaw_left_2", + "face_jaw_left_3", + "face_jaw_left_4", + "face_jaw_left_5", + "face_jaw_left_6", + "face_jaw_left_7", + "face_jaw_left_8", + "face_eyebrow_right_1", + "face_eyebrow_right_2", + "face_eyebrow_right_3", + "face_eyebrow_right_4", + "face_eyebrow_right_5", + "face_eyebrow_left_1", + "face_eyebrow_left_2", + "face_eyebrow_left_3", + "face_eyebrow_left_4", + "face_eyebrow_left_5", + "face_nose_1", + "face_nose_2", + "face_nose_3", + "face_nose_4", + "face_nose_5", + "face_nose_6", + "face_nose_7", + "face_nose_8", + "face_nose_9", + "face_eye_right_1", + "face_eye_right_2", + "face_eye_right_3", + "face_eye_right_4", + "face_eye_right_5", + "face_eye_right_6", + "face_eye_left_1", + "face_eye_left_2", + "face_eye_left_3", + "face_eye_left_4", + "face_eye_left_5", + "face_eye_left_6", + "face_mouth_1", + "face_mouth_2", + "face_mouth_3", + "face_mouth_4", + "face_mouth_5", + "face_mouth_6", + "face_mouth_7", + "face_mouth_8", + "face_mouth_9", + "face_mouth_10", + "face_mouth_11", + "face_mouth_12", + "face_mouth_13", + "face_mouth_14", + "face_mouth_15", + "face_mouth_16", + "face_mouth_17", + "face_mouth_18", + "face_mouth_19", + "face_mouth_20", + ] + ) + if whole_body["hands"]: + joint_names_2d.extend( + [ + "hand_wrist_left", + "hand_finger_thumb_left_1", + "hand_finger_thumb_left_2", + "hand_finger_thumb_left_3", + "hand_finger_thumb_left_4", + "hand_finger_index_left_1", + "hand_finger_index_left_2", + "hand_finger_index_left_3", + "hand_finger_index_left_4", + "hand_finger_middle_left_1", + "hand_finger_middle_left_2", + "hand_finger_middle_left_3", + "hand_finger_middle_left_4", + "hand_finger_ring_left_1", + "hand_finger_ring_left_2", + "hand_finger_ring_left_3", + "hand_finger_ring_left_4", + "hand_finger_pinky_left_1", + "hand_finger_pinky_left_2", + "hand_finger_pinky_left_3", + "hand_finger_pinky_left_4", + "hand_wrist_right", + "hand_finger_thumb_right_1", + "hand_finger_thumb_right_2", + "hand_finger_thumb_right_3", + "hand_finger_thumb_right_4", + "hand_finger_index_right_1", + 
"hand_finger_index_right_2", + "hand_finger_index_right_3", + "hand_finger_index_right_4", + "hand_finger_middle_right_1", + "hand_finger_middle_right_2", + "hand_finger_middle_right_3", + "hand_finger_middle_right_4", + "hand_finger_ring_right_1", + "hand_finger_ring_right_2", + "hand_finger_ring_right_3", + "hand_finger_ring_right_4", + "hand_finger_pinky_right_1", + "hand_finger_pinky_right_2", + "hand_finger_pinky_right_3", + "hand_finger_pinky_right_4", + ] + ) + + joint_names_2d.extend( + [ + "hip_middle", + "shoulder_middle", + "head", + ] + ) + + return joint_names_2d + + +# ================================================================================================== + + +def load_image(path: str): + image = cv2.imread(path, 3) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = np.asarray(image, dtype=np.uint8) + return image + + +# ================================================================================================== + + +def rgb2bayer(img): + bayer = np.zeros((img.shape[0], img.shape[1]), dtype=img.dtype) + bayer[0::2, 0::2] = img[0::2, 0::2, 0] + bayer[0::2, 1::2] = img[0::2, 1::2, 1] + bayer[1::2, 0::2] = img[1::2, 0::2, 1] + bayer[1::2, 1::2] = img[1::2, 1::2, 2] + return bayer + + +def bayer2rgb(bayer): + img = cv2.cvtColor(bayer, cv2.COLOR_BayerBG2RGB) + return img + + +# ================================================================================================== + + +def update_keypoints(poses_2d: list, joint_names: List[str], whole_body: dict) -> list: + new_views = [] + for view in poses_2d: + new_bodies = [] + for body in view: + body = body.tolist() + + new_body = body[:17] + if whole_body["foots"]: + new_body.extend(body[17:23]) + if whole_body["face"]: + new_body.extend(body[23:91]) + if whole_body["hands"]: + new_body.extend(body[91:]) + body = new_body + + hlid = joint_names.index("hip_left") + hrid = joint_names.index("hip_right") + mid_hip = [ + float(((body[hlid][0] + body[hrid][0]) / 2.0)), + float(((body[hlid][1] + body[hrid][1]) / 2.0)), + min(body[hlid][2], body[hrid][2]), + ] + body.append(mid_hip) + + slid = joint_names.index("shoulder_left") + srid = joint_names.index("shoulder_right") + mid_shoulder = [ + float(((body[slid][0] + body[srid][0]) / 2.0)), + float(((body[slid][1] + body[srid][1]) / 2.0)), + min(body[slid][2], body[srid][2]), + ] + body.append(mid_shoulder) + + elid = joint_names.index("ear_left") + erid = joint_names.index("ear_right") + head = [ + float(((body[elid][0] + body[erid][0]) / 2.0)), + float(((body[elid][1] + body[erid][1]) / 2.0)), + min(body[elid][2], body[erid][2]), + ] + body.append(head) + + new_bodies.append(body) + new_views.append(new_bodies) + + return new_views diff --git a/ros/rpt2D_wrapper_cpp/tests/.gitignore b/tests/.gitignore similarity index 100% rename from ros/rpt2D_wrapper_cpp/tests/.gitignore rename to tests/.gitignore diff --git a/ros/README.md b/tests/README.md similarity index 54% rename from ros/README.md rename to tests/README.md index 31ffb69..83a3dc0 100644 --- a/ros/README.md +++ b/tests/README.md @@ -1,28 +1,17 @@ -# ROS-Wrapper +# Tests -Run pose estimator with ros topics as inputs and publish detected poses. +Various module tests -
+### Triangulator -- Build container: ```bash - docker build --progress=plain -t rapidposetriangulation_ros -f ros/dockerfile . + cd /RapidPoseTriangulation/tests/ && python3 test_interface.py && cd .. ``` -- Update or remove the `ROS_DOMAIN_ID` in `docker-compose.yml` to fit your environment - -- Run and test: - ```bash - xhost +; docker compose -f ros/docker-compose.yml up - - docker exec -it ros-test_node-1 bash - export ROS_DOMAIN_ID=18 - ``` - -### Debugging +### Onnx C++ Interface ```bash -cd /RapidPoseTriangulation/ros/rpt2D_wrapper_cpp/tests/ +cd /RapidPoseTriangulation/tests/ g++ -std=c++17 -O3 -march=native -Wall -DCOMPILE_EXAMPLE_MAIN \ $(pkg-config --cflags opencv4) \ @@ -30,7 +19,7 @@ g++ -std=c++17 -O3 -march=native -Wall -DCOMPILE_EXAMPLE_MAIN \ -I /onnxruntime/include/onnxruntime/core/session \ -I /onnxruntime/include/onnxruntime/core/providers/tensorrt \ -L /onnxruntime/build/Linux/Release \ - my_app.cpp \ + test_utils2d.cpp \ -o my_app \ -Wl,--start-group \ -lonnxruntime_providers_tensorrt \ diff --git a/ros/rpt2D_wrapper_cpp/tests/my_app.cpp b/tests/test_utils2d.cpp similarity index 98% rename from ros/rpt2D_wrapper_cpp/tests/my_app.cpp rename to tests/test_utils2d.cpp index b966355..f7d83ad 100644 --- a/ros/rpt2D_wrapper_cpp/tests/my_app.cpp +++ b/tests/test_utils2d.cpp @@ -5,7 +5,7 @@ #include -#include "../src/utils_2d_pose.hpp" +#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp" // ================================================================================================= // ================================================================================================= From c5f190ab35cac6c4ae7d14427656f89a77c1b242 Mon Sep 17 00:00:00 2001 From: Daniel Date: Tue, 21 Jan 2025 15:10:43 +0100 Subject: [PATCH 2/6] Eval skelda datasets with cpp implementation. --- README.md | 21 + scripts/.gitignore | 1 + scripts/test_skelda_dataset_cpp.cpp | 266 ++++++++++++ scripts/test_skelda_dataset_cpp.py | 395 ++++++++++++++++++ ...a_dataset.py => test_skelda_dataset_py.py} | 0 5 files changed, 683 insertions(+) create mode 100644 scripts/.gitignore create mode 100644 scripts/test_skelda_dataset_cpp.cpp create mode 100644 scripts/test_skelda_dataset_cpp.py rename scripts/{test_skelda_dataset.py => test_skelda_dataset_py.py} (100%) diff --git a/README.md b/README.md index 2a010bf..809c85b 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,27 @@ Fast triangulation of multiple persons from multiple camera views. - Build triangulator: ```bash cd /RapidPoseTriangulation/swig/ && make all && cd ../tests/ && python3 test_interface.py && cd .. + + cd /RapidPoseTriangulation/scripts/ && \ + g++ -std=c++17 -fPIC -O3 -march=native -Wall -Werror -flto=auto -fopenmp -fopenmp-simd \ + $(pkg-config --cflags opencv4) \ + -I /RapidPoseTriangulation/rpt/ \ + -I /onnxruntime/include/ \ + -I /onnxruntime/include/onnxruntime/core/session/ \ + -I /onnxruntime/include/onnxruntime/core/providers/tensorrt/ \ + -L /onnxruntime/build/Linux/Release/ \ + test_skelda_dataset_cpp.cpp \ + /RapidPoseTriangulation/rpt/*.cpp \ + -o test_skelda_dataset \ + -Wl,--start-group \ + -lonnxruntime_providers_tensorrt \ + -lonnxruntime_providers_shared \ + -lonnxruntime_providers_cuda \ + -lonnxruntime \ + -Wl,--end-group \ + $(pkg-config --libs opencv4) \ + -Wl,-rpath,/onnxruntime/build/Linux/Release/ \ + && cd .. 
``` - Test with samples: diff --git a/scripts/.gitignore b/scripts/.gitignore new file mode 100644 index 0000000..efd6aff --- /dev/null +++ b/scripts/.gitignore @@ -0,0 +1 @@ +test_skelda_dataset diff --git a/scripts/test_skelda_dataset_cpp.cpp b/scripts/test_skelda_dataset_cpp.cpp new file mode 100644 index 0000000..7f45977 --- /dev/null +++ b/scripts/test_skelda_dataset_cpp.cpp @@ -0,0 +1,266 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// OpenCV +#include + +// JSON library +#include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp" +using json = nlohmann::json; + +#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp" +#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp" +#include "/RapidPoseTriangulation/rpt/interface.hpp" +#include "/RapidPoseTriangulation/rpt/camera.hpp" + +// ================================================================================================= + +static const std::string path_data = "/tmp/rpt/all.json"; +static const std::string path_cfg = "/tmp/rpt/config.json"; + +// ================================================================================================= + +std::vector load_images(json &item) +{ + // Load images + std::vector images; + for (size_t j = 0; j < item["imgpaths"].size(); j++) + { + auto ipath = item["imgpaths"][j].get(); + cv::Mat image = cv::imread(ipath, cv::IMREAD_COLOR); + cv::cvtColor(image, image, cv::COLOR_BGR2RGB); + images.push_back(image); + } + + if (item["dataset_name"] == "human36m") + { + // Since the images don't have the same shape, rescale some of them + for (size_t i = 0; i < images.size(); i++) + { + cv::Mat &img = images[i]; + cv::Size ishape = img.size(); + if (ishape != cv::Size(1000, 1000)) + { + auto cam = item["cameras"][i]; + cam["K"][1][1] = cam["K"][1][1].get() * (1000.0 / ishape.height); + cam["K"][1][2] = cam["K"][1][2].get() * (1000.0 / ishape.height); + cam["K"][0][0] = cam["K"][0][0].get() * (1000.0 / ishape.width); + cam["K"][0][2] = cam["K"][0][2].get() * (1000.0 / ishape.width); + cv::resize(img, img, cv::Size(1000, 1000)); + images[i] = img; + } + } + } + + // Convert image format to Bayer encoding to simulate real camera input + // This also resulted in notably better MPJPE results in most cases, presumbly since the + // demosaicing algorithm from OpenCV is better than the default one from the cameras + for (size_t i = 0; i < images.size(); i++) + { + cv::Mat &img = images[i]; + cv::Mat bayer_image = utils_pipeline::rgb2bayer(img); + images[i] = std::move(bayer_image); + } + + return images; +} + +// ================================================================================================= + +std::string read_file(const std::string &path) +{ + std::ifstream file_stream(path); + if (!file_stream.is_open()) + { + throw std::runtime_error("Unable to open file: " + path); + } + + std::stringstream buffer; + buffer << file_stream.rdbuf(); + return buffer.str(); +} + +void write_file(const std::string &path, const std::string &content) +{ + std::ofstream file_stream(path, std::ios::out | std::ios::binary); + if (!file_stream.is_open()) + { + throw std::runtime_error("Unable to open file for writing: " + path); + } + + file_stream << content; + + if (!file_stream) + { + throw std::runtime_error("Error occurred while writing to file: " + path); + } + file_stream.close(); +} + +// ================================================================================================= + +int main(int argc, char **argv) +{ + // 
+    // Load the files
+    auto dataset = json::parse(read_file(path_data));
+    auto config = json::parse(read_file(path_cfg));
+
+    // Load the configuration
+    const std::map<std::string, bool> whole_body = config["whole_body"];
+    const float min_bbox_score = config["min_bbox_score"];
+    const float min_bbox_area = config["min_bbox_area"];
+    const bool batch_poses = config["batch_poses"];
+    const std::vector<std::string> joint_names_2d = utils_pipeline::get_joint_names(whole_body);
+    const float min_match_score = config["min_match_score"];
+    const size_t min_group_size = config["min_group_size"];
+    const int take_interval = config["take_interval"];
+
+    // Load 2D model
+    bool use_wb = utils_pipeline::use_whole_body(whole_body);
+    std::unique_ptr kpt_model =
+        std::make_unique(
+            use_wb, min_bbox_score, min_bbox_area, batch_poses);
+
+    // Load 3D model
+    std::unique_ptr<Triangulator> tri_model = std::make_unique<Triangulator>(
+        min_match_score, min_group_size);
+
+    // Timers
+    size_t time_count = dataset.size();
+    double time_image = 0.0;
+    double time_pose2d = 0.0;
+    double time_pose3d = 0.0;
+    size_t print_steps = (size_t)std::floor((float)time_count / 100.0f);
+    if (print_steps == 0)
+    {
+        print_steps = 1; // avoid a modulo-by-zero on datasets with fewer than 100 items
+    }
+
+    std::cout << "Running predictions: |";
+    size_t bar_width = (size_t)std::ceil((float)time_count / (float)print_steps) - 2;
+    for (size_t i = 0; i < bar_width; i++)
+    {
+        std::cout << "-";
+    }
+    std::cout << "|" << std::endl;
+
+    // Calculate 2D poses [items, views, persons, joints, 3]
+    std::vector<std::vector<std::vector<std::vector<std::array<float, 3>>>>> all_poses_2d;
+    std::cout << "Calculating 2D poses: ";
+    for (size_t i = 0; i < dataset.size(); i++)
+    {
+        if (i % print_steps == 0)
+        {
+            std::cout << "#" << std::flush;
+        }
+        std::chrono::duration<double> elapsed;
+        auto &item = dataset[i];
+
+        // Load images
+        auto stime = std::chrono::high_resolution_clock::now();
+        std::vector<cv::Mat> images = load_images(item);
+        elapsed = std::chrono::high_resolution_clock::now() - stime;
+        time_image += elapsed.count();
+
+        // Predict 2D poses
+        stime = std::chrono::high_resolution_clock::now();
+        for (size_t j = 0; j < images.size(); j++)
+        {
+            cv::Mat &img = images[j];
+            cv::Mat rgb = utils_pipeline::bayer2rgb(img);
+            images[j] = std::move(rgb);
+        }
+        auto poses_2d_all = kpt_model->predict(images);
+        auto poses_2d_upd = utils_pipeline::update_keypoints(
+            poses_2d_all, joint_names_2d, whole_body);
+        elapsed = std::chrono::high_resolution_clock::now() - stime;
+        time_pose2d += elapsed.count();
+
+        all_poses_2d.push_back(std::move(poses_2d_upd));
+    }
+    std::cout << std::endl;
+
+    // Calculate 3D poses [items, persons, joints, 4]
+    std::vector<std::vector<std::vector<std::array<float, 4>>>> all_poses_3d;
+    std::vector<std::string> all_ids;
+    std::string old_scene = "";
+    int old_id = -1;
+    std::cout << "Calculating 3D poses: ";
+    for (size_t i = 0; i < dataset.size(); i++)
+    {
+        if (i % print_steps == 0)
+        {
+            std::cout << "#" << std::flush;
+        }
+        std::chrono::duration<double> elapsed;
+        auto &item = dataset[i];
+        auto &poses_2d = all_poses_2d[i];
+
+        if (old_scene != item["scene"] || old_id + take_interval < item["index"])
+        {
+            // Reset last poses if scene changes
+            tri_model->reset();
+            old_scene = item["scene"];
+        }
+
+        auto stime = std::chrono::high_resolution_clock::now();
+        std::vector<Camera> cameras;
+        for (size_t j = 0; j < item["cameras"].size(); j++)
+        {
+            auto &cam = item["cameras"][j];
+            Camera camera;
+            camera.name = cam["name"].get<std::string>();
+            camera.K = cam["K"].get<std::array<std::array<float, 3>, 3>>();
+            camera.DC = cam["DC"].get<std::vector<float>>();
+            camera.R = cam["R"].get<std::array<std::array<float, 3>, 3>>();
+            camera.T = cam["T"].get<std::array<std::array<float, 1>, 3>>();
+            camera.width = cam["width"].get<int>();
+            camera.height = cam["height"].get<int>();
+            camera.type = cam["type"].get<std::string>();
+            cameras.push_back(camera);
+        }
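+        // roomparams bundles [room_size, room_center], mirroring the
+        // [label["room_size"], label["room_center"]] pair used by the Python pipeline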
+        std::array<std::array<float, 3>, 2> roomparams = {
+            item["room_size"].get<std::array<float, 3>>(),
+            item["room_center"].get<std::array<float, 3>>()};
+
+        auto poses_3d = tri_model->triangulate_poses(poses_2d, cameras, roomparams, joint_names_2d);
+        elapsed = std::chrono::high_resolution_clock::now() - stime;
+        time_pose3d += elapsed.count();
+
+        all_poses_3d.push_back(std::move(poses_3d));
+        all_ids.push_back(item["id"].get<std::string>());
+    }
+    std::cout << std::endl;
+
+    // Print timing stats
+    std::cout << "\nMetrics:" << std::endl;
+    tri_model->print_stats();
+    size_t warmup = 10;
+    double avg_time_image = time_image / (time_count - warmup);
+    double avg_time_pose2d = time_pose2d / (time_count - warmup);
+    double avg_time_pose3d = time_pose3d / (time_count - warmup);
+    double fps = 1.0 / (avg_time_pose2d + avg_time_pose3d);
+    std::cout << "{\n"
+              << "  \"img_loading\": " << avg_time_image << ",\n"
+              << "  \"avg_time_2d\": " << avg_time_pose2d << ",\n"
+              << "  \"avg_time_3d\": " << avg_time_pose3d << ",\n"
+              << "  \"fps\": " << fps << "\n"
+              << "}" << std::endl;
+
+    // Store the results as json
+    json all_results;
+    all_results["all_ids"] = all_ids;
+    all_results["all_poses_2d"] = all_poses_2d;
+    all_results["all_poses_3d"] = all_poses_3d;
+    all_results["joint_names_2d"] = joint_names_2d;
+    all_results["joint_names_3d"] = joint_names_2d;
+
+    // Save the results
+    std::string path_results = "/tmp/rpt/results.json";
+    write_file(path_results, all_results.dump(0));
+
+    return 0;
+}
\ No newline at end of file
diff --git a/scripts/test_skelda_dataset_cpp.py b/scripts/test_skelda_dataset_cpp.py
new file mode 100644
index 0000000..1fbdf9d
--- /dev/null
+++ b/scripts/test_skelda_dataset_cpp.py
@@ -0,0 +1,395 @@
+import json
+import os
+
+import utils_pipeline
+from skelda import evals
+from skelda.writers import json_writer
+
+# ==================================================================================================
+
+whole_body = {
+    "foots": False,
+    "face": False,
+    "hands": False,
+}
+
+dataset_use = "human36m"
+# dataset_use = "panoptic"
+# dataset_use = "mvor"
+# dataset_use = "shelf"
+# dataset_use = "campus"
+# dataset_use = "ikeaasm"
+# dataset_use = "chi3d"
+# dataset_use = "tsinghua"
+# dataset_use = "human36m_wb"
+# dataset_use = "egohumans_tagging"
+# dataset_use = "egohumans_legoassemble"
+# dataset_use = "egohumans_fencing"
+# dataset_use = "egohumans_basketball"
+# dataset_use = "egohumans_volleyball"
+# dataset_use = "egohumans_badminton"
+# dataset_use = "egohumans_tennis"
+
+
+# Describes the minimum area as a fraction of the image size for a 2D bounding box to be considered
+# If the persons are small in the image, use a lower value
+default_min_bbox_area = 0.1 * 0.1
+
+# Describes how confident a 2D bounding box needs to be to be considered
+# If the persons are small in the image, or poorly recognizable, use a lower value
+default_min_bbox_score = 0.3
+
+# Describes how well two 2D poses need to match each other to create a valid triangulation
+# If the quality of the 2D detections is poor, use a lower value
+default_min_match_score = 0.94
+
+# Describes the minimum number of camera pairs that need to detect the same person
+# If the number of cameras is high, and the views are not occluded, use a higher value
+default_min_group_size = 1
+
+# Batch poses per image for faster processing
+# If most of the time only one person is in an image, disable it, since batching is
+# slightly slower in that case
+default_batch_poses = True
+
+datasets = {
+    "human36m": {
+        "path": "/datasets/human36m/skelda/pose_test.json",
+        "take_interval": 5,
+        "min_match_score": 0.95,
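+        # Keys here override the default_* values above; main() resolves them via
+        # datasets[dataset_use].get(key, default)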
"min_group_size": 1, + "min_bbox_score": 0.4, + "min_bbox_area": 0.1 * 0.1, + "batch_poses": False, + }, + "panoptic": { + "path": "/datasets/panoptic/skelda/test.json", + "cams": ["00_03", "00_06", "00_12", "00_13", "00_23"], + # "cams": ["00_03", "00_06", "00_12"], + # "cams": ["00_03", "00_06", "00_12", "00_13", "00_23", "00_15", "00_10", "00_21", "00_09", "00_01"], + "take_interval": 3, + "min_match_score": 0.95, + "use_scenes": ["160906_pizza1", "160422_haggling1", "160906_ian5"], + "min_group_size": 1, + # "min_group_size": 4, + "min_bbox_area": 0.05 * 0.05, + }, + "mvor": { + "path": "/datasets/mvor/skelda/all.json", + "take_interval": 1, + "with_depth": False, + "min_match_score": 0.85, + "min_bbox_score": 0.25, + }, + "campus": { + "path": "/datasets/campus/skelda/test.json", + "take_interval": 1, + "min_match_score": 0.90, + "min_bbox_score": 0.5, + }, + "shelf": { + "path": "/datasets/shelf/skelda/test.json", + "take_interval": 1, + "min_match_score": 0.96, + "min_group_size": 2, + }, + "ikeaasm": { + "path": "/datasets/ikeaasm/skelda/test.json", + "take_interval": 2, + "min_match_score": 0.92, + "min_bbox_score": 0.20, + }, + "chi3d": { + "path": "/datasets/chi3d/skelda/all.json", + "take_interval": 5, + }, + "tsinghua": { + "path": "/datasets/tsinghua/skelda/test.json", + "take_interval": 3, + "min_match_score": 0.95, + "min_group_size": 2, + }, + "human36m_wb": { + "path": "/datasets/human36m/skelda/wb/test.json", + "take_interval": 100, + "min_bbox_score": 0.4, + "batch_poses": False, + }, + "egohumans_tagging": { + "path": "/datasets/egohumans/skelda/all.json", + "take_interval": 2, + "subset": "tagging", + "min_group_size": 2, + "min_bbox_score": 0.2, + "min_bbox_area": 0.05 * 0.05, + }, + "egohumans_legoassemble": { + "path": "/datasets/egohumans/skelda/all.json", + "take_interval": 2, + "subset": "legoassemble", + "min_group_size": 2, + }, + "egohumans_fencing": { + "path": "/datasets/egohumans/skelda/all.json", + "take_interval": 2, + "subset": "fencing", + "min_group_size": 7, + "min_bbox_score": 0.5, + "min_bbox_area": 0.05 * 0.05, + }, + "egohumans_basketball": { + "path": "/datasets/egohumans/skelda/all.json", + "take_interval": 2, + "subset": "basketball", + "min_group_size": 7, + "min_bbox_score": 0.25, + "min_bbox_area": 0.025 * 0.025, + }, + "egohumans_volleyball": { + "path": "/datasets/egohumans/skelda/all.json", + "take_interval": 2, + "subset": "volleyball", + "min_group_size": 11, + "min_bbox_score": 0.25, + "min_bbox_area": 0.05 * 0.05, + }, + "egohumans_badminton": { + "path": "/datasets/egohumans/skelda/all.json", + "take_interval": 2, + "subset": "badminton", + "min_group_size": 7, + "min_bbox_score": 0.25, + "min_bbox_area": 0.05 * 0.05, + }, + "egohumans_tennis": { + "path": "/datasets/egohumans/skelda/all.json", + "take_interval": 2, + "subset": "tennis", + "min_group_size": 11, + "min_bbox_area": 0.025 * 0.025, + }, +} + +joint_names_2d = utils_pipeline.get_joint_names(whole_body) +joint_names_3d = list(joint_names_2d) +eval_joints = [ + "head", + "shoulder_left", + "shoulder_right", + "elbow_left", + "elbow_right", + "wrist_left", + "wrist_right", + "hip_left", + "hip_right", + "knee_left", + "knee_right", + "ankle_left", + "ankle_right", +] +if dataset_use == "human36m": + eval_joints[eval_joints.index("head")] = "nose" +if dataset_use == "panoptic": + eval_joints[eval_joints.index("head")] = "nose" +if dataset_use == "human36m_wb": + if utils_pipeline.use_whole_body(whole_body): + eval_joints = list(joint_names_2d) + else: + 
eval_joints[eval_joints.index("head")] = "nose" + +# output_dir = "/RapidPoseTriangulation/data/testoutput/" +output_dir = "" + +# ================================================================================================== + + +def load_json(path: str): + with open(path, "r", encoding="utf-8") as file: + data = json.load(file) + return data + + +def save_json(data: dict, path: str): + with open(path, "w+", encoding="utf-8") as file: + json.dump(data, file, indent=0) + + +# ================================================================================================== + + +def load_labels(dataset: dict): + """Load labels by dataset description""" + + if "panoptic" in dataset: + labels = load_json(dataset["panoptic"]["path"]) + labels = [lb for i, lb in enumerate(labels) if i % 1500 < 90] + + # Filter by maximum number of persons + labels = [l for l in labels if len(l["bodies3D"]) <= 10] + + # Filter scenes + if "use_scenes" in dataset["panoptic"]: + labels = [ + l for l in labels if l["scene"] in dataset["panoptic"]["use_scenes"] + ] + + # Filter cameras + if not "cameras_depth" in labels[0]: + for label in labels: + for i, cam in reversed(list(enumerate(label["cameras"]))): + if cam["name"] not in dataset["panoptic"]["cams"]: + label["cameras"].pop(i) + label["imgpaths"].pop(i) + + elif "human36m" in dataset: + labels = load_json(dataset["human36m"]["path"]) + labels = [lb for lb in labels if lb["subject"] == "S9"] + labels = [lb for i, lb in enumerate(labels) if i % 4000 < 150] + + for label in labels: + label.pop("action") + label.pop("frame") + + elif "mvor" in dataset: + labels = load_json(dataset["mvor"]["path"]) + + # Rename keys + for label in labels: + label["cameras_color"] = label["cameras"] + label["imgpaths_color"] = label["imgpaths"] + + elif "ikeaasm" in dataset: + labels = load_json(dataset["ikeaasm"]["path"]) + cams0 = str(labels[0]["cameras"]) + labels = [lb for lb in labels if str(lb["cameras"]) == cams0] + + elif "shelf" in dataset: + labels = load_json(dataset["shelf"]["path"]) + labels = [lb for lb in labels if "test" in lb["splits"]] + + elif "campus" in dataset: + labels = load_json(dataset["campus"]["path"]) + labels = [lb for lb in labels if "test" in lb["splits"]] + + elif "tsinghua" in dataset: + labels = load_json(dataset["tsinghua"]["path"]) + labels = [lb for lb in labels if "test" in lb["splits"]] + labels = [lb for lb in labels if lb["seq"] == "seq_1"] + labels = [lb for i, lb in enumerate(labels) if i % 300 < 90] + + for label in labels: + label["bodyids"] = list(range(len(label["bodies3D"]))) + + elif "chi3d" in dataset: + labels = load_json(dataset["chi3d"]["path"]) + labels = [lb for lb in labels if lb["setup"] == "s03"] + labels = [lb for i, lb in enumerate(labels) if i % 2000 < 150] + + elif "human36m_wb" in dataset: + labels = load_json(dataset["human36m_wb"]["path"]) + + elif any(("egohumans" in key for key in dataset)): + labels = load_json(dataset[dataset_use]["path"]) + labels = [lb for lb in labels if "test" in lb["splits"]] + labels = [lb for lb in labels if dataset[dataset_use]["subset"] in lb["seq"]] + if dataset[dataset_use]["subset"] in ["volleyball", "tennis"]: + labels = [lb for i, lb in enumerate(labels) if i % 150 < 60] + + else: + raise ValueError("Dataset not available") + + # Optionally drop samples to speed up train/eval + if "take_interval" in dataset: + take_interval = dataset["take_interval"] + if take_interval > 1: + labels = [l for i, l in enumerate(labels) if i % take_interval == 0] + + # Add default values + for 
label in labels: + if "scene" not in label: + label["scene"] = "default" + for cam in label["cameras"]: + if not "type" in cam: + cam["type"] = "pinhole" + + return labels + + +# ================================================================================================== + + +def main(): + global joint_names_3d, eval_joints + + print("Loading dataset ...") + labels = load_labels( + { + dataset_use: datasets[dataset_use], + "take_interval": datasets[dataset_use]["take_interval"], + } + ) + + # Print a dataset sample for debugging + print(labels[0]) + + # Save dataset + tmp_export_dir = "/tmp/rpt/" + for label in labels: + if "splits" in label: + label.pop("splits") + json_writer.save_dataset(labels, tmp_export_dir) + + # Load dataset specific parameters + min_match_score = datasets[dataset_use].get( + "min_match_score", default_min_match_score + ) + min_group_size = datasets[dataset_use].get("min_group_size", default_min_group_size) + min_bbox_score = datasets[dataset_use].get("min_bbox_score", default_min_bbox_score) + min_bbox_area = datasets[dataset_use].get("min_bbox_area", default_min_bbox_area) + batch_poses = datasets[dataset_use].get("batch_poses", default_batch_poses) + + # Save config + config_path = tmp_export_dir + "config.json" + config = { + "min_match_score": min_match_score, + "min_group_size": min_group_size, + "min_bbox_score": min_bbox_score, + "min_bbox_area": min_bbox_area, + "batch_poses": batch_poses, + "whole_body": whole_body, + "take_interval": datasets[dataset_use]["take_interval"], + } + save_json(config, config_path) + + # Call the CPP binary + os.system("/RapidPoseTriangulation/scripts/test_skelda_dataset") + + # Load the results + print("Loading exports ...") + res_path = tmp_export_dir + "results.json" + results = load_json(res_path) + all_poses_3d = results["all_poses_3d"] + all_ids = results["all_ids"] + joint_names_3d = results["joint_names_3d"] + + # Run evaluation + _ = evals.mpjpe.run_eval( + labels, + all_poses_3d, + all_ids, + joint_names_net=joint_names_3d, + joint_names_use=eval_joints, + save_error_imgs=output_dir, + ) + _ = evals.pcp.run_eval( + labels, + all_poses_3d, + all_ids, + joint_names_net=joint_names_3d, + joint_names_use=eval_joints, + replace_head_with_nose=True, + ) + +# ================================================================================================== + +if __name__ == "__main__": + main() diff --git a/scripts/test_skelda_dataset.py b/scripts/test_skelda_dataset_py.py similarity index 100% rename from scripts/test_skelda_dataset.py rename to scripts/test_skelda_dataset_py.py From 29c072400f44b96b8a0f132fbdebf0fe33419aa8 Mon Sep 17 00:00:00 2001 From: Daniel Date: Tue, 21 Jan 2025 16:23:19 +0100 Subject: [PATCH 3/6] Mixed updates and improvements. --- README.md | 12 +++--- .../rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp | 12 +++--- rpt/triangulator.cpp | 29 +++++++------- scripts/.gitignore | 1 - scripts/test_skelda_dataset_cpp.cpp | 17 ++++---- scripts/test_skelda_dataset_cpp.py | 2 +- scripts/test_skelda_dataset_py.py | 10 ++--- scripts/utils_2d_pose.hpp | 40 ++----------------- scripts/utils_2d_pose.py | 1 + tests/.gitignore | 1 - tests/README.md | 6 +-- 11 files changed, 49 insertions(+), 82 deletions(-) delete mode 100644 scripts/.gitignore delete mode 100644 tests/.gitignore diff --git a/README.md b/README.md index 809c85b..169f5fb 100644 --- a/README.md +++ b/README.md @@ -32,16 +32,16 @@ Fast triangulation of multiple persons from multiple camera views. 
cd /RapidPoseTriangulation/swig/ && make all && cd ../tests/ && python3 test_interface.py && cd .. cd /RapidPoseTriangulation/scripts/ && \ - g++ -std=c++17 -fPIC -O3 -march=native -Wall -Werror -flto=auto -fopenmp -fopenmp-simd \ - $(pkg-config --cflags opencv4) \ + g++ -std=c++2a -fPIC -O3 -march=native -Wall -Werror -flto=auto -fopenmp -fopenmp-simd \ -I /RapidPoseTriangulation/rpt/ \ - -I /onnxruntime/include/ \ - -I /onnxruntime/include/onnxruntime/core/session/ \ - -I /onnxruntime/include/onnxruntime/core/providers/tensorrt/ \ + -isystem /usr/include/opencv4/ \ + -isystem /onnxruntime/include/ \ + -isystem /onnxruntime/include/onnxruntime/core/session/ \ + -isystem /onnxruntime/include/onnxruntime/core/providers/tensorrt/ \ -L /onnxruntime/build/Linux/Release/ \ test_skelda_dataset_cpp.cpp \ /RapidPoseTriangulation/rpt/*.cpp \ - -o test_skelda_dataset \ + -o test_skelda_dataset.bin \ -Wl,--start-group \ -lonnxruntime_providers_tensorrt \ -lonnxruntime_providers_shared \ diff --git a/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp b/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp index f65f958..a8abc89 100644 --- a/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp +++ b/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp @@ -1,12 +1,12 @@ +#include #include +#include +#include #include +#include #include #include #include -#include -#include -#include -#include // ROS2 #include @@ -14,15 +14,15 @@ #include // OpenCV / cv_bridge -#include #include +#include // JSON library #include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp" using json = nlohmann::json; -#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp" #include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp" +#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp" // ================================================================================================= diff --git a/rpt/triangulator.cpp b/rpt/triangulator.cpp index f0d1662..91c1167 100644 --- a/rpt/triangulator.cpp +++ b/rpt/triangulator.cpp @@ -581,20 +581,21 @@ void TriangulatorInternal::reset() void TriangulatorInternal::print_stats() { - std::cout << "Triangulator statistics:" << std::endl; - std::cout << " Number of calls: " << num_calls << std::endl; - std::cout << " Init time: " << init_time / num_calls << std::endl; - std::cout << " Undistort time: " << undistort_time / num_calls << std::endl; - std::cout << " Project time: " << project_time / num_calls << std::endl; - std::cout << " Match time: " << match_time / num_calls << std::endl; - std::cout << " Pairs time: " << pairs_time / num_calls << std::endl; - std::cout << " Pair scoring time: " << pair_scoring_time / num_calls << std::endl; - std::cout << " Grouping time: " << grouping_time / num_calls << std::endl; - std::cout << " Full time: " << full_time / num_calls << std::endl; - std::cout << " Merge time: " << merge_time / num_calls << std::endl; - std::cout << " Post time: " << post_time / num_calls << std::endl; - std::cout << " Convert time: " << convert_time / num_calls << std::endl; - std::cout << " Total time: " << total_time / num_calls << std::endl; + std::cout << "{" << std::endl; + std::cout << " \"triangulator_calls\": " << num_calls << "," << std::endl; + std::cout << " \"init_time\": " << init_time / num_calls << "," << std::endl; + std::cout << " \"undistort_time\": " << undistort_time / num_calls << "," << std::endl; + std::cout << " \"project_time\": " << project_time / num_calls << "," << std::endl; + std::cout << " \"match_time\": " << match_time / 
num_calls << "," << std::endl; + std::cout << " \"pairs_time\": " << pairs_time / num_calls << "," << std::endl; + std::cout << " \"pair_scoring_time\": " << pair_scoring_time / num_calls << "," << std::endl; + std::cout << " \"grouping_time\": " << grouping_time / num_calls << "," << std::endl; + std::cout << " \"full_time\": " << full_time / num_calls << "," << std::endl; + std::cout << " \"merge_time\": " << merge_time / num_calls << "," << std::endl; + std::cout << " \"post_time\": " << post_time / num_calls << "," << std::endl; + std::cout << " \"convert_time\": " << convert_time / num_calls << "," << std::endl; + std::cout << " \"total_time\": " << total_time / num_calls << std::endl; + std::cout << "}" << std::endl; } // ================================================================================================= diff --git a/scripts/.gitignore b/scripts/.gitignore deleted file mode 100644 index efd6aff..0000000 --- a/scripts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -test_skelda_dataset diff --git a/scripts/test_skelda_dataset_cpp.cpp b/scripts/test_skelda_dataset_cpp.cpp index 7f45977..8e3bcaf 100644 --- a/scripts/test_skelda_dataset_cpp.cpp +++ b/scripts/test_skelda_dataset_cpp.cpp @@ -1,12 +1,12 @@ #include -#include -#include -#include #include -#include #include +#include +#include #include #include +#include +#include // OpenCV #include @@ -15,10 +15,10 @@ #include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp" using json = nlohmann::json; -#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp" -#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp" -#include "/RapidPoseTriangulation/rpt/interface.hpp" #include "/RapidPoseTriangulation/rpt/camera.hpp" +#include "/RapidPoseTriangulation/rpt/interface.hpp" +#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp" +#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp" // ================================================================================================= @@ -232,12 +232,12 @@ int main(int argc, char **argv) all_poses_3d.push_back(std::move(poses_3d)); all_ids.push_back(item["id"].get()); + old_id = item["index"]; } std::cout << std::endl; // Print timing stats std::cout << "\nMetrics:" << std::endl; - tri_model->print_stats(); size_t warmup = 10; double avg_time_image = time_image / (time_count - warmup); double avg_time_pose2d = time_pose2d / (time_count - warmup); @@ -249,6 +249,7 @@ int main(int argc, char **argv) << " \"avg_time_3d\": " << avg_time_pose3d << ",\n" << " \"fps\": " << fps << "\n" << "}" << std::endl; + tri_model->print_stats(); // Store the results as json json all_results; diff --git a/scripts/test_skelda_dataset_cpp.py b/scripts/test_skelda_dataset_cpp.py index 1fbdf9d..f601736 100644 --- a/scripts/test_skelda_dataset_cpp.py +++ b/scripts/test_skelda_dataset_cpp.py @@ -361,7 +361,7 @@ def main(): save_json(config, config_path) # Call the CPP binary - os.system("/RapidPoseTriangulation/scripts/test_skelda_dataset") + os.system("/RapidPoseTriangulation/scripts/test_skelda_dataset.bin") # Load the results print("Loading exports ...") diff --git a/scripts/test_skelda_dataset_py.py b/scripts/test_skelda_dataset_py.py index 00e8102..501d1a2 100644 --- a/scripts/test_skelda_dataset_py.py +++ b/scripts/test_skelda_dataset_py.py @@ -330,7 +330,9 @@ def main(): # Load 2D pose model if utils_pipeline.use_whole_body(whole_body): - kpt_model = utils_2d_pose.load_wb_model(min_bbox_score, min_bbox_area, batch_poses) + kpt_model = utils_2d_pose.load_wb_model( + 
min_bbox_score, min_bbox_area, batch_poses + ) else: kpt_model = utils_2d_pose.load_model(min_bbox_score, min_bbox_area, batch_poses) @@ -446,10 +448,7 @@ def main(): all_ids.append(label["id"]) times[i][2] = time_3d - # Print per-step triangulation timings - print("") - triangulator.print_stats() - + # Print per-step timings warmup_iters = 10 if len(times) > warmup_iters: times = times[warmup_iters:] @@ -464,6 +463,7 @@ def main(): } print("\nMetrics:") print(json.dumps(tstats, indent=2)) + triangulator.print_stats() _ = evals.mpjpe.run_eval( labels, diff --git a/scripts/utils_2d_pose.hpp b/scripts/utils_2d_pose.hpp index fa97ecb..b99a0ff 100644 --- a/scripts/utils_2d_pose.hpp +++ b/scripts/utils_2d_pose.hpp @@ -1,13 +1,13 @@ #pragma once #include -#include -#include #include +#include +#include -#include #include #include +#include #include // ================================================================================================= @@ -646,27 +646,10 @@ namespace utils_2d_pose std::vector> RTMDet::call(const cv::Mat &image) { - auto stime = std::chrono::high_resolution_clock::now(); - std::chrono::duration elapsed; - cv::Mat preprocessed = preprocess(image); std::vector inputs = {preprocessed}; - - elapsed = std::chrono::high_resolution_clock::now() - stime; - std::cout << "Preprocess time: " << elapsed.count() << "s\n"; - stime = std::chrono::high_resolution_clock::now(); - auto results = call_by_image(inputs); - - elapsed = std::chrono::high_resolution_clock::now() - stime; - std::cout << "Inference time: " << elapsed.count() << "s\n"; - stime = std::chrono::high_resolution_clock::now(); - auto outputs = postprocess(results, image); - - elapsed = std::chrono::high_resolution_clock::now() - stime; - std::cout << "Postprocess time: " << elapsed.count() << "s\n"; - return outputs; } @@ -818,26 +801,9 @@ namespace utils_2d_pose std::vector>> RTMPose::call( const cv::Mat &image, const std::vector> &bboxes) { - auto stime = std::chrono::high_resolution_clock::now(); - std::chrono::duration elapsed; - std::vector inputs = preprocess(image, bboxes); - - elapsed = std::chrono::high_resolution_clock::now() - stime; - std::cout << "Preprocess time: " << elapsed.count() << "s\n"; - stime = std::chrono::high_resolution_clock::now(); - auto results = call_by_image(inputs); - - elapsed = std::chrono::high_resolution_clock::now() - stime; - std::cout << "Inference time: " << elapsed.count() << "s\n"; - stime = std::chrono::high_resolution_clock::now(); - auto outputs = postprocess(results, image, bboxes); - - elapsed = std::chrono::high_resolution_clock::now() - stime; - std::cout << "Postprocess time: " << elapsed.count() << "s\n"; - return outputs; } diff --git a/scripts/utils_2d_pose.py b/scripts/utils_2d_pose.py index 7b364e3..601518d 100644 --- a/scripts/utils_2d_pose.py +++ b/scripts/utils_2d_pose.py @@ -20,6 +20,7 @@ class BaseModel(ABC): raise FileNotFoundError("File not found:", model_path) if model_path.endswith(".onnx"): + print("Loading model:", model_path) self.init_onnxruntime(model_path) self.runtime = "ort" else: diff --git a/tests/.gitignore b/tests/.gitignore deleted file mode 100644 index da9ca31..0000000 --- a/tests/.gitignore +++ /dev/null @@ -1 +0,0 @@ -my_app diff --git a/tests/README.md b/tests/README.md index 83a3dc0..d6a791c 100644 --- a/tests/README.md +++ b/tests/README.md @@ -13,14 +13,14 @@ Various module tests ```bash cd /RapidPoseTriangulation/tests/ -g++ -std=c++17 -O3 -march=native -Wall -DCOMPILE_EXAMPLE_MAIN \ +g++ -std=c++17 -O3 -march=native -Wall \ 
   $(pkg-config --cflags opencv4) \
   -I /onnxruntime/include \
   -I /onnxruntime/include/onnxruntime/core/session \
   -I /onnxruntime/include/onnxruntime/core/providers/tensorrt \
   -L /onnxruntime/build/Linux/Release \
   test_utils2d.cpp \
-  -o my_app \
+  -o my_app.bin \
   -Wl,--start-group \
   -lonnxruntime_providers_tensorrt \
   -lonnxruntime_providers_shared \
@@ -30,5 +30,5 @@ g++ -std=c++17 -O3 -march=native -Wall -DCOMPILE_EXAMPLE_MAIN \
   $(pkg-config --libs opencv4) \
   -Wl,-rpath,/onnxruntime/build/Linux/Release

-./my_app
+./my_app.bin
 ```

From 2ba9e096ac9585f576ce18851a47fd81c75d908c Mon Sep 17 00:00:00 2001
From: Daniel
Date: Tue, 21 Jan 2025 17:15:33 +0100
Subject: [PATCH 4/6] Updated image callback to directly calculate poses.

---
 .../rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp   | 144 +++++-----------
 1 file changed, 53 insertions(+), 91 deletions(-)

diff --git a/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp b/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp
index a8abc89..cc5b627 100644
--- a/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp
+++ b/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp
@@ -3,9 +3,7 @@
 #include
 #include
 #include
 #include
-#include
 #include
-#include
 #include

 // ROS2
 #include
@@ -49,9 +47,7 @@ public:
     Rpt2DWrapperNode(const std::string &node_name) : Node(node_name)
     {
-        this->stop_flag = false;
-        this->last_input_image = cv::Mat();
-        this->last_input_time = 0.0;
+        this->is_busy = false;

         // QoS
         rclcpp::QoS qos_profile(1);
@@ -61,7 +57,7 @@
         // Setup subscriber
         image_sub_ = this->create_subscription<sensor_msgs::msg::Image>(
             img_input_topic, qos_profile,
-            std::bind(&Rpt2DWrapperNode::callbackImages, this, std::placeholders::_1));
+            std::bind(&Rpt2DWrapperNode::callback_images, this, std::placeholders::_1));

         // Setup publisher
         pose_pub_ = this->create_publisher<std_msgs::msg::String>(pose_out_topic, qos_profile);
@@ -72,48 +68,20 @@
             use_wb, min_bbox_score, min_bbox_area, batch_poses);

         RCLCPP_INFO(this->get_logger(), "Finished initialization of pose estimator.");
-
-        // Start background prediction thread
-        model_thread = std::thread(&Rpt2DWrapperNode::callbackWrapper, this);
-    }
-
-    ~Rpt2DWrapperNode()
-    {
-        stop_flag = true;
-        if (model_thread.joinable())
-        {
-            model_thread.join();
-        }
     }

 private:
     rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr image_sub_;
     rclcpp::Publisher<std_msgs::msg::String>::SharedPtr pose_pub_;
+    std::atomic<bool> is_busy;

     // Pose model pointer
     std::unique_ptr kpt_model;
     const std::vector<std::string> joint_names_2d = utils_pipeline::get_joint_names(whole_body);

-    // Threading
-    std::thread model_thread;
-    std::mutex mutex;
-    std::atomic<bool> stop_flag;
+    void callback_images(const sensor_msgs::msg::Image::SharedPtr msg);

-    cv::Mat last_input_image;
-    double last_input_time;
-
-    void callbackImages(const sensor_msgs::msg::Image::SharedPtr msg);
-    void callbackModel();
-
-    void callbackWrapper()
-    {
-        using namespace std::chrono_literals;
-        while (!stop_flag)
-        {
-            callbackModel();
-            std::this_thread::sleep_for(std::chrono::microseconds(100));
-        }
-    }
+    std::vector<std::vector<std::array<float, 3>>> call_model(const cv::Mat &image);

     void publish(const json &data)
     {
@@ -125,8 +93,16 @@

 // =================================================================================================

-void Rpt2DWrapperNode::callbackImages(const sensor_msgs::msg::Image::SharedPtr msg)
+void Rpt2DWrapperNode::callback_images(const sensor_msgs::msg::Image::SharedPtr msg)
 {
+    if (this->is_busy)
+    {
+        RCLCPP_WARN(this->get_logger(), "Skipping frame, still processing...");
+        return;
+    }
+    this->is_busy = true;
+    auto ts_image = std::chrono::high_resolution_clock::now();
+
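+    // Frames arriving while the previous one is still being processed are dropped
+    // (with a warning) instead of queued, so published poses always reflect the
+    // newest camera image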
     // Load or convert image to Bayer format
     cv::Mat bayer_image;
     try
     {
@@ -158,43 +134,51 @@ void Rpt2DWrapperNode::callbackImages(const sensor_msgs::msg::Image::SharedPtr m
+        this->is_busy = false; // allow future frames after a failed conversion
         return;
     }

-    // Get timestamp
-    double time_stamp = msg->header.stamp.sec + msg->header.stamp.nanosec / 1.0e9;
+    // Call model
+    const auto &valid_poses = this->call_model(bayer_image);

-    // Store in member variables with lock
-    {
-        std::lock_guard<std::mutex> lk(mutex);
-        this->last_input_image = std::move(bayer_image);
-        this->last_input_time = time_stamp;
-    }
+    // Calculate timings
+    double time_stamp = msg->header.stamp.sec + msg->header.stamp.nanosec / 1.0e9;
+    auto ts_image_sec = std::chrono::duration<double>(ts_image.time_since_epoch()).count();
+    auto ts_pose = std::chrono::high_resolution_clock::now();
+    double ts_pose_sec = std::chrono::duration<double>(ts_pose.time_since_epoch()).count();
+    double z_trigger_image = ts_image_sec - time_stamp;
+    double z_trigger_pose = ts_pose_sec - time_stamp;
+    double z_image_pose = ts_pose_sec - ts_image_sec;
+
+    // Build JSON
+    json poses_msg;
+    poses_msg["bodies2D"] = valid_poses; // shape: persons x keypoints x 3
+    poses_msg["joints"] = joint_names_2d;
+    poses_msg["num_persons"] = valid_poses.size();
+    poses_msg["timestamps"] = {
+        {"trigger", time_stamp},
+        {"image", ts_image_sec},
+        {"pose", ts_pose_sec},
+        {"z-trigger-image", z_trigger_image},
+        {"z-image-pose", z_image_pose},
+        {"z-trigger-pose", z_trigger_pose}};
+
+    // Publish
+    publish(poses_msg);
+
+    // Print info
+    double elapsed_time = std::chrono::duration<double>(
+                              std::chrono::high_resolution_clock::now() - ts_image)
+                              .count();
+    std::cout << "Detected persons: " << valid_poses.size()
+              << " - Prediction time: " << elapsed_time << "s" << std::endl;
+
+    this->is_busy = false;
 }

 // =================================================================================================

-void Rpt2DWrapperNode::callbackModel()
+std::vector<std::vector<std::array<float, 3>>> Rpt2DWrapperNode::call_model(const cv::Mat &image)
 {
-    auto ptime = std::chrono::high_resolution_clock::now();
-
-    // Check if we have an image
-    cv::Mat local_image;
-    double local_timestamp = 0.0;
-    {
-        std::lock_guard<std::mutex> lk(mutex);
-        if (last_input_time == 0.0)
-        {
-            return;
-        }
-        local_image = std::move(last_input_image);
-        local_timestamp = last_input_time;
-
-        last_input_image = cv::Mat();
-        last_input_time = 0.0;
-    }
-
     // Create image vector
-    cv::Mat rgb_image = utils_pipeline::bayer2rgb(local_image);
-    std::vector<cv::Mat> images_2d;
-    images_2d.push_back(rgb_image);
+    cv::Mat rgb_image = utils_pipeline::bayer2rgb(image);
+    std::vector<cv::Mat> images_2d = {rgb_image};

     // Predict 2D poses
     auto poses_2d_all = kpt_model->predict(images_2d);
@@ -228,29 +212,7 @@
         }
     }

-    // Build JSON
-    auto ts_pose = std::chrono::high_resolution_clock::now();
-    double ts_pose_sec = std::chrono::duration<double>(ts_pose.time_since_epoch()).count();
-    double z_images_pose = ts_pose_sec - local_timestamp;
-
-    json poses_msg;
-    poses_msg["bodies2D"] = valid_poses; // shape: persons x keypoints x 3
-    poses_msg["joints"] = joint_names_2d;
-    poses_msg["num_persons"] = valid_poses.size();
-    poses_msg["timestamps"] = {
-        {"image", local_timestamp},
-        {"pose", ts_pose_sec},
-        {"z-images-pose", z_images_pose}};
-
-    // Publish
-    publish(poses_msg);
-
-    // Print info
-    double elapsed_time = std::chrono::duration<double>(
-                              std::chrono::high_resolution_clock::now() - ptime)
-                              .count();
-    std::cout << "Detected persons: " << valid_poses.size()
-              << " - Prediction time: " << elapsed_time << "s" << std::endl;
+    return valid_poses;
 }

 //
================================================================================================= From 9778c75bf0718937aee19201d1d15684c49a9cee Mon Sep 17 00:00:00 2001 From: Daniel Date: Tue, 21 Jan 2025 18:01:14 +0100 Subject: [PATCH 5/6] Drop old code. --- README.md | 2 +- extras/ros/docker-compose.yml | 1 - extras/ros/dockerfile | 2 - extras/ros/rpt2D_wrapper_py/package.xml | 18 - .../resource/rpt2D_wrapper_py | 0 .../rpt2D_wrapper_py/__init__.py | 0 .../rpt2D_wrapper_py/rpt2D_wrapper.py | 196 ------- extras/ros/rpt2D_wrapper_py/setup.cfg | 4 - extras/ros/rpt2D_wrapper_py/setup.py | 23 - .../rpt2D_wrapper_py/test/test_copyright.py | 27 - .../ros/rpt2D_wrapper_py/test/test_flake8.py | 25 - .../ros/rpt2D_wrapper_py/test/test_pep257.py | 23 - ...ataset_cpp.cpp => test_skelda_dataset.cpp} | 0 ..._dataset_cpp.py => test_skelda_dataset.py} | 0 scripts/test_skelda_dataset_py.py | 489 ------------------ 15 files changed, 1 insertion(+), 809 deletions(-) delete mode 100644 extras/ros/rpt2D_wrapper_py/package.xml delete mode 100644 extras/ros/rpt2D_wrapper_py/resource/rpt2D_wrapper_py delete mode 100644 extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py delete mode 100644 extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py delete mode 100644 extras/ros/rpt2D_wrapper_py/setup.cfg delete mode 100644 extras/ros/rpt2D_wrapper_py/setup.py delete mode 100644 extras/ros/rpt2D_wrapper_py/test/test_copyright.py delete mode 100644 extras/ros/rpt2D_wrapper_py/test/test_flake8.py delete mode 100644 extras/ros/rpt2D_wrapper_py/test/test_pep257.py rename scripts/{test_skelda_dataset_cpp.cpp => test_skelda_dataset.cpp} (100%) rename scripts/{test_skelda_dataset_cpp.py => test_skelda_dataset.py} (100%) delete mode 100644 scripts/test_skelda_dataset_py.py diff --git a/README.md b/README.md index 169f5fb..0d4167b 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ Fast triangulation of multiple persons from multiple camera views. 
-isystem /onnxruntime/include/onnxruntime/core/session/ \ -isystem /onnxruntime/include/onnxruntime/core/providers/tensorrt/ \ -L /onnxruntime/build/Linux/Release/ \ - test_skelda_dataset_cpp.cpp \ + test_skelda_dataset.cpp \ /RapidPoseTriangulation/rpt/*.cpp \ -o test_skelda_dataset.bin \ -Wl,--start-group \ diff --git a/extras/ros/docker-compose.yml b/extras/ros/docker-compose.yml index 491c6b2..0e239d5 100644 --- a/extras/ros/docker-compose.yml +++ b/extras/ros/docker-compose.yml @@ -35,7 +35,6 @@ services: - DISPLAY - QT_X11_NO_MITSHM=1 - "PYTHONUNBUFFERED=1" - # command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run rpt2D_wrapper_py rpt2D_wrapper' command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run rpt2D_wrapper_cpp rpt2D_wrapper' pose_visualizer: diff --git a/extras/ros/dockerfile b/extras/ros/dockerfile index ec65f90..169e0a2 100644 --- a/extras/ros/dockerfile +++ b/extras/ros/dockerfile @@ -63,12 +63,10 @@ RUN pip3 install --no-cache-dir "setuptools<=58.2.0" COPY ./extras/include/ /RapidPoseTriangulation/extras/include/ COPY ./scripts/ /RapidPoseTriangulation/scripts/ COPY ./extras/ros/pose2D_visualizer/ /RapidPoseTriangulation/extras/ros/pose2D_visualizer/ -COPY ./extras/ros/rpt2D_wrapper_py/ /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_py/ COPY ./extras/ros/rpt2D_wrapper_cpp/ /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_cpp/ # Link and build as ros package RUN ln -s /RapidPoseTriangulation/extras/ros/pose2D_visualizer/ /project/dev_ws/src/pose2D_visualizer -RUN ln -s /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_py/ /project/dev_ws/src/rpt2D_wrapper_py RUN ln -s /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_cpp/ /project/dev_ws/src/rpt2D_wrapper_cpp RUN /bin/bash -i -c 'cd /project/dev_ws/; colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release' diff --git a/extras/ros/rpt2D_wrapper_py/package.xml b/extras/ros/rpt2D_wrapper_py/package.xml deleted file mode 100644 index 178e790..0000000 --- a/extras/ros/rpt2D_wrapper_py/package.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - rpt2D_wrapper_py - 0.0.0 - TODO: Package description - root - TODO: License declaration - - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest - - - ament_python - - diff --git a/extras/ros/rpt2D_wrapper_py/resource/rpt2D_wrapper_py b/extras/ros/rpt2D_wrapper_py/resource/rpt2D_wrapper_py deleted file mode 100644 index e69de29..0000000 diff --git a/extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py b/extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py b/extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py deleted file mode 100644 index dfd7cdb..0000000 --- a/extras/ros/rpt2D_wrapper_py/rpt2D_wrapper_py/rpt2D_wrapper.py +++ /dev/null @@ -1,196 +0,0 @@ -import json -import os -import sys -import threading -import time - -import numpy as np -import rclpy -from cv_bridge import CvBridge -from rclpy.qos import QoSHistoryPolicy, QoSProfile, QoSReliabilityPolicy -from sensor_msgs.msg import Image -from std_msgs.msg import String - -filepath = os.path.dirname(os.path.realpath(__file__)) + "/" -sys.path.append(filepath + "../../../scripts/") -import test_triangulate -import utils_2d_pose - -# ================================================================================================== - -bridge = CvBridge() -node = None -publisher_pose = None - -cam_id = "camera01" -img_input_topic = "/" + cam_id + 
"/pylon_ros2_camera_node/image_raw" -pose_out_topic = "/poses/" + cam_id - -last_input_image = None -last_input_time = 0.0 -kpt_model = None -joint_names_2d = test_triangulate.joint_names_2d - -lock = threading.Lock() -stop_flag = False - -# Model config -min_bbox_score = 0.4 -min_bbox_area = 0.1 * 0.1 -batch_poses = True - - -# ================================================================================================== - - -def callback_images(image_data): - global last_input_image, last_input_time, lock - - # Convert into cv images from image string - if image_data.encoding == "bayer_rggb8": - bayer_image = bridge.imgmsg_to_cv2(image_data, "bayer_rggb8") - elif image_data.encoding == "mono8": - bayer_image = bridge.imgmsg_to_cv2(image_data, "mono8") - elif image_data.encoding == "rgb8": - color_image = bridge.imgmsg_to_cv2(image_data, "rgb8") - bayer_image = test_triangulate.rgb2bayer(color_image) - else: - raise ValueError("Unknown image encoding:", image_data.encoding) - - time_stamp = image_data.header.stamp.sec + image_data.header.stamp.nanosec / 1.0e9 - - with lock: - last_input_image = bayer_image - last_input_time = time_stamp - - -# ================================================================================================== - - -def callback_model(): - global last_input_image, last_input_time, kpt_model, lock - - ptime = time.time() - if last_input_time == 0.0: - time.sleep(0.0001) - return - - # Collect inputs - images_2d = [] - timestamps = [] - with lock: - img = last_input_image - ts = last_input_time - images_2d.append(img) - timestamps.append(ts) - last_input_image = None - last_input_time = 0.0 - - # Predict 2D poses - images_2d = [test_triangulate.bayer2rgb(img) for img in images_2d] - poses_2d = utils_2d_pose.get_2d_pose(kpt_model, images_2d) - poses_2d = test_triangulate.update_keypoints(poses_2d, joint_names_2d) - poses_2d = poses_2d[0] - - # Drop persons with no joints - poses_2d = np.asarray(poses_2d) - mask = np.sum(poses_2d[..., 2], axis=1) > 0 - poses_2d = poses_2d[mask] - - # Round poses - poses2D = [np.array(p).round(3).tolist() for p in poses_2d] - - # Publish poses - ts_pose = time.time() - poses = { - "bodies2D": poses2D, - "joints": joint_names_2d, - "num_persons": len(poses2D), - "timestamps": { - "image": timestamps[0], - "pose": ts_pose, - "z-images-pose": ts_pose - timestamps[0], - }, - } - publish(poses) - - msg = "Detected persons: {} - Prediction time: {:.4f}s" - print(msg.format(poses["num_persons"], time.time() - ptime)) - - -# ================================================================================================== - - -def callback_wrapper(): - global stop_flag - while not stop_flag: - callback_model() - time.sleep(0.001) - - -# ================================================================================================== - - -def publish(data): - # Publish json data - msg = String() - msg.data = json.dumps(data) - publisher_pose.publish(msg) - - -# ================================================================================================== - - -def main(): - global node, publisher_pose, kpt_model, stop_flag - - # Start node - rclpy.init(args=sys.argv) - node = rclpy.create_node("rpt2D_wrapper") - - # Quality of service settings - qos_profile = QoSProfile( - reliability=QoSReliabilityPolicy.RELIABLE, - history=QoSHistoryPolicy.KEEP_LAST, - depth=1, - ) - - # Create subscribers - _ = node.create_subscription( - Image, - img_input_topic, - callback_images, - qos_profile, - ) - - # Create publishers - publisher_pose = 
node.create_publisher(String, pose_out_topic, qos_profile) - - # Load 2D pose model - whole_body = test_triangulate.whole_body - if any((whole_body[k] for k in whole_body)): - kpt_model = utils_2d_pose.load_wb_model( - min_bbox_score, min_bbox_area, batch_poses - ) - else: - kpt_model = utils_2d_pose.load_model(min_bbox_score, min_bbox_area, batch_poses) - - node.get_logger().info("Finished initialization of pose estimator") - - # Start prediction thread - p1 = threading.Thread(target=callback_wrapper) - p1.start() - - # Run ros update thread - rclpy.spin(node) - - stop_flag = True - p1.join() - node.destroy_node() - rclpy.shutdown() - - -# ================================================================================================== - -if __name__ == "__main__": - main() diff --git a/extras/ros/rpt2D_wrapper_py/setup.cfg b/extras/ros/rpt2D_wrapper_py/setup.cfg deleted file mode 100644 index 80d9b5d..0000000 --- a/extras/ros/rpt2D_wrapper_py/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/rpt2D_wrapper_py -[install] -install_scripts=$base/lib/rpt2D_wrapper_py diff --git a/extras/ros/rpt2D_wrapper_py/setup.py b/extras/ros/rpt2D_wrapper_py/setup.py deleted file mode 100644 index cb32e53..0000000 --- a/extras/ros/rpt2D_wrapper_py/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -from setuptools import setup - -package_name = "rpt2D_wrapper_py" - -setup( - name=package_name, - version="0.0.0", - packages=[package_name], - data_files=[ - ("share/ament_index/resource_index/packages", ["resource/" + package_name]), - ("share/" + package_name, ["package.xml"]), - ], - install_requires=["setuptools"], - zip_safe=True, - maintainer="root", - maintainer_email="root@todo.todo", - description="TODO: Package description", - license="TODO: License declaration", - tests_require=["pytest"], - entry_points={ - "console_scripts": ["rpt2D_wrapper = rpt2D_wrapper_py.rpt2D_wrapper:main"], - }, -) diff --git a/extras/ros/rpt2D_wrapper_py/test/test_copyright.py b/extras/ros/rpt2D_wrapper_py/test/test_copyright.py deleted file mode 100644 index 8f18fa4..0000000 --- a/extras/ros/rpt2D_wrapper_py/test/test_copyright.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from ament_copyright.main import main - - -# Remove the `skip` decorator once the source file(s) have a copyright header -@pytest.mark.skip( - reason="No copyright header has been placed in the generated source file." -) -@pytest.mark.copyright -@pytest.mark.linter -def test_copyright(): - rc = main(argv=[".", "test"]) - assert rc == 0, "Found errors" diff --git a/extras/ros/rpt2D_wrapper_py/test/test_flake8.py b/extras/ros/rpt2D_wrapper_py/test/test_flake8.py deleted file mode 100644 index f494570..0000000 --- a/extras/ros/rpt2D_wrapper_py/test/test_flake8.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 Open Source Robotics Foundation, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from ament_flake8.main import main_with_errors - - -@pytest.mark.flake8 -@pytest.mark.linter -def test_flake8(): - rc, errors = main_with_errors(argv=[]) - assert rc == 0, "Found %d code style errors / warnings:\n" % len( - errors - ) + "\n".join(errors) diff --git a/extras/ros/rpt2D_wrapper_py/test/test_pep257.py b/extras/ros/rpt2D_wrapper_py/test/test_pep257.py deleted file mode 100644 index 4eddb46..0000000 --- a/extras/ros/rpt2D_wrapper_py/test/test_pep257.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from ament_pep257.main import main - - -@pytest.mark.linter -@pytest.mark.pep257 -def test_pep257(): - rc = main(argv=[".", "test"]) - assert rc == 0, "Found code style errors / warnings" diff --git a/scripts/test_skelda_dataset_cpp.cpp b/scripts/test_skelda_dataset.cpp similarity index 100% rename from scripts/test_skelda_dataset_cpp.cpp rename to scripts/test_skelda_dataset.cpp diff --git a/scripts/test_skelda_dataset_cpp.py b/scripts/test_skelda_dataset.py similarity index 100% rename from scripts/test_skelda_dataset_cpp.py rename to scripts/test_skelda_dataset.py diff --git a/scripts/test_skelda_dataset_py.py b/scripts/test_skelda_dataset_py.py deleted file mode 100644 index 501d1a2..0000000 --- a/scripts/test_skelda_dataset_py.py +++ /dev/null @@ -1,489 +0,0 @@ -import json -import os -import sys -import time - -import cv2 -import matplotlib -import numpy as np -import tqdm - -import utils_2d_pose -import utils_pipeline -from skelda import evals - -sys.path.append("/RapidPoseTriangulation/swig/") -import rpt - -# ================================================================================================== - -whole_body = { - "foots": False, - "face": False, - "hands": False, -} - -dataset_use = "human36m" -# dataset_use = "panoptic" -# dataset_use = "mvor" -# dataset_use = "shelf" -# dataset_use = "campus" -# dataset_use = "ikeaasm" -# dataset_use = "chi3d" -# dataset_use = "tsinghua" -# dataset_use = "human36m_wb" -# dataset_use = "egohumans_tagging" -# dataset_use = "egohumans_legoassemble" -# dataset_use = "egohumans_fencing" -# dataset_use = "egohumans_basketball" -# dataset_use = "egohumans_volleyball" -# dataset_use = "egohumans_badminton" -# dataset_use = "egohumans_tennis" -# dataset_use = "ntu" -# dataset_use = "koarob" - - -# Describes the minimum area as fraction of the image size for a 2D bounding box 
to be considered
-# If the persons are small in the image, use a lower value
-default_min_bbox_area = 0.1 * 0.1
-
-# Describes how confident a 2D bounding box detection needs to be to be considered
-# If the persons are small in the image, or poorly recognizable, use a lower value
-default_min_bbox_score = 0.3
-
-# Describes how well two 2D poses need to match each other to create a valid triangulation
-# If the quality of the 2D detections is poor, use a lower value
-default_min_match_score = 0.94
-
-# Describes the minimum number of camera pairs that need to detect the same person
-# If the number of cameras is high, and the views are not occluded, use a higher value
-default_min_group_size = 1
-
-# Batch poses per image for faster processing
-# If most of the time only one person is in an image, disable it, because batching is slightly slower in that case
-default_batch_poses = True
-
-datasets = {
-    "human36m": {
-        "path": "/datasets/human36m/skelda/pose_test.json",
-        "take_interval": 5,
-        "min_match_score": 0.95,
-        "min_group_size": 1,
-        "min_bbox_score": 0.4,
-        "min_bbox_area": 0.1 * 0.1,
-        "batch_poses": False,
-    },
-    "panoptic": {
-        "path": "/datasets/panoptic/skelda/test.json",
-        "cams": ["00_03", "00_06", "00_12", "00_13", "00_23"],
-        # "cams": ["00_03", "00_06", "00_12"],
-        # "cams": ["00_03", "00_06", "00_12", "00_13", "00_23", "00_15", "00_10", "00_21", "00_09", "00_01"],
-        "take_interval": 3,
-        "min_match_score": 0.95,
-        "use_scenes": ["160906_pizza1", "160422_haggling1", "160906_ian5"],
-        "min_group_size": 1,
-        # "min_group_size": 4,
-        "min_bbox_area": 0.05 * 0.05,
-    },
-    "mvor": {
-        "path": "/datasets/mvor/skelda/all.json",
-        "take_interval": 1,
-        "with_depth": False,
-        "min_match_score": 0.85,
-        "min_bbox_score": 0.25,
-    },
-    "campus": {
-        "path": "/datasets/campus/skelda/test.json",
-        "take_interval": 1,
-        "min_match_score": 0.90,
-        "min_bbox_score": 0.5,
-    },
-    "shelf": {
-        "path": "/datasets/shelf/skelda/test.json",
-        "take_interval": 1,
-        "min_match_score": 0.96,
-        "min_group_size": 2,
-    },
-    "ikeaasm": {
-        "path": "/datasets/ikeaasm/skelda/test.json",
-        "take_interval": 2,
-        "min_match_score": 0.92,
-        "min_bbox_score": 0.20,
-    },
-    "chi3d": {
-        "path": "/datasets/chi3d/skelda/all.json",
-        "take_interval": 5,
-    },
-    "tsinghua": {
-        "path": "/datasets/tsinghua/skelda/test.json",
-        "take_interval": 3,
-        "min_match_score": 0.95,
-        "min_group_size": 2,
-    },
-    "human36m_wb": {
-        "path": "/datasets/human36m/skelda/wb/test.json",
-        "take_interval": 100,
-        "min_bbox_score": 0.4,
-        "batch_poses": False,
-    },
-    "egohumans_tagging": {
-        "path": "/datasets/egohumans/skelda/all.json",
-        "take_interval": 2,
-        "subset": "tagging",
-        "min_group_size": 2,
-        "min_bbox_score": 0.2,
-        "min_bbox_area": 0.05 * 0.05,
-    },
-    "egohumans_legoassemble": {
-        "path": "/datasets/egohumans/skelda/all.json",
-        "take_interval": 2,
-        "subset": "legoassemble",
-        "min_group_size": 2,
-    },
-    "egohumans_fencing": {
-        "path": "/datasets/egohumans/skelda/all.json",
-        "take_interval": 2,
-        "subset": "fencing",
-        "min_group_size": 7,
-        "min_bbox_score": 0.5,
-        "min_bbox_area": 0.05 * 0.05,
-    },
-    "egohumans_basketball": {
-        "path": "/datasets/egohumans/skelda/all.json",
-        "take_interval": 2,
-        "subset": "basketball",
-        "min_group_size": 7,
-        "min_bbox_score": 0.25,
-        "min_bbox_area": 0.025 * 0.025,
-    },
-    "egohumans_volleyball": {
-        "path": "/datasets/egohumans/skelda/all.json",
-        "take_interval": 2,
-        "subset": "volleyball",
-        "min_group_size": 11,
-        "min_bbox_score": 0.25,
-        "min_bbox_area": 0.05 * 0.05,
-    },
-    "egohumans_badminton": {
-        "path": "/datasets/egohumans/skelda/all.json",
-        "take_interval": 2,
-        "subset": "badminton",
-        "min_group_size": 7,
-        "min_bbox_score": 0.25,
-        "min_bbox_area": 0.05 * 0.05,
-    },
-    "egohumans_tennis": {
-        "path": "/datasets/egohumans/skelda/all.json",
-        "take_interval": 2,
-        "subset": "tennis",
-        "min_group_size": 11,
-        "min_bbox_area": 0.025 * 0.025,
-    },
-}
-
-joint_names_2d = utils_pipeline.get_joint_names(whole_body)
-joint_names_3d = list(joint_names_2d)
-eval_joints = [
-    "head",
-    "shoulder_left",
-    "shoulder_right",
-    "elbow_left",
-    "elbow_right",
-    "wrist_left",
-    "wrist_right",
-    "hip_left",
-    "hip_right",
-    "knee_left",
-    "knee_right",
-    "ankle_left",
-    "ankle_right",
-]
-if dataset_use == "human36m":
-    eval_joints[eval_joints.index("head")] = "nose"
-if dataset_use == "panoptic":
-    eval_joints[eval_joints.index("head")] = "nose"
-if dataset_use == "human36m_wb":
-    if utils_pipeline.use_whole_body(whole_body):
-        eval_joints = list(joint_names_2d)
-    else:
-        eval_joints[eval_joints.index("head")] = "nose"
-
-# output_dir = "/RapidPoseTriangulation/data/testoutput/"
-output_dir = ""
-
-# ==================================================================================================
-
-
-def load_json(path: str):
-    with open(path, "r", encoding="utf-8") as file:
-        data = json.load(file)
-    return data
-
-
-# ==================================================================================================
-
-
-def load_labels(dataset: dict):
-    """Load labels by dataset description"""
-
-    if "panoptic" in dataset:
-        labels = load_json(dataset["panoptic"]["path"])
-        labels = [lb for i, lb in enumerate(labels) if i % 1500 < 90]
-
-        # Filter by maximum number of persons
-        labels = [l for l in labels if len(l["bodies3D"]) <= 10]
-
-        # Filter scenes
-        if "use_scenes" in dataset["panoptic"]:
-            labels = [
-                l for l in labels if l["scene"] in dataset["panoptic"]["use_scenes"]
-            ]
-
-        # Filter cameras
-        if "cameras_depth" not in labels[0]:
-            for label in labels:
-                for i, cam in reversed(list(enumerate(label["cameras"]))):
-                    if cam["name"] not in dataset["panoptic"]["cams"]:
-                        label["cameras"].pop(i)
-                        label["imgpaths"].pop(i)
-
-    elif "human36m" in dataset:
-        labels = load_json(dataset["human36m"]["path"])
-        labels = [lb for lb in labels if lb["subject"] == "S9"]
-        labels = [lb for i, lb in enumerate(labels) if i % 4000 < 150]
-
-        for label in labels:
-            label.pop("action")
-            label.pop("frame")
-
-    elif "mvor" in dataset:
-        labels = load_json(dataset["mvor"]["path"])
-
-        # Rename keys
-        for label in labels:
-            label["cameras_color"] = label["cameras"]
-            label["imgpaths_color"] = label["imgpaths"]
-
-    elif "ikeaasm" in dataset:
-        labels = load_json(dataset["ikeaasm"]["path"])
-        cams0 = str(labels[0]["cameras"])
-        labels = [lb for lb in labels if str(lb["cameras"]) == cams0]
-
-    elif "shelf" in dataset:
-        labels = load_json(dataset["shelf"]["path"])
-        labels = [lb for lb in labels if "test" in lb["splits"]]
-
-    elif "campus" in dataset:
-        labels = load_json(dataset["campus"]["path"])
-        labels = [lb for lb in labels if "test" in lb["splits"]]
-
-    elif "tsinghua" in dataset:
-        labels = load_json(dataset["tsinghua"]["path"])
-        labels = [lb for lb in labels if "test" in lb["splits"]]
-        labels = [lb for lb in labels if lb["seq"] == "seq_1"]
-        labels = [lb for i, lb in enumerate(labels) if i % 300 < 90]
-
-        for label in labels:
-            label["bodyids"] = list(range(len(label["bodies3D"])))
-
-    elif "chi3d" in dataset:
-        labels = load_json(dataset["chi3d"]["path"])
-        labels = [lb for lb in labels if lb["setup"] == "s03"]
-        labels = [lb for i, lb in enumerate(labels) if i % 2000 < 150]
-
-    elif "human36m_wb" in dataset:
-        labels = load_json(dataset["human36m_wb"]["path"])
-
-    elif any("egohumans" in key for key in dataset):
-        labels = load_json(dataset[dataset_use]["path"])
-        labels = [lb for lb in labels if "test" in lb["splits"]]
-        labels = [lb for lb in labels if dataset[dataset_use]["subset"] in lb["seq"]]
-        if dataset[dataset_use]["subset"] in ["volleyball", "tennis"]:
-            labels = [lb for i, lb in enumerate(labels) if i % 150 < 60]
-
-    else:
-        raise ValueError("Dataset not available")
-
-    # Optionally drop samples to speed up train/eval
-    if "take_interval" in dataset:
-        take_interval = dataset["take_interval"]
-        if take_interval > 1:
-            labels = [l for i, l in enumerate(labels) if i % take_interval == 0]
-
-    return labels
-
-
-# ==================================================================================================
-
-
-def main():
-    global joint_names_3d, eval_joints
-
-    # Load dataset specific parameters
-    min_match_score = datasets[dataset_use].get(
-        "min_match_score", default_min_match_score
-    )
-    min_group_size = datasets[dataset_use].get("min_group_size", default_min_group_size)
-    min_bbox_score = datasets[dataset_use].get("min_bbox_score", default_min_bbox_score)
-    min_bbox_area = datasets[dataset_use].get("min_bbox_area", default_min_bbox_area)
-    batch_poses = datasets[dataset_use].get("batch_poses", default_batch_poses)
-
-    # Load 2D pose model
-    if utils_pipeline.use_whole_body(whole_body):
-        kpt_model = utils_2d_pose.load_wb_model(
-            min_bbox_score, min_bbox_area, batch_poses
-        )
-    else:
-        kpt_model = utils_2d_pose.load_model(min_bbox_score, min_bbox_area, batch_poses)
-
-    # Manually set matplotlib backend
-    try:
-        matplotlib.use("TkAgg")
-    except ImportError:
-        print("WARNING: Using headless mode, no visualizations will be shown.")
-
-    print("Loading dataset ...")
-    labels = load_labels(
-        {
-            dataset_use: datasets[dataset_use],
-            "take_interval": datasets[dataset_use]["take_interval"],
-        }
-    )
-
-    # Print a dataset sample for debugging
-    print(labels[0])
-
-    print("\nPrefetching images ...")
-    for label in tqdm.tqdm(labels):
-        # If the images are stored on an HDD, it sometimes takes a while to load them
-        # Prefetching them results in more stable timings of the following steps
-        # To prevent memory overflow, the code only loads the images, but does not store them
-        try:
-            for i in range(len(label["imgpaths"])):
-                imgpath = label["imgpaths"][i]
-                img = utils_pipeline.load_image(imgpath)
-        except cv2.error:
-            print("One of the paths not found:", label["imgpaths"])
-            continue
-    time.sleep(3)
-
-    print("\nCalculating 2D predictions ...")
-    all_poses_2d = []
-    times = []
-    for label in tqdm.tqdm(labels):
-        images_2d = []
-
-        start = time.time()
-        try:
-            for i in range(len(label["imgpaths"])):
-                imgpath = label["imgpaths"][i]
-                img = utils_pipeline.load_image(imgpath)
-                images_2d.append(img)
-        except cv2.error:
-            print("One of the paths not found:", label["imgpaths"])
-            continue
-
-        if dataset_use == "human36m":
-            for i in range(len(images_2d)):
-                # Since the images don't have the same shape, rescale some of them
-                img = images_2d[i]
-                ishape = img.shape
-                if ishape != (1000, 1000, 3):
-                    cam = label["cameras"][i]
-                    cam["K"][1][1] = cam["K"][1][1] * (1000 / ishape[0])
-                    cam["K"][1][2] = cam["K"][1][2] * (1000 / ishape[0])
-                    cam["K"][0][0] = cam["K"][0][0] * (1000 / ishape[1])
-                    cam["K"][0][2] = cam["K"][0][2] * (1000 / ishape[1])
-                    images_2d[i] = cv2.resize(img, (1000, 1000))
-
-        # Convert image format to Bayer encoding to simulate real camera input
-        # This also resulted in notably better MPJPE results in most cases, presumably since the
-        # demosaicing algorithm from OpenCV is better than the default one from the cameras
-        for i in range(len(images_2d)):
-            images_2d[i] = utils_pipeline.rgb2bayer(images_2d[i])
-        time_imgs = time.time() - start
-
-        start = time.time()
-        for i in range(len(images_2d)):
-            images_2d[i] = utils_pipeline.bayer2rgb(images_2d[i])
-        poses_2d = utils_2d_pose.get_2d_pose(kpt_model, images_2d)
-        poses_2d = utils_pipeline.update_keypoints(poses_2d, joint_names_2d, whole_body)
-        time_2d = time.time() - start
-
-        all_poses_2d.append(poses_2d)
-        times.append([time_imgs, time_2d, 0])
-
-    print("\nCalculating 3D predictions ...")
-    all_poses_3d = []
-    all_ids = []
-    triangulator = rpt.Triangulator(
-        min_match_score=min_match_score, min_group_size=min_group_size
-    )
-    old_scene = ""
-    old_index = -1
-    for i in tqdm.tqdm(range(len(labels))):
-        label = labels[i]
-        poses_2d = all_poses_2d[i]
-
-        if old_scene != label.get("scene", "") or (
-            old_index + datasets[dataset_use]["take_interval"] < label["index"]
-        ):
-            # Reset last poses if the scene changes or frames were skipped
-            old_scene = label.get("scene", "")
-            triangulator.reset()
-
-        start = time.time()
-        if sum(np.sum(p) for p in poses_2d) == 0:
-            poses3D = np.zeros([1, len(joint_names_3d), 4]).tolist()
-        else:
-            rpt_cameras = rpt.convert_cameras(label["cameras"])
-            roomparams = [label["room_size"], label["room_center"]]
-            poses3D = triangulator.triangulate_poses(
-                poses_2d, rpt_cameras, roomparams, joint_names_2d
-            )
-        time_3d = time.time() - start
-
-        old_index = label["index"]
-        all_poses_3d.append(np.array(poses3D).tolist())
-        all_ids.append(label["id"])
-        times[i][2] = time_3d
-
-    # Print per-step timings
-    warmup_iters = 10
-    if len(times) > warmup_iters:
-        times = times[warmup_iters:]
-    avg_time_im = np.mean([t[0] for t in times])
-    avg_time_2d = np.mean([t[1] for t in times])
-    avg_time_3d = np.mean([t[2] for t in times])
-    tstats = {
-        "img_loading": avg_time_im,
-        "avg_time_2d": avg_time_2d,
-        "avg_time_3d": avg_time_3d,
-        "avg_fps": 1.0 / (avg_time_2d + avg_time_3d),
-    }
-    print("\nMetrics:")
-    print(json.dumps(tstats, indent=2))
-    triangulator.print_stats()
-
-    _ = evals.mpjpe.run_eval(
-        labels,
-        all_poses_3d,
-        all_ids,
-        joint_names_net=joint_names_3d,
-        joint_names_use=eval_joints,
-        save_error_imgs=output_dir,
-    )
-    _ = evals.pcp.run_eval(
-        labels,
-        all_poses_3d,
-        all_ids,
-        joint_names_net=joint_names_3d,
-        joint_names_use=eval_joints,
-        replace_head_with_nose=True,
-    )
-
-
-# ==================================================================================================
-
-if __name__ == "__main__":
-    main()

From 9ce51f90b3c58b304ff047f7aab2bf0a59ab446e Mon Sep 17 00:00:00 2001
From: Daniel
Date: Tue, 21 Jan 2025 18:11:49 +0100
Subject: [PATCH 6/6] Renamed ros packages.
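
ROS package names are expected to be all lowercase (see REP-144), and standard
ROS tooling warns about mixed-case names, which is presumably the motivation
for this rename. As a quick smoke test after rebuilding, the renamed
executables can be started the same way the compose file does below
(ROS_DOMAIN_ID=18 is simply the value used in docker-compose.yml, not a
requirement):

  export ROS_DOMAIN_ID=18
  ros2 run rpt2d_wrapper_cpp rpt2d_wrapper
  ros2 run pose2d_visualizer pose2d_visualizer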
---
 extras/ros/docker-compose.yml               |  4 ++--
 extras/ros/dockerfile                       |  8 ++++----
 extras/ros/pose2D_visualizer/setup.cfg      |  4 ----
 .../package.xml                             |  2 +-
 .../pose2d_visualizer}/__init__.py          |  0
 .../pose2d_visualizer/pose2d_visualizer.py} |  2 +-
 .../resource/pose2d_visualizer}             |  0
 extras/ros/pose2d_visualizer/setup.cfg      |  4 ++++
 .../setup.py                                |  4 ++--
 .../test/test_copyright.py                  |  0
 .../test/test_flake8.py                     |  0
 .../test/test_pep257.py                     |  0
 .../CMakeLists.txt                          | 14 +++++++-------
 .../package.xml                             |  2 +-
 .../src/rpt2d_wrapper.cpp}                  |  2 +-
 15 files changed, 23 insertions(+), 23 deletions(-)
 delete mode 100644 extras/ros/pose2D_visualizer/setup.cfg
 rename extras/ros/{pose2D_visualizer => pose2d_visualizer}/package.xml (94%)
 rename extras/ros/{pose2D_visualizer/pose2D_visualizer => pose2d_visualizer/pose2d_visualizer}/__init__.py (100%)
 rename extras/ros/{pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py => pose2d_visualizer/pose2d_visualizer/pose2d_visualizer.py} (98%)
 rename extras/ros/{pose2D_visualizer/resource/pose2D_visualizer => pose2d_visualizer/resource/pose2d_visualizer} (100%)
 create mode 100644 extras/ros/pose2d_visualizer/setup.cfg
 rename extras/ros/{pose2D_visualizer => pose2d_visualizer}/setup.py (80%)
 rename extras/ros/{pose2D_visualizer => pose2d_visualizer}/test/test_copyright.py (100%)
 rename extras/ros/{pose2D_visualizer => pose2d_visualizer}/test/test_flake8.py (100%)
 rename extras/ros/{pose2D_visualizer => pose2d_visualizer}/test/test_pep257.py (100%)
 rename extras/ros/{rpt2D_wrapper_cpp => rpt2d_wrapper_cpp}/CMakeLists.txt (84%)
 rename extras/ros/{rpt2D_wrapper_cpp => rpt2d_wrapper_cpp}/package.xml (95%)
 rename extras/ros/{rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp => rpt2d_wrapper_cpp/src/rpt2d_wrapper.cpp} (99%)

diff --git a/extras/ros/docker-compose.yml b/extras/ros/docker-compose.yml
index 0e239d5..f0e9ca9 100644
--- a/extras/ros/docker-compose.yml
+++ b/extras/ros/docker-compose.yml
@@ -35,7 +35,7 @@ services:
       - DISPLAY
       - QT_X11_NO_MITSHM=1
       - "PYTHONUNBUFFERED=1"
-    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run rpt2D_wrapper_cpp rpt2D_wrapper'
+    command: /bin/bash -i -c 'export ROS_DOMAIN_ID=18 && ros2 run rpt2d_wrapper_cpp rpt2d_wrapper'
 
   pose_visualizer:
     image: rapidposetriangulation_ros
@@ -52,7 +52,7 @@ services:
       - DISPLAY
       - QT_X11_NO_MITSHM=1
       - "PYTHONUNBUFFERED=1"
-    command: /bin/bash -i -c 'sleep 2 && export ROS_DOMAIN_ID=18 && ros2 run pose2D_visualizer pose2D_visualizer'
+    command: /bin/bash -i -c 'sleep 2 && export ROS_DOMAIN_ID=18 && ros2 run pose2d_visualizer pose2d_visualizer'
 
   pose_viewer:
     image: rapidposetriangulation_ros
diff --git a/extras/ros/dockerfile b/extras/ros/dockerfile
index 169e0a2..8420926 100644
--- a/extras/ros/dockerfile
+++ b/extras/ros/dockerfile
@@ -62,12 +62,12 @@ RUN pip3 install --no-cache-dir "setuptools<=58.2.0"
 # Copy modules
 COPY ./extras/include/ /RapidPoseTriangulation/extras/include/
 COPY ./scripts/ /RapidPoseTriangulation/scripts/
-COPY ./extras/ros/pose2D_visualizer/ /RapidPoseTriangulation/extras/ros/pose2D_visualizer/
-COPY ./extras/ros/rpt2D_wrapper_cpp/ /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_cpp/
+COPY ./extras/ros/pose2d_visualizer/ /RapidPoseTriangulation/extras/ros/pose2d_visualizer/
+COPY ./extras/ros/rpt2d_wrapper_cpp/ /RapidPoseTriangulation/extras/ros/rpt2d_wrapper_cpp/
 
 # Link and build as ros package
-RUN ln -s /RapidPoseTriangulation/extras/ros/pose2D_visualizer/ /project/dev_ws/src/pose2D_visualizer
-RUN ln -s /RapidPoseTriangulation/extras/ros/rpt2D_wrapper_cpp/ /project/dev_ws/src/rpt2D_wrapper_cpp
+RUN ln -s /RapidPoseTriangulation/extras/ros/pose2d_visualizer/ /project/dev_ws/src/pose2d_visualizer
+RUN ln -s /RapidPoseTriangulation/extras/ros/rpt2d_wrapper_cpp/ /project/dev_ws/src/rpt2d_wrapper_cpp
 RUN /bin/bash -i -c 'cd /project/dev_ws/; colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release'
 
 # Update ros packages -> autocompletion and check
diff --git a/extras/ros/pose2D_visualizer/setup.cfg b/extras/ros/pose2D_visualizer/setup.cfg
deleted file mode 100644
index 924f0ef..0000000
--- a/extras/ros/pose2D_visualizer/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/pose2D_visualizer
-[install]
-install_scripts=$base/lib/pose2D_visualizer
diff --git a/extras/ros/pose2D_visualizer/package.xml b/extras/ros/pose2d_visualizer/package.xml
similarity index 94%
rename from extras/ros/pose2D_visualizer/package.xml
rename to extras/ros/pose2d_visualizer/package.xml
index c4a0bb2..5334080 100644
--- a/extras/ros/pose2D_visualizer/package.xml
+++ b/extras/ros/pose2d_visualizer/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
-  <name>pose2D_visualizer</name>
+  <name>pose2d_visualizer</name>
   <version>0.0.0</version>
   <description>TODO: Package description</description>
   <maintainer email="root@todo.todo">root</maintainer>
diff --git a/extras/ros/pose2D_visualizer/pose2D_visualizer/__init__.py b/extras/ros/pose2d_visualizer/pose2d_visualizer/__init__.py
similarity index 100%
rename from extras/ros/pose2D_visualizer/pose2D_visualizer/__init__.py
rename to extras/ros/pose2d_visualizer/pose2d_visualizer/__init__.py
diff --git a/extras/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py b/extras/ros/pose2d_visualizer/pose2d_visualizer/pose2d_visualizer.py
similarity index 98%
rename from extras/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py
rename to extras/ros/pose2d_visualizer/pose2d_visualizer/pose2d_visualizer.py
index 98b65f5..67e2ec1 100644
--- a/extras/ros/pose2D_visualizer/pose2D_visualizer/pose2D_visualizer.py
+++ b/extras/ros/pose2d_visualizer/pose2d_visualizer/pose2d_visualizer.py
@@ -111,7 +111,7 @@ def main():
 
     # Start node
     rclpy.init(args=sys.argv)
-    node = rclpy.create_node("pose2D_visualizer")
+    node = rclpy.create_node("pose2d_visualizer")
 
     # Quality of service settings
     qos_profile = QoSProfile(
diff --git a/extras/ros/pose2D_visualizer/resource/pose2D_visualizer b/extras/ros/pose2d_visualizer/resource/pose2d_visualizer
similarity index 100%
rename from extras/ros/pose2D_visualizer/resource/pose2D_visualizer
rename to extras/ros/pose2d_visualizer/resource/pose2d_visualizer
diff --git a/extras/ros/pose2d_visualizer/setup.cfg b/extras/ros/pose2d_visualizer/setup.cfg
new file mode 100644
index 0000000..5522611
--- /dev/null
+++ b/extras/ros/pose2d_visualizer/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/pose2d_visualizer
+[install]
+install_scripts=$base/lib/pose2d_visualizer
diff --git a/extras/ros/pose2D_visualizer/setup.py b/extras/ros/pose2d_visualizer/setup.py
similarity index 80%
rename from extras/ros/pose2D_visualizer/setup.py
rename to extras/ros/pose2d_visualizer/setup.py
index 87ca089..ea0f365 100644
--- a/extras/ros/pose2D_visualizer/setup.py
+++ b/extras/ros/pose2d_visualizer/setup.py
@@ -1,6 +1,6 @@
 from setuptools import setup
 
-package_name = "pose2D_visualizer"
+package_name = "pose2d_visualizer"
 
 setup(
     name=package_name,
@@ -18,6 +18,6 @@ setup(
     license="TODO: License declaration",
     tests_require=["pytest"],
     entry_points={
-        "console_scripts": ["pose2D_visualizer = pose2D_visualizer.pose2D_visualizer:main"],
+        "console_scripts": ["pose2d_visualizer = pose2d_visualizer.pose2d_visualizer:main"],
     },
 )
diff --git a/extras/ros/pose2D_visualizer/test/test_copyright.py b/extras/ros/pose2d_visualizer/test/test_copyright.py
similarity index 100%
rename from extras/ros/pose2D_visualizer/test/test_copyright.py
rename to extras/ros/pose2d_visualizer/test/test_copyright.py
diff --git a/extras/ros/pose2D_visualizer/test/test_flake8.py b/extras/ros/pose2d_visualizer/test/test_flake8.py
similarity index 100%
rename from extras/ros/pose2D_visualizer/test/test_flake8.py
rename to extras/ros/pose2d_visualizer/test/test_flake8.py
diff --git a/extras/ros/pose2D_visualizer/test/test_pep257.py b/extras/ros/pose2d_visualizer/test/test_pep257.py
similarity index 100%
rename from extras/ros/pose2D_visualizer/test/test_pep257.py
rename to extras/ros/pose2d_visualizer/test/test_pep257.py
diff --git a/extras/ros/rpt2D_wrapper_cpp/CMakeLists.txt b/extras/ros/rpt2d_wrapper_cpp/CMakeLists.txt
similarity index 84%
rename from extras/ros/rpt2D_wrapper_cpp/CMakeLists.txt
rename to extras/ros/rpt2d_wrapper_cpp/CMakeLists.txt
index 987bcec..20514a8 100644
--- a/extras/ros/rpt2D_wrapper_cpp/CMakeLists.txt
+++ b/extras/ros/rpt2d_wrapper_cpp/CMakeLists.txt
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.5)
-project(rpt2D_wrapper_cpp)
+project(rpt2d_wrapper_cpp)
 
 # Default to C99
 if(NOT CMAKE_C_STANDARD)
@@ -29,13 +29,13 @@
 include_directories(/onnxruntime/include/
                     /onnxruntime/include/onnxruntime/core/providers/tensorrt/)
 link_directories(/onnxruntime/build/Linux/Release/)
-add_executable(rpt2D_wrapper src/rpt2D_wrapper.cpp)
-ament_target_dependencies(rpt2D_wrapper rclcpp std_msgs sensor_msgs cv_bridge)
-target_include_directories(rpt2D_wrapper PUBLIC
+add_executable(rpt2d_wrapper src/rpt2d_wrapper.cpp)
+ament_target_dependencies(rpt2d_wrapper rclcpp std_msgs sensor_msgs cv_bridge)
+target_include_directories(rpt2d_wrapper PUBLIC
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   $<INSTALL_INTERFACE:include>)
 
-target_link_libraries(rpt2D_wrapper
+target_link_libraries(rpt2d_wrapper
   ${OpenCV_LIBS}
   onnxruntime_providers_tensorrt
   onnxruntime_providers_shared
@@ -43,12 +43,12 @@ target_link_libraries(rpt2D_wrapper
   onnxruntime
 )
 
-set_target_properties(rpt2D_wrapper PROPERTIES
+set_target_properties(rpt2d_wrapper PROPERTIES
   BUILD_WITH_INSTALL_RPATH TRUE
   INSTALL_RPATH "/onnxruntime/build/Linux/Release"
 )
 
-install(TARGETS rpt2D_wrapper
+install(TARGETS rpt2d_wrapper
   DESTINATION lib/${PROJECT_NAME})
 
 if(BUILD_TESTING)
diff --git a/extras/ros/rpt2D_wrapper_cpp/package.xml b/extras/ros/rpt2d_wrapper_cpp/package.xml
similarity index 95%
rename from extras/ros/rpt2D_wrapper_cpp/package.xml
rename to extras/ros/rpt2d_wrapper_cpp/package.xml
index c818ba0..53e24cc 100644
--- a/extras/ros/rpt2D_wrapper_cpp/package.xml
+++ b/extras/ros/rpt2d_wrapper_cpp/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
-  <name>rpt2D_wrapper_cpp</name>
+  <name>rpt2d_wrapper_cpp</name>
   <version>0.0.0</version>
   <description>TODO: Package description</description>
   <maintainer email="root@todo.todo">root</maintainer>
diff --git a/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp b/extras/ros/rpt2d_wrapper_cpp/src/rpt2d_wrapper.cpp
similarity index 99%
rename from extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp
rename to extras/ros/rpt2d_wrapper_cpp/src/rpt2d_wrapper.cpp
index cc5b627..a5ecfce 100644
--- a/extras/ros/rpt2D_wrapper_cpp/src/rpt2D_wrapper.cpp
+++ b/extras/ros/rpt2d_wrapper_cpp/src/rpt2d_wrapper.cpp
@@ -222,7 +222,7 @@ int main(int argc, char **argv)
 {
   rclcpp::init(argc, argv);
 
-  auto node = std::make_shared<rclcpp::Node>("rpt2D_wrapper");
+  auto node = std::make_shared<rclcpp::Node>("rpt2d_wrapper");
   rclcpp::spin(node);
 
   rclcpp::shutdown();
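
Note on applying this series: colcon caches package metadata under build/ and
install/, so after the rename a stale workspace may still advertise the old
2D-cased names. A clean rebuild along the lines used in the dockerfile above
should avoid that (the /project/dev_ws path is the container workspace from
the dockerfile; wiping build/, install/ and log/ is standard colcon practice,
not something this series prescribes):

  cd /project/dev_ws
  rm -rf build/ install/ log/
  colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release
  source install/setup.bash
  ros2 pkg list | grep 2d_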