#include <array>
#include <atomic>
#include <chrono>
#include <cmath>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

// ROS2
#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <std_msgs/msg/string.hpp>

// OpenCV / cv_bridge
#include <opencv2/opencv.hpp>
#include <cv_bridge/cv_bridge.h>

// JSON library
#include "/RapidPoseTriangulation/extras/include/nlohmann/json.hpp"
using json = nlohmann::json;

#include "/RapidPoseTriangulation/scripts/utils_2d_pose.hpp"
#include "/RapidPoseTriangulation/scripts/utils_pipeline.hpp"

// =================================================================================================
static const std::string cam_id = "camera01";
static const std::string img_input_topic = "/" + cam_id + "/pylon_ros2_camera_node/image_raw";
static const std::string pose_out_topic = "/poses/" + cam_id;

static const float min_bbox_score = 0.4;
static const float min_bbox_area = 0.1 * 0.1;
static const bool batch_poses = true;

static const std::map<std::string, bool> whole_body = {
    {"foots", true},
    {"face", true},
    {"hands", true},
};

// =================================================================================================
// =================================================================================================
class Rpt2DWrapperNode : public rclcpp::Node
{
public:
    Rpt2DWrapperNode(const std::string &node_name) : Node(node_name)
    {
        this->is_busy = false;

        // QoS
        rclcpp::QoS qos_profile(1);
        qos_profile.reliable();
        qos_profile.keep_last(1);

        // Setup subscriber
        image_sub_ = this->create_subscription<sensor_msgs::msg::Image>(
            img_input_topic, qos_profile,
            std::bind(&Rpt2DWrapperNode::callback_images, this, std::placeholders::_1));

        // Setup publisher
        pose_pub_ = this->create_publisher<std_msgs::msg::String>(pose_out_topic, qos_profile);

        // Load model
        bool use_wb = utils_pipeline::use_whole_body(whole_body);
        this->kpt_model = std::make_unique<utils_2d_pose::PoseModel>(
            use_wb, min_bbox_score, min_bbox_area, batch_poses);

        RCLCPP_INFO(this->get_logger(), "Finished initialization of pose estimator.");
    }

private:
    rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr image_sub_;
    rclcpp::Publisher<std_msgs::msg::String>::SharedPtr pose_pub_;
    std::atomic<bool> is_busy;

    // Pose model pointer
    // NOTE: the template argument was lost in the original file; utils_2d_pose::PoseModel is
    // an assumed name for the 2D pose estimator class declared in utils_2d_pose.hpp.
    std::unique_ptr<utils_2d_pose::PoseModel> kpt_model;
    const std::vector<std::string> joint_names_2d = utils_pipeline::get_joint_names(whole_body);

    void callback_images(const sensor_msgs::msg::Image::SharedPtr msg);
    std::vector<std::vector<std::array<float, 3>>> call_model(const cv::Mat &image);

    void publish(const json &data)
    {
        std_msgs::msg::String msg;
        msg.data = data.dump();
        pose_pub_->publish(msg);
    }
};

// =================================================================================================
void Rpt2DWrapperNode::callback_images(const sensor_msgs::msg::Image::SharedPtr msg)
{
    if (this->is_busy)
    {
        RCLCPP_WARN(this->get_logger(), "Skipping frame, still processing...");
        return;
    }
    this->is_busy = true;
    auto ts_image = std::chrono::high_resolution_clock::now();

    // Load or convert image to Bayer format
    cv::Mat bayer_image;
    try
    {
        if (msg->encoding == "mono8")
        {
            cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, msg->encoding);
            bayer_image = cv_ptr->image;
        }
        else if (msg->encoding == "bayer_rggb8")
        {
            cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, msg->encoding);
            bayer_image = cv_ptr->image;
        }
        else if (msg->encoding == "rgb8")
        {
            cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, "rgb8");
            cv::Mat color_image = cv_ptr->image;
            bayer_image = utils_pipeline::rgb2bayer(color_image);
        }
        else
        {
            throw std::runtime_error("Unknown image encoding: " + msg->encoding);
        }
    }
    catch (const std::exception &e)
    {
        RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
        this->is_busy = false; // release the busy flag so subsequent frames are not skipped
        return;
    }

    // Call model
    const auto &valid_poses = this->call_model(bayer_image);

    // Calculate timings
    // NOTE: comparing against the ROS stamp assumes high_resolution_clock shares the
    // system clock epoch (true for libstdc++ on Linux).
    double time_stamp = msg->header.stamp.sec
                        + msg->header.stamp.nanosec / 1.0e9;
    auto ts_image_sec = std::chrono::duration<double>(ts_image.time_since_epoch()).count();
    auto ts_pose = std::chrono::high_resolution_clock::now();
    double ts_pose_sec = std::chrono::duration<double>(ts_pose.time_since_epoch()).count();
    double z_trigger_image = ts_image_sec - time_stamp;
    double z_trigger_pose = ts_pose_sec - time_stamp;
    double z_image_pose = ts_pose_sec - ts_image_sec;

    // Build JSON
    json poses_msg;
    poses_msg["bodies2D"] = valid_poses; // shape: persons x keypoints x 3
    poses_msg["joints"] = joint_names_2d;
    poses_msg["num_persons"] = valid_poses.size();
    poses_msg["timestamps"] = {
        {"trigger", time_stamp},
        {"image", ts_image_sec},
        {"pose", ts_pose_sec},
        {"z-trigger-image", z_trigger_image},
        {"z-image-pose", z_image_pose},
        {"z-trigger-pose", z_trigger_pose}};

    // Publish
    publish(poses_msg);

    // Print info
    double elapsed_time = std::chrono::duration<double>(
                              std::chrono::high_resolution_clock::now() - ts_image)
                              .count();
    std::cout << "Detected persons: " << valid_poses.size()
              << " - Prediction time: " << elapsed_time << "s" << std::endl;

    this->is_busy = false;
}

// =================================================================================================
std::vector<std::vector<std::array<float, 3>>> Rpt2DWrapperNode::call_model(const cv::Mat &image)
{
    // Create image vector
    cv::Mat rgb_image = utils_pipeline::bayer2rgb(image);
    std::vector<cv::Mat> images_2d = {rgb_image};

    // Predict 2D poses
    auto poses_2d_all = kpt_model->predict(images_2d);
    auto poses_2d_upd = utils_pipeline::update_keypoints(
        poses_2d_all, joint_names_2d, whole_body);
    auto &poses_2d = poses_2d_upd[0];

    // Drop persons with no joints
    std::vector<std::vector<std::array<float, 3>>> valid_poses;
    for (auto &person : poses_2d)
    {
        float sum_conf = 0.0;
        for (auto &kp : person)
        {
            sum_conf += kp[2];
        }
        if (sum_conf > 0.0)
        {
            valid_poses.push_back(person);
        }
    }

    // Round poses to 3 decimal places
    for (auto &person : valid_poses)
    {
        for (auto &kp : person)
        {
            kp[0] = std::round(kp[0] * 1000.0) / 1000.0;
            kp[1] = std::round(kp[1] * 1000.0) / 1000.0;
            kp[2] = std::round(kp[2] * 1000.0) / 1000.0;
        }
    }

    return valid_poses;
}

// =================================================================================================
// =================================================================================================
int main(int argc, char **argv)
{
    rclcpp::init(argc, argv);
    auto node = std::make_shared<Rpt2DWrapperNode>("rpt2D_wrapper");
    rclcpp::spin(node);
    rclcpp::shutdown();
    return 0;
}