feat: initialize offline multiview pose tracking experiment
Set up pose_tracking_exp as a uv-managed Python package for offline multiview body tracking experiments. This initial commit includes:

- the typed package scaffold, CLI entrypoints, and repo-local uv configuration
- scene and replay loaders for generic JSON replays and ActualTest parquet inputs
- ParaJumping payload conversion and RTMPose-to-body20 normalization
- a custom articulated tracker with tentative, active, and lost lifecycle handling
- RPT-backed proposal generation, camera convention handling, and multiview reprojection updates
- regression tests for normalization, camera conventions, ActualTest ingestion, seeding, and tracker smoke flows
- project documentation covering extrinsic formats and the ActualTest calibration caveat
This commit is contained in:
@@ -0,0 +1,67 @@
|
||||
import json
import os
from pathlib import Path

import numpy as np
import pytest

pytest.importorskip("rpt")

from pose_tracking_exp.models import CameraFrame, FrameBundle, PoseDetection, TrackerConfig
from pose_tracking_exp.replay import load_scene_file
from pose_tracking_exp.tracker import PoseTracker
|
||||
# Root of the RapidPoseTriangulation checkout that provides the sample scene
# and 2D pose fixtures used by this test.  The default is the original
# author's local path; set the RPT_ROOT environment variable to run the
# test on any other machine.
RPT_ROOT = Path(os.environ.get("RPT_ROOT", "/home/crosstyan/Code/RapidPoseTriangulation"))
||||
def test_tracker_promotes_rpt_sample_person():
    """Smoke test: replaying the RPT sample poses for three identical frames
    should produce proposals and promote at least one tentative track to
    active by the final bundle."""
    scene = load_scene_file(RPT_ROOT / "data/p1/sample.json")
    payload = json.loads((RPT_ROOT / "tests/poses_p1.json").read_text(encoding="utf-8"))
    per_view_poses = payload["2D"]

    tracker = PoseTracker(
        scene,
        TrackerConfig(
            tentative_min_age=2,
            tentative_hits_required=2,
            tentative_promote_score=1.2,
            proposal_min_score=0.5,
        ),
    )

    def build_detections(raw_view_poses):
        # Convert one camera's raw pose list into PoseDetection objects,
        # deriving each bbox from the keypoints' axis-aligned extent.
        poses = np.asarray(raw_view_poses, dtype=np.float64)
        detections = []
        for pose in poses:
            xy_min = pose[:, :2].min(axis=0)
            xy_max = pose[:, :2].max(axis=0)
            detections.append(
                PoseDetection(
                    bbox=np.asarray([xy_min[0], xy_min[1], xy_max[0], xy_max[1]], dtype=np.float64),
                    bbox_confidence=1.0,
                    keypoints=pose,
                )
            )
        return tuple(detections)

    bundles: list[FrameBundle] = []
    for idx in range(3):
        # Same poses every frame; only frame index and timestamp advance
        # (~33 ms per frame, i.e. roughly 30 fps).
        stamp = 1_000_000_000 + idx * 33_000_000
        views = tuple(
            CameraFrame(
                camera_name=camera.name,
                frame_index=idx,
                timestamp_unix_ns=stamp,
                detections=build_detections(raw_view_poses),
                source_size=(camera.width, camera.height),
            )
            for camera, raw_view_poses in zip(scene.cameras, per_view_poses, strict=True)
        )
        bundles.append(
            FrameBundle(
                bundle_index=idx,
                timestamp_unix_ns=views[0].timestamp_unix_ns,
                views=views,
            )
        )

    results = tracker.run(bundles)
    assert any(result.proposals for result in results)
    assert any(result.active_tracks for result in results[1:])
    assert len(results[-1].active_tracks) >= 1
|
||||
Reference in New Issue
Block a user