feat: initialize offline multiview pose tracking experiment
Set up pose_tracking_exp as a uv-managed Python package for offline multiview body tracking experiments. This initial commit includes: - the typed package scaffold, CLI entrypoints, and repo-local uv configuration - scene and replay loaders for generic JSON replays and ActualTest parquet inputs - ParaJumping payload conversion and RTMPose-to-body20 normalization - a custom articulated tracker with tentative, active, and lost lifecycle handling - RPT-backed proposal generation, camera convention handling, and multiview reprojection updates - regression tests for normalization, camera conventions, ActualTest ingestion, seeding, and tracker smoke flows - project documentation covering extrinsic formats and the ActualTest calibration caveat
This commit is contained in:
@@ -0,0 +1,5 @@
|
|||||||
|
.pytest_cache/
|
||||||
|
.venv/
|
||||||
|
.venv-pypi/
|
||||||
|
__pycache__/
|
||||||
|
*.pyc
|
||||||
@@ -0,0 +1,44 @@
|
|||||||
|
# pose_tracking_exp
|
||||||
|
|
||||||
|
Offline multiview body tracking experiments built around:
|
||||||
|
|
||||||
|
- `RapidPoseTriangulation` for geometric birth proposals
|
||||||
|
- a typed replay format for recorded per-camera detections
|
||||||
|
- a recursive active/lost tracker with fixed bone lengths
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv sync --group dev
|
||||||
|
```
|
||||||
|
|
||||||
|
## Run
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv run pose-tracking-exp run data/scene.json data/replay.jsonl
|
||||||
|
```
|
||||||
|
|
||||||
|
`scene.json` may declare camera extrinsics in either format:
|
||||||
|
|
||||||
|
- `opencv_world_to_camera`: OpenCV `solvePnP` / `cv2.projectPoints` convention. This is the default.
|
||||||
|
- `rpt_camera_pose`: camera pose in world coordinates, which is what `RapidPoseTriangulation` expects internally.
|
||||||
|
|
||||||
|
The loader normalizes both to OpenCV extrinsics for reprojection and converts to RPT pose only when building the triangulation config.
|
||||||
|
If you already have an older hand-authored scene file that stored RPT camera pose directly, set `extrinsic_format` explicitly to `rpt_camera_pose`.
|
||||||
|
|
||||||
|
## Convert ParaJumping Payload Records
|
||||||
|
|
||||||
|
```bash
|
||||||
|
uv run pose-tracking-exp convert-parajumping input.jsonl output.jsonl
|
||||||
|
```
|
||||||
|
|
||||||
|
## ActualTest Calibration Caveat
|
||||||
|
|
||||||
|
`ActualTest_WeiHua/camera_params.parquet` appears to store raw OpenCV extrinsics from the ChArUco pipeline, not camera poses. The tracker now converts those values before calling `RapidPoseTriangulation`, because RPT expects camera centers and camera-to-world rotation.
|
||||||
|
|
||||||
|
In repo terms:
|
||||||
|
|
||||||
|
- OpenCV reprojection keeps `R`, `T`, and `rvec` as world-to-camera extrinsics.
|
||||||
|
- RPT export uses the derived camera pose `pose_R = R^T` and `pose_T = -R^T t`.
|
||||||
|
|
||||||
|
There is still one upstream caveat: the ParaJumping calibration notebook averages `rvec` samples component-wise before writing the parquet. That is a rough approximation for rotations and can introduce some bias even when the convention is handled correctly.
|
||||||
@@ -0,0 +1,45 @@
|
|||||||
|
[build-system]
|
||||||
|
requires = ["hatchling>=1.27.0"]
|
||||||
|
build-backend = "hatchling.build"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "pose-tracking-exp"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Offline multiview pose tracking experiment with RPT-backed proposal births"
|
||||||
|
readme = "README.md"
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
dependencies = [
|
||||||
|
"beartype>=0.19.0",
|
||||||
|
"click>=8.2.1",
|
||||||
|
"jaxtyping>=0.3.2",
|
||||||
|
"numpy>=2.1.0",
|
||||||
|
"opencv-python>=4.12.0.88",
|
||||||
|
"pyarrow>=21.0.0",
|
||||||
|
"rapid-pose-triangulation",
|
||||||
|
"scipy>=1.15.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[dependency-groups]
|
||||||
|
dev = [
|
||||||
|
"basedpyright>=1.31.0",
|
||||||
|
"pytest>=8.4.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.scripts]
|
||||||
|
pose-tracking-exp = "pose_tracking_exp.cli:main"
|
||||||
|
|
||||||
|
[tool.hatch.build.targets.wheel]
|
||||||
|
packages = ["src/pose_tracking_exp"]
|
||||||
|
|
||||||
|
[tool.uv.sources]
|
||||||
|
rapid-pose-triangulation = { path = "../RapidPoseTriangulation", editable = true }
|
||||||
|
|
||||||
|
[tool.pytest.ini_options]
|
||||||
|
testpaths = ["tests"]
|
||||||
|
|
||||||
|
[tool.basedpyright]
|
||||||
|
typeCheckingMode = "standard"
|
||||||
|
pythonVersion = "3.12"
|
||||||
|
include = ["src", "tests"]
|
||||||
|
reportAny = false
|
||||||
|
reportExplicitAny = false
|
||||||
@@ -0,0 +1,168 @@
|
|||||||
|
> N views (known camera parameters, including extrinsic), and have D_n detection per view (unmatched, unknown id, although I could use some 2D tracker like SORT/ByteTrack etc. but it won't solve cross view association, only have a relatively stable 2D view tracking) and the 2D detector is noisy (some random noise, unknown distribution, due to the neural pose estimator or occlusion) if we could somehow get a 3D detection (pick some annotation) (refer: https://arxiv.org/html/2503.21692 a naive way, with DLT) but it's still noisy, and it's still keypoint based non IK (what's the word? SO3?) for a correct human pose representation, design some skeleton like (that could convert from points, assuming using IK? and go back to keypoints with FK) but you still need a skeleton (beta) estimation (once established shouldn't change unless tracking lost) and with that skeleton we could do gating (since the 3D detection is noisy) need something robust (always assuming the neural detection/DLT result is noisy, but we have all the history state, but don't use sliding window, model the noise somehow?) I personally believe sliding window is a bad/lazy design and should be avoid, but it's just my experience
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Good. With that context, here is the design I would actually build.
|
||||||
|
|
||||||
|
The main separation is this: the triangulated 3D keypoint cloud is not the persistent state. It is a proposal or measurement. The persistent state should be an articulated skeleton with fixed shape. In your terminology, the free 3D keypoint output is a joint-coordinate or kinematic-free representation in (\mathbb{R}^{3J}); it is not an (SO(3)) state. (SO(3)) enters only when you represent joint rotations. RapidPoseTriangulation is essentially a proposal-style pipeline: it forms cross-view 2D pose pairs, filters them using previous 3D pose projections, triangulates pairwise 3D proposals, reprojects them, rejects high-error pairs, groups proposals in 3D, merges them, and only then optionally assigns tracks or clips joint motion. ([arXiv][1])
|
||||||
|
|
||||||
|
So the long-lived track for person (p) should be something like
|
||||||
|
[
|
||||||
|
x_t^p = \big[T_t^p,\ q_t^p,\ \dot T_t^p,\ \dot q_t^p,\ \beta^p,\ \eta_t^p\big].
|
||||||
|
]
|
||||||
|
Here (T_t^p\in SE(3)) is root translation and orientation, (q_t^p) are local joint rotations or reduced-DOF angles, (\beta^p) is the subject shape or bone-length parameter, and (\eta_t^p) is a latent reliability or noise state. The observable joints are derived by forward kinematics,
|
||||||
|
[
|
||||||
|
X_{i,t}^p = FK_i(T_t^p,q_t^p,\beta^p),
|
||||||
|
]
|
||||||
|
and projected into each calibrated view by
|
||||||
|
[
|
||||||
|
\hat u_{v,i,t}^p = \Pi_v(X_{i,t}^p).
|
||||||
|
]
|
||||||
|
That gives you exactly what you wanted earlier: reprojection, IK/FK consistency, fixed skeleton once established, and a place to put historical noise.
|
||||||
|
|
||||||
|
For association, I would split the problem in two. For already-existing tracks, do not solve full cross-view association from scratch each frame. Predict each 3D track forward, reproject it into every view, and associate detections to that predicted skeleton in each camera. Cross-view consistency then comes from the fact that the same 3D state must explain all matched views. For births only, use a within-frame geometric proposer: build cross-view hypotheses from stable core joints, triangulate, reproject, reject bad hypotheses, cluster in 3D, then initialize a new articulated track from the surviving cluster. RapidPose explicitly uses a core-joint subset early on—shoulders, hips, elbows, wrists, knees, ankles—to keep association cheap before triangulating all joints. If you want a published association front-end, Dong et al. use multi-way matching over 2D poses with geometry and appearance cues, while Part-Aware Measurement uses temporal consistency to associate current 2D poses with previous 3D skeletons and adds a filter for 2D outliers. ([arXiv][1])
|
||||||
|
|
||||||
|
The update should be recursive and articulated. I would not use a Kalman filter on raw DLT joints. I would do a one-step recursive MAP or iterated-EKF-style update on the articulated state:
|
||||||
|
[
\hat x_t^p
=
\arg\min_x
\|x-\bar x_t^p\|_{Q^{-1}}^2
+
\sum_{(v,m)\in A_t^p}\sum_{i\in\mathcal J}
\rho\!\left(
\|u_{v,i,t}^{m}-\Pi_v(FK_i(x))\|_{R_{v,i,t}^{-1}}^2
\right)
+
\phi_{\text{limits}}(q)
+
\phi_{\text{shape}}(\beta).
]
|
||||||
|
Here (\bar x_t^p) is the predicted state, (A_t^p) is the set of associated detections, (\rho) is a robust loss, and the priors enforce joint limits and fixed shape. This is fully online. No sliding window is required.
|
||||||
|
|
||||||
|
One important point: do not treat the same 2D detections and their DLT triangulation as two independent measurements unless you explicitly model the correlation. If the 3D DLT point was computed from those same 2D keypoints, then using both naively double-counts the evidence. The clean choice is to use raw multi-view 2D detections for the main update, and use DLT 3D only for proposal generation, coarse gating, or initialization. If you insist on using DLT in the update, make it a pseudo-measurement with covariance propagated from the 2D uncertainty.
|
||||||
|
|
||||||
|
For noise, assume the detector is heavy-tailed and heteroscedastic. That is the right place to be robust, instead of hiding the problem inside the pose state. The simplest useful model is Student-(t) or a Gaussian scale mixture:
|
||||||
|
[
|
||||||
|
u_{v,i,t} = \hat u_{v,i,t} + \epsilon_{v,i,t},
|
||||||
|
\qquad
|
||||||
|
\epsilon_{v,i,t}\sim t_\nu(0,R_{v,i,t}).
|
||||||
|
]
|
||||||
|
Then update a per-joint, per-view scale recursively from the innovation
|
||||||
|
[
|
||||||
|
r_{v,i,t}=u_{v,i,t}-\hat u_{v,i,t},
|
||||||
|
]
|
||||||
|
for example with
|
||||||
|
[
|
||||||
|
w_{v,i,t}=\frac{\nu+2}{\nu+r_{v,i,t}^\top S_{v,i,t}^{-1}r_{v,i,t}},
|
||||||
|
]
|
||||||
|
and
|
||||||
|
[
\log \sigma^2_{v,i,t}
=
\lambda \log \sigma^2_{v,i,t-1}
+
(1-\lambda)\log(\epsilon+\|r_{v,i,t}\|^2).
]
|
||||||
|
Then define
|
||||||
|
[
|
||||||
|
R_{v,i,t}=R_{\min}+f(c_{v,i,t})+\sigma^2_{v,i,t}I.
|
||||||
|
]
|
||||||
|
If you do use triangulated 3D proposals, add a geometry term that depends on ray intersection angle or triangulation condition number, because a narrow baseline should produce much larger depth uncertainty than a wide baseline.
|
||||||
|
|
||||||
|
This is the non-window way to use history. The history lives in three places: the dynamic prior on ((T,q)), the nearly-static shape (\beta), and the recursive noise state (\eta) or (\sigma^2). That is not a sliding window. It is a proper state-space model with exponential forgetting.
|
||||||
|
|
||||||
|
For (\beta), I would use a two-timescale rule. During track establishment, allow slow adaptation under a strong anthropometric prior. After the track is stable, freeze it. In practice I would not start with fully free per-bone lengths. I would use a low-dimensional shape model: global scale, leg-length ratio, arm-length ratio, torso scale, shoulder width, maybe hip width. Full free bone lengths tend to absorb detector bias and produce a skeleton that explains bad keypoints by deforming itself. Re-open (\beta) only if the track is lost and later reinitialized, or if you have strong evidence over many good frames that the previous estimate was wrong.
|
||||||
|
|
||||||
|
For gating, use Mahalanobis distance in image space on core joints:
|
||||||
|
[
d^2_{p,v,m}
=
\sum_{i\in \mathcal J_{\text{core}}}
(u_{v,i}^m-\hat u_{v,i}^p)^\top
R_{v,i,t}^{-1}
(u_{v,i}^m-\hat u_{v,i}^p).
]
|
||||||
|
Gate per joint or per limb, not only on the full body. If one wrist is bad, mark it missing; do not reject the entire person. A secondary 3D gate is useful only for proposal merging:
|
||||||
|
[
|
||||||
|
d^2_{3D} =
|
||||||
|
(\hat X^{DLT}-X^{pred})^\top
|
||||||
|
(\Sigma_{DLT}+\Sigma_{pred})^{-1}
|
||||||
|
(\hat X^{DLT}-X^{pred}).
|
||||||
|
]
|
||||||
|
Primary association should still be driven by the FK-reprojected articulated state.
|
||||||
|
|
||||||
|
So the short design is:
|
||||||
|
|
||||||
|
Proposal layer: per-frame cross-view geometric hypotheses from noisy 2D detections.
|
||||||
|
State layer: articulated track ((SE(3)+) joint angles (+\beta)).
|
||||||
|
Observation layer: direct multi-view 2D reprojection residuals.
|
||||||
|
Noise layer: recursive per-joint, per-view heavy-tailed covariance adaptation.
|
||||||
|
Association: predicted reprojection gating for existing tracks; triangulation/clustering only for births.
|
||||||
|
Shape: estimate slowly, then freeze.
|
||||||
|
|
||||||
|
That is the version I would trust. It is robust, physically constrained, reprojectable, and it does not depend on a sliding window to behave well.
|
||||||
|
|
||||||
|
[1]: https://arxiv.org/html/2503.21692v5 "RapidPoseTriangulation: Multi-view Multi-person Whole-body Human Pose Triangulation in a Millisecond"
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
Yes. I would make it explicitly two-stage. RapidPoseTriangulation already has a fast within-frame proposal stage based on cross-view pairs, core-joint triangulation, reprojection filtering, 3D grouping, and only then an optional track-assignment stage; Part-Aware likewise uses the previous 3D skeletons to associate current 2D poses across views and time while filtering 2D outliers. Your “tentative then established” split is the right systems version of that idea. ([arXiv][1])
|
||||||
|
|
||||||
|
For the tentative state, I would not use per-joint (SO(3)) yet. I would use one root state plus a free 3D core-joint cloud. Concretely,
|
||||||
|
[
|
||||||
|
x_t^{\text{tent}}=
|
||||||
|
[c_t,\ \dot c_t,\ Y_t^{\text{core}},\ \dot Y_t^{\text{core}},\ s_t,\ \Sigma_t,\ q_t^{\text{track}}],
|
||||||
|
]
|
||||||
|
where (c_t) is pelvis/root position, (Y_t^{\text{core}}) are root-centered 3D core joints, (s_t) is just a coarse global scale, (\Sigma_t) is uncertainty, and (q_t^{\text{track}}) is an existence/quality score. If you need an orientation, use a temporary torso frame derived from hips and shoulders, but do not commit to IK yet. RapidPose uses shoulders, hips, elbows, wrists, knees, and ankles as its core set; for confirmation I would weight torso and proximal joints highest and treat distal joints as secondary evidence. That weighting choice is my recommendation, not something stated in the paper. ([arXiv][1])
|
||||||
|
|
||||||
|
A short fixed buffer in the tentative stage makes sense. I would treat it as an evidence buffer, not as the main estimator. Promotion should require K-of-M good updates, minimum multi-view support, low normalized reprojection error, stable limb ratios, and non-degenerate triangulation geometry. A practical rule is to estimate only a low-dimensional (\beta) at promotion time,
|
||||||
|
[
|
||||||
|
\beta \approx [\text{global scale},\ \text{torso scale},\ \text{arm ratio},\ \text{leg ratio}],
|
||||||
|
]
|
||||||
|
from weighted median limb lengths over the tentative buffer, then soft-lock it for a grace period and hard-freeze it later. I would not freeze a fully free per-bone skeleton immediately on the first “confirmed” frame. Part-Aware’s emphasis on temporal consistency for association and explicit 2D outlier filtering supports using history mainly for confirmation and robustness, not just smoothing. ([arXiv][2])
|
||||||
|
|
||||||
|
For deletion, do not go straight from “confirmed” to “dead.” Use four states: tentative, active, lost, dead. Tentative dies quickly if it is not confirmed within a small age budget or if it loses support for a couple of frames. Active moves to lost after a small number of consecutive misses. Lost keeps predicting forward, inflates covariance, and keeps trying reacquisition using the frozen (\beta) and reprojected core joints as gates. Delete only when the lost age is too large, the predicted uncertainty becomes too large to be useful, or the predicted root has exited the capture volume for several frames. A scalar existence score is better than a pure frame-count rule:
|
||||||
|
[
|
||||||
|
s_t=\lambda s_{t-1}+a\,n^{\text{views}}_t+b\,n^{\text{joints}}_t-c\,\bar e^{\text{repr}}_t-d\,\mathbf 1_{\text{miss}}.
|
||||||
|
]
|
||||||
|
Promote when (s_t>\tau_{\text{promote}}), move to lost when (s_t<\tau_{\text{lost}}), and delete when lost age or uncertainty crosses a bound. RapidPose's use of previous 3D poses for pair filtering and Part-Aware's use of previous 3D skeletons for 2D-3D association both support this "active/lost/reacquire" structure. ([arXiv][1])
|
||||||
|
|
||||||
|
On the DLT question: “not using DLT” does not mean “use a neural network.” There are three different operations here. First, algebraic triangulation: OpenCV’s `sfm::triangulatePoints` is explicitly a DLT-based triangulator. Second, geometric or nonlinear triangulation: refine the point by minimizing reprojection error rather than keeping the raw algebraic DLT solution. OpenCV also exposes `correctMatches`, which implements the optimal-triangulation-style correction of correspondences under the epipolar constraint before triangulation. Third, articulated state fitting: do not reconstruct a separate 3D point cloud at all; solve directly for the skeleton state that best explains all 2D observations. ([OpenCV Documentation][3])
|
||||||
|
|
||||||
|
For established tracks, the clean answer is the third one. Do not reconstruct an independent 3D joint cloud every frame. Solve the articulated state directly from all 2D views:
|
||||||
|
[
\hat x_t
=
\arg\min_x
\|x-\bar x_t\|_{Q^{-1}}^2
+
\sum_{v=1}^{N}\sum_{i\in\mathcal J}
w_{v,i,t}\,
\rho\!\left(
\left\|
u_{v,i,t}-\Pi_v(FK_i(x,\beta))
\right\|^2
\right)
+
\phi_{\text{limits}}(q)
+
\phi_{\text{shape}}(\beta).
]
|
||||||
|
This is the same optimization pattern used by top-down body-model fitting methods: SMPL-X/SMPLify-X fits a parametric body model to 2D features and explicitly relies on a pose prior because 2D-to-3D is ambiguous, and KITRO likewise starts from 2D keypoints and refines the 3D model along the kinematic tree while discussing the depth ambiguity of pure 2D fitting. My extension is: in your calibrated multi-view setting, apply that fitting jointly across views, where the ambiguity is much smaller than in single-view. That multi-view claim is an inference from geometry, not a quoted result from those single-view papers.
|
||||||
|
|
||||||
|
So for real time, I would not try to ban DLT. I would confine it to births and cheap initialization. Tentative births can use DLT or another simple ray-based triangulation on a conservative core set because that is fast and good enough to decide whether a hypothesis is real; RapidPose is essentially showing that this kind of proposal stage can be very cheap. Once the track is established, use the previous articulated state as the initializer and do a small number of Gauss-Newton or LM updates on the multi-view reprojection objective. That linear-init plus nonlinear-refine pattern is standard in vision; OpenCV’s iterative `SolvePnP`, for example, uses LM refinement with a DLT initializer for non-planar cases. That is only an analogy for the optimization pattern, not a claim that `SolvePnP` solves human pose. ([arXiv][1])
|
||||||
|
|
||||||
|
So the recommendation is very simple. Tentative stage: root-centered free core-joint state, short evidence buffer, no IK. Promotion: fit a low-dimensional (\beta), soft-lock then freeze. Established stage: articulated IK/FK state updated directly from multi-view 2D reprojection, with recursive noise modeling and no sliding window. Deletion: active/lost/dead, driven by support and uncertainty, not just missed-frame count.
|
||||||
|
|
||||||
|
The next useful step is to write the exact state machine and per-frame update equations in implementation form.
|
||||||
|
|
||||||
|
[1]: https://arxiv.org/html/2503.21692v5 "RapidPoseTriangulation: Multi-view Multi-person Whole-body Human Pose Triangulation in a Millisecond"
|
||||||
|
[2]: https://arxiv.org/abs/2106.11589 "[2106.11589] Part-Aware Measurement for Robust Multi-View Multi-Human 3D Pose Estimation and Tracking"
|
||||||
|
[3]: https://docs.opencv.org/4.x/d0/dbd/group__triangulation.html "OpenCV: Triangulation"
|
||||||
|
|
||||||
|
|
||||||
@@ -0,0 +1,545 @@
|
|||||||
|
Below is a concrete version that matches your two-stage idea.
|
||||||
|
|
||||||
|
The central choice is:
|
||||||
|
|
||||||
|
The tentative stage is allowed to use a short evidence buffer and DLT-style 3D core-pose proposals.
|
||||||
|
|
||||||
|
The established stage does not maintain a free 3D keypoint cloud. It maintains an articulated state and updates that state directly from multi-view 2D reprojection error. No extra neural network is required.
|
||||||
|
|
||||||
|
Notation first.
|
||||||
|
|
||||||
|
(t) is frame index.
|
||||||
|
(v \in {1,\dots,N}) indexes cameras.
|
||||||
|
(m) indexes 2D detections inside one camera.
|
||||||
|
(p) indexes tracks.
|
||||||
|
(i \in \mathcal J) indexes full-body joints.
|
||||||
|
(\mathcal C \subset \mathcal J) is the core joint set, for example pelvis, hips, shoulders, elbows, knees.
|
||||||
|
|
||||||
|
Each camera has known projection (\Pi_v(\cdot)).
|
||||||
|
|
||||||
|
Each 2D detection is
|
||||||
|
[
|
||||||
|
d_{v,t}^m = \{(u_{v,i,t}^m,\ c_{v,i,t}^m)\}_{i\in\mathcal J},
|
||||||
|
]
|
||||||
|
where (u_{v,i,t}^m \in \mathbb R^2) is the observed 2D joint and (c_{v,i,t}^m \in [0,1]) is its confidence.
|
||||||
|
|
||||||
|
For an articulated track, the skeleton model is
|
||||||
|
[
|
||||||
|
X_{i,t}^p = FK_i(T_t^p,\ q_t^p,\ \beta^p),
|
||||||
|
]
|
||||||
|
where (T_t^p \in SE(3)) is the root transform, (q_t^p) are joint angles, and (\beta^p) are bone-length or shape parameters. The predicted 2D image joint is
|
||||||
|
[
|
||||||
|
\hat u_{v,i,t}^p = \Pi_v(X_{i,t}^p).
|
||||||
|
]
|
||||||
|
|
||||||
|
I would use this lifecycle:
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
stateDiagram-v2
|
||||||
|
[*] --> Tentative
|
||||||
|
|
||||||
|
Tentative --> Active: confirm\nscore > τ_promote\nK hits in last L\nmean views >= V_min\nrepr err <= τ_repr\nbone CV <= τ_len
|
||||||
|
Tentative --> Dead: age > A_tent_max\nor misses > M_tent
|
||||||
|
|
||||||
|
Active --> Active: matched and updated
|
||||||
|
Active --> Lost: misses > M_lost\nor score < τ_lost
|
||||||
|
|
||||||
|
Lost --> Active: reacquired\nsufficient 2D support\nor strong proposal match
|
||||||
|
Lost --> Dead: lost_age > A_delete\nor trace(P) > τ_P\nor outside volume
|
||||||
|
|
||||||
|
Dead --> [*]
|
||||||
|
```
|
||||||
|
|
||||||
|
Now the states.
|
||||||
|
|
||||||
|
Tentative state:
|
||||||
|
[
x_{t}^{p,\mathrm{tent}}
=
\big(
r_t^p,\ \dot r_t^p,\ Z_t^p,\ \dot Z_t^p,\ B_t^p,\ s_t^p,\ a_t^p,\ m_t^p,\ \Sigma_{Z,t}^p
\big).
]
|
||||||
|
|
||||||
|
Here:
|
||||||
|
|
||||||
|
(r_t^p \in \mathbb R^3) is provisional root or pelvis position.
|
||||||
|
|
||||||
|
(Z_t^p = \{Z_{i,t}^p\}_{i\in\mathcal C}) is a root-centered 3D core pose, so (Z_{\text{pelvis},t}^p=0).
|
||||||
|
|
||||||
|
(B_t^p) is a short FIFO evidence buffer of the last (L) accepted core-pose proposals.
|
||||||
|
|
||||||
|
(s_t^p) is the tentative existence score.
|
||||||
|
|
||||||
|
(a_t^p) is age in frames.
|
||||||
|
|
||||||
|
(m_t^p) is consecutive miss count.
|
||||||
|
|
||||||
|
(\Sigma_{Z,t}^p) is uncertainty of the core-pose estimate.
|
||||||
|
|
||||||
|
Established state:
|
||||||
|
[
x_t^{p,\mathrm{act}}
=
\big(
T_t^p,\ q_t^p,\ \xi_t^p,\ \dot q_t^p,\ \beta^p,\ f_\beta^p,\ P_t^p,\ \sigma_{v,i,t}^{2,p},\ s_t^p,\ m_t^p,\ \ell_t^p
\big).
]
|
||||||
|
|
||||||
|
Here:
|
||||||
|
|
||||||
|
(T_t^p \in SE(3)) is root pose.
|
||||||
|
|
||||||
|
(q_t^p \in \mathbb R^d) is articulated pose.
|
||||||
|
|
||||||
|
(\xi_t^p \in \mathbb R^6) is root velocity in Lie algebra coordinates.
|
||||||
|
|
||||||
|
(\dot q_t^p) is angular velocity.
|
||||||
|
|
||||||
|
(\beta^p \in \mathbb R^B) is low-dimensional shape or bone-length state.
|
||||||
|
|
||||||
|
(f_\beta^p \in {0,1}) says whether (\beta) is frozen.
|
||||||
|
|
||||||
|
(P_t^p) is approximate state covariance.
|
||||||
|
|
||||||
|
(\sigma_{v,i,t}^{2,p}) is per-view, per-joint measurement noise.
|
||||||
|
|
||||||
|
(s_t^p) is track score.
|
||||||
|
|
||||||
|
(m_t^p) is consecutive miss count.
|
||||||
|
|
||||||
|
(\ell_t^p) is lost age.
|
||||||
|
|
||||||
|
The per-frame algorithm is:
|
||||||
|
|
||||||
|
```text
|
||||||
|
function UPDATE_FRAME(t, detections_by_view, tracks, cameras):
|
||||||
|
|
||||||
|
split tracks into Tentative, Active, Lost
|
||||||
|
|
||||||
|
# 1) Predict established tracks
|
||||||
|
for p in Active ∪ Lost:
|
||||||
|
p_pred <- PREDICT_ESTABLISHED(p)
|
||||||
|
|
||||||
|
# 2) Associate 2D detections to predicted established tracks
|
||||||
|
matches, unused_detections <- ASSOCIATE_ESTABLISHED(p_pred, detections_by_view, cameras)
|
||||||
|
|
||||||
|
# 3) Update Active tracks by direct articulated fitting from 2D
|
||||||
|
for p in Active:
|
||||||
|
M_p <- BUILD_MEASUREMENT_SET(p, matches[p], cameras)
|
||||||
|
if SUFFICIENT_SUPPORT(M_p):
|
||||||
|
p <- UPDATE_ESTABLISHED(p_pred, M_p, cameras)
|
||||||
|
p <- UPDATE_ACTIVE_SCORE(p, M_p)
|
||||||
|
p.miss <- 0
|
||||||
|
if SHOULD_FREEZE_BETA(p): p.f_beta <- 1
|
||||||
|
else:
|
||||||
|
p <- MARK_MISSED(p_pred)
|
||||||
|
p <- ACTIVE_TO_LOST_IF_NEEDED(p)
|
||||||
|
|
||||||
|
# 4) Reacquire Lost tracks
|
||||||
|
for p in Lost:
|
||||||
|
M_p <- BUILD_MEASUREMENT_SET(p, matches[p], cameras)
|
||||||
|
if SUFFICIENT_REACQ_SUPPORT(M_p):
|
||||||
|
p <- UPDATE_ESTABLISHED(p_pred, M_p, cameras)
|
||||||
|
p <- REACTIVATE(p)
|
||||||
|
else:
|
||||||
|
p <- PREDICT_ONLY(p_pred)
|
||||||
|
p.lost_age <- p.lost_age + 1
|
||||||
|
|
||||||
|
# 5) Build geometric proposals from unused detections
|
||||||
|
proposals <- BUILD_CORE_PROPOSALS(unused_detections, cameras)
|
||||||
|
clusters <- CLUSTER_PROPOSALS(proposals)
|
||||||
|
|
||||||
|
# 6) Update tentative tracks using proposal clusters
|
||||||
|
Tentative <- UPDATE_TENTATIVE_TRACKS(Tentative, clusters)
|
||||||
|
|
||||||
|
# 7) Promote mature tentative tracks to established
|
||||||
|
for z in Tentative:
|
||||||
|
if SHOULD_PROMOTE(z):
|
||||||
|
p0 <- PROMOTE_TO_ESTABLISHED(z, detections_by_view, cameras)
|
||||||
|
add p0 to Active
|
||||||
|
remove z from Tentative
|
||||||
|
elif SHOULD_DELETE_TENTATIVE(z):
|
||||||
|
remove z
|
||||||
|
|
||||||
|
# 8) Try proposal-based reacquisition for Lost tracks
|
||||||
|
for p in Lost:
|
||||||
|
c <- BEST_COMPATIBLE_CLUSTER(p, clusters)
|
||||||
|
if c exists and REACQ_COMPATIBLE(p, c):
|
||||||
|
p <- REACQUIRE_FROM_CLUSTER(p, c, detections_by_view, cameras)
|
||||||
|
move p to Active
|
||||||
|
|
||||||
|
# 9) Delete dead Lost tracks
|
||||||
|
for p in Lost:
|
||||||
|
if SHOULD_DELETE_LOST(p):
|
||||||
|
remove p
|
||||||
|
|
||||||
|
return Tentative ∪ Active ∪ Lost
|
||||||
|
```
|
||||||
|
|
||||||
|
Now the actual equations.
|
||||||
|
|
||||||
|
For tentative proposals, use only detections not already consumed by established tracks. For each camera pair ((v_1,v_2)) and detection pair ((a,b)), triangulate the visible core joints and build a provisional core pose
|
||||||
|
[
|
||||||
|
Y_{i,t}^{(v_1,a,v_2,b)} \in \mathbb R^3,\qquad i\in\mathcal C_{\mathrm{obs}}.
|
||||||
|
]
|
||||||
|
Then define the root
|
||||||
|
[
|
||||||
|
r_t = \text{pelvis}(Y_t),
|
||||||
|
]
|
||||||
|
and root-centered core joints
|
||||||
|
[
|
||||||
|
Z_{i,t} = Y_{i,t} - r_t.
|
||||||
|
]
|
||||||
|
|
||||||
|
Score that pair proposal with
|
||||||
|
[
E_{\mathrm{pair}}
=
\sum_{i\in\mathcal C_{\mathrm{obs}}}
\left[
\frac{\|u_{v_1,i,t}^a-\Pi_{v_1}(Y_{i,t})\|^2}{\sigma_{\mathrm{base}}^2(c_{v_1,i,t}^a)}
+
\frac{\|u_{v_2,i,t}^b-\Pi_{v_2}(Y_{i,t})\|^2}{\sigma_{\mathrm{base}}^2(c_{v_2,i,t}^b)}
\right]
+
\lambda_\theta \sum_{i\in\mathcal C_{\mathrm{obs}}}\frac{1}{\sin^2 \theta_i}
+
\lambda_{\mathrm{anthro}}\psi_{\mathrm{anthro}}(Z_t).
]
|
||||||
|
|
||||||
|
(\theta_i) is the angle between the two camera rays for joint (i). Small ray angle means bad geometry, so the penalty increases.
|
||||||
|
|
||||||
|
(\psi_{\mathrm{anthro}}(Z_t)) is a soft anthropometric sanity term. It can be very simple: reject impossible torso width, negative limb lengths, or absurd limb ratios.
|
||||||
|
|
||||||
|
Keep the proposal only if enough core joints are visible and (E_{\mathrm{pair}}) is below threshold.
|
||||||
|
|
||||||
|
Then cluster proposals in 3D using
|
||||||
|
[
d_{\mathrm{prop}}(p,q)
=
\frac{\|r^p-r^q\|^2}{\tau_r^2}
+
\lambda_Z
\sum_{i\in\mathcal C}
\frac{\|Z_i^p-Z_i^q\|^2}{\tau_Z^2}.
]
|
||||||
|
|
||||||
|
Each cluster becomes one proposal centroid for the tentative layer.
|
||||||
|
|
||||||
|
Tentative update is simple and robust. Associate each cluster centroid to an existing tentative track or start a new tentative track if no association exists. Update with the buffer, not with a global sliding smoother. The buffer is only a birth-confirmation device.
|
||||||
|
|
||||||
|
A useful tentative score is
|
||||||
|
[
s_t^p
=
\lambda_s s_{t-1}^p
+
a_1 n_{\mathrm{pairs},t}^p
+
a_2 n_{\mathrm{views},t}^p
-
a_3 \bar e_{\mathrm{repr},t}^p
-
a_4 \mathrm{CV}_{\mathrm{bone},t}^p
-
a_5 \mathbf 1_{\mathrm{miss}}.
]
|
||||||
|
|
||||||
|
Here:
|
||||||
|
|
||||||
|
(n_{\mathrm{pairs},t}^p) is the number of supporting cross-view pair proposals in the cluster.
|
||||||
|
|
||||||
|
(n_{\mathrm{views},t}^p) is the number of unique cameras contributing.
|
||||||
|
|
||||||
|
(\bar e_{\mathrm{repr},t}^p) is the mean reprojection error of the cluster.
|
||||||
|
|
||||||
|
(\mathrm{CV}_{\mathrm{bone},t}^p) is the coefficient of variation of a few stable limb lengths in the tentative buffer.
|
||||||
|
|
||||||
|
Promotion rule:
|
||||||
|
[
|
||||||
|
\text{promote if}
|
||||||
|
\quad
|
||||||
|
a_t^p \ge A_{\min},
|
||||||
|
\quad
|
||||||
|
\#\{\text{hits in last }L\} \ge K,
|
||||||
|
\quad
|
||||||
|
\overline{n_{\mathrm{views}}} \ge V_{\min},
|
||||||
|
\quad
|
||||||
|
\overline{e_{\mathrm{repr}}} \le \tau_{\mathrm{repr}},
|
||||||
|
\quad
|
||||||
|
\mathrm{CV}_{\mathrm{bone}} \le \tau_{\mathrm{len}},
|
||||||
|
\quad
|
||||||
|
s_t^p \ge \tau_{\mathrm{promote}}.
|
||||||
|
]
|
||||||
|
|
||||||
|
At promotion, estimate (\beta) once from the tentative buffer. Let (\mathcal L) be a set of skeleton limbs. For each limb (b=(i,j)),
|
||||||
|
[
\hat \ell_b
=
\operatorname{wmedian}_{\tau \in B_t^p}
\|Z_{j,\tau}^p - Z_{i,\tau}^p\|.
]
|
||||||
|
Then fit the low-dimensional skeleton shape
|
||||||
|
[
\hat\beta
=
\arg\min_\beta
\sum_{b\in\mathcal L}
\omega_b \big(\ell_b(\beta)-\hat\ell_b\big)^2
+
\lambda_\beta \|\beta-\beta_0\|_{\Sigma_\beta^{-1}}^2.
]
|
||||||
|
|
||||||
|
That gives a stable initial skeleton. Then initialize the articulated pose:
|
||||||
|
[
(\hat T,\hat q)
=
\arg\min_{T,q}
\sum_{i\in\mathcal C}
\|FK_i(T,q,\hat\beta)-(\hat r+\hat Z_i)\|^2
+
\lambda_{\mathrm{lim}}\phi_{\mathrm{lim}}(q).
]
|
||||||
|
|
||||||
|
After this, create the established track and allow (\beta) to move only slightly for a short grace period. Then freeze it:
|
||||||
|
[
|
||||||
|
\beta_t \leftarrow \beta_{t-1} + \eta_\beta \Delta\beta_t,
|
||||||
|
\qquad
|
||||||
|
0 < \eta_\beta \ll 1.
|
||||||
|
]
|
||||||
|
Once the track is stable, set (f_\beta=1) and stop updating (\beta).
|
||||||
|
|
||||||
|
For established tracks, prediction is recursive:
|
||||||
|
[
T_{t|t-1} = T_{t-1}\exp(\Delta t\,\hat \xi_{t-1}),
]

[
q_{t|t-1} = q_{t-1} + \Delta t\,\dot q_{t-1},
]

[
\beta_{t|t-1} = \beta_{t-1}.
]
|
||||||
|
|
||||||
|
Now the key point: the established stage does not triangulate a fresh 3D body and then fit to that. It updates the articulated state directly from all available 2D measurements.
|
||||||
|
|
||||||
|
First, associate detections per view. For predicted track (p) in camera (v), the detection cost is
|
||||||
|
[
C_{p,m}^{(v)}
=
\sum_{i\in\mathcal C}
\delta_{i}\,\omega_i\,
\frac{\|u_{v,i,t}^m-\hat u_{v,i,t}^p\|^2}{\sigma_{v,i,t}^{2,p}}
-
\lambda_{\mathrm{id}} \mathbf 1[\text{same 2D tracker id}],
]
|
||||||
|
with (\delta_i=1) only for visible and confidence-valid joints. Use Hungarian or another bipartite matcher independently per view after gating.
|
||||||
|
|
||||||
|
Then update the articulated state by minimizing
|
||||||
|
[
x_t^{p,*}
=
\arg\min_x
\|x-\bar x_t^p\|_{Q^{-1}}^2
+
\sum_{(v,m)\in\mathcal M_t^p}
\sum_{i\in\mathcal J}
\delta_{v,i,t}^p
\,w_{v,i,t}^p\,
\left\|
u_{v,i,t}^m - \Pi_v(FK_i(x))
\right\|_{R_{v,i,t}^{-1}}^2
+
\lambda_{\mathrm{lim}}\phi_{\mathrm{lim}}(q)
+
\lambda_{\beta}\phi_{\beta}(\beta).
]
|
||||||
|
|
||||||
|
Definitions:
|
||||||
|
|
||||||
|
(\bar x_t^p) is the predicted state.
|
||||||
|
|
||||||
|
(\mathcal M_t^p) is the set of matched detections for track (p) across cameras.
|
||||||
|
|
||||||
|
(\delta_{v,i,t}^p \in {0,1}) is the joint acceptance mask after joint-level gating.
|
||||||
|
|
||||||
|
(R_{v,i,t} = \sigma_{v,i,t}^{2,p} I_2).
|
||||||
|
|
||||||
|
(w_{v,i,t}^p) is a robust weight.
|
||||||
|
|
||||||
|
A convenient robust weight is Student-(t):
|
||||||
|
[
w_{v,i,t}^p
=
\frac{\nu+2}{\nu + \left\|u_{v,i,t}^m-\hat u_{v,i,t}^p\right\|_{R_{v,i,t}^{-1}}^2}.
]
|
||||||
|
|
||||||
|
Joint-level gating uses
|
||||||
|
[
d_{v,i,t}^{2,p}
=
\left(u_{v,i,t}^m-\hat u_{v,i,t}^p\right)^\top
S_{v,i,t}^{-1}
\left(u_{v,i,t}^m-\hat u_{v,i,t}^p\right),
]
|
||||||
|
and accept the joint if (d_{v,i,t}^{2,p} \le \tau_{\mathrm{joint}}).
|
||||||
|
|
||||||
|
In practice, solve the update with 1 to 3 Gauss-Newton or LM steps from the predicted state. Let the local increment be
|
||||||
|
[
|
||||||
|
\Delta z = [\delta\xi,\ \delta q,\ \delta\beta_{\mathrm{free}}],
|
||||||
|
]
|
||||||
|
where (\delta\xi) is the root pose increment in Lie algebra coordinates, (\delta q) is the joint-angle increment, and (\delta\beta_{\mathrm{free}}) is omitted if (\beta) is frozen.
|
||||||
|
|
||||||
|
At one linearized step,
|
||||||
|
[
H
=
Q^{-1}
+
\sum_{(v,m),i}
\delta_{v,i,t}^p\,w_{v,i,t}^p\,J_{v,i,t}^\top R_{v,i,t}^{-1} J_{v,i,t}
+
H_{\mathrm{prior}},
]

[
g
=
Q^{-1}(z-\bar z_t^p)
-
\sum_{(v,m),i}
\delta_{v,i,t}^p\,w_{v,i,t}^p\,J_{v,i,t}^\top R_{v,i,t}^{-1} r_{v,i,t}
+
g_{\mathrm{prior}},
]

[
\Delta z = -H^{-1} g.
]
|
||||||
|
|
||||||
|
Here (r_{v,i,t} = u_{v,i,t}^m-\hat u_{v,i,t}^p), and (J_{v,i,t}) is the Jacobian of the projected joint with respect to the state increment.
|
||||||
|
|
||||||
|
Then update
|
||||||
|
[
|
||||||
|
T \leftarrow T\exp(\widehat{\delta\xi}),\qquad
|
||||||
|
q \leftarrow q + \delta q,\qquad
|
||||||
|
\beta \leftarrow \beta + \delta\beta_{\mathrm{free}}.
|
||||||
|
]
|
||||||
|
|
||||||
|
Approximate posterior covariance for deletion and gating can be taken as
|
||||||
|
[
|
||||||
|
P_t^p \approx H^{-1}.
|
||||||
|
]
|
||||||
|
|
||||||
|
The recursive noise model is important because your detector is noisy and heteroscedastic. I would use
|
||||||
|
[
\sigma_{v,i,t}^{2,p}
=
\operatorname{clip}
\left(
\lambda_\sigma \sigma_{v,i,t-1}^{2,p}
+
(1-\lambda_\sigma)
\Big[
\sigma_{\mathrm{base}}^2(c_{v,i,t})
+
\kappa \min(\|r_{v,i,t}\|^2, r_{\max}^2)
\Big],
\ \sigma_{\min}^2,\ \sigma_{\max}^2
\right).
]
|
||||||
|
|
||||||
|
So history enters through the recursive state and recursive noise, not through a sliding window.
|
||||||
|
|
||||||
|
For the active track score, use
|
||||||
|
[
s_t^p
=
\lambda_s s_{t-1}^p
+ b_1 n_{\mathrm{views},t}^{p,\mathrm{acc}}
+ b_2 \frac{n_{\mathrm{joints},t}^{p,\mathrm{acc}}}{|\mathcal J|}
- b_3 \bar e_{\mathrm{repr},t}^p
- b_4 \mathbf 1_{\mathrm{miss}}.
]
|
||||||
|
|
||||||
|
Then the lifecycle rules become explicit.
|
||||||
|
|
||||||
|
Active to Lost:
|
||||||
|
[
|
||||||
|
m_t^p > M_{\mathrm{lost}}
|
||||||
|
\quad\text{or}\quad
|
||||||
|
s_t^p < \tau_{\mathrm{lost}}.
|
||||||
|
]
|
||||||
|
|
||||||
|
Lost to Active:
|
||||||
|
either enough direct 2D support reappears,
|
||||||
|
[
|
||||||
|
n_{\mathrm{views},t}^{p,\mathrm{acc}} \ge V_{\mathrm{reacq}},
|
||||||
|
]
|
||||||
|
or a tentative proposal cluster matches the lost track prediction:
|
||||||
|
[
C_{\mathrm{reacq}}(p,c)
=
\frac{\|r_c-\hat r_t^p\|^2}{\tau_r^2}
+
\lambda_Z
\sum_{i\in\mathcal C}
\frac{\|Z_{c,i}-\hat Z_{i,t}^p\|^2}{\tau_Z^2}
\le \tau_{\mathrm{reacq}}.
]
|
||||||
|
|
||||||
|
Lost to Dead:
|
||||||
|
[
|
||||||
|
\ell_t^p > A_{\mathrm{delete}}
|
||||||
|
\quad\text{or}\quad
|
||||||
|
\operatorname{tr}(P_t^p) > \tau_P
|
||||||
|
\quad\text{or}\quad
|
||||||
|
\hat r_t^p \notin \text{working volume for } G \text{ frames}.
|
||||||
|
]
|
||||||
|
|
||||||
|
Tentative to Dead:
|
||||||
|
[
|
||||||
|
a_t^p > A_{\mathrm{tent,max}}
|
||||||
|
\quad\text{or}\quad
|
||||||
|
m_t^p > M_{\mathrm{tent}}.
|
||||||
|
]
|
||||||
|
|
||||||
|
So the design is:
|
||||||
|
|
||||||
|
Tentative stage: free 3D core pose, short evidence buffer, DLT allowed, cheap and conservative.
|
||||||
|
|
||||||
|
Promotion: estimate (\beta) once from stable buffer statistics, initialize articulated model, then freeze (\beta) after a short grace period.
|
||||||
|
|
||||||
|
Established stage: direct recursive articulated fitting from multi-view 2D reprojection, no per-frame free 3D cloud, no sliding window.
|
||||||
|
|
||||||
|
Lost stage: predict only, inflate uncertainty, try 2D reprojection-based reacquisition first, proposal-based reacquisition second, delete only when uncertainty or lost age becomes too large.
|
||||||
|
|
||||||
|
This directly answers your earlier question about “how do I get 3D without DLT?”:
|
||||||
|
|
||||||
|
In the established stage, you do not first reconstruct a standalone 3D pose. The 3D body is the current articulated state itself. You recover it by minimizing multi-view 2D reprojection error of the FK model. DLT is only a birth and reacquisition aid.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@@ -0,0 +1,40 @@
|
|||||||
|
from pose_tracking_exp.joints import BODY20_JOINT_NAMES, BODY20_OBSERVATION_COUNT
|
||||||
|
from pose_tracking_exp.models import (
|
||||||
|
ActiveTrackState,
|
||||||
|
CameraCalibration,
|
||||||
|
CameraFrame,
|
||||||
|
FrameBundle,
|
||||||
|
PoseDetection,
|
||||||
|
ProposalCluster,
|
||||||
|
ReplaySequence,
|
||||||
|
SceneConfig,
|
||||||
|
TentativeTrackState,
|
||||||
|
TrackerConfig,
|
||||||
|
TrackedFrameResult,
|
||||||
|
)
|
||||||
|
from pose_tracking_exp.parajumping import decode_pose_payload
|
||||||
|
from pose_tracking_exp.replay import load_replay_file, load_scene_file
|
||||||
|
from pose_tracking_exp.sync import synchronize_frames
|
||||||
|
from pose_tracking_exp.tracker import PoseTracker
|
||||||
|
|
||||||
|
# Public re-export surface of the package. Kept sorted (SCREAMING_CASE
# constants, then classes, then functions) so additions are easy to diff.
__all__ = [
    "BODY20_JOINT_NAMES",
    "BODY20_OBSERVATION_COUNT",
    "ActiveTrackState",
    "CameraCalibration",
    "CameraFrame",
    "FrameBundle",
    "PoseDetection",
    "PoseTracker",
    "ProposalCluster",
    "ReplaySequence",
    "SceneConfig",
    "TentativeTrackState",
    "TrackedFrameResult",
    "TrackerConfig",
    "decode_pose_payload",
    "load_replay_file",
    "load_scene_file",
    "synchronize_frames",
]
|
||||||
|
|
||||||
@@ -0,0 +1,129 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
import pyarrow.parquet as pq
|
||||||
|
from beartype import beartype
|
||||||
|
|
||||||
|
from pose_tracking_exp.models import CameraCalibration, CameraFrame, FrameBundle, PoseDetection, SceneConfig
|
||||||
|
from pose_tracking_exp.normalization import infer_bbox_from_keypoints, normalize_rtmpose_body20
|
||||||
|
|
||||||
|
# Synthetic per-bundle timestamp step in nanoseconds (1e9 / 30 ~= 33.33 ms,
# i.e. a nominal 30 fps capture); ActualTest parquet carries no real clocks.
_NOMINAL_FRAME_PERIOD_NS = 33_333_333
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def load_actualtest_scene(root: Path) -> SceneConfig:
    """Build a :class:`SceneConfig` from the ActualTest calibration parquet.

    Reads ``<root>/camera_params/camera_params.parquet`` and converts each row
    into a :class:`CameraCalibration` using the canonical OpenCV
    world->camera extrinsic form.

    Parameters
    ----------
    root:
        Dataset root directory containing ``camera_params/``.

    Returns
    -------
    SceneConfig
        Cameras sorted by name, with a fixed working volume.
    """
    # ActualTest parquet comes from the ChArUco/OpenCV side, so `rvec` / `tvec`
    # are world->camera extrinsics. The RPT-facing camera pose is derived later
    # from this canonical OpenCV form.
    camera_rows = pq.read_table(root / "camera_params" / "camera_params.parquet").to_pylist()
    cameras: list[CameraCalibration] = []
    for item in camera_rows:
        # Expand the Rodrigues vector into a full 3x3 rotation matrix once per camera.
        rotation, _ = cv2.Rodrigues(np.asarray(item["extrinsic"]["rvec"], dtype=np.float64).reshape(3, 1))
        cameras.append(
            CameraCalibration.from_opencv_extrinsics(
                name=str(item["port"]),
                width=int(item["resolution"]["width"]),
                height=int(item["resolution"]["height"]),
                K=np.asarray(item["intrinsic"]["camera_matrix"], dtype=np.float64),
                DC=np.asarray(item["intrinsic"]["distortion_coefficients"], dtype=np.float64).reshape(-1),
                R=np.asarray(rotation, dtype=np.float64),
                T=np.asarray(item["extrinsic"]["tvec"], dtype=np.float64).reshape(3),
                rvec=np.asarray(item["extrinsic"]["rvec"], dtype=np.float64).reshape(3),
            )
        )
    return SceneConfig(
        # NOTE(review): room size/center are hard-coded defaults, not read from
        # the dataset -- confirm they enclose the capture volume of every
        # ActualTest recording.
        room_size=np.asarray([20.0, 20.0, 8.0], dtype=np.float64),
        room_center=np.asarray([0.0, 0.0, 2.0], dtype=np.float64),
        cameras=tuple(sorted(cameras, key=lambda camera: camera.name)),
    )
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def load_actualtest_segment_bundles(
    root: Path,
    segment_name: str,
    *,
    frame_start: int = 690,
    frame_stop: int | None = None,
    max_frames: int | None = None,
    min_visible_joints: int = 6,
) -> list[FrameBundle]:
    """Load one ActualTest segment's per-camera detections as frame bundles.

    Reads every ``*_detected.parquet`` in ``<root>/<segment_name>``, normalizes
    each detection to the body20 layout, and keeps only frame indices present
    in *all* cameras (strict intersection -- a frame dropped by one camera is
    dropped everywhere).

    Parameters
    ----------
    root:
        Dataset root (also used to load the scene calibration).
    segment_name:
        Sub-directory holding the ``*_detected.parquet`` files.
    frame_start, frame_stop:
        Half-open frame-index window ``[frame_start, frame_stop)``.
    max_frames:
        Cap on the number of common frames kept (after windowing).
    min_visible_joints:
        Detections with fewer joints above confidence 0.15 are discarded.

    Returns
    -------
    list[FrameBundle]
        Bundles with synthetic, evenly spaced timestamps.

    Raises
    ------
    ValueError
        If a parquet row's boxes / keypoints / scores arrays disagree in length.
    """
    segment_root = root / segment_name
    by_camera: dict[str, dict[int, tuple[PoseDetection, ...]]] = {}

    for parquet_path in sorted(segment_root.glob("*_detected.parquet")):
        camera_name = parquet_path.name.removesuffix("_detected.parquet")
        rows = pq.read_table(parquet_path).to_pylist()
        frames: dict[int, tuple[PoseDetection, ...]] = {}
        for row in rows:
            frame_index = int(row["frame_index"])
            if frame_index < frame_start:
                continue
            if frame_stop is not None and frame_index >= frame_stop:
                continue

            detections: list[PoseDetection] = []
            boxes = row["boxes"]
            keypoints_batch = row["kps"]
            confidence_batch = row["kps_scores"]
            # Corrupt rows are surfaced loudly instead of silently zipping short.
            if not (len(boxes) == len(keypoints_batch) == len(confidence_batch)):
                raise ValueError(
                    f"Mismatched detection arrays for camera {camera_name} frame {frame_index}: "
                    f"{len(boxes)=}, {len(keypoints_batch)=}, {len(confidence_batch)=}."
                )

            for box, keypoints_xy, confidences in zip(boxes, keypoints_batch, confidence_batch, strict=True):
                keypoints_xy_array = np.asarray(keypoints_xy, dtype=np.float64)
                confidences_array = np.asarray(confidences, dtype=np.float64)
                pose = normalize_rtmpose_body20(keypoints_xy_array, confidences_array)
                # Drop low-support detections before they reach the tracker.
                if np.count_nonzero(pose[:, 2] > 0.15) < min_visible_joints:
                    continue
                # Prefer the detector's box; fall back to a keypoint-derived one.
                bbox = (
                    np.asarray(box, dtype=np.float64)
                    if len(box) == 4
                    else infer_bbox_from_keypoints(pose)
                )
                visible_confidences = pose[pose[:, 2] > 0.0, 2]
                detections.append(
                    PoseDetection(
                        bbox=bbox,
                        bbox_confidence=float(np.mean(visible_confidences)) if visible_confidences.size else 0.0,
                        keypoints=pose,
                    )
                )
            frames[frame_index] = tuple(detections)
        by_camera[camera_name] = frames

    if not by_camera:
        return []

    # Strict synchronization: only frame indices seen by every camera survive.
    common_frames = sorted(set.intersection(*(set(frames) for frames in by_camera.values())))
    if max_frames is not None:
        common_frames = common_frames[:max_frames]

    scene = load_actualtest_scene(root)
    # NOTE(review): raises KeyError below if a detection parquet exists for a
    # camera that is absent from the calibration parquet -- confirm that is the
    # desired failure mode.
    camera_by_name = {camera.name: camera for camera in scene.cameras}
    bundles: list[FrameBundle] = []
    for bundle_index, frame_index in enumerate(common_frames):
        # Timestamps are synthetic and evenly spaced by the nominal frame
        # period; gaps in the source frame indices are NOT reflected here.
        timestamp_unix_ns = bundle_index * _NOMINAL_FRAME_PERIOD_NS
        views: list[CameraFrame] = []
        for camera_name in sorted(by_camera):
            camera = camera_by_name[camera_name]
            views.append(
                CameraFrame(
                    camera_name=camera_name,
                    frame_index=frame_index,
                    timestamp_unix_ns=timestamp_unix_ns,
                    detections=by_camera[camera_name][frame_index],
                    source_size=(camera.width, camera.height),
                )
            )
        bundles.append(
            FrameBundle(
                bundle_index=bundle_index,
                timestamp_unix_ns=timestamp_unix_ns,
                views=tuple(views),
            )
        )
    return bundles
|
||||||
@@ -0,0 +1,21 @@
|
|||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from pose_tracking_exp.models import CameraCalibration
|
||||||
|
from pose_tracking_exp.tensor_types import Pose3D
|
||||||
|
|
||||||
|
|
||||||
|
def project_pose(camera: CameraCalibration, pose3d: Pose3D) -> np.ndarray:
    """Project an (N, 4) world-space pose ``[x, y, z, conf]`` into pixels.

    Uses the camera's OpenCV extrinsics (``rvec``, ``T``) with
    ``cv2.projectPoints``, so lens distortion from ``camera.DC`` is applied.

    Parameters
    ----------
    camera:
        Calibration holding OpenCV-convention extrinsics. ``camera.rvec`` must
        be populated (it is optional on the model).
    pose3d:
        (N, 4) array; columns 0-2 are world XYZ, column 3 is a confidence that
        is carried through unchanged.

    Returns
    -------
    numpy.ndarray
        (N, 3) float64 array: projected pixel (u, v) plus the input confidence.

    Raises
    ------
    ValueError
        If the calibration carries no Rodrigues vector. (Previously an
        ``assert``, which silently disappears under ``python -O``.)
    """
    if camera.rvec is None:
        raise ValueError(
            f"Camera {camera.name!r} has no rvec; OpenCV extrinsics are required for projection."
        )
    # projectPoints expects (N, 1, 3) contiguous float64 points.
    points = np.ascontiguousarray(pose3d[:, :3], dtype=np.float64).reshape(-1, 1, 3)
    projected, _ = cv2.projectPoints(
        points,
        camera.rvec.astype(np.float64, copy=False).reshape(3, 1),
        camera.T.astype(np.float64, copy=False).reshape(3, 1),
        camera.K.astype(np.float64, copy=False),
        camera.DC.astype(np.float64, copy=False),
    )
    projection = np.zeros((pose3d.shape[0], 3), dtype=np.float64)
    projection[:, :2] = projected.reshape(-1, 2)
    # Confidence is a passthrough; projection does not re-score joints.
    projection[:, 2] = pose3d[:, 3]
    return projection
|
||||||
@@ -0,0 +1,130 @@
|
|||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Literal, cast
|
||||||
|
|
||||||
|
import click
|
||||||
|
|
||||||
|
from pose_tracking_exp.actualtest import load_actualtest_scene, load_actualtest_segment_bundles
|
||||||
|
from pose_tracking_exp.models import TrackerConfig
|
||||||
|
from pose_tracking_exp.parajumping import convert_payload_jsonl_lines
|
||||||
|
from pose_tracking_exp.replay import load_replay_file, load_scene_file
|
||||||
|
from pose_tracking_exp.sync import synchronize_frames
|
||||||
|
from pose_tracking_exp.tracker import PoseTracker
|
||||||
|
|
||||||
|
|
||||||
|
# Click group: subcommands register themselves below via @main.command(...).
@click.group()
def main() -> None:
    """Offline multiview pose tracking experiment CLI."""
|
||||||
|
|
||||||
|
|
||||||
|
# Convert a recorded ParaJumping payload JSONL file into the replay JSONL
# format consumed by `pose-tracking-exp run`.
@main.command("convert-parajumping")
@click.argument("input_path", type=click.Path(path_type=Path, exists=True, dir_okay=False))
@click.argument("output_path", type=click.Path(path_type=Path, dir_okay=False))
def convert_parajumping(input_path: Path, output_path: Path) -> None:
    lines = input_path.read_text(encoding="utf-8").splitlines()
    converted = convert_payload_jsonl_lines(lines)
    # JSONL convention: trailing newline only when there is actual content.
    output_path.write_text("\n".join(converted) + ("\n" if converted else ""), encoding="utf-8")
|
||||||
|
|
||||||
|
|
||||||
|
# Run the tracker over a generic scene/replay pair and print one summary
# record per synchronized frame bundle as JSON on stdout.
@main.command("run")
@click.argument("scene_path", type=click.Path(path_type=Path, exists=True, dir_okay=False))
@click.argument("replay_path", type=click.Path(path_type=Path, exists=True, dir_okay=False))
def run(scene_path: Path, replay_path: Path) -> None:
    scene = load_scene_file(scene_path)
    # NOTE(review): load_replay_file takes the scene *path* (not the loaded
    # scene) as its first argument -- presumably to resolve camera metadata;
    # confirm against its signature.
    replay = load_replay_file(scene_path, replay_path)
    config = TrackerConfig()
    bundles = synchronize_frames(
        replay,
        max_skew_ns=config.max_sync_skew_ns,
        min_views=config.min_bundle_views,
    )
    tracker = PoseTracker(scene, config)
    results = tracker.run(bundles)
    # Compact per-bundle summary: track ids by lifecycle stage + proposal count.
    payload = [
        {
            "bundle_index": result.bundle_index,
            "timestamp_unix_ns": result.timestamp_unix_ns,
            "tentative_track_ids": [track.track_id for track in result.tentative_tracks],
            "active_track_ids": [track.track_id for track in result.active_tracks],
            "lost_track_ids": [track.track_id for track in result.lost_tracks],
            "proposal_count": len(result.proposals),
        }
        for result in results
    ]
    click.echo(json.dumps(payload, indent=2))
|
||||||
|
|
||||||
|
|
||||||
|
# Run the tracker over an ActualTest dataset segment and print a JSON report
# with aggregate stats, tracker diagnostics, and per-bundle track ids.
@main.command("run-actualtest")
@click.argument("root_path", type=click.Path(path_type=Path, exists=True, file_okay=False))
@click.option("--segment", "segment_name", default="Segment_1", show_default=True)
@click.option("--frame-start", default=690, type=int, show_default=True)
@click.option("--frame-stop", type=int)
@click.option("--max-frames", type=int)
@click.option("--mode", type=click.Choice(["single_person", "general"]), default="single_person", show_default=True)
@click.option("--proposal-min-score", default=0.5, type=float, show_default=True)
@click.option("--tentative-min-age", default=2, type=int, show_default=True)
@click.option("--tentative-hits-required", default=2, type=int, show_default=True)
@click.option("--tentative-promote-score", default=1.2, type=float, show_default=True)
def run_actualtest(
    root_path: Path,
    segment_name: str,
    frame_start: int,
    frame_stop: int | None,
    max_frames: int | None,
    mode: str,
    proposal_min_score: float,
    tentative_min_age: int,
    tentative_hits_required: int,
    tentative_promote_score: float,
) -> None:
    # click.Choice already restricts the value; the cast only narrows the type
    # for the TrackerConfig literal field.
    tracker_mode = cast(Literal["general", "single_person"], mode)
    scene = load_actualtest_scene(root_path)
    bundles = load_actualtest_segment_bundles(
        root_path,
        segment_name,
        frame_start=frame_start,
        frame_stop=frame_stop,
        max_frames=max_frames,
    )
    config = TrackerConfig(
        mode=tracker_mode,
        proposal_min_score=proposal_min_score,
        tentative_min_age=tentative_min_age,
        tentative_hits_required=tentative_hits_required,
        tentative_promote_score=tentative_promote_score,
    )
    tracker = PoseTracker(scene, config)
    results = tracker.run(bundles)
    diagnostics = tracker.diagnostics_snapshot()
    payload = {
        "segment": segment_name,
        "mode": tracker_mode,
        "bundle_count": len(results),
        "active_track_frames": sum(1 for result in results if result.active_tracks),
        "proposal_frames": sum(1 for result in results if result.proposals),
        "max_active_tracks": max((len(result.active_tracks) for result in results), default=0),
        "diagnostics": {
            "match_existing_calls": diagnostics.match_existing_calls,
            "match_existing_seconds": diagnostics.match_existing_seconds,
            "proposal_build_calls": diagnostics.proposal_build_calls,
            "proposal_build_seconds": diagnostics.proposal_build_seconds,
            "promotions": diagnostics.promotions,
            "reacquisitions": diagnostics.reacquisitions,
            "active_updates": diagnostics.active_updates,
            "seed_initializations": diagnostics.seed_initializations,
            "nonlinear_refinements": diagnostics.nonlinear_refinements,
        },
        "results": [
            {
                "bundle_index": result.bundle_index,
                # All views in a bundle share one source frame index; -1 marks
                # an (unexpected) empty bundle.
                "source_frame_index": bundle.views[0].frame_index if bundle.views else -1,
                "timestamp_unix_ns": result.timestamp_unix_ns,
                "tentative_track_ids": [track.track_id for track in result.tentative_tracks],
                "active_track_ids": [track.track_id for track in result.active_tracks],
                "lost_track_ids": [track.track_id for track in result.lost_tracks],
                "proposal_count": len(result.proposals),
            }
            # NOTE(review): strict zip requires tracker.run to emit exactly one
            # result per bundle -- confirm that invariant holds.
            for bundle, result in zip(bundles, results, strict=True)
        ],
    }
    click.echo(json.dumps(payload, indent=2))
|
||||||
@@ -0,0 +1,63 @@
|
|||||||
|
# Canonical 20-joint body layout used throughout the tracker: the 17 COCO
# keypoints followed by three derived joints (hip_middle, shoulder_middle, head).
BODY20_JOINT_NAMES: tuple[str, ...] = (
    "nose",
    "eye_left",
    "eye_right",
    "ear_left",
    "ear_right",
    "shoulder_left",
    "shoulder_right",
    "elbow_left",
    "elbow_right",
    "wrist_left",
    "wrist_right",
    "hip_left",
    "hip_right",
    "knee_left",
    "knee_right",
    "ankle_left",
    "ankle_right",
    "hip_middle",
    "shoulder_middle",
    "head",
)

# One observation row per joint in every detection.
BODY20_OBSERVATION_COUNT = len(BODY20_JOINT_NAMES)

# Reverse lookup: joint name -> row index in the body20 layout.
BODY20_INDEX_BY_NAME = {joint: position for position, joint in enumerate(BODY20_JOINT_NAMES)}

# Torso-centric subset used for gating and proposal clustering.
CORE_JOINT_NAMES: tuple[str, ...] = (
    "hip_middle",
    "hip_left",
    "hip_right",
    "shoulder_left",
    "shoulder_right",
    "elbow_left",
    "elbow_right",
    "knee_left",
    "knee_right",
)

CORE_JOINT_INDICES: tuple[int, ...] = tuple(BODY20_INDEX_BY_NAME[joint] for joint in CORE_JOINT_NAMES)

# RTMPose emits the plain 17-joint COCO skeleton; its indices are contiguous,
# so the name->index mapping is derived from the ordered name tuple.
RTMPOSE_BODY17_NAMES = (
    "nose",
    "eye_left",
    "eye_right",
    "ear_left",
    "ear_right",
    "shoulder_left",
    "shoulder_right",
    "elbow_left",
    "elbow_right",
    "wrist_left",
    "wrist_right",
    "hip_left",
    "hip_right",
    "knee_left",
    "knee_right",
    "ankle_left",
    "ankle_right",
)

RTMPOSE_BODY17_INDEX_BY_NAME = {joint: position for position, joint in enumerate(RTMPOSE_BODY17_NAMES)}
|
||||||
|
|
||||||
@@ -0,0 +1,347 @@
|
|||||||
|
import math
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from beartype import beartype
|
||||||
|
from scipy.optimize import least_squares
|
||||||
|
|
||||||
|
from pose_tracking_exp.camera_math import project_pose
|
||||||
|
from pose_tracking_exp.joints import BODY20_INDEX_BY_NAME
|
||||||
|
from pose_tracking_exp.models import CameraCalibration, PoseDetection, SkeletonState
|
||||||
|
from pose_tracking_exp.tensor_types import Pose3D
|
||||||
|
|
||||||
|
# Articulated-pose parameter count: 3 root translation + 3 root rotation
# + 3 spine + 2 neck + 2x3 shoulders + 2 elbows + 2x3 hips + 2 knees
# + 2x2 ankles = 31 (matches the slicing in render_pose).
PARAMETER_DIMENSION = 31
# Shape vector length: global scale (beta[0]) plus 7 per-limb multipliers.
SHAPE_DIMENSION = 8
|
||||||
|
|
||||||
|
|
||||||
|
def _rot_x(theta: float) -> np.ndarray:
|
||||||
|
c = math.cos(theta)
|
||||||
|
s = math.sin(theta)
|
||||||
|
return np.asarray([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]], dtype=np.float64)
|
||||||
|
|
||||||
|
|
||||||
|
def _rot_y(theta: float) -> np.ndarray:
|
||||||
|
c = math.cos(theta)
|
||||||
|
s = math.sin(theta)
|
||||||
|
return np.asarray([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]], dtype=np.float64)
|
||||||
|
|
||||||
|
|
||||||
|
def _rot_z(theta: float) -> np.ndarray:
|
||||||
|
c = math.cos(theta)
|
||||||
|
s = math.sin(theta)
|
||||||
|
return np.asarray([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]], dtype=np.float64)
|
||||||
|
|
||||||
|
|
||||||
|
def _euler_xyz(angles: np.ndarray) -> np.ndarray:
    """Compose a rotation as R_x(a0) @ R_y(a1) @ R_z(a2) from three angles."""
    x_angle, y_angle, z_angle = (float(angles[k]) for k in range(3))
    return _rot_x(x_angle) @ _rot_y(y_angle) @ _rot_z(z_angle)
|
||||||
|
|
||||||
|
|
||||||
|
def _neck_rot(angles: np.ndarray) -> np.ndarray:
    """2-DOF neck rotation: yaw about y followed by pitch about x."""
    yaw, pitch = float(angles[0]), float(angles[1])
    return _rot_y(yaw) @ _rot_x(pitch)
|
||||||
|
|
||||||
|
|
||||||
|
def _safe_normalize(vector: np.ndarray, fallback: np.ndarray) -> np.ndarray:
|
||||||
|
norm = float(np.linalg.norm(vector))
|
||||||
|
if norm <= 1e-8:
|
||||||
|
return fallback
|
||||||
|
return vector / norm
|
||||||
|
|
||||||
|
|
||||||
|
def _rotation_matrix_to_euler_xyz(rotation: np.ndarray) -> np.ndarray:
    """Extract XYZ Euler angles (radians) from a 3x3 rotation matrix.

    Standard matrix-to-Euler decomposition with a gimbal-lock branch when the
    middle angle approaches +-90 degrees (``sy`` ~ 0).

    NOTE(review): the element pattern read here (row 0 for ``sy``/``z``,
    ``rotation[1, 2]`` / ``rotation[2, 2]`` for ``x``) does not obviously match
    the ``_rot_x @ _rot_y @ _rot_z`` composition used by ``_euler_xyz`` -- the
    signs look like they recover the angles of the *transpose*. Confirm the
    round trip ``_rotation_matrix_to_euler_xyz(_euler_xyz(a)) == a`` before
    relying on this in new code.
    """
    sy = float(np.sqrt(rotation[0, 0] ** 2 + rotation[0, 1] ** 2))
    singular = sy < 1e-6
    if not singular:
        x = math.atan2(rotation[1, 2], rotation[2, 2])
        y = math.atan2(-rotation[0, 2], sy)
        z = math.atan2(rotation[0, 1], rotation[0, 0])
    else:
        # Gimbal lock: z is unrecoverable; fold it into x and report z = 0.
        x = math.atan2(-rotation[2, 1], rotation[1, 1])
        y = math.atan2(-rotation[0, 2], sy)
        z = 0.0
    return np.asarray([x, y, z], dtype=np.float64)
|
||||||
|
|
||||||
|
|
||||||
|
def _default_shape() -> np.ndarray:
    """Neutral shape vector: every length multiplier set to 1.0."""
    return np.full(SHAPE_DIMENSION, 1.0, dtype=np.float64)
|
||||||
|
|
||||||
|
|
||||||
|
def _base_lengths(beta: np.ndarray) -> dict[str, float]:
|
||||||
|
scale = float(beta[0])
|
||||||
|
return {
|
||||||
|
"torso": 0.52 * scale * float(beta[1]),
|
||||||
|
"shoulder_width": 0.36 * scale * float(beta[2]),
|
||||||
|
"pelvis_width": 0.24 * scale * float(beta[3]),
|
||||||
|
"upper_arm": 0.30 * scale * float(beta[4]),
|
||||||
|
"lower_arm": 0.26 * scale * float(beta[5]),
|
||||||
|
"upper_leg": 0.45 * scale * float(beta[6]),
|
||||||
|
"lower_leg": 0.43 * scale * float(beta[7]),
|
||||||
|
"neck": 0.12 * scale,
|
||||||
|
"head": 0.18 * scale,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def render_pose(parameters: np.ndarray, beta: np.ndarray) -> Pose3D:
    """Forward-kinematics render of the articulated skeleton into a (20, 4) pose.

    Args:
        parameters: (PARAMETER_DIMENSION,) vector — root translation (0:3),
            root rotation (3:6), spine (6:9), neck (9:11), shoulders (11:17),
            elbows (17:19), hips (19:25), knees (25:27), ankles (27:31);
            rotations are Euler angles in radians.
        beta: (SHAPE_DIMENSION,) shape vector (see `_base_lengths`).

    Returns:
        (20, 4) array of world-space joint positions with confidence 1.0 in
        the last column for every joint.

    Raises:
        ValueError: if either input has the wrong shape.
    """
    if parameters.shape != (PARAMETER_DIMENSION,):
        raise ValueError(f"Expected parameters with shape ({PARAMETER_DIMENSION},), got {parameters.shape}.")
    if beta.shape != (SHAPE_DIMENSION,):
        raise ValueError(f"Expected beta with shape ({SHAPE_DIMENSION},), got {beta.shape}.")

    lengths = _base_lengths(beta)
    pose = np.zeros((20, 4), dtype=np.float64)
    # Decode per-joint rotations from the flat parameter vector.
    root_t = parameters[0:3]
    root_r = _euler_xyz(parameters[3:6])
    spine_r = _euler_xyz(parameters[6:9])
    neck_r = _neck_rot(parameters[9:11])
    l_shoulder_r = _euler_xyz(parameters[11:14])
    r_shoulder_r = _euler_xyz(parameters[14:17])
    # Elbows are single-DoF hinges about Z; the right side is sign-mirrored.
    l_elbow_r = _rot_z(float(parameters[17]))
    r_elbow_r = _rot_z(float(-parameters[18]))
    l_hip_r = _euler_xyz(parameters[19:22])
    r_hip_r = _euler_xyz(parameters[22:25])
    l_knee_r = _rot_x(float(parameters[25]))
    r_knee_r = _rot_x(float(parameters[26]))
    l_ankle_r = _rot_x(float(parameters[27])) @ _rot_z(float(parameters[28]))
    r_ankle_r = _rot_x(float(parameters[29])) @ _rot_z(float(parameters[30]))

    # Kinematic chain: each joint offset is rotated by the composed parent
    # rotations and added to the parent joint position. The body frame uses
    # +X left, +Y up (offsets below).
    pelvis = root_t
    hip_left = pelvis + root_r @ np.asarray([lengths["pelvis_width"] * 0.5, 0.0, 0.0], dtype=np.float64)
    hip_right = pelvis + root_r @ np.asarray([-lengths["pelvis_width"] * 0.5, 0.0, 0.0], dtype=np.float64)
    shoulder_middle = pelvis + root_r @ spine_r @ np.asarray([0.0, lengths["torso"], 0.0], dtype=np.float64)
    head = shoulder_middle + root_r @ spine_r @ neck_r @ np.asarray([0.0, lengths["neck"] + lengths["head"], 0.0], dtype=np.float64)

    shoulder_left = shoulder_middle + root_r @ spine_r @ np.asarray(
        [lengths["shoulder_width"] * 0.5, 0.0, 0.0], dtype=np.float64
    )
    shoulder_right = shoulder_middle + root_r @ spine_r @ np.asarray(
        [-lengths["shoulder_width"] * 0.5, 0.0, 0.0], dtype=np.float64
    )
    elbow_left = shoulder_left + root_r @ spine_r @ l_shoulder_r @ np.asarray(
        [lengths["upper_arm"], 0.0, 0.0], dtype=np.float64
    )
    elbow_right = shoulder_right + root_r @ spine_r @ r_shoulder_r @ np.asarray(
        [-lengths["upper_arm"], 0.0, 0.0], dtype=np.float64
    )
    wrist_left = elbow_left + root_r @ spine_r @ l_shoulder_r @ l_elbow_r @ np.asarray(
        [lengths["lower_arm"], 0.0, 0.0], dtype=np.float64
    )
    wrist_right = elbow_right + root_r @ spine_r @ r_shoulder_r @ r_elbow_r @ np.asarray(
        [-lengths["lower_arm"], 0.0, 0.0], dtype=np.float64
    )

    knee_left = hip_left + root_r @ l_hip_r @ np.asarray([0.0, -lengths["upper_leg"], 0.0], dtype=np.float64)
    knee_right = hip_right + root_r @ r_hip_r @ np.asarray([0.0, -lengths["upper_leg"], 0.0], dtype=np.float64)
    ankle_left = knee_left + root_r @ l_hip_r @ l_knee_r @ np.asarray(
        [0.0, -lengths["lower_leg"], 0.0], dtype=np.float64
    )
    ankle_right = knee_right + root_r @ r_hip_r @ r_knee_r @ np.asarray(
        [0.0, -lengths["lower_leg"], 0.0], dtype=np.float64
    )

    # Face points are fixed offsets from the head, scaled by the global scale.
    nose = head + root_r @ spine_r @ neck_r @ np.asarray([0.0, 0.0, 0.03 * float(beta[0])], dtype=np.float64)
    eye_left = head + root_r @ spine_r @ neck_r @ np.asarray([0.025 * float(beta[0]), 0.0, 0.02 * float(beta[0])], dtype=np.float64)
    eye_right = head + root_r @ spine_r @ neck_r @ np.asarray([-0.025 * float(beta[0]), 0.0, 0.02 * float(beta[0])], dtype=np.float64)
    ear_left = head + root_r @ spine_r @ neck_r @ np.asarray([0.05 * float(beta[0]), 0.0, 0.0], dtype=np.float64)
    ear_right = head + root_r @ spine_r @ neck_r @ np.asarray([-0.05 * float(beta[0]), 0.0, 0.0], dtype=np.float64)
    # Ankle rotations are computed but never applied — ankles are end
    # effectors of the visible chain. NOTE(review): confirm feet are
    # intentionally unmodeled; parameters 27:31 currently have no effect.
    _ = l_ankle_r, r_ankle_r

    joint_positions = {
        "nose": nose,
        "eye_left": eye_left,
        "eye_right": eye_right,
        "ear_left": ear_left,
        "ear_right": ear_right,
        "shoulder_left": shoulder_left,
        "shoulder_right": shoulder_right,
        "elbow_left": elbow_left,
        "elbow_right": elbow_right,
        "wrist_left": wrist_left,
        "wrist_right": wrist_right,
        "hip_left": hip_left,
        "hip_right": hip_right,
        "knee_left": knee_left,
        "knee_right": knee_right,
        "ankle_left": ankle_left,
        "ankle_right": ankle_right,
        "hip_middle": pelvis,
        "shoulder_middle": shoulder_middle,
    }

    for name, position in joint_positions.items():
        pose[BODY20_INDEX_BY_NAME[name], :3] = position
        pose[BODY20_INDEX_BY_NAME[name], 3] = 1.0
    return pose
|
||||||
|
|
||||||
|
|
||||||
|
def _estimate_beta_from_pose(pose3d: Pose3D) -> np.ndarray:
    """Estimate the shape vector from limb lengths of a triangulated pose.

    Inverts `_base_lengths`: beta[0] (global scale) is derived from the torso
    length, then each segment coefficient is the measured length divided by
    (canonical length * scale). Segments without visible endpoints keep the
    default coefficient 1.0.

    Returns:
        (SHAPE_DIMENSION,) shape vector clipped to [0.5, 2.0].
    """
    beta = _default_shape()

    def limb_length(name_a: str, name_b: str) -> float | None:
        # Euclidean distance between two joints, or None when either joint
        # is not marked visible (confidence column <= 0).
        index_a = BODY20_INDEX_BY_NAME[name_a]
        index_b = BODY20_INDEX_BY_NAME[name_b]
        if pose3d[index_a, 3] <= 0.0 or pose3d[index_b, 3] <= 0.0:
            return None
        return float(np.linalg.norm(pose3d[index_a, :3] - pose3d[index_b, :3]))

    # Prefer the left-side measurement, falling back to the right side.
    # NOTE(review): `or` also falls through on an exact 0.0 length, not just
    # None — harmless for real data but worth confirming.
    pelvis_width = limb_length("hip_left", "hip_right")
    shoulder_width = limb_length("shoulder_left", "shoulder_right")
    upper_arm = limb_length("shoulder_left", "elbow_left") or limb_length("shoulder_right", "elbow_right")
    lower_arm = limb_length("elbow_left", "wrist_left") or limb_length("elbow_right", "wrist_right")
    upper_leg = limb_length("hip_left", "knee_left") or limb_length("hip_right", "knee_right")
    lower_leg = limb_length("knee_left", "ankle_left") or limb_length("knee_right", "ankle_right")
    torso = limb_length("hip_middle", "shoulder_middle")

    # Global scale from the torso; guard against a degenerate (zero) scale.
    if torso is not None:
        beta[0] = torso / 0.52
    scale = float(beta[0])
    if scale <= 1e-8:
        scale = 1.0
        beta[0] = 1.0
    # beta[1] is 1.0 by construction when the torso drove the scale above.
    if torso is not None:
        beta[1] = torso / (0.52 * scale)
    if shoulder_width is not None:
        beta[2] = shoulder_width / (0.36 * scale)
    if pelvis_width is not None:
        beta[3] = pelvis_width / (0.24 * scale)
    if upper_arm is not None:
        beta[4] = upper_arm / (0.30 * scale)
    if lower_arm is not None:
        beta[5] = lower_arm / (0.26 * scale)
    if upper_leg is not None:
        beta[6] = upper_leg / (0.45 * scale)
    if lower_leg is not None:
        beta[7] = lower_leg / (0.43 * scale)
    return np.clip(beta, 0.5, 2.0)
|
||||||
|
|
||||||
|
|
||||||
|
def _estimate_root_rotation(pose3d: Pose3D) -> np.ndarray:
    """Estimate the root (pelvis) orientation as intrinsic X-Y-Z Euler angles.

    Builds an orthonormal body frame from the hip line (x axis), the
    pelvis-to-shoulder direction (y axis) and their cross product (z axis);
    y is then re-orthogonalized as z x x. Stacking the axes as columns
    yields the body-to-world rotation, which is decomposed to Euler angles.

    NOTE(review): visibility flags (column 3) are not checked here — joints
    missing from triangulation contribute zero positions and silently
    degrade the estimate.
    """
    hip_left = pose3d[BODY20_INDEX_BY_NAME["hip_left"], :3]
    hip_right = pose3d[BODY20_INDEX_BY_NAME["hip_right"], :3]
    shoulder_middle = pose3d[BODY20_INDEX_BY_NAME["shoulder_middle"], :3]
    pelvis = pose3d[BODY20_INDEX_BY_NAME["hip_middle"], :3]
    x_axis = _safe_normalize(hip_left - hip_right, np.asarray([1.0, 0.0, 0.0], dtype=np.float64))
    y_axis = _safe_normalize(shoulder_middle - pelvis, np.asarray([0.0, 1.0, 0.0], dtype=np.float64))
    z_axis = _safe_normalize(np.cross(x_axis, y_axis), np.asarray([0.0, 0.0, 1.0], dtype=np.float64))
    # Re-orthogonalize y so the three axes form a proper right-handed frame.
    y_axis = _safe_normalize(np.cross(z_axis, x_axis), np.asarray([0.0, 1.0, 0.0], dtype=np.float64))
    rotation = np.stack([x_axis, y_axis, z_axis], axis=1)
    return _rotation_matrix_to_euler_xyz(rotation)
|
||||||
|
|
||||||
|
|
||||||
|
def _resolve_beta(pose3d: Pose3D, beta: np.ndarray | None) -> np.ndarray:
    """Validate a caller-supplied shape vector, or estimate one from the pose.

    Raises:
        ValueError: if a supplied beta does not have shape (SHAPE_DIMENSION,).
    """
    if beta is not None:
        resolved = np.asarray(beta, dtype=np.float64)
        if resolved.shape != (SHAPE_DIMENSION,):
            raise ValueError(f"Expected beta with shape ({SHAPE_DIMENSION},), got {resolved.shape}.")
        return np.clip(resolved, 0.5, 2.0)
    return _estimate_beta_from_pose(pose3d)
|
||||||
|
|
||||||
|
|
||||||
|
def seed_state_from_pose3d(pose3d: Pose3D, beta: np.ndarray | None = None) -> SkeletonState:
    """Build an initial skeleton state from a triangulated (20, 4) pose.

    Only the root translation and root rotation parameters are seeded; all
    articulation parameters start at zero and are expected to be refined by
    `refine_state_from_pose3d`. The stored pose3d is a copy of the input,
    not a render of the seeded parameters.

    Args:
        pose3d: (20, 4) triangulated joints with per-joint visibility.
        beta: optional (SHAPE_DIMENSION,) shape vector; estimated from the
            pose when omitted.
    """
    seeded_beta = _resolve_beta(pose3d, beta)
    parameters = np.zeros(PARAMETER_DIMENSION, dtype=np.float64)
    parameters[0:3] = pose3d[BODY20_INDEX_BY_NAME["hip_middle"], :3]
    parameters[3:6] = _estimate_root_rotation(pose3d)
    return SkeletonState(
        parameters=parameters,
        beta=seeded_beta,
        pose3d=np.asarray(pose3d, dtype=np.float64).copy(),
    )
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def refine_state_from_pose3d(pose3d: Pose3D, beta: np.ndarray | None = None) -> SkeletonState:
    """Fit the full parameter vector to a triangulated pose.

    Seeds root translation/rotation via `seed_state_from_pose3d`, then runs a
    trust-region least-squares fit of the rendered joints against the visible
    input joints. Beta is held fixed during the fit.

    Args:
        pose3d: (20, 4) triangulated joints; only joints with confidence > 0
            contribute to the residual.
        beta: optional (SHAPE_DIMENSION,) shape vector; estimated when omitted.

    Returns:
        SkeletonState whose pose3d is the render of the fitted parameters.
    """
    seeded_state = seed_state_from_pose3d(pose3d, beta=beta)

    def residual(values: np.ndarray) -> np.ndarray:
        # Flattened 3D error over visible joints only.
        rendered = render_pose(values, seeded_state.beta)
        mask = pose3d[:, 3] > 0.0
        delta = rendered[mask, :3] - pose3d[mask, :3]
        return delta.reshape(-1)

    # max_nfev caps the per-track cost; the seed is usually close enough.
    result = least_squares(
        residual,
        seeded_state.parameters,
        method="trf",
        max_nfev=80,
    )
    final_parameters = np.asarray(result.x, dtype=np.float64)
    return SkeletonState(
        parameters=final_parameters,
        beta=seeded_state.beta,
        pose3d=render_pose(final_parameters, seeded_state.beta),
    )
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def initialize_state_from_pose3d(pose3d: Pose3D) -> SkeletonState:
    """Convenience wrapper: refine a state with beta estimated from the pose."""
    return refine_state_from_pose3d(pose3d)
|
||||||
|
|
||||||
|
|
||||||
|
def _2d_update_residual(
    candidate: np.ndarray,
    predicted_parameters: np.ndarray,
    beta: np.ndarray,
    cameras: tuple[CameraCalibration, ...],
    matched: dict[str, PoseDetection],
    camera_by_name: dict[str, CameraCalibration],
    noise_scale: np.ndarray,
) -> np.ndarray:
    """Residual vector for the multiview 2D-reprojection parameter update.

    Combines a weak prior pulling `candidate` towards `predicted_parameters`
    (weight 0.08) with per-camera reprojection errors, each divided by the
    per-joint noise sigmas and weighted by sqrt of detection confidence.

    NOTE(review): `cameras` is unused — lookups go through `camera_by_name`;
    it is kept only to match the args tuple built by the caller.
    """
    rendered = render_pose(candidate, beta)
    residual_parts: list[np.ndarray] = [0.08 * (candidate - predicted_parameters)]
    for camera_name, detection in matched.items():
        camera = camera_by_name[camera_name]
        projected = project_pose(camera, rendered)
        # Only joints with a minimally confident 2D observation contribute.
        joint_mask = detection.keypoints[:, 2] > 0.05
        if not np.any(joint_mask):
            continue
        delta = projected[joint_mask, :2] - detection.keypoints[joint_mask, :2]
        weights = np.sqrt(np.clip(detection.keypoints[joint_mask, 2], 0.05, 1.0))
        sigmas = noise_scale[joint_mask][:, None]
        residual_parts.append((delta / sigmas) * weights[:, None])
    if len(residual_parts) == 1:
        # No camera contributed observations: prior term only.
        return residual_parts[0]
    return np.concatenate([part.reshape(-1) for part in residual_parts], axis=0)
|
||||||
|
|
||||||
|
|
||||||
|
def update_state_from_multiview(
    state: SkeletonState,
    cameras: tuple[CameraCalibration, ...],
    matched: dict[str, PoseDetection],
    noise_scale: np.ndarray,
) -> SkeletonState:
    """Refine a track's parameters against matched 2D detections.

    Runs a short robust (soft-L1) trust-region fit of `_2d_update_residual`
    starting from the current parameters. Returns `state` unchanged when no
    detection is matched. Beta is copied, never re-estimated here.
    """
    if not matched:
        return state
    camera_by_name = {camera.name: camera for camera in cameras}
    # Few iterations on purpose: this runs per track per bundle.
    result = least_squares(
        _2d_update_residual,
        state.parameters,
        args=(state.parameters, state.beta, cameras, matched, camera_by_name, noise_scale),
        method="trf",
        max_nfev=30,
        loss="soft_l1",
        f_scale=1.0,
    )
    parameters = np.asarray(result.x, dtype=np.float64)
    return SkeletonState(parameters=parameters, beta=state.beta.copy(), pose3d=render_pose(parameters, state.beta))
|
||||||
|
|
||||||
|
|
||||||
|
def update_noise_scale(
    previous: np.ndarray,
    cameras: tuple[CameraCalibration, ...],
    pose3d: Pose3D,
    matched: dict[str, PoseDetection],
    *,
    ema: float,
) -> np.ndarray:
    """EMA-update the per-joint reprojection noise sigmas (in pixels).

    For every matched camera, the pixel magnitude of the reprojection
    residual is folded into the sigma of each confidently observed joint,
    clamped to [3, 45] px. Returns `previous` untouched when nothing matched.

    NOTE(review): with several matched cameras a joint's sigma is EMA-updated
    once per camera within the same call — confirm this compounding is
    intended rather than averaging across views first.
    """
    if not matched:
        return previous
    updated = previous.copy()
    camera_by_name = {camera.name: camera for camera in cameras}
    for camera_name, detection in matched.items():
        projected = project_pose(camera_by_name[camera_name], pose3d)
        mask = detection.keypoints[:, 2] > 0.05
        if not np.any(mask):
            continue
        residual = projected[mask, :2] - detection.keypoints[mask, :2]
        magnitude = np.sqrt(np.sum(residual * residual, axis=1))
        updated[mask] = np.clip(ema * updated[mask] + (1.0 - ema) * magnitude, 3.0, 45.0)
    return updated
|
||||||
@@ -0,0 +1,224 @@
|
|||||||
|
from dataclasses import dataclass, field
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Literal
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from pose_tracking_exp.tensor_types import Matrix3, Pose2D, Pose3D, Vector3
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class CameraCalibration:
    """Intrinsics and extrinsics for one camera.

    Extrinsics are stored in the OpenCV world-to-camera convention
    (x_cam = R @ x_world + T); the derived camera pose in world coordinates
    (pose_R / pose_T) is computed in ``__post_init__``.
    """

    name: str  # unique camera identifier
    width: int  # image width in pixels
    height: int  # image height in pixels
    K: Matrix3  # 3x3 intrinsic matrix
    DC: np.ndarray  # distortion coefficients, flattened
    # Canonical in-repo convention: OpenCV world->camera extrinsics.
    R: Matrix3
    T: Vector3
    model: str = "pinhole"  # camera model name
    rvec: np.ndarray | None = None  # Rodrigues vector for R; derived when omitted
    pose_R: Matrix3 = field(init=False)  # camera-to-world rotation (R transposed)
    pose_T: Vector3 = field(init=False)  # camera centre in world coordinates

    def __post_init__(self) -> None:
        """Coerce array shapes/dtypes and derive rvec and the world-frame pose."""
        self.K = np.asarray(self.K, dtype=np.float64).reshape(3, 3)
        self.DC = np.asarray(self.DC, dtype=np.float64).reshape(-1)
        self.R = np.asarray(self.R, dtype=np.float64).reshape(3, 3)
        self.T = np.asarray(self.T, dtype=np.float64).reshape(3)
        if self.rvec is None:
            rvec, _ = cv2.Rodrigues(self.R)
            self.rvec = np.asarray(rvec, dtype=np.float64).reshape(3)
        else:
            self.rvec = np.asarray(self.rvec, dtype=np.float64).reshape(3)
        # Invert world->camera to obtain the camera pose in world coordinates.
        self.pose_R = self.R.T
        self.pose_T = -(self.pose_R @ self.T)

    @classmethod
    def from_opencv_extrinsics(
        cls,
        *,
        name: str,
        width: int,
        height: int,
        K: Matrix3,
        DC: np.ndarray,
        R: Matrix3,
        T: Vector3,
        model: str = "pinhole",
        rvec: np.ndarray | None = None,
    ) -> "CameraCalibration":
        """Construct from OpenCV world->camera extrinsics (stored verbatim)."""
        return cls(
            name=name,
            width=width,
            height=height,
            K=K,
            DC=DC,
            R=R,
            T=T,
            model=model,
            rvec=rvec,
        )

    @classmethod
    def from_rpt_pose(
        cls,
        *,
        name: str,
        width: int,
        height: int,
        K: Matrix3,
        DC: np.ndarray,
        R: Matrix3,
        T: Vector3,
        model: str = "pinhole",
    ) -> "CameraCalibration":
        """Construct from an RPT camera pose (camera-to-world R / camera centre T).

        The pose is inverted into the canonical OpenCV world->camera form
        before storage, and the matching Rodrigues vector is precomputed.
        """
        pose_R = np.asarray(R, dtype=np.float64).reshape(3, 3)
        pose_T = np.asarray(T, dtype=np.float64).reshape(3)
        # world->camera: R = pose_R^T, T = -R @ camera_centre.
        rotation = pose_R.T
        translation = -(rotation @ pose_T)
        rvec, _ = cv2.Rodrigues(rotation)
        return cls(
            name=name,
            width=width,
            height=height,
            K=K,
            DC=DC,
            R=rotation,
            T=translation,
            model=model,
            rvec=np.asarray(rvec, dtype=np.float64).reshape(3),
        )
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class SceneConfig:
    """Static scene description: room bounds and calibrated cameras."""

    room_size: Vector3  # (x, y, z) room extents; units follow the scene file
    room_center: Vector3  # world-space centre of the room
    cameras: tuple[CameraCalibration, ...]  # all calibrated views
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class PoseDetection:
    """One person detection in one camera image."""

    bbox: np.ndarray  # (4,) [x_min, y_min, x_max, y_max] in pixels
    bbox_confidence: float  # detector box score in [0, 1]
    keypoints: Pose2D  # (20, 3) body20 keypoints: x, y, confidence
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class CameraFrame:
    """All detections of a single camera at a single frame."""

    camera_name: str  # matches CameraCalibration.name
    frame_index: int  # per-camera frame counter
    timestamp_unix_ns: int  # capture timestamp, Unix epoch nanoseconds
    detections: tuple[PoseDetection, ...]  # detections in this frame
    source_size: tuple[int, int]  # (width, height) of the coordinate frame
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class FrameBundle:
    """Time-synchronized group of camera frames forming one tracker step."""

    bundle_index: int  # sequential bundle counter
    timestamp_unix_ns: int  # representative timestamp of the bundle
    views: tuple[CameraFrame, ...]  # one entry per contributing camera
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class ReplaySequence:
    """A loaded replay: per-camera frame streams plus their source paths."""

    scene_path: Path  # scene/calibration file the frames were recorded against
    replay_path: Path  # replay file the frames were loaded from
    frames_by_camera: dict[str, list[CameraFrame]]  # camera name -> ordered frames
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class ProposalCluster:
    """A triangulated birth proposal aggregated across views."""

    pose3d: Pose3D  # (20, 4) triangulated joints
    root: Vector3  # representative root position of the cluster
    source_views: frozenset[str]  # camera names that supported the proposal
    support_size: int  # number of supporting detections
    mean_score: float  # average score of the supporting detections
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class SkeletonState:
    """Articulated skeleton: parameters, fixed shape, and the rendered pose."""

    parameters: np.ndarray  # (31,) translation + joint Euler angles
    beta: np.ndarray  # (8,) shape coefficients (see _base_lengths)
    pose3d: Pose3D  # (20, 4) rendered or seeded joint positions
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class TentativeTrackState:
    """A candidate track collecting evidence before promotion to active."""

    track_id: int  # globally unique track identifier
    state: Literal["tentative"] = "tentative"  # lifecycle tag
    age: int = 0  # bundles since creation
    misses: int = 0  # consecutive bundles without support
    score: float = 0.0  # accumulated support score
    last_bundle_index: int = -1  # bundle of the last update (-1 = never)
    root: Vector3 = field(default_factory=lambda: np.zeros(3, dtype=np.float64))  # last root estimate
    pose3d: Pose3D = field(default_factory=lambda: np.zeros((20, 4), dtype=np.float64))  # last pose estimate
    evidence_buffer: list[Pose3D] = field(default_factory=list)  # recent supporting poses
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class ActiveTrackState:
    """A confirmed track; may transition between active and lost."""

    track_id: int  # globally unique track identifier
    status: Literal["active", "lost"] = "active"  # current lifecycle phase
    misses: int = 0  # consecutive bundles without a match (while active)
    lost_age: int = 0  # bundles spent in the lost state
    score: float = 0.0  # running support score
    last_bundle_index: int = -1  # bundle of the last update (-1 = never)
    # Default skeleton uses the neutral shape and zeroed parameters/pose.
    skeleton: SkeletonState = field(
        default_factory=lambda: SkeletonState(
            parameters=np.zeros(31, dtype=np.float64),
            beta=np.ones(8, dtype=np.float64),
            pose3d=np.zeros((20, 4), dtype=np.float64),
        )
    )
    # Per-joint reprojection sigmas in pixels; 9.0 is the initial guess.
    noise_scale: np.ndarray = field(default_factory=lambda: np.full((20,), 9.0, dtype=np.float64))
|
||||||
|
|
||||||
|
|
||||||
|
# Union of the lifecycle representations a track can take at any bundle.
TrackState = TentativeTrackState | ActiveTrackState
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class TrackedFrameResult:
    """Snapshot of tracker output for one processed bundle."""

    bundle_index: int  # which bundle this result belongs to
    timestamp_unix_ns: int  # bundle timestamp
    tentative_tracks: tuple[TentativeTrackState, ...]  # candidates not yet promoted
    active_tracks: tuple[ActiveTrackState, ...]  # confirmed, currently matched tracks
    lost_tracks: tuple[ActiveTrackState, ...]  # confirmed tracks awaiting reacquisition
    proposals: tuple[ProposalCluster, ...]  # triangulated birth proposals this bundle
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class TrackerDiagnostics:
    """Mutable counters/timers accumulated across a tracking run."""

    match_existing_calls: int = 0  # invocations of existing-track matching
    match_existing_seconds: float = 0.0  # wall time spent matching
    proposal_build_calls: int = 0  # invocations of proposal generation
    proposal_build_seconds: float = 0.0  # wall time spent building proposals
    promotions: int = 0  # tentative -> active transitions
    reacquisitions: int = 0  # lost -> active transitions
    active_updates: int = 0  # successful active-track updates
    seed_initializations: int = 0  # skeleton seeds from triangulated poses
    nonlinear_refinements: int = 0  # least-squares refinement runs
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class TrackerConfig:
    """Tuning knobs for the multiview tracker lifecycle and gating."""

    mode: Literal["general", "single_person"] = "general"  # tracking regime
    min_bundle_views: int = 2  # minimum cameras required to form a bundle
    max_sync_skew_ns: int = 12_000_000  # max timestamp skew inside a bundle (12 ms)
    tentative_buffer_size: int = 5  # evidence poses kept per tentative track
    tentative_min_age: int = 3  # bundles a candidate must survive before promotion
    tentative_hits_required: int = 3  # supported bundles required for promotion
    tentative_promote_score: float = 3.0  # score threshold for promotion
    tentative_max_misses: int = 2  # consecutive misses before a candidate dies
    active_min_views: int = 2  # cameras needed to keep updating an active track
    active_core_gate_px: float = 80.0  # gate on core-joint reprojection distance
    active_joint_gate_px: float = 120.0  # gate on per-joint reprojection distance
    active_miss_to_lost: int = 3  # consecutive misses before active -> lost
    lost_delete_age: int = 15  # bundles a lost track lingers before deletion
    proposal_match_distance_m: float = 0.45  # max root distance to merge proposal & track
    noise_ema: float = 0.85  # EMA factor for per-joint noise sigmas
    proposal_min_score: float = 0.9  # minimum score for a usable proposal
    proposal_min_group_size: int = 1  # minimum supporting detections per proposal
|
||||||
@@ -0,0 +1,154 @@
|
|||||||
|
import math
|
||||||
|
from collections.abc import Mapping
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from beartype import beartype
|
||||||
|
from jaxtyping import jaxtyped
|
||||||
|
|
||||||
|
from pose_tracking_exp.joints import BODY20_INDEX_BY_NAME, BODY20_OBSERVATION_COUNT, RTMPOSE_BODY17_INDEX_BY_NAME
|
||||||
|
from pose_tracking_exp.tensor_types import FloatArray, JointXY, Pose2D
|
||||||
|
|
||||||
|
|
||||||
|
def _visible_mean(points: list[tuple[np.ndarray, float]], fallback_xy: np.ndarray) -> tuple[np.ndarray, float]:
|
||||||
|
visible = [point for point, confidence in points if confidence > 0.0]
|
||||||
|
confidences = [confidence for _, confidence in points if confidence > 0.0]
|
||||||
|
if not visible:
|
||||||
|
return fallback_xy.astype(np.float64, copy=True), 0.0
|
||||||
|
stacked = np.stack(visible, axis=0)
|
||||||
|
return stacked.mean(axis=0), float(sum(confidences) / len(confidences))
|
||||||
|
|
||||||
|
|
||||||
|
def _normalize_named_keypoints(
    keypoint_map: Mapping[str, tuple[np.ndarray, float]],
    *,
    shoulder_middle_source: tuple[str, str],
    hip_middle_source: tuple[str, str],
    head_source_names: tuple[str, ...],
) -> Pose2D:
    """Assemble a body20 (N, 3) keypoint array from named (xy, confidence) pairs.

    The 17 standard joints are copied verbatim; the three synthetic joints
    (hip_middle, shoulder_middle, head) are visibility-weighted means of their
    source joints. Fallback chain: shoulder_middle falls back to hip_middle's
    position, head falls back to shoulder_middle's — each with confidence 0.
    """
    pose = np.zeros((BODY20_OBSERVATION_COUNT, 3), dtype=np.float64)
    # Copy the 17 directly observed joints.
    for name in (
        "nose",
        "eye_left",
        "eye_right",
        "ear_left",
        "ear_right",
        "shoulder_left",
        "shoulder_right",
        "elbow_left",
        "elbow_right",
        "wrist_left",
        "wrist_right",
        "hip_left",
        "hip_right",
        "knee_left",
        "knee_right",
        "ankle_left",
        "ankle_right",
    ):
        point, confidence = keypoint_map[name]
        pose[BODY20_INDEX_BY_NAME[name], :2] = point
        pose[BODY20_INDEX_BY_NAME[name], 2] = confidence

    # Synthetic midpoints: visibility-aware means with chained fallbacks.
    hip_middle_xy, hip_middle_conf = _visible_mean(
        [keypoint_map[hip_middle_source[0]], keypoint_map[hip_middle_source[1]]],
        np.zeros(2, dtype=np.float64),
    )
    shoulder_middle_xy, shoulder_middle_conf = _visible_mean(
        [keypoint_map[shoulder_middle_source[0]], keypoint_map[shoulder_middle_source[1]]],
        hip_middle_xy,
    )
    head_xy, head_conf = _visible_mean([keypoint_map[name] for name in head_source_names], shoulder_middle_xy)
    pose[BODY20_INDEX_BY_NAME["hip_middle"], :2] = hip_middle_xy
    pose[BODY20_INDEX_BY_NAME["hip_middle"], 2] = hip_middle_conf
    pose[BODY20_INDEX_BY_NAME["shoulder_middle"], :2] = shoulder_middle_xy
    pose[BODY20_INDEX_BY_NAME["shoulder_middle"], 2] = shoulder_middle_conf
    pose[BODY20_INDEX_BY_NAME["head"], :2] = head_xy
    pose[BODY20_INDEX_BY_NAME["head"], 2] = head_conf
    return pose
|
||||||
|
|
||||||
|
|
||||||
|
@jaxtyped(typechecker=beartype)
def normalize_rtmpose_body20(
    keypoints_xy: FloatArray,
    confidences: FloatArray,
) -> Pose2D:
    """Convert RTMPose wholebody-133 output to the body20 layout.

    Only the 17 COCO body joints (per RTMPOSE_BODY17_INDEX_BY_NAME) are used;
    the synthetic midpoints are derived by `_normalize_named_keypoints`.

    Args:
        keypoints_xy: (133, 2) pixel coordinates.
        confidences: (133,) per-keypoint scores.

    Raises:
        ValueError: on unexpected input shapes.
    """
    if keypoints_xy.shape != (133, 2):
        raise ValueError(f"Expected RTMPose keypoints with shape (133, 2), got {keypoints_xy.shape}.")
    if confidences.shape != (133,):
        raise ValueError(f"Expected RTMPose confidences with shape (133,), got {confidences.shape}.")

    keypoint_map = {
        name: (keypoints_xy[source_index], float(confidences[source_index]))
        for name, source_index in RTMPOSE_BODY17_INDEX_BY_NAME.items()
    }
    return _normalize_named_keypoints(
        keypoint_map,
        shoulder_middle_source=("shoulder_left", "shoulder_right"),
        hip_middle_source=("hip_left", "hip_right"),
        head_source_names=("nose", "eye_left", "eye_right", "ear_left", "ear_right"),
    )
|
||||||
|
|
||||||
|
|
||||||
|
@jaxtyped(typechecker=beartype)
def normalize_openpose25_body20(keypoints: FloatArray) -> Pose2D:
    """Convert OpenPose BODY_25 output (25, 3) to the body20 layout.

    The 17 COCO-style joints are remapped by index; OpenPose's explicit neck
    (index 1) and mid-hip (index 8) override the synthetic midpoints when
    their confidence is higher than the computed mean.

    Raises:
        ValueError: on an unexpected input shape.
    """
    if keypoints.shape != (25, 3):
        raise ValueError(f"Expected OpenPose25 keypoints with shape (25, 3), got {keypoints.shape}.")

    def point(index: int) -> tuple[np.ndarray, float]:
        # Split one BODY_25 row into (xy, confidence).
        xy = np.asarray(keypoints[index, :2], dtype=np.float64)
        confidence = float(keypoints[index, 2])
        return xy, confidence

    # BODY_25 index mapping (left/right per OpenPose's own labeling).
    keypoint_map = {
        "nose": point(0),
        "eye_left": point(16),
        "eye_right": point(15),
        "ear_left": point(18),
        "ear_right": point(17),
        "shoulder_left": point(5),
        "shoulder_right": point(2),
        "elbow_left": point(6),
        "elbow_right": point(3),
        "wrist_left": point(7),
        "wrist_right": point(4),
        "hip_left": point(12),
        "hip_right": point(9),
        "knee_left": point(13),
        "knee_right": point(10),
        "ankle_left": point(14),
        "ankle_right": point(11),
    }
    pose = _normalize_named_keypoints(
        keypoint_map,
        shoulder_middle_source=("shoulder_left", "shoulder_right"),
        hip_middle_source=("hip_left", "hip_right"),
        head_source_names=("nose", "eye_left", "eye_right", "ear_left", "ear_right"),
    )
    # Prefer OpenPose's directly detected neck / mid-hip when more confident.
    neck_xy, neck_conf = point(1)
    if neck_conf > pose[BODY20_INDEX_BY_NAME["shoulder_middle"], 2]:
        pose[BODY20_INDEX_BY_NAME["shoulder_middle"], :2] = neck_xy
        pose[BODY20_INDEX_BY_NAME["shoulder_middle"], 2] = neck_conf
    mid_hip_xy, mid_hip_conf = point(8)
    if mid_hip_conf > pose[BODY20_INDEX_BY_NAME["hip_middle"], 2]:
        pose[BODY20_INDEX_BY_NAME["hip_middle"], :2] = mid_hip_xy
        pose[BODY20_INDEX_BY_NAME["hip_middle"], 2] = mid_hip_conf
    return pose
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def infer_bbox_from_keypoints(pose: Pose2D) -> np.ndarray:
    """Axis-aligned [x_min, y_min, x_max, y_max] over visible keypoints.

    Returns zeros(4) when no keypoint has positive confidence.
    """
    visible_xy = pose[:, :2][pose[:, 2] > 0.0]
    if visible_xy.size == 0:
        return np.zeros(4, dtype=np.float64)
    x_min, y_min = visible_xy.min(axis=0)
    x_max, y_max = visible_xy.max(axis=0)
    return np.asarray([x_min, y_min, x_max, y_max], dtype=np.float64)
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def core_reprojection_distance(predicted: Pose2D, observed: Pose2D) -> float:
    """RMS pixel distance over joints visible in both poses (inf when none)."""
    both_visible = (predicted[:, 2] > 0.0) & (observed[:, 2] > 0.0)
    if not bool(np.any(both_visible)):
        return math.inf
    diff = predicted[both_visible, :2] - observed[both_visible, :2]
    squared = np.sum(diff * diff, axis=1)
    return float(np.sqrt(np.mean(squared)))
|
||||||
@@ -0,0 +1,147 @@
|
|||||||
|
import base64
|
||||||
|
import json
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from beartype import beartype
|
||||||
|
|
||||||
|
from pose_tracking_exp.models import CameraFrame, PoseDetection
|
||||||
|
from pose_tracking_exp.normalization import normalize_rtmpose_body20
|
||||||
|
|
||||||
|
PROTOCOL_HEADER = bytes([0x80]) + b"POSE"
|
||||||
|
POSE_JOINT_COUNT = 133
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(slots=True)
class DecodedPosePayload:
    """Result of decoding one binary ParaJumping pose payload."""

    frame_index: int  # sender-side frame counter
    reference_size: tuple[int, int]  # (width, height) the coordinates refer to
    timestamp_unix_ns: int  # capture timestamp, Unix epoch nanoseconds
    detections: tuple[PoseDetection, ...]  # decoded, body20-normalized detections
|
||||||
|
|
||||||
|
|
||||||
|
def _read_u8(payload: memoryview, offset: int) -> tuple[int, int]:
|
||||||
|
return int(payload[offset]), offset + 1
|
||||||
|
|
||||||
|
|
||||||
|
def _read_u16_array(payload: memoryview, offset: int, count: int) -> tuple[np.ndarray, int]:
|
||||||
|
size = count * 2
|
||||||
|
array = np.frombuffer(payload[offset : offset + size], dtype="<u2", count=count).astype(np.float64)
|
||||||
|
return array, offset + size
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def decode_pose_payload(payload: bytes) -> DecodedPosePayload:
    """Decode one binary ParaJumping pose payload.

    Wire layout (little-endian), inferred from the reads below — NOTE(review):
    confirm against the ParaJumping encoder:

        5 bytes  header 0x80 'POSE'
        u32      frame_index
        2 * u16  reference (width, height)
        u8       num_bbox,          then num_bbox * 4 u16 bbox coords
        u8       num_bbox_conf,     then num_bbox_conf u8 scores (0..255)
        u8       num_keypoints,     then num_keypoints * 133 * 2 u16 xy coords
        u8       num_keypoint_conf, then num_keypoint_conf u8 scores (0..255)
        u64      timestamp_unix_ns

    Keypoints are normalized to body20 via `normalize_rtmpose_body20`.

    Raises:
        ValueError: on a bad header or a keypoint-confidence length mismatch.
    """
    if not payload.startswith(PROTOCOL_HEADER):
        raise ValueError("Invalid ParaJumping pose payload header.")

    view = memoryview(payload)
    offset = len(PROTOCOL_HEADER)
    frame_index = int.from_bytes(view[offset : offset + 4], "little")
    offset += 4
    reference_size = tuple(int(x) for x in np.frombuffer(view[offset : offset + 4], dtype="<u2", count=2))
    offset += 4

    # Bounding boxes: count byte then packed u16 [x1, y1, x2, y2] per box.
    num_bbox = int(view[offset])
    offset += 1
    bbox_raw, offset = _read_u16_array(view, offset, num_bbox * 4)
    bboxes = bbox_raw.reshape(num_bbox, 4) if num_bbox > 0 else np.zeros((0, 4), dtype=np.float64)

    # Box confidences: one byte each, 0..255.
    num_bbox_conf = int(view[offset])
    offset += 1
    bbox_confidence = np.frombuffer(view[offset : offset + num_bbox_conf], dtype=np.uint8, count=num_bbox_conf)
    offset += num_bbox_conf

    # Keypoints: count byte then 133 (x, y) u16 pairs per person.
    num_keypoints = int(view[offset])
    offset += 1
    keypoints_raw, offset = _read_u16_array(view, offset, num_keypoints * POSE_JOINT_COUNT * 2)
    keypoints_xy = (
        keypoints_raw.reshape(num_keypoints, POSE_JOINT_COUNT, 2)
        if num_keypoints > 0
        else np.zeros((0, POSE_JOINT_COUNT, 2), dtype=np.float64)
    )

    # Keypoint confidences: one byte per joint, rescaled to [0, 1].
    num_keypoint_conf = int(view[offset])
    offset += 1
    keypoint_confidence = (
        np.frombuffer(view[offset : offset + num_keypoint_conf], dtype=np.uint8, count=num_keypoint_conf).astype(np.float64)
        / 255.0
    )
    offset += num_keypoint_conf
    timestamp_unix_ns = int.from_bytes(view[offset : offset + 8], "little")

    # NOTE(review): num_keypoint_conf == 0 with num_keypoints > 0 passes this
    # check but fails at the reshape below — confirm the encoder never emits it.
    if num_keypoint_conf > 0 and num_keypoint_conf != num_keypoints * POSE_JOINT_COUNT:
        raise ValueError("Unexpected keypoint confidence payload length.")

    detection_items: list[PoseDetection] = []
    confidences = (
        keypoint_confidence.reshape(num_keypoints, POSE_JOINT_COUNT)
        if num_keypoints > 0
        else np.zeros((0, POSE_JOINT_COUNT), dtype=np.float64)
    )

    # Pair each keypoint set with its bbox/score; missing entries get zeros.
    for index in range(num_keypoints):
        normalized = normalize_rtmpose_body20(keypoints_xy[index], confidences[index])
        bbox_score = float(bbox_confidence[index] / 255.0) if index < bbox_confidence.shape[0] else 0.0
        bbox = bboxes[index] if index < bboxes.shape[0] else np.zeros(4, dtype=np.float64)
        detection_items.append(
            PoseDetection(
                bbox=np.asarray(bbox, dtype=np.float64),
                bbox_confidence=bbox_score,
                keypoints=np.asarray(normalized, dtype=np.float64),
            )
        )

    return DecodedPosePayload(
        frame_index=frame_index,
        reference_size=(reference_size[0], reference_size[1]),
        timestamp_unix_ns=timestamp_unix_ns,
        detections=tuple(detection_items),
    )
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
def frame_from_payload(camera_name: str, payload: bytes) -> CameraFrame:
    """Decode a binary payload and wrap it as a CameraFrame for `camera_name`."""
    decoded = decode_pose_payload(payload)
    return CameraFrame(
        camera_name=camera_name,
        frame_index=decoded.frame_index,
        timestamp_unix_ns=decoded.timestamp_unix_ns,
        detections=decoded.detections,
        source_size=decoded.reference_size,
    )
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def convert_payload_record(record: dict[str, object]) -> dict[str, object]:
|
||||||
|
camera_name = str(record["camera"])
|
||||||
|
payload_b64 = str(record["payload_b64"])
|
||||||
|
frame = frame_from_payload(camera_name, base64.b64decode(payload_b64))
|
||||||
|
|
||||||
|
return {
|
||||||
|
"camera": frame.camera_name,
|
||||||
|
"frame_index": frame.frame_index,
|
||||||
|
"timestamp_unix_ns": frame.timestamp_unix_ns,
|
||||||
|
"source_size": list(frame.source_size),
|
||||||
|
"detections": [
|
||||||
|
{
|
||||||
|
"bbox": detection.bbox.tolist(),
|
||||||
|
"bbox_confidence": detection.bbox_confidence,
|
||||||
|
"keypoints": detection.keypoints.tolist(),
|
||||||
|
}
|
||||||
|
for detection in frame.detections
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def convert_payload_jsonl_lines(lines: list[str]) -> list[str]:
|
||||||
|
output_lines: list[str] = []
|
||||||
|
for line in lines:
|
||||||
|
if not line.strip():
|
||||||
|
continue
|
||||||
|
record = json.loads(line)
|
||||||
|
converted = convert_payload_record(record)
|
||||||
|
output_lines.append(json.dumps(converted))
|
||||||
|
return output_lines
|
||||||
|
|
||||||
@@ -0,0 +1,108 @@
|
|||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from beartype import beartype
|
||||||
|
|
||||||
|
from pose_tracking_exp.models import CameraCalibration, CameraFrame, PoseDetection, ReplaySequence, SceneConfig
|
||||||
|
|
||||||
|
_OPENCV_EXTRINSICS = "opencv_world_to_camera"
|
||||||
|
_RPT_POSE = "rpt_camera_pose"
|
||||||
|
|
||||||
|
|
||||||
|
def _as_float_array(values: object, shape: tuple[int, ...]) -> np.ndarray:
|
||||||
|
array = np.asarray(values, dtype=np.float64)
|
||||||
|
if array.shape != shape:
|
||||||
|
raise ValueError(f"Expected shape {shape}, got {array.shape}.")
|
||||||
|
return array
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def load_scene_file(path: Path) -> SceneConfig:
|
||||||
|
payload = json.loads(path.read_text(encoding="utf-8"))
|
||||||
|
default_extrinsic_format = str(payload.get("extrinsic_format", _OPENCV_EXTRINSICS))
|
||||||
|
cameras: list[CameraCalibration] = []
|
||||||
|
for camera_payload in payload["cameras"]:
|
||||||
|
extrinsic_format = str(camera_payload.get("extrinsic_format", default_extrinsic_format))
|
||||||
|
name = str(camera_payload["name"])
|
||||||
|
width = int(camera_payload["width"])
|
||||||
|
height = int(camera_payload["height"])
|
||||||
|
K = _as_float_array(camera_payload["K"], (3, 3))
|
||||||
|
DC = np.asarray(camera_payload.get("DC", [0.0, 0.0, 0.0, 0.0, 0.0]), dtype=np.float64)
|
||||||
|
R = _as_float_array(camera_payload["R"], (3, 3))
|
||||||
|
T = _as_float_array(camera_payload["T"], (3, 1)).reshape(3)
|
||||||
|
model = str(camera_payload.get("model", "pinhole"))
|
||||||
|
if extrinsic_format == _OPENCV_EXTRINSICS:
|
||||||
|
cameras.append(
|
||||||
|
CameraCalibration.from_opencv_extrinsics(
|
||||||
|
name=name,
|
||||||
|
width=width,
|
||||||
|
height=height,
|
||||||
|
K=K,
|
||||||
|
DC=DC,
|
||||||
|
R=R,
|
||||||
|
T=T,
|
||||||
|
model=model,
|
||||||
|
rvec=np.asarray(camera_payload["rvec"], dtype=np.float64).reshape(3)
|
||||||
|
if "rvec" in camera_payload
|
||||||
|
else None,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
elif extrinsic_format == _RPT_POSE:
|
||||||
|
cameras.append(
|
||||||
|
CameraCalibration.from_rpt_pose(
|
||||||
|
name=name,
|
||||||
|
width=width,
|
||||||
|
height=height,
|
||||||
|
K=K,
|
||||||
|
DC=DC,
|
||||||
|
R=R,
|
||||||
|
T=T,
|
||||||
|
model=model,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"Unsupported extrinsic format {extrinsic_format!r}. "
|
||||||
|
f"Expected {_OPENCV_EXTRINSICS!r} or {_RPT_POSE!r}."
|
||||||
|
)
|
||||||
|
return SceneConfig(
|
||||||
|
room_size=_as_float_array(payload["room_size"], (3,)),
|
||||||
|
room_center=_as_float_array(payload["room_center"], (3,)),
|
||||||
|
cameras=tuple(cameras),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def load_replay_file(scene_path: Path, replay_path: Path) -> ReplaySequence:
|
||||||
|
frames_by_camera: dict[str, list[CameraFrame]] = {}
|
||||||
|
for raw_line in replay_path.read_text(encoding="utf-8").splitlines():
|
||||||
|
if not raw_line.strip():
|
||||||
|
continue
|
||||||
|
payload = json.loads(raw_line)
|
||||||
|
camera_name = str(payload["camera"])
|
||||||
|
detections: list[PoseDetection] = []
|
||||||
|
for detection_payload in payload["detections"]:
|
||||||
|
detections.append(
|
||||||
|
PoseDetection(
|
||||||
|
bbox=np.asarray(detection_payload["bbox"], dtype=np.float64),
|
||||||
|
bbox_confidence=float(detection_payload["bbox_confidence"]),
|
||||||
|
keypoints=np.asarray(detection_payload["keypoints"], dtype=np.float64),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
frames_by_camera.setdefault(camera_name, []).append(
|
||||||
|
CameraFrame(
|
||||||
|
camera_name=camera_name,
|
||||||
|
frame_index=int(payload["frame_index"]),
|
||||||
|
timestamp_unix_ns=int(payload["timestamp_unix_ns"]),
|
||||||
|
detections=tuple(detections),
|
||||||
|
source_size=(
|
||||||
|
int(payload["source_size"][0]),
|
||||||
|
int(payload["source_size"][1]),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
for frames in frames_by_camera.values():
|
||||||
|
frames.sort(key=lambda item: (item.timestamp_unix_ns, item.frame_index))
|
||||||
|
return ReplaySequence(scene_path=scene_path, replay_path=replay_path, frames_by_camera=frames_by_camera)
|
||||||
@@ -0,0 +1,78 @@
|
|||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import rpt
|
||||||
|
from beartype import beartype
|
||||||
|
|
||||||
|
from pose_tracking_exp.joints import BODY20_JOINT_NAMES, BODY20_OBSERVATION_COUNT, BODY20_INDEX_BY_NAME
|
||||||
|
from pose_tracking_exp.models import CameraFrame, ProposalCluster, SceneConfig
|
||||||
|
from pose_tracking_exp.tensor_types import Pose2D
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def build_rpt_config(scene: SceneConfig, *, min_match_score: float, min_group_size: int) -> Any:
|
||||||
|
cameras = [
|
||||||
|
{
|
||||||
|
"name": camera.name,
|
||||||
|
"width": camera.width,
|
||||||
|
"height": camera.height,
|
||||||
|
"K": camera.K.tolist(),
|
||||||
|
"DC": camera.DC.tolist(),
|
||||||
|
"R": camera.pose_R.tolist(),
|
||||||
|
"T": camera.pose_T.reshape(3, 1).tolist(),
|
||||||
|
"model": camera.model,
|
||||||
|
}
|
||||||
|
for camera in scene.cameras
|
||||||
|
]
|
||||||
|
roomparams = np.asarray([scene.room_size, scene.room_center], dtype=np.float32)
|
||||||
|
return rpt.make_triangulation_config(
|
||||||
|
cameras,
|
||||||
|
roomparams,
|
||||||
|
BODY20_JOINT_NAMES,
|
||||||
|
min_match_score=min_match_score,
|
||||||
|
min_group_size=min_group_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def pack_view_detections(frames: tuple[CameraFrame, ...], unmatched_indices: dict[str, list[int]]) -> tuple[np.ndarray, np.ndarray]:
|
||||||
|
packed_views: list[np.ndarray] = []
|
||||||
|
for frame in frames:
|
||||||
|
indices = unmatched_indices.get(frame.camera_name, [])
|
||||||
|
if indices:
|
||||||
|
packed_views.append(
|
||||||
|
np.asarray([frame.detections[index].keypoints for index in indices], dtype=np.float32)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
packed_views.append(np.zeros((0, BODY20_OBSERVATION_COUNT, 3), dtype=np.float32))
|
||||||
|
return rpt.pack_poses_2d(packed_views, joint_count=BODY20_OBSERVATION_COUNT)
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def extract_clusters(
|
||||||
|
trace: Any,
|
||||||
|
camera_names: tuple[str, ...],
|
||||||
|
) -> tuple[ProposalCluster, ...]:
|
||||||
|
clusters: list[ProposalCluster] = []
|
||||||
|
for pose_index, pose3d in enumerate(trace.final_poses):
|
||||||
|
pose_array = np.asarray(pose3d, dtype=np.float64)
|
||||||
|
root = pose_array[BODY20_INDEX_BY_NAME["hip_middle"], :3]
|
||||||
|
source_indices = []
|
||||||
|
if pose_index < len(trace.merge.group_proposal_indices):
|
||||||
|
source_indices = trace.merge.group_proposal_indices[pose_index]
|
||||||
|
source_views: set[str] = set()
|
||||||
|
for core_proposal_index in source_indices:
|
||||||
|
pair = trace.core_proposals[core_proposal_index].pair
|
||||||
|
source_views.add(camera_names[pair.view1])
|
||||||
|
source_views.add(camera_names[pair.view2])
|
||||||
|
mean_score = float(np.mean(pose_array[:, 3][pose_array[:, 3] > 0.0])) if np.any(pose_array[:, 3] > 0.0) else 0.0
|
||||||
|
clusters.append(
|
||||||
|
ProposalCluster(
|
||||||
|
pose3d=pose_array,
|
||||||
|
root=np.asarray(root, dtype=np.float64),
|
||||||
|
source_views=frozenset(source_views),
|
||||||
|
support_size=max(1, len(source_indices)),
|
||||||
|
mean_score=mean_score,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return tuple(clusters)
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
from collections.abc import Iterable
|
||||||
|
|
||||||
|
from beartype import beartype
|
||||||
|
|
||||||
|
from pose_tracking_exp.models import CameraFrame, FrameBundle, ReplaySequence
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
def synchronize_frames(
|
||||||
|
replay: ReplaySequence,
|
||||||
|
*,
|
||||||
|
max_skew_ns: int,
|
||||||
|
min_views: int,
|
||||||
|
) -> list[FrameBundle]:
|
||||||
|
if not replay.frames_by_camera:
|
||||||
|
return []
|
||||||
|
|
||||||
|
camera_names = sorted(replay.frames_by_camera)
|
||||||
|
reference_name = camera_names[0]
|
||||||
|
reference_frames = replay.frames_by_camera[reference_name]
|
||||||
|
cursors = {camera_name: 0 for camera_name in camera_names}
|
||||||
|
bundles: list[FrameBundle] = []
|
||||||
|
|
||||||
|
for bundle_index, reference_frame in enumerate(reference_frames):
|
||||||
|
matched: list[CameraFrame] = [reference_frame]
|
||||||
|
cursors[reference_name] += 1
|
||||||
|
for camera_name in camera_names[1:]:
|
||||||
|
frames = replay.frames_by_camera[camera_name]
|
||||||
|
cursor = cursors[camera_name]
|
||||||
|
best_index = -1
|
||||||
|
best_skew = max_skew_ns + 1
|
||||||
|
while cursor < len(frames):
|
||||||
|
skew = abs(frames[cursor].timestamp_unix_ns - reference_frame.timestamp_unix_ns)
|
||||||
|
if skew <= best_skew:
|
||||||
|
best_skew = skew
|
||||||
|
best_index = cursor
|
||||||
|
if frames[cursor].timestamp_unix_ns > reference_frame.timestamp_unix_ns and skew > best_skew:
|
||||||
|
break
|
||||||
|
cursor += 1
|
||||||
|
if best_index >= 0 and best_skew <= max_skew_ns:
|
||||||
|
matched.append(frames[best_index])
|
||||||
|
cursors[camera_name] = best_index + 1
|
||||||
|
if len(matched) >= min_views:
|
||||||
|
matched.sort(key=lambda item: item.camera_name)
|
||||||
|
bundles.append(
|
||||||
|
FrameBundle(
|
||||||
|
bundle_index=len(bundles),
|
||||||
|
timestamp_unix_ns=reference_frame.timestamp_unix_ns,
|
||||||
|
views=tuple(matched),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return bundles
|
||||||
|
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
import numpy as np
|
||||||
|
from jaxtyping import Float, Int, UInt
|
||||||
|
|
||||||
|
FloatArray = Float[np.ndarray, "..."]
|
||||||
|
Vector2 = Float[np.ndarray, "2"]
|
||||||
|
Vector3 = Float[np.ndarray, "3"]
|
||||||
|
Vector4 = Float[np.ndarray, "4"]
|
||||||
|
Matrix3 = Float[np.ndarray, "3 3"]
|
||||||
|
Pose2D = Float[np.ndarray, "joints 3"]
|
||||||
|
Pose3D = Float[np.ndarray, "joints 4"]
|
||||||
|
PoseBatch2D = Float[np.ndarray, "persons joints 3"]
|
||||||
|
JointXY = Float[np.ndarray, "joints 2"]
|
||||||
|
JointConf = Float[np.ndarray, " joints"]
|
||||||
|
JointCount = UInt[np.ndarray, " views"]
|
||||||
|
TrackIds = Int[np.ndarray, " tracks"]
|
||||||
|
|
||||||
@@ -0,0 +1,448 @@
|
|||||||
|
from dataclasses import replace
|
||||||
|
from time import perf_counter
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from beartype import beartype
|
||||||
|
from scipy.optimize import linear_sum_assignment
|
||||||
|
|
||||||
|
from pose_tracking_exp.camera_math import project_pose
|
||||||
|
from pose_tracking_exp.joints import BODY20_INDEX_BY_NAME, CORE_JOINT_INDICES
|
||||||
|
from pose_tracking_exp.kinematics import seed_state_from_pose3d, update_noise_scale, update_state_from_multiview
|
||||||
|
from pose_tracking_exp.models import (
|
||||||
|
ActiveTrackState,
|
||||||
|
FrameBundle,
|
||||||
|
PoseDetection,
|
||||||
|
ProposalCluster,
|
||||||
|
SceneConfig,
|
||||||
|
SkeletonState,
|
||||||
|
TentativeTrackState,
|
||||||
|
TrackedFrameResult,
|
||||||
|
TrackerConfig,
|
||||||
|
TrackerDiagnostics,
|
||||||
|
)
|
||||||
|
from pose_tracking_exp.normalization import core_reprojection_distance
|
||||||
|
from pose_tracking_exp.rpt_adapter import build_rpt_config, extract_clusters, pack_view_detections
|
||||||
|
|
||||||
|
CORE_JOINT_MASK = np.zeros((20,), dtype=bool)
|
||||||
|
CORE_JOINT_MASK[list(CORE_JOINT_INDICES)] = True
|
||||||
|
|
||||||
|
|
||||||
|
def _copy_pose3d(pose3d: np.ndarray) -> np.ndarray:
|
||||||
|
return np.asarray(pose3d, dtype=np.float64).copy()
|
||||||
|
|
||||||
|
|
||||||
|
def _proposal_rank(proposal: ProposalCluster) -> tuple[int, float]:
|
||||||
|
return proposal.support_size, proposal.mean_score
|
||||||
|
|
||||||
|
|
||||||
|
def _active_track_rank(track: ActiveTrackState) -> tuple[float, int, int]:
|
||||||
|
return track.score, -track.misses, track.last_bundle_index
|
||||||
|
|
||||||
|
|
||||||
|
def _lost_track_rank(track: ActiveTrackState) -> tuple[float, int, int]:
|
||||||
|
return track.score, -track.lost_age, track.last_bundle_index
|
||||||
|
|
||||||
|
|
||||||
|
def _tentative_track_rank(track: TentativeTrackState) -> tuple[float, int, int, int]:
|
||||||
|
return track.score, len(track.evidence_buffer), -track.misses, track.last_bundle_index
|
||||||
|
|
||||||
|
|
||||||
|
def _tentative_root_spread(track: TentativeTrackState) -> float:
|
||||||
|
if len(track.evidence_buffer) < 2:
|
||||||
|
return 0.0
|
||||||
|
roots = np.asarray(
|
||||||
|
[pose[BODY20_INDEX_BY_NAME["hip_middle"], :3] for pose in track.evidence_buffer],
|
||||||
|
dtype=np.float64,
|
||||||
|
)
|
||||||
|
center = roots.mean(axis=0)
|
||||||
|
return float(np.max(np.linalg.norm(roots - center[None, :], axis=1)))
|
||||||
|
|
||||||
|
|
||||||
|
@beartype
|
||||||
|
class PoseTracker:
|
||||||
|
def __init__(self, scene: SceneConfig, config: TrackerConfig | None = None) -> None:
|
||||||
|
self._scene = scene
|
||||||
|
self._config = config or TrackerConfig()
|
||||||
|
self._rpt_config = build_rpt_config(
|
||||||
|
scene,
|
||||||
|
min_match_score=self._config.proposal_min_score,
|
||||||
|
min_group_size=self._config.proposal_min_group_size,
|
||||||
|
)
|
||||||
|
self._tentative: dict[int, TentativeTrackState] = {}
|
||||||
|
self._active: dict[int, ActiveTrackState] = {}
|
||||||
|
self._lost: dict[int, ActiveTrackState] = {}
|
||||||
|
self._next_track_id = 1
|
||||||
|
self._diagnostics = TrackerDiagnostics()
|
||||||
|
|
||||||
|
def diagnostics_snapshot(self) -> TrackerDiagnostics:
|
||||||
|
return replace(self._diagnostics)
|
||||||
|
|
||||||
|
def run(self, bundles: list[FrameBundle]) -> list[TrackedFrameResult]:
|
||||||
|
self._diagnostics = TrackerDiagnostics()
|
||||||
|
return [self.step(bundle) for bundle in bundles]
|
||||||
|
|
||||||
|
def step(self, bundle: FrameBundle) -> TrackedFrameResult:
|
||||||
|
self._enforce_single_person_constraints()
|
||||||
|
matches, unmatched = self._match_existing_tracks(bundle)
|
||||||
|
self._update_active_tracks(bundle, matches)
|
||||||
|
self._update_lost_tracks(bundle, matches)
|
||||||
|
proposals = self._refresh_single_person_track_from_proposals(bundle, self._build_proposals(bundle, unmatched))
|
||||||
|
self._update_tentative_tracks(bundle, self._birth_candidate_proposals(proposals))
|
||||||
|
self._promote_tentative_tracks(bundle)
|
||||||
|
self._reacquire_lost_tracks(bundle, proposals)
|
||||||
|
self._delete_expired_tracks()
|
||||||
|
self._enforce_single_person_constraints()
|
||||||
|
return TrackedFrameResult(
|
||||||
|
bundle_index=bundle.bundle_index,
|
||||||
|
timestamp_unix_ns=bundle.timestamp_unix_ns,
|
||||||
|
tentative_tracks=tuple(sorted(self._tentative.values(), key=lambda item: item.track_id)),
|
||||||
|
active_tracks=tuple(sorted(self._active.values(), key=lambda item: item.track_id)),
|
||||||
|
lost_tracks=tuple(sorted(self._lost.values(), key=lambda item: item.track_id)),
|
||||||
|
proposals=proposals,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _single_person_mode(self) -> bool:
|
||||||
|
return self._config.mode == "single_person"
|
||||||
|
|
||||||
|
def _keep_best_active_track(self) -> None:
|
||||||
|
if len(self._active) <= 1:
|
||||||
|
return
|
||||||
|
best_id = max(self._active, key=lambda track_id: _active_track_rank(self._active[track_id]))
|
||||||
|
for track_id in list(self._active):
|
||||||
|
if track_id != best_id:
|
||||||
|
self._active.pop(track_id, None)
|
||||||
|
|
||||||
|
def _keep_best_lost_track(self) -> None:
|
||||||
|
if len(self._lost) <= 1:
|
||||||
|
return
|
||||||
|
best_id = max(self._lost, key=lambda track_id: _lost_track_rank(self._lost[track_id]))
|
||||||
|
for track_id in list(self._lost):
|
||||||
|
if track_id != best_id:
|
||||||
|
self._lost.pop(track_id, None)
|
||||||
|
|
||||||
|
def _keep_best_tentative_track(self) -> None:
|
||||||
|
if len(self._tentative) <= 1:
|
||||||
|
return
|
||||||
|
best_id = max(self._tentative, key=lambda track_id: _tentative_track_rank(self._tentative[track_id]))
|
||||||
|
for track_id in list(self._tentative):
|
||||||
|
if track_id != best_id:
|
||||||
|
self._tentative.pop(track_id, None)
|
||||||
|
|
||||||
|
def _enforce_single_person_constraints(self) -> None:
|
||||||
|
if not self._single_person_mode():
|
||||||
|
return
|
||||||
|
self._keep_best_active_track()
|
||||||
|
if self._active:
|
||||||
|
self._lost.clear()
|
||||||
|
self._tentative.clear()
|
||||||
|
return
|
||||||
|
self._keep_best_lost_track()
|
||||||
|
if self._lost:
|
||||||
|
self._tentative.clear()
|
||||||
|
return
|
||||||
|
self._keep_best_tentative_track()
|
||||||
|
|
||||||
|
def _predicted_pose_by_track(self) -> dict[int, np.ndarray]:
|
||||||
|
result: dict[int, np.ndarray] = {}
|
||||||
|
for track_id, track in self._active.items():
|
||||||
|
result[track_id] = track.skeleton.pose3d
|
||||||
|
for track_id, track in self._lost.items():
|
||||||
|
result[track_id] = track.skeleton.pose3d
|
||||||
|
return result
|
||||||
|
|
||||||
|
def _match_existing_tracks(
|
||||||
|
self,
|
||||||
|
bundle: FrameBundle,
|
||||||
|
) -> tuple[dict[int, dict[str, PoseDetection]], dict[str, list[int]]]:
|
||||||
|
started_at = perf_counter()
|
||||||
|
try:
|
||||||
|
predicted = self._predicted_pose_by_track()
|
||||||
|
matches: dict[int, dict[str, PoseDetection]] = {track_id: {} for track_id in predicted}
|
||||||
|
unmatched_indices = {frame.camera_name: list(range(len(frame.detections))) for frame in bundle.views}
|
||||||
|
if not predicted:
|
||||||
|
return matches, unmatched_indices
|
||||||
|
|
||||||
|
camera_by_name = {camera.name: camera for camera in self._scene.cameras}
|
||||||
|
track_ids = sorted(predicted)
|
||||||
|
for frame in bundle.views:
|
||||||
|
cost = np.full(
|
||||||
|
(len(track_ids), len(frame.detections)),
|
||||||
|
fill_value=self._config.active_joint_gate_px * 10.0,
|
||||||
|
dtype=np.float64,
|
||||||
|
)
|
||||||
|
for row, track_id in enumerate(track_ids):
|
||||||
|
projected = project_pose(camera_by_name[frame.camera_name], predicted[track_id])
|
||||||
|
projected_core = projected.copy()
|
||||||
|
projected_core[~CORE_JOINT_MASK, 2] = 0.0
|
||||||
|
for col, detection in enumerate(frame.detections):
|
||||||
|
observed_core = detection.keypoints.copy()
|
||||||
|
observed_core[~CORE_JOINT_MASK, 2] = 0.0
|
||||||
|
cost[row, col] = core_reprojection_distance(projected_core, observed_core)
|
||||||
|
if cost.size == 0:
|
||||||
|
continue
|
||||||
|
rows, cols = linear_sum_assignment(cost)
|
||||||
|
used_indices: set[int] = set()
|
||||||
|
for row, col in zip(rows, cols, strict=False):
|
||||||
|
if cost[row, col] > self._config.active_core_gate_px:
|
||||||
|
continue
|
||||||
|
matches[track_ids[row]][frame.camera_name] = frame.detections[col]
|
||||||
|
used_indices.add(col)
|
||||||
|
unmatched_indices[frame.camera_name] = [
|
||||||
|
index for index in unmatched_indices[frame.camera_name] if index not in used_indices
|
||||||
|
]
|
||||||
|
return matches, unmatched_indices
|
||||||
|
finally:
|
||||||
|
self._diagnostics.match_existing_calls += 1
|
||||||
|
self._diagnostics.match_existing_seconds += perf_counter() - started_at
|
||||||
|
|
||||||
|
def _refine_track_state(
|
||||||
|
self,
|
||||||
|
track: ActiveTrackState,
|
||||||
|
matched: dict[str, PoseDetection],
|
||||||
|
) -> tuple[SkeletonState, np.ndarray]:
|
||||||
|
self._diagnostics.nonlinear_refinements += 1
|
||||||
|
updated_skeleton = update_state_from_multiview(
|
||||||
|
track.skeleton,
|
||||||
|
self._scene.cameras,
|
||||||
|
matched,
|
||||||
|
track.noise_scale,
|
||||||
|
)
|
||||||
|
updated_noise = update_noise_scale(
|
||||||
|
track.noise_scale,
|
||||||
|
self._scene.cameras,
|
||||||
|
updated_skeleton.pose3d,
|
||||||
|
matched,
|
||||||
|
ema=self._config.noise_ema,
|
||||||
|
)
|
||||||
|
self._diagnostics.active_updates += 1
|
||||||
|
return updated_skeleton, updated_noise
|
||||||
|
|
||||||
|
def _seed_skeleton(self, pose3d: np.ndarray, beta: np.ndarray | None = None) -> SkeletonState:
|
||||||
|
self._diagnostics.seed_initializations += 1
|
||||||
|
return seed_state_from_pose3d(_copy_pose3d(pose3d), beta=None if beta is None else beta.copy())
|
||||||
|
|
||||||
|
def _update_active_tracks(self, bundle: FrameBundle, matches: dict[int, dict[str, PoseDetection]]) -> None:
|
||||||
|
demote_to_lost: list[int] = []
|
||||||
|
for track_id, track in list(self._active.items()):
|
||||||
|
matched = matches.get(track_id, {})
|
||||||
|
if len(matched) >= self._config.active_min_views:
|
||||||
|
updated_skeleton, updated_noise = self._refine_track_state(track, matched)
|
||||||
|
track.skeleton = updated_skeleton
|
||||||
|
track.noise_scale = updated_noise
|
||||||
|
track.score = 0.85 * track.score + len(matched)
|
||||||
|
track.misses = 0
|
||||||
|
track.last_bundle_index = bundle.bundle_index
|
||||||
|
else:
|
||||||
|
track.misses += 1
|
||||||
|
track.score -= 1.0
|
||||||
|
if track.misses >= self._config.active_miss_to_lost:
|
||||||
|
demote_to_lost.append(track_id)
|
||||||
|
|
||||||
|
for track_id in demote_to_lost:
|
||||||
|
track = self._active.pop(track_id)
|
||||||
|
track.status = "lost"
|
||||||
|
track.lost_age = 0
|
||||||
|
self._lost[track_id] = track
|
||||||
|
|
||||||
|
def _update_lost_tracks(self, bundle: FrameBundle, matches: dict[int, dict[str, PoseDetection]]) -> None:
|
||||||
|
recover_to_active: list[int] = []
|
||||||
|
for track_id, track in list(self._lost.items()):
|
||||||
|
matched = matches.get(track_id, {})
|
||||||
|
if len(matched) >= self._config.active_min_views:
|
||||||
|
updated_skeleton, updated_noise = self._refine_track_state(track, matched)
|
||||||
|
track.skeleton = updated_skeleton
|
||||||
|
track.noise_scale = updated_noise
|
||||||
|
track.status = "active"
|
||||||
|
track.misses = 0
|
||||||
|
track.lost_age = 0
|
||||||
|
track.score = max(track.score, 1.0) + len(matched)
|
||||||
|
track.last_bundle_index = bundle.bundle_index
|
||||||
|
recover_to_active.append(track_id)
|
||||||
|
else:
|
||||||
|
track.lost_age += 1
|
||||||
|
track.score -= 0.5
|
||||||
|
for track_id in recover_to_active:
|
||||||
|
self._diagnostics.reacquisitions += 1
|
||||||
|
self._active[track_id] = self._lost.pop(track_id)
|
||||||
|
|
||||||
|
def _build_proposals(self, bundle: FrameBundle, unmatched_indices: dict[str, list[int]]) -> tuple[ProposalCluster, ...]:
|
||||||
|
started_at = perf_counter()
|
||||||
|
try:
|
||||||
|
poses_2d, person_counts = pack_view_detections(bundle.views, unmatched_indices)
|
||||||
|
if int(np.sum(person_counts)) == 0:
|
||||||
|
return ()
|
||||||
|
trace = __import__("rpt").triangulate_debug(poses_2d, person_counts, self._rpt_config)
|
||||||
|
return extract_clusters(trace, tuple(frame.camera_name for frame in bundle.views))
|
||||||
|
finally:
|
||||||
|
self._diagnostics.proposal_build_calls += 1
|
||||||
|
self._diagnostics.proposal_build_seconds += perf_counter() - started_at
|
||||||
|
|
||||||
|
def _birth_candidate_proposals(self, proposals: tuple[ProposalCluster, ...]) -> tuple[ProposalCluster, ...]:
|
||||||
|
if not self._single_person_mode():
|
||||||
|
return proposals
|
||||||
|
if self._active or self._lost:
|
||||||
|
return ()
|
||||||
|
if not proposals:
|
||||||
|
return ()
|
||||||
|
return (max(proposals, key=_proposal_rank),)
|
||||||
|
|
||||||
|
def _refresh_single_person_track_from_proposals(
|
||||||
|
self,
|
||||||
|
bundle: FrameBundle,
|
||||||
|
proposals: tuple[ProposalCluster, ...],
|
||||||
|
) -> tuple[ProposalCluster, ...]:
|
||||||
|
if not self._single_person_mode() or not proposals:
|
||||||
|
return proposals
|
||||||
|
|
||||||
|
remaining = list(proposals)
|
||||||
|
if self._active:
|
||||||
|
track = next(iter(self._active.values()))
|
||||||
|
predicted_root = track.skeleton.pose3d[BODY20_INDEX_BY_NAME["hip_middle"], :3]
|
||||||
|
best_index = -1
|
||||||
|
best_distance = self._config.proposal_match_distance_m
|
||||||
|
for proposal_index, proposal in enumerate(remaining):
|
||||||
|
distance = float(np.linalg.norm(predicted_root - proposal.root))
|
||||||
|
if distance <= best_distance:
|
||||||
|
best_distance = distance
|
||||||
|
best_index = proposal_index
|
||||||
|
if best_index >= 0:
|
||||||
|
proposal = remaining.pop(best_index)
|
||||||
|
track.skeleton = self._seed_skeleton(proposal.pose3d, beta=track.skeleton.beta)
|
||||||
|
track.score = 0.85 * track.score + proposal.mean_score * proposal.support_size
|
||||||
|
track.misses = 0
|
||||||
|
track.last_bundle_index = bundle.bundle_index
|
||||||
|
self._diagnostics.active_updates += 1
|
||||||
|
return tuple(remaining)
|
||||||
|
|
||||||
|
if self._lost:
|
||||||
|
track_id, track = next(iter(self._lost.items()))
|
||||||
|
predicted_root = track.skeleton.pose3d[BODY20_INDEX_BY_NAME["hip_middle"], :3]
|
||||||
|
best_index = -1
|
||||||
|
best_distance = self._config.proposal_match_distance_m
|
||||||
|
for proposal_index, proposal in enumerate(remaining):
|
||||||
|
distance = float(np.linalg.norm(predicted_root - proposal.root))
|
||||||
|
if distance <= best_distance:
|
||||||
|
best_distance = distance
|
||||||
|
best_index = proposal_index
|
||||||
|
if best_index >= 0:
|
||||||
|
proposal = remaining.pop(best_index)
|
||||||
|
track = self._lost.pop(track_id)
|
||||||
|
track.skeleton = self._seed_skeleton(proposal.pose3d, beta=track.skeleton.beta)
|
||||||
|
track.status = "active"
|
||||||
|
track.misses = 0
|
||||||
|
track.lost_age = 0
|
||||||
|
track.score = max(track.score, 1.0) + proposal.mean_score * proposal.support_size
|
||||||
|
track.last_bundle_index = bundle.bundle_index
|
||||||
|
self._active[track_id] = track
|
||||||
|
self._diagnostics.active_updates += 1
|
||||||
|
self._diagnostics.reacquisitions += 1
|
||||||
|
return tuple(remaining)
|
||||||
|
|
||||||
|
return proposals
|
||||||
|
|
||||||
|
def _update_tentative_tracks(self, bundle: FrameBundle, proposals: tuple[ProposalCluster, ...]) -> None:
|
||||||
|
for track in self._tentative.values():
|
||||||
|
track.age += 1
|
||||||
|
track.misses += 1
|
||||||
|
|
||||||
|
for proposal in proposals:
|
||||||
|
best_track_id = -1
|
||||||
|
best_distance = self._config.proposal_match_distance_m
|
||||||
|
for track_id, track in self._tentative.items():
|
||||||
|
distance = float(np.linalg.norm(track.root - proposal.root))
|
||||||
|
if distance < best_distance:
|
||||||
|
best_distance = distance
|
||||||
|
best_track_id = track_id
|
||||||
|
if best_track_id < 0:
|
||||||
|
track_id = self._next_track_id
|
||||||
|
self._next_track_id += 1
|
||||||
|
self._tentative[track_id] = TentativeTrackState(
|
||||||
|
track_id=track_id,
|
||||||
|
age=1,
|
||||||
|
misses=0,
|
||||||
|
score=proposal.mean_score * proposal.support_size,
|
||||||
|
last_bundle_index=bundle.bundle_index,
|
||||||
|
root=proposal.root.copy(),
|
||||||
|
pose3d=_copy_pose3d(proposal.pose3d),
|
||||||
|
evidence_buffer=[_copy_pose3d(proposal.pose3d)],
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
|
||||||
|
track = self._tentative[best_track_id]
|
||||||
|
track.root = proposal.root.copy()
|
||||||
|
track.pose3d = _copy_pose3d(proposal.pose3d)
|
||||||
|
track.score = 0.8 * track.score + proposal.mean_score * proposal.support_size
|
||||||
|
track.misses = 0
|
||||||
|
track.last_bundle_index = bundle.bundle_index
|
||||||
|
track.evidence_buffer.append(_copy_pose3d(proposal.pose3d))
|
||||||
|
if len(track.evidence_buffer) > self._config.tentative_buffer_size:
|
||||||
|
track.evidence_buffer.pop(0)
|
||||||
|
|
||||||
|
def _promote_tentative_tracks(self, bundle: FrameBundle) -> None:
|
||||||
|
promote_ids: list[int] = []
|
||||||
|
for track_id, track in self._tentative.items():
|
||||||
|
if track.age < self._config.tentative_min_age:
|
||||||
|
continue
|
||||||
|
if len(track.evidence_buffer) < self._config.tentative_hits_required:
|
||||||
|
continue
|
||||||
|
if track.score < self._config.tentative_promote_score:
|
||||||
|
continue
|
||||||
|
if _tentative_root_spread(track) > self._config.proposal_match_distance_m:
|
||||||
|
continue
|
||||||
|
promote_ids.append(track_id)
|
||||||
|
|
||||||
|
for track_id in promote_ids:
|
||||||
|
tentative = self._tentative.pop(track_id)
|
||||||
|
mean_pose = np.mean(np.stack(tentative.evidence_buffer, axis=0), axis=0)
|
||||||
|
self._active[track_id] = ActiveTrackState(
|
||||||
|
track_id=track_id,
|
||||||
|
status="active",
|
||||||
|
misses=0,
|
||||||
|
lost_age=0,
|
||||||
|
score=max(tentative.score, 1.0),
|
||||||
|
last_bundle_index=bundle.bundle_index,
|
||||||
|
skeleton=self._seed_skeleton(mean_pose),
|
||||||
|
noise_scale=np.full((20,), 9.0, dtype=np.float64),
|
||||||
|
)
|
||||||
|
self._diagnostics.promotions += 1
|
||||||
|
|
||||||
|
def _reacquire_lost_tracks(self, bundle: FrameBundle, proposals: tuple[ProposalCluster, ...]) -> None:
|
||||||
|
used_indices: set[int] = set()
|
||||||
|
reactivate: list[tuple[int, int]] = []
|
||||||
|
for track_id, track in self._lost.items():
|
||||||
|
predicted_root = track.skeleton.pose3d[BODY20_INDEX_BY_NAME["hip_middle"], :3]
|
||||||
|
best_index = -1
|
||||||
|
best_distance = self._config.proposal_match_distance_m
|
||||||
|
for proposal_index, proposal in enumerate(proposals):
|
||||||
|
if proposal_index in used_indices:
|
||||||
|
continue
|
||||||
|
distance = float(np.linalg.norm(predicted_root - proposal.root))
|
||||||
|
if distance <= best_distance:
|
||||||
|
best_distance = distance
|
||||||
|
best_index = proposal_index
|
||||||
|
if best_index >= 0:
|
||||||
|
used_indices.add(best_index)
|
||||||
|
reactivate.append((track_id, best_index))
|
||||||
|
for track_id, proposal_index in reactivate:
|
||||||
|
if track_id not in self._lost:
|
||||||
|
continue
|
||||||
|
track = self._lost.pop(track_id)
|
||||||
|
proposal = proposals[proposal_index]
|
||||||
|
track.skeleton = self._seed_skeleton(proposal.pose3d, beta=track.skeleton.beta)
|
||||||
|
track.status = "active"
|
||||||
|
track.misses = 0
|
||||||
|
track.lost_age = 0
|
||||||
|
track.score = max(track.score, 1.0)
|
||||||
|
track.last_bundle_index = bundle.bundle_index
|
||||||
|
self._active[track_id] = track
|
||||||
|
self._diagnostics.reacquisitions += 1
|
||||||
|
|
||||||
|
def _delete_expired_tracks(self) -> None:
|
||||||
|
tentative_delete = [
|
||||||
|
track_id for track_id, track in self._tentative.items() if track.misses > self._config.tentative_max_misses
|
||||||
|
]
|
||||||
|
for track_id in tentative_delete:
|
||||||
|
self._tentative.pop(track_id, None)
|
||||||
|
lost_delete = [track_id for track_id, track in self._lost.items() if track.lost_age >= self._config.lost_delete_age]
|
||||||
|
for track_id in lost_delete:
|
||||||
|
self._lost.pop(track_id, None)
|
||||||
@@ -0,0 +1,77 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pyarrow as pa
|
||||||
|
import pyarrow.parquet as pq
|
||||||
|
|
||||||
|
from pose_tracking_exp.actualtest import load_actualtest_scene, load_actualtest_segment_bundles
|
||||||
|
from pose_tracking_exp.joints import BODY20_INDEX_BY_NAME
|
||||||
|
|
||||||
|
|
||||||
|
def _write_parquet(path: Path, rows: list[dict[str, object]]) -> None:
|
||||||
|
path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
pq.write_table(pa.Table.from_pylist(rows), path)
|
||||||
|
|
||||||
|
|
||||||
|
def _sample_rtmpose_detection() -> tuple[list[float], list[list[float]], list[float]]:
|
||||||
|
keypoints_xy = np.zeros((133, 2), dtype=np.float64)
|
||||||
|
scores = np.zeros((133,), dtype=np.float64)
|
||||||
|
keypoints_xy[5] = [10.0, 20.0]
|
||||||
|
keypoints_xy[6] = [30.0, 20.0]
|
||||||
|
keypoints_xy[11] = [12.0, 60.0]
|
||||||
|
keypoints_xy[12] = [28.0, 60.0]
|
||||||
|
keypoints_xy[0] = [20.0, 8.0]
|
||||||
|
scores[[0, 5, 6, 11, 12]] = 1.0
|
||||||
|
return [8.0, 4.0, 32.0, 64.0], keypoints_xy.tolist(), scores.tolist()
|
||||||
|
|
||||||
|
|
||||||
|
def test_load_actualtest_parquet_scene_and_segment(tmp_path: Path) -> None:
|
||||||
|
root = tmp_path / "ActualTest_WeiHua"
|
||||||
|
_write_parquet(
|
||||||
|
root / "camera_params" / "camera_params.parquet",
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"name": "AF_02",
|
||||||
|
"port": 5602,
|
||||||
|
"intrinsic": {
|
||||||
|
"camera_matrix": [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]],
|
||||||
|
"distortion_coefficients": [0.0, 0.0, 0.0, 0.0, 0.0],
|
||||||
|
},
|
||||||
|
"extrinsic": {"rvec": [0.0, 0.0, 0.0], "tvec": [0.0, 0.0, 0.0]},
|
||||||
|
"resolution": {"width": 640, "height": 480},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "AF_03",
|
||||||
|
"port": 5603,
|
||||||
|
"intrinsic": {
|
||||||
|
"camera_matrix": [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]],
|
||||||
|
"distortion_coefficients": [0.0, 0.0, 0.0, 0.0, 0.0],
|
||||||
|
},
|
||||||
|
"extrinsic": {"rvec": [0.0, 0.0, 0.0], "tvec": [1.0, 0.0, 0.0]},
|
||||||
|
"resolution": {"width": 640, "height": 480},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
)
|
||||||
|
box, keypoints_xy, scores = _sample_rtmpose_detection()
|
||||||
|
for camera_name in ("5602", "5603"):
|
||||||
|
_write_parquet(
|
||||||
|
root / "Segment_1" / f"{camera_name}_detected.parquet",
|
||||||
|
[
|
||||||
|
{"frame_index": 689, "boxes": [], "kps": [], "kps_scores": []},
|
||||||
|
{"frame_index": 690, "boxes": [box], "kps": [keypoints_xy], "kps_scores": [scores]},
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
scene = load_actualtest_scene(root)
|
||||||
|
bundles = load_actualtest_segment_bundles(root, "Segment_1", frame_start=690, max_frames=1)
|
||||||
|
|
||||||
|
assert [camera.name for camera in scene.cameras] == ["5602", "5603"]
|
||||||
|
np.testing.assert_allclose(scene.cameras[0].pose_T, [0.0, 0.0, 0.0])
|
||||||
|
np.testing.assert_allclose(scene.cameras[1].pose_T, [-1.0, 0.0, 0.0])
|
||||||
|
assert len(bundles) == 1
|
||||||
|
assert [view.camera_name for view in bundles[0].views] == ["5602", "5603"]
|
||||||
|
assert bundles[0].views[0].frame_index == 690
|
||||||
|
np.testing.assert_allclose(
|
||||||
|
bundles[0].views[0].detections[0].keypoints[BODY20_INDEX_BY_NAME["hip_middle"], :2],
|
||||||
|
[20.0, 60.0],
|
||||||
|
)
|
||||||
@@ -0,0 +1,148 @@
|
|||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import NamedTuple, cast
|
||||||
|
|
||||||
|
import cv2
|
||||||
|
import numpy as np
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
pytest.importorskip("rpt")
|
||||||
|
|
||||||
|
from pose_tracking_exp.models import CameraCalibration, SceneConfig
|
||||||
|
from pose_tracking_exp.replay import load_scene_file
|
||||||
|
from pose_tracking_exp.rpt_adapter import build_rpt_config
|
||||||
|
|
||||||
|
|
||||||
|
class _CameraArgs(NamedTuple):
|
||||||
|
name: str
|
||||||
|
width: int
|
||||||
|
height: int
|
||||||
|
K: np.ndarray
|
||||||
|
DC: np.ndarray
|
||||||
|
model: str
|
||||||
|
|
||||||
|
|
||||||
|
def _camera_args() -> _CameraArgs:
|
||||||
|
return _CameraArgs(
|
||||||
|
name="cam0",
|
||||||
|
width=640,
|
||||||
|
height=480,
|
||||||
|
K=np.asarray([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]], dtype=np.float64),
|
||||||
|
DC=np.zeros(5, dtype=np.float64),
|
||||||
|
model="pinhole",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_from_opencv_extrinsics_derives_rpt_pose() -> None:
|
||||||
|
args = _camera_args()
|
||||||
|
rotation_vec = np.asarray([0.0, 0.2, 0.0], dtype=np.float64).reshape(3, 1)
|
||||||
|
rotation, _ = cv2.Rodrigues(rotation_vec)
|
||||||
|
translation = np.asarray([0.5, -0.1, 2.0], dtype=np.float64)
|
||||||
|
|
||||||
|
camera = CameraCalibration.from_opencv_extrinsics(
|
||||||
|
name=args.name,
|
||||||
|
width=args.width,
|
||||||
|
height=args.height,
|
||||||
|
K=args.K,
|
||||||
|
DC=args.DC,
|
||||||
|
model=args.model,
|
||||||
|
R=rotation,
|
||||||
|
T=translation,
|
||||||
|
rvec=rotation_vec.reshape(3),
|
||||||
|
)
|
||||||
|
|
||||||
|
np.testing.assert_allclose(camera.pose_R, rotation.T)
|
||||||
|
np.testing.assert_allclose(camera.pose_T, -(rotation.T @ translation))
|
||||||
|
|
||||||
|
|
||||||
|
def test_from_rpt_pose_derives_opencv_extrinsics() -> None:
|
||||||
|
args = _camera_args()
|
||||||
|
pose_rotation_vec = np.asarray([0.0, -0.3, 0.0], dtype=np.float64).reshape(3, 1)
|
||||||
|
pose_rotation, _ = cv2.Rodrigues(pose_rotation_vec)
|
||||||
|
pose_translation = np.asarray([1.5, 0.2, -0.4], dtype=np.float64)
|
||||||
|
|
||||||
|
camera = CameraCalibration.from_rpt_pose(
|
||||||
|
name=args.name,
|
||||||
|
width=args.width,
|
||||||
|
height=args.height,
|
||||||
|
K=args.K,
|
||||||
|
DC=args.DC,
|
||||||
|
model=args.model,
|
||||||
|
R=pose_rotation,
|
||||||
|
T=pose_translation,
|
||||||
|
)
|
||||||
|
|
||||||
|
np.testing.assert_allclose(camera.pose_R, pose_rotation)
|
||||||
|
np.testing.assert_allclose(camera.pose_T, pose_translation)
|
||||||
|
np.testing.assert_allclose(camera.R, pose_rotation.T)
|
||||||
|
np.testing.assert_allclose(camera.T, -(pose_rotation.T @ pose_translation))
|
||||||
|
|
||||||
|
|
||||||
|
def test_load_scene_file_supports_explicit_rpt_pose(tmp_path: Path) -> None:
|
||||||
|
scene_path = tmp_path / "scene.json"
|
||||||
|
payload = {
|
||||||
|
"extrinsic_format": "rpt_camera_pose",
|
||||||
|
"room_size": [6.0, 4.0, 3.0],
|
||||||
|
"room_center": [0.0, 0.0, 1.0],
|
||||||
|
"cameras": [
|
||||||
|
{
|
||||||
|
"name": "cam0",
|
||||||
|
"width": 640,
|
||||||
|
"height": 480,
|
||||||
|
"K": [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]],
|
||||||
|
"DC": [0.0, 0.0, 0.0, 0.0, 0.0],
|
||||||
|
"R": [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
|
||||||
|
"T": [[1.0], [2.0], [3.0]],
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}
|
||||||
|
scene_path.write_text(json.dumps(payload), encoding="utf-8")
|
||||||
|
|
||||||
|
scene = load_scene_file(scene_path)
|
||||||
|
|
||||||
|
np.testing.assert_allclose(scene.cameras[0].pose_T, [1.0, 2.0, 3.0])
|
||||||
|
np.testing.assert_allclose(scene.cameras[0].T, [-1.0, -2.0, -3.0])
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_rpt_config_uses_pose_convention(monkeypatch: pytest.MonkeyPatch) -> None:
|
||||||
|
args = _camera_args()
|
||||||
|
camera = CameraCalibration.from_opencv_extrinsics(
|
||||||
|
name=args.name,
|
||||||
|
width=args.width,
|
||||||
|
height=args.height,
|
||||||
|
K=args.K,
|
||||||
|
DC=args.DC,
|
||||||
|
model=args.model,
|
||||||
|
R=np.eye(3, dtype=np.float64),
|
||||||
|
T=np.asarray([1.0, 2.0, 3.0], dtype=np.float64),
|
||||||
|
rvec=np.zeros(3, dtype=np.float64),
|
||||||
|
)
|
||||||
|
scene = SceneConfig(
|
||||||
|
room_size=np.asarray([6.0, 4.0, 3.0], dtype=np.float64),
|
||||||
|
room_center=np.asarray([0.0, 0.0, 1.0], dtype=np.float64),
|
||||||
|
cameras=(camera,),
|
||||||
|
)
|
||||||
|
captured: dict[str, object] = {}
|
||||||
|
|
||||||
|
def fake_make_triangulation_config(
|
||||||
|
cameras: list[dict[str, object]],
|
||||||
|
roomparams: np.ndarray,
|
||||||
|
joint_names: list[str],
|
||||||
|
*,
|
||||||
|
min_match_score: float,
|
||||||
|
min_group_size: int,
|
||||||
|
) -> dict[str, object]:
|
||||||
|
captured["cameras"] = cameras
|
||||||
|
captured["roomparams"] = roomparams
|
||||||
|
captured["joint_names"] = joint_names
|
||||||
|
captured["min_match_score"] = min_match_score
|
||||||
|
captured["min_group_size"] = min_group_size
|
||||||
|
return captured
|
||||||
|
|
||||||
|
monkeypatch.setattr("pose_tracking_exp.rpt_adapter.rpt.make_triangulation_config", fake_make_triangulation_config)
|
||||||
|
|
||||||
|
build_rpt_config(scene, min_match_score=0.5, min_group_size=2)
|
||||||
|
|
||||||
|
camera_payload = cast(list[dict[str, object]], captured["cameras"])[0]
|
||||||
|
assert camera_payload["R"] == camera.pose_R.tolist()
|
||||||
|
assert camera_payload["T"] == camera.pose_T.reshape(3, 1).tolist()
|
||||||
@@ -0,0 +1,49 @@
|
|||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from pose_tracking_exp.joints import BODY20_INDEX_BY_NAME
|
||||||
|
from pose_tracking_exp.kinematics import seed_state_from_pose3d
|
||||||
|
|
||||||
|
|
||||||
|
def _sample_pose3d() -> np.ndarray:
|
||||||
|
pose = np.zeros((20, 4), dtype=np.float64)
|
||||||
|
joint_positions = {
|
||||||
|
"hip_middle": [0.0, 1.0, 3.0],
|
||||||
|
"hip_left": [0.12, 1.0, 3.0],
|
||||||
|
"hip_right": [-0.12, 1.0, 3.0],
|
||||||
|
"shoulder_middle": [0.0, 1.52, 3.0],
|
||||||
|
"shoulder_left": [0.18, 1.52, 3.0],
|
||||||
|
"shoulder_right": [-0.18, 1.52, 3.0],
|
||||||
|
"elbow_left": [0.42, 1.48, 3.02],
|
||||||
|
"elbow_right": [-0.42, 1.48, 3.02],
|
||||||
|
"wrist_left": [0.64, 1.45, 3.04],
|
||||||
|
"wrist_right": [-0.64, 1.45, 3.04],
|
||||||
|
"knee_left": [0.1, 0.58, 3.0],
|
||||||
|
"knee_right": [-0.1, 0.58, 3.0],
|
||||||
|
"ankle_left": [0.1, 0.15, 3.02],
|
||||||
|
"ankle_right": [-0.1, 0.15, 3.02],
|
||||||
|
"head": [0.0, 1.82, 3.02],
|
||||||
|
"nose": [0.0, 1.8, 3.06],
|
||||||
|
"eye_left": [0.03, 1.81, 3.05],
|
||||||
|
"eye_right": [-0.03, 1.81, 3.05],
|
||||||
|
"ear_left": [0.06, 1.81, 3.02],
|
||||||
|
"ear_right": [-0.06, 1.81, 3.02],
|
||||||
|
}
|
||||||
|
for name, position in joint_positions.items():
|
||||||
|
pose[BODY20_INDEX_BY_NAME[name], :3] = position
|
||||||
|
pose[BODY20_INDEX_BY_NAME[name], 3] = 1.0
|
||||||
|
return pose
|
||||||
|
|
||||||
|
|
||||||
|
def test_seed_state_from_pose3d_does_not_call_least_squares(monkeypatch) -> None:
|
||||||
|
def fail_least_squares(*args: object, **kwargs: object) -> object:
|
||||||
|
raise AssertionError("seed_state_from_pose3d should not call scipy.optimize.least_squares")
|
||||||
|
|
||||||
|
monkeypatch.setattr("pose_tracking_exp.kinematics.least_squares", fail_least_squares)
|
||||||
|
state = seed_state_from_pose3d(_sample_pose3d())
|
||||||
|
|
||||||
|
assert state.parameters.shape == (31,)
|
||||||
|
assert state.beta.shape == (8,)
|
||||||
|
np.testing.assert_allclose(
|
||||||
|
state.parameters[:3],
|
||||||
|
_sample_pose3d()[BODY20_INDEX_BY_NAME["hip_middle"], :3],
|
||||||
|
)
|
||||||
@@ -0,0 +1,156 @@
|
|||||||
|
import base64
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from pose_tracking_exp.joints import BODY20_INDEX_BY_NAME
|
||||||
|
from pose_tracking_exp.normalization import normalize_rtmpose_body20
|
||||||
|
from pose_tracking_exp.parajumping import PROTOCOL_HEADER, convert_payload_record, decode_pose_payload
|
||||||
|
from pose_tracking_exp.replay import load_replay_file, load_scene_file
|
||||||
|
from pose_tracking_exp.sync import synchronize_frames
|
||||||
|
|
||||||
|
|
||||||
|
def _encode_payload(
|
||||||
|
*,
|
||||||
|
frame_index: int,
|
||||||
|
reference_size: tuple[int, int],
|
||||||
|
boxes: np.ndarray,
|
||||||
|
box_scores: np.ndarray,
|
||||||
|
keypoints_xy: np.ndarray,
|
||||||
|
keypoint_scores: np.ndarray,
|
||||||
|
timestamp_unix_ns: int,
|
||||||
|
) -> bytes:
|
||||||
|
return (
|
||||||
|
PROTOCOL_HEADER
|
||||||
|
+ int(frame_index).to_bytes(4, "little")
|
||||||
|
+ np.asarray(reference_size, dtype="<u2").tobytes()
|
||||||
|
+ int(boxes.shape[0]).to_bytes(1, "little")
|
||||||
|
+ np.asarray(boxes, dtype="<u2").tobytes()
|
||||||
|
+ int(box_scores.shape[0]).to_bytes(1, "little")
|
||||||
|
+ np.asarray(box_scores, dtype=np.uint8).tobytes()
|
||||||
|
+ int(keypoints_xy.shape[0]).to_bytes(1, "little")
|
||||||
|
+ np.asarray(keypoints_xy, dtype="<u2").tobytes()
|
||||||
|
+ int(keypoint_scores.size).to_bytes(1, "little")
|
||||||
|
+ np.asarray(keypoint_scores, dtype=np.uint8).reshape(-1).tobytes()
|
||||||
|
+ int(timestamp_unix_ns).to_bytes(8, "little")
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_normalize_rtmpose_body20_derives_midpoints_and_head():
|
||||||
|
keypoints = np.zeros((133, 2), dtype=np.float64)
|
||||||
|
scores = np.zeros((133,), dtype=np.float64)
|
||||||
|
keypoints[5] = [10.0, 20.0]
|
||||||
|
keypoints[6] = [30.0, 20.0]
|
||||||
|
keypoints[11] = [12.0, 60.0]
|
||||||
|
keypoints[12] = [28.0, 60.0]
|
||||||
|
keypoints[0] = [20.0, 8.0]
|
||||||
|
scores[[0, 5, 6, 11, 12]] = 1.0
|
||||||
|
|
||||||
|
normalized = normalize_rtmpose_body20(keypoints, scores)
|
||||||
|
|
||||||
|
np.testing.assert_allclose(normalized[BODY20_INDEX_BY_NAME["hip_middle"], :2], [20.0, 60.0])
|
||||||
|
np.testing.assert_allclose(normalized[BODY20_INDEX_BY_NAME["shoulder_middle"], :2], [20.0, 20.0])
|
||||||
|
np.testing.assert_allclose(normalized[BODY20_INDEX_BY_NAME["head"], :2], [20.0, 8.0])
|
||||||
|
|
||||||
|
|
||||||
|
def test_decode_payload_and_convert_record():
|
||||||
|
keypoints_xy = np.zeros((1, 133, 2), dtype=np.uint16)
|
||||||
|
keypoint_scores = np.zeros((1, 133), dtype=np.uint8)
|
||||||
|
keypoints_xy[0, 5] = [100, 200]
|
||||||
|
keypoints_xy[0, 6] = [200, 200]
|
||||||
|
keypoints_xy[0, 11] = [110, 400]
|
||||||
|
keypoints_xy[0, 12] = [190, 400]
|
||||||
|
keypoints_xy[0, 0] = [150, 120]
|
||||||
|
keypoint_scores[0, [0, 5, 6, 11, 12]] = 255
|
||||||
|
|
||||||
|
payload = _encode_payload(
|
||||||
|
frame_index=7,
|
||||||
|
reference_size=(640, 480),
|
||||||
|
boxes=np.asarray([[90, 100, 210, 420]], dtype=np.uint16),
|
||||||
|
box_scores=np.asarray([200], dtype=np.uint8),
|
||||||
|
keypoints_xy=keypoints_xy,
|
||||||
|
keypoint_scores=keypoint_scores,
|
||||||
|
timestamp_unix_ns=1234,
|
||||||
|
)
|
||||||
|
decoded = decode_pose_payload(payload)
|
||||||
|
assert decoded.frame_index == 7
|
||||||
|
assert decoded.reference_size == (640, 480)
|
||||||
|
assert len(decoded.detections) == 1
|
||||||
|
np.testing.assert_allclose(
|
||||||
|
decoded.detections[0].keypoints[BODY20_INDEX_BY_NAME["hip_middle"], :2],
|
||||||
|
[150.0, 400.0],
|
||||||
|
)
|
||||||
|
|
||||||
|
converted = convert_payload_record({"camera": "cam0", "payload_b64": base64.b64encode(payload).decode("ascii")})
|
||||||
|
assert converted["camera"] == "cam0"
|
||||||
|
assert converted["frame_index"] == 7
|
||||||
|
|
||||||
|
|
||||||
|
def test_load_replay_and_synchronize(tmp_path: Path):
|
||||||
|
scene_path = tmp_path / "scene.json"
|
||||||
|
replay_path = tmp_path / "replay.jsonl"
|
||||||
|
scene_path.write_text(
|
||||||
|
json.dumps(
|
||||||
|
{
|
||||||
|
"room_size": [6.0, 4.0, 3.0],
|
||||||
|
"room_center": [0.0, 0.0, 1.0],
|
||||||
|
"cameras": [
|
||||||
|
{
|
||||||
|
"name": "cam0",
|
||||||
|
"width": 640,
|
||||||
|
"height": 480,
|
||||||
|
"K": [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]],
|
||||||
|
"DC": [0.0, 0.0, 0.0, 0.0, 0.0],
|
||||||
|
"R": [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
|
||||||
|
"T": [[0.0], [0.0], [0.0]],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "cam1",
|
||||||
|
"width": 640,
|
||||||
|
"height": 480,
|
||||||
|
"K": [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]],
|
||||||
|
"DC": [0.0, 0.0, 0.0, 0.0, 0.0],
|
||||||
|
"R": [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
|
||||||
|
"T": [[1.0], [0.0], [0.0]],
|
||||||
|
},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
),
|
||||||
|
encoding="utf-8",
|
||||||
|
)
|
||||||
|
pose = np.zeros((20, 3), dtype=np.float64)
|
||||||
|
replay_path.write_text(
|
||||||
|
"\n".join(
|
||||||
|
[
|
||||||
|
json.dumps(
|
||||||
|
{
|
||||||
|
"camera": "cam0",
|
||||||
|
"frame_index": 0,
|
||||||
|
"timestamp_unix_ns": 1000,
|
||||||
|
"source_size": [640, 480],
|
||||||
|
"detections": [{"bbox": [0, 0, 1, 1], "bbox_confidence": 1.0, "keypoints": pose.tolist()}],
|
||||||
|
}
|
||||||
|
),
|
||||||
|
json.dumps(
|
||||||
|
{
|
||||||
|
"camera": "cam1",
|
||||||
|
"frame_index": 0,
|
||||||
|
"timestamp_unix_ns": 1006,
|
||||||
|
"source_size": [640, 480],
|
||||||
|
"detections": [{"bbox": [0, 0, 1, 1], "bbox_confidence": 1.0, "keypoints": pose.tolist()}],
|
||||||
|
}
|
||||||
|
),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
+ "\n",
|
||||||
|
encoding="utf-8",
|
||||||
|
)
|
||||||
|
|
||||||
|
scene = load_scene_file(scene_path)
|
||||||
|
assert len(scene.cameras) == 2
|
||||||
|
replay = load_replay_file(scene_path, replay_path)
|
||||||
|
bundles = synchronize_frames(replay, max_skew_ns=20, min_views=2)
|
||||||
|
assert len(bundles) == 1
|
||||||
|
assert {frame.camera_name for frame in bundles[0].views} == {"cam0", "cam1"}
|
||||||
|
|
||||||
@@ -0,0 +1,156 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
pytest.importorskip("rpt")
|
||||||
|
|
||||||
|
from pose_tracking_exp.joints import BODY20_INDEX_BY_NAME
|
||||||
|
from pose_tracking_exp.models import CameraCalibration, CameraFrame, FrameBundle, ProposalCluster, SceneConfig, TrackerConfig
|
||||||
|
from pose_tracking_exp.tracker import PoseTracker
|
||||||
|
|
||||||
|
|
||||||
|
def _make_scene() -> SceneConfig:
|
||||||
|
cameras = (
|
||||||
|
CameraCalibration(
|
||||||
|
name="cam0",
|
||||||
|
width=640,
|
||||||
|
height=480,
|
||||||
|
K=np.asarray([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]], dtype=np.float64),
|
||||||
|
DC=np.zeros(5, dtype=np.float64),
|
||||||
|
R=np.eye(3, dtype=np.float64),
|
||||||
|
T=np.zeros(3, dtype=np.float64),
|
||||||
|
),
|
||||||
|
CameraCalibration(
|
||||||
|
name="cam1",
|
||||||
|
width=640,
|
||||||
|
height=480,
|
||||||
|
K=np.asarray([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]], dtype=np.float64),
|
||||||
|
DC=np.zeros(5, dtype=np.float64),
|
||||||
|
R=np.eye(3, dtype=np.float64),
|
||||||
|
T=np.asarray([1.0, 0.0, 0.0], dtype=np.float64),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
return SceneConfig(
|
||||||
|
room_size=np.asarray([6.0, 4.0, 3.0], dtype=np.float64),
|
||||||
|
room_center=np.asarray([0.0, 0.0, 1.0], dtype=np.float64),
|
||||||
|
cameras=cameras,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _make_bundle(bundle_index: int) -> FrameBundle:
|
||||||
|
views = tuple(
|
||||||
|
CameraFrame(
|
||||||
|
camera_name=camera_name,
|
||||||
|
frame_index=bundle_index,
|
||||||
|
timestamp_unix_ns=1_000_000_000 + bundle_index * 33_000_000,
|
||||||
|
detections=(),
|
||||||
|
source_size=(640, 480),
|
||||||
|
)
|
||||||
|
for camera_name in ("cam0", "cam1")
|
||||||
|
)
|
||||||
|
return FrameBundle(
|
||||||
|
bundle_index=bundle_index,
|
||||||
|
timestamp_unix_ns=views[0].timestamp_unix_ns,
|
||||||
|
views=views,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _make_proposal(root_x: float, *, score: float = 1.0) -> ProposalCluster:
|
||||||
|
pose = np.zeros((20, 4), dtype=np.float64)
|
||||||
|
joint_positions = {
|
||||||
|
"hip_middle": [root_x, 1.0, 3.0],
|
||||||
|
"hip_left": [root_x + 0.12, 1.0, 3.0],
|
||||||
|
"hip_right": [root_x - 0.12, 1.0, 3.0],
|
||||||
|
"shoulder_middle": [root_x, 1.52, 3.0],
|
||||||
|
"shoulder_left": [root_x + 0.18, 1.52, 3.0],
|
||||||
|
"shoulder_right": [root_x - 0.18, 1.52, 3.0],
|
||||||
|
"elbow_left": [root_x + 0.42, 1.48, 3.02],
|
||||||
|
"elbow_right": [root_x - 0.42, 1.48, 3.02],
|
||||||
|
"wrist_left": [root_x + 0.64, 1.45, 3.04],
|
||||||
|
"wrist_right": [root_x - 0.64, 1.45, 3.04],
|
||||||
|
"knee_left": [root_x + 0.1, 0.58, 3.0],
|
||||||
|
"knee_right": [root_x - 0.1, 0.58, 3.0],
|
||||||
|
"ankle_left": [root_x + 0.1, 0.15, 3.02],
|
||||||
|
"ankle_right": [root_x - 0.1, 0.15, 3.02],
|
||||||
|
"head": [root_x, 1.82, 3.02],
|
||||||
|
"nose": [root_x, 1.8, 3.06],
|
||||||
|
"eye_left": [root_x + 0.03, 1.81, 3.05],
|
||||||
|
"eye_right": [root_x - 0.03, 1.81, 3.05],
|
||||||
|
"ear_left": [root_x + 0.06, 1.81, 3.02],
|
||||||
|
"ear_right": [root_x - 0.06, 1.81, 3.02],
|
||||||
|
}
|
||||||
|
for name, position in joint_positions.items():
|
||||||
|
pose[BODY20_INDEX_BY_NAME[name], :3] = position
|
||||||
|
pose[BODY20_INDEX_BY_NAME[name], 3] = score
|
||||||
|
return ProposalCluster(
|
||||||
|
pose3d=pose,
|
||||||
|
root=np.asarray([root_x, 1.0, 3.0], dtype=np.float64),
|
||||||
|
source_views=frozenset({"cam0", "cam1"}),
|
||||||
|
support_size=2,
|
||||||
|
mean_score=score,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def test_single_person_mode_caps_active_tracks(monkeypatch) -> None:
|
||||||
|
tracker = PoseTracker(
|
||||||
|
_make_scene(),
|
||||||
|
TrackerConfig(
|
||||||
|
mode="single_person",
|
||||||
|
tentative_min_age=1,
|
||||||
|
tentative_hits_required=1,
|
||||||
|
tentative_promote_score=0.0,
|
||||||
|
active_miss_to_lost=5,
|
||||||
|
proposal_min_score=0.5,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
proposals_by_bundle = {
|
||||||
|
0: (_make_proposal(0.0, score=0.95), _make_proposal(0.9, score=0.7)),
|
||||||
|
1: (_make_proposal(0.05, score=0.96), _make_proposal(0.85, score=0.75)),
|
||||||
|
}
|
||||||
|
|
||||||
|
monkeypatch.setattr(
|
||||||
|
tracker,
|
||||||
|
"_build_proposals",
|
||||||
|
lambda bundle, unmatched: proposals_by_bundle[bundle.bundle_index],
|
||||||
|
)
|
||||||
|
|
||||||
|
results = tracker.run([_make_bundle(0), _make_bundle(1)])
|
||||||
|
|
||||||
|
assert len(results[0].active_tracks) == 1
|
||||||
|
assert len(results[1].active_tracks) == 1
|
||||||
|
assert not results[1].tentative_tracks
|
||||||
|
assert [track.track_id for track in results[1].active_tracks] == [1]
|
||||||
|
|
||||||
|
|
||||||
|
def test_single_person_mode_reuses_lost_track_id(monkeypatch) -> None:
|
||||||
|
tracker = PoseTracker(
|
||||||
|
_make_scene(),
|
||||||
|
TrackerConfig(
|
||||||
|
mode="single_person",
|
||||||
|
tentative_min_age=1,
|
||||||
|
tentative_hits_required=1,
|
||||||
|
tentative_promote_score=0.0,
|
||||||
|
active_miss_to_lost=1,
|
||||||
|
lost_delete_age=10,
|
||||||
|
proposal_min_score=0.5,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
proposals_by_bundle = {
|
||||||
|
0: (_make_proposal(0.0, score=0.95),),
|
||||||
|
1: (),
|
||||||
|
2: (_make_proposal(0.05, score=0.96),),
|
||||||
|
}
|
||||||
|
|
||||||
|
monkeypatch.setattr(
|
||||||
|
tracker,
|
||||||
|
"_build_proposals",
|
||||||
|
lambda bundle, unmatched: proposals_by_bundle[bundle.bundle_index],
|
||||||
|
)
|
||||||
|
|
||||||
|
results = tracker.run([_make_bundle(0), _make_bundle(1), _make_bundle(2)])
|
||||||
|
|
||||||
|
assert [track.track_id for track in results[0].active_tracks] == [1]
|
||||||
|
assert [track.track_id for track in results[1].lost_tracks] == [1]
|
||||||
|
assert [track.track_id for track in results[2].active_tracks] == [1]
|
||||||
|
assert tracker.diagnostics_snapshot().reacquisitions >= 1
|
||||||
@@ -0,0 +1,67 @@
|
|||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
pytest.importorskip("rpt")
|
||||||
|
|
||||||
|
from pose_tracking_exp.models import CameraFrame, FrameBundle, PoseDetection, TrackerConfig
|
||||||
|
from pose_tracking_exp.replay import load_scene_file
|
||||||
|
from pose_tracking_exp.tracker import PoseTracker
|
||||||
|
|
||||||
|
RPT_ROOT = Path("/home/crosstyan/Code/RapidPoseTriangulation")
|
||||||
|
|
||||||
|
|
||||||
|
def test_tracker_promotes_rpt_sample_person():
|
||||||
|
scene = load_scene_file(RPT_ROOT / "data/p1/sample.json")
|
||||||
|
pose_payload = json.loads((RPT_ROOT / "tests/poses_p1.json").read_text(encoding="utf-8"))
|
||||||
|
view_poses = pose_payload["2D"]
|
||||||
|
|
||||||
|
tracker = PoseTracker(
|
||||||
|
scene,
|
||||||
|
TrackerConfig(
|
||||||
|
tentative_min_age=2,
|
||||||
|
tentative_hits_required=2,
|
||||||
|
tentative_promote_score=1.2,
|
||||||
|
proposal_min_score=0.5,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
bundles: list[FrameBundle] = []
|
||||||
|
for bundle_index in range(3):
|
||||||
|
views: list[CameraFrame] = []
|
||||||
|
for camera, detections in zip(scene.cameras, view_poses, strict=True):
|
||||||
|
pose_array = np.asarray(detections, dtype=np.float64)
|
||||||
|
frame_detections: list[PoseDetection] = []
|
||||||
|
for person_pose in pose_array:
|
||||||
|
mins = person_pose[:, :2].min(axis=0)
|
||||||
|
maxs = person_pose[:, :2].max(axis=0)
|
||||||
|
frame_detections.append(
|
||||||
|
PoseDetection(
|
||||||
|
bbox=np.asarray([mins[0], mins[1], maxs[0], maxs[1]], dtype=np.float64),
|
||||||
|
bbox_confidence=1.0,
|
||||||
|
keypoints=person_pose,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
views.append(
|
||||||
|
CameraFrame(
|
||||||
|
camera_name=camera.name,
|
||||||
|
frame_index=bundle_index,
|
||||||
|
timestamp_unix_ns=1_000_000_000 + bundle_index * 33_000_000,
|
||||||
|
detections=tuple(frame_detections),
|
||||||
|
source_size=(camera.width, camera.height),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
bundles.append(
|
||||||
|
FrameBundle(
|
||||||
|
bundle_index=bundle_index,
|
||||||
|
timestamp_unix_ns=views[0].timestamp_unix_ns,
|
||||||
|
views=tuple(views),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
results = tracker.run(bundles)
|
||||||
|
assert any(result.proposals for result in results)
|
||||||
|
assert any(result.active_tracks for result in results[1:])
|
||||||
|
assert len(results[-1].active_tracks) >= 1
|
||||||
@@ -0,0 +1,370 @@
|
|||||||
|
version = 1
|
||||||
|
revision = 3
|
||||||
|
requires-python = ">=3.12"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "basedpyright"
|
||||||
|
version = "1.38.4"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "nodejs-wheel-binaries" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/08/b4/26cb812eaf8ab56909c792c005fe1690706aef6f21d61107639e46e9c54c/basedpyright-1.38.4.tar.gz", hash = "sha256:8e7d4f37ffb6106621e06b9355025009cdf5b48f71c592432dd2dd304bf55e70", size = 25354730, upload-time = "2026-03-25T13:50:44.353Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/62/0b/3f95fd47def42479e61077523d3752086d5c12009192a7f1c9fd5507e687/basedpyright-1.38.4-py3-none-any.whl", hash = "sha256:90aa067cf3e8a3c17ad5836a72b9e1f046bc72a4ad57d928473d9368c9cd07a2", size = 12352258, upload-time = "2026-03-25T13:50:41.059Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "beartype"
|
||||||
|
version = "0.22.9"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/c7/94/1009e248bbfbab11397abca7193bea6626806be9a327d399810d523a07cb/beartype-0.22.9.tar.gz", hash = "sha256:8f82b54aa723a2848a56008d18875f91c1db02c32ef6a62319a002e3e25a975f", size = 1608866, upload-time = "2025-12-13T06:50:30.72Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/71/cc/18245721fa7747065ab478316c7fea7c74777d07f37ae60db2e84f8172e8/beartype-0.22.9-py3-none-any.whl", hash = "sha256:d16c9bbc61ea14637596c5f6fbff2ee99cbe3573e46a716401734ef50c3060c2", size = 1333658, upload-time = "2025-12-13T06:50:28.266Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "click"
|
||||||
|
version = "8.3.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "colorama"
|
||||||
|
version = "0.4.6"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "iniconfig"
|
||||||
|
version = "2.3.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "jaxtyping"
|
||||||
|
version = "0.3.9"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "wadler-lindig" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/c2/be/00294e369938937e31b094437d5ea040e4fd1a20b998ebe572c4a1dcfa68/jaxtyping-0.3.9.tar.gz", hash = "sha256:f8c02d1b623d5f1b6665d4f3ddaec675d70004f16a792102c2fc51264190951d", size = 45857, upload-time = "2026-02-16T10:35:13.263Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/94/05/3e39d416fb92b2738a76e8265e6bfc5d10542f90a7c32ad1eb831eea3fa3/jaxtyping-0.3.9-py3-none-any.whl", hash = "sha256:a00557a9d616eff157491f06ed2e21ed94886fad3832399273eb912b345da378", size = 56274, upload-time = "2026-02-16T10:35:11.795Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "nodejs-wheel-binaries"
|
||||||
|
version = "24.14.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/71/05/c75c0940b1ebf82975d14f37176679b6f3229eae8b47b6a70d1e1dae0723/nodejs_wheel_binaries-24.14.0.tar.gz", hash = "sha256:c87b515e44b0e4a523017d8c59f26ccbd05b54fe593338582825d4b51fc91e1c", size = 8057, upload-time = "2026-02-27T02:57:30.931Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/58/8c/b057c2db3551a6fe04e93dd14e33d810ac8907891534ffcc7a051b253858/nodejs_wheel_binaries-24.14.0-py2.py3-none-macosx_13_0_arm64.whl", hash = "sha256:59bb78b8eb08c3e32186da1ef913f1c806b5473d8bd0bb4492702092747b674a", size = 54798488, upload-time = "2026-02-27T02:56:56.831Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/30/88/7e1b29c067b6625c97c81eb8b0ef37cf5ad5b62bb81e23f4bde804910ec9/nodejs_wheel_binaries-24.14.0-py2.py3-none-macosx_13_0_x86_64.whl", hash = "sha256:348fa061b57625de7250d608e2d9b7c4bc170544da7e328325343860eadd59e5", size = 54972803, upload-time = "2026-02-27T02:57:01.696Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1e/e0/a83f0ff12faca2a56366462e572e38ac6f5cb361877bb29e289138eb7f24/nodejs_wheel_binaries-24.14.0-py2.py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:222dbf516ccc877afcad4e4789a81b4ee93daaa9f0ad97c464417d9597f49449", size = 59340859, upload-time = "2026-02-27T02:57:06.125Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e2/9f/06fad4ae8a723ae7096b5311eba67ad8b4df5f359c0a68e366750b7fef78/nodejs_wheel_binaries-24.14.0-py2.py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:b35d6fcccfe4fb0a409392d237fbc67796bac0d357b996bc12d057a1531a238b", size = 59838751, upload-time = "2026-02-27T02:57:10.449Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8c/72/4916dadc7307c3e9bcfa43b4b6f88237932d502c66f89eb2d90fb07810db/nodejs_wheel_binaries-24.14.0-py2.py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:519507fb74f3f2b296ab1e9f00dcc211f36bbfb93c60229e72dcdee9dafd301a", size = 61340534, upload-time = "2026-02-27T02:57:15.309Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2e/df/a8ba881ee5d04b04e0d93abc8ce501ff7292813583e97f9789eb3fc0472a/nodejs_wheel_binaries-24.14.0-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:68c93c52ff06d704bcb5ed160b4ba04ab1b291d238aaf996b03a5396e0e9a7ed", size = 61922394, upload-time = "2026-02-27T02:57:20.24Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/60/8c/b8c5f61201c72a0c7dc694b459941f89a6defda85deff258a9940a4e2efc/nodejs_wheel_binaries-24.14.0-py2.py3-none-win_amd64.whl", hash = "sha256:60b83c4e98b0c7d836ac9ccb67dcb36e343691cbe62cd325799ff9ed936286f3", size = 41218783, upload-time = "2026-02-27T02:57:24.175Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/91/23/1f904bc9cbd8eece393e20840c08ba3ac03440090c3a4e95168fa6d2709f/nodejs_wheel_binaries-24.14.0-py2.py3-none-win_arm64.whl", hash = "sha256:78a9bd1d6b11baf1433f9fb84962ff8aa71c87d48b6434f98224bc49a2253a6e", size = 38926103, upload-time = "2026-02-27T02:57:27.458Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "numpy"
|
||||||
|
version = "2.4.3"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/10/8b/c265f4823726ab832de836cdd184d0986dcf94480f81e8739692a7ac7af2/numpy-2.4.3.tar.gz", hash = "sha256:483a201202b73495f00dbc83796c6ae63137a9bdade074f7648b3e32613412dd", size = 20727743, upload-time = "2026-03-09T07:58:53.426Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a9/ed/6388632536f9788cea23a3a1b629f25b43eaacd7d7377e5d6bc7b9deb69b/numpy-2.4.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:61b0cbabbb6126c8df63b9a3a0c4b1f44ebca5e12ff6997b80fcf267fb3150ef", size = 16669628, upload-time = "2026-03-09T07:56:24.252Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/1b/ee2abfc68e1ce728b2958b6ba831d65c62e1b13ce3017c13943f8f9b5b2e/numpy-2.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7395e69ff32526710748f92cd8c9849b361830968ea3e24a676f272653e8983e", size = 14696872, upload-time = "2026-03-09T07:56:26.991Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ba/d1/780400e915ff5638166f11ca9dc2c5815189f3d7cf6f8759a1685e586413/numpy-2.4.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:abdce0f71dcb4a00e4e77f3faf05e4616ceccfe72ccaa07f47ee79cda3b7b0f4", size = 5203489, upload-time = "2026-03-09T07:56:29.414Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0b/bb/baffa907e9da4cc34a6e556d6d90e032f6d7a75ea47968ea92b4858826c4/numpy-2.4.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:48da3a4ee1336454b07497ff7ec83903efa5505792c4e6d9bf83d99dc07a1e18", size = 6550814, upload-time = "2026-03-09T07:56:32.225Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7b/12/8c9f0c6c95f76aeb20fc4a699c33e9f827fa0d0f857747c73bb7b17af945/numpy-2.4.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32e3bef222ad6b052280311d1d60db8e259e4947052c3ae7dd6817451fc8a4c5", size = 15666601, upload-time = "2026-03-09T07:56:34.461Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bd/79/cc665495e4d57d0aa6fbcc0aa57aa82671dfc78fbf95fe733ed86d98f52a/numpy-2.4.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e7dd01a46700b1967487141a66ac1a3cf0dd8ebf1f08db37d46389401512ca97", size = 16621358, upload-time = "2026-03-09T07:56:36.852Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a8/40/b4ecb7224af1065c3539f5ecfff879d090de09608ad1008f02c05c770cb3/numpy-2.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:76f0f283506c28b12bba319c0fab98217e9f9b54e6160e9c79e9f7348ba32e9c", size = 17016135, upload-time = "2026-03-09T07:56:39.337Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f7/b1/6a88e888052eed951afed7a142dcdf3b149a030ca59b4c71eef085858e43/numpy-2.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:737f630a337364665aba3b5a77e56a68cc42d350edd010c345d65a3efa3addcc", size = 18345816, upload-time = "2026-03-09T07:56:42.31Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f3/8f/103a60c5f8c3d7fc678c19cd7b2476110da689ccb80bc18050efbaeae183/numpy-2.4.3-cp312-cp312-win32.whl", hash = "sha256:26952e18d82a1dbbc2f008d402021baa8d6fc8e84347a2072a25e08b46d698b9", size = 5960132, upload-time = "2026-03-09T07:56:44.851Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d7/7c/f5ee1bf6ed888494978046a809df2882aad35d414b622893322df7286879/numpy-2.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:65f3c2455188f09678355f5cae1f959a06b778bc66d535da07bf2ef20cd319d5", size = 12316144, upload-time = "2026-03-09T07:56:47.057Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/71/46/8d1cb3f7a00f2fb6394140e7e6623696e54c6318a9d9691bb4904672cf42/numpy-2.4.3-cp312-cp312-win_arm64.whl", hash = "sha256:2abad5c7fef172b3377502bde47892439bae394a71bc329f31df0fd829b41a9e", size = 10220364, upload-time = "2026-03-09T07:56:49.849Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b6/d0/1fe47a98ce0df229238b77611340aff92d52691bcbc10583303181abf7fc/numpy-2.4.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b346845443716c8e542d54112966383b448f4a3ba5c66409771b8c0889485dd3", size = 16665297, upload-time = "2026-03-09T07:56:52.296Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/27/d9/4e7c3f0e68dfa91f21c6fb6cf839bc829ec920688b1ce7ec722b1a6202fb/numpy-2.4.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2629289168f4897a3c4e23dc98d6f1731f0fc0fe52fb9db19f974041e4cc12b9", size = 14691853, upload-time = "2026-03-09T07:56:54.992Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3a/66/bd096b13a87549683812b53ab211e6d413497f84e794fb3c39191948da97/numpy-2.4.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bb2e3cf95854233799013779216c57e153c1ee67a0bf92138acca0e429aefaee", size = 5198435, upload-time = "2026-03-09T07:56:57.184Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a2/2f/687722910b5a5601de2135c891108f51dfc873d8e43c8ed9f4ebb440b4a2/numpy-2.4.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:7f3408ff897f8ab07a07fbe2823d7aee6ff644c097cc1f90382511fe982f647f", size = 6546347, upload-time = "2026-03-09T07:56:59.531Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bf/ec/7971c4e98d86c564750393fab8d7d83d0a9432a9d78bb8a163a6dc59967a/numpy-2.4.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:decb0eb8a53c3b009b0962378065589685d66b23467ef5dac16cbe818afde27f", size = 15664626, upload-time = "2026-03-09T07:57:01.385Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7e/eb/7daecbea84ec935b7fc732e18f532073064a3816f0932a40a17f3349185f/numpy-2.4.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5f51900414fc9204a0e0da158ba2ac52b75656e7dce7e77fb9f84bfa343b4cc", size = 16608916, upload-time = "2026-03-09T07:57:04.008Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/df/58/2a2b4a817ffd7472dca4421d9f0776898b364154e30c95f42195041dc03b/numpy-2.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6bd06731541f89cdc01b261ba2c9e037f1543df7472517836b78dfb15bd6e476", size = 17015824, upload-time = "2026-03-09T07:57:06.347Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4a/ca/627a828d44e78a418c55f82dd4caea8ea4a8ef24e5144d9e71016e52fb40/numpy-2.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:22654fe6be0e5206f553a9250762c653d3698e46686eee53b399ab90da59bd92", size = 18334581, upload-time = "2026-03-09T07:57:09.114Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cd/c0/76f93962fc79955fcba30a429b62304332345f22d4daec1cb33653425643/numpy-2.4.3-cp313-cp313-win32.whl", hash = "sha256:d71e379452a2f670ccb689ec801b1218cd3983e253105d6e83780967e899d687", size = 5958618, upload-time = "2026-03-09T07:57:11.432Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b1/3c/88af0040119209b9b5cb59485fa48b76f372c73068dbf9254784b975ac53/numpy-2.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:0a60e17a14d640f49146cb38e3f105f571318db7826d9b6fef7e4dce758faecd", size = 12312824, upload-time = "2026-03-09T07:57:13.586Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/58/ce/3d07743aced3d173f877c3ef6a454c2174ba42b584ab0b7e6d99374f51ed/numpy-2.4.3-cp313-cp313-win_arm64.whl", hash = "sha256:c9619741e9da2059cd9c3f206110b97583c7152c1dc9f8aafd4beb450ac1c89d", size = 10221218, upload-time = "2026-03-09T07:57:16.183Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/62/09/d96b02a91d09e9d97862f4fc8bfebf5400f567d8eb1fe4b0cc4795679c15/numpy-2.4.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7aa4e54f6469300ebca1d9eb80acd5253cdfa36f2c03d79a35883687da430875", size = 14819570, upload-time = "2026-03-09T07:57:18.564Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b5/ca/0b1aba3905fdfa3373d523b2b15b19029f4f3031c87f4066bd9d20ef6c6b/numpy-2.4.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d1b90d840b25874cf5cd20c219af10bac3667db3876d9a495609273ebe679070", size = 5326113, upload-time = "2026-03-09T07:57:21.052Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c0/63/406e0fd32fcaeb94180fd6a4c41e55736d676c54346b7efbce548b94a914/numpy-2.4.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a749547700de0a20a6718293396ec237bb38218049cfce788e08fcb716e8cf73", size = 6646370, upload-time = "2026-03-09T07:57:22.804Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b6/d0/10f7dc157d4b37af92720a196be6f54f889e90dcd30dce9dc657ed92c257/numpy-2.4.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f3c4a151a2e529adf49c1d54f0f57ff8f9b233ee4d44af623a81553ab86368", size = 15723499, upload-time = "2026-03-09T07:57:24.693Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/66/f1/d1c2bf1161396629701bc284d958dc1efa3a5a542aab83cf11ee6eb4cba5/numpy-2.4.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22c31dc07025123aedf7f2db9e91783df13f1776dc52c6b22c620870dc0fab22", size = 16657164, upload-time = "2026-03-09T07:57:27.676Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1a/be/cca19230b740af199ac47331a21c71e7a3d0ba59661350483c1600d28c37/numpy-2.4.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:148d59127ac95979d6f07e4d460f934ebdd6eed641db9c0db6c73026f2b2101a", size = 17081544, upload-time = "2026-03-09T07:57:30.664Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b9/c5/9602b0cbb703a0936fb40f8a95407e8171935b15846de2f0776e08af04c7/numpy-2.4.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a97cbf7e905c435865c2d939af3d93f99d18eaaa3cabe4256f4304fb51604349", size = 18380290, upload-time = "2026-03-09T07:57:33.763Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ed/81/9f24708953cd30be9ee36ec4778f4b112b45165812f2ada4cc5ea1c1f254/numpy-2.4.3-cp313-cp313t-win32.whl", hash = "sha256:be3b8487d725a77acccc9924f65fd8bce9af7fac8c9820df1049424a2115af6c", size = 6082814, upload-time = "2026-03-09T07:57:36.491Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e2/9e/52f6eaa13e1a799f0ab79066c17f7016a4a8ae0c1aefa58c82b4dab690b4/numpy-2.4.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1ec84fd7c8e652b0f4aaaf2e6e9cc8eaa9b1b80a537e06b2e3a2fb176eedcb26", size = 12452673, upload-time = "2026-03-09T07:57:38.281Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c4/04/b8cece6ead0b30c9fbd99bb835ad7ea0112ac5f39f069788c5558e3b1ab2/numpy-2.4.3-cp313-cp313t-win_arm64.whl", hash = "sha256:120df8c0a81ebbf5b9020c91439fccd85f5e018a927a39f624845be194a2be02", size = 10290907, upload-time = "2026-03-09T07:57:40.747Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/70/ae/3936f79adebf8caf81bd7a599b90a561334a658be4dcc7b6329ebf4ee8de/numpy-2.4.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:5884ce5c7acfae1e4e1b6fde43797d10aa506074d25b531b4f54bde33c0c31d4", size = 16664563, upload-time = "2026-03-09T07:57:43.817Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9b/62/760f2b55866b496bb1fa7da2a6db076bef908110e568b02fcfc1422e2a3a/numpy-2.4.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:297837823f5bc572c5f9379b0c9f3a3365f08492cbdc33bcc3af174372ebb168", size = 14702161, upload-time = "2026-03-09T07:57:46.169Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/32/af/a7a39464e2c0a21526fb4fb76e346fb172ebc92f6d1c7a07c2c139cc17b1/numpy-2.4.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:a111698b4a3f8dcbe54c64a7708f049355abd603e619013c346553c1fd4ca90b", size = 5208738, upload-time = "2026-03-09T07:57:48.506Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/29/8c/2a0cf86a59558fa078d83805589c2de490f29ed4fb336c14313a161d358a/numpy-2.4.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:4bd4741a6a676770e0e97fe9ab2e51de01183df3dcbcec591d26d331a40de950", size = 6543618, upload-time = "2026-03-09T07:57:50.591Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/aa/b8/612ce010c0728b1c363fa4ea3aa4c22fe1c5da1de008486f8c2f5cb92fae/numpy-2.4.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:54f29b877279d51e210e0c80709ee14ccbbad647810e8f3d375561c45ef613dd", size = 15680676, upload-time = "2026-03-09T07:57:52.34Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a9/7e/4f120ecc54ba26ddf3dc348eeb9eb063f421de65c05fc961941798feea18/numpy-2.4.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:679f2a834bae9020f81534671c56fd0cc76dd7e5182f57131478e23d0dc59e24", size = 16613492, upload-time = "2026-03-09T07:57:54.91Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2c/86/1b6020db73be330c4b45d5c6ee4295d59cfeef0e3ea323959d053e5a6909/numpy-2.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d84f0f881cb2225c2dfd7f78a10a5645d487a496c6668d6cc39f0f114164f3d0", size = 17031789, upload-time = "2026-03-09T07:57:57.641Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/07/3a/3b90463bf41ebc21d1b7e06079f03070334374208c0f9a1f05e4ae8455e7/numpy-2.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d213c7e6e8d211888cc359bab7199670a00f5b82c0978b9d1c75baf1eddbeac0", size = 18339941, upload-time = "2026-03-09T07:58:00.577Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a8/74/6d736c4cd962259fd8bae9be27363eb4883a2f9069763747347544c2a487/numpy-2.4.3-cp314-cp314-win32.whl", hash = "sha256:52077feedeff7c76ed7c9f1a0428558e50825347b7545bbb8523da2cd55c547a", size = 6007503, upload-time = "2026-03-09T07:58:03.331Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/48/39/c56ef87af669364356bb011922ef0734fc49dad51964568634c72a009488/numpy-2.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:0448e7f9caefb34b4b7dd2b77f21e8906e5d6f0365ad525f9f4f530b13df2afc", size = 12444915, upload-time = "2026-03-09T07:58:06.353Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9d/1f/ab8528e38d295fd349310807496fabb7cf9fe2e1f70b97bc20a483ea9d4a/numpy-2.4.3-cp314-cp314-win_arm64.whl", hash = "sha256:b44fd60341c4d9783039598efadd03617fa28d041fc37d22b62d08f2027fa0e7", size = 10494875, upload-time = "2026-03-09T07:58:08.734Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e6/ef/b7c35e4d5ef141b836658ab21a66d1a573e15b335b1d111d31f26c8ef80f/numpy-2.4.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0a195f4216be9305a73c0e91c9b026a35f2161237cf1c6de9b681637772ea657", size = 14822225, upload-time = "2026-03-09T07:58:11.034Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cd/8d/7730fa9278cf6648639946cc816e7cc89f0d891602584697923375f801ed/numpy-2.4.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:cd32fbacb9fd1bf041bf8e89e4576b6f00b895f06d00914820ae06a616bdfef7", size = 5328769, upload-time = "2026-03-09T07:58:13.67Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/47/01/d2a137317c958b074d338807c1b6a383406cdf8b8e53b075d804cc3d211d/numpy-2.4.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:2e03c05abaee1f672e9d67bc858f300b5ccba1c21397211e8d77d98350972093", size = 6649461, upload-time = "2026-03-09T07:58:15.912Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5c/34/812ce12bc0f00272a4b0ec0d713cd237cb390666eb6206323d1cc9cedbb2/numpy-2.4.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d1ce23cce91fcea443320a9d0ece9b9305d4368875bab09538f7a5b4131938a", size = 15725809, upload-time = "2026-03-09T07:58:17.787Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/25/c0/2aed473a4823e905e765fee3dc2cbf504bd3e68ccb1150fbdabd5c39f527/numpy-2.4.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c59020932feb24ed49ffd03704fbab89f22aa9c0d4b180ff45542fe8918f5611", size = 16655242, upload-time = "2026-03-09T07:58:20.476Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f2/c8/7e052b2fc87aa0e86de23f20e2c42bd261c624748aa8efd2c78f7bb8d8c6/numpy-2.4.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9684823a78a6cd6ad7511fc5e25b07947d1d5b5e2812c93fe99d7d4195130720", size = 17080660, upload-time = "2026-03-09T07:58:23.067Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f3/3d/0876746044db2adcb11549f214d104f2e1be00f07a67edbb4e2812094847/numpy-2.4.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0200b25c687033316fb39f0ff4e3e690e8957a2c3c8d22499891ec58c37a3eb5", size = 18380384, upload-time = "2026-03-09T07:58:25.839Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/07/12/8160bea39da3335737b10308df4f484235fd297f556745f13092aa039d3b/numpy-2.4.3-cp314-cp314t-win32.whl", hash = "sha256:5e10da9e93247e554bb1d22f8edc51847ddd7dde52d85ce31024c1b4312bfba0", size = 6154547, upload-time = "2026-03-09T07:58:28.289Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/42/f3/76534f61f80d74cc9cdf2e570d3d4eeb92c2280a27c39b0aaf471eda7b48/numpy-2.4.3-cp314-cp314t-win_amd64.whl", hash = "sha256:45f003dbdffb997a03da2d1d0cb41fbd24a87507fb41605c0420a3db5bd4667b", size = 12633645, upload-time = "2026-03-09T07:58:30.384Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1f/b6/7c0d4334c15983cec7f92a69e8ce9b1e6f31857e5ee3a413ac424e6bd63d/numpy-2.4.3-cp314-cp314t-win_arm64.whl", hash = "sha256:4d382735cecd7bcf090172489a525cd7d4087bc331f7df9f60ddc9a296cf208e", size = 10565454, upload-time = "2026-03-09T07:58:33.031Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "opencv-python"
|
||||||
|
version = "4.13.0.92"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "numpy" },
|
||||||
|
]
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/fc/6f/5a28fef4c4a382be06afe3938c64cc168223016fa520c5abaf37e8862aa5/opencv_python-4.13.0.92-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:caf60c071ec391ba51ed00a4a920f996d0b64e3e46068aac1f646b5de0326a19", size = 46247052, upload-time = "2026-02-05T07:01:25.046Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/08/ac/6c98c44c650b8114a0fb901691351cfb3956d502e8e9b5cd27f4ee7fbf2f/opencv_python-4.13.0.92-cp37-abi3-macosx_14_0_x86_64.whl", hash = "sha256:5868a8c028a0b37561579bfb8ac1875babdc69546d236249fff296a8c010ccf9", size = 32568781, upload-time = "2026-02-05T07:01:41.379Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3e/51/82fed528b45173bf629fa44effb76dff8bc9f4eeaee759038362dfa60237/opencv_python-4.13.0.92-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0bc2596e68f972ca452d80f444bc404e08807d021fbba40df26b61b18e01838a", size = 47685527, upload-time = "2026-02-05T06:59:11.24Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/db/07/90b34a8e2cf9c50fe8ed25cac9011cde0676b4d9d9c973751ac7616223a2/opencv_python-4.13.0.92-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:402033cddf9d294693094de5ef532339f14ce821da3ad7df7c9f6e8316da32cf", size = 70460872, upload-time = "2026-02-05T06:59:19.162Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/02/6d/7a9cc719b3eaf4377b9c2e3edeb7ed3a81de41f96421510c0a169ca3cfd4/opencv_python-4.13.0.92-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bccaabf9eb7f897ca61880ce2869dcd9b25b72129c28478e7f2a5e8dee945616", size = 46708208, upload-time = "2026-02-05T06:59:15.419Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/fd/55/b3b49a1b97aabcfbbd6c7326df9cb0b6fa0c0aefa8e89d500939e04aa229/opencv_python-4.13.0.92-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:620d602b8f7d8b8dab5f4b99c6eb353e78d3fb8b0f53db1bd258bb1aa001c1d5", size = 72927042, upload-time = "2026-02-05T06:59:23.389Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/fb/17/de5458312bcb07ddf434d7bfcb24bb52c59635ad58c6e7c751b48949b009/opencv_python-4.13.0.92-cp37-abi3-win32.whl", hash = "sha256:372fe164a3148ac1ca51e5f3ad0541a4a276452273f503441d718fab9c5e5f59", size = 30932638, upload-time = "2026-02-05T07:02:14.98Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e9/a5/1be1516390333ff9be3a9cb648c9f33df79d5096e5884b5df71a588af463/opencv_python-4.13.0.92-cp37-abi3-win_amd64.whl", hash = "sha256:423d934c9fafb91aad38edf26efb46da91ffbc05f3f59c4b0c72e699720706f5", size = 40212062, upload-time = "2026-02-05T07:02:12.724Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "packaging"
|
||||||
|
version = "26.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pluggy"
|
||||||
|
version = "1.6.0"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pose-tracking-exp"
|
||||||
|
version = "0.1.0"
|
||||||
|
source = { editable = "." }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "beartype" },
|
||||||
|
{ name = "click" },
|
||||||
|
{ name = "jaxtyping" },
|
||||||
|
{ name = "numpy" },
|
||||||
|
{ name = "opencv-python" },
|
||||||
|
{ name = "pyarrow" },
|
||||||
|
{ name = "rapid-pose-triangulation" },
|
||||||
|
{ name = "scipy" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dev-dependencies]
|
||||||
|
dev = [
|
||||||
|
{ name = "basedpyright" },
|
||||||
|
{ name = "pytest" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.metadata]
|
||||||
|
requires-dist = [
|
||||||
|
{ name = "beartype", specifier = ">=0.19.0" },
|
||||||
|
{ name = "click", specifier = ">=8.2.1" },
|
||||||
|
{ name = "jaxtyping", specifier = ">=0.3.2" },
|
||||||
|
{ name = "numpy", specifier = ">=2.1.0" },
|
||||||
|
{ name = "opencv-python", specifier = ">=4.12.0.88" },
|
||||||
|
{ name = "pyarrow", specifier = ">=21.0.0" },
|
||||||
|
{ name = "rapid-pose-triangulation", editable = "../RapidPoseTriangulation" },
|
||||||
|
{ name = "scipy", specifier = ">=1.15.0" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.metadata.requires-dev]
|
||||||
|
dev = [
|
||||||
|
{ name = "basedpyright", specifier = ">=1.31.0" },
|
||||||
|
{ name = "pytest", specifier = ">=8.4.0" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pyarrow"
|
||||||
|
version = "23.0.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/88/22/134986a4cc224d593c1afde5494d18ff629393d74cc2eddb176669f234a4/pyarrow-23.0.1.tar.gz", hash = "sha256:b8c5873e33440b2bc2f4a79d2b47017a89c5a24116c055625e6f2ee50523f019", size = 1167336, upload-time = "2026-02-16T10:14:12.39Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9a/4b/4166bb5abbfe6f750fc60ad337c43ecf61340fa52ab386da6e8dbf9e63c4/pyarrow-23.0.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f4b0dbfa124c0bb161f8b5ebb40f1a680b70279aa0c9901d44a2b5a20806039f", size = 34214575, upload-time = "2026-02-16T10:09:56.225Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e1/da/3f941e3734ac8088ea588b53e860baeddac8323ea40ce22e3d0baa865cc9/pyarrow-23.0.1-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:7707d2b6673f7de054e2e83d59f9e805939038eebe1763fe811ee8fa5c0cd1a7", size = 35832540, upload-time = "2026-02-16T10:10:03.428Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/88/7c/3d841c366620e906d54430817531b877ba646310296df42ef697308c2705/pyarrow-23.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:86ff03fb9f1a320266e0de855dee4b17da6794c595d207f89bba40d16b5c78b9", size = 44470940, upload-time = "2026-02-16T10:10:10.704Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2c/a5/da83046273d990f256cb79796a190bbf7ec999269705ddc609403f8c6b06/pyarrow-23.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:813d99f31275919c383aab17f0f455a04f5a429c261cc411b1e9a8f5e4aaaa05", size = 47586063, upload-time = "2026-02-16T10:10:17.95Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5b/3c/b7d2ebcff47a514f47f9da1e74b7949138c58cfeb108cdd4ee62f43f0cf3/pyarrow-23.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bf5842f960cddd2ef757d486041d57c96483efc295a8c4a0e20e704cbbf39c67", size = 48173045, upload-time = "2026-02-16T10:10:25.363Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/43/b2/b40961262213beaba6acfc88698eb773dfce32ecdf34d19291db94c2bd73/pyarrow-23.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564baf97c858ecc03ec01a41062e8f4698abc3e6e2acd79c01c2e97880a19730", size = 50621741, upload-time = "2026-02-16T10:10:33.477Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f6/70/1fdda42d65b28b078e93d75d371b2185a61da89dda4def8ba6ba41ebdeb4/pyarrow-23.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:07deae7783782ac7250989a7b2ecde9b3c343a643f82e8a4df03d93b633006f0", size = 27620678, upload-time = "2026-02-16T10:10:39.31Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/47/10/2cbe4c6f0fb83d2de37249567373d64327a5e4d8db72f486db42875b08f6/pyarrow-23.0.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:6b8fda694640b00e8af3c824f99f789e836720aa8c9379fb435d4c4953a756b8", size = 34210066, upload-time = "2026-02-16T10:10:45.487Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cb/4f/679fa7e84dadbaca7a65f7cdba8d6c83febbd93ca12fa4adf40ba3b6362b/pyarrow-23.0.1-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:8ff51b1addc469b9444b7c6f3548e19dc931b172ab234e995a60aea9f6e6025f", size = 35825526, upload-time = "2026-02-16T10:10:52.266Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f9/63/d2747d930882c9d661e9398eefc54f15696547b8983aaaf11d4a2e8b5426/pyarrow-23.0.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:71c5be5cbf1e1cb6169d2a0980850bccb558ddc9b747b6206435313c47c37677", size = 44473279, upload-time = "2026-02-16T10:11:01.557Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b3/93/10a48b5e238de6d562a411af6467e71e7aedbc9b87f8d3a35f1560ae30fb/pyarrow-23.0.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:9b6f4f17b43bc39d56fec96e53fe89d94bac3eb134137964371b45352d40d0c2", size = 47585798, upload-time = "2026-02-16T10:11:09.401Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5c/20/476943001c54ef078dbf9542280e22741219a184a0632862bca4feccd666/pyarrow-23.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fc13fc6c403d1337acab46a2c4346ca6c9dec5780c3c697cf8abfd5e19b6b37", size = 48179446, upload-time = "2026-02-16T10:11:17.781Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4b/b6/5dd0c47b335fcd8edba9bfab78ad961bd0fd55ebe53468cc393f45e0be60/pyarrow-23.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5c16ed4f53247fa3ffb12a14d236de4213a4415d127fe9cebed33d51671113e2", size = 50623972, upload-time = "2026-02-16T10:11:26.185Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d5/09/a532297c9591a727d67760e2e756b83905dd89adb365a7f6e9c72578bcc1/pyarrow-23.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:cecfb12ef629cf6be0b1887f9f86463b0dd3dc3195ae6224e74006be4736035a", size = 27540749, upload-time = "2026-02-16T10:12:23.297Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a5/8e/38749c4b1303e6ae76b3c80618f84861ae0c55dd3c2273842ea6f8258233/pyarrow-23.0.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:29f7f7419a0e30264ea261fdc0e5fe63ce5a6095003db2945d7cd78df391a7e1", size = 34471544, upload-time = "2026-02-16T10:11:32.535Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a3/73/f237b2bc8c669212f842bcfd842b04fc8d936bfc9d471630569132dc920d/pyarrow-23.0.1-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:33d648dc25b51fd8055c19e4261e813dfc4d2427f068bcecc8b53d01b81b0500", size = 35949911, upload-time = "2026-02-16T10:11:39.813Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0c/86/b912195eee0903b5611bf596833def7d146ab2d301afeb4b722c57ffc966/pyarrow-23.0.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:cd395abf8f91c673dd3589cadc8cc1ee4e8674fa61b2e923c8dd215d9c7d1f41", size = 44520337, upload-time = "2026-02-16T10:11:47.764Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/69/c2/f2a717fb824f62d0be952ea724b4f6f9372a17eed6f704b5c9526f12f2f1/pyarrow-23.0.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:00be9576d970c31defb5c32eb72ef585bf600ef6d0a82d5eccaae96639cf9d07", size = 47548944, upload-time = "2026-02-16T10:11:56.607Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/84/a7/90007d476b9f0dc308e3bc57b832d004f848fd6c0da601375d20d92d1519/pyarrow-23.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c2139549494445609f35a5cda4eb94e2c9e4d704ce60a095b342f82460c73a83", size = 48236269, upload-time = "2026-02-16T10:12:04.47Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b0/3f/b16fab3e77709856eb6ac328ce35f57a6d4a18462c7ca5186ef31b45e0e0/pyarrow-23.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:7044b442f184d84e2351e5084600f0d7343d6117aabcbc1ac78eb1ae11eb4125", size = 50604794, upload-time = "2026-02-16T10:12:11.797Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/e9/a1/22df0620a9fac31d68397a75465c344e83c3dfe521f7612aea33e27ab6c0/pyarrow-23.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a35581e856a2fafa12f3f54fce4331862b1cfb0bef5758347a858a4aa9d6bae8", size = 27660642, upload-time = "2026-02-16T10:12:17.746Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8d/1b/6da9a89583ce7b23ac611f183ae4843cd3a6cf54f079549b0e8c14031e73/pyarrow-23.0.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:5df1161da23636a70838099d4aaa65142777185cc0cdba4037a18cee7d8db9ca", size = 34238755, upload-time = "2026-02-16T10:12:32.819Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ae/b5/d58a241fbe324dbaeb8df07be6af8752c846192d78d2272e551098f74e88/pyarrow-23.0.1-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:fa8e51cb04b9f8c9c5ace6bab63af9a1f88d35c0d6cbf53e8c17c098552285e1", size = 35847826, upload-time = "2026-02-16T10:12:38.949Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/54/a5/8cbc83f04aba433ca7b331b38f39e000efd9f0c7ce47128670e737542996/pyarrow-23.0.1-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:0b95a3994f015be13c63148fef8832e8a23938128c185ee951c98908a696e0eb", size = 44536859, upload-time = "2026-02-16T10:12:45.467Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/36/2e/c0f017c405fcdc252dbccafbe05e36b0d0eb1ea9a958f081e01c6972927f/pyarrow-23.0.1-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:4982d71350b1a6e5cfe1af742c53dfb759b11ce14141870d05d9e540d13bc5d1", size = 47614443, upload-time = "2026-02-16T10:12:55.525Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/af/6b/2314a78057912f5627afa13ba43809d9d653e6630859618b0fd81a4e0759/pyarrow-23.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c250248f1fe266db627921c89b47b7c06fee0489ad95b04d50353537d74d6886", size = 48232991, upload-time = "2026-02-16T10:13:04.729Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/40/f2/1bcb1d3be3460832ef3370d621142216e15a2c7c62602a4ea19ec240dd64/pyarrow-23.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5f4763b83c11c16e5f4c15601ba6dfa849e20723b46aa2617cb4bffe8768479f", size = 50645077, upload-time = "2026-02-16T10:13:14.147Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/eb/3f/b1da7b61cd66566a4d4c8383d376c606d1c34a906c3f1cb35c479f59d1aa/pyarrow-23.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:3a4c85ef66c134161987c17b147d6bffdca4566f9a4c1d81a0a01cdf08414ea5", size = 28234271, upload-time = "2026-02-16T10:14:09.397Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b5/78/07f67434e910a0f7323269be7bfbf58699bd0c1d080b18a1ab49ba943fe8/pyarrow-23.0.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:17cd28e906c18af486a499422740298c52d7c6795344ea5002a7720b4eadf16d", size = 34488692, upload-time = "2026-02-16T10:13:21.541Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/50/76/34cf7ae93ece1f740a04910d9f7e80ba166b9b4ab9596a953e9e62b90fe1/pyarrow-23.0.1-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:76e823d0e86b4fb5e1cf4a58d293036e678b5a4b03539be933d3b31f9406859f", size = 35964383, upload-time = "2026-02-16T10:13:28.63Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/46/90/459b827238936d4244214be7c684e1b366a63f8c78c380807ae25ed92199/pyarrow-23.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:a62e1899e3078bf65943078b3ad2a6ddcacf2373bc06379aac61b1e548a75814", size = 44538119, upload-time = "2026-02-16T10:13:35.506Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/28/a1/93a71ae5881e99d1f9de1d4554a87be37da11cd6b152239fb5bd924fdc64/pyarrow-23.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:df088e8f640c9fae3b1f495b3c64755c4e719091caf250f3a74d095ddf3c836d", size = 47571199, upload-time = "2026-02-16T10:13:42.504Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/88/a3/d2c462d4ef313521eaf2eff04d204ac60775263f1fb08c374b543f79f610/pyarrow-23.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:46718a220d64677c93bc243af1d44b55998255427588e400677d7192671845c7", size = 48259435, upload-time = "2026-02-16T10:13:49.226Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cc/f1/11a544b8c3d38a759eb3fbb022039117fd633e9a7b19e4841cc3da091915/pyarrow-23.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a09f3876e87f48bc2f13583ab551f0379e5dfb83210391e68ace404181a20690", size = 50629149, upload-time = "2026-02-16T10:13:57.238Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/50/f2/c0e76a0b451ffdf0cf788932e182758eb7558953f4f27f1aff8e2518b653/pyarrow-23.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:527e8d899f14bd15b740cd5a54ad56b7f98044955373a17179d5956ddb93d9ce", size = 28365807, upload-time = "2026-02-16T10:14:03.892Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pygments"
|
||||||
|
version = "2.19.2"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pytest"
|
||||||
|
version = "9.0.2"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||||
|
{ name = "iniconfig" },
|
||||||
|
{ name = "packaging" },
|
||||||
|
{ name = "pluggy" },
|
||||||
|
{ name = "pygments" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rapid-pose-triangulation"
|
||||||
|
version = "0.2.0"
|
||||||
|
source = { editable = "../RapidPoseTriangulation" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "numpy" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.metadata]
|
||||||
|
requires-dist = [{ name = "numpy", specifier = ">=2.0" }]
|
||||||
|
|
||||||
|
[package.metadata.requires-dev]
|
||||||
|
dev = [{ name = "pytest", specifier = ">=8.3" }]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "scipy"
|
||||||
|
version = "1.17.1"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "numpy" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/7a/97/5a3609c4f8d58b039179648e62dd220f89864f56f7357f5d4f45c29eb2cc/scipy-1.17.1.tar.gz", hash = "sha256:95d8e012d8cb8816c226aef832200b1d45109ed4464303e997c5b13122b297c0", size = 30573822, upload-time = "2026-02-23T00:26:24.851Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/35/48/b992b488d6f299dbe3f11a20b24d3dda3d46f1a635ede1c46b5b17a7b163/scipy-1.17.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:35c3a56d2ef83efc372eaec584314bd0ef2e2f0d2adb21c55e6ad5b344c0dcb8", size = 31610954, upload-time = "2026-02-23T00:17:49.855Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b2/02/cf107b01494c19dc100f1d0b7ac3cc08666e96ba2d64db7626066cee895e/scipy-1.17.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:fcb310ddb270a06114bb64bbe53c94926b943f5b7f0842194d585c65eb4edd76", size = 28172662, upload-time = "2026-02-23T00:18:01.64Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cf/a9/599c28631bad314d219cf9ffd40e985b24d603fc8a2f4ccc5ae8419a535b/scipy-1.17.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:cc90d2e9c7e5c7f1a482c9875007c095c3194b1cfedca3c2f3291cdc2bc7c086", size = 20344366, upload-time = "2026-02-23T00:18:12.015Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/35/f5/906eda513271c8deb5af284e5ef0206d17a96239af79f9fa0aebfe0e36b4/scipy-1.17.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:c80be5ede8f3f8eded4eff73cc99a25c388ce98e555b17d31da05287015ffa5b", size = 22704017, upload-time = "2026-02-23T00:18:21.502Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/da/34/16f10e3042d2f1d6b66e0428308ab52224b6a23049cb2f5c1756f713815f/scipy-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e19ebea31758fac5893a2ac360fedd00116cbb7628e650842a6691ba7ca28a21", size = 32927842, upload-time = "2026-02-23T00:18:35.367Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/01/8e/1e35281b8ab6d5d72ebe9911edcdffa3f36b04ed9d51dec6dd140396e220/scipy-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02ae3b274fde71c5e92ac4d54bc06c42d80e399fec704383dcd99b301df37458", size = 35235890, upload-time = "2026-02-23T00:18:49.188Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c5/5c/9d7f4c88bea6e0d5a4f1bc0506a53a00e9fcb198de372bfe4d3652cef482/scipy-1.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a604bae87c6195d8b1045eddece0514d041604b14f2727bbc2b3020172045eb", size = 35003557, upload-time = "2026-02-23T00:18:54.74Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/65/94/7698add8f276dbab7a9de9fb6b0e02fc13ee61d51c7c3f85ac28b65e1239/scipy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f590cd684941912d10becc07325a3eeb77886fe981415660d9265c4c418d0bea", size = 37625856, upload-time = "2026-02-23T00:19:00.307Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a2/84/dc08d77fbf3d87d3ee27f6a0c6dcce1de5829a64f2eae85a0ecc1f0daa73/scipy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:41b71f4a3a4cab9d366cd9065b288efc4d4f3c0b37a91a8e0947fb5bd7f31d87", size = 36549682, upload-time = "2026-02-23T00:19:07.67Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bc/98/fe9ae9ffb3b54b62559f52dedaebe204b408db8109a8c66fdd04869e6424/scipy-1.17.1-cp312-cp312-win_arm64.whl", hash = "sha256:f4115102802df98b2b0db3cce5cb9b92572633a1197c77b7553e5203f284a5b3", size = 24547340, upload-time = "2026-02-23T00:19:12.024Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/76/27/07ee1b57b65e92645f219b37148a7e7928b82e2b5dbeccecb4dff7c64f0b/scipy-1.17.1-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:5e3c5c011904115f88a39308379c17f91546f77c1667cea98739fe0fccea804c", size = 31590199, upload-time = "2026-02-23T00:19:17.192Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ec/ae/db19f8ab842e9b724bf5dbb7db29302a91f1e55bc4d04b1025d6d605a2c5/scipy-1.17.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:6fac755ca3d2c3edcb22f479fceaa241704111414831ddd3bc6056e18516892f", size = 28154001, upload-time = "2026-02-23T00:19:22.241Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/5b/58/3ce96251560107b381cbd6e8413c483bbb1228a6b919fa8652b0d4090e7f/scipy-1.17.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:7ff200bf9d24f2e4d5dc6ee8c3ac64d739d3a89e2326ba68aaf6c4a2b838fd7d", size = 20325719, upload-time = "2026-02-23T00:19:26.329Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b2/83/15087d945e0e4d48ce2377498abf5ad171ae013232ae31d06f336e64c999/scipy-1.17.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:4b400bdc6f79fa02a4d86640310dde87a21fba0c979efff5248908c6f15fad1b", size = 22683595, upload-time = "2026-02-23T00:19:30.304Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b4/e0/e58fbde4a1a594c8be8114eb4aac1a55bcd6587047efc18a61eb1f5c0d30/scipy-1.17.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2b64ca7d4aee0102a97f3ba22124052b4bd2152522355073580bf4845e2550b6", size = 32896429, upload-time = "2026-02-23T00:19:35.536Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f5/5f/f17563f28ff03c7b6799c50d01d5d856a1d55f2676f537ca8d28c7f627cd/scipy-1.17.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:581b2264fc0aa555f3f435a5944da7504ea3a065d7029ad60e7c3d1ae09c5464", size = 35203952, upload-time = "2026-02-23T00:19:42.259Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8d/a5/9afd17de24f657fdfe4df9a3f1ea049b39aef7c06000c13db1530d81ccca/scipy-1.17.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:beeda3d4ae615106d7094f7e7cef6218392e4465cc95d25f900bebabfded0950", size = 34979063, upload-time = "2026-02-23T00:19:47.547Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8b/13/88b1d2384b424bf7c924f2038c1c409f8d88bb2a8d49d097861dd64a57b2/scipy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6609bc224e9568f65064cfa72edc0f24ee6655b47575954ec6339534b2798369", size = 37598449, upload-time = "2026-02-23T00:19:53.238Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/35/e5/d6d0e51fc888f692a35134336866341c08655d92614f492c6860dc45bb2c/scipy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:37425bc9175607b0268f493d79a292c39f9d001a357bebb6b88fdfaff13f6448", size = 36510943, upload-time = "2026-02-23T00:20:50.89Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2a/fd/3be73c564e2a01e690e19cc618811540ba5354c67c8680dce3281123fb79/scipy-1.17.1-cp313-cp313-win_arm64.whl", hash = "sha256:5cf36e801231b6a2059bf354720274b7558746f3b1a4efb43fcf557ccd484a87", size = 24545621, upload-time = "2026-02-23T00:20:55.871Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6f/6b/17787db8b8114933a66f9dcc479a8272e4b4da75fe03b0c282f7b0ade8cd/scipy-1.17.1-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:d59c30000a16d8edc7e64152e30220bfbd724c9bbb08368c054e24c651314f0a", size = 31936708, upload-time = "2026-02-23T00:19:58.694Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/38/2e/524405c2b6392765ab1e2b722a41d5da33dc5c7b7278184a8ad29b6cb206/scipy-1.17.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:010f4333c96c9bb1a4516269e33cb5917b08ef2166d5556ca2fd9f082a9e6ea0", size = 28570135, upload-time = "2026-02-23T00:20:03.934Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/fd/c3/5bd7199f4ea8556c0c8e39f04ccb014ac37d1468e6cfa6a95c6b3562b76e/scipy-1.17.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2ceb2d3e01c5f1d83c4189737a42d9cb2fc38a6eeed225e7515eef71ad301dce", size = 20741977, upload-time = "2026-02-23T00:20:07.935Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d9/b8/8ccd9b766ad14c78386599708eb745f6b44f08400a5fd0ade7cf89b6fc93/scipy-1.17.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:844e165636711ef41f80b4103ed234181646b98a53c8f05da12ca5ca289134f6", size = 23029601, upload-time = "2026-02-23T00:20:12.161Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6d/a0/3cb6f4d2fb3e17428ad2880333cac878909ad1a89f678527b5328b93c1d4/scipy-1.17.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:158dd96d2207e21c966063e1635b1063cd7787b627b6f07305315dd73d9c679e", size = 33019667, upload-time = "2026-02-23T00:20:17.208Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f3/c3/2d834a5ac7bf3a0c806ad1508efc02dda3c8c61472a56132d7894c312dea/scipy-1.17.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:74cbb80d93260fe2ffa334efa24cb8f2f0f622a9b9febf8b483c0b865bfb3475", size = 35264159, upload-time = "2026-02-23T00:20:23.087Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4d/77/d3ed4becfdbd217c52062fafe35a72388d1bd82c2d0ba5ca19d6fcc93e11/scipy-1.17.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:dbc12c9f3d185f5c737d801da555fb74b3dcfa1a50b66a1a93e09190f41fab50", size = 35102771, upload-time = "2026-02-23T00:20:28.636Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bd/12/d19da97efde68ca1ee5538bb261d5d2c062f0c055575128f11a2730e3ac1/scipy-1.17.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:94055a11dfebe37c656e70317e1996dc197e1a15bbcc351bcdd4610e128fe1ca", size = 37665910, upload-time = "2026-02-23T00:20:34.743Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/06/1c/1172a88d507a4baaf72c5a09bb6c018fe2ae0ab622e5830b703a46cc9e44/scipy-1.17.1-cp313-cp313t-win_amd64.whl", hash = "sha256:e30bdeaa5deed6bc27b4cc490823cd0347d7dae09119b8803ae576ea0ce52e4c", size = 36562980, upload-time = "2026-02-23T00:20:40.575Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/70/b0/eb757336e5a76dfa7911f63252e3b7d1de00935d7705cf772db5b45ec238/scipy-1.17.1-cp313-cp313t-win_arm64.whl", hash = "sha256:a720477885a9d2411f94a93d16f9d89bad0f28ca23c3f8daa521e2dcc3f44d49", size = 24856543, upload-time = "2026-02-23T00:20:45.313Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/cf/83/333afb452af6f0fd70414dc04f898647ee1423979ce02efa75c3b0f2c28e/scipy-1.17.1-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:a48a72c77a310327f6a3a920092fa2b8fd03d7deaa60f093038f22d98e096717", size = 31584510, upload-time = "2026-02-23T00:21:01.015Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ed/a6/d05a85fd51daeb2e4ea71d102f15b34fedca8e931af02594193ae4fd25f7/scipy-1.17.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:45abad819184f07240d8a696117a7aacd39787af9e0b719d00285549ed19a1e9", size = 28170131, upload-time = "2026-02-23T00:21:05.888Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/db/7b/8624a203326675d7746a254083a187398090a179335b2e4a20e2ddc46e83/scipy-1.17.1-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:3fd1fcdab3ea951b610dc4cef356d416d5802991e7e32b5254828d342f7b7e0b", size = 20342032, upload-time = "2026-02-23T00:21:09.904Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c9/35/2c342897c00775d688d8ff3987aced3426858fd89d5a0e26e020b660b301/scipy-1.17.1-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:7bdf2da170b67fdf10bca777614b1c7d96ae3ca5794fd9587dce41eb2966e866", size = 22678766, upload-time = "2026-02-23T00:21:14.313Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ef/f2/7cdb8eb308a1a6ae1e19f945913c82c23c0c442a462a46480ce487fdc0ac/scipy-1.17.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:adb2642e060a6549c343603a3851ba76ef0b74cc8c079a9a58121c7ec9fe2350", size = 32957007, upload-time = "2026-02-23T00:21:19.663Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0b/2e/7eea398450457ecb54e18e9d10110993fa65561c4f3add5e8eccd2b9cd41/scipy-1.17.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eee2cfda04c00a857206a4330f0c5e3e56535494e30ca445eb19ec624ae75118", size = 35221333, upload-time = "2026-02-23T00:21:25.278Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d9/77/5b8509d03b77f093a0d52e606d3c4f79e8b06d1d38c441dacb1e26cacf46/scipy-1.17.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d2650c1fb97e184d12d8ba010493ee7b322864f7d3d00d3f9bb97d9c21de4068", size = 35042066, upload-time = "2026-02-23T00:21:31.358Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f9/df/18f80fb99df40b4070328d5ae5c596f2f00fffb50167e31439e932f29e7d/scipy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:08b900519463543aa604a06bec02461558a6e1cef8fdbb8098f77a48a83c8118", size = 37612763, upload-time = "2026-02-23T00:21:37.247Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4b/39/f0e8ea762a764a9dc52aa7dabcfad51a354819de1f0d4652b6a1122424d6/scipy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:3877ac408e14da24a6196de0ddcace62092bfc12a83823e92e49e40747e52c19", size = 37290984, upload-time = "2026-02-23T00:22:35.023Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7c/56/fe201e3b0f93d1a8bcf75d3379affd228a63d7e2d80ab45467a74b494947/scipy-1.17.1-cp314-cp314-win_arm64.whl", hash = "sha256:f8885db0bc2bffa59d5c1b72fad7a6a92d3e80e7257f967dd81abb553a90d293", size = 25192877, upload-time = "2026-02-23T00:22:39.798Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/96/ad/f8c414e121f82e02d76f310f16db9899c4fcde36710329502a6b2a3c0392/scipy-1.17.1-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:1cc682cea2ae55524432f3cdff9e9a3be743d52a7443d0cba9017c23c87ae2f6", size = 31949750, upload-time = "2026-02-23T00:21:42.289Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7c/b0/c741e8865d61b67c81e255f4f0a832846c064e426636cd7de84e74d209be/scipy-1.17.1-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:2040ad4d1795a0ae89bfc7e8429677f365d45aa9fd5e4587cf1ea737f927b4a1", size = 28585858, upload-time = "2026-02-23T00:21:47.706Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ed/1b/3985219c6177866628fa7c2595bfd23f193ceebbe472c98a08824b9466ff/scipy-1.17.1-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:131f5aaea57602008f9822e2115029b55d4b5f7c070287699fe45c661d051e39", size = 20757723, upload-time = "2026-02-23T00:21:52.039Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/c0/19/2a04aa25050d656d6f7b9e7b685cc83d6957fb101665bfd9369ca6534563/scipy-1.17.1-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9cdc1a2fcfd5c52cfb3045feb399f7b3ce822abdde3a193a6b9a60b3cb5854ca", size = 23043098, upload-time = "2026-02-23T00:21:56.185Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/86/f1/3383beb9b5d0dbddd030335bf8a8b32d4317185efe495374f134d8be6cce/scipy-1.17.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e3dcd57ab780c741fde8dc68619de988b966db759a3c3152e8e9142c26295ad", size = 33030397, upload-time = "2026-02-23T00:22:01.404Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/41/68/8f21e8a65a5a03f25a79165ec9d2b28c00e66dc80546cf5eb803aeeff35b/scipy-1.17.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a9956e4d4f4a301ebf6cde39850333a6b6110799d470dbbb1e25326ac447f52a", size = 35281163, upload-time = "2026-02-23T00:22:07.024Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/84/8d/c8a5e19479554007a5632ed7529e665c315ae7492b4f946b0deb39870e39/scipy-1.17.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:a4328d245944d09fd639771de275701ccadf5f781ba0ff092ad141e017eccda4", size = 35116291, upload-time = "2026-02-23T00:22:12.585Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/52/52/e57eceff0e342a1f50e274264ed47497b59e6a4e3118808ee58ddda7b74a/scipy-1.17.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a77cbd07b940d326d39a1d1b37817e2ee4d79cb30e7338f3d0cddffae70fcaa2", size = 37682317, upload-time = "2026-02-23T00:22:18.513Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/11/2f/b29eafe4a3fbc3d6de9662b36e028d5f039e72d345e05c250e121a230dd4/scipy-1.17.1-cp314-cp314t-win_amd64.whl", hash = "sha256:eb092099205ef62cd1782b006658db09e2fed75bffcae7cc0d44052d8aa0f484", size = 37345327, upload-time = "2026-02-23T00:22:24.442Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/07/39/338d9219c4e87f3e708f18857ecd24d22a0c3094752393319553096b98af/scipy-1.17.1-cp314-cp314t-win_arm64.whl", hash = "sha256:200e1050faffacc162be6a486a984a0497866ec54149a01270adc8a59b7c7d21", size = 25489165, upload-time = "2026-02-23T00:22:29.563Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wadler-lindig"
|
||||||
|
version = "0.1.7"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/1e/67/cbae4bf7683a64755c2c1778c418fea96d00e34395bb91743f08bd951571/wadler_lindig-0.1.7.tar.gz", hash = "sha256:81d14d3fe77d441acf3ebd7f4aefac20c74128bf460e84b512806dccf7b2cd55", size = 15842, upload-time = "2025-06-18T07:00:42.843Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8d/96/04e7b441807b26b794da5b11e59ed7f83b2cf8af202bd7eba8ad2fa6046e/wadler_lindig-0.1.7-py3-none-any.whl", hash = "sha256:e3ec83835570fd0a9509f969162aeb9c65618f998b1f42918cfc8d45122fe953", size = 20516, upload-time = "2025-06-18T07:00:41.684Z" },
|
||||||
|
]
|
||||||
Reference in New Issue
Block a user