8 Commits

19 changed files with 12861 additions and 1604 deletions

.gitignore (vendored, 2 lines changed)

@@ -10,3 +10,5 @@ wheels/
.venv
.hypothesis
samples
*.jpg
*.parquet


@@ -1,6 +1,7 @@
from collections import OrderedDict, defaultdict
from dataclasses import dataclass
from datetime import datetime
import string
from typing import Any, TypeAlias, TypedDict, Optional, Sequence
from beartype import beartype
@@ -522,10 +523,14 @@ def to_homogeneous(points: Num[Array, "N 2"] | Num[Array, "N 3"]) -> Num[Array,
raise ValueError(f"Invalid shape for points: {points.shape}")
import awkward as ak
@jaxtyped(typechecker=beartype)
def point_line_distance(
points: Num[Array, "N 3"] | Num[Array, "N 2"],
line: Num[Array, "N 3"],
description: str,
eps: float = 1e-9,
):
"""
@@ -544,6 +549,12 @@ def point_line_distance(
"""
numerator = abs(line[:, 0] * points[:, 0] + line[:, 1] * points[:, 1] + line[:, 2])
denominator = jnp.sqrt(line[:, 0] * line[:, 0] + line[:, 1] * line[:, 1])
# line_data = {"a": line[:, 0], "b": line[:, 1], "c": line[:, 2]}
# line_x_y = {"x": points[:, 0], "y": points[:, 1]}
# ak.to_parquet(
# line_data, f"/home/admin/Code/CVTH3PE/line_a_b_c_{description}.parquet"
# )
# ak.to_parquet(line_x_y, f"/home/admin/Code/CVTH3PE/line_x_y_{description}.parquet")
return numerator / (denominator + eps)
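# Illustrative sanity check of the formula above (a sketch with toy numbers,
# not part of the original change): the line x = 1, written as
# 1*x + 0*y - 1 = 0, lies at distance 2 from the point (3, 0).
def _example_point_line_distance() -> None:
    pts = jnp.array([[3.0, 0.0]])
    ln = jnp.array([[1.0, 0.0, -1.0]])  # line coefficients (a, b, c)
    assert jnp.allclose(point_line_distance(pts, ln, "sanity-check"), 2.0)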
@@ -571,7 +582,7 @@ def left_to_right_epipolar_distance(
"""
F_t = fundamental_matrix.transpose()
line1_in_2 = jnp.matmul(left, F_t)
return point_line_distance(right, line1_in_2)
return point_line_distance(right, line1_in_2, "left_to_right")
@jaxtyped(typechecker=beartype)
@@ -597,7 +608,7 @@ def right_to_left_epipolar_distance(
$$x^{\\prime T}Fx = 0$$
"""
line2_in_1 = jnp.matmul(right, fundamental_matrix)
return point_line_distance(left, line2_in_1)
return point_line_distance(left, line2_in_1, "right_to_left")
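# Illustrative sketch with a toy fundamental matrix (hypothetical data): for
# F = [e1]_x, the epipolar line of x1 = (100, 200, 1) in the other view is
# F @ x1 = (0, -1, 200), i.e. the horizontal line y = 200, so any point with
# y = 200 is at zero distance from it.
def _example_epipolar_distance() -> None:
    F = jnp.array([[0.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]])
    x1 = jnp.array([[100.0, 200.0, 1.0]])  # homogeneous point in view 1
    x2 = jnp.array([[50.0, 200.0, 1.0]])   # homogeneous point in view 2
    line_in_2 = jnp.matmul(x1, F.transpose())  # same construction as above
    assert jnp.allclose(point_line_distance(x2, line_in_2, "toy"), 0.0, atol=1e-6)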
def distance_between_epipolar_lines(


@@ -1,14 +1,20 @@
import warnings
import weakref
from collections import deque
from dataclasses import dataclass
from datetime import datetime
from datetime import datetime, timedelta
from itertools import chain
from typing import (
Any,
Callable,
Generator,
Optional,
Protocol,
Sequence,
TypeAlias,
TypedDict,
TypeVar,
Union,
cast,
overload,
)
@@ -18,18 +24,428 @@ from beartype import beartype
from beartype.typing import Mapping, Sequence
from jax import Array
from jaxtyping import Array, Float, Int, jaxtyped
from pyrsistent import PVector
from pyrsistent import PVector, v, PRecord, PMap
from app.camera import Detection
from app.camera import Detection, CameraID
TrackingID: TypeAlias = int
class TrackingPrediction(TypedDict):
velocity: Optional[Float[Array, "J 3"]]
keypoints: Float[Array, "J 3"]
class GenericVelocityFilter(Protocol):
"""
a filter interface for tracking velocity estimation
"""
def predict(self, timestamp: datetime) -> TrackingPrediction:
"""
        predict the velocity and the keypoint locations
Args:
timestamp: timestamp of the prediction
Returns:
velocity: velocity of the tracking
keypoints: keypoints of the tracking
"""
... # pylint: disable=unnecessary-ellipsis
def update(self, keypoints: Float[Array, "J 3"], timestamp: datetime) -> None:
"""
update the filter state with new measurements
Args:
keypoints: new measurements
timestamp: timestamp of the update
"""
... # pylint: disable=unnecessary-ellipsis
def get(self) -> TrackingPrediction:
"""
        get the current state of the filter
Returns:
velocity: velocity of the tracking
keypoints: keypoints of the tracking
"""
... # pylint: disable=unnecessary-ellipsis
class DummyVelocityFilter(GenericVelocityFilter):
"""
a dummy velocity filter that does nothing
"""
_keypoints_shape: tuple[int, ...]
def __init__(self, keypoints: Float[Array, "J 3"]):
self._keypoints_shape = keypoints.shape
def predict(self, timestamp: datetime) -> TrackingPrediction:
return TrackingPrediction(
velocity=None,
keypoints=jnp.zeros(self._keypoints_shape),
)
def update(self, keypoints: Float[Array, "J 3"], timestamp: datetime) -> None: ...
def get(self) -> TrackingPrediction:
return TrackingPrediction(
velocity=None,
keypoints=jnp.zeros(self._keypoints_shape),
)
class LastDifferenceVelocityFilter(GenericVelocityFilter):
"""
a naive velocity filter that uses the last difference of keypoints
"""
_last_timestamp: datetime
_last_keypoints: Float[Array, "J 3"]
_last_velocity: Optional[Float[Array, "J 3"]] = None
def __init__(self, keypoints: Float[Array, "J 3"], timestamp: datetime):
self._last_keypoints = keypoints
self._last_timestamp = timestamp
def predict(self, timestamp: datetime) -> TrackingPrediction:
delta_t_s = (timestamp - self._last_timestamp).total_seconds()
if delta_t_s <= 0:
warnings.warn(
"delta_t={}; last={}; current={}".format(
delta_t_s, self._last_timestamp, timestamp
)
)
if self._last_velocity is None:
return TrackingPrediction(
velocity=None,
keypoints=self._last_keypoints,
)
else:
if delta_t_s <= 0:
return TrackingPrediction(
velocity=self._last_velocity,
keypoints=self._last_keypoints,
)
return TrackingPrediction(
velocity=self._last_velocity,
keypoints=self._last_keypoints + self._last_velocity * delta_t_s,
)
def update(self, keypoints: Float[Array, "J 3"], timestamp: datetime) -> None:
delta_t_s = (timestamp - self._last_timestamp).total_seconds()
if delta_t_s <= 0:
pass
else:
self._last_timestamp = timestamp
self._last_velocity = (keypoints - self._last_keypoints) / delta_t_s
self._last_keypoints = keypoints
def get(self) -> TrackingPrediction:
if self._last_velocity is None:
return TrackingPrediction(
velocity=None,
keypoints=self._last_keypoints,
)
else:
return TrackingPrediction(
velocity=self._last_velocity,
keypoints=self._last_keypoints,
)
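# Usage sketch of the GenericVelocityFilter protocol (toy data, not from this
# repo): constant 1 m/s motion on every axis is extrapolated linearly.
def _example_last_difference_filter() -> None:
    t0 = datetime(2025, 1, 1, 12, 0, 0)
    flt: GenericVelocityFilter = LastDifferenceVelocityFilter(jnp.zeros((17, 3)), t0)
    flt.update(jnp.ones((17, 3)), t0 + timedelta(seconds=1))
    pred = flt.predict(t0 + timedelta(seconds=2))
    assert pred["velocity"] is not None
    assert jnp.allclose(pred["keypoints"], 2.0)  # 1 m + 1 m/s * 1 s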
class LeastMeanSquareVelocityFilter(GenericVelocityFilter):
"""
a velocity filter that uses the least mean square method to estimate the velocity
"""
_historical_3d_poses: deque[Float[Array, "J 3"]]
_historical_timestamps: deque[datetime]
_velocity: Optional[Float[Array, "J 3"]] = None
_max_samples: int
def __init__(
self,
historical_3d_poses: Sequence[Float[Array, "J 3"]],
historical_timestamps: Sequence[datetime],
max_samples: int = 10,
):
assert len(historical_3d_poses) == len(historical_timestamps)
temp = zip(historical_3d_poses, historical_timestamps)
temp_sorted = sorted(temp, key=lambda x: x[1])
self._historical_3d_poses = deque(
map(lambda x: x[0], temp_sorted), maxlen=max_samples
)
self._historical_timestamps = deque(
map(lambda x: x[1], temp_sorted), maxlen=max_samples
)
self._max_samples = max_samples
        if len(self._historical_3d_poses) < 2:
            self._velocity = None
        else:
            t_0 = self._historical_timestamps[0]
            self._update(
                jnp.array(self._historical_3d_poses),
                # datetimes cannot be turned into a JAX array directly; use
                # seconds relative to the oldest sample, mirroring `update` below
                jnp.array(
                    [(t - t_0).total_seconds() for t in self._historical_timestamps]
                ),
            )
def predict(self, timestamp: datetime) -> TrackingPrediction:
if not self._historical_3d_poses:
raise ValueError("No historical 3D poses available for prediction")
# use the latest historical detection
latest_3d_pose = self._historical_3d_poses[-1]
latest_timestamp = self._historical_timestamps[-1]
delta_t_s = (timestamp - latest_timestamp).total_seconds()
if self._velocity is None:
return TrackingPrediction(
velocity=None,
keypoints=latest_3d_pose,
)
else:
            # Linear motion model: x̂_t = x_{t'} + v_{t'} * (t - t')
predicted_3d_pose = latest_3d_pose + self._velocity * delta_t_s
return TrackingPrediction(
velocity=self._velocity, keypoints=predicted_3d_pose
)
@jaxtyped(typechecker=beartype)
def _update(
self,
keypoints: Float[Array, "N J 3"],
timestamps: Float[Array, "N"],
) -> None:
"""
        fit a linear motion model to the measurements via least squares
"""
if keypoints.shape[0] < 2:
raise ValueError("Not enough measurements to estimate velocity")
# Using least squares to fit a linear model for each joint and dimension
# X = timestamps, y = keypoints
# For each joint and each dimension, we solve for velocity
n_samples = timestamps.shape[0]
n_joints = keypoints.shape[1]
# Create design matrix for linear regression
# [t, 1] for each timestamp
X = jnp.column_stack([timestamps, jnp.ones(n_samples)])
# Reshape keypoints to solve for all joints and dimensions at once
# From [N, J, 3] to [N, J*3]
keypoints_reshaped = keypoints.reshape(n_samples, -1)
# Use JAX's lstsq to solve the least squares problem
# This is more numerically stable than manually computing pseudoinverse
coefficients, _, _, _ = jnp.linalg.lstsq(X, keypoints_reshaped, rcond=None)
# Coefficients shape is [2, J*3]
# First row: velocities, Second row: intercepts
velocities = coefficients[0].reshape(n_joints, 3)
# Update velocity
self._velocity = velocities
def update(self, keypoints: Float[Array, "J 3"], timestamp: datetime) -> None:
last_timestamp = self._historical_timestamps[-1]
assert last_timestamp <= timestamp
# deque would manage the maxlen automatically
self._historical_3d_poses.append(keypoints)
self._historical_timestamps.append(timestamp)
t_0 = self._historical_timestamps[0]
all_keypoints = jnp.array(self._historical_3d_poses)
def timestamp_to_seconds(timestamp: datetime) -> float:
assert t_0 <= timestamp
return (timestamp - t_0).total_seconds()
# timestamps relative to t_0 (the oldest detection timestamp)
        all_timestamps = jnp.array(
            # jnp.array cannot consume a lazy `map` object; materialize it first
            [timestamp_to_seconds(t) for t in self._historical_timestamps]
        )
self._update(all_keypoints, all_timestamps)
def get(self) -> TrackingPrediction:
if not self._historical_3d_poses:
raise ValueError("No historical 3D poses available")
latest_3d_pose = self._historical_3d_poses[-1]
if self._velocity is None:
return TrackingPrediction(velocity=None, keypoints=latest_3d_pose)
else:
return TrackingPrediction(velocity=self._velocity, keypoints=latest_3d_pose)
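# Quick check of the fit (a sketch with noiseless toy samples): three poses
# moving at a constant 0.5 units/s should yield exactly that slope.
def _example_least_mean_square_filter() -> None:
    t0 = datetime(2025, 1, 1, 12, 0, 0)
    poses = [jnp.full((17, 3), 0.5 * i) for i in range(3)]
    stamps = [t0 + timedelta(seconds=i) for i in range(3)]
    lms = LeastMeanSquareVelocityFilter(poses, stamps)
    assert jnp.allclose(lms.get()["velocity"], 0.5, atol=1e-4)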
class OneEuroFilter(GenericVelocityFilter):
"""
Implementation of the 1€ filter (One Euro Filter) for smoothing keypoint data.
The 1€ filter is an adaptive low-pass filter that adjusts its cutoff frequency
based on movement speed to reduce jitter during slow movements while maintaining
responsiveness during fast movements.
Reference: https://cristal.univ-lille.fr/~casiez/1euro/
"""
_x_filtered: Float[Array, "J 3"]
_dx_filtered: Optional[Float[Array, "J 3"]] = None
_last_timestamp: datetime
_min_cutoff: float
_beta: float
_d_cutoff: float
def __init__(
self,
keypoints: Float[Array, "J 3"],
timestamp: datetime,
min_cutoff: float = 1.0,
beta: float = 0.0,
d_cutoff: float = 1.0,
):
"""
Initialize the One Euro Filter.
Args:
keypoints: Initial keypoints positions
timestamp: Initial timestamp
min_cutoff: Minimum cutoff frequency (lower = more smoothing)
beta: Speed coefficient (higher = less lag during fast movements)
d_cutoff: Cutoff frequency for the derivative filter
"""
self._last_timestamp = timestamp
# Filter parameters
self._min_cutoff = min_cutoff
self._beta = beta
self._d_cutoff = d_cutoff
# Filter state
self._x_filtered = keypoints # Position filter state
self._dx_filtered = None # Initially no velocity estimate
@overload
def _smoothing_factor(self, cutoff: float, dt: float) -> float: ...
@overload
def _smoothing_factor(
        self, cutoff: Float[Array, "J 1"], dt: float
    ) -> Float[Array, "J 1"]: ...
@jaxtyped(typechecker=beartype)
def _smoothing_factor(
        self, cutoff: Union[float, Float[Array, "J 1"]], dt: float
    ) -> Union[float, Float[Array, "J 1"]]:
"""Calculate the smoothing factor for the low-pass filter."""
r = 2 * jnp.pi * cutoff * dt
return r / (r + 1)
@jaxtyped(typechecker=beartype)
def _exponential_smoothing(
self,
        a: Union[float, Float[Array, "J 1"]],  # (J, 1) broadcasts over the 3 coordinate axes
x: Float[Array, "J 3"],
x_prev: Float[Array, "J 3"],
) -> Float[Array, "J 3"]:
"""Apply exponential smoothing to the input."""
return a * x + (1 - a) * x_prev
def predict(self, timestamp: datetime) -> TrackingPrediction:
"""
Predict keypoints position at a given timestamp.
Args:
timestamp: Timestamp for prediction
Returns:
TrackingPrediction with velocity and keypoints
"""
dt = (timestamp - self._last_timestamp).total_seconds()
if self._dx_filtered is None:
return TrackingPrediction(
velocity=None,
keypoints=self._x_filtered,
)
else:
predicted_keypoints = self._x_filtered + self._dx_filtered * dt
return TrackingPrediction(
velocity=self._dx_filtered,
keypoints=predicted_keypoints,
)
def update(self, keypoints: Float[Array, "J 3"], timestamp: datetime) -> None:
"""
Update the filter with new measurements.
Args:
keypoints: New keypoint measurements
timestamp: Timestamp of the measurements
"""
dt = (timestamp - self._last_timestamp).total_seconds()
if dt <= 0:
raise ValueError(
f"new timestamp is not greater than the last timestamp; expecting: {timestamp} > {self._last_timestamp}"
)
dx = (keypoints - self._x_filtered) / dt
# Determine cutoff frequency based on movement speed
cutoff = self._min_cutoff + self._beta * jnp.linalg.norm(
dx, axis=-1, keepdims=True
)
# Apply low-pass filter to velocity
a_d = self._smoothing_factor(self._d_cutoff, dt)
self._dx_filtered = self._exponential_smoothing(
a_d,
dx,
(
jnp.zeros_like(keypoints)
if self._dx_filtered is None
else self._dx_filtered
),
)
# Apply low-pass filter to position with adaptive cutoff
a_cutoff = self._smoothing_factor(jnp.asarray(cutoff), dt)
self._x_filtered = self._exponential_smoothing(
a_cutoff, keypoints, self._x_filtered
)
# Update timestamp
self._last_timestamp = timestamp
def get(self) -> TrackingPrediction:
"""
Get the current state of the filter.
Returns:
TrackingPrediction with velocity and keypoints
"""
return TrackingPrediction(
velocity=self._dx_filtered,
keypoints=self._x_filtered,
)
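# Usage sketch (synthetic noise, not repo data): smoothing a jittery but
# stationary keypoint set; the filtered output stays much closer to zero than
# the raw samples.
def _example_one_euro_filter() -> None:
    import jax

    t0 = datetime(2025, 1, 1, 12, 0, 0)
    oef = OneEuroFilter(jnp.zeros((17, 3)), t0, min_cutoff=1.0, beta=0.0)
    key = jax.random.PRNGKey(0)
    for i in range(1, 60):
        key, sub = jax.random.split(key)
        noisy = 0.01 * jax.random.normal(sub, (17, 3))  # jitter around zero
        oef.update(noisy, t0 + timedelta(milliseconds=33 * i))
    smoothed = oef.get()["keypoints"]  # close to zero; raw samples jitter more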
@jaxtyped(typechecker=beartype)
@dataclass(frozen=True)
class Tracking:
    id: int
    """
    The tracking id
    """
class TrackingState:
    """
    immutable state of a tracking
    """
keypoints: Float[Array, "J 3"]
"""
The 3D keypoints of the tracking
@@ -41,50 +457,97 @@ class Tracking:
The last active timestamp of the tracking
"""
historical_detections: PVector[Detection]
historical_detections_by_camera: PMap[CameraID, Detection]
"""
Historical detections of the tracking.
Used for 3D re-triangulation
"""
velocity: Optional[Float[Array, "3"]] = None
"""
    May be `None`, e.g. right after the 3D pose is initialized.
    `velocity` should be updated whenever target association yields a new
    3D pose.
"""
class Tracking:
id: TrackingID
state: TrackingState
velocity_filter: GenericVelocityFilter
def __init__(
self,
id: TrackingID,
state: TrackingState,
velocity_filter: Optional[GenericVelocityFilter] = None,
):
self.id = id
self.state = state
self.velocity_filter = velocity_filter or DummyVelocityFilter(state.keypoints)
def __repr__(self) -> str:
return f"Tracking({self.id}, {self.last_active_timestamp})"
return f"Tracking({self.id}, {self.state.last_active_timestamp})"
@overload
def predict(self, time: float) -> Float[Array, "J 3"]:
"""
predict the keypoints at a given time
Args:
time: the time in seconds to predict the keypoints
Returns:
the predicted keypoints
"""
... # pylint: disable=unnecessary-ellipsis
@overload
def predict(self, time: timedelta) -> Float[Array, "J 3"]:
"""
predict the keypoints at a given time
Args:
time: the time delta to predict the keypoints
"""
... # pylint: disable=unnecessary-ellipsis
@overload
def predict(self, time: datetime) -> Float[Array, "J 3"]:
"""
predict the keypoints at a given time
Args:
time: the timestamp to predict the keypoints
"""
... # pylint: disable=unnecessary-ellipsis
def predict(
self,
delta_t_s: float,
time: float | timedelta | datetime,
) -> Float[Array, "J 3"]:
"""
Predict the 3D pose of a tracking based on its velocity.
JAX-friendly implementation that avoids Python control flow.
Args:
delta_t_s: Time delta in seconds
Returns:
Predicted 3D pose keypoints
"""
# ------------------------------------------------------------------
# Step 1 decide velocity on the Python side
# ------------------------------------------------------------------
if self.velocity is None:
velocity = jnp.zeros_like(self.keypoints) # (J, 3)
if isinstance(time, timedelta):
timestamp = self.state.last_active_timestamp + time
elif isinstance(time, datetime):
timestamp = time
else:
velocity = self.velocity # (J, 3)
timestamp = self.state.last_active_timestamp + timedelta(seconds=time)
# pylint: disable-next=unsubscriptable-object
return self.velocity_filter.predict(timestamp)["keypoints"]
# ------------------------------------------------------------------
# Step 2 pure JAX math
# ------------------------------------------------------------------
return self.keypoints + velocity * delta_t_s
def update(self, new_3d_pose: Float[Array, "J 3"], timestamp: datetime) -> None:
"""
update the tracking with a new 3D pose
Note:
            equivalent to calling `velocity_filter.update(new_3d_pose, timestamp)`
"""
self.velocity_filter.update(new_3d_pose, timestamp)
@property
def velocity(self) -> Float[Array, "J 3"]:
"""
The velocity of the tracking for each keypoint
"""
# pylint: disable-next=unsubscriptable-object
if (vel := self.velocity_filter.get()["velocity"]) is None:
return jnp.zeros_like(self.state.keypoints)
else:
return vel
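# Construction sketch: the three `predict` overloads resolve to the same
# absolute timestamp. The TrackingState fields here are assumed from
# `add_tracking` in play.py, and the empty pmap is a stand-in for real
# per-camera detections.
def _example_tracking_predict() -> None:
    from pyrsistent import pmap

    t0 = datetime(2025, 1, 1, 12, 0, 0)
    state = TrackingState(
        keypoints=jnp.zeros((17, 3)),
        last_active_timestamp=t0,
        historical_detections_by_camera=pmap({}),  # hypothetical: no detections yet
    )
    trk = Tracking(id=1, state=state)  # falls back to DummyVelocityFilter
    p1 = trk.predict(0.1)                                # seconds as float
    p2 = trk.predict(timedelta(milliseconds=100))        # relative timedelta
    p3 = trk.predict(t0 + timedelta(milliseconds=100))   # absolute datetime
    assert jnp.allclose(p1, p2) and jnp.allclose(p2, p3)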
@jaxtyped(typechecker=beartype)
@@ -97,10 +560,10 @@ class AffinityResult:
matrix: Float[Array, "T D"]
trackings: Sequence[Tracking]
detections: Sequence[Detection]
indices_T: Int[Array, "T"] # pylint: disable=invalid-name
indices_D: Int[Array, "D"] # pylint: disable=invalid-name
indices_T: Int[Array, "A"] # pylint: disable=invalid-name
indices_D: Int[Array, "A"] # pylint: disable=invalid-name
def tracking_detections(
def tracking_association(
self,
) -> Generator[tuple[float, Tracking, Detection], None, None]:
"""

filter_object_by_box.ipynb (new file, 1452 lines added)

File diff suppressed because one or more lines are too long

filter_object_by_box.py (new file, 208 lines added)

@@ -0,0 +1,208 @@
import numpy as np
import cv2
from typing import (
    TypeAlias,
    TypedDict,
)
from jaxtyping import Array, Num
from shapely.geometry import Polygon
NDArray: TypeAlias = np.ndarray
# The set of 3D triangles that make up each face of the box
box_triangles_list = [
["4", "6", "7"],
["4", "5", "6"],
["2", "5", "6"],
["1", "2", "5"],
["1", "2", "3"],
["0", "1", "3"],
["0", "3", "7"],
["0", "4", "7"],
["2", "6", "7"],
["2", "3", "7"],
["0", "4", "5"],
["0", "1", "5"],
]
class Camera_Params(TypedDict):
rvec: Num[NDArray, "3"]
tvec: Num[NDArray, "3"]
camera_matrix: Num[Array, "3 3"]
dist: Num[Array, "N"]
width: int
height: int
class KeypointDataset(TypedDict):
frame_index: int
boxes: Num[NDArray, "N 4"]
kps: Num[NDArray, "N J 2"]
kps_scores: Num[NDArray, "N J"]
# Reproject a 3D point to 2D in this view using the camera intrinsic and extrinsic parameters
def reprojet_3d_to_2d(point_3d, camera_param):
point_2d, _ = cv2.projectPoints(
objectPoints=point_3d,
rvec=np.array(camera_param.params.Rt[:3, :3]),
tvec=np.array(camera_param.params.Rt[:3, 3]),
cameraMatrix=np.array(camera_param.params.K),
distCoeffs=np.array(camera_param.params.dist_coeffs),
)
point_2d = point_2d.reshape(-1).astype(int)
return point_2d
# Compute the box's 3D vertex coordinates
def calculaterCubeVersices(position, dimensions):
[cx, cy, cz] = position
[width, height, depth] = dimensions
halfWidth = width / 2
halfHeight = height / 2
halfDepth = depth / 2
return [
[cx - halfWidth, cy - halfHeight, cz - halfDepth],
[cx + halfWidth, cy - halfHeight, cz - halfDepth],
[cx + halfWidth, cy + halfHeight, cz - halfDepth],
[cx - halfWidth, cy + halfHeight, cz - halfDepth],
[cx - halfWidth, cy - halfHeight, cz + halfDepth],
[cx + halfWidth, cy - halfHeight, cz + halfDepth],
[cx + halfWidth, cy + halfHeight, cz + halfDepth],
[cx - halfWidth, cy + halfHeight, cz + halfDepth],
]
# Build the box's 3D vertex table
def calculater_box_3d_points():
    # Box origin position, offset relative to the center of the hexahedron
    box_ori_position = [0.205 + 0.2, 0.205 + 0.50, -0.205 - 0.45]
    # Box dimensions [width, height, depth] in meters
    box_geometry = [0.65, 1.8, 1]
    filter_box_points_3d = calculaterCubeVersices(box_ori_position, box_geometry)
filter_box_points_3d = {
str(index): element for index, element in enumerate(filter_box_points_3d)
}
return filter_box_points_3d
# Compute the 2D reprojection of the box vertices
def calculater_box_2d_points(filter_box_points_3d, camera_param):
    box_points_2d = dict()
    for element_index, element_point_3d in enumerate(filter_box_points_3d.values()):
        box_points_2d[str(element_index)] = reprojet_3d_to_2d(
            np.array(element_point_3d), camera_param
        ).tolist()
    return box_points_2d
# Collect the 2D coordinates of every face triangle of the box
def calculater_box_common_scope(box_points_2d):
    box_triangles_all_points = []
    # Iterate over the triangles
    for i in range(len(box_triangles_list)):
        # Gather the 2D coordinates of a single triangle
        single_triangles = []
        for element_key in box_triangles_list[i]:
            single_triangles.append(box_points_2d[element_key])
        box_triangles_all_points.append(single_triangles)
    return box_triangles_all_points
def calculate_triangle_union(triangles):
"""
计算多个三角形的并集区域
参数:
triangles: 包含多个三角形的列表,每个三角形由三个点的坐标组成
返回:
union_area: 并集区域的面积
union_polygon: 表示并集区域的多边形对象
"""
    # Build one shapely Polygon per triangle
    polygons = [Polygon(tri) for tri in triangles]
    # Accumulate the union
    union_polygon = polygons[0]
    for polygon in polygons[1:]:
        union_polygon = union_polygon.union(polygon)
    # Area of the union region
    union_area = union_polygon.area
return union_area, union_polygon
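# Illustrative check (toy coordinates): two right triangles sharing the
# diagonal of the unit square union to the full square, so the area is 1.
def _example_triangle_union() -> None:
    tris = [
        [(0, 0), (1, 0), (1, 1)],
        [(0, 0), (1, 1), (0, 1)],
    ]
    area, _ = calculate_triangle_union(tris)
    assert abs(area - 1.0) < 1e-9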
# Ray casting: test whether a point lies inside the box's 2D reprojection region
def point_in_polygon(p, polygon):
    x, y = p
    n = len(polygon)
    intersections = 0
    on_boundary = False
    for i in range(n):
        xi, yi = polygon[i]
        xj, yj = polygon[(i + 1) % n]  # close the polygon
        # Is the point exactly on a vertex?
        if (x == xi and y == yi) or (x == xj and y == yj):
            on_boundary = True
            break
        # Is the point on an edge (non-vertex case)?
        if (min(xi, xj) <= x <= max(xi, xj)) and (min(yi, yj) <= y <= max(yi, yj)):
            cross = (x - xi) * (yj - yi) - (y - yi) * (xj - xi)
            if cross == 0:
                on_boundary = True
                break
        # Intersection of the ray with a (non-horizontal) edge
        if (yi > y) != (yj > y):
            slope = (xj - xi) / (yj - yi) if (yj - yi) != 0 else float("inf")
            x_intersect = xi + (y - yi) * slope
            if x <= x_intersect:
                intersections += 1
    if on_boundary:
        # boundary points are treated as outside
        return False
    return intersections % 2 == 1  # an odd crossing count means the point is inside
# Extract the coordinate points of the union region
def get_contours(union_polygon):
    if union_polygon.geom_type == "Polygon":
        # A single polygon: take its exterior ring
        x, y = union_polygon.exterior.xy
        contours = [(list(x)[i], list(y)[i]) for i in range(len(x))]
        contours = np.array(contours, np.int32)
        return contours
    # A disjoint union yields a MultiPolygon, which this helper does not handle
    raise ValueError(f"unsupported geometry type: {union_polygon.geom_type}")
# Keep only keypoints whose pair centers fall inside the box's 2D reprojection region
def filter_kps_in_contours(kps, contours) -> bool:
    # keypoint index pairs: (4, 5) and (16, 17)
    keypoint_index: list[list[int]] = [[4, 5], [16, 17]]
    centers = []
    for element_keypoint in keypoint_index:
        x1, y1 = kps[element_keypoint[0]]
        x2, y2 = kps[element_keypoint[1]]
        centers.append([(x1 + x2) / 2, (y1 + y2) / 2])
    # both pair centers must lie inside the region
    return point_in_polygon(centers[0], contours) and point_in_polygon(
        centers[1], contours
    )
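# Sketch of the 2D half of the pipeline, with hypothetical pixel coordinates
# standing in for real camera reprojections of the box.
def _example_box_filter_pipeline() -> None:
    fake_triangles = [
        [(0, 0), (100, 0), (100, 100)],
        [(0, 0), (100, 100), (0, 100)],
    ]
    _, union_poly = calculate_triangle_union(fake_triangles)
    contours = get_contours(union_poly)
    assert point_in_polygon((50, 50), contours)       # inside the square
    assert not point_in_polygon((150, 50), contours)  # outside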


@@ -0,0 +1,282 @@
[
{
"kps": [
419.0,
154.0
],
"kps_scores": 1.0,
"index": 0
},
{
"kps": [
419.0521240234375,
154.07498168945312
],
"kps_scores": 1.0,
"index": 1
},
{
"kps": [
418.5992736816406,
154.3507080078125
],
"kps_scores": 1.0,
"index": 2
},
{
"kps": [
417.0777893066406,
154.17327880859375
],
"kps_scores": 1.0,
"index": 3
},
{
"kps": [
416.8981628417969,
154.15330505371094
],
"kps_scores": 1.0,
"index": 4
},
{
"kps": [
415.1317443847656,
153.68324279785156
],
"kps_scores": 1.0,
"index": 5
},
{
"kps": [
413.2596130371094,
153.39761352539062
],
"kps_scores": 1.0,
"index": 6
},
{
"kps": [
412.7089538574219,
153.3645782470703
],
"kps_scores": 1.0,
"index": 7
},
{
"kps": [
409.3253173828125,
152.9347686767578
],
"kps_scores": 1.0,
"index": 8
},
{
"kps": [
404.74853515625,
152.21153259277344
],
"kps_scores": 1.0,
"index": 9
},
{
"kps": [
404.3977355957031,
152.19647216796875
],
"kps_scores": 1.0,
"index": 10
},
{
"kps": [
396.53131103515625,
152.09912109375
],
"kps_scores": 1.0,
"index": 11
},
{
"kps": [
393.76605224609375,
151.91282653808594
],
"kps_scores": 1.0,
"index": 12
},
{
"kps": [
393.28106689453125,
151.76124572753906
],
"kps_scores": 1.0,
"index": 13
},
{
"kps": [
383.2342834472656,
152.3790740966797
],
"kps_scores": 1.0,
"index": 14
},
{
"kps": [
379.7545471191406,
152.79055786132812
],
"kps_scores": 1.0,
"index": 15
},
{
"kps": [
379.8231506347656,
152.8155975341797
],
"kps_scores": 1.0,
"index": 16
},
{
"kps": [
370.0028076171875,
155.16213989257812
],
"kps_scores": 1.0,
"index": 17
},
{
"kps": [
366.5267639160156,
155.72059631347656
],
"kps_scores": 1.0,
"index": 18
},
{
"kps": [
366.69610595703125,
156.3056182861328
],
"kps_scores": 1.0,
"index": 19
},
{
"kps": [
359.8770751953125,
158.69798278808594
],
"kps_scores": 1.0,
"index": 20
},
{
"kps": [
356.67681884765625,
160.0414581298828
],
"kps_scores": 1.0,
"index": 21
},
{
"kps": [
348.1063232421875,
163.32858276367188
],
"kps_scores": 1.0,
"index": 22
},
{
"kps": [
343.6862487792969,
165.0043182373047
],
"kps_scores": 1.0,
"index": 23
},
{
"kps": [
339.2411804199219,
167.18580627441406
],
"kps_scores": 1.0,
"index": 24
},
{
"kps": [
330.0,
170.0
],
"kps_scores": 0.0,
"index": 25
},
{
"kps": [
322.0425720214844,
174.9293975830078
],
"kps_scores": 1.0,
"index": 26
},
{
"kps": [
310.0,
176.0
],
"kps_scores": 0.0,
"index": 27
},
{
"kps": [
305.0433349609375,
178.03123474121094
],
"kps_scores": 1.0,
"index": 28
},
{
"kps": [
293.71295166015625,
183.8294219970703
],
"kps_scores": 1.0,
"index": 29
},
{
"kps": [
291.28656005859375,
184.33445739746094
],
"kps_scores": 1.0,
"index": 30
},
{
"kps": [
281.0,
190.0
],
"kps_scores": 0.0,
"index": 31
},
{
"kps": [
272.0,
200.0
],
"kps_scores": 0.0,
"index": 32
},
{
"kps": [
261.0457763671875,
211.67132568359375
],
"kps_scores": 1.0,
"index": 33
},
{
"kps": [
239.03567504882812,
248.68519592285156
],
"kps_scores": 1.0,
"index": 34
}
]


@@ -0,0 +1,282 @@
[
{
"kps": [
474.0,
215.00003051757812
],
"kps_scores": 1.0,
"index": 0
},
{
"kps": [
474.0710754394531,
215.04542541503906
],
"kps_scores": 1.0,
"index": 1
},
{
"kps": [
476.81365966796875,
215.0387420654297
],
"kps_scores": 1.0,
"index": 2
},
{
"kps": [
479.3288269042969,
214.4371795654297
],
"kps_scores": 1.0,
"index": 3
},
{
"kps": [
479.3817443847656,
214.49256896972656
],
"kps_scores": 1.0,
"index": 4
},
{
"kps": [
483.0047302246094,
213.85231018066406
],
"kps_scores": 1.0,
"index": 5
},
{
"kps": [
484.1208801269531,
213.64219665527344
],
"kps_scores": 1.0,
"index": 6
},
{
"kps": [
484.140869140625,
213.63470458984375
],
"kps_scores": 1.0,
"index": 7
},
{
"kps": [
487.458251953125,
213.45497131347656
],
"kps_scores": 1.0,
"index": 8
},
{
"kps": [
488.8343505859375,
213.4651336669922
],
"kps_scores": 1.0,
"index": 9
},
{
"kps": [
488.899658203125,
213.48526000976562
],
"kps_scores": 1.0,
"index": 10
},
{
"kps": [
493.831787109375,
214.70533752441406
],
"kps_scores": 1.0,
"index": 11
},
{
"kps": [
495.60980224609375,
215.26271057128906
],
"kps_scores": 1.0,
"index": 12
},
{
"kps": [
495.5881042480469,
215.2436065673828
],
"kps_scores": 1.0,
"index": 13
},
{
"kps": [
502.015380859375,
217.81201171875
],
"kps_scores": 1.0,
"index": 14
},
{
"kps": [
504.2356262207031,
218.78392028808594
],
"kps_scores": 1.0,
"index": 15
},
{
"kps": [
504.2625427246094,
218.81021118164062
],
"kps_scores": 1.0,
"index": 16
},
{
"kps": [
511.97552490234375,
222.26150512695312
],
"kps_scores": 1.0,
"index": 17
},
{
"kps": [
514.9180908203125,
224.3387908935547
],
"kps_scores": 1.0,
"index": 18
},
{
"kps": [
514.7620239257812,
224.2892608642578
],
"kps_scores": 1.0,
"index": 19
},
{
"kps": [
524.9593505859375,
230.30003356933594
],
"kps_scores": 1.0,
"index": 20
},
{
"kps": [
528.3402709960938,
232.76568603515625
],
"kps_scores": 1.0,
"index": 21
},
{
"kps": [
528.371826171875,
232.73399353027344
],
"kps_scores": 1.0,
"index": 22
},
{
"kps": [
538.7906494140625,
240.9889678955078
],
"kps_scores": 1.0,
"index": 23
},
{
"kps": [
538.7630004882812,
241.00299072265625
],
"kps_scores": 1.0,
"index": 24
},
{
"kps": [
550.0248413085938,
248.24708557128906
],
"kps_scores": 1.0,
"index": 25
},
{
"kps": [
554.3512573242188,
250.6501922607422
],
"kps_scores": 1.0,
"index": 26
},
{
"kps": [
554.0921020507812,
250.47769165039062
],
"kps_scores": 1.0,
"index": 27
},
{
"kps": [
567.93212890625,
266.1629943847656
],
"kps_scores": 1.0,
"index": 28
},
{
"kps": [
571.8528442382812,
273.5104675292969
],
"kps_scores": 1.0,
"index": 29
},
{
"kps": [
571.9888305664062,
273.5711669921875
],
"kps_scores": 1.0,
"index": 30
},
{
"kps": [
586.6533203125,
309.09576416015625
],
"kps_scores": 1.0,
"index": 31
},
{
"kps": [
591.8392944335938,
325.38385009765625
],
"kps_scores": 1.0,
"index": 32
},
{
"kps": [
592.3212280273438,
325.2934265136719
],
"kps_scores": 1.0,
"index": 33
},
{
"kps": [
603.3639526367188,
362.4980773925781
],
"kps_scores": 1.0,
"index": 34
}
]


@@ -0,0 +1,282 @@
[
{
"kps": [
461.0,
164.0
],
"kps_scores": 1.0,
"index": 0
},
{
"kps": [
460.9234619140625,
164.2275390625
],
"kps_scores": 1.0,
"index": 1
},
{
"kps": [
460.93524169921875,
164.19480895996094
],
"kps_scores": 1.0,
"index": 2
},
{
"kps": [
460.4592590332031,
164.14320373535156
],
"kps_scores": 1.0,
"index": 3
},
{
"kps": [
459.9245910644531,
164.054931640625
],
"kps_scores": 1.0,
"index": 4
},
{
"kps": [
459.8656921386719,
164.08154296875
],
"kps_scores": 1.0,
"index": 5
},
{
"kps": [
456.9087219238281,
163.1707305908203
],
"kps_scores": 1.0,
"index": 6
},
{
"kps": [
455.7566223144531,
162.69784545898438
],
"kps_scores": 1.0,
"index": 7
},
{
"kps": [
455.740478515625,
162.74818420410156
],
"kps_scores": 1.0,
"index": 8
},
{
"kps": [
449.8667907714844,
161.95462036132812
],
"kps_scores": 1.0,
"index": 9
},
{
"kps": [
447.55975341796875,
162.12559509277344
],
"kps_scores": 1.0,
"index": 10
},
{
"kps": [
447.5325012207031,
162.12460327148438
],
"kps_scores": 1.0,
"index": 11
},
{
"kps": [
439.9998474121094,
162.59873962402344
],
"kps_scores": 1.0,
"index": 12
},
{
"kps": [
437.3090515136719,
162.88577270507812
],
"kps_scores": 1.0,
"index": 13
},
{
"kps": [
437.2088623046875,
162.84994506835938
],
"kps_scores": 1.0,
"index": 14
},
{
"kps": [
429.199951171875,
164.5860595703125
],
"kps_scores": 1.0,
"index": 15
},
{
"kps": [
429.32745361328125,
164.66001892089844
],
"kps_scores": 1.0,
"index": 16
},
{
"kps": [
424.8293762207031,
166.40106201171875
],
"kps_scores": 1.0,
"index": 17
},
{
"kps": [
419.6496887207031,
168.80294799804688
],
"kps_scores": 1.0,
"index": 18
},
{
"kps": [
419.6795349121094,
168.93418884277344
],
"kps_scores": 1.0,
"index": 19
},
{
"kps": [
414.8919677734375,
172.65428161621094
],
"kps_scores": 1.0,
"index": 20
},
{
"kps": [
410.0992431640625,
175.77218627929688
],
"kps_scores": 1.0,
"index": 21
},
{
"kps": [
410.0442810058594,
175.911376953125
],
"kps_scores": 1.0,
"index": 22
},
{
"kps": [
400.20159912109375,
184.33380126953125
],
"kps_scores": 1.0,
"index": 23
},
{
"kps": [
396.4606628417969,
186.7172088623047
],
"kps_scores": 1.0,
"index": 24
},
{
"kps": [
396.3185119628906,
186.76808166503906
],
"kps_scores": 1.0,
"index": 25
},
{
"kps": [
382.623291015625,
192.941650390625
],
"kps_scores": 1.0,
"index": 26
},
{
"kps": [
376.8236999511719,
195.2269744873047
],
"kps_scores": 1.0,
"index": 27
},
{
"kps": [
376.66937255859375,
195.1109161376953
],
"kps_scores": 1.0,
"index": 28
},
{
"kps": [
362.7231750488281,
209.30923461914062
],
"kps_scores": 1.0,
"index": 29
},
{
"kps": [
355.9901123046875,
216.26303100585938
],
"kps_scores": 1.0,
"index": 30
},
{
"kps": [
356.3956298828125,
216.3310546875
],
"kps_scores": 1.0,
"index": 31
},
{
"kps": [
343.6780090332031,
235.2663116455078
],
"kps_scores": 1.0,
"index": 32
},
{
"kps": [
332.50238037109375,
261.8990783691406
],
"kps_scores": 1.0,
"index": 33
},
{
"kps": [
332.8721923828125,
261.7060546875
],
"kps_scores": 1.0,
"index": 34
}
]

File diff suppressed because one or more lines are too long

play.ipynb (new file, 3268 lines added)

File diff suppressed because it is too large


@@ -31,13 +31,13 @@ from typing import (
TypeVar,
cast,
overload,
Iterable,
)
import awkward as ak
import jax
import jax.numpy as jnp
import numpy as np
import orjson
from beartype import beartype
from beartype.typing import Mapping, Sequence
from cv2 import undistortPoints
@@ -46,9 +46,10 @@ from jaxtyping import Array, Float, Num, jaxtyped
from matplotlib import pyplot as plt
from numpy.typing import ArrayLike
from optax.assignment import hungarian_algorithm as linear_sum_assignment
from pyrsistent import v, pvector
from pyrsistent import pvector, v, m, pmap, PMap, freeze, thaw
from scipy.spatial.transform import Rotation as R
from typing_extensions import deprecated
from collections import defaultdict
from app.camera import (
Camera,
@@ -59,15 +60,21 @@ from app.camera import (
classify_by_camera,
)
from app.solver._old import GLPKSolver
from app.tracking import AffinityResult, Tracking
from app.tracking import (
TrackingID,
AffinityResult,
LastDifferenceVelocityFilter,
Tracking,
TrackingState,
)
from app.visualize.whole_body import visualize_whole_body
NDArray: TypeAlias = np.ndarray
# %%
DATASET_PATH = Path("samples") / "04_02"
AK_CAMERA_DATASET: ak.Array = ak.from_parquet(DATASET_PATH / "camera_params.parquet")
DELTA_T_MIN = timedelta(milliseconds=10)
AK_CAMERA_DATASET: ak.Array = ak.from_parquet(DATASET_PATH / "camera_params.parquet") # type: ignore
DELTA_T_MIN = timedelta(milliseconds=1)
display(AK_CAMERA_DATASET)
@@ -504,6 +511,142 @@ def triangulate_points_from_multiple_views_linear(
return vmap_triangulate(proj_matrices, points, conf)
# %%
@jaxtyped(typechecker=beartype)
def triangulate_one_point_from_multiple_views_linear_time_weighted(
proj_matrices: Float[Array, "N 3 4"],
points: Num[Array, "N 2"],
delta_t: Num[Array, "N"],
lambda_t: float = 10.0,
confidences: Optional[Float[Array, "N"]] = None,
) -> Float[Array, "3"]:
"""
Triangulate one point from multiple views with time-weighted linear least squares.
Implements the incremental reconstruction method from "Cross-View Tracking for Multi-Human 3D Pose"
with weighting formula: w_i = exp(-λ_t(t-t_i)) / ||c^i^T||_2
Args:
proj_matrices: Shape (N, 3, 4) projection matrices sequence
points: Shape (N, 2) point coordinates sequence
delta_t: Time differences between current time and each observation (in seconds)
lambda_t: Time penalty rate (higher values decrease influence of older observations)
confidences: Shape (N,) confidence values in range [0.0, 1.0]
Returns:
point_3d: Shape (3,) triangulated 3D point
"""
assert len(proj_matrices) == len(points)
assert len(delta_t) == len(points)
N = len(proj_matrices)
# Prepare confidence weights
confi: Float[Array, "N"]
if confidences is None:
confi = jnp.ones(N, dtype=np.float32)
else:
confi = jnp.sqrt(jnp.clip(confidences, 0, 1))
A = jnp.zeros((N * 2, 4), dtype=np.float32)
# First build the coefficient matrix without weights
for i in range(N):
x, y = points[i]
A = A.at[2 * i].set(proj_matrices[i, 2] * x - proj_matrices[i, 0])
A = A.at[2 * i + 1].set(proj_matrices[i, 2] * y - proj_matrices[i, 1])
# Then apply the time-based and confidence weights
for i in range(N):
# Calculate time-decay weight: e^(-λ_t * Δt)
time_weight = jnp.exp(-lambda_t * delta_t[i])
# Calculate normalization factor: ||c^i^T||_2
row_norm_1 = jnp.linalg.norm(A[2 * i])
row_norm_2 = jnp.linalg.norm(A[2 * i + 1])
# Apply combined weight: time_weight / row_norm * confidence
w1 = (time_weight / row_norm_1) * confi[i]
w2 = (time_weight / row_norm_2) * confi[i]
A = A.at[2 * i].mul(w1)
A = A.at[2 * i + 1].mul(w2)
# Solve using SVD
_, _, vh = jnp.linalg.svd(A, full_matrices=False)
point_3d_homo = vh[-1] # shape (4,)
# Ensure homogeneous coordinate is positive
point_3d_homo = jnp.where(
point_3d_homo[3] < 0,
-point_3d_homo,
point_3d_homo,
)
# Convert from homogeneous to Euclidean coordinates
point_3d = point_3d_homo[:3] / point_3d_homo[3]
return point_3d
@jaxtyped(typechecker=beartype)
def triangulate_points_from_multiple_views_linear_time_weighted(
proj_matrices: Float[Array, "N 3 4"],
points: Num[Array, "N P 2"],
delta_t: Num[Array, "N"],
lambda_t: float = 10.0,
confidences: Optional[Float[Array, "N P"]] = None,
) -> Float[Array, "P 3"]:
"""
Vectorized version that triangulates P points from N camera views with time-weighting.
This function uses JAX's vmap to efficiently triangulate multiple points in parallel.
Args:
proj_matrices: Shape (N, 3, 4) projection matrices for N cameras
points: Shape (N, P, 2) 2D points for P keypoints across N cameras
delta_t: Shape (N,) time differences between current time and each camera's timestamp (seconds)
lambda_t: Time penalty rate (higher values decrease influence of older observations)
confidences: Shape (N, P) confidence values for each point in each camera
Returns:
points_3d: Shape (P, 3) triangulated 3D points
"""
N, P, _ = points.shape
assert (
proj_matrices.shape[0] == N
), "Number of projection matrices must match number of cameras"
assert delta_t.shape[0] == N, "Number of time deltas must match number of cameras"
if confidences is None:
# Create uniform confidences if none provided
conf = jnp.ones((N, P), dtype=jnp.float32)
else:
conf = confidences
# Define the vmapped version of the single-point function
# We map over the second dimension (P points) of the input arrays
vmap_triangulate = jax.vmap(
triangulate_one_point_from_multiple_views_linear_time_weighted,
in_axes=(
None,
1,
None,
None,
1,
), # proj_matrices and delta_t static, map over points
out_axes=0, # Output has first dimension corresponding to points
)
# For each point p, extract the 2D coordinates from all cameras and triangulate
return vmap_triangulate(
proj_matrices, # (N, 3, 4) - static across points
points, # (N, P, 2) - map over dim 1 (P)
delta_t, # (N,) - static across points
lambda_t, # scalar - static
conf, # (N, P) - map over dim 1 (P)
)
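# %%
# Sanity-check sketch with two toy pinhole views P1 = [I|0] and P2 = [I|t]
# observing the point (0, 0, 5); the time weighting rescales rows of the DLT
# system but leaves this exact, noiseless solution unchanged.
def _example_time_weighted_triangulation() -> None:
    P1 = jnp.concatenate([jnp.eye(3), jnp.zeros((3, 1))], axis=1)
    P2 = P1.at[0, 3].set(-1.0)  # second camera shifted along x
    proj = jnp.stack([P1, P2])  # (N=2, 3, 4)
    pts = jnp.array([[[0.0, 0.0]], [[-0.2, 0.0]]])  # projections, shape (N=2, P=1, 2)
    dt = jnp.array([0.0, 0.033])  # the second view is 33 ms stale
    X = triangulate_points_from_multiple_views_linear_time_weighted(proj, pts, dt)
    assert jnp.allclose(X, jnp.array([[0.0, 0.0, 5.0]]), atol=1e-3)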
# %%
@@ -524,6 +667,21 @@ def triangle_from_cluster(
# %%
def group_by_cluster_by_camera(
cluster: Sequence[Detection],
) -> PMap[CameraID, Detection]:
"""
group the detections by camera, and preserve the latest detection for each camera
"""
r: dict[CameraID, Detection] = {}
for el in cluster:
if el.camera.id in r:
eld = r[el.camera.id]
preserved = max([eld, el], key=lambda x: x.timestamp)
r[el.camera.id] = preserved
return pmap(r)
class GlobalTrackingState:
_last_id: int
_trackings: dict[int, Tracking]
@@ -542,13 +700,21 @@ class GlobalTrackingState:
return shallow_copy(self._trackings)
def add_tracking(self, cluster: Sequence[Detection]) -> Tracking:
if len(cluster) < 2:
raise ValueError(
"cluster must contain at least 2 detections to form a tracking"
)
kps_3d, latest_timestamp = triangle_from_cluster(cluster)
next_id = self._last_id + 1
tracking = Tracking(
id=next_id,
tracking_state = TrackingState(
keypoints=kps_3d,
last_active_timestamp=latest_timestamp,
historical_detections=v(*cluster),
historical_detections_by_camera=group_by_cluster_by_camera(cluster),
)
tracking = Tracking(
id=next_id,
state=tracking_state,
velocity_filter=LastDifferenceVelocityFilter(kps_3d, latest_timestamp),
)
self._trackings[next_id] = tracking
self._last_id = next_id
@@ -671,11 +837,7 @@ def perpendicular_distance_camera_2d_points_to_tracking_raycasting(
Array of perpendicular distances for each keypoint
"""
camera = detection.camera
# Use the delta_t supplied by the caller, but clamp to DELTA_T_MIN to
# avoid division-by-zero / exploding affinities.
delta_t = max(delta_t, DELTA_T_MIN)
delta_t_s = delta_t.total_seconds()
predicted_pose = tracking.predict(delta_t_s)
predicted_pose = tracking.predict(delta_t)
# Back-project the 2D points to 3D space
# intersection with z=0 plane
@@ -755,12 +917,12 @@ def calculate_tracking_detection_affinity(
Combined affinity score
"""
camera = detection.camera
delta_t_raw = detection.timestamp - tracking.last_active_timestamp
delta_t_raw = detection.timestamp - tracking.state.last_active_timestamp
# Clamp delta_t to avoid division-by-zero / exploding affinity.
delta_t = max(delta_t_raw, DELTA_T_MIN)
# Calculate 2D affinity
tracking_2d_projection = camera.project(tracking.keypoints)
tracking_2d_projection = camera.project(tracking.state.keypoints)
w, h = camera.params.image_size
distance_2d = calculate_distance_2d(
tracking_2d_projection,
@@ -840,7 +1002,7 @@ def calculate_camera_affinity_matrix_jax(
# === Tracking-side tensors ===
kps3d_trk: Float[Array, "T J 3"] = jnp.stack(
[trk.keypoints for trk in trackings]
[trk.state.keypoints for trk in trackings]
) # (T, J, 3)
J = kps3d_trk.shape[1]
# === Detection-side tensors ===
@@ -857,12 +1019,12 @@
# --- timestamps ----------
t0 = min(
chain(
(trk.last_active_timestamp for trk in trackings),
(trk.state.last_active_timestamp for trk in trackings),
(det.timestamp for det in camera_detections),
)
).timestamp() # common origin (float)
ts_trk = jnp.array(
[trk.last_active_timestamp.timestamp() - t0 for trk in trackings],
[trk.state.last_active_timestamp.timestamp() - t0 for trk in trackings],
dtype=jnp.float32, # now small, ms-scale fits in fp32
)
ts_det = jnp.array(
@@ -1033,8 +1195,82 @@ display(affinities)
# %%
def update_tracking(tracking: Tracking, detection: Detection):
    delta_t_ = detection.timestamp - tracking.last_active_timestamp
    delta_t = max(delta_t_, DELTA_T_MIN)
    return tracking
def affinity_result_by_tracking(
    results: Iterable[AffinityResult],
    min_affinity: float = 0.0,
) -> dict[TrackingID, list[Detection]]:
    """
    Group affinity results by tracking ID.
    Args:
        results: the affinity results to group
        min_affinity: the minimum affinity to consider
    Returns:
        a dictionary mapping tracking IDs to a list of detections
    """
res: dict[TrackingID, list[Detection]] = defaultdict(list)
for affinity_result in results:
for affinity, t, d in affinity_result.tracking_association():
if affinity < min_affinity:
continue
res[t.id].append(d)
return res
def update_tracking(
tracking: Tracking,
detections: Sequence[Detection],
max_delta_t: timedelta = timedelta(milliseconds=100),
lambda_t: float = 10.0,
) -> None:
"""
update the tracking with a new set of detections
Args:
tracking: the tracking to update
detections: the detections to update the tracking with
max_delta_t: the maximum time difference between the last active timestamp and the latest detection
lambda_t: the lambda value for the time difference
Note:
the function would mutate the tracking object
"""
last_active_timestamp = tracking.state.last_active_timestamp
latest_timestamp = max(d.timestamp for d in detections)
d = thaw(tracking.state.historical_detections_by_camera)
for detection in detections:
d[detection.camera.id] = detection
    # Drop detections that are too stale relative to the newest one; iterate
    # over a copy because the dict is mutated inside the loop.
    for camera_id, detection in list(d.items()):
        if latest_timestamp - detection.timestamp > max_delta_t:
            del d[camera_id]
new_detections = freeze(d)
new_detections_list = list(new_detections.values())
project_matrices = jnp.stack(
[detection.camera.params.projection_matrix for detection in new_detections_list]
)
    delta_t = jnp.array(
        [
            # age of each detection relative to the newest one, i.e. (t - t_i),
            # so older observations receive smaller triangulation weights
            (latest_timestamp - detection.timestamp).total_seconds()
            for detection in new_detections_list
        ]
    )
kps = jnp.stack([detection.keypoints for detection in new_detections_list])
conf = jnp.stack([detection.confidences for detection in new_detections_list])
kps_3d = triangulate_points_from_multiple_views_linear_time_weighted(
project_matrices, kps, delta_t, lambda_t, conf
)
new_state = TrackingState(
keypoints=kps_3d,
last_active_timestamp=latest_timestamp,
historical_detections_by_camera=new_detections,
)
tracking.update(kps_3d, latest_timestamp)
tracking.state = new_state
# %%
affinity_results_by_tracking = affinity_result_by_tracking(affinities.values())
for tracking_id, detections in affinity_results_by_tracking.items():
update_tracking(global_tracking_state.trackings[tracking_id], detections)
# %%

plot_epipolar_lines.ipynb (new file, 406 lines added)

File diff suppressed because one or more lines are too long


@@ -14,6 +14,7 @@ dependencies = [
"jaxtyping>=0.2.38",
"jupytext>=1.17.0",
"matplotlib>=3.10.1",
"more-itertools>=10.7.0",
"opencv-python-headless>=4.11.0.86",
"optax>=0.2.4",
"orjson>=3.10.15",
@@ -23,6 +24,7 @@ dependencies = [
"pyrsistent>=0.20.0",
"pytest>=8.3.5",
"scipy>=1.15.2",
"shapely>=2.1.1",
"torch>=2.6.0",
"torchvision>=0.21.0",
"typeguard>=4.4.2",

File diff suppressed because one or more lines are too long

rebuild_by_epipolar_line.py (new file, 1062 lines added)

File diff suppressed because it is too large

File diff suppressed because it is too large

smooth_3d_kps.ipynb (new file, 122 lines added)

@@ -0,0 +1,122 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 16,
"id": "0d48b7eb",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"from pathlib import Path\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "dfd27584",
"metadata": {},
"outputs": [],
"source": [
"KPS_PATH = Path(\"samples/WeiHua_03.json\")\n",
"with open(KPS_PATH, \"r\") as file:\n",
" data = json.load(file)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "360f9c50",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'index:1, shape: (33, 133, 3)'"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'index:2, shape: (662, 133, 3)'"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"for item_object_index in data.keys():\n",
" item_object = np.array(data[item_object_index])\n",
" display(f'index:{item_object_index}, shape: {item_object.shape}')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 对data['2']的662帧3d关键点数据进行滑动窗口平滑处理\n",
"object_points = np.array(data['2']) # shape: (662, 133, 3)\n",
"window_size = 5\n",
"kernel = np.ones(window_size) / window_size\n",
"# 对每个关键点的每个坐标轴分别做滑动平均\n",
"smoothed_points = np.zeros_like(object_points)\n",
"# 遍历133个关节\n",
"for kp_idx in range(object_points.shape[1]):\n",
" # 遍历每个关节的空间三维坐标点\n",
" for axis in range(3):\n",
" # 对第i帧的滑动平滑方式 smoothed[i] = (point[i-2] + point[i-1] + point[i] + point[i+1] + point[i+2]) / 5\n",
" smoothed_points[:, kp_idx, axis] = np.convolve(object_points[:, kp_idx, axis], kernel, mode='same')"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "24c6c0c9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'smoothed_points shape: (662, 133, 3)'"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(f'smoothed_points shape: {smoothed_points.shape}')\n",
"with open(\"samples/smoothed_3d_kps.json\", \"w\") as file:\n",
" json.dump({'1':smoothed_points.tolist()}, file)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "cvth3pe",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,193 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"id": "11cc2345",
"metadata": {},
"outputs": [],
"source": [
"import awkward as ak\n",
"import numpy as np\n",
"from pathlib import Path"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "84348d97",
"metadata": {},
"outputs": [],
"source": [
"CAMERA_INDEX ={\n",
" 2:\"5602\",\n",
" 4:\"5604\",\n",
"}\n",
"index = 4\n",
"CAMERA_PATH = Path(\"/home/admin/Documents/ActualTest_QuanCheng/camera_ex_params_1_2025_4_20/camera_params\")\n",
"camera_data = ak.from_parquet(CAMERA_PATH / CAMERA_INDEX[index]/ \"extrinsic.parquet\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1d771740",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<pre>[{rvec: [[-2.26], [0.0669], [-2.15]], tvec: [[0.166], ...]},\n",
" {rvec: [[2.07], [0.144], [2.21]], tvec: [[0.143], ...]},\n",
" {rvec: [[2.09], [0.0872], [2.25]], tvec: [[0.141], ...]},\n",
" {rvec: [[2.16], [0.172], [2.09]], tvec: [[0.162], ...]},\n",
" {rvec: [[2.15], [0.18], [2.09]], tvec: [[0.162], ...]},\n",
" {rvec: [[-2.22], [0.117], [-2.14]], tvec: [[0.162], ...]},\n",
" {rvec: [[2.18], [0.176], [2.08]], tvec: [[0.166], ...]},\n",
" {rvec: [[2.18], [0.176], [2.08]], tvec: [[0.166], ...]},\n",
" {rvec: [[-2.26], [0.116], [-2.1]], tvec: [[0.17], ...]},\n",
" {rvec: [[-2.26], [0.124], [-2.09]], tvec: [[0.171], ...]},\n",
" ...,\n",
" {rvec: [[-2.2], [0.0998], [-2.17]], tvec: [[0.158], ...]},\n",
" {rvec: [[-2.2], [0.0998], [-2.17]], tvec: [[0.158], ...]},\n",
" {rvec: [[2.12], [0.151], [2.16]], tvec: [[0.152], ...]},\n",
" {rvec: [[-2.3], [0.0733], [-2.1]], tvec: [[0.175], ...]},\n",
" {rvec: [[2.1], [0.16], [2.17]], tvec: [[0.149], ...]},\n",
" {rvec: [[2.1], [0.191], [2.13]], tvec: [[0.153], ...]},\n",
" {rvec: [[2.11], [0.196], [2.12]], tvec: [[0.154], ...]},\n",
" {rvec: [[2.19], [0.171], [2.08]], tvec: [[0.166], ...]},\n",
" {rvec: [[2.24], [0.0604], [2.12]], tvec: [[0.166], ...]}]\n",
"---------------------------------------------------------------------------\n",
"backend: cpu\n",
"nbytes: 10.1 kB\n",
"type: 90 * {\n",
" rvec: var * var * float64,\n",
" tvec: var * var * float64\n",
"}</pre>"
],
"text/plain": [
"<Array [{rvec: [...], tvec: [...]}, ..., {...}] type='90 * {rvec: var * var...'>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(camera_data)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "59fde11b",
"metadata": {},
"outputs": [],
"source": [
"data = []\n",
"for element in camera_data:\n",
" rvec = element[\"rvec\"]\n",
" if rvec[0]<0:\n",
" data.append({\"rvec\": rvec, \"tvec\": element[\"tvec\"]})"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "4792cbc4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<pyarrow._parquet.FileMetaData object at 0x7799cbf62d40>\n",
" created_by: parquet-cpp-arrow version 19.0.1\n",
" num_columns: 2\n",
" num_rows: 30\n",
" num_row_groups: 1\n",
" format_version: 2.6\n",
" serialized_size: 0"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ak.to_parquet(ak.from_iter(data),\"/home/admin/Documents/ActualTest_QuanCheng/camera_ex_params_1_2025_4_20/camera_params/5604/re_extrinsic.parquet\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "8225ee33",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<pre>[{rvec: [[-2.26], [0.0669], [-2.15]], tvec: [[0.166], ...]},\n",
" {rvec: [[-2.22], [0.117], [-2.14]], tvec: [[0.162], ...]},\n",
" {rvec: [[-2.26], [0.116], [-2.1]], tvec: [[0.17], ...]},\n",
" {rvec: [[-2.26], [0.124], [-2.09]], tvec: [[0.171], ...]},\n",
" {rvec: [[-2.24], [0.133], [-2.11]], tvec: [[0.167], ...]},\n",
" {rvec: [[-2.22], [0.0556], [-2.2]], tvec: [[0.158], ...]},\n",
" {rvec: [[-2.27], [0.119], [-2.09]], tvec: [[0.172], ...]},\n",
" {rvec: [[-2.34], [0.0663], [-2.06]], tvec: [[0.181], ...]},\n",
" {rvec: [[-2.21], [0.117], [-2.15]], tvec: [[0.161], ...]},\n",
" {rvec: [[-2.33], [0.0731], [-2.08]], tvec: [[0.179], ...]},\n",
" ...,\n",
" {rvec: [[-2.23], [0.106], [-2.13]], tvec: [[0.166], ...]},\n",
" {rvec: [[-2.21], [0.054], [-2.2]], tvec: [[0.157], ...]},\n",
" {rvec: [[-2.19], [0.0169], [-2.25]], tvec: [[0.151], ...]},\n",
" {rvec: [[-2.2], [0.0719], [-2.19]], tvec: [[0.157], ...]},\n",
" {rvec: [[-2.22], [0.0726], [-2.18]], tvec: [[0.161], ...]},\n",
" {rvec: [[-2.2], [0.0742], [-2.19]], tvec: [[0.158], ...]},\n",
" {rvec: [[-2.2], [0.0998], [-2.17]], tvec: [[0.158], ...]},\n",
" {rvec: [[-2.2], [0.0998], [-2.17]], tvec: [[0.158], ...]},\n",
" {rvec: [[-2.3], [0.0733], [-2.1]], tvec: [[0.175], ...]}]\n",
"---------------------------------------------------------------------------\n",
"backend: cpu\n",
"nbytes: 3.4 kB\n",
"type: 30 * {\n",
" rvec: var * var * float64,\n",
" tvec: var * var * float64\n",
"}</pre>"
],
"text/plain": [
"<Array [{rvec: [...], tvec: [...]}, ..., {...}] type='30 * {rvec: var * var...'>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"temp_data = ak.from_parquet(\"/home/admin/Documents/ActualTest_QuanCheng/camera_ex_params_1_2025_4_20/camera_params/5604/re_extrinsic.parquet\")\n",
"display(temp_data)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

uv.lock (generated, 3745 lines changed)

File diff suppressed because it is too large