feat: extract opengait_studio monorepo module
Move demo implementation into opengait_studio, retire Sports2D runtime integration, and align packaging with root-level monorepo dependency management.
This commit is contained in:
@@ -0,0 +1,171 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import cast
|
||||
from unittest import mock
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from opengait_studio.input import create_source
|
||||
from opengait_studio.visualizer import (
|
||||
DISPLAY_HEIGHT,
|
||||
DISPLAY_WIDTH,
|
||||
ImageArray,
|
||||
OpenCVVisualizer,
|
||||
)
|
||||
from opengait_studio.window import select_person
|
||||
|
||||
# Repository root: this test file lives two directory levels below it.
REPO_ROOT = Path(__file__).resolve().parents[2]
# Optional on-disk fixtures; the end-to-end test skips when either is missing.
SAMPLE_VIDEO_PATH = REPO_ROOT / "assets" / "sample.mp4"
YOLO_MODEL_PATH = REPO_ROOT / "ckpt" / "yolo11n-seg.pt"
|
||||
|
||||
def test_prepare_raw_view_float_mask_has_visible_signal() -> None:
    """A float mask with a filled square renders visibly different from an empty one."""
    visualizer = OpenCVVisualizer()

    filled = np.zeros((64, 64), dtype=np.float32)
    filled[16:48, 16:48] = 1.0
    empty = np.zeros((64, 64), dtype=np.float32)

    view_filled = visualizer._prepare_raw_view(cast(ImageArray, filled))
    assert view_filled.dtype == np.uint8
    assert view_filled.shape == (256, 176, 3)

    view_empty = visualizer._prepare_raw_view(cast(ImageArray, empty))

    # Compare only above the bottom 40-pixel band (presumably a status strip
    # that is identical for both renderings — TODO confirm in visualizer).
    top = slice(0, DISPLAY_HEIGHT - 40)
    delta = view_filled[top].astype(np.int16) - view_empty[top].astype(np.int16)
    assert int(np.count_nonzero(np.abs(delta))) > 0
||||
def test_prepare_raw_view_handles_values_slightly_above_one() -> None:
    """Float masks that overshoot 1.0 slightly must still produce a visible render."""
    visualizer = OpenCVVisualizer()

    overshoot = np.zeros((64, 64), dtype=np.float32)
    overshoot[20:40, 20:40] = 1.0001

    view = visualizer._prepare_raw_view(cast(ImageArray, overshoot))

    # Look at channel 0 above the bottom 40-pixel band only.
    region = view[: DISPLAY_HEIGHT - 40, :, 0]
    assert int(np.count_nonzero(region)) > 0
||||
def test_segmentation_view_is_normalized_only_shape() -> None:
    """The segmentation view is always emitted at the fixed display resolution."""
    visualizer = OpenCVVisualizer()
    frame_mask = np.zeros((480, 640), dtype=np.uint8)
    silhouette = np.random.rand(64, 44).astype(np.float32)
    bbox = (0, 0, 100, 100)

    view = visualizer._prepare_segmentation_view(
        cast(ImageArray, frame_mask), silhouette, bbox
    )
    assert view.shape == (DISPLAY_HEIGHT, DISPLAY_WIDTH, 3)
||||
def test_update_toggles_raw_window_with_r_key() -> None:
    """Pressing 'r' toggles the raw-mask window; 'q' makes update() return False."""
    visualizer = OpenCVVisualizer()
    frame = np.zeros((240, 320, 3), dtype=np.uint8)
    person_mask = np.zeros((240, 320), dtype=np.uint8)
    person_mask[20:100, 30:120] = 255
    silhouette = np.random.rand(64, 44).astype(np.float32)
    seg_input = np.random.rand(4, 64, 44).astype(np.float32)

    def run_update() -> bool:
        # Identical arguments for every call; only the mocked key press varies.
        return visualizer.update(
            frame,
            (10, 10, 120, 150),
            (10, 10, 120, 150),
            1,
            cast(ImageArray, person_mask),
            silhouette,
            seg_input,
            None,
            None,
            15.0,
        )

    with (
        mock.patch("cv2.namedWindow") as named_window,
        mock.patch("cv2.imshow"),
        mock.patch("cv2.destroyWindow") as destroy_window,
        mock.patch("cv2.waitKey", side_effect=[ord("r"), ord("r"), ord("q")]),
    ):
        # First 'r' press: the raw window is opened.
        assert run_update()
        assert visualizer.show_raw_window is True
        assert visualizer._raw_window_created is True

        # Second 'r' press: the raw window is closed again.
        assert run_update()
        assert visualizer.show_raw_window is False
        assert visualizer._raw_window_created is False
        assert destroy_window.called

        # 'q' press: update() signals that the display loop should stop.
        assert run_update() is False
        assert named_window.called
||||
def test_sample_video_raw_mask_shape_range_and_render_signal() -> None:
    """End-to-end: raw person masks from the sample video are well-formed and render visibly."""
    if not SAMPLE_VIDEO_PATH.is_file():
        pytest.skip(f"Missing sample video: {SAMPLE_VIDEO_PATH}")
    if not YOLO_MODEL_PATH.is_file():
        pytest.skip(f"Missing YOLO model file: {YOLO_MODEL_PATH}")

    ultralytics = pytest.importorskip("ultralytics")
    yolo_cls = getattr(ultralytics, "YOLO")

    visualizer = OpenCVVisualizer()
    detector = yolo_cls(str(YOLO_MODEL_PATH))

    mask_count = 0
    nonzero_per_frame: list[int] = []

    for frame, _meta in create_source(str(SAMPLE_VIDEO_PATH), max_frames=30):
        results = detector.track(
            frame,
            persist=True,
            verbose=False,
            classes=[0],
            device="cpu",
        )
        # Frames with no detection results at all are simply skipped.
        if not isinstance(results, list) or not results:
            continue

        person = select_person(results[0])
        if person is None:
            continue

        raw_mask, _, _, _ = person
        mask_count += 1

        mask = np.asarray(raw_mask)
        # Well-formed: 2-D, non-empty, numeric, bounded by [0, 255], with
        # at least one foreground pixel.
        assert mask.ndim == 2
        assert mask.shape[0] > 0 and mask.shape[1] > 0
        assert np.issubdtype(mask.dtype, np.number)
        assert float(mask.min()) >= 0.0
        assert float(mask.max()) <= 255.0
        assert int(np.count_nonzero(mask)) > 0

        view = visualizer._prepare_raw_view(mask)
        # Channel 0 above the bottom 40-pixel band must carry some signal.
        band = view[: DISPLAY_HEIGHT - 40, :, 0]
        nonzero_per_frame.append(int(np.count_nonzero(band)))

    assert mask_count > 0
    assert min(nonzero_per_frame) > 0
||||
Reference in New Issue
Block a user