# Commit e90e53ffaf: point demo defaults and integration tests to the
# checkpoint directory so runtime configuration matches the canonical
# model artifact location.
from __future__ import annotations

import importlib.util
import json
import pickle
import subprocess
import sys
import time
from pathlib import Path
from typing import Final, cast

import pytest
import torch

from opengait.demo.sconet_demo import ScoNetDemo
|
# Repository root: this test file sits two directory levels below the top.
REPO_ROOT: Final[Path] = Path(__file__).resolve().parents[2]

# Short demo clip used as the video source for integration runs.
SAMPLE_VIDEO_PATH: Final[Path] = REPO_ROOT / "assets" / "sample.mp4"

# Canonical ScoNet checkpoint artifact location (ckpt/ directory).
CHECKPOINT_PATH: Final[Path] = REPO_ROOT / "ckpt" / "ScoNet-20000.pt"

# Demo/runtime configuration consumed by the CLI under test.
CONFIG_PATH: Final[Path] = REPO_ROOT / "configs" / "sconet" / "sconet_scoliosis1k.yaml"

# YOLO segmentation weights used for silhouette extraction.
YOLO_MODEL_PATH: Final[Path] = REPO_ROOT / "ckpt" / "yolo11n-seg.pt"
|
def _device_for_runtime() -> str:
|
|
return "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
|
|
|
|
def _run_pipeline_cli(
    *args: str, timeout_seconds: int = 120
) -> subprocess.CompletedProcess[str]:
    """Invoke ``python -m opengait.demo`` from the repo root and capture output.

    Never raises on a nonzero exit status (``check=False``); callers inspect
    the returned ``CompletedProcess`` themselves.
    """
    invocation = [sys.executable, "-m", "opengait.demo"]
    invocation.extend(args)
    completed = subprocess.run(
        invocation,
        cwd=REPO_ROOT,
        capture_output=True,
        text=True,
        check=False,
        timeout=timeout_seconds,
    )
    return completed
|
def _require_integration_assets() -> None:
    """Skip the current test unless every required on-disk asset exists."""
    required = (
        (SAMPLE_VIDEO_PATH, "Missing sample video"),
        (CONFIG_PATH, "Missing config"),
        (YOLO_MODEL_PATH, "Missing YOLO model file"),
    )
    for asset_path, reason in required:
        if not asset_path.is_file():
            pytest.skip(f"{reason}: {asset_path}")
|
@pytest.fixture
def compatible_checkpoint_path(tmp_path: Path) -> Path:
    """Create a throwaway checkpoint whose weights match the demo config.

    Builds an untrained ScoNetDemo on CPU and saves its state dict, so
    integration tests can load a structurally compatible checkpoint without
    depending on the real trained artifact.
    """
    if not CONFIG_PATH.is_file():
        pytest.skip(f"Missing config: {CONFIG_PATH}")

    target = tmp_path / "sconet-compatible.pt"
    demo_model = ScoNetDemo(cfg_path=str(CONFIG_PATH), checkpoint_path=None, device="cpu")
    torch.save(demo_model.state_dict(), target)
    return target
|
def _extract_prediction_json_lines(stdout: str) -> list[dict[str, object]]:
|
|
required_keys = {
|
|
"frame",
|
|
"track_id",
|
|
"label",
|
|
"confidence",
|
|
"window",
|
|
"timestamp_ns",
|
|
}
|
|
predictions: list[dict[str, object]] = []
|
|
|
|
for line in stdout.splitlines():
|
|
stripped = line.strip()
|
|
if not stripped:
|
|
continue
|
|
try:
|
|
payload_obj = cast(object, json.loads(stripped))
|
|
except json.JSONDecodeError:
|
|
continue
|
|
|
|
if not isinstance(payload_obj, dict):
|
|
continue
|
|
payload = cast(dict[str, object], payload_obj)
|
|
if required_keys.issubset(payload.keys()):
|
|
predictions.append(payload)
|
|
|
|
return predictions
|
|
|
|
|
|
def _assert_prediction_schema(prediction: dict[str, object]) -> None:
|
|
assert isinstance(prediction["frame"], int)
|
|
assert isinstance(prediction["track_id"], int)
|
|
|
|
label = prediction["label"]
|
|
assert isinstance(label, str)
|
|
assert label in {"negative", "neutral", "positive"}
|
|
|
|
confidence = prediction["confidence"]
|
|
assert isinstance(confidence, (int, float))
|
|
confidence_value = float(confidence)
|
|
assert 0.0 <= confidence_value <= 1.0
|
|
|
|
window_obj = prediction["window"]
|
|
assert isinstance(window_obj, int)
|
|
assert window_obj >= 0
|
|
|
|
assert isinstance(prediction["timestamp_ns"], int)
|
|
|
|
def test_pipeline_cli_fps_benchmark_smoke(
    compatible_checkpoint_path: Path,
) -> None:
    """Smoke-check end-to-end throughput against a very conservative FPS floor."""
    _require_integration_assets()

    frame_cap = 90
    t0 = time.perf_counter()
    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "5",
        "--stride", "1",
        "--max-frames", str(frame_cap),
        timeout_seconds=180,
    )
    wall_seconds = time.perf_counter() - t0

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )
    parsed = _extract_prediction_json_lines(proc.stdout)
    assert parsed, "Expected prediction output for FPS benchmark run"

    for payload in parsed:
        _assert_prediction_schema(payload)

    # Each distinct frame index counts as one unit of processed work.
    distinct_frames = {
        value
        for payload in parsed
        for value in [payload["frame"]]
        if isinstance(value, int)
    }
    unit_count = len(distinct_frames)
    if unit_count < 5:
        pytest.skip(
            "Insufficient observed frame samples for stable FPS benchmark in this environment"
        )
    if wall_seconds <= 0:
        pytest.skip("Non-positive elapsed time; cannot compute FPS benchmark")

    throughput = unit_count / wall_seconds
    floor = 0.2
    assert throughput >= floor, (
        "Observed FPS below conservative CI threshold: "
        f"{throughput:.3f} < {floor:.3f} "
        f"(observed_units={unit_count}, elapsed_seconds={wall_seconds:.3f})"
    )
|
def test_pipeline_cli_happy_path_outputs_json_predictions(
    compatible_checkpoint_path: Path,
) -> None:
    """A full run should emit schema-valid JSON predictions on stdout."""
    _require_integration_assets()

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "10",
        "--stride", "10",
        "--max-frames", "120",
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )
    parsed = _extract_prediction_json_lines(proc.stdout)
    assert parsed, (
        "Expected at least one prediction JSON line in stdout. "
        f"stdout:\n{proc.stdout}\nstderr:\n{proc.stderr}"
    )
    for payload in parsed:
        _assert_prediction_schema(payload)

    # No messaging backend should be contacted unless explicitly configured.
    assert "Connected to NATS" not in proc.stderr
|
def test_pipeline_cli_max_frames_caps_output_frames(
    compatible_checkpoint_path: Path,
) -> None:
    """--max-frames must bound every frame index in the emitted predictions."""
    _require_integration_assets()

    frame_cap = 20
    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "5",
        "--stride", "1",
        "--max-frames", str(frame_cap),
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )
    parsed = _extract_prediction_json_lines(proc.stdout)
    assert parsed, "Expected prediction output with --max-frames run"

    for payload in parsed:
        _assert_prediction_schema(payload)
        frame_index = payload["frame"]
        assert isinstance(frame_index, int)
        # Frame indices are zero-based, so they must be strictly below the cap.
        assert frame_index < frame_cap
|
def test_pipeline_cli_invalid_source_path_returns_user_error() -> None:
    """A nonexistent --source must yield exit code 2 and a clear error message."""
    proc = _run_pipeline_cli(
        "--source", "/definitely/not/a/real/video.mp4",
        "--checkpoint", "/tmp/unused-checkpoint.pt",
        "--config", str(CONFIG_PATH),
        timeout_seconds=30,
    )

    # Exit code 2 is the CLI's user-error convention.
    assert proc.returncode == 2
    assert "Error: Video source not found" in proc.stderr
|
def test_pipeline_cli_invalid_checkpoint_path_returns_user_error() -> None:
    """A missing --checkpoint must yield exit code 2 and a clear error message."""
    # Only the video and config are needed here; the checkpoint is meant to be absent.
    for asset_path, reason in (
        (SAMPLE_VIDEO_PATH, "Missing sample video"),
        (CONFIG_PATH, "Missing config"),
    ):
        if not asset_path.is_file():
            pytest.skip(f"{reason}: {asset_path}")

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(REPO_ROOT / "ckpt" / "missing-checkpoint.pt"),
        "--config", str(CONFIG_PATH),
        timeout_seconds=30,
    )

    assert proc.returncode == 2
    assert "Error: Checkpoint not found" in proc.stderr
|
def test_pipeline_cli_preprocess_only_requires_export_path(
    compatible_checkpoint_path: Path,
) -> None:
    """--preprocess-only without --silhouette-export-path is a usage error."""
    _require_integration_assets()

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--preprocess-only",
        "--max-frames", "10",
        timeout_seconds=30,
    )

    # Exit code 2 is the CLI's user-error convention.
    assert proc.returncode == 2
    assert "--silhouette-export-path is required" in proc.stderr
|
def test_pipeline_cli_preprocess_only_exports_pickle(
    compatible_checkpoint_path: Path,
    tmp_path: Path,
) -> None:
    """Preprocess-only mode should write a pickle of silhouette records."""
    _require_integration_assets()

    export_path = tmp_path / "silhouettes.pkl"

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--preprocess-only",
        "--silhouette-export-path", str(export_path),
        "--silhouette-export-format", "pickle",
        "--max-frames", "30",
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )

    # The export file must exist and deserialize into a non-empty list.
    assert export_path.is_file(), f"Export file not found: {export_path}"

    with export_path.open("rb") as handle:
        exported = pickle.load(handle)

    assert isinstance(exported, list)
    assert len(exported) > 0, "Expected at least one silhouette"

    # Each record carries frame/track/timestamp metadata plus the mask itself.
    for record in exported:
        assert isinstance(record, dict)
        for key in ("frame", "track_id", "timestamp_ns", "silhouette"):
            assert key in record
        assert isinstance(record["frame"], int)
        assert isinstance(record["track_id"], int)
        assert isinstance(record["timestamp_ns"], int)
|
def test_pipeline_cli_result_export_json(
    compatible_checkpoint_path: Path,
    tmp_path: Path,
) -> None:
    """Predictions should be written as JSON lines to --result-export-path."""
    _require_integration_assets()

    export_path = tmp_path / "results.jsonl"

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "10",
        "--stride", "10",
        "--result-export-path", str(export_path),
        "--result-export-format", "json",
        "--max-frames", "60",
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )

    assert export_path.is_file(), f"Export file not found: {export_path}"

    # Each non-blank line of the export file is one JSON-encoded prediction.
    with open(export_path, "r", encoding="utf-8") as handle:
        exported = [
            cast(dict[str, object], json.loads(text))
            for text in (raw.strip() for raw in handle)
            if text
        ]

    assert len(exported) > 0, "Expected at least one prediction in export"

    for payload in exported:
        _assert_prediction_schema(payload)
|
def test_pipeline_cli_result_export_pickle(
    compatible_checkpoint_path: Path,
    tmp_path: Path,
) -> None:
    """Predictions should round-trip through a pickle export file."""
    _require_integration_assets()

    export_path = tmp_path / "results.pkl"

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "10",
        "--stride", "10",
        "--result-export-path", str(export_path),
        "--result-export-format", "pickle",
        "--max-frames", "60",
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )

    assert export_path.is_file(), f"Export file not found: {export_path}"

    # The pickle payload must be a non-empty list of schema-valid predictions.
    with export_path.open("rb") as handle:
        exported = pickle.load(handle)

    assert isinstance(exported, list)
    assert len(exported) > 0, "Expected at least one prediction in export"

    for payload in exported:
        _assert_prediction_schema(payload)
|
def test_pipeline_cli_silhouette_and_result_export(
    compatible_checkpoint_path: Path,
    tmp_path: Path,
) -> None:
    """Silhouette and result exports should both be produced in a single run."""
    _require_integration_assets()

    silhouette_export = tmp_path / "silhouettes.pkl"
    result_export = tmp_path / "results.jsonl"

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "10",
        "--stride", "10",
        "--silhouette-export-path", str(silhouette_export),
        "--silhouette-export-format", "pickle",
        "--result-export-path", str(result_export),
        "--result-export-format", "json",
        "--max-frames", "60",
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )

    # Both artifacts must exist after a single run.
    assert silhouette_export.is_file(), f"Silhouette export not found: {silhouette_export}"
    assert result_export.is_file(), f"Result export not found: {result_export}"

    # Silhouette export: non-empty pickled list.
    with silhouette_export.open("rb") as handle:
        silhouette_records = pickle.load(handle)
    assert isinstance(silhouette_records, list)
    assert len(silhouette_records) > 0

    # Result export: non-empty JSON-lines file.
    with open(result_export, "r", encoding="utf-8") as handle:
        exported = [cast(dict[str, object], json.loads(raw)) for raw in handle if raw.strip()]
    assert len(exported) > 0
|
def test_pipeline_cli_parquet_export_requires_pyarrow(
    compatible_checkpoint_path: Path,
    tmp_path: Path,
) -> None:
    """Parquet export must fail gracefully when pyarrow is not available.

    Fix: the original checked for pyarrow twice — once via
    ``importlib.util.find_spec`` and again via a redundant try/import block
    that could never add information. The duplicate check is removed.
    """
    _require_integration_assets()

    # This test is only meaningful when pyarrow is absent from the environment.
    if importlib.util.find_spec("pyarrow") is not None:
        pytest.skip("pyarrow is installed, skipping missing dependency test")

    export_path = tmp_path / "results.parquet"

    result = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "10",
        "--stride", "10",
        "--result-export-path", str(export_path),
        "--result-export-format", "parquet",
        "--max-frames", "30",
        timeout_seconds=180,
    )

    # Should fail with RuntimeError about pyarrow
    assert result.returncode == 1
    assert "parquet" in result.stderr.lower() or "pyarrow" in result.stderr.lower()
|
def test_pipeline_cli_silhouette_visualization(
    compatible_checkpoint_path: Path,
    tmp_path: Path,
) -> None:
    """Silhouette visualization should emit PNGs named by frame and track."""
    _require_integration_assets()

    visualize_dir = tmp_path / "silhouette_viz"

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--window", "10",
        "--stride", "10",
        "--silhouette-visualize-dir", str(visualize_dir),
        "--max-frames", "30",
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )

    # The visualization directory must be created and hold at least one PNG.
    assert visualize_dir.is_dir(), f"Visualization directory not found: {visualize_dir}"

    rendered = list(visualize_dir.glob("*.png"))
    assert len(rendered) > 0, "Expected at least one PNG visualization file"

    # File names encode which frame and track each silhouette came from.
    for image_path in rendered:
        assert "silhouette_frame" in image_path.name
        assert "_track" in image_path.name
|
def test_pipeline_cli_preprocess_only_with_visualization(
    compatible_checkpoint_path: Path,
    tmp_path: Path,
) -> None:
    """Preprocess-only mode can export a pickle and render PNGs together."""
    _require_integration_assets()

    export_path = tmp_path / "silhouettes.pkl"
    visualize_dir = tmp_path / "silhouette_viz"

    proc = _run_pipeline_cli(
        "--source", str(SAMPLE_VIDEO_PATH),
        "--checkpoint", str(compatible_checkpoint_path),
        "--config", str(CONFIG_PATH),
        "--device", _device_for_runtime(),
        "--yolo-model", str(YOLO_MODEL_PATH),
        "--preprocess-only",
        "--silhouette-export-path", str(export_path),
        "--silhouette-visualize-dir", str(visualize_dir),
        "--max-frames", "30",
        timeout_seconds=180,
    )

    assert proc.returncode == 0, (
        f"Expected exit code 0, got {proc.returncode}. stderr:\n{proc.stderr}"
    )

    # Pickle export must exist.
    assert export_path.is_file(), f"Export file not found: {export_path}"

    # Visualization directory must exist and contain PNGs.
    assert visualize_dir.is_dir(), f"Visualization directory not found: {visualize_dir}"
    rendered = list(visualize_dir.glob("*.png"))
    assert len(rendered) > 0, "Expected at least one PNG visualization file"

    # Pickle payload must be a non-empty list of silhouette records.
    with export_path.open("rb") as handle:
        exported = pickle.load(handle)
    assert isinstance(exported, list)
    assert len(exported) > 0

    # One PNG should exist per exported silhouette record.
    assert len(exported) == len(rendered), (
        f"Mismatch: {len(exported)} silhouettes exported but {len(rendered)} PNG files created"
    )