1691274e85
Add a Python batch wrapper around zed_svo_to_mcap for multi-camera segment exports. The new script supports dataset discovery, repeated segment-dir inputs, CSV-driven ordering, skip/probe/report flows, dry-run, and CUDA environment passthrough so kindergarten-style datasets can be converted into one bundled MCAP per segment. Extend zed_svo_to_mcap so bundled multi-camera mode accepts --end-frame with synced-group semantics. In this mode the value is interpreted as the last emitted synced frame-group index from the common synced start, while --start-frame remains unsupported. Vendor a minimal pose-config TOML and a sample segments CSV into this repo so the MCAP workflow is self-contained. Update the README to document the batch MCAP flow, use portable placeholders instead of machine-specific absolute paths, and describe the expected dataset layout explicitly.
19 lines
630 B
TOML
# Minimal pose-tracking config for zed_svo_to_mcap.
# The converter currently reads only:
# - zed.coordinate_system
# - zed.body_tracking.reference_frame
# - zed.body_tracking.set_floor_as_origin

[zed]
# Native ZED 3D/body coordinate system used when reading positional tracking.
# Supported values in this repo are IMAGE and RIGHT_HANDED_Y_UP.
coordinate_system = "IMAGE"

[zed.body_tracking]
# Reference frame used for per-camera pose estimation.
# Supported values are CAMERA and WORLD.
reference_frame = "CAMERA"

# When true, WORLD origin is placed on the floor during positional tracking.
# Only meaningful when reference_frame is WORLD; ignored for CAMERA.
set_floor_as_origin = false