From a0ad369211a7910fdda340d344ecb3ac8b5eadfc Mon Sep 17 00:00:00 2001 From: crosstyan Date: Wed, 4 Feb 2026 15:42:08 +0800 Subject: [PATCH] python workspace --- .gitattributes | 1 + .gitignore | 1 + AGENTS.md | 113 + proxy.sh | 7 + py_workspace/.python-version | 1 + py_workspace/README.md | 46 + py_workspace/UV_LOCAL_PACKAGE_GUIDE.md | 83 + py_workspace/depth_sensing.py | 131 + .../pyzed_pkg/build/lib/pyzed/__init__.py | 0 .../pyzed/sl.cpython-312-x86_64-linux-gnu.so | 3 + .../libs/pyzed_pkg/build/lib/pyzed/sl.pyi | 14672 ++++++++++++++++ py_workspace/libs/pyzed_pkg/pyproject.toml | 17 + .../libs/pyzed_pkg/pyzed.egg-info/PKG-INFO | 5 + .../libs/pyzed_pkg/pyzed.egg-info/SOURCES.txt | 8 + .../pyzed.egg-info/dependency_links.txt | 1 + .../pyzed_pkg/pyzed.egg-info/top_level.txt | 1 + py_workspace/libs/pyzed_pkg/pyzed/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 154 bytes .../pyzed/build/lib/pyzed/__init__.py | 0 .../pyzed_pkg/pyzed/build/lib/pyzed/sl.pyi | 14672 ++++++++++++++++ .../pyzed_pkg/pyzed/pyzed.egg-info/PKG-INFO | 5 + .../pyzed/pyzed.egg-info/SOURCES.txt | 9 + .../pyzed/pyzed.egg-info/dependency_links.txt | 1 + .../pyzed/pyzed.egg-info/top_level.txt | 1 + .../pyzed/sl.cpython-312-x86_64-linux-gnu.so | 3 + py_workspace/libs/pyzed_pkg/pyzed/sl.pyi | 14672 ++++++++++++++++ .../__pycache__/viewer.cpython-312.pyc | Bin 0 -> 43665 bytes py_workspace/ogl_viewer/viewer.py | 785 + py_workspace/pyproject.toml | 19 + py_workspace/uv.lock | 1659 ++ zed_settings/SN33076638.conf | 127 + zed_settings/SN41831756.conf | 102 + zed_settings/SN44289123.conf | 102 + zed_settings/SN44435674.conf | 102 + zed_settings/SN46195029.conf | 102 + zed_settings/inside_network.json | 90 + zed_settings/inside_shared.json | 90 + 37 files changed, 47631 insertions(+) create mode 100644 .gitattributes create mode 100644 AGENTS.md create mode 100755 proxy.sh create mode 100644 py_workspace/.python-version create mode 100755 py_workspace/README.md create mode 100644 py_workspace/UV_LOCAL_PACKAGE_GUIDE.md create mode 100644 py_workspace/depth_sensing.py create mode 100644 py_workspace/libs/pyzed_pkg/build/lib/pyzed/__init__.py create mode 100755 py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.cpython-312-x86_64-linux-gnu.so create mode 100644 py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.pyi create mode 100644 py_workspace/libs/pyzed_pkg/pyproject.toml create mode 100644 py_workspace/libs/pyzed_pkg/pyzed.egg-info/PKG-INFO create mode 100644 py_workspace/libs/pyzed_pkg/pyzed.egg-info/SOURCES.txt create mode 100644 py_workspace/libs/pyzed_pkg/pyzed.egg-info/dependency_links.txt create mode 100644 py_workspace/libs/pyzed_pkg/pyzed.egg-info/top_level.txt create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/__init__.py create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/__pycache__/__init__.cpython-312.pyc create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/build/lib/pyzed/__init__.py create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/build/lib/pyzed/sl.pyi create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/PKG-INFO create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/SOURCES.txt create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/dependency_links.txt create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/top_level.txt create mode 100755 py_workspace/libs/pyzed_pkg/pyzed/sl.cpython-312-x86_64-linux-gnu.so create mode 100644 py_workspace/libs/pyzed_pkg/pyzed/sl.pyi create mode 100644 
py_workspace/ogl_viewer/__pycache__/viewer.cpython-312.pyc create mode 100755 py_workspace/ogl_viewer/viewer.py create mode 100644 py_workspace/pyproject.toml create mode 100644 py_workspace/uv.lock create mode 100644 zed_settings/SN33076638.conf create mode 100755 zed_settings/SN41831756.conf create mode 100755 zed_settings/SN44289123.conf create mode 100755 zed_settings/SN44435674.conf create mode 100755 zed_settings/SN46195029.conf create mode 100644 zed_settings/inside_network.json create mode 100644 zed_settings/inside_shared.json diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..c3ad15c --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.so filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore index d9b6fb3..c7899c9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ playground/ misc/zed_installer.run +clangd-linux-* diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..141475d --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,113 @@ +# Agent Context & Reminders + +## ZED SDK Architecture + +### Streaming API vs Fusion API + +The ZED SDK provides two distinct network APIs that are often confused: + +| Feature | Streaming API | Fusion API | +|---------|---------------|------------| +| **Data Transmitted** | Compressed video (H264/H265) | Metadata only (bodies, objects, poses) | +| **Bandwidth** | 10-40 Mbps | <100 Kbps | +| **Edge Compute** | Video encoding only | Full depth NN + tracking + detection | +| **Host Compute** | Full depth + tracking + detection | Lightweight fusion only | +| **API Methods** | `enableStreaming()` / `setFromStream()` | `startPublishing()` / `subscribe()` | + +### Key Insight + +**There is NO built-in mode for streaming computed depth maps or point clouds.** The architecture forces a choice: + +1. **Streaming API**: Edge sends video → Host computes everything (depth, tracking, detection) +2. **Fusion API**: Edge computes everything → Sends only metadata (bodies/poses) + +### Code Patterns + +#### Streaming Sender (Edge) +```cpp +sl::StreamingParameters stream_params; +stream_params.codec = sl::STREAMING_CODEC::H265; +stream_params.port = 30000; +stream_params.bitrate = 12000; +zed.enableStreaming(stream_params); +``` + +#### Streaming Receiver (Host) +```cpp +sl::InitParameters init_params; +init_params.input.setFromStream("192.168.1.100", 30000); +zed.open(init_params); +// Full ZED SDK available - depth, tracking, etc. 
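+// (Added sketch, not from the original sample: once the stream is opened,
+// depth retrieval works exactly as with a local camera.)
+sl::Mat depth;
+if (zed.grab() == sl::ERROR_CODE::SUCCESS)
+    zed.retrieveMeasure(depth, sl::MEASURE::DEPTH);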
+```
+
+#### Fusion Publisher (Edge or Host)
+```cpp
+sl::CommunicationParameters comm_params;
+comm_params.setForLocalNetwork(30000);
+// or comm_params.setForIntraProcess(); for same-machine
+zed.startPublishing(comm_params);
+```
+
+#### Fusion Subscriber (Host)
+```cpp
+sl::Fusion fusion;
+sl::InitFusionParameters init_fusion_params; // Fusion takes InitFusionParameters, not the camera's InitParameters
+fusion.init(init_fusion_params);
+sl::CameraIdentifier cam(serial_number);
+fusion.subscribe(cam, comm_params, pose);
+```
+
+## Project: Multi-Camera Body Tracking
+
+### Location
+`/workspaces/zed-playground/playground/body tracking/multi-camera/cpp/`
+
+### Architecture
+- **ClientPublisher**: Receives camera streams, runs body tracking, publishes to Fusion
+- **Fusion**: Subscribes to multiple ClientPublishers, fuses body data from all cameras
+- **GLViewer**: 3D visualization of fused bodies
+
+### Camera Configuration (Hard-coded)
+From `inside_network.json`:
+
+| Serial | IP | Streaming Port |
+|--------|-----|----------------|
+| 44289123 | 192.168.128.2 | 30000 |
+| 44435674 | 192.168.128.2 | 30002 |
+| 41831756 | 192.168.128.2 | 30004 |
+| 46195029 | 192.168.128.2 | 30006 |
+
+### Data Flow
+```
+Edge Camera (enableStreaming) → Network Stream
+    ↓
+ClientPublisher (setFromStream) → Body Tracking (host)
+    ↓
+startPublishing() → Fusion (INTRA_PROCESS)
+    ↓
+Fused Bodies → GLViewer
+```
+
+### Build
+```bash
+cd "/workspaces/zed-playground/playground/body tracking/multi-camera/cpp/build"
+cmake ..
+make -j4
+```
+
+### Run
+```bash
+./ZED_BodyFusion
+```
+
+## Related Samples
+
+### Camera Streaming Receiver
+`/workspaces/zed-playground/playground/camera streaming/receiver/cpp/`
+- Simple streaming receiver sample
+- Shows basic `setFromStream()` usage with OpenCV display
+
+## ZED SDK Headers
+Located at: `/usr/local/zed/include/sl/`
+- `Camera.hpp` - Main camera API
+- `Fusion.hpp` - Fusion module API
+- `CameraOne.hpp` - Single camera utilities
diff --git a/proxy.sh b/proxy.sh
new file mode 100755
index 0000000..85d72c3
--- /dev/null
+++ b/proxy.sh
@@ -0,0 +1,7 @@
+unset HTTP_PROXY
+export http_proxy="http://127.0.0.1:36000"
+unset HTTPS_PROXY
+export https_proxy="http://127.0.0.1:36000"
+unset NO_PROXY
+export no_proxy="10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.1,127.0.0.0/8,localhost,::1,192.168.2.0/24,Crosstyan-MacBook.local,*.local"
+
diff --git a/py_workspace/.python-version b/py_workspace/.python-version
new file mode 100644
index 0000000..e4fba21
--- /dev/null
+++ b/py_workspace/.python-version
@@ -0,0 +1 @@
+3.12
diff --git a/py_workspace/README.md b/py_workspace/README.md
new file mode 100755
index 0000000..bb6b18d
--- /dev/null
+++ b/py_workspace/README.md
@@ -0,0 +1,46 @@
+# ZED SDK - Depth Sensing
+
+This sample shows how to retrieve the current point cloud.
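+
+In essence (a minimal sketch assuming a wired camera; the full `depth_sensing.py` adds argument parsing, GPU memory handling, and the OpenGL display):
+
+```python
+import pyzed.sl as sl
+
+zed = sl.Camera()
+if zed.open(sl.InitParameters()) == sl.ERROR_CODE.SUCCESS:
+    point_cloud = sl.Mat()
+    if zed.grab() == sl.ERROR_CODE.SUCCESS:
+        # Each pixel holds X, Y, Z plus a packed RGBA color
+        zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
+    zed.close()
+```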
+
+## Getting Started
+
+ - Get the latest [ZED SDK](https://www.stereolabs.com/developers/release/) and [pyZED Package](https://www.stereolabs.com/docs/app-development/python/install/)
+ - Check the [Documentation](https://www.stereolabs.com/docs/)
+
+## Setting up (Optional)
+
+For improved data retrieval and handling on GPU:
+ - Install CuPy using pip
+ ```sh
+ pip install cupy-cuda11x # For CUDA 11.x
+ pip install cupy-cuda12x # For CUDA 12.x
+ ```
+ - Install the CUDA Python bindings using pip
+ ```sh
+ pip install cuda-python
+ ```
+
+## Run the program
+
+To run the program, use the following command in your terminal:
+```bash
+python depth_sensing.py
+```
+If you wish to run the program from an input SVO file or an IP address, or to specify a resolution, run:
+
+```bash
+python depth_sensing.py --input_svo_file --ip_address --resolution
+```
+Arguments:
+ - `--input_svo_file` Path to an existing .svo file that will be played back. If neither this parameter nor ip_address is specified, the wired camera is used by default.
+ - `--ip_address` IP address, in format a.b.c.d:port or a.b.c.d. If specified, the program will try to connect to that IP.
+ - `--resolution` Resolution, can be either HD2K, HD1200, HD1080, HD720, SVGA or VGA.
+ - `--disable-gpu-data-transfer` Disable GPU data transfer acceleration with CuPy even if CuPy is available.
+
+### Features
+ - The live camera point cloud is retrieved
+ - An OpenGL window displays it in 3D
+
+## Support
+
+If you need assistance, go to our Community site at https://community.stereolabs.com/
\ No newline at end of file
diff --git a/py_workspace/UV_LOCAL_PACKAGE_GUIDE.md b/py_workspace/UV_LOCAL_PACKAGE_GUIDE.md
new file mode 100644
index 0000000..1919b14
--- /dev/null
+++ b/py_workspace/UV_LOCAL_PACKAGE_GUIDE.md
@@ -0,0 +1,83 @@
+# Integrating Local Binary Extensions with `uv`
+
+This guide explains how we packaged the local `pyzed` binary extension (originally from a system installation) so that `uv` can manage it as a project dependency.
+
+## The Problem
+
+The ZED SDK installs its Python wrapper (`pyzed`) as a system-level package (often in `/usr/local/lib/...`). It consists of a compiled extension module (`sl.cpython-312-x86_64-linux-gnu.so`) and Python bindings.
+
+`uv` strictly manages virtual environments and dependencies. It cannot directly "see" or import packages from the global system site-packages unless explicitly configured to use system site-packages (which reduces isolation). Furthermore, a raw `.so` file or a bare directory without metadata isn't a valid package source for `uv`.
+
+## The Solution: A Local Package Wrapper
+
+To make `pyzed` compatible with `uv`, we wrapped the raw library files into a proper, minimally compliant Python package located at `libs/pyzed_pkg`.
+
+### 1. Directory Structure
+
+We organized the files into a standard package layout:
+
+```text
+libs/pyzed_pkg/
+├── pyproject.toml # Package metadata (CRITICAL)
+└── pyzed/ # The actual importable package
+    ├── __init__.py
+    ├── sl.cpython-312-x86_64-linux-gnu.so # The compiled extension
+    └── ...
+```
+
+### 2. The Local `pyproject.toml`
+
+We created a `pyproject.toml` inside `libs/pyzed_pkg` to tell build tools how to handle the files.
+
+Key configuration points:
+1. **Build System**: Uses `setuptools` to bundle the files.
+2. **Package Discovery**: Explicitly lists `pyzed`.
+3. **Package Data**: **Crucially**, configures `setuptools` to include binary files (`*.so`, `*.pyi`) which are usually ignored by default.
+
+ +```toml +[project] +name = "pyzed" +version = "0.1.0" +description = "Wrapper for ZED SDK" +requires-python = ">=3.12" +dependencies = [] + +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +packages = ["pyzed"] + +# IMPORTANT: Ensure the binary extension is included in the build +[tool.setuptools.package-data] +pyzed = ["*.so", "*.pyi"] +``` + +## Configuring the Main Project + +In the root `pyproject.toml` of your application, we used **`[tool.uv.sources]`** to redirect the `pyzed` dependency to our local path. + +```toml +[project] +dependencies = [ + "pyzed", # Declared as a normal dependency + "cupy-cuda12x", + "numpy", +] + +# uv-specific configuration +[tool.uv.sources] +pyzed = { path = "libs/pyzed_pkg" } +``` + +## How `uv` Processes This + +1. **Resolution**: When you run `uv sync` or `uv run`, `uv` sees `pyzed` in the dependencies. +2. **Source Lookup**: It checks `tool.uv.sources` and finds the local path `libs/pyzed_pkg`. +3. **Build/Install**: + * `uv` treats the directory as a source distribution. + * It uses the `build-system` defined in `libs/pyzed_pkg/pyproject.toml` to build a temporary wheel. + * This wheel (containing the `.so` file) is installed into the project's virtual environment (`.venv`). + +This ensures that your application works seamlessly with `import pyzed.sl` while maintaining a clean, isolated, and reproducible environment managed by `uv`. diff --git a/py_workspace/depth_sensing.py b/py_workspace/depth_sensing.py new file mode 100644 index 0000000..f3b7f78 --- /dev/null +++ b/py_workspace/depth_sensing.py @@ -0,0 +1,131 @@ +######################################################################## +# +# Copyright (c) 2022, STEREOLABS. +# +# All rights reserved. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +######################################################################## + +""" + This sample demonstrates how to capture a live 3D point cloud + with the ZED SDK and display the result in an OpenGL window. 
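+
+    Example invocations (a sketch; the flags match the argparse definitions
+    below, and the SVO path is a placeholder):
+        python depth_sensing.py
+        python depth_sensing.py --input_svo_file <recording.svo2>
+        python depth_sensing.py --ip_address 192.168.128.2:30000 --resolution HD720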
+""" + +import sys +import ogl_viewer.viewer as gl +import pyzed.sl as sl +import argparse + +def parse_args(init, opt): + if len(opt.input_svo_file)>0 and opt.input_svo_file.endswith((".svo", ".svo2")): + init.set_from_svo_file(opt.input_svo_file) + print("[Sample] Using SVO File input: {0}".format(opt.input_svo_file)) + elif len(opt.ip_address)>0 : + ip_str = opt.ip_address + if ip_str.replace(':','').replace('.','').isdigit() and len(ip_str.split('.'))==4 and len(ip_str.split(':'))==2: + init.set_from_stream(ip_str.split(':')[0],int(ip_str.split(':')[1])) + print("[Sample] Using Stream input, IP : ",ip_str) + elif ip_str.replace(':','').replace('.','').isdigit() and len(ip_str.split('.'))==4: + init.set_from_stream(ip_str) + print("[Sample] Using Stream input, IP : ",ip_str) + else : + print("Unvalid IP format. Using live stream") + if ("HD2K" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD2K + print("[Sample] Using Camera in resolution HD2K") + elif ("HD1200" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD1200 + print("[Sample] Using Camera in resolution HD1200") + elif ("HD1080" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD1080 + print("[Sample] Using Camera in resolution HD1080") + elif ("HD720" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.HD720 + print("[Sample] Using Camera in resolution HD720") + elif ("SVGA" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.SVGA + print("[Sample] Using Camera in resolution SVGA") + elif ("VGA" in opt.resolution): + init.camera_resolution = sl.RESOLUTION.VGA + print("[Sample] Using Camera in resolution VGA") + elif len(opt.resolution)>0: + print("[Sample] No valid resolution entered. Using default") + else : + print("[Sample] Using default resolution") + + + +def main(opt): + print("Running Depth Sensing sample ... 
Press 'Esc' to quit\nPress 's' to save the point cloud") + + # Determine memory type based on CuPy availability and user preference + use_gpu = gl.GPU_ACCELERATION_AVAILABLE and not opt.disable_gpu_data_transfer + mem_type = sl.MEM.GPU if use_gpu else sl.MEM.CPU + if use_gpu: + print("🚀 Using GPU data transfer with CuPy") + + init = sl.InitParameters(depth_mode=sl.DEPTH_MODE.NEURAL, + coordinate_units=sl.UNIT.METER, + coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP) + parse_args(init, opt) + zed = sl.Camera() + status = zed.open(init) + if status > sl.ERROR_CODE.SUCCESS: + print(repr(status)) + exit() + + res = sl.Resolution() + res.width = -1 + res.height = -1 + + # Get the first PC to retrieve the resolution + point_cloud = sl.Mat() + zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, mem_type, res) + res = point_cloud.get_resolution() + + # Create OpenGL viewer + viewer = gl.GLViewer() + viewer.init(1, sys.argv, res) + + while viewer.is_available(): + if zed.grab() <= sl.ERROR_CODE.SUCCESS: + # Retrieve point cloud data using the optimal memory type (GPU if CuPy available) + zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA, mem_type, res) + viewer.updateData(point_cloud) + if viewer.save_data: + # For saving, we take CPU memory regardless of processing type + point_cloud_to_save = sl.Mat() + zed.retrieve_measure(point_cloud_to_save, sl.MEASURE.XYZRGBA, sl.MEM.CPU) + err = point_cloud_to_save.write('Pointcloud.ply') + if(err == sl.ERROR_CODE.SUCCESS): + print("Current .ply file saving succeed") + else: + print("Current .ply file failed") + viewer.save_data = False + viewer.exit() + zed.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--input_svo_file', type=str, help='Path to an .svo file, if you want to replay it',default = '') + parser.add_argument('--ip_address', type=str, help='IP Adress, in format a.b.c.d:port or a.b.c.d, if you have a streaming setup', default = '') + parser.add_argument('--resolution', type=str, help='Resolution, can be either HD2K, HD1200, HD1080, HD720, SVGA or VGA', default = '') + parser.add_argument('--disable-gpu-data-transfer', action='store_true', help='Disable GPU data transfer acceleration with CuPy even if CuPy is available') + opt = parser.parse_args() + if len(opt.input_svo_file)>0 and len(opt.ip_address)>0: + print("Specify only input_svo_file or ip_address, or none to use wired camera, not both. 
Exit program") + exit() + main(opt) diff --git a/py_workspace/libs/pyzed_pkg/build/lib/pyzed/__init__.py b/py_workspace/libs/pyzed_pkg/build/lib/pyzed/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.cpython-312-x86_64-linux-gnu.so b/py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..039b885 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.cpython-312-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:730a4c2adc33129b4ed503cb04a75b0baaa2538fcd254781830541a0a8a14e1c +size 5161120 diff --git a/py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.pyi b/py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.pyi new file mode 100644 index 0000000..33c405e --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/build/lib/pyzed/sl.pyi @@ -0,0 +1,14672 @@ +import enum +import numpy as np +from typing import List, Tuple, Dict, Optional, Union, Any, overload, Mapping, MutableMapping + +class Timestamp(): + """ + Structure representing timestamps with utilities. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def data_ns(self) -> int: + """ + Timestamp in nanoseconds. + """ + return int() + + @data_ns.setter + def data_ns(self, data_ns: Any) -> None: + pass + + def get_nanoseconds(self) -> int: + """ + Returns the timestamp in nanoseconds. + """ + return int() + + def get_microseconds(self) -> int: + """ + Returns the timestamp in microseconds. + """ + return int() + + def get_milliseconds(self) -> int: + """ + Returns the timestamp in milliseconds. + """ + return int() + + def get_seconds(self) -> int: + """ + Returns the timestamp in seconds. + """ + return int() + + def set_nanoseconds(self, t_ns: int) -> None: + """ + Sets the timestamp to a value in nanoseconds. + """ + pass + + def set_microseconds(self, t_us: int) -> None: + """ + Sets the timestamp to a value in microseconds. + """ + pass + + def set_milliseconds(self, t_ms: int) -> None: + """ + Sets the timestamp to a value in milliseconds. + """ + pass + + def set_seconds(self, t_s: int) -> None: + """ + Sets the timestamp to a value in seconds. + """ + pass + + +class ERROR_CODE(enum.Enum): + """ + Lists error codes in the ZED SDK. + + | Enumerator | | + |:---:|:---:| + | POTENTIAL_CALIBRATION_ISSUE | The camera has a potential calibration issue. | + | CONFIGURATION_FALLBACK | The operation could not proceed with the target configuration but did success with a fallback. | + | SENSORS_DATA_REQUIRED | The input data does not contains the high frequency sensors data, this is usually because it requires newer SVO/Streaming. In order to work this modules needs inertial data present in it input. | + | CORRUPTED_FRAME | The image could be corrupted, Enabled with the parameter InitParameters.enable_image_validity_check. + | CAMERA_REBOOTING | The camera is currently rebooting. | + | SUCCESS | Standard code for successful behavior. | + | FAILURE | Standard code for unsuccessful behavior. | + | NO_GPU_COMPATIBLE | No GPU found or CUDA capability of the device is not supported. | + | NOT_ENOUGH_GPU_MEMORY | Not enough GPU memory for this depth mode. Try a different mode (such as PERFORMANCE), or increase the minimum depth value (see InitParameters.depth_minimum_distance). | + | CAMERA_NOT_DETECTED | No camera was detected. | + | SENSORS_NOT_INITIALIZED | The MCU that controls the sensors module has an invalid serial number. 
You can try to recover it by launching the **ZED Diagnostic** tool from the command line with the option ``-r``. | + | SENSORS_NOT_AVAILABLE | A camera with sensor is detected but the sensors (IMU, barometer, ...) cannot be opened. Only the MODEL.ZED does not has sensors. Unplug/replug is required. | + | INVALID_RESOLUTION | In case of invalid resolution parameter, such as an upsize beyond the original image size in Camera.retrieve_image. | + | LOW_USB_BANDWIDTH | Insufficient bandwidth for the correct use of the camera. This issue can occur when you use multiple cameras or a USB 2.0 port. | + | CALIBRATION_FILE_NOT_AVAILABLE | The calibration file of the camera is not found on the host machine. Use **ZED Explorer** or **ZED Calibration** to download the factory calibration file. | + | INVALID_CALIBRATION_FILE | The calibration file is not valid. Try to download the factory calibration file or recalibrate your camera using **ZED Calibration**. | + | INVALID_SVO_FILE | The provided SVO file is not valid. | + | SVO_RECORDING_ERROR | An error occurred while trying to record an SVO (not enough free storage, invalid file, ...). | + | SVO_UNSUPPORTED_COMPRESSION | An SVO related error, occurs when NVIDIA based compression cannot be loaded. | + | END_OF_SVOFILE_REACHED | SVO end of file has been reached. No frame will be available until the SVO position is reset. | + | INVALID_COORDINATE_SYSTEM | The requested coordinate system is not available. | + | INVALID_FIRMWARE | The firmware of the camera is out of date. Update to the latest version. | + | INVALID_FUNCTION_PARAMETERS | Invalid parameters have been given for the function. | + | CUDA_ERROR | A CUDA error has been detected in the process, in sl.Camera.grab() or sl.Camera.retrieve_xxx() only. Activate verbose in sl.Camera.open() for more info. | + | CAMERA_NOT_INITIALIZED | The ZED SDK is not initialized. Probably a missing call to sl.Camera.open(). | + | NVIDIA_DRIVER_OUT_OF_DATE | Your NVIDIA driver is too old and not compatible with your current CUDA version. | + | INVALID_FUNCTION_CALL | The call of the function is not valid in the current context. Could be a missing call of sl.Camera.open(). | + | CORRUPTED_SDK_INSTALLATION | The ZED SDK was not able to load its dependencies or some assets are missing. Reinstall the ZED SDK or check for missing dependencies (cuDNN, TensorRT). | + | INCOMPATIBLE_SDK_VERSION | The installed ZED SDK is incompatible with the one used to compile the program. | + | INVALID_AREA_FILE | The given area file does not exist. Check the path. | + | INCOMPATIBLE_AREA_FILE | The area file does not contain enough data to be used or the sl.DEPTH_MODE used during the creation of the area file is different from the one currently set. | + | CAMERA_FAILED_TO_SETUP | Failed to open the camera at the proper resolution. Try another resolution or make sure that the UVC driver is properly installed. | + | CAMERA_DETECTION_ISSUE | Your camera can not be opened. Try replugging it to another port or flipping the USB-C connector (if there is one). | + | CANNOT_START_CAMERA_STREAM | Cannot start the camera stream. Make sure your camera is not already used by another process or blocked by firewall or antivirus. | + | NO_GPU_DETECTED | No GPU found. CUDA is unable to list it. Can be a driver/reboot issue. | + | PLANE_NOT_FOUND | Plane not found. Either no plane is detected in the scene, at the location or corresponding to the floor, or the floor plane doesn't match the prior given. 
| + | MODULE_NOT_COMPATIBLE_WITH_CAMERA | The module you try to use is not compatible with your camera sl.MODEL. Note: sl.MODEL.ZED does not has an IMU and does not support the AI modules. | + | MOTION_SENSORS_REQUIRED | The module needs the sensors to be enabled (see InitParameters.sensors_required). | + | MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION | The module needs a newer version of CUDA. | + """ + POTENTIAL_CALIBRATION_ISSUE = enum.auto() + CONFIGURATION_FALLBACK = enum.auto() + SENSORS_DATA_REQUIRED = enum.auto() + CORRUPTED_FRAME = enum.auto() + CAMERA_REBOOTING = enum.auto() + SUCCESS = enum.auto() + FAILURE = enum.auto() + NO_GPU_COMPATIBLE = enum.auto() + NOT_ENOUGH_GPU_MEMORY = enum.auto() + CAMERA_NOT_DETECTED = enum.auto() + SENSORS_NOT_INITIALIZED = enum.auto() + SENSORS_NOT_AVAILABLE = enum.auto() + INVALID_RESOLUTION = enum.auto() + LOW_USB_BANDWIDTH = enum.auto() + CALIBRATION_FILE_NOT_AVAILABLE = enum.auto() + INVALID_CALIBRATION_FILE = enum.auto() + INVALID_SVO_FILE = enum.auto() + SVO_RECORDING_ERROR = enum.auto() + END_OF_SVOFILE_REACHED = enum.auto() + SVO_UNSUPPORTED_COMPRESSION = enum.auto() + INVALID_COORDINATE_SYSTEM = enum.auto() + INVALID_FIRMWARE = enum.auto() + INVALID_FUNCTION_PARAMETERS = enum.auto() + CUDA_ERROR = enum.auto() + CAMERA_NOT_INITIALIZED = enum.auto() + NVIDIA_DRIVER_OUT_OF_DATE = enum.auto() + INVALID_FUNCTION_CALL = enum.auto() + CORRUPTED_SDK_INSTALLATION = enum.auto() + INCOMPATIBLE_SDK_VERSION = enum.auto() + INVALID_AREA_FILE = enum.auto() + INCOMPATIBLE_AREA_FILE = enum.auto() + CAMERA_FAILED_TO_SETUP = enum.auto() + CAMERA_DETECTION_ISSUE = enum.auto() + CANNOT_START_CAMERA_STREAM = enum.auto() + NO_GPU_DETECTED = enum.auto() + PLANE_NOT_FOUND = enum.auto() + MODULE_NOT_COMPATIBLE_WITH_CAMERA = enum.auto() + MOTION_SENSORS_REQUIRED = enum.auto() + MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION = enum.auto() + DRIVER_FAILURE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +def _initialize_error_codes() -> None: + """ + Lists error codes in the ZED SDK. + + | Enumerator | | + |:---:|:---:| + | POTENTIAL_CALIBRATION_ISSUE | The camera has a potential calibration issue. | + | CONFIGURATION_FALLBACK | The operation could not proceed with the target configuration but did success with a fallback. | + | SENSORS_DATA_REQUIRED | The input data does not contains the high frequency sensors data, this is usually because it requires newer SVO/Streaming. In order to work this modules needs inertial data present in it input. | + | CORRUPTED_FRAME | The image could be corrupted, Enabled with the parameter InitParameters.enable_image_validity_check. + | CAMERA_REBOOTING | The camera is currently rebooting. | + | SUCCESS | Standard code for successful behavior. | + | FAILURE | Standard code for unsuccessful behavior. | + | NO_GPU_COMPATIBLE | No GPU found or CUDA capability of the device is not supported. | + | NOT_ENOUGH_GPU_MEMORY | Not enough GPU memory for this depth mode. Try a different mode (such as PERFORMANCE), or increase the minimum depth value (see InitParameters.depth_minimum_distance). | + | CAMERA_NOT_DETECTED | No camera was detected. | + | SENSORS_NOT_INITIALIZED | The MCU that controls the sensors module has an invalid serial number. 
You can try to recover it by launching the **ZED Diagnostic** tool from the command line with the option ``-r``. | + | SENSORS_NOT_AVAILABLE | A camera with sensor is detected but the sensors (IMU, barometer, ...) cannot be opened. Only the MODEL.ZED does not has sensors. Unplug/replug is required. | + | INVALID_RESOLUTION | In case of invalid resolution parameter, such as an upsize beyond the original image size in Camera.retrieve_image. | + | LOW_USB_BANDWIDTH | Insufficient bandwidth for the correct use of the camera. This issue can occur when you use multiple cameras or a USB 2.0 port. | + | CALIBRATION_FILE_NOT_AVAILABLE | The calibration file of the camera is not found on the host machine. Use **ZED Explorer** or **ZED Calibration** to download the factory calibration file. | + | INVALID_CALIBRATION_FILE | The calibration file is not valid. Try to download the factory calibration file or recalibrate your camera using **ZED Calibration**. | + | INVALID_SVO_FILE | The provided SVO file is not valid. | + | SVO_RECORDING_ERROR | An error occurred while trying to record an SVO (not enough free storage, invalid file, ...). | + | SVO_UNSUPPORTED_COMPRESSION | An SVO related error, occurs when NVIDIA based compression cannot be loaded. | + | END_OF_SVOFILE_REACHED | SVO end of file has been reached. No frame will be available until the SVO position is reset. | + | INVALID_COORDINATE_SYSTEM | The requested coordinate system is not available. | + | INVALID_FIRMWARE | The firmware of the camera is out of date. Update to the latest version. | + | INVALID_FUNCTION_PARAMETERS | Invalid parameters have been given for the function. | + | CUDA_ERROR | A CUDA error has been detected in the process, in sl.Camera.grab() or sl.Camera.retrieve_xxx() only. Activate verbose in sl.Camera.open() for more info. | + | CAMERA_NOT_INITIALIZED | The ZED SDK is not initialized. Probably a missing call to sl.Camera.open(). | + | NVIDIA_DRIVER_OUT_OF_DATE | Your NVIDIA driver is too old and not compatible with your current CUDA version. | + | INVALID_FUNCTION_CALL | The call of the function is not valid in the current context. Could be a missing call of sl.Camera.open(). | + | CORRUPTED_SDK_INSTALLATION | The ZED SDK was not able to load its dependencies or some assets are missing. Reinstall the ZED SDK or check for missing dependencies (cuDNN, TensorRT). | + | INCOMPATIBLE_SDK_VERSION | The installed ZED SDK is incompatible with the one used to compile the program. | + | INVALID_AREA_FILE | The given area file does not exist. Check the path. | + | INCOMPATIBLE_AREA_FILE | The area file does not contain enough data to be used or the sl.DEPTH_MODE used during the creation of the area file is different from the one currently set. | + | CAMERA_FAILED_TO_SETUP | Failed to open the camera at the proper resolution. Try another resolution or make sure that the UVC driver is properly installed. | + | CAMERA_DETECTION_ISSUE | Your camera can not be opened. Try replugging it to another port or flipping the USB-C connector (if there is one). | + | CANNOT_START_CAMERA_STREAM | Cannot start the camera stream. Make sure your camera is not already used by another process or blocked by firewall or antivirus. | + | NO_GPU_DETECTED | No GPU found. CUDA is unable to list it. Can be a driver/reboot issue. | + | PLANE_NOT_FOUND | Plane not found. Either no plane is detected in the scene, at the location or corresponding to the floor, or the floor plane doesn't match the prior given. 
| + | MODULE_NOT_COMPATIBLE_WITH_CAMERA | The module you try to use is not compatible with your camera sl.MODEL. Note: sl.MODEL.ZED does not has an IMU and does not support the AI modules. | + | MOTION_SENSORS_REQUIRED | The module needs the sensors to be enabled (see InitParameters.sensors_required). | + | MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION | The module needs a newer version of CUDA. | + """ + pass + +class MODEL(enum.Enum): + """ + Lists ZED camera model. + + + | Enumerator | | + |:---:|:---:| + | ZED | ZED camera model | + | ZED_M | ZED Mini (ZED M) camera model | + | ZED2 | ZED 2 camera model | + | ZED2i | ZED 2i camera model | + | ZED_X | ZED X camera model | + | ZED_XM | ZED X Mini (ZED XM) camera model | + | ZED_X_HDR | ZED X HDR camera model | + | ZED_X_HDR_MINI | ZED X HDR Mini camera model | + | ZED_X_HDR_MAX | ZED X HDR Wide camera model | + | VIRTUAL_ZED_X | Virtual ZED X generated from 2 ZED X One | + | ZED_XONE_GS | ZED X One with global shutter AR0234 sensor | + | ZED_XONE_UHD | ZED X One with 4K rolling shutter IMX678 sensor | + | ZED_XONE_HDR | ZED X One HDR | + """ + ZED = enum.auto() + ZED_M = enum.auto() + ZED2 = enum.auto() + ZED2i = enum.auto() + ZED_X = enum.auto() + ZED_XM = enum.auto() + ZED_X_HDR = enum.auto() + ZED_X_HDR_MINI = enum.auto() + ZED_X_HDR_MAX = enum.auto() + VIRTUAL_ZED_X = enum.auto() + ZED_XONE_GS = enum.auto() + ZED_XONE_UHD = enum.auto() + ZED_XONE_HDR = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class INPUT_TYPE(enum.Enum): + """ + Lists available input types in the ZED SDK. + + + | Enumerator | | + |:---:|:---:| + | USB | USB input mode | + | SVO | SVO file input mode | + | STREAM | STREAM input mode (requires to use Camera.enable_streaming "enable_streaming()" / Camera.disable_streaming "disable_streaming()" on the "sender" side) | + | GMSL | GMSL input mode (only on NVIDIA Jetson) | + """ + USB = enum.auto() + SVO = enum.auto() + STREAM = enum.auto() + GMSL = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class AI_MODELS(enum.Enum): + """ + Lists available AI models. 
+ + | Enumerator | | + |:---:|:---:| + | MULTI_CLASS_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_FAST | + | MULTI_CLASS_MEDIUM_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_MEDIUM | + | MULTI_CLASS_ACCURATE_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE | + | HUMAN_BODY_FAST_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | HUMAN_BODY_MEDIUM_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_MEDIUM | + | HUMAN_BODY_ACCURATE_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE | + | HUMAN_BODY_38_FAST_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | HUMAN_BODY_38_MEDIUM_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | HUMAN_BODY_38_ACCURATE_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | PERSON_HEAD_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.PERSON_HEAD_BOX_FAST | + | PERSON_HEAD_ACCURATE_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.PERSON_HEAD_BOX_ACCURATE | + | REID_ASSOCIATION | Related to sl.BatchParameters.enable | + | NEURAL_LIGHT_DEPTH | Related to sl.DEPTH_MODE.NEURAL_LIGHT_DEPTH | + | NEURAL_DEPTH | Related to sl.DEPTH_MODE.NEURAL | + | NEURAL_PLUS_DEPTH | Related to sl.DEPTH_MODE.NEURAL_PLUS_DEPTH | + """ + MULTI_CLASS_DETECTION = enum.auto() + MULTI_CLASS_MEDIUM_DETECTION = enum.auto() + MULTI_CLASS_ACCURATE_DETECTION = enum.auto() + HUMAN_BODY_FAST_DETECTION = enum.auto() + HUMAN_BODY_MEDIUM_DETECTION = enum.auto() + HUMAN_BODY_ACCURATE_DETECTION = enum.auto() + HUMAN_BODY_38_FAST_DETECTION = enum.auto() + HUMAN_BODY_38_MEDIUM_DETECTION = enum.auto() + HUMAN_BODY_38_ACCURATE_DETECTION = enum.auto() + PERSON_HEAD_DETECTION = enum.auto() + PERSON_HEAD_ACCURATE_DETECTION = enum.auto() + REID_ASSOCIATION = enum.auto() + NEURAL_LIGHT_DEPTH = enum.auto() + NEURAL_DEPTH = enum.auto() + NEURAL_PLUS_DEPTH = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_DETECTION_MODEL(enum.Enum): + """ + Lists available models for the object detection module. + + + | Enumerator | | + |:---:|:---:| + | MULTI_CLASS_BOX_FAST | Any objects, bounding box based. | + | MULTI_CLASS_BOX_ACCURATE | Any objects, bounding box based, more accurate but slower than the base model. | + | MULTI_CLASS_BOX_MEDIUM | Any objects, bounding box based, compromise between accuracy and speed. | + | PERSON_HEAD_BOX_FAST | Bounding box detector specialized in person heads particularly well suited for crowded environments. The person localization is also improved. | + | PERSON_HEAD_BOX_ACCURATE | Bounding box detector specialized in person heads, particularly well suited for crowded environments. The person localization is also improved, more accurate but slower than the base model. | + | CUSTOM_BOX_OBJECTS | For external inference, using your own custom model and/or frameworks. This mode disables the internal inference engine, the 2D bounding box detection must be provided. 
| + """ + MULTI_CLASS_BOX_FAST = enum.auto() + MULTI_CLASS_BOX_MEDIUM = enum.auto() + MULTI_CLASS_BOX_ACCURATE = enum.auto() + PERSON_HEAD_BOX_FAST = enum.auto() + PERSON_HEAD_BOX_ACCURATE = enum.auto() + CUSTOM_BOX_OBJECTS = enum.auto() + CUSTOM_YOLOLIKE_BOX_OBJECTS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class BODY_TRACKING_MODEL(enum.Enum): + """ + Lists available models for the body tracking module. + + + | Enumerator | | + |:---:|:---:| + | HUMAN_BODY_FAST | Keypoints based, specific to human skeleton, real time performance even on Jetson or low end GPU cards. | + | HUMAN_BODY_ACCURATE | Keypoints based, specific to human skeleton, state of the art accuracy, requires powerful GPU. | + | HUMAN_BODY_MEDIUM | Keypoints based, specific to human skeleton, compromise between accuracy and speed. | + """ + HUMAN_BODY_FAST = enum.auto() + HUMAN_BODY_ACCURATE = enum.auto() + HUMAN_BODY_MEDIUM = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_FILTERING_MODE(enum.Enum): + """ + Lists supported bounding box preprocessing. + + + | Enumerator | | + |:---:|:---:| + | NONE | The ZED SDK will not apply any preprocessing to the detected objects. | + | NMS3D | The ZED SDK will remove objects that are in the same 3D position as an already tracked object (independent of class id). | + | NMS3D_PER_CLASS | The ZED SDK will remove objects that are in the same 3D position as an already tracked object of the same class id. | + """ + NONE = enum.auto() + NMS3D = enum.auto() + NMS3D_PER_CLASS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_ACCELERATION_PRESET(enum.Enum): + """ + Lists supported presets for maximum acceleration allowed for a given tracked object. + + + | Enumerator | | + |:---:|:---:| + | DEFAULT | The ZED SDK will automatically determine the appropriate maximum acceleration. | + | LOW | Suitable for objects with relatively low maximum acceleration (e.g., a person walking). | + | MEDIUM | Suitable for objects with moderate maximum acceleration (e.g., a person running). | + | HIGH | Suitable for objects with high maximum acceleration (e.g., a car accelerating, a kicked sports ball). | + """ + DEFAULT = enum.auto() + LOW = enum.auto() + MEDIUM = enum.auto() + HIGH = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class CAMERA_STATE(enum.Enum): + """ + Lists possible camera states. + + + | Enumerator | | + |:---:|:---:| + | AVAILABLE | The camera can be opened by the ZED SDK. | + | NOT_AVAILABLE | The camera is already opened and unavailable. 
| + """ + AVAILABLE = enum.auto() + NOT_AVAILABLE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SIDE(enum.Enum): + """ + Lists possible sides on which to get data from. + + | Enumerator | | + |:---:|:---:| + | LEFT | Left side only. | + | RIGHT | Right side only. | + | BOTH | Left and right side. | + """ + LEFT = enum.auto() + RIGHT = enum.auto() + BOTH = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class RESOLUTION(enum.Enum): + """ + Lists available resolutions. + .. note:: + The VGA resolution does not respect the 640*480 standard to better fit the camera sensor (672*376 is used). + + .. warning:: All resolutions are not available for every camera. + .. warning:: You can find the available resolutions for each camera in `our documentation `_. + + | Enumerator | | + |:---:|:---:| + | HD4K | 3856x2180 for imx678 mono | + | QHDPLUS | 3800x1800 | + | HD2K | 2208*1242 (x2) Available FPS: 15 | + | HD1080 | 1920*1080 (x2) Available FPS: 15, 30 | + | HD1200 | 1920*1200 (x2) Available FPS: 15, 30, 60 | + | HD1536 | 1920*1536 (x2) Available FPS: 30 | + | HD720 | 1280*720 (x2) Available FPS: 15, 30, 60 | + | SVGA | 960*600 (x2) Available FPS: 15, 30, 60, 120 | + | VGA | 672*376 (x2) Available FPS: 15, 30, 60, 100 | + | AUTO | Select the resolution compatible with the camera: * ZED X/X Mini: HD1200* other cameras: HD720 | + """ + HD4K = enum.auto() + QHDPLUS = enum.auto() + HD2K = enum.auto() + HD1080 = enum.auto() + HD1200 = enum.auto() + HD1536 = enum.auto() + HD720 = enum.auto() + SVGA = enum.auto() + VGA = enum.auto() + AUTO = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +def sleep_ms(time: int) -> None: + """ + Blocks the execution of the current thread for **time milliseconds. + :param time: Number of milliseconds to wait. + """ + pass + +def sleep_us(time: int) -> None: + """ + Blocks the execution of the current thread for **time microseconds. + :param time: Number of microseconds to wait. + """ + pass + +def get_resolution(resolution: RESOLUTION) -> Resolution: + """ + Gets the corresponding sl.Resolution from an sl.RESOLUTION. + + :param resolution: The wanted sl.RESOLUTION. + :return: The sl.Resolution corresponding to sl.RESOLUTION given as argument. + """ + return Resolution() + +class DeviceProperties: + """ + Class containing information about the properties of a camera. + + .. note:: + A camera_model sl.MODEL.ZED_M with an id '-1' can be due to an inverted USB-C cable. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_state(self) -> CAMERA_STATE: + """ + State of the camera. + + Default: sl.CAMERA_STATE.NOT_AVAILABLE + """ + return CAMERA_STATE() + + @camera_state.setter + def camera_state(self, camera_state: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Id of the camera. 
+ + Default: -1 + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def camera_name(self) -> str: + """ + Name of Camera in DT (ZED_CAM1) + """ + return str() + + @camera_name.setter + def camera_name(self, camera_name: Any) -> None: + pass + + @property + def i2c_port(self) -> int: + """ + i2c port of the camera. + """ + return int() + + @i2c_port.setter + def i2c_port(self, i2c_port: Any) -> None: + pass + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera. + """ + return MODEL() + + @camera_model.setter + def camera_model(self, camera_model: Any) -> None: + pass + + @identifier.setter + def identifier(self, identifier: Any) -> None: + pass + + @property + def camera_sensor_model(self) -> str: + """ + Name of sensor (zedx) + """ + return str() + + @camera_sensor_model.setter + def camera_sensor_model(self, camera_sensor_model: Any) -> None: + pass + + @property + def path(self) -> str: + """ + System path of the camera. + """ + return str() + + @path.setter + def path(self, path: Any) -> None: + pass + + @property + def sensor_address_right(self) -> int: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return int() + + @sensor_address_right.setter + def sensor_address_right(self, sensor_address_right: Any) -> None: + pass + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + + Default: 0 + .. warning:: Not provided for Windows. + """ + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + @property + def sensor_address_left(self) -> int: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return int() + + @sensor_address_left.setter + def sensor_address_left(self, sensor_address_left: Any) -> None: + pass + + @property + def camera_badge(self) -> str: + """ + Badge name (zedx_ar0234) + """ + return str() + + @camera_badge.setter + def camera_badge(self, camera_badge: Any) -> None: + pass + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type of the camera. + """ + return INPUT_TYPE() + + @input_type.setter + def input_type(self, input_type: Any) -> None: + pass + + def identifier(self) -> np.numpy[np.uint8]: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return np.numpy[np.uint8]() + + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + +class Matrix3f: + """ + Class representing a generic 3*3 matrix. + + It is defined in a row-major order, it means that, in the value buffer, the entire first row is stored first, followed by the entire second row, and so on. + \n The data value of the matrix can be accessed with the r() method. + .. code-block:: text + + | r00 r01 r02 | + | r10 r11 r12 | + | r20 r21 r22 | + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def matrix_name(self) -> str: + """ + Name of the matrix (optional). + """ + return str() + + @matrix_name.setter + def matrix_name(self, matrix_name: Any) -> None: + pass + + @r.setter + def r(self, r: Any) -> None: + pass + + @property + def nbElem(self) -> int: + return int() + + def _initialize_from_input(self, input_data) -> None: + pass + + def __dealloc__(self) -> None: + pass + + def init_matrix(self, matrix) -> None: + """ + Copy the values from another sl.Matrix3f. + :param matrix: sl.Matrix3f to copy. + """ + pass + + def inverse(self) -> None: + """ + Sets the sl.Matrix3f to its inverse. 
+ """ + pass + + def inverse_mat(self, rotation) -> Matrix3f: + """ + Returns the inverse of a sl.Matrix3f. + :param rotation: sl.Matrix3f to compute the inverse from. + :return: The inverse of the sl.Matrix3f given as input. + """ + return Matrix3f() + + def transpose(self) -> None: + """ + Sets the sl.Matrix3f to its transpose. + """ + pass + + def transpose_mat(self, rotation) -> Matrix3f: + """ + Returns the transpose of a sl.Matrix3f. + :param rotation: sl.Matrix3f to compute the transpose from. + :return: The transpose of the sl.Matrix3f given as input. + """ + return Matrix3f() + + def set_identity(self) -> Matrix3f: + """ + Sets the sl.Matrix3f to identity. + :return: itself + """ + return Matrix3f() + + def identity(self) -> Matrix3f: + """ + Creates an identity sl.Matrix3f. + :return: A sl.Matrix3f set to identity. + """ + return Matrix3f() + + def set_zeros(self) -> None: + """ + Sets the sl.Matrix3f to zero. + """ + pass + + def zeros(self) -> Matrix3f: + """ + Creates a sl.Matrix3f filled with zeros. + :return: A sl.Matrix3f filled with zeros. + """ + return Matrix3f() + + def get_infos(self) -> str: + """ + Returns the components of the sl.Matrix3f in a string. + :return: A string containing the components of the current sl.Matrix3f. + """ + return str() + + def r(self) -> np.numpy[float][float]: + """ + 3*3 numpy array of inner data. + """ + return np.numpy[float][float]() + + def __mul__(self, other) -> None: + pass + + def __richcmp__(left, right, op) -> None: + pass + + def __getitem__(self, key) -> None: + pass + + def __setitem__(self, key, value) -> None: + pass + + def __repr__(self) -> None: + pass + + +class Matrix4f: + """ + Class representing a generic 4*4 matrix. + + It is defined in a row-major order, it means that, in the value buffer, the entire first row is stored first, followed by the entire second row, and so on. + \n The data value of the matrix can be accessed with the r() method. + .. code-block:: text + + | r00 r01 r02 tx | + | r10 r11 r12 ty | + | r20 r21 r22 tz | + | m30 m31 m32 m33 | + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def matrix_name(self) -> str: + """ + Returns the name of the matrix (optional). + """ + return str() + + @matrix_name.setter + def matrix_name(self, matrix_name: Any) -> None: + pass + + @m.setter + def m(self, m: Any) -> None: + pass + + def _initialize_from_input(self, input_data) -> None: + pass + + def __dealloc__(self) -> None: + pass + + def init_matrix(self, matrix: Matrix4f) -> None: + """ + Copy the values from another sl.Matrix4f. + :param matrix: sl.Matrix4f to copy. + """ + pass + + def inverse(self) -> ERROR_CODE: + """ + Sets the sl.Matrix4f to its inverse. + :return: sl.ERROR_CODE.SUCCESS if the inverse has been computed, sl.ERROR_CODE.FAILURE is not (det = 0). + """ + return ERROR_CODE() + + def inverse_mat(self, rotation: Matrix4f) -> Matrix4f: + """ + Returns the inverse of a sl.Matrix4f. + :param rotation: sl.Matrix4f to compute the inverse from. + :return: The inverse of the sl.Matrix4f given as input. + """ + return Matrix4f() + + def transpose(self) -> None: + """ + Sets the sl.Matrix4f to its transpose. + """ + pass + + def transpose_mat(self, rotation: Matrix4f) -> Matrix4f: + """ + Returns the transpose of a sl.Matrix4f. + :param rotation: sl.Matrix4f to compute the transpose from. + :return: The transpose of the sl.Matrix4f given as input. + """ + return Matrix4f() + + def set_identity(self) -> Matrix4f: + """ + Sets the sl.Matrix4f to identity. 
+ :return: itself + """ + return Matrix4f() + + def identity(self) -> Matrix4f: + """ + Creates an identity sl.Matrix4f. + :return: A sl.Matrix3f set to identity. + """ + return Matrix4f() + + def set_zeros(self) -> None: + """ + Sets the sl.Matrix4f to zero. + """ + pass + + def zeros(self) -> Matrix4f: + """ + Creates a sl.Matrix4f filled with zeros. + :return: A sl.Matrix4f filled with zeros. + """ + return Matrix4f() + + def get_infos(self) -> str: + """ + Returns the components of the sl.Matrix4f in a string. + :return: A string containing the components of the current sl.Matrix4f. + """ + return str() + + def set_sub_matrix3f(self, input: Matrix3f, row = 0, column = 0) -> ERROR_CODE: + """ + Sets a sl.Matrix3f inside the sl.Matrix4f. + .. note:: + Can be used to set the rotation matrix when the sl.Matrix4f is a pose or an isometric matrix. + + :param input: Sub-matrix to put inside the sl.Matrix4f. + :param row: Index of the row to start the 3x3 block. Must be 0 or 1. + :param column: Index of the column to start the 3x3 block. Must be 0 or 1. + + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_sub_vector3f(self, input0: float, input1: float, input2: float, column = 3) -> ERROR_CODE: + """ + Sets a 3x1 Vector inside the sl.Matrix4f at the specified column index. + .. note:: + Can be used to set the translation/position matrix when the sl.Matrix4f is a pose or an isometry. + + :param input0: First value of the 3x1 Vector to put inside the sl.Matrix4f. + :param input1: Second value of the 3x1 Vector to put inside the sl.Matrix4f. + :param input2: Third value of the 3x1 Vector to put inside the sl.Matrix4f. + :param column: Index of the column to start the 3x3 block. By default, it is the last column (translation for a sl.Pose). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_sub_vector4f(self, input0: float, input1: float, input2: float, input3: float, column = 3) -> ERROR_CODE: + """ + Sets a 4x1 Vector inside the sl.Matrix4f at the specified column index. + :param input0: First value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input1: Second value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input2: Third value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input3: Fourth value of the 4x1 Vector to put inside the sl.Matrix4f. + :param column: Index of the column to start the 3x3 block. By default, it is the last column (translation for a sl.Pose). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def m(self) -> np.numpy[float][float]: + """ + 4*4 numpy array of inner data. + """ + return np.numpy[float][float]() + + def __mul__(self, other) -> None: + pass + + def __richcmp__(left, right, op) -> None: + pass + + def __getitem__(self, key) -> None: + pass + + def __setitem__(self, key, value) -> None: + pass + + def __repr__(self) -> None: + pass + + +class VIDEO_SETTINGS(enum.Enum): + """ + Lists available camera settings for the camera (contrast, hue, saturation, gain, ...). + + .. warning:: All VIDEO_SETTINGS are not supported for all camera models. You can find the supported VIDEO_SETTINGS for each ZED camera in our `documentation `_.\n\n + GAIN and EXPOSURE are linked in auto/default mode (see sl.Camera.set_camera_settings()). 
+ + | Enumerator | | + |:---:|:---:| + | BRIGHTNESS | Brightness control Affected value should be between 0 and 8. Note: Not available for ZED X/X Mini cameras. | + | CONTRAST | Contrast control Affected value should be between 0 and 8. Note: Not available for ZED X/X Mini cameras. | + | HUE | Hue control Affected value should be between 0 and 11. Note: Not available for ZED X/X Mini cameras. | + | SATURATION | Saturation control Affected value should be between 0 and 8. | + | SHARPNESS | Digital sharpening control Affected value should be between 0 and 8. | + | GAMMA | ISP gamma control Affected value should be between 1 and 9. | + | GAIN | Gain control Affected value should be between 0 and 100 for manual control. Note: If EXPOSURE is set to -1 (automatic mode), then GAIN will be automatic as well. | + | EXPOSURE | Exposure control Affected value should be between 0 and 100 for manual control. The exposition is mapped linearly in a percentage of the following max values. Special case for ``EXPOSURE = 0`` that corresponds to 0.17072ms. The conversion to milliseconds depends on the framerate: * 15fps & ``EXPOSURE = 100`` -> 19.97ms* 30fps & ``EXPOSURE = 100`` -> 19.97ms* 60fps & ``EXPOSURE = 100`` -> 10.84072ms* 100fps & ``EXPOSURE = 100`` -> 10.106624ms | + | AEC_AGC | Defines if the GAIN and EXPOSURE are in automatic mode or not. Setting GAIN or EXPOSURE values will automatically set this value to 0. | + | AEC_AGC_ROI | Defines the region of interest for automatic exposure/gain computation. To be used with the dedicated Camera.set_camera_settings_roi "set_camera_settings_roi()" / Camera.get_camera_settings_roi "get_camera_settings_roi()" methods. | + | WHITEBALANCE_TEMPERATURE | Color temperature control Affected value should be between 2800 and 6500 with a step of 100.Note: Setting a value will automatically set WHITEBALANCE_AUTO to 0. | + | WHITEBALANCE_AUTO | Defines if the white balance is in automatic mode or not. | + | LED_STATUS | Status of the front LED of the camera. Set to 0 to disable the light, 1 to enable the light. Default value is on. Note: Requires camera firmware 1523 at least. | + | EXPOSURE_TIME | Real exposure time control in microseconds. Note: Only available for ZED X/X Mini cameras.Note: Replace EXPOSURE setting. | + | ANALOG_GAIN | Real analog gain (sensor) control in mDB. The range is defined by Jetson DTS and by default [1000-16000]. Note: Only available for ZED X/X Mini cameras.Note: Replace GAIN settings. | + | DIGITAL_GAIN | Real digital gain (ISP) as a factor. The range is defined by Jetson DTS and by default [1-256]. Note: Only available for ZED X/X Mini cameras.Note: Replace GAIN settings. | + | AUTO_EXPOSURE_TIME_RANGE | Range of exposure auto control in micro seconds. Used with Camera.set_camera_settings_range "set_camera_settings_range()". Min/max range between max range defined in DTS. By default: [28000 - or 19000] us. Note: Only available for ZED X/X Mini cameras. | + | AUTO_ANALOG_GAIN_RANGE | Range of sensor gain in automatic control. Used with Camera.set_camera_settings_range "set_camera_settings_range()". Min/max range between max range defined in DTS. By default: [1000 - 16000] mdB. Note: Only available for ZED X/X Mini cameras. | + | AUTO_DIGITAL_GAIN_RANGE | Range of digital ISP gain in automatic control. Used with Camera.set_camera_settings_range "set_camera_settings_range()". Min/max range between max range defined in DTS. By default: [1 - 256]. Note: Only available for ZED X/X Mini cameras. 
| + | EXPOSURE_COMPENSATION | Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Affected value should be between 0 and 100 (mapped between [-2.0,2.0]). Default value is 50, i.e. no compensation applied. Note: Only available for ZED X/X Mini cameras. | + | DENOISING | Level of denoising applied on both left and right images. Affected value should be between 0 and 100. Default value is 50. Note: Only available for ZED X/X Mini cameras. | + """ + BRIGHTNESS = enum.auto() + CONTRAST = enum.auto() + HUE = enum.auto() + SATURATION = enum.auto() + SHARPNESS = enum.auto() + GAMMA = enum.auto() + GAIN = enum.auto() + EXPOSURE = enum.auto() + AEC_AGC = enum.auto() + AEC_AGC_ROI = enum.auto() + WHITEBALANCE_TEMPERATURE = enum.auto() + WHITEBALANCE_AUTO = enum.auto() + LED_STATUS = enum.auto() + EXPOSURE_TIME = enum.auto() + ANALOG_GAIN = enum.auto() + DIGITAL_GAIN = enum.auto() + AUTO_EXPOSURE_TIME_RANGE = enum.auto() + AUTO_ANALOG_GAIN_RANGE = enum.auto() + AUTO_DIGITAL_GAIN_RANGE = enum.auto() + EXPOSURE_COMPENSATION = enum.auto() + DENOISING = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class DEPTH_MODE(enum.Enum): + """ + Lists available depth computation modes. + + | Enumerator | | + |:---:|:---:| + | NONE | No depth map computation. Only rectified stereo images will be available. | + | PERFORMANCE | Computation mode optimized for speed. | + | QUALITY | Computation mode designed for challenging areas with untextured surfaces. | + | ULTRA | Computation mode that favors edges and sharpness. Requires more GPU memory and computation power. | + | NEURAL_LIGHT | End to End Neural disparity estimation. Requires AI module. | + | NEURAL | End to End Neural disparity estimation. Requires AI module. | + | NEURAL_PLUS | End to End Neural disparity estimation. More precise but requires more GPU memory and computation power. Requires AI module. | + """ + NONE = enum.auto() + PERFORMANCE = enum.auto() + QUALITY = enum.auto() + ULTRA = enum.auto() + NEURAL_LIGHT = enum.auto() + NEURAL = enum.auto() + NEURAL_PLUS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class UNIT(enum.Enum): + """ + Lists available units for measures. + + | Enumerator | | + |:---:|:---:| + | MILLIMETER | International System (1/1000 meters) | + | CENTIMETER | International System (1/100 meters) | + | METER | International System (1 meter) | + | INCH | Imperial Unit (1/12 feet) | + | FOOT | Imperial Unit (1 foot) | + """ + MILLIMETER = enum.auto() + CENTIMETER = enum.auto() + METER = enum.auto() + INCH = enum.auto() + FOOT = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class COORDINATE_SYSTEM(enum.Enum): + """ + Lists available coordinates systems for positional tracking and 3D measures. + + | Enumerator | | + |:---:|:---:| + | IMAGE | Standard coordinates system in computer vision. Used in OpenCV: see `here `_. | + | LEFT_HANDED_Y_UP | Left-handed with Y up and Z forward. Used in Unity with DirectX. | + | RIGHT_HANDED_Y_UP | Right-handed with Y pointing up and Z backward. 
Used in OpenGL. |
+    | RIGHT_HANDED_Z_UP | Right-handed with Z pointing up and Y forward. Used in 3DSMax. |
+    | LEFT_HANDED_Z_UP | Left-handed with Z axis pointing up and X forward. Used in Unreal Engine. |
+    | RIGHT_HANDED_Z_UP_X_FWD | Right-handed with Z pointing up and X forward. Used in ROS (REP 103). |
+    """
+    IMAGE = enum.auto()
+    LEFT_HANDED_Y_UP = enum.auto()
+    RIGHT_HANDED_Y_UP = enum.auto()
+    RIGHT_HANDED_Z_UP = enum.auto()
+    LEFT_HANDED_Z_UP = enum.auto()
+    RIGHT_HANDED_Z_UP_X_FWD = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class MEASURE(enum.Enum):
+    """
+    Lists retrievable measures.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | DISPARITY | Disparity map. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+    | DEPTH | Depth map in sl.UNIT defined in sl.InitParameters.coordinate_units. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+    | CONFIDENCE | Certainty/confidence of the depth map. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+    | XYZ | Point cloud. Each pixel contains 4 float (X, Y, Z, not used). Type: sl.MAT_TYPE.F32_C4 |
+    | XYZRGBA | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the RGBA color. Type: sl.MAT_TYPE.F32_C4 |
+    | XYZBGRA | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the BGRA color. Type: sl.MAT_TYPE.F32_C4 |
+    | XYZARGB | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the ARGB color. Type: sl.MAT_TYPE.F32_C4 |
+    | XYZABGR | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the ABGR color. Type: sl.MAT_TYPE.F32_C4 |
+    | NORMALS | Normal vectors map. Each pixel contains 4 float (X, Y, Z, 0). Type: sl.MAT_TYPE.F32_C4 |
+    | DISPARITY_RIGHT | Disparity map for right sensor. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+    | DEPTH_RIGHT | Depth map for right sensor. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+    | XYZ_RIGHT | Point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, not used). Type: sl.MAT_TYPE.F32_C4 |
+    | XYZRGBA_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the RGBA color. Type: sl.MAT_TYPE.F32_C4 |
+    | XYZBGRA_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the BGRA color. Type: sl.MAT_TYPE.F32_C4 |
+    | XYZARGB_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the ARGB color. Type: sl.MAT_TYPE.F32_C4 |
+    | XYZABGR_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the ABGR color. Type: sl.MAT_TYPE.F32_C4 |
+    | NORMALS_RIGHT | Normal vectors map for right view. Each pixel contains 4 float (X, Y, Z, 0).
Type: sl.MAT_TYPE.F32_C4 |
+    | DEPTH_U16_MM | Depth map in millimeters, regardless of the sl.UNIT defined in sl.InitParameters.coordinate_units. Invalid values are set to 0 and depth values are clamped at 65000. Each pixel contains 1 unsigned short. Type: sl.MAT_TYPE.U16_C1 |
+    | DEPTH_U16_MM_RIGHT | Depth map in millimeters for right sensor. Each pixel contains 1 unsigned short. Type: sl.MAT_TYPE.U16_C1 |
+    """
+    DISPARITY = enum.auto()
+    DEPTH = enum.auto()
+    CONFIDENCE = enum.auto()
+    XYZ = enum.auto()
+    XYZRGBA = enum.auto()
+    XYZBGRA = enum.auto()
+    XYZARGB = enum.auto()
+    XYZABGR = enum.auto()
+    NORMALS = enum.auto()
+    DISPARITY_RIGHT = enum.auto()
+    DEPTH_RIGHT = enum.auto()
+    XYZ_RIGHT = enum.auto()
+    XYZRGBA_RIGHT = enum.auto()
+    XYZBGRA_RIGHT = enum.auto()
+    XYZARGB_RIGHT = enum.auto()
+    XYZABGR_RIGHT = enum.auto()
+    NORMALS_RIGHT = enum.auto()
+    DEPTH_U16_MM = enum.auto()
+    DEPTH_U16_MM_RIGHT = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class VIEW(enum.Enum):
+    """
+    Lists available views.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | LEFT | Left BGRA image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 |
+    | RIGHT | Right BGRA image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 |
+    | LEFT_GRAY | Left gray image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 |
+    | RIGHT_GRAY | Right gray image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 |
+    | LEFT_UNRECTIFIED | Left BGRA unrectified image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 |
+    | RIGHT_UNRECTIFIED | Right BGRA unrectified image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 |
+    | LEFT_UNRECTIFIED_GRAY | Left gray unrectified image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 |
+    | RIGHT_UNRECTIFIED_GRAY | Right gray unrectified image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 |
+    | SIDE_BY_SIDE | Left and right image (the image width is therefore doubled). Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 |
+    | DEPTH | Color rendering of the depth. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.DEPTH with sl.Camera.retrieve_measure() to get depth values. |
+    | CONFIDENCE | Color rendering of the depth confidence. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.CONFIDENCE with sl.Camera.retrieve_measure() to get confidence values. |
+    | NORMALS | Color rendering of the normals. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.NORMALS with sl.Camera.retrieve_measure() to get normal values. |
+    | DEPTH_RIGHT | Color rendering of the right depth mapped on right sensor. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.DEPTH_RIGHT with sl.Camera.retrieve_measure() to get depth right values. |
+    | NORMALS_RIGHT | Color rendering of the normals mapped on right sensor. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.NORMALS_RIGHT with sl.Camera.retrieve_measure() to get normal right values. |
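+
+    A minimal grab-loop sketch (illustrative only; assumes an opened sl.Camera instance named ``zed``):
+
+    .. code-block:: python
+
+        import pyzed.sl as sl
+
+        image = sl.Mat()
+        depth = sl.Mat()
+        if zed.grab() == sl.ERROR_CODE.SUCCESS:
+            # Left rectified image (a sl.VIEW) and metric depth map (a sl.MEASURE).
+            zed.retrieve_image(image, sl.VIEW.LEFT)
+            zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
+            # Depth at the image center, in sl.InitParameters.coordinate_units.
+            err, distance = depth.get_value(depth.get_width() // 2, depth.get_height() // 2)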
+    """
+    LEFT = enum.auto()
+    RIGHT = enum.auto()
+    LEFT_GRAY = enum.auto()
+    RIGHT_GRAY = enum.auto()
+    LEFT_UNRECTIFIED = enum.auto()
+    RIGHT_UNRECTIFIED = enum.auto()
+    LEFT_UNRECTIFIED_GRAY = enum.auto()
+    RIGHT_UNRECTIFIED_GRAY = enum.auto()
+    SIDE_BY_SIDE = enum.auto()
+    DEPTH = enum.auto()
+    CONFIDENCE = enum.auto()
+    NORMALS = enum.auto()
+    DEPTH_RIGHT = enum.auto()
+    NORMALS_RIGHT = enum.auto()
+    LEFT_BGRA = enum.auto()
+    LEFT_BGR = enum.auto()
+    RIGHT_BGRA = enum.auto()
+    RIGHT_BGR = enum.auto()
+    LEFT_UNRECTIFIED_BGRA = enum.auto()
+    LEFT_UNRECTIFIED_BGR = enum.auto()
+    RIGHT_UNRECTIFIED_BGRA = enum.auto()
+    RIGHT_UNRECTIFIED_BGR = enum.auto()
+    SIDE_BY_SIDE_BGRA = enum.auto()
+    SIDE_BY_SIDE_BGR = enum.auto()
+    SIDE_BY_SIDE_GRAY = enum.auto()
+    SIDE_BY_SIDE_UNRECTIFIED_BGRA = enum.auto()
+    SIDE_BY_SIDE_UNRECTIFIED_BGR = enum.auto()
+    SIDE_BY_SIDE_UNRECTIFIED_GRAY = enum.auto()
+    DEPTH_BGRA = enum.auto()
+    DEPTH_BGR = enum.auto()
+    DEPTH_GRAY = enum.auto()
+    CONFIDENCE_BGRA = enum.auto()
+    CONFIDENCE_BGR = enum.auto()
+    CONFIDENCE_GRAY = enum.auto()
+    NORMALS_BGRA = enum.auto()
+    NORMALS_BGR = enum.auto()
+    NORMALS_GRAY = enum.auto()
+    DEPTH_RIGHT_BGRA = enum.auto()
+    DEPTH_RIGHT_BGR = enum.auto()
+    DEPTH_RIGHT_GRAY = enum.auto()
+    NORMALS_RIGHT_BGRA = enum.auto()
+    NORMALS_RIGHT_BGR = enum.auto()
+    NORMALS_RIGHT_GRAY = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class POSITIONAL_TRACKING_STATE(enum.Enum):
+    """
+    Lists the different states of positional tracking.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | SEARCHING | Warning: DEPRECATED: This state is no longer in use. |
+    | OK | The positional tracking is functioning normally. |
+    | OFF | The positional tracking is currently disabled. |
+    | FPS_TOO_LOW | The effective FPS is too low to provide accurate motion tracking results. Consider adjusting performance parameters (e.g., depth mode, camera resolution) to improve tracking quality. |
+    | SEARCHING_FLOOR_PLANE | The camera is currently searching for the floor plane to establish its position relative to it. The world reference frame will be set afterward. |
+    | UNAVAILABLE | The tracking module was unable to perform tracking from the previous frame to the current frame. |
+    """
+    SEARCHING = enum.auto()
+    OK = enum.auto()
+    OFF = enum.auto()
+    FPS_TOO_LOW = enum.auto()
+    SEARCHING_FLOOR_PLANE = enum.auto()
+    UNAVAILABLE = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class ODOMETRY_STATUS(enum.Enum):
+    """
+    Reports the status of the current odometry tracking.
+
+    | Enumerator | |
+    |:----------:|:---------------------------|
+    | OK | The positional tracking module successfully tracked from the previous frame to the current frame. |
+    | UNAVAILABLE | The positional tracking module cannot track the current frame. |
+    | INSUFFICIENT_FEATURES | The positional tracking failed to track the current frame because it could not find enough features. |
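+
+    A hedged runtime-check sketch (illustrative only; assumes an opened sl.Camera named ``zed`` with positional tracking enabled, and that sl.Camera.get_positional_tracking_status() is available as in recent SDK versions):
+
+    .. code-block:: python
+
+        status = zed.get_positional_tracking_status()
+        if status.odometry_status != sl.ODOMETRY_STATUS.OK:
+            # Frame-to-frame tracking failed; the reported pose may be extrapolated.
+            print("odometry degraded:", status.odometry_status)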
+    """
+    OK = enum.auto()
+    UNAVAILABLE = enum.auto()
+    INSUFFICIENT_FEATURES = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class SPATIAL_MEMORY_STATUS(enum.Enum):
+    """
+    Reports the status of the current map tracking.
+
+    | Enumerator | |
+    |:-----------:|:---------------------------|
+    | OK | The positional tracking module is operating normally. |
+    | LOOP_CLOSED | The positional tracking module detected a loop and corrected its position. |
+    | SEARCHING | The positional tracking module is searching for recognizable areas in the global map to relocate. |
+    | INITIALIZING | Displayed until the camera has acquired enough memory (Initial Area Mapping) or has found its first loop closure and is localized in the loaded area map (Lifelong Mapping/Localization). Users need to keep moving the camera for it to get updated. |
+    | MAP_UPDATE | Displayed when the robot is mapping (Initial Area Mapping) or when the robot is getting out of the area map bounds (Lifelong Mapping). Displayed as “Tracking” when in exploratory mode with SLAM engaged. |
+    | KNOWN_MAP | Displayed when the camera is localized within the loaded area map. |
+    | LOST | Displayed when localization cannot operate anymore (camera completely obstructed, sudden localization jumps after being localized) in Mapping/Localization modes. It can also include the case where the camera jumps or is located out of map bounds in Localization mode. This should be an indicator for users to stop the robot. |
+    | OFF | Displayed when the spatial memory is turned off. |
+    """
+    OK = enum.auto()
+    LOOP_CLOSED = enum.auto()
+    SEARCHING = enum.auto()
+    INITIALIZING = enum.auto()
+    MAP_UPDATE = enum.auto()
+    KNOWN_MAP = enum.auto()
+    LOST = enum.auto()
+    OFF = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class POSITIONAL_TRACKING_FUSION_STATUS(enum.Enum):
+    """
+    Reports the status of the positional tracking fusion.
+
+    | Enumerator | |
+    |:----------:|:---------------------------|
+    | VISUAL_INERTIAL | The positional tracking module is fusing visual and inertial data. |
+    | VISUAL | The positional tracking module is fusing visual data only. |
+    | INERTIAL | The positional tracking module is fusing inertial data only. |
+    | GNSS | The positional tracking module is fusing GNSS data only. |
+    | VISUAL_INERTIAL_GNSS | The positional tracking module is fusing visual, inertial, and GNSS data. |
+    | VISUAL_GNSS | The positional tracking module is fusing visual and GNSS data. |
+    | INERTIAL_GNSS | The positional tracking module is fusing inertial and GNSS data. |
+    | UNAVAILABLE | The positional tracking module is unavailable. |
+    """
+    VISUAL_INERTIAL = enum.auto()
+    VISUAL = enum.auto()
+    INERTIAL = enum.auto()
+    GNSS = enum.auto()
+    VISUAL_INERTIAL_GNSS = enum.auto()
+    VISUAL_GNSS = enum.auto()
+    INERTIAL_GNSS = enum.auto()
+    UNAVAILABLE = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class GNSS_STATUS(enum.Enum):
+    """
+    Lists the possible statuses of the GNSS signal.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | UNKNOWN | No GNSS fix data is available. |
+    | SINGLE | Single Point Positioning. |
+    | DGNSS | Differential GNSS. |
+    | PPS | Precise Positioning Service. |
+    | RTK_FLOAT | Real Time Kinematic Float. |
+    | RTK_FIX | Real Time Kinematic Fixed. |
+    """
+    UNKNOWN = enum.auto()
+    SINGLE = enum.auto()
+    DGNSS = enum.auto()
+    PPS = enum.auto()
+    RTK_FLOAT = enum.auto()
+    RTK_FIX = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class GNSS_MODE(enum.Enum):
+    """
+    Lists the possible modes of the GNSS signal.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | UNKNOWN | No GNSS fix data is available. |
+    | NO_FIX | No GNSS fix is available. |
+    | FIX_2D | 2D GNSS fix, providing latitude and longitude coordinates but without altitude information. |
+    | FIX_3D | 3D GNSS fix, providing latitude, longitude, and altitude coordinates. |
+    """
+    UNKNOWN = enum.auto()
+    NO_FIX = enum.auto()
+    FIX_2D = enum.auto()
+    FIX_3D = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class GNSS_FUSION_STATUS(enum.Enum):
+    """
+    Lists the possible GNSS fusion statuses.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | OK | The GNSS fusion module is calibrated and working successfully. |
+    | OFF | The GNSS fusion module is not enabled. |
+    | CALIBRATION_IN_PROGRESS | Calibration of the GNSS/VIO fusion module is in progress. |
+    | RECALIBRATION_IN_PROGRESS | Re-alignment of GNSS/VIO data is in progress, leading to potentially inaccurate global position. |
+    """
+    OK = enum.auto()
+    OFF = enum.auto()
+    CALIBRATION_IN_PROGRESS = enum.auto()
+    RECALIBRATION_IN_PROGRESS = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+class Landmark:
+    """
+    Represents a 3D landmark.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def position(self) -> list[float]:
+        """
+        The position of the landmark.
+        """
+        return list[float]()
+
+    @position.setter
+    def position(self, position: Any) -> None:
+        pass
+
+    @property
+    def id(self) -> int:
+        """
+        The ID of the landmark.
+        """
+        return int()
+
+    @id.setter
+    def id(self, id: Any) -> None:
+        pass
+
+
+class Landmark2D:
+    """
+    Represents the projection of a 3D landmark in the image.
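+
+    A hedged reading sketch (illustrative only; assumes ``landmarks_2d`` is a list of sl.Landmark2D instances obtained from the positional tracking module):
+
+    .. code-block:: python
+
+        for lm in landmarks_2d:
+            u, v = lm.position  # pixel coordinates in the image
+            if lm.dynamic_confidence < 0.5:
+                # A smaller value means greater confidence that the landmark is dynamic.
+                print(f"landmark {lm.id} at ({u}, {v}) likely belongs to a dynamic object")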
+ """ + def __init__(self, *args, **kwargs) -> None: ... + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Unique identifier of the corresponding landmark. + """ + return int() + + @property + def dynamic_confidence(self) -> float: + """ + Confidence score indicating the likelihood that the landmark is associated with a dynamic object. + + The value ranges from 0 to 1, where a smaller value indicates greater confidence that the landmark + is owned by a dynamic object. + """ + return float() + + def position(self) -> np.array: + """ + The position of the landmark in the image. + """ + return np.array() + + +class PositionalTrackingStatus: + """ + Lists the different status of the positional tracking + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def odometry_status(self) -> ODOMETRY_STATUS: + """ + Represents the current state of Visual-Inertial Odometry (VIO) tracking between the previous frame and the current frame. + """ + return ODOMETRY_STATUS() + + @odometry_status.setter + def odometry_status(self, odometry_status: Any) -> None: + pass + + @property + def tracking_fusion_status(self) -> POSITIONAL_TRACKING_FUSION_STATUS: + """ + Represents the current state of the positional tracking fusion. + """ + return POSITIONAL_TRACKING_FUSION_STATUS() + + @tracking_fusion_status.setter + def tracking_fusion_status(self, tracking_fusion_status: Any) -> None: + pass + + @property + def spatial_memory_status(self) -> SPATIAL_MEMORY_STATUS: + """ + Represents the current state of camera tracking in the global map. + """ + return SPATIAL_MEMORY_STATUS() + + @spatial_memory_status.setter + def spatial_memory_status(self, spatial_memory_status: Any) -> None: + pass + + +class FusedPositionalTrackingStatus: + """ + Lists the different status of the positional tracking + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def odometry_status(self) -> ODOMETRY_STATUS: + return ODOMETRY_STATUS() + + @odometry_status.setter + def odometry_status(self, odometry_status: Any) -> None: + pass + + @property + def tracking_fusion_status(self) -> POSITIONAL_TRACKING_FUSION_STATUS: + return POSITIONAL_TRACKING_FUSION_STATUS() + + @tracking_fusion_status.setter + def tracking_fusion_status(self, tracking_fusion_status: Any) -> None: + pass + + @property + def gnss_mode(self) -> GNSS_MODE: + return GNSS_MODE() + + @gnss_mode.setter + def gnss_mode(self, gnss_mode: Any) -> None: + pass + + @property + def spatial_memory_status(self) -> SPATIAL_MEMORY_STATUS: + return SPATIAL_MEMORY_STATUS() + + @spatial_memory_status.setter + def spatial_memory_status(self, spatial_memory_status: Any) -> None: + pass + + @property + def gnss_status(self) -> GNSS_STATUS: + return GNSS_STATUS() + + @gnss_status.setter + def gnss_status(self, gnss_status: Any) -> None: + pass + + @property + def gnss_fusion_status(self) -> GNSS_FUSION_STATUS: + return GNSS_FUSION_STATUS() + + @gnss_fusion_status.setter + def gnss_fusion_status(self, gnss_fusion_status: Any) -> None: + pass + + +class POSITIONAL_TRACKING_MODE(enum.Enum): + """ + Lists the mode of positional tracking that can be used. + + | Enumerator | | + |:---:|:---:| + | GEN_1 | Default mode. Fast and stable mode. Requires depth computation. Less robust than GEN_3. | + | GEN_2 | Warning: DEPRECATED. | + | GEN_3 | Fast and accurate, in both exploratory mode and mapped environments. Note: Can be used even if depth_mode is set to DEPTH_MODE::NONE. 
| + """ + GEN_1 = enum.auto() + GEN_2 = enum.auto() + GEN_3 = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class AREA_EXPORTING_STATE(enum.Enum): + """ + Lists the different states of spatial memory area export. + + | Enumerator | | + |:---:|:---:| + | SUCCESS | The spatial memory file has been successfully created. | + | RUNNING | The spatial memory is currently being written. | + | NOT_STARTED | The spatial memory file exportation has not been called. | + | FILE_EMPTY | The spatial memory contains no data, the file is empty. | + | FILE_ERROR | The spatial memory file has not been written because of a wrong file name. | + | SPATIAL_MEMORY_DISABLED | The spatial memory learning is disabled. No file can be created. | + """ + SUCCESS = enum.auto() + RUNNING = enum.auto() + NOT_STARTED = enum.auto() + FILE_EMPTY = enum.auto() + FILE_ERROR = enum.auto() + SPATIAL_MEMORY_DISABLED = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class REFERENCE_FRAME(enum.Enum): + """ + Lists possible types of position matrix used to store camera path and pose. + + | Enumerator | | + |:---:|:---:| + | WORLD | The transform of sl.Pose will contain the motion with reference to the world frame (previously called sl.PATH). | + | CAMERA | The transform of sl.Pose will contain the motion with reference to the previous camera frame (previously called sl.POSE). | + """ + WORLD = enum.auto() + CAMERA = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class TIME_REFERENCE(enum.Enum): + """ + Lists possible time references for timestamps or data. + + + | Enumerator | | + |:---:|:---:| + | IMAGE | The requested timestamp or data will be at the time of the frame extraction. | + | CURRENT | The requested timestamp or data will be at the time of the function call. | + """ + IMAGE = enum.auto() + CURRENT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SPATIAL_MAPPING_STATE(enum.Enum): + """ + Lists the different states of spatial mapping. + + | Enumerator | | + |:---:|:---:| + | INITIALIZING | The spatial mapping is initializing. | + | OK | The depth and tracking data were correctly integrated in the mapping algorithm. | + | NOT_ENOUGH_MEMORY | The maximum memory dedicated to the scanning has been reached. The mesh will no longer be updated. | + | NOT_ENABLED | sl.Camera.enable_spatial_mapping() wasn't called or the scanning was stopped and not relaunched. | + | FPS_TOO_LOW | The effective FPS is too low to give proper results for spatial mapping. 
Consider using performance parameters (sl.DEPTH_MODE.PERFORMANCE, sl.MAPPING_RESOLUTION.LOW, low camera resolution (RESOLUTION "sl.RESOLUTION.VGA/SVGA" or sl.RESOLUTION.HD720). | + """ + INITIALIZING = enum.auto() + OK = enum.auto() + NOT_ENOUGH_MEMORY = enum.auto() + NOT_ENABLED = enum.auto() + FPS_TOO_LOW = enum.auto() + LAST = enum.auto() + +class REGION_OF_INTEREST_AUTO_DETECTION_STATE(enum.Enum): + """ + Lists the different states of region of interest auto detection. + + | Enumerator | | + |:---:|:---:| + | RUNNING | The region of interest auto detection is initializing. | + | READY | The region of interest mask is ready, if auto_apply was enabled, the region of interest mask is being used | + | NOT_ENABLED | The region of interest auto detection is not enabled | + """ + RUNNING = enum.auto() + READY = enum.auto() + NOT_ENABLED = enum.auto() + LAST = enum.auto() + +class SVO_COMPRESSION_MODE(enum.Enum): + """ + Lists available compression modes for SVO recording. + .. note:: + LOSSLESS is an improvement of previous lossless compression (used in ZED Explorer), even if size may be bigger, compression time is much faster. + + + | Enumerator | | + |:---:|:---:| + | LOSSLESS | PNG/ZSTD (lossless) CPU based compression. Average size: 42% of RAW | + | H264 | H264 (AVCHD) GPU based compression. Average size: 1% of RAW Note: Requires a NVIDIA GPU. | + | H265 | H265 (HEVC) GPU based compression. Average size: 1% of RAW Note: Requires a NVIDIA GPU. | + | H264_LOSSLESS | H264 Lossless GPU/Hardware based compression. Average size: 25% of RAW Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Note: Requires a NVIDIA GPU. | + | H265_LOSSLESS | H265 Lossless GPU/Hardware based compression. Average size: 25% of RAW Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Note: Requires a NVIDIA GPU. | + """ + LOSSLESS = enum.auto() + H264 = enum.auto() + H265 = enum.auto() + H264_LOSSLESS = enum.auto() + H265_LOSSLESS = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class MEM(enum.Enum): + """ + Lists available memory type. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + + + | Enumerator | | + |:---:|:---:| + | CPU | Data will be stored on the CPU (processor side). | + | GPU | Data will be stored on the GPU | + | BOTH | Data will be stored on both the CPU and GPU memory | + """ + CPU = enum.auto() + GPU = enum.auto() + BOTH = enum.auto() + +class COPY_TYPE(enum.Enum): + """ + Lists available copy operation on sl.Mat. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + + + | Enumerator | | + |:---:|:---:| + | CPU_CPU | Copy data from CPU to CPU. | + | GPU_CPU | Copy data from GPU to CPU. | + | CPU_GPU | Copy data from CPU to GPU. | + | GPU_GPU | Copy data from GPU to GPU. | + """ + CPU_CPU = enum.auto() + GPU_CPU = enum.auto() + CPU_GPU = enum.auto() + GPU_GPU = enum.auto() + +class MAT_TYPE(enum.Enum): + """ + Lists available sl.Mat formats. + .. note:: + sl.Mat type depends on image or measure type. + + .. note:: + For the dependencies, see sl.VIEW and sl.MEASURE. 
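+
+    A minimal allocation sketch (illustrative only; the constructor arguments follow the common sl.Mat(width, height, mat_type, memory_type) pattern):
+
+    .. code-block:: python
+
+        import pyzed.sl as sl
+
+        # A 4-channel float matrix, e.g. for an XYZRGBA point cloud.
+        cloud = sl.Mat(1280, 720, sl.MAT_TYPE.F32_C4, sl.MEM.CPU)
+        data = cloud.get_data()  # numpy array: shape (720, 1280, 4), dtype float32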
+ + + | Enumerator | | + |:---:|:---:| + | F32_C1 | 1-channel matrix of float | + | F32_C2 | 2-channel matrix of float | + | F32_C3 | 3-channel matrix of float | + | F32_C4 | 4-channel matrix of float | + | U8_C1 | 1-channel matrix of unsigned char | + | U8_C2 | 2-channel matrix of unsigned char | + | U8_C3 | 3-channel matrix of unsigned char | + | U8_C4 | 4-channel matrix of unsigned char | + | U16_C1 | 1-channel matrix of unsigned short | + | S8_C4 | 4-channel matrix of signed char | + """ + F32_C1 = enum.auto() + F32_C2 = enum.auto() + F32_C3 = enum.auto() + F32_C4 = enum.auto() + U8_C1 = enum.auto() + U8_C2 = enum.auto() + U8_C3 = enum.auto() + U8_C4 = enum.auto() + U16_C1 = enum.auto() + S8_C4 = enum.auto() + +class SENSOR_TYPE(enum.Enum): + """ + Lists available sensor types. + .. note:: + Sensors are not available on sl.MODEL.ZED. + + + | Enumerator | | + |:---:|:---:| + | ACCELEROMETER | Three-axis accelerometer sensor to measure the inertial accelerations. | + | GYROSCOPE | Three-axis gyroscope sensor to measure the angular velocities. | + | MAGNETOMETER | Three-axis magnetometer sensor to measure the orientation of the device with respect to the Earth's magnetic field. | + | BAROMETER | Barometer sensor to measure the atmospheric pressure. | + """ + ACCELEROMETER = enum.auto() + GYROSCOPE = enum.auto() + MAGNETOMETER = enum.auto() + BAROMETER = enum.auto() + +class SENSORS_UNIT(enum.Enum): + """ + Lists available measurement units of onboard sensors. + .. note:: + Sensors are not available on sl.MODEL.ZED. + + + | Enumerator | | + |:---:|:---:| + | M_SEC_2 | m/s² (acceleration) | + | DEG_SEC | deg/s (angular velocity) | + | U_T | μT (magnetic field) | + | HPA | hPa (atmospheric pressure) | + | CELSIUS | °C (temperature) | + | HERTZ | Hz (frequency) | + """ + M_SEC_2 = enum.auto() + DEG_SEC = enum.auto() + U_T = enum.auto() + HPA = enum.auto() + CELSIUS = enum.auto() + HERTZ = enum.auto() + +class MODULE(enum.Enum): + """ + Lists available module + + + | MODULE | Description | + |:---:|:---:| + | ALL | All modules | + | DEPTH | For the depth module (includes all 'measures' in retrieveMeasure) | + | POSITIONAL_TRACKING | For the positional tracking module | + | OBJECT_DETECTION | For the object detection module | + | BODY_TRACKING | For the body tracking module | + | SPATIAL_MAPPING | For the spatial mapping module | + """ + ALL = enum.auto() + DEPTH = enum.auto() + POSITIONAL_TRACKING = enum.auto() + OBJECT_DETECTION = enum.auto() + BODY_TRACKING = enum.auto() + SPATIAL_MAPPING = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_CLASS(enum.Enum): + """ + Lists available object classes. + + + | OBJECT_CLASS | Description | + |:---:|:---:| + | PERSON | For people detection | + | VEHICLE | For vehicle detection (cars, trucks, buses, motorcycles, etc.) | + | BAG | For bag detection (backpack, handbag, suitcase, etc.) | + | ANIMAL | For animal detection (cow, sheep, horse, dog, cat, bird, etc.) | + | ELECTRONICS | For electronic device detection (cellphone, laptop, etc.) | + | FRUIT_VEGETABLE | For fruit and vegetable detection (banana, apple, orange, carrot, etc.) | + | SPORT | For sport-related object detection (sport ball, etc.) 
| + """ + PERSON = enum.auto() + VEHICLE = enum.auto() + BAG = enum.auto() + ANIMAL = enum.auto() + ELECTRONICS = enum.auto() + FRUIT_VEGETABLE = enum.auto() + SPORT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_SUBCLASS(enum.Enum): + """ + List available object subclasses. + + Given as hint, when using object tracking an object can change of sl.OBJECT_SUBCLASS while keeping the same sl.OBJECT_CLASS + (i.e.: frame n: MOTORBIKE, frame n+1: BICYCLE). + + | OBJECT_SUBCLASS | OBJECT_CLASS | + |:---:|:---:| + | PERSON | PERSON | + | PERSON_HEAD | PERSON | + | BICYCLE | VEHICLE | + | CAR | VEHICLE | + | MOTORBIKE | VEHICLE | + | BUS | VEHICLE | + | TRUCK | VEHICLE | + | BOAT | VEHICLE | + | BACKPACK | BAG | + | HANDBAG | BAG | + | SUITCASE | BAG | + | BIRD | ANIMAL | + | CAT | ANIMAL | + | DOG | ANIMAL | + | HORSE | ANIMAL | + | SHEEP | ANIMAL | + | COW | ANIMAL | + | CELLPHONE | ELECTRONICS | + | LAPTOP | ELECTRONICS | + | BANANA | FRUIT_VEGETABLE | + | APPLE | FRUIT_VEGETABLE | + | ORANGE | FRUIT_VEGETABLE | + | CARROT | FRUIT_VEGETABLE | + | SPORTSBALL | SPORT | + | MACHINERY | VEHICLE | + """ + PERSON = enum.auto() + PERSON_HEAD = enum.auto() + BICYCLE = enum.auto() + CAR = enum.auto() + MOTORBIKE = enum.auto() + BUS = enum.auto() + TRUCK = enum.auto() + BOAT = enum.auto() + BACKPACK = enum.auto() + HANDBAG = enum.auto() + SUITCASE = enum.auto() + BIRD = enum.auto() + CAT = enum.auto() + DOG = enum.auto() + HORSE = enum.auto() + SHEEP = enum.auto() + COW = enum.auto() + CELLPHONE = enum.auto() + LAPTOP = enum.auto() + BANANA = enum.auto() + APPLE = enum.auto() + ORANGE = enum.auto() + CARROT = enum.auto() + SPORTSBALL = enum.auto() + MACHINERY = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_TRACKING_STATE(enum.Enum): + """ + Lists the different states of object tracking. + + + | Enumerator | | + |:---:|:---:| + | OFF | The tracking is not yet initialized. The object id is not usable. | + | OK | The object is tracked. | + | SEARCHING | The object could not be detected in the image and is potentially occluded. The trajectory is estimated. | + | TERMINATE | This is the last searching state of the track. The track will be deleted in the next sl.Camera.retrieve_objects(). | + """ + OFF = enum.auto() + OK = enum.auto() + SEARCHING = enum.auto() + TERMINATE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class FLIP_MODE(enum.Enum): + """ + Lists possible flip modes of the camera. + + + | Enumerator | | + |:---:|:---:| + | OFF | No flip applied. Default behavior. | + | ON | Images and camera sensors' data are flipped useful when your camera is mounted upside down. | + | AUTO | In LIVE mode, use the camera orientation (if an IMU is available) to set the flip mode. In SVO mode, read the state of this enum when recorded. 
| + """ + OFF = enum.auto() + ON = enum.auto() + AUTO = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_ACTION_STATE(enum.Enum): + """ + Lists the different states of an object's actions. + + + | Enumerator | | + |:---:|:---:| + | IDLE | The object is staying static. | + | MOVING | The object is moving. | + """ + IDLE = enum.auto() + MOVING = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class ObjectData: + """ + Class containing data of a detected object such as its bounding_box, label, id and its 3D position. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @head_position.setter + def head_position(self, head_position: Any) -> None: + pass + + @property + def confidence(self) -> float: + """ + Detection confidence value of the object. + From 0 to 100, a low value means the object might not be localized perfectly or the label (sl.OBJECT_CLASS) is uncertain. + """ + return float() + + @confidence.setter + def confidence(self, confidence: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Object tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @velocity.setter + def velocity(self, velocity: Any) -> None: + pass + + @property + def action_state(self) -> OBJECT_ACTION_STATE: + """ + Object action state. + """ + return OBJECT_ACTION_STATE() + + @action_state.setter + def action_state(self, action_state: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Object identification number. + It is used as a reference when tracking the object through the frames. + .. note:: + Only available if sl.ObjectDetectionParameters.enable_tracking is activated. + + .. note:: + Otherwise, it will be set to -1. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def sublabel(self) -> OBJECT_SUBCLASS: + """ + Object sub-class/sub-category to identify the object type. + """ + return OBJECT_SUBCLASS() + + @sublabel.setter + def sublabel(self, sublabel: Any) -> None: + pass + + @property + def mask(self) -> Mat: + """ + Mask defining which pixels which belong to the object (in bounding_box_2d and set to 255) and those of the background (set to 0). + .. warning:: The mask information is only available for tracked objects (sl.OBJECT_TRACKING_STATE.OK) that have a valid depth. + .. warning:: Otherwise, the mask will not be initialized (```mask.is_init() == False```). 
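+
+        A guarded-access sketch (illustrative only; ``obj`` stands for a tracked sl.ObjectData instance):
+
+        .. code-block:: python
+
+            if obj.tracking_state == sl.OBJECT_TRACKING_STATE.OK and obj.mask.is_init():
+                mask_np = obj.mask.get_data()  # uint8 array: 255 inside the object, 0 outside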
+ """ + return Mat() + + @mask.setter + def mask(self, mask: Any) -> None: + pass + + @position_covariance.setter + def position_covariance(self, position_covariance: Any) -> None: + pass + + @property + def raw_label(self) -> int: + """ + Object raw label. + It is forwarded from sl.CustomBoxObjectData when using sl.OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS. + """ + return int() + + @raw_label.setter + def raw_label(self, raw_label: Any) -> None: + pass + + @dimensions.setter + def dimensions(self, dimensions: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @bounding_box.setter + def bounding_box(self, bounding_box: Any) -> None: + pass + + @property + def label(self) -> OBJECT_CLASS: + """ + Object class/category to identify the object type. + """ + return OBJECT_CLASS() + + @label.setter + def label(self, label: Any) -> None: + pass + + def position(self) -> np.array[float]: + """ + Object 3D centroid. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def velocity(self) -> np.array[float]: + """ + Object 3D velocity. + .. note:: + It is defined in ```sl.InitParameters.coordinate_units / s``` and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the object represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float]() + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + def dimensions(self) -> np.array[float]: + """ + 3D object dimensions: width, height, length. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def head_bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the head of the object (a person) represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[float][float]() + + def head_bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the head of the object (a person) represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[int][int]() + + def head_position(self) -> np.array[float]: + """ + 3D centroid of the head of the object (a person). + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. 
warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[float]() + + def position_covariance(self) -> np.array[float]: + """ + Covariance matrix of the 3D position. + .. note:: + It is represented by its upper triangular matrix value + + .. code-block:: text + + = [p0, p1, p2] + [p1, p3, p4] + [p2, p4, p5] + + where pi is ```position_covariance[i]``` + """ + return np.array[float]() + + +class BodyData: + """ + Class containing data of a detected body/person such as its bounding_box, id and its 3D position. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @head_position.setter + def head_position(self, head_position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Body/person identification number. + It is used as a reference when tracking the body through the frames. + .. note:: + Only available if sl.BodyTrackingParameters.enable_tracking is activated. + + .. note:: + Otherwise, it will be set to -1. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def confidence(self) -> float: + """ + Detection confidence value of the body/person. + From 0 to 100, a low value means the body might not be localized perfectly. + """ + return float() + + @confidence.setter + def confidence(self, confidence: Any) -> None: + pass + + @position_covariance.setter + def position_covariance(self, position_covariance: Any) -> None: + pass + + @property + def mask(self) -> Mat: + """ + Mask defining which pixels which belong to the body/person (in bounding_box_2d and set to 255) and those of the background (set to 0). + .. warning:: The mask information is only available for tracked bodies (sl.OBJECT_TRACKING_STATE.OK) that have a valid depth. + .. warning:: Otherwise, the mask will not be initialized (```mask.is_init() == False```). + """ + return Mat() + + @mask.setter + def mask(self, mask: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Body/person tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @velocity.setter + def velocity(self, velocity: Any) -> None: + pass + + @keypoints_covariance.setter + def keypoints_covariance(self, keypoints_covariance: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @dimensions.setter + def dimensions(self, dimensions: Any) -> None: + pass + + @property + def action_state(self) -> OBJECT_ACTION_STATE: + """ + Body/person action state. + """ + return OBJECT_ACTION_STATE() + + @action_state.setter + def action_state(self, action_state: Any) -> None: + pass + + @bounding_box.setter + def bounding_box(self, bounding_box: Any) -> None: + pass + + def position(self) -> np.array[float]: + """ + Body/person 3D centroid. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def velocity(self) -> np.array[float]: + """ + Body/person 3D velocity. + .. 
note:: + It is defined in ```sl.InitParameters.coordinate_units / s``` and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the body/person represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float]() + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the body/person represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + def keypoints_covariance(self) -> np.array[float][float]: + """ + NumPy array of detection covariance for each keypoint. + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. Their covariances will be 0. + """ + return np.array[float][float]() + + def position_covariance(self) -> np.array[float]: + """ + Covariance matrix of the 3D position. + .. note:: + It is represented by its upper triangular matrix value + + .. code-block:: text + + = [p0, p1, p2] + [p1, p3, p4] + [p2, p4, p5] + + where pi is ```position_covariance[i]``` + """ + return np.array[float]() + + def dimensions(self) -> np.array[float]: + """ + 3D body/person dimensions: width, height, length. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def keypoint(self) -> np.array[float][float]: + """ + Set of useful points representing the human body in 3D. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. They will have non finite values. + """ + return np.array[float][float]() + + def keypoint_2d(self) -> np.array[int][int]: + """ + Set of useful points representing the human body in 2D. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. warning:: In some cases, eg. body partially out of the image, some keypoints can not be detected. They will have negatives coordinates. + """ + return np.array[int][int]() + + def head_bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the head of the body/person represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float][float]() + + def head_bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the head of the body/person represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + """ + return np.array[int][int]() + + def head_position(self) -> np.array[float]: + """ + 3D centroid of the head of the body/person. + .. 
note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def keypoint_confidence(self) -> np.array[float]: + """ + NumPy array of detection confidences for each keypoint. + .. note:: + They can not be lower than the sl.BodyTrackingRuntimeParameters.detection_confidence_threshold. + + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. They will have non finite values. + """ + return np.array[float]() + + def local_position_per_joint(self) -> np.array[float][float]: + """ + NumPy array of local position (position of the child keypoint with respect to its parent expressed in its parent coordinate frame) for each keypoint. + .. note:: + They are expressed in sl.REFERENCE_FRAME.CAMERA or sl.REFERENCE_FRAME.WORLD. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float][float]() + + def local_orientation_per_joint(self) -> np.array[float][float]: + """ + NumPy array of local orientation for each keypoint. + .. note:: + The orientation is represented by a quaternion. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float][float]() + + def global_root_orientation(self) -> np.array[float]: + """ + Global root orientation of the skeleton (NumPy array). + The orientation is also represented by a quaternion. + .. note:: + The global root position is already accessible in keypoint attribute by using the root index of a given sl.BODY_FORMAT. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float]() + + +def generate_unique_id() -> None: + """ + Generate a UUID like unique id to help identify and track AI detections. + """ + pass + +class CustomBoxObjectData: + """ + Class that store externally detected objects. + + The objects can be ingested with sl.Camera.ingest_custom_box_objects() to extract 3D and tracking information over time. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def tracking_max_dist(self) -> float: + """ + Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. + By default, do not discard tracked object based on distance. + Only valid for static object. + """ + return float() + + @tracking_max_dist.setter + def tracking_max_dist(self, tracking_max_dist: Any) -> None: + pass + + @property + def is_grounded(self) -> bool: + """ + Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking. + - true: 2 DoF projected alongside the floor plane. Case for object standing on the ground such as person, vehicle, etc. + \n The projection implies that the objects cannot be superposed on multiple horizontal levels. + - false: 6 DoF (full 3D movements are allowed). + + .. note:: + This parameter cannot be changed for a given object tracking id. + + .. note:: + It is advised to set it by labels to avoid issues. + """ + return bool() + + @is_grounded.setter + def is_grounded(self, is_grounded: Any) -> None: + pass + + @property + def tracking_timeout(self) -> float: + """ + Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time. + By default, let the tracker decide internally based on the internal sub class of the tracked object. 
+ """ + return float() + + @tracking_timeout.setter + def tracking_timeout(self, tracking_timeout: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @property + def probability(self) -> float: + """ + Detection confidence value of the object. + .. note:: + The value should be in ```[0-1]```. + + .. note:: + It can be used to improve the object tracking. + """ + return float() + + @probability.setter + def probability(self, probability: Any) -> None: + pass + + @property + def is_static(self) -> bool: + """ + Provide hypothesis about the object staticity to improve the object tracking. + - true: the object will be assumed to never move nor being moved. + - false: the object will be assumed to be able to move or being moved. + """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @property + def label(self) -> int: + """ + Object label. + This information is passed-through and can be used to improve object tracking. + .. note:: + It should define an object class. This means that any similar object (in classification) should share the same label number. + """ + return int() + + @label.setter + def label(self, label: Any) -> None: + pass + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + +class CustomMaskObjectData: + """ + Class storing externally detected objects. + + The objects can be ingested with sl.Camera.ingest_custom_mask_objects() to extract 3D and tracking information over time. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def tracking_max_dist(self) -> float: + """ + Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. + By default, do not discard tracked object based on distance. + Only valid for static object. + """ + return float() + + @tracking_max_dist.setter + def tracking_max_dist(self, tracking_max_dist: Any) -> None: + pass + + @property + def is_grounded(self) -> bool: + """ + Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking. + - true: 2 DoF projected alongside the floor plane. Case for object standing on the ground such as person, vehicle, etc. + \n The projection implies that the objects cannot be superposed on multiple horizontal levels. + - false: 6 DoF (full 3D movements are allowed). + + .. note:: + This parameter cannot be changed for a given object tracking id. + + .. note:: + It is advised to set it by labels to avoid issues. + """ + return bool() + + @is_grounded.setter + def is_grounded(self, is_grounded: Any) -> None: + pass + + @property + def tracking_timeout(self) -> float: + """ + Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time. 
+ By default, the tracker decides internally based on the sub-class of the tracked object.
+ """
+ return float()
+
+ @tracking_timeout.setter
+ def tracking_timeout(self, tracking_timeout: Any) -> None:
+ pass
+
+ @property
+ def unique_object_id(self) -> str:
+ """
+ Unique id to help identify and track AI detections.
+ It can either be generated externally, generated with generate_unique_id(), or left empty.
+ """
+ return str()
+
+ @unique_object_id.setter
+ def unique_object_id(self, unique_object_id: Any) -> None:
+ pass
+
+ @property
+ def probability(self) -> float:
+ """
+ Detection confidence value of the object.
+ .. note::
+ The value should be in ```[0-1]```.
+
+ .. note::
+ It can be used to improve the object tracking.
+ """
+ return float()
+
+ @probability.setter
+ def probability(self, probability: Any) -> None:
+ pass
+
+ @property
+ def is_static(self) -> bool:
+ """
+ Provides a hypothesis about the object staticity to improve the object tracking.
+ - true: the object will be assumed to never move nor be moved.
+ - false: the object will be assumed to be able to move or be moved.
+ """
+ return bool()
+
+ @is_static.setter
+ def is_static(self, is_static: Any) -> None:
+ pass
+
+ @property
+ def bounding_box_2d(self) -> np.array[int][int]:
+ """
+ 2D bounding box of the object represented as four 2D points starting at the top left corner and going clockwise.
+ .. note::
+ Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner.
+
+ .. code-block:: text
+
+ A ------ B
+ | Object |
+ D ------ C
+ """
+ return np.array[int][int]()
+
+ @bounding_box_2d.setter
+ def bounding_box_2d(self, bounding_box_2d: Any) -> None:
+ pass
+
+ @property
+ def label(self) -> int:
+ """
+ Object label.
+ This information is passed through and can be used to improve object tracking.
+ .. note::
+ It should define an object class. This means that any similar object (in classification) should share the same label number.
+ """
+ return int()
+
+ @label.setter
+ def label(self, label: Any) -> None:
+ pass
+
+ @property
+ def box_mask(self) -> Mat:
+ """
+ Mask defining which pixels belong to the object (set to 255 inside bounding_box_2d) and which belong to the background (set to 0).
+ """
+ return Mat()
+
+ @box_mask.setter
+ def box_mask(self, box_mask: Any) -> None:
+ pass
+
+
+class BODY_18_PARTS(enum.Enum):
+ """
+ Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_18.
+
+ | BODY_18_PARTS | Keypoint number |
+ |:---:|:---:|
+ | NOSE | 0 |
+ | NECK | 1 |
+ | RIGHT_SHOULDER | 2 |
+ | RIGHT_ELBOW | 3 |
+ | RIGHT_WRIST | 4 |
+ | LEFT_SHOULDER | 5 |
+ | LEFT_ELBOW | 6 |
+ | LEFT_WRIST | 7 |
+ | RIGHT_HIP | 8 |
+ | RIGHT_KNEE | 9 |
+ | RIGHT_ANKLE | 10 |
+ | LEFT_HIP | 11 |
+ | LEFT_KNEE | 12 |
+ | LEFT_ANKLE | 13 |
+ | RIGHT_EYE | 14 |
+ | LEFT_EYE | 15 |
+ | RIGHT_EAR | 16 |
+ | LEFT_EAR | 17 |
+ """
+ NOSE = enum.auto()
+ NECK = enum.auto()
+ RIGHT_SHOULDER = enum.auto()
+ RIGHT_ELBOW = enum.auto()
+ RIGHT_WRIST = enum.auto()
+ LEFT_SHOULDER = enum.auto()
+ LEFT_ELBOW = enum.auto()
+ LEFT_WRIST = enum.auto()
+ RIGHT_HIP = enum.auto()
+ RIGHT_KNEE = enum.auto()
+ RIGHT_ANKLE = enum.auto()
+ LEFT_HIP = enum.auto()
+ LEFT_KNEE = enum.auto()
+ LEFT_ANKLE = enum.auto()
+ RIGHT_EYE = enum.auto()
+ LEFT_EYE = enum.auto()
+ RIGHT_EAR = enum.auto()
+ LEFT_EAR = enum.auto()
+ LAST = enum.auto()
+
+class BODY_34_PARTS(enum.Enum):
+ """
+ Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_34.
+ + | BODY_34_PARTS | Keypoint number | + |:---:|:---:| + | PELVIS | 0 | + | NAVAL_SPINE | 1 | + | CHEST_SPINE | 2 | + | NECK | 3 | + | LEFT_CLAVICLE | 4 | + | LEFT_SHOULDER | 5 | + | LEFT_ELBOW | 6 | + | LEFT_WRIST | 7 | + | LEFT_HAND | 8 | + | LEFT_HANDTIP | 9 | + | LEFT_THUMB | 10 | + | RIGHT_CLAVICLE | 11 | + | RIGHT_SHOULDER | 12 | + | RIGHT_ELBOW | 13 | + | RIGHT_WRIST | 14 | + | RIGHT_HAND | 15 | + | RIGHT_HANDTIP | 16 | + | RIGHT_THUMB | 17 | + | LEFT_HIP | 18 | + | LEFT_KNEE | 19 | + | LEFT_ANKLE | 20 | + | LEFT_FOOT | 21 | + | RIGHT_HIP | 22 | + | RIGHT_KNEE | 23 | + | RIGHT_ANKLE | 24 | + | RIGHT_FOOT | 25 | + | HEAD | 26 | + | NOSE | 27 | + | LEFT_EYE | 28 | + | LEFT_EAR | 29 | + | RIGHT_EYE | 30 | + | RIGHT_EAR | 31 | + | LEFT_HEEL | 32 | + | RIGHT_HEEL | 33 | + """ + PELVIS = enum.auto() + NAVAL_SPINE = enum.auto() + CHEST_SPINE = enum.auto() + NECK = enum.auto() + LEFT_CLAVICLE = enum.auto() + LEFT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + LEFT_HAND = enum.auto() + LEFT_HANDTIP = enum.auto() + LEFT_THUMB = enum.auto() + RIGHT_CLAVICLE = enum.auto() + RIGHT_SHOULDER = enum.auto() + RIGHT_ELBOW = enum.auto() + RIGHT_WRIST = enum.auto() + RIGHT_HAND = enum.auto() + RIGHT_HANDTIP = enum.auto() + RIGHT_THUMB = enum.auto() + LEFT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + LEFT_FOOT = enum.auto() + RIGHT_HIP = enum.auto() + RIGHT_KNEE = enum.auto() + RIGHT_ANKLE = enum.auto() + RIGHT_FOOT = enum.auto() + HEAD = enum.auto() + NOSE = enum.auto() + LEFT_EYE = enum.auto() + LEFT_EAR = enum.auto() + RIGHT_EYE = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_HEEL = enum.auto() + RIGHT_HEEL = enum.auto() + LAST = enum.auto() + +class BODY_38_PARTS(enum.Enum): + """ + Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_38. 
+ + | BODY_38_PARTS | Keypoint number | + |:---:|:---:| + | PELVIS | 0 | + | SPINE_1 | 1 | + | SPINE_2 | 2 | + | SPINE_3 | 3 | + | NECK | 4 | + | NOSE | 5 | + | LEFT_EYE | 6 | + | RIGHT_EYE | 7 | + | LEFT_EAR | 8 | + | RIGHT_EAR | 9 | + | LEFT_CLAVICLE | 10 | + | RIGHT_CLAVICLE | 11 | + | LEFT_SHOULDER | 12 | + | RIGHT_SHOULDER | 13 | + | LEFT_ELBOW | 14 | + | RIGHT_ELBOW | 15 | + | LEFT_WRIST | 16 | + | RIGHT_WRIST | 17 | + | LEFT_HIP | 18 | + | RIGHT_HIP | 19 | + | LEFT_KNEE | 20 | + | RIGHT_KNEE | 21 | + | LEFT_ANKLE | 22 | + | RIGHT_ANKLE | 23 | + | LEFT_BIG_TOE | 24 | + | RIGHT_BIG_TOE | 25 | + | LEFT_SMALL_TOE | 26 | + | RIGHT_SMALL_TOE | 27 | + | LEFT_HEEL | 28 | + | RIGHT_HEEL | 29 | + | LEFT_HAND_THUMB_4 | 30 | + | RIGHT_HAND_THUMB_4 | 31 | + | LEFT_HAND_INDEX_1 | 32 | + | RIGHT_HAND_INDEX_1 | 33 | + | LEFT_HAND_MIDDLE_4 | 34 | + | RIGHT_HAND_MIDDLE_4 | 35 | + | LEFT_HAND_PINKY_1 | 36 | + | RIGHT_HAND_PINKY_1 | 37 | + """ + PELVIS = enum.auto() + SPINE_1 = enum.auto() + SPINE_2 = enum.auto() + SPINE_3 = enum.auto() + NECK = enum.auto() + NOSE = enum.auto() + LEFT_EYE = enum.auto() + RIGHT_EYE = enum.auto() + LEFT_EAR = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_CLAVICLE = enum.auto() + RIGHT_CLAVICLE = enum.auto() + LEFT_SHOULDER = enum.auto() + RIGHT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + RIGHT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + RIGHT_WRIST = enum.auto() + LEFT_HIP = enum.auto() + RIGHT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + RIGHT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + RIGHT_ANKLE = enum.auto() + LEFT_BIG_TOE = enum.auto() + RIGHT_BIG_TOE = enum.auto() + LEFT_SMALL_TOE = enum.auto() + RIGHT_SMALL_TOE = enum.auto() + LEFT_HEEL = enum.auto() + RIGHT_HEEL = enum.auto() + LEFT_HAND_THUMB_4 = enum.auto() + RIGHT_HAND_THUMB_4 = enum.auto() + LEFT_HAND_INDEX_1 = enum.auto() + RIGHT_HAND_INDEX_1 = enum.auto() + LEFT_HAND_MIDDLE_4 = enum.auto() + RIGHT_HAND_MIDDLE_4 = enum.auto() + LEFT_HAND_PINKY_1 = enum.auto() + RIGHT_HAND_PINKY_1 = enum.auto() + LAST = enum.auto() + +class INFERENCE_PRECISION(enum.Enum): + """ + Report the actual inference precision used + + | Enumerator | | + |:---:|:---:| + | FP32 | | + | FP16 | | + | INT8 | | + """ + FP32 = enum.auto() + FP16 = enum.auto() + INT8 = enum.auto() + LAST = enum.auto() + +class BODY_FORMAT(enum.Enum): + """ + Lists supported skeleton body models. + + | Enumerator | | + |:---:|:---:| + | BODY_18 | 18-keypoint model Basic body model | + | BODY_34 | 34-keypoint model Note: Requires body fitting enabled. | + | BODY_38 | 38-keypoint model Including simplified face, hands and feet.Note: Early Access | + """ + BODY_18 = enum.auto() + BODY_34 = enum.auto() + BODY_38 = enum.auto() + LAST = enum.auto() + +class BODY_KEYPOINTS_SELECTION(enum.Enum): + """ + Lists supported models for skeleton keypoints selection. + + | Enumerator | | + |:---:|:---:| + | FULL | Full keypoint model | + | UPPER_BODY | Upper body keypoint model Will output only upper body (from hip). | + """ + FULL = enum.auto() + UPPER_BODY = enum.auto() + LAST = enum.auto() + +def get_idx(part: BODY_18_PARTS) -> int: + """ + Return associated index of each sl.BODY_18_PARTS. + """ + return int() + +def get_idx_34(part: BODY_34_PARTS) -> int: + """ + Return associated index of each sl.BODY_34_PARTS. + """ + return int() + +def get_idx_38(part: BODY_38_PARTS) -> int: + """ + Return associated index of each sl.BODY_38_PARTS. 
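+
+ For example, following the keypoint table above:
+
+ .. code-block:: python
+
+ import pyzed.sl as sl
+
+ assert sl.get_idx_38(sl.BODY_38_PARTS.PELVIS) == 0
+ assert sl.get_idx_38(sl.BODY_38_PARTS.RIGHT_WRIST) == 17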
+ """ + return int() + +class ObjectsBatch: + """ + Class containing batched data of a detected objects from the object detection module. + + This class can be used to store trajectories. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def id(self) -> int: + """ + Id of the batch. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def sublabel(self) -> OBJECT_SUBCLASS: + """ + Objects sub-class/sub-category to identify the object type. + """ + return OBJECT_SUBCLASS() + + @sublabel.setter + def sublabel(self, sublabel: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Objects tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @property + def timestamps(self) -> list[Timestamp]: + """ + List of timestamps for each object. + """ + return list[Timestamp]() + + @property + def label(self) -> OBJECT_CLASS: + """ + Objects class/category to identify the object type. + """ + return OBJECT_CLASS() + + @label.setter + def label(self, label: Any) -> None: + pass + + @property + def action_states(self) -> list[OBJECT_ACTION_STATE]: + """ + List of action states for each object. + """ + return list[OBJECT_ACTION_STATE]() + + def positions(self) -> np.array[float][float]: + """ + NumPy array of positions for each object. + """ + return np.array[float][float]() + + def position_covariances(self) -> np.array[float][float]: + """ + NumPy array of positions' covariances for each object. + """ + return np.array[float][float]() + + def velocities(self) -> np.array[float][float]: + """ + NumPy array of 3D velocities for each object. + """ + return np.array[float][float]() + + def bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D bounding boxes for each object. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float][float]() + + def bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 2D bounding boxes for each object. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int][int]() + + def confidences(self) -> np.array[float]: + """ + NumPy array of confidences for each object. + """ + return np.array[float]() + + def head_bounding_boxes_2d(self) -> np.array[int][int][int]: + return np.array[int][int][int]() + + def head_bounding_boxes(self) -> np.array[float][float][float]: + return np.array[float][float][float]() + + def head_positions(self) -> np.array[float][float]: + return np.array[float][float]() + + +class Objects: + """ + Class containing the results of the object detection module. + + The detected objects are listed in object_list. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_list(self) -> list[ObjectData]: + """ + List of detected objects. + """ + return list[ObjectData]() + + @object_list.setter + def object_list(self, object_list: Any) -> None: + pass + + @property + def is_new(self) -> bool: + """ + Whether object_list has already been retrieved or not. 
+ Default: False + """ + return bool() + + @is_new.setter + def is_new(self, is_new: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + Timestamp corresponding to the frame acquisition. + This value is especially useful for the async mode to synchronize the data. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @property + def is_tracked(self) -> bool: + """ + Whether both the object tracking and the world orientation has been setup. + Default: False + """ + return bool() + + @is_tracked.setter + def is_tracked(self, is_tracked: Any) -> None: + pass + + def get_object_data_from_id(self, py_object_data: ObjectData, object_data_id: int) -> bool: + """ + Method that looks for a given object id in the current objects list. + :param py_object_data: sl.ObjectData to fill if the search succeeded. (Direction: out) + :param object_data_id: Id of the sl.ObjectData to search. (Direction: in) + :return: True if found, otherwise False. + """ + return bool() + + +class BodiesBatch: + """ + Class containing batched data of a detected bodies/persons from the body tracking module. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def id(self) -> int: + """ + Id of the batch. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def action_states(self) -> list[OBJECT_ACTION_STATE]: + """ + List of action states for each body/person. + """ + return list[OBJECT_ACTION_STATE]() + + @property + def timestamps(self) -> list[Timestamp]: + """ + List of timestamps for each body/person. + """ + return list[Timestamp]() + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Bodies/persons tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + def positions(self) -> np.array[float][float]: + """ + NumPy array of positions for each body/person. + """ + return np.array[float][float]() + + def position_covariances(self) -> np.array[float][float]: + """ + NumPy array of positions' covariances for each body/person. + """ + return np.array[float][float]() + + def velocities(self) -> np.array[float][float]: + """ + NumPy array of 3D velocities for each body/person. + """ + return np.array[float][float]() + + def bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D bounding boxes for each body/person. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float][float]() + + def bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 2D bounding boxes for each body/person. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int][int]() + + def confidences(self) -> np.array[float]: + """ + NumPy array of confidences for each body/person. + """ + return np.array[float]() + + def keypoints_2d(self) -> np.array[int][int][int]: + return np.array[int][int][int]() + + def keypoints(self) -> np.array[float][float][float]: + """ + NumPy array of 3D keypoints for each body/person. 
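+
+ A minimal sketch of pairing the batched data (assuming ``batch`` was retrieved from the batching system and that entries are aligned with timestamps):
+
+ .. code-block:: python
+
+ kps = batch.keypoints()  # batched 3D skeletons
+ ts_ms = [t.get_milliseconds() for t in batch.timestamps]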
+ """ + return np.array[float][float][float]() + + def head_bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 3D keypoints for each body/person. + """ + return np.array[int][int][int]() + + def head_bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D keypoints for each body/person. + """ + return np.array[float][float][float]() + + def head_positions(self) -> np.array[float][float]: + """ + NumPy array of 3D keypoints for each body/person. + """ + return np.array[float][float]() + + def keypoint_confidences(self) -> np.array[float][float]: + """ + NumPy array of detection confidences NumPy array for each keypoint for each body/person. + """ + return np.array[float][float]() + + +class Bodies: + """ + Class containing the results of the body tracking module. + + The detected bodies/persons are listed in body_list. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def body_list(self) -> list[BodyData]: + """ + List of detected bodies/persons. + """ + return list[BodyData]() + + @body_list.setter + def body_list(self, body_list: Any) -> None: + pass + + @property + def is_tracked(self) -> bool: + """ + Whether both the body tracking and the world orientation has been setup. + Default: False + """ + return bool() + + @is_tracked.setter + def is_tracked(self, is_tracked: Any) -> None: + pass + + @property + def inference_precision_mode(self) -> INFERENCE_PRECISION: + """ + Status of the actual inference precision mode used to detect the bodies/persons. + .. note:: + It depends on the GPU hardware support, the sl.BodyTrackingParameters.allow_reduced_precision_inference input parameter and the model support. + """ + return INFERENCE_PRECISION() + + @inference_precision_mode.setter + def inference_precision_mode(self, inference_precision_mode: Any) -> None: + pass + + @property + def is_new(self) -> bool: + """ + Whether object_list has already been retrieved or not. + Default: False + """ + return bool() + + @is_new.setter + def is_new(self, is_new: Any) -> None: + pass + + @property + def body_format(self) -> BODY_FORMAT: + """ + Body format used in sl.BodyTrackingParameters.body_format parameter. + """ + return BODY_FORMAT() + + @body_format.setter + def body_format(self, body_format: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + Timestamp corresponding to the frame acquisition. + This value is especially useful for the async mode to synchronize the data. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def get_body_data_from_id(self, py_body_data: BodyData, body_data_id: int) -> bool: + """ + Method that looks for a given body id in the current bodies list. + :param py_body_data: sl.BodyData to fill if the search succeeded. (Direction: out) + :param body_data_id: Id of the sl.BodyData to search. (Direction: in) + :return: True if found, otherwise False. + """ + return bool() + + +class BatchParameters: + """ + Class containing a set of parameters for batch object detection. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def latency(self) -> float: + """ + Trajectories will be output in batch with the desired latency in seconds. + During this waiting time, re-identification of objects is done in the background. + .. 
note:: + Specifying a short latency will limit the search (falling in timeout) for previously seen object ids but will be closer to real time output. + + .. note:: + Specifying a long latency will reduce the change of timeout in re-identification but increase difference with live output. + """ + return float() + + @latency.setter + def latency(self, latency: Any) -> None: + pass + + @property + def enable(self) -> bool: + """ + Whether to enable the batch option in the object detection module. + Batch queueing system provides: + - deep-learning based re-identification + - trajectory smoothing and filtering + + Default: False + .. note:: + To activate this option, enable must be set to True. + """ + return bool() + + @enable.setter + def enable(self, enable: Any) -> None: + pass + + @property + def id_retention_time(self) -> float: + """ + Max retention time in seconds of a detected object. + After this time, the same object will mostly have a different id. + """ + return float() + + @id_retention_time.setter + def id_retention_time(self, id_retention_time: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + param enable : Activates enable + param id_retention_time : Chosen id_retention_time + param batch_duration : Chosen latency + """ + pass + + +class ObjectDetectionParameters: + """ + Class containing a set of parameters for the object detection module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_segmentation(self) -> bool: + """ + Whether the object masks will be computed. + Default: False + """ + return bool() + + @enable_segmentation.setter + def enable_segmentation(self, enable_segmentation: Any) -> None: + pass + + @property + def allow_reduced_precision_inference(self) -> bool: + """ + Whether to allow inference to run at a lower precision to improve runtime and memory usage. + It might increase the initial optimization time and could include downloading calibration data or calibration cache and slightly reduce the accuracy. + .. note:: + The fp16 is automatically enabled if the GPU is compatible and provides a speed up of almost x2 and reduce memory usage by almost half, no precision loss. + + .. note:: + This setting allow int8 precision which can speed up by another x2 factor (compared to fp16, or x4 compared to fp32) and half the fp16 memory usage, however some accuracy could be lost. + + .. note:: + The accuracy loss should not exceed 1-2% on the compatible models. + + .. note:: + The current compatible models are all sl.AI_MODELS.HUMAN_BODY_XXXX. + """ + return bool() + + @allow_reduced_precision_inference.setter + def allow_reduced_precision_inference(self, allow_reduced_precision_inference: Any) -> None: + pass + + @property + def filtering_mode(self) -> OBJECT_FILTERING_MODE: + """ + Filtering mode that should be applied to raw detections. + Default: sl.OBJECT_FILTERING_MODE.NMS_3D (same behavior as previous ZED SDK version) + .. note:: + This parameter is only used in detection model sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX + + and sl.OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS. + .. note:: + For custom object, it is recommended to use sl.OBJECT_FILTERING_MODE.NMS_3D_PER_CLASS + + or sl.OBJECT_FILTERING_MODE.NONE. + .. 
note:: + In this case, you might need to add your own NMS filter before ingesting the boxes into the object detection module. + """ + return OBJECT_FILTERING_MODE() + + @filtering_mode.setter + def filtering_mode(self, filtering_mode: Any) -> None: + pass + + @property + def batch_parameters(self) -> BatchParameters: + """ + Batching system parameters. + Batching system (introduced in 3.5) performs short-term re-identification with deep-learning and trajectories filtering. + \n sl.BatchParameters.enable must to be true to use this feature (by default disabled). + """ + return BatchParameters() + + @batch_parameters.setter + def batch_parameters(self, batch_parameters: Any) -> None: + pass + + @property + def instance_module_id(self) -> int: + """ + Id of the module instance. + This is used to identify which object detection module instance is used. + """ + return int() + + @instance_module_id.setter + def instance_module_id(self, instance_module_id: Any) -> None: + pass + + @property + def detection_model(self) -> OBJECT_DETECTION_MODEL: + """ + sl.OBJECT_DETECTION_MODEL to use. + Default: sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_FAST + """ + return OBJECT_DETECTION_MODEL() + + @detection_model.setter + def detection_model(self, detection_model: Any) -> None: + pass + + @property + def fused_objects_group_name(self) -> str: + """ + In a multi camera setup, specify which group this model belongs to. + + In a multi camera setup, multiple cameras can be used to detect objects and multiple detector having similar output layout can see the same object. + Therefore, Fusion will fuse together the outputs received by multiple detectors only if they are part of the same fused_objects_group_name. + + .. note:: + This parameter is not used when not using a multi-camera setup and must be set in a multi camera setup. + """ + return str() + + @fused_objects_group_name.setter + def fused_objects_group_name(self, fused_objects_group_name: Any) -> None: + pass + + @property + def enable_tracking(self) -> bool: + """ + Whether the object detection system includes object tracking capabilities across a sequence of images. + Default: True + """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + @property + def custom_onnx_file(self) -> str: + """ + Path to the YOLO-like onnx file for custom object detection ran in the ZED SDK. + + When `detection_model` is OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS, a onnx model must be passed so that the ZED SDK can optimize it for your GPU and run inference on it. + + The resulting optimized model will be saved for re-use in the future. + + .. attention:: - The model must be a YOLO-like model. + .. attention:: - The caching uses the `custom_onnx_file` string along with your GPU specs to decide whether to use the cached optmized model or to optimize the passed onnx model. + If you want to use a different model (i.e. an onnx with different weights), you must use a different `custom_onnx_file` string or delete the cached optimized model in + /resources. + + .. note:: + This parameter is useless when detection_model is not OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS. + """ + return str() + + @custom_onnx_file.setter + def custom_onnx_file(self, custom_onnx_file: Any) -> None: + pass + + @property + def max_range(self) -> float: + """ + Upper depth range for detections. + Default: -1 (value set in sl.InitParameters.depth_maximum_distance) + .. 
note:: + The value cannot be greater than sl.InitParameters.depth_maximum_distance and its unit is defined in sl.InitParameters.coordinate_units. + """ + return float() + + @max_range.setter + def max_range(self, max_range: Any) -> None: + pass + + @property + def prediction_timeout_s(self) -> float: + """ + Prediction duration of the ZED SDK when an object is not detected anymore before switching its state to sl.OBJECT_TRACKING_STATE.SEARCHING. + It prevents the jittering of the object state when there is a short misdetection. + \n The user can define their own prediction time duration. + \n Default: 0.2 + .. note:: + During this time, the object will have sl.OBJECT_TRACKING_STATE.OK state even if it is not detected. + + .. note:: + The duration is expressed in seconds. + + .. warning:: prediction_timeout_s will be clamped to 1 second as the prediction is getting worse with time. + .. warning:: Setting this parameter to 0 disables the ZED SDK predictions. + """ + return float() + + @prediction_timeout_s.setter + def prediction_timeout_s(self, prediction_timeout_s: Any) -> None: + pass + + @property + def custom_onnx_dynamic_input_shape(self) -> Resolution: + """ + Resolution to the YOLO-like onnx file for custom object detection ran in the ZED SDK. This resolution defines the input tensor size for dynamic shape ONNX model only. The batch and channel dimensions are automatically handled, it assumes it's color images like default YOLO models. + + .. note:: + This parameter is only used when detection_model is OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS and the provided ONNX file is using dynamic shapes. + + .. attention:: - Multiple model only support squared images + + Default: Squared images 512x512 (input tensor will be 1x3x512x512) + """ + return Resolution() + + @custom_onnx_dynamic_input_shape.setter + def custom_onnx_dynamic_input_shape(self, custom_onnx_dynamic_input_shape: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param enable_tracking: Activates enable_tracking + :param enable_segmentation: Activates enable_segmentation + :param detection_model: Chosen detection_model + :param max_range: Chosen max_range + :param batch_trajectories_parameters: Chosen batch_parameters + :param filtering_mode: Chosen filtering_mode + :param prediction_timeout_s: Chosen prediction_timeout_s + :param allow_reduced_precision_inference: Activates allow_reduced_precision_inference + :param instance_module_id: Chosen instance_module_id + """ + pass + + +class ObjectDetectionRuntimeParameters: + """ + Class containing a set of runtime parameters for the object detection module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_class_filter(self) -> list[OBJECT_CLASS]: + """ + Defines which object types to detect and track. + Default: [] (all classes are tracked) + .. note:: + Fewer object types can slightly speed up the process since every object is tracked. + + .. note:: + Will output only the selected classes. + + + In order to get all the available classes, the filter list must be empty : + .. code-block:: text + + object_class_filter = {}; + + + To select a set of specific object classes, like vehicles, persons and animals for instance: + .. 
code-block:: text
+
+ object_class_filter = {sl.OBJECT_CLASS.VEHICLE, sl.OBJECT_CLASS.PERSON, sl.OBJECT_CLASS.ANIMAL};
+ """
+ return list[OBJECT_CLASS]()
+
+ @object_class_filter.setter
+ def object_class_filter(self, object_class_filter: Any) -> None:
+ pass
+
+ @property
+ def detection_confidence_threshold(self) -> float:
+ """
+ Confidence threshold.
+ From 1 to 100, where 1 means a low threshold (more, but more uncertain, objects) and 99 means very few but very precise objects.
+ \n Default: 20
+ .. note::
+ If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked.
+
+ .. note::
+ detection_confidence_threshold is used as a fallback when sl.ObjectDetectionRuntimeParameters.object_class_detection_confidence_threshold is partially set.
+ """
+ return float()
+
+ @detection_confidence_threshold.setter
+ def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None:
+ pass
+
+ @property
+ def object_class_detection_confidence_threshold(self) -> dict:
+ """
+ Dictionary of confidence thresholds for each class (can be empty for some classes).
+ .. note::
+ sl.ObjectDetectionRuntimeParameters.detection_confidence_threshold will be taken as fallback/default value.
+ """
+ return {}
+
+ @object_class_detection_confidence_threshold.setter
+ def object_class_detection_confidence_threshold(self, object_class_detection_confidence_threshold: Any) -> None:
+ pass
+
+ def __dealloc__(self) -> None:
+ """
+ Default constructor.
+ All the parameters are set to their default values.
+ :param detection_confidence_threshold: Chosen detection_confidence_threshold
+ :param object_class_filter: Chosen object_class_filter
+ :param object_class_detection_confidence_threshold: Chosen object_class_detection_confidence_threshold
+ """
+ pass
+
+
+class CustomObjectDetectionProperties:
+ """
+ Class containing a set of runtime properties of a certain class ID for the object detection module using a custom model.
+
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def tracking_max_dist(self) -> float:
+ """
+ Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for more than this distance.
+
+ By default, tracked objects are not discarded based on distance.
+ Only valid for static objects.
+ """
+ return float()
+
+ @tracking_max_dist.setter
+ def tracking_max_dist(self, tracking_max_dist: Any) -> None:
+ pass
+
+ @property
+ def is_grounded(self) -> bool:
+ """
+ Provides a hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking.
+ - true: 2 DoF projected along the floor plane. This is the case for objects standing on the ground, such as persons, vehicles, etc.
+ The projection implies that the objects cannot be superposed on multiple horizontal levels.
+ - false: 6 DoF (full 3D movements are allowed).
+
+ .. note::
+ This parameter cannot be changed for a given object tracking id.
+
+ .. note::
+ It is advised to set it per label to avoid issues.
+ """
+ return bool()
+
+ @is_grounded.setter
+ def is_grounded(self, is_grounded: Any) -> None:
+ pass
+
+ @property
+ def min_box_height_meters(self) -> float:
+ """
+ Minimum allowed 3D height.
+
+ Any prediction smaller than that will be either discarded (if object is tracked and in SEARCHING state) or clamped.
+ Default: -1 (no filtering)
+ """
+ return float()
+
+ @min_box_height_meters.setter
+ def min_box_height_meters(self, min_box_height_meters: Any) -> None:
+ pass
+
+ @property
+ def enabled(self) -> bool:
+ """
+ Whether the object is kept or not.
+ """
+ return bool()
+
+ @enabled.setter
+ def enabled(self, enabled: Any) -> None:
+ pass
+
+ @property
+ def tracking_timeout(self) -> float:
+ """
+ Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for more than this duration.
+
+ By default, the tracker decides internally based on the sub-class of the tracked object.
+ """
+ return float()
+
+ @tracking_timeout.setter
+ def tracking_timeout(self, tracking_timeout: Any) -> None:
+ pass
+
+ @property
+ def min_box_width_normalized(self) -> float:
+ """
+ Minimum allowed width normalized to the image size.
+
+ Any prediction smaller than that will be filtered out.
+ Default: -1 (no filtering)
+ """
+ return float()
+
+ @min_box_width_normalized.setter
+ def min_box_width_normalized(self, min_box_width_normalized: Any) -> None:
+ pass
+
+ @property
+ def object_acceleration_preset(self) -> OBJECT_ACCELERATION_PRESET:
+ """
+ Preset defining the expected maximum acceleration of the tracked object.
+
+ Determines how the ZED SDK interprets object acceleration, affecting tracking behavior and predictions.
+ Default: OBJECT_ACCELERATION_PRESET.DEFAULT
+ """
+ return OBJECT_ACCELERATION_PRESET()
+
+ @object_acceleration_preset.setter
+ def object_acceleration_preset(self, object_acceleration_preset: Any) -> None:
+ pass
+
+ @property
+ def max_box_height_meters(self) -> float:
+ """
+ Maximum allowed 3D height.
+
+ Any prediction bigger than that will be either discarded (if object is tracked and in SEARCHING state) or clamped.
+ Default: -1 (no filtering)
+ """
+ return float()
+
+ @max_box_height_meters.setter
+ def max_box_height_meters(self, max_box_height_meters: Any) -> None:
+ pass
+
+ @property
+ def max_allowed_acceleration(self) -> float:
+ """
+ Manually overrides the acceleration preset.
+
+ If set, this value takes precedence over the selected preset, allowing for a custom maximum acceleration.
+ Unit is m/s^2.
+ """
+ return float()
+
+ @max_allowed_acceleration.setter
+ def max_allowed_acceleration(self, max_allowed_acceleration: Any) -> None:
+ pass
+
+ @property
+ def max_box_width_normalized(self) -> float:
+ """
+ Maximum allowed width normalized to the image size.
+
+ Any prediction bigger than that will be filtered out.
+ Default: -1 (no filtering)
+ """
+ return float()
+
+ @max_box_width_normalized.setter
+ def max_box_width_normalized(self, max_box_width_normalized: Any) -> None:
+ pass
+
+ @property
+ def max_box_width_meters(self) -> float:
+ """
+ Maximum allowed 3D width.
+
+ Any prediction bigger than that will be either discarded (if object is tracked and in SEARCHING state) or clamped.
+ Default: -1 (no filtering)
+ """
+ return float()
+
+ @max_box_width_meters.setter
+ def max_box_width_meters(self, max_box_width_meters: Any) -> None:
+ pass
+
+ @property
+ def is_static(self) -> bool:
+ """
+ Provides a hypothesis about the object staticity to improve the object tracking.
+ - true: the object will be assumed to never move nor be moved.
+ - false: the object will be assumed to be able to move or be moved.
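+
+ A hedged sketch of declaring both hypotheses for a class of fixed, wall-mounted objects (values are illustrative):
+
+ .. code-block:: python
+
+ props = sl.CustomObjectDetectionProperties()
+ props.is_static = True  # assumed to never move nor be moved
+ props.is_grounded = False  # not constrained to the floor plane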
+ """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @property + def native_mapped_class(self) -> OBJECT_SUBCLASS: + """ + For increased accuracy, the native sl::OBJECT_SUBCLASS mapping, if any. + + Native objects have refined internal parameters for better 3D projection and tracking accuracy. + If one of the custom objects can be mapped to one the native sl::OBJECT_SUBCLASS, this can help to boost the tracking accuracy. + Default: no mapping + """ + return OBJECT_SUBCLASS() + + @native_mapped_class.setter + def native_mapped_class(self, native_mapped_class: Any) -> None: + pass + + @property + def detection_confidence_threshold(self) -> float: + """ + Confidence threshold. + + From 1 to 100, with 1 meaning a low threshold, more uncertain objects and 99 very few but very precise objects. + Default: 20.f + + .. note:: + If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked. + """ + return float() + + @detection_confidence_threshold.setter + def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None: + pass + + @property + def min_box_width_meters(self) -> float: + """ + Minimum allowed 3D width. + + Any prediction smaller than that will be either discarded (if object is tracked and in SEARCHING state) or clamped. + Default: -1 (no filtering) + """ + return float() + + @min_box_width_meters.setter + def min_box_width_meters(self, min_box_width_meters: Any) -> None: + pass + + @property + def min_box_height_normalized(self) -> float: + """ + Minimum allowed height normalized to the image size. + + Any prediction smaller than that will be filtered out. + Default: -1 (no filtering) + """ + return float() + + @min_box_height_normalized.setter + def min_box_height_normalized(self, min_box_height_normalized: Any) -> None: + pass + + @property + def max_box_height_normalized(self) -> float: + """ + Maximum allowed height normalized to the image size. + + Any prediction bigger than that will be filtered out. + Default: -1 (no filtering) + """ + return float() + + @max_box_height_normalized.setter + def max_box_height_normalized(self, max_box_height_normalized: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param detection_confidence_threshold: Chosen detection_confidence_threshold + :param object_class_filter: Chosen object_class_filter + :param object_class_detection_confidence_threshold: Chosen object_class_detection_confidence_threshold + """ + pass + + +class CustomObjectDetectionRuntimeParameters: + """ + Class containing a set of runtime parameters for the object detection module using your own model ran by the SDK. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_detection_properties(self) -> CustomObjectDetectionProperties: + """ + Global object detection properties. + + .. note:: + object_detection_properties is used as a fallback when sl::CustomObjectDetectionRuntimeParameters.object_class_detection_properties is partially set. 
+ """ + return CustomObjectDetectionProperties() + + @object_detection_properties.setter + def object_detection_properties(self, object_detection_properties: Any) -> None: + pass + + @property + def object_class_detection_properties(self) -> dict: + """ + Per class object detection properties. + """ + return {} + + @object_class_detection_properties.setter + def object_class_detection_properties(self, object_class_detection_properties: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + """ + pass + + +class BodyTrackingParameters: + """ + Class containing a set of parameters for the body tracking module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_segmentation(self) -> bool: + """ + Whether the body/person masks will be computed. + Default: False + """ + return bool() + + @enable_segmentation.setter + def enable_segmentation(self, enable_segmentation: Any) -> None: + pass + + @property + def allow_reduced_precision_inference(self) -> bool: + """ + Whether to allow inference to run at a lower precision to improve runtime and memory usage. + It might increase the initial optimization time and could include downloading calibration data or calibration cache and slightly reduce the accuracy. + .. note:: + The fp16 is automatically enabled if the GPU is compatible and provides a speed up of almost x2 and reduce memory usage by almost half, no precision loss. + + .. note:: + This setting allow int8 precision which can speed up by another x2 factor (compared to fp16, or x4 compared to fp32) and half the fp16 memory usage, however some accuracy could be lost. + + .. note:: + The accuracy loss should not exceed 1-2% on the compatible models. + + .. note:: + The current compatible models are all sl.AI_MODELS.HUMAN_BODY_XXXX. + """ + return bool() + + @allow_reduced_precision_inference.setter + def allow_reduced_precision_inference(self, allow_reduced_precision_inference: Any) -> None: + pass + + @property + def instance_module_id(self) -> int: + """ + Id of the module instance. + This is used to identify which body tracking module instance is used. + """ + return int() + + @instance_module_id.setter + def instance_module_id(self, instance_module_id: Any) -> None: + pass + + @property + def detection_model(self) -> BODY_TRACKING_MODEL: + """ + sl.BODY_TRACKING_MODEL to use. + Default: sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE + """ + return BODY_TRACKING_MODEL() + + @detection_model.setter + def detection_model(self, detection_model: Any) -> None: + pass + + @property + def enable_body_fitting(self) -> bool: + """ + Whether to apply the body fitting. + Default: False + """ + return bool() + + @enable_body_fitting.setter + def enable_body_fitting(self, enable_body_fitting: Any) -> None: + pass + + @property + def body_format(self) -> BODY_FORMAT: + """ + Body format to be outputted by the ZED SDK with sl.Camera.retrieve_bodies(). + Default: sl.BODY_FORMAT.BODY_18 + """ + return BODY_FORMAT() + + @body_format.setter + def body_format(self, body_format: Any) -> None: + pass + + @property + def enable_tracking(self) -> bool: + """ + Whether the body tracking system includes body/person tracking capabilities across a sequence of images. 
+ Default: True + """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + @property + def max_range(self) -> float: + """ + Upper depth range for detections. + Default: -1 (value set in sl.InitParameters.depth_maximum_distance) + .. note:: + The value cannot be greater than sl.InitParameters.depth_maximum_distance and its unit is defined in sl.InitParameters.coordinate_units. + """ + return float() + + @max_range.setter + def max_range(self, max_range: Any) -> None: + pass + + @property + def prediction_timeout_s(self) -> float: + """ + Prediction duration of the ZED SDK when an object is not detected anymore before switching its state to sl.OBJECT_TRACKING_STATE.SEARCHING. + It prevents the jittering of the object state when there is a short misdetection. + \n The user can define their own prediction time duration. + \n Default: 0.2 + .. note:: + During this time, the object will have sl.OBJECT_TRACKING_STATE.OK state even if it is not detected. + + .. note:: + The duration is expressed in seconds. + + .. warning:: prediction_timeout_s will be clamped to 1 second as the prediction is getting worse with time. + .. warning:: Setting this parameter to 0 disables the ZED SDK predictions. + """ + return float() + + @prediction_timeout_s.setter + def prediction_timeout_s(self, prediction_timeout_s: Any) -> None: + pass + + @property + def body_selection(self) -> BODY_KEYPOINTS_SELECTION: + """ + Selection of keypoints to be outputted by the ZED SDK with sl.Camera.retrieve_bodies(). + Default: sl.BODY_KEYPOINTS_SELECTION.FULL + """ + return BODY_KEYPOINTS_SELECTION() + + @body_selection.setter + def body_selection(self, body_selection: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param enable_tracking: Activates enable_tracking + :param enable_segmentation: Activates enable_segmentation + :param detection_model: Chosen detection_model + :param enable_body_fitting: Activates enable_body_fitting + :param max_range: Chosen max_range + :param body_format: Chosen body_format + :param body_selection: Chosen body_selection + :param prediction_timeout_s: Chosen prediction_timeout_s + :param allow_reduced_precision_inference: Activates allow_reduced_precision_inference + :param instance_module_id: Chosen instance_module_id + """ + pass + + +class BodyTrackingRuntimeParameters: + """ + Class containing a set of runtime parameters for the body tracking module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def skeleton_smoothing(self) -> float: + """ + Control of the smoothing of the fitted fused skeleton. + It is ranged from 0 (low smoothing) and 1 (high smoothing). + \n Default: 0 + """ + return float() + + @skeleton_smoothing.setter + def skeleton_smoothing(self, skeleton_smoothing: Any) -> None: + pass + + @property + def detection_confidence_threshold(self) -> float: + """ + Confidence threshold. + From 1 to 100, with 1 meaning a low threshold, more uncertain objects and 99 very few but very precise objects. + \n Default: 20 + .. note:: + If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked. 
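+
+ For instance (a minimal sketch):
+
+ .. code-block:: python
+
+ body_rt = sl.BodyTrackingRuntimeParameters()
+ body_rt.detection_confidence_threshold = 40  # stricter than the default of 20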
+ """ + return float() + + @detection_confidence_threshold.setter + def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None: + pass + + @property + def minimum_keypoints_threshold(self) -> int: + """ + Minimum threshold for the keypoints. + The ZED SDK will only output the keypoints of the skeletons with threshold greater than this value. + \n Default: 0 + .. note:: + It is useful, for example, to remove unstable fitting results when a skeleton is partially occluded. + """ + return int() + + @minimum_keypoints_threshold.setter + def minimum_keypoints_threshold(self, minimum_keypoints_threshold: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param detection_confidence_threshold: Chosen detection_confidence_threshold + :param minimum_keypoints_threshold: Chosen minimum_keypoints_threshold + :param skeleton_smoothing: Chosen skeleton_smoothing + """ + pass + + +class PlaneDetectionParameters: + """ + Class containing a set of parameters for the plane detection functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def normal_similarity_threshold(self) -> float: + """ + Controls the spread of plane by checking the angle difference. + Default: 15 degrees + """ + return float() + + @normal_similarity_threshold.setter + def normal_similarity_threshold(self, normal_similarity_threshold: Any) -> None: + pass + + @property + def max_distance_threshold(self) -> float: + """ + Controls the spread of plane by checking the position difference. + Default: 0.15 meters + """ + return float() + + @max_distance_threshold.setter + def max_distance_threshold(self, max_distance_threshold: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + Values: + - max_distance_threshold : 0.15 meters + - normal_similarity_threshold : 15.0 degrees + """ + pass + + +class RegionOfInterestParameters: + """ + Class containing a set of parameters for the plane detection functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def image_height_ratio_cutoff(self) -> float: + """ + By default consider only the lower half of the image, can be useful to filter out the sky + Default: 0.5, correspond to the lower half of the image + """ + return float() + + @image_height_ratio_cutoff.setter + def image_height_ratio_cutoff(self, image_height_ratio_cutoff: Any) -> None: + pass + + @property + def auto_apply_module(self) -> set[MODULE]: + """ + Once computed the ROI computed will be automatically applied + Default: Enabled + """ + return set[MODULE]() + + @auto_apply_module.setter + def auto_apply_module(self, auto_apply_module: Any) -> None: + pass + + @property + def depth_far_threshold_meters(self) -> float: + """ + Filtering how far object in the ROI should be considered, this is useful for a vehicle for instance + Default: 2.5 meters + """ + return float() + + @depth_far_threshold_meters.setter + def depth_far_threshold_meters(self, depth_far_threshold_meters: Any) -> None: + pass + + def __dealloc__(self) -> None: + pass + + +def get_current_timestamp() -> Timestamp: + """ + Class containing a set of parameters for the plane detection functionality. 
+ + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + return Timestamp() + +class Resolution: + """ + Structure containing the width and height of an image. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def height(self) -> int: + """ + Height of the image in pixels. + """ + return int() + + @height.setter + def height(self, height: Any) -> None: + pass + + @property + def width(self) -> int: + """ + Width of the image in pixels. + """ + return int() + + @width.setter + def width(self, width: Any) -> None: + pass + + def area(self) -> int: + """ + Area (width * height) of the image. + """ + return int() + + def __richcmp__(left, right, op) -> None: + pass + + +class Rect: + """ + Class defining a 2D rectangle with top-left corner coordinates and width/height in pixels. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def x(self) -> int: + """ + x coordinates of top-left corner. + """ + return int() + + @x.setter + def x(self, x: Any) -> None: + pass + + @property + def y(self) -> int: + """ + y coordinates of top-left corner. + """ + return int() + + @y.setter + def y(self, y: Any) -> None: + pass + + @property + def height(self) -> int: + """ + Height of the rectangle in pixels. + """ + return int() + + @height.setter + def height(self, height: Any) -> None: + pass + + @property + def width(self) -> int: + """ + Width of the rectangle in pixels. + """ + return int() + + @width.setter + def width(self, width: Any) -> None: + pass + + def area(self) -> int: + """ + Returns the area of the rectangle. + """ + return int() + + def is_empty(self) -> bool: + """ + Tests if the given sl.Rect is empty (width or/and height is null). + """ + return bool() + + def contains(self, target: Rect, proper = False) -> bool: + """ + Tests if this sl.Rect contains the **target** sl.Rect. + :return: True if this rectangle contains the rectangle, otherwise False. + .. note:: + This method only returns true if the target rectangle is entirely inside this rectangle (not on the edge). + """ + return bool() + + def is_contained(self, target: Rect, proper = False) -> bool: + """ + Tests if this sl.Rect is contained inside the given **target** sl.Rect. + :return: True if this rectangle is inside the current **target** sl.Rect, otherwise False. + .. note:: + This method only returns True if this rectangle is entirely inside the rectangle (not on the edge). + """ + return bool() + + def __richcmp__(left, right, op) -> None: + """ + Tests if this sl.Rect is contained inside the given **target** sl.Rect. + :return: True if this rectangle is inside the current **target** sl.Rect, otherwise False. + .. note:: + This method only returns True if this rectangle is entirely inside the rectangle (not on the edge). + """ + pass + + +class CameraParameters: + """ + Class containing the intrinsic parameters of a camera. + That information about the camera will be returned by sl.Camera.get_camera_information(). + .. note:: + Similar to the sl.CalibrationParameters, those parameters are taken from the settings file (SNXXX.conf) and are modified during the sl.Camera.open() call when running a self-calibration). + + .. note:: + Those parameters given after sl.Camera.open() call, represent the camera matrix corresponding to rectified or unrectified images. + + .. note:: + When filled with rectified parameters, fx, fy, cx, cy must be the same for left and right camera once sl.Camera.open() has been called. 
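+
+ As an illustration, the pinhole projection of a 3D point (X, Y, Z) expressed in the left camera frame uses these intrinsics (``params`` standing for a left sl.CameraParameters instance):
+
+ .. code-block:: python
+
+ u = params.fx * X / Z + params.cx  # column, in pixels
+ v = params.fy * Y / Z + params.cy  # row, in pixels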
+
+ .. note::
+ Since distortion is corrected during rectification, distortion should not be considered on rectified images.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def d_fov(self) -> float:
+ """
+ Diagonal field of view, in degrees.
+ """
+ return float()
+
+ @d_fov.setter
+ def d_fov(self, d_fov: Any) -> None:
+ pass
+
+ @property
+ def cy(self) -> float:
+ """
+ Optical center along y axis, defined in pixels (usually close to height / 2).
+ """
+ return float()
+
+ @cy.setter
+ def cy(self, cy: Any) -> None:
+ pass
+
+ @property
+ def image_size(self) -> Resolution:
+ """
+ Size in pixels of the images given by the camera.
+ """
+ return Resolution()
+
+ @image_size.setter
+ def image_size(self, image_size: Any) -> None:
+ pass
+
+ @property
+ def focal_length_metric(self) -> float:
+ """
+ Real focal length in millimeters.
+ """
+ return float()
+
+ @focal_length_metric.setter
+ def focal_length_metric(self, focal_length_metric: Any) -> None:
+ pass
+
+ @property
+ def fy(self) -> float:
+ """
+ Focal length in pixels along y axis.
+ """
+ return float()
+
+ @fy.setter
+ def fy(self, fy: Any) -> None:
+ pass
+
+ @property
+ def v_fov(self) -> float:
+ """
+ Vertical field of view, in degrees.
+ """
+ return float()
+
+ @v_fov.setter
+ def v_fov(self, v_fov: Any) -> None:
+ pass
+
+ @property
+ def fx(self) -> float:
+ """
+ Focal length in pixels along x axis.
+ """
+ return float()
+
+ @fx.setter
+ def fx(self, fx: Any) -> None:
+ pass
+
+ @property
+ def disto(self) -> list[float]:
+ """
+ Distortion factors: [k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4].
+
+ Radial (k1, k2, k3, k4, k5, k6), tangential (p1, p2) and prism (s1, s2, s3, s4) distortion.
+ """
+ return list[float]()
+
+ @property
+ def h_fov(self) -> float:
+ """
+ Horizontal field of view, in degrees.
+ """
+ return float()
+
+ @h_fov.setter
+ def h_fov(self, h_fov: Any) -> None:
+ pass
+
+ @property
+ def cx(self) -> float:
+ """
+ Optical center along x axis, defined in pixels (usually close to width / 2).
+ """
+ return float()
+
+ @cx.setter
+ def cx(self, cx: Any) -> None:
+ pass
+
+ def set_disto(self, value1: float, value2: float, value3: float, value4: float, value5: float) -> None:
+ """
+ Sets the elements of the disto array.
+ :param value1: k1
+ :param value2: k2
+ :param value3: p1
+ :param value4: p2
+ :param value5: k3
+ """
+ pass
+
+ def set_up(self, fx_: float, fy_: float, cx_: float, cy_: float) -> None:
+ """
+ Sets up the parameters of a camera.
+ :param fx_: Horizontal focal length
+ :param fy_: Vertical focal length
+ :param cx_: Horizontal optical center
+ :param cy_: Vertical optical center.
+ """
+ pass
+
+ def scale(self, resolution: Resolution) -> CameraParameters:
+ """
+ Returns the sl.CameraParameters for another resolution.
+ :param resolution: Resolution in which to get the new sl.CameraParameters.
+ :return: The sl.CameraParameters for the resolution given as input.
+ """
+ return CameraParameters()
+
+
+class CalibrationParameters:
+ """
+ Class containing intrinsic and extrinsic parameters of the camera (translation and rotation).
+
+ This information about the camera is returned by sl.Camera.get_camera_information().
+ .. note::
+ The calibration/rectification process, called during sl.Camera.open(), uses the raw parameters defined in the SNXXX.conf file, where XXX is the serial number of the camera.
+
+ .. note::
+ Those values may be adjusted by the self-calibration to get a proper image alignment.
+
+ .. 
note:: + After sl.Camera.open() is done (with or without self-calibration activated), most of the stereo parameters (except baseline of course) should be 0 or very close to 0. + + .. note:: + It means that images after rectification process (given by sl.Camera.retrieve_image()) are aligned as if they were taken by a "perfect" stereo camera, defined by the new sl.CalibrationParameters. + + .. warning:: CalibrationParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParameters.coordinate_system. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def left_cam(self) -> CameraParameters: + """ + Intrinsic sl.CameraParameters of the left camera. + """ + return CameraParameters() + + @left_cam.setter + def left_cam(self, left_cam: Any) -> None: + pass + + @property + def stereo_transform(self) -> Transform: + """ + Left to right camera transform, expressed in user coordinate system and unit (defined by sl.InitParameters.coordinate_system). + """ + return Transform() + + @property + def right_cam(self) -> CameraParameters: + """ + Intrinsic sl.CameraParameters of the right camera. + """ + return CameraParameters() + + @right_cam.setter + def right_cam(self, right_cam: Any) -> None: + pass + + def set(self) -> None: + pass + + def get_camera_baseline(self) -> float: + """ + Returns the baseline of the camera in the sl.UNIT defined in sl.InitParameters.coordinate_units. + """ + return float() + + +class SensorParameters: + """ + Class containing information about a single sensor available in the current device. + + Information about the camera sensors is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This class is meant to be used as a read-only container. + + .. note:: + Editing any of its fields will not impact the ZED SDK. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def sensor_unit(self) -> SENSORS_UNIT: + """ + Unit of the sensor. + """ + return SENSORS_UNIT() + + @property + def random_walk(self) -> float: + """ + Random walk derived from the Allan Variance given as continuous (frequency-independent). + .. note:: + The units will be expressed in ```sensor_unit / √(Hz)```. + + .. note:: + `NAN` if the information is not available. + """ + return float() + + @random_walk.setter + def random_walk(self, random_walk: Any) -> None: + pass + + @property + def noise_density(self) -> float: + """ + White noise density given as continuous (frequency-independent). + .. note:: + The units will be expressed in ```sensor_unit / √(Hz)```. + + .. note:: + `NAN` if the information is not available. + """ + return float() + + @noise_density.setter + def noise_density(self, noise_density: Any) -> None: + pass + + @property + def sensor_type(self) -> SENSOR_TYPE: + """ + Type of the sensor. + """ + return SENSOR_TYPE() + + @property + def sampling_rate(self) -> float: + """ + Sampling rate (or ODR) of the sensor. + """ + return float() + + @sampling_rate.setter + def sampling_rate(self, sampling_rate: Any) -> None: + pass + + @property + def resolution(self) -> float: + """ + Resolution of the sensor. + """ + return float() + + @resolution.setter + def resolution(self, resolution: Any) -> None: + pass + + @property + def is_available(self) -> bool: + """ + Whether the sensor is available in your camera. 
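+
+        Example (a minimal sketch; assumes an opened sl.Camera named ``zed``):
+
+        .. code-block:: text
+
+            imu = zed.get_camera_information().sensors_configuration.accelerometer_parameters
+            if imu.is_available:
+                print(imu.sampling_rate, imu.sensor_unit)  # ODR and unit of the accelerometer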
+ """ + return bool() + + def set(self) -> None: + pass + + def sensor_range(self) -> np.array[float]: + """ + Range (NumPy array) of the sensor (minimum: `sensor_range[0]`, maximum: `sensor_range[1]`). + """ + return np.array[float]() + + def set_sensor_range(self, value1: float, value2: float) -> None: + """ + Sets the minimum and the maximum values of the sensor range. + \param float value1 : Minimum of the range to set. + \param float value2 : Maximum of the range to set. + """ + pass + + +class SensorsConfiguration: + """ + Class containing information about all the sensors available in the current device. + + Information about the camera sensors is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This class is meant to be used as a read-only container. + + .. note:: + Editing any of its fields will not impact the ZED SDK. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_imu_transform(self) -> Transform: + """ + IMU to left camera transform matrix. + .. note:: + It contains the rotation and translation between the IMU frame and camera frame. + """ + return Transform() + + @property + def barometer_parameters(self) -> SensorParameters: + """ + Configuration of the barometer. + """ + return SensorParameters() + + @property + def magnetometer_parameters(self) -> SensorParameters: + """ + Configuration of the magnetometer. + """ + return SensorParameters() + + @property + def imu_magnetometer_transform(self) -> Transform: + """ + Magnetometer to IMU transform matrix. + .. note:: + It contains rotation and translation between IMU frame and magnetometer frame. + """ + return Transform() + + @property + def firmware_version(self) -> int: + """ + Firmware version of the sensor module. + .. note:: + 0 if no sensors are available (sl.MODEL.ZED). + """ + return int() + + @property + def gyroscope_parameters(self) -> SensorParameters: + """ + Configuration of the gyroscope. + """ + return SensorParameters() + + @property + def accelerometer_parameters(self) -> SensorParameters: + """ + Configuration of the accelerometer. + """ + return SensorParameters() + + def __set_from_camera(self, py_camera, resizer = Resolution(0, 0)) -> None: + pass + + def __set_from_cameraone(self, py_camera, resizer = Resolution(0, 0)) -> None: + pass + + def is_sensor_available(self, sensor_type) -> bool: + """ + Checks if a sensor is available on the device. + :param sensor_type: Sensor type to check. + :return: True if the sensor is available on the device, otherwise False. + """ + return bool() + + +class CameraConfiguration: + """ + Structure containing information about the camera sensor. + + Information about the camera is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This object is meant to be used as a read-only container, editing any of its field won't impact the SDK. + + .. warning:: sl.CalibrationParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParameters.coordinate_system. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def calibration_parameters_raw(self) -> CalibrationParameters: + """ + Intrinsics and extrinsic stereo parameters for unrectified/distorted images. + """ + return CalibrationParameters() + + @property + def fps(self) -> float: + """ + FPS of the camera. + """ + return float() + + @property + def firmware_version(self) -> int: + """ + Internal firmware version of the camera. 
+ """ + return int() + + @property + def calibration_parameters(self) -> CalibrationParameters: + """ + Intrinsics and extrinsic stereo parameters for rectified/undistorted images. + """ + return CalibrationParameters() + + @property + def resolution(self) -> Resolution: + """ + Resolution of the camera. + """ + return Resolution() + + +class CameraInformation: + """ + Structure containing information of a single camera (serial number, model, calibration, etc.) + That information about the camera will be returned by Camera.get_camera_information() + .. note:: + This object is meant to be used as a read-only container, editing any of its fields won't impact the SDK. + + .. warning:: CalibrationParameters are returned in COORDINATE_SYSTEM.IMAGE , they are not impacted by the InitParameters.coordinate_system + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera (see sl.MODEL). + """ + return MODEL() + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + """ + return int() + + @property + def camera_configuration(self) -> CameraConfiguration: + """ + Camera configuration parameters stored in a sl.CameraConfiguration. + """ + return CameraConfiguration() + + @property + def sensors_configuration(self) -> SensorsConfiguration: + """ + Sensors configuration parameters stored in a sl.SensorsConfiguration. + """ + return SensorsConfiguration() + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type used in the ZED SDK. + """ + return INPUT_TYPE() + + +class Mat: + """ + Class representing 1 to 4-channel matrix of float or uchar, stored on CPU and/or GPU side. + + This class is defined in a row-major order, meaning that for an image buffer, the rows are stored consecutively from top to bottom. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def verbose(self) -> bool: + """ + Whether the sl.Mat can display information. + """ + return bool() + + @verbose.setter + def verbose(self, verbose: Any) -> None: + pass + + @property + def name(self) -> str: + """ + The name of the sl.Mat (optional). + In verbose mode, it iss used to indicate which sl.Mat is printing information. + \n Default set to "n/a" to avoid empty string if not filled. + """ + return str() + + @name.setter + def name(self, name: Any) -> None: + pass + + @property + def timestamp(self) -> int: + """ + Timestamp of the last manipulation of the data of the matrix by a method/function. + """ + return int() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def init_mat_type(self, width, height, mat_type, memory_type = MEM.CPU) -> None: + """ + Initilizes a new sl.Mat and allocates the requested memory by calling alloc_size(). + :param width: Width of the matrix in pixels. Default: 0 + :param height: Height of the matrix in pixels. Default: 0 + :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1 + :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value) + """ + pass + + def init_mat_cpu(self, width: int, height: int, mat_type: MAT_TYPE, ptr, step, memory_type = MEM.CPU) -> None: + """ + Initilizes a new sl.Mat from an existing data pointer. + This method does not allocate the memory. + :param width: Width of the matrix in pixels. 
+        :param height: Height of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param ptr: Pointer to the data array.
+        :param step: Step of the data array (byte size of one pixel row).
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat_resolution(self, resolution: Resolution, mat_type: MAT_TYPE, memory_type = MEM.CPU) -> None:
+        """
+        Initializes a new sl.Mat and allocates the requested memory by calling alloc_size().
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat_resolution_cpu(self, resolution: Resolution, mat_type, ptr, step, memory_type = MEM.CPU) -> None:
+        """
+        Initializes a new sl.Mat from an existing data pointer.
+        This method does not allocate the memory.
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param ptr: Pointer to the data array (CPU or GPU).
+        :param step: Step of the data array (byte size of one pixel row).
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat(self, matrix: Mat) -> None:
+        """
+        Initializes a new sl.Mat by copy (shallow copy).
+        This method does not allocate the memory.
+        :param matrix: sl.Mat to copy.
+        """
+        pass
+
+    def alloc_size(self, width, height, mat_type, memory_type = MEM.CPU) -> None:
+        """
+        Allocates the sl.Mat memory.
+        :param width: Width of the matrix in pixels.
+        :param height: Height of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+
+        .. warning:: It erases previously allocated memory.
+        """
+        pass
+
+    def alloc_resolution(self, resolution: Resolution, mat_type: MAT_TYPE, memory_type = MEM.CPU) -> None:
+        """
+        Allocates the sl.Mat memory.
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+
+        .. warning:: It erases previously allocated memory.
+        """
+        pass
+
+    def free(self, memory_type = MEM.CPU) -> None:
+        """
+        Frees the owned memory.
+        :param memory_type: Specifies which memory you wish to free. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def copy_to(self, dst: Mat, cpy_type = COPY_TYPE.CPU_CPU) -> ERROR_CODE:
+        """
+        Copies data to another sl.Mat (deep copy).
+
+        :param dst: sl.Mat where the data will be copied to.
+        :param cpy_type: Specifies the memory that will be used for the copy. Default: sl.COPY_TYPE.CPU_CPU (you cannot change this default value)
+        :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise.
+
+        .. note::
+            If the destination is not allocated or does not have a compatible sl.MAT_TYPE or sl.Resolution,
+            current memory is freed and new memory is directly allocated.
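+
+        Example (a minimal sketch; assumes ``src`` is an allocated sl.Mat, e.g. filled by sl.Camera.retrieve_image()):
+
+        .. code-block:: text
+
+            dst = sl.Mat()
+            err = src.copy_to(dst)  # dst is (re)allocated by the SDK if needed
+            if err == sl.ERROR_CODE.SUCCESS:
+                print(dst.get_infos())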
+ """ + return ERROR_CODE() + + def update_cpu_from_gpu(self) -> ERROR_CODE: + """ + Downloads data from DEVICE (GPU) to HOST (CPU), if possible. + .. note:: + If no CPU or GPU memory are available for this sl::Mat, some are directly allocated. + + .. note:: + If verbose is set to true, you have information in case of failure. + """ + return ERROR_CODE() + + def update_gpu_from_cpu(self) -> ERROR_CODE: + """ + Uploads data from HOST (CPU) to DEVICE (GPU), if possible. + .. note:: + If no CPU or GPU memory are available for this sl::Mat, some are directly allocated. + + .. note:: + If verbose is set to true, you have information in case of failure. + """ + return ERROR_CODE() + + def set_from(self, src: Mat, cpy_type = COPY_TYPE.CPU_CPU) -> ERROR_CODE: + """ + Copies data from an other sl.Mat (deep copy). + :param src: sl.Mat where the data will be copied from. + :param cpy_type: Specifies the memory that will be used for the copy. Default: sl.COPY_TYPE.CPU_CPU (you cannot change this default value) + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + If the destination is not allocated or does not have a compatible sl.MAT_TYPE or sl.Resolution, + + current memory is freed and new memory is directly allocated. + """ + return ERROR_CODE() + + def read(self, filepath: str) -> ERROR_CODE: + """ + Reads an image from a file (only if sl.MEM.CPU is available on the current sl.Mat). + Supported input files format are PNG and JPEG. + :param filepath: Path of the file to read from (including the name and extension). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + Supported sl.MAT_TYPE are : + + - MAT_TYPE.F32_C1 for PNG/PFM/PGM + - MAT_TYPE.F32_C3 for PCD/PLY/VTK/XYZ + - MAT_TYPE.F32_C4 for PCD/PLY/VTK/WYZ + - MAT_TYPE.U8_C1 for PNG/JPG + - MAT_TYPE.U8_C3 for PNG/JPG + - MAT_TYPE.U8_C4 for PNG/JPG + """ + return ERROR_CODE() + + def write(self, filepath: str, memory_type = MEM.CPU, compression_level = -1) -> ERROR_CODE: + """ + Writes the sl.Mat (only if sl.MEM.CPU is available on the current sl.Mat) into a file as an image. + Supported output files format are PNG and JPEG. + :param filepath: Path of the file to write (including the name and extension). + :param memory_type: Memory type of the sl.Mat. Default: sl.MEM.CPU (you cannot change the default value) + :param compression_level: Level of compression between 0 (lowest compression == highest size == highest quality(jpg)) and 100 (highest compression == lowest size == lowest quality(jpg)). + .. note:: + Specific/default value for compression_level = -1 : This will set the default quality for PNG(30) or JPEG(5). + + .. note:: + compression_level is only supported for [U8_Cx] (MAT_TYPE). + + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + Supported sl.MAT_TYPE are : + + - MAT_TYPE.F32_C1 for PNG/PFM/PGM + - MAT_TYPE.F32_C3 for PCD/PLY/VTK/XYZ + - MAT_TYPE.F32_C4 for PCD/PLY/VTK/WYZ + - MAT_TYPE.U8_C1 for PNG/JPG + - MAT_TYPE.U8_C3 for PNG/JPG + - MAT_TYPE.U8_C4 for PNG/JPG + """ + return ERROR_CODE() + + def set_to(self, value, memory_type = MEM.CPU) -> ERROR_CODE: + """ + Fills the sl.Mat with the given value. + This method overwrites all the matrix. + :param value: Value to be copied all over the matrix. + :param memory_type: Which buffer to fill. 
Default: sl.MEM.CPU (you cannot change the default value) + """ + return ERROR_CODE() + + def set_value(self, x: int, y: int, value, memory_type = MEM.CPU) -> ERROR_CODE: + """ + Sets a value to a specific point in the matrix. + :param x: Column of the point to change. + :param y: Row of the point to change. + :param value: Value to be set. + :param memory_type: Which memory will be updated. + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. warning:: Not efficient for sl.MEM.GPU, use it on sparse data. + """ + return ERROR_CODE() + + def get_value(self, x: int, y: int, memory_type = MEM.CPU) -> ERROR_CODE: + """ + Returns the value of a specific point in the matrix. + :param x: Column of the point to get the value from. + :param y: Row of the point to get the value from. + :param memory_type: Which memory should be read. + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. warning:: Not efficient for sl.MEM.GPU, use it on sparse data. + """ + return ERROR_CODE() + + def get_width(self) -> int: + """ + Returns the width of the matrix. + :return: Width of the matrix in pixels. + """ + return int() + + def get_height(self) -> int: + """ + Returns the height of the matrix. + :return: Height of the matrix in pixels. + """ + return int() + + def get_resolution(self) -> Resolution: + """ + Returns the resolution (width and height) of the matrix. + :return: Resolution of the matrix in pixels. + """ + return Resolution() + + def get_channels(self) -> int: + """ + Returns the number of values stored in one pixel. + :return: Number of values in a pixel. + """ + return int() + + def get_data_type(self) -> MAT_TYPE: + """ + Returns the format of the matrix. + :return: Format of the current sl.Mat. + """ + return MAT_TYPE() + + def get_memory_type(self) -> MEM: + """ + Returns the type of memory (CPU and/or GPU). + :return: Type of allocated memory. + """ + return MEM() + + def numpy(self, force = False) -> np.array: + """ + Returns the sl.Mat as a NumPy array. + This is for convenience to mimic the [PyTorch API](https://pytorch.org/docs/stable/generated/torch.Tensor.numpy.html). + \n This is like an alias of get_data() method. + :param force: Whether the memory of the sl.Mat need to be duplicated. + :return: NumPy array containing the sl.Mat data. + .. note:: + The fastest is **force at False but the sl.Mat memory must not be released to use the NumPy array. + """ + return np.array() + + def get_data(self, memory_type = MEM.CPU, deep_copy = False) -> np.array: + """ + Cast the data of the sl.Mat in a NumPy array (with or without copy). + :param memory_type: Which memory should be read. If MEM.GPU, you should have CuPy installed. Default: MEM.CPU + :param deep_copy: Whether the memory of the sl.Mat need to be duplicated. + :return: NumPy array containing the sl.Mat data. + .. note:: + The fastest is **deep_copy at False but the sl.Mat memory must not be released to use the NumPy array. + """ + return np.array() + + def get_step_bytes(self, memory_type = MEM.CPU) -> int: + """ + Returns the memory step in bytes (size of one pixel row). + :param memory_type: Specifies whether you want sl.MEM.CPU or sl.MEM.GPU step.\n Default: sl.MEM.CPU (you cannot change the default value) + :return: The step in bytes of the specified memory. + """ + return int() + + def get_step(self, memory_type = MEM.CPU) -> int: + """ + Returns the memory step in number of elements (size in one pixel row). 
+        :param memory_type: Specifies whether you want sl.MEM.CPU or sl.MEM.GPU step.\n Default: sl.MEM.CPU (you cannot change the default value)
+        :return: The step in number of elements.
+        """
+        return int()
+
+    def get_pixel_bytes(self) -> int:
+        """
+        Returns the size of one pixel in bytes.
+        :return: Size of a pixel in bytes.
+        """
+        return int()
+
+    def get_width_bytes(self) -> int:
+        """
+        Returns the size of a row in bytes.
+        :return: Size of a row in bytes.
+        """
+        return int()
+
+    def get_infos(self) -> str:
+        """
+        Returns the information about the sl.Mat as a string.
+        :return: String containing the sl.Mat information.
+        """
+        return str()
+
+    def is_init(self) -> bool:
+        """
+        Returns whether the sl.Mat is initialized or not.
+        :return: True if the current sl.Mat has been allocated (by the constructor or otherwise).
+        """
+        return bool()
+
+    def is_memory_owner(self) -> bool:
+        """
+        Returns whether the sl.Mat is the owner of the memory it accesses.
+
+        If not, the memory won't be freed if the sl.Mat is destroyed.
+        :return: True if the sl.Mat owns its memory, else False.
+        """
+        return bool()
+
+    def clone(self, py_mat: Mat) -> ERROR_CODE:
+        """
+        Duplicates a sl.Mat by copy (deep copy).
+        :param py_mat: sl.Mat to copy.
+
+        This method copies the data array(s) and marks the new sl.Mat as the memory owner.
+        """
+        return ERROR_CODE()
+
+    def move(self, py_mat: Mat) -> ERROR_CODE:
+        """
+        Moves the data of the sl.Mat to another sl.Mat.
+
+        This method gives the attributes of the current sl.Mat to the specified one. (No copy.)
+        :param py_mat: sl.Mat to move to.
+        .. note::
+            The current sl.Mat is then no longer usable since it loses its attributes.
+        """
+        return ERROR_CODE()
+
+    def convert_color_inplace(self, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Converts the color channels of the sl.Mat in place (RGB<->BGR or RGBA<->BGRA).
+        This method works only on 8U_C4 or 8U_C3.
+        """
+        return ERROR_CODE()
+
+    def convert_color(mat1: Mat, mat2: Mat, swap_RB_channels: bool, remove_alpha_channels: bool, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Converts the color channels of the sl.Mat into another sl.Mat.
+        This method works only on 8U_C4 if remove_alpha_channels is enabled, or on 8U_C4 and 8U_C3 if swap_RB_channels is enabled.
+        The in-place method convert_color_inplace() can be used to only swap the red and blue channels efficiently.
+        """
+        return ERROR_CODE()
+
+    def swap(mat1: Mat, mat2: Mat) -> None:
+        """
+        Swaps the content of the provided sl.Mat (only swaps the pointers, no data copy).
+        :param mat1: First matrix to swap.
+        :param mat2: Second matrix to swap.
+        """
+        pass
+
+    def get_pointer(self, memory_type = MEM.CPU) -> int:
+        """
+        Gets the pointer of the content of the sl.Mat.
+        :param memory_type: Which memory you want to get. Default: sl.MEM.CPU (you cannot change the default value)
+        :return: Pointer of the content of the sl.Mat.
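+
+        Example (a minimal sketch; assumes ``mat`` is an allocated sl.Mat):
+
+        .. code-block:: text
+
+            ptr = mat.get_pointer()  # raw integer address of the CPU buffer
+            # prefer get_data() or numpy() for safe, typed access to the same memory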
+ """ + return int() + + def __repr__(self) -> None: + pass + + +def blob_from_image(mat1: Mat, mat2: Mat, resolution: Resolution, scale: float, mean: tuple, stdev: tuple, keep_aspect_ratio: bool, swap_RB_channels: bool) -> ERROR_CODE: + """ + Convert an image into a GPU Tensor in planar channel configuration (NCHW), ready to use for deep learning model + :param image_in: input image to convert + :param tensor_out: output GPU tensor + :param resolution_out: resolution of the output image, generally square, although not mandatory + :param scalefactor: Scale factor applied to each pixel value, typically to convert the char value into [0-1] float + :param mean: mean, statistic to normalized the pixel values, applied AFTER the scale. For instance for imagenet statistics the mean would be sl::float3(0.485, 0.456, 0.406) + :param stddev: standard deviation, statistic to normalized the pixel values, applied AFTER the scale. For instance for imagenet statistics the standard deviation would be sl::float3(0.229, 0.224, 0.225) + :param keep_aspect_ratio: indicates if the original width and height ratio should be kept using padding (sometimes refer to as letterboxing) or if the image should be stretched + :param swap_RB_channels: indicates if the Red and Blue channels should be swapped (RGB<->BGR or RGBA<->BGRA) + :return: ERROR_CODE : The error code gives information about the success of the function + + Example usage, for a 416x416 squared RGB image (letterboxed), with a scale factor of 1/255, and using the imagenet statistics for normalization: + .. code-block:: text + + + image = sl.Mat() + blob = sl.Mat() + resolution = sl.Resolution(416,416) + scale = 1.0/255.0 # Scale factor to apply to each pixel value + keep_aspect_ratio = True # Add padding to keep the aspect ratio + swap_RB_channels = True # ZED SDK outputs BGR images, so we need to swap the R and B channels + zed.retrieve_image(image, sl.VIEW.LEFT, type=sl.MEM.GPU) # Get the ZED image (GPU only is more efficient in that case) + err = sl.blob_from_image(image, blob, resolution, scale, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), keep_aspect_ratio, swap_RB_channels) + # By default the blob is in GPU memory, you can move it to CPU memory if needed + blob.update_cpu_from_gpu() + + """ + return ERROR_CODE() + +def is_camera_one(camera_model: MODEL) -> bool: + """ + Check if the camera is a ZED One (Monocular) or ZED (Stereo) + :param camera_model: The camera model to check + """ + return bool() + +def is_resolution_available(resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a resolution is available for a given camera model + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +def is_FPS_available(fps, resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a frame rate is available for a given resolution and camera model + :param fps: Frame rate to check + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +def is_HDR_available(resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a resolution for a given camera model is available for HDR + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +class Rotation(Matrix3f): + """ + Class representing a rotation for the positional tracking module. + + It inherits from the generic sl.Matrix3f class. + """ + def __init__(self, *args, **kwargs) -> None: ... 
+
+    def __dealloc__(self) -> None:
+        pass
+
+    def init_rotation(self, rot: Rotation) -> None:
+        """
+        Deep copy from another sl.Rotation.
+        :param rot: sl.Rotation to copy.
+        """
+        pass
+
+    def init_matrix(self, matrix: Matrix3f) -> None:
+        """
+        Initializes the sl.Rotation from a sl.Matrix3f.
+        :param matrix: sl.Matrix3f to be used.
+        """
+        pass
+
+    def init_orientation(self, orient: Orientation) -> None:
+        """
+        Initializes the sl.Rotation from an sl.Orientation.
+        :param orient: sl.Orientation to be used.
+        """
+        pass
+
+    def init_angle_translation(self, angle: float, axis: Translation) -> None:
+        """
+        Initializes the sl.Rotation from an angle and an axis.
+        :param angle: Rotation angle in radians.
+        :param axis: 3D axis to rotate around.
+        """
+        pass
+
+    def set_orientation(self, py_orientation: Orientation) -> None:
+        """
+        Sets the sl.Rotation from an sl.Orientation.
+        :param py_orientation: sl.Orientation containing the rotation to set.
+        """
+        pass
+
+    def get_orientation(self) -> Orientation:
+        """
+        Returns the sl.Orientation corresponding to the current sl.Rotation.
+        :return: Orientation corresponding to the current rotation.
+        """
+        return Orientation()
+
+    def get_rotation_vector(self) -> np.array[float]:
+        """
+        Returns the 3x1 rotation vector obtained from the 3x3 rotation matrix using the Rodrigues formula.
+        :return: Rotation vector (NumPy array) created from the sl.Rotation values.
+        """
+        return np.array[float]()
+
+    def set_rotation_vector(self, input0: float, input1: float, input2: float) -> None:
+        """
+        Sets the sl.Rotation from a rotation vector (using Rodrigues' transformation).
+        :param input0: ```rx``` component of the rotation vector.
+        :param input1: ```ry``` component of the rotation vector.
+        :param input2: ```rz``` component of the rotation vector.
+        """
+        pass
+
+    def get_euler_angles(self, radian = True) -> np.array[float]:
+        """
+        Converts the sl.Rotation into Euler angles.
+        :param radian: Whether the angles will be returned in radians or degrees. Default: True
+        :return: Euler angles (NumPy array) created from the sl.Rotation values representing the rotations around the X, Y and Z axes using YZX convention.
+        """
+        return np.array[float]()
+
+    def set_euler_angles(self, input0: float, input1: float, input2: float, radian = True) -> None:
+        """
+        Sets the sl.Rotation from Euler angles.
+        :param input0: Roll value.
+        :param input1: Pitch value.
+        :param input2: Yaw value.
+        :param radian: Whether the angles are in radians or degrees. Default: True
+        """
+        pass
+
+
+class Translation:
+    """
+    Class representing a translation for the positional tracking module.
+
+    sl.Translation is a vector defined as ```[tx, ty, tz]```.
+    \n You can access the data with the get() method that returns a NumPy array.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    def init_translation(self, tr) -> None:
+        """
+        Deep copy from another sl.Translation.
+        :param tr: sl.Translation to copy.
+        """
+        pass
+
+    def init_vector(self, t1, t2, t3) -> None:
+        """
+        Initializes the sl.Translation with its components.
+        :param t1: First component.
+        :param t2: Second component.
+        :param t3: Third component.
+        """
+        pass
+
+    def normalize(self) -> None:
+        """
+        Normalizes the current sl.Translation.
+        """
+        pass
+
+    def normalize_translation(self, tr) -> Translation:
+        """
+        Gets the normalized sl.Translation of a given sl.Translation.
+        :param tr: sl.Translation to get the normalized translation from.
+        :return: Another sl.Translation object equal to **tr** after normalization (see normalize()).
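+
+        Example (a minimal sketch):
+
+        .. code-block:: text
+
+            tr = sl.Translation()
+            tr.init_vector(3.0, 0.0, 4.0)
+            unit = sl.Translation().normalize_translation(tr)  # components scaled by 1/5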
+ """ + return Translation() + + def size(self) -> int: + """ + Gets the size of the sl.Translation. + :return: Size of the sl.Translation. + """ + return int() + + def dot_translation(tr1: Translation, tr2) -> float: + """ + Computes the dot product of two sl.Translation objects. + :param tr1: First sl.Translation to get the dot product from. + :param tr2: Sencond sl.Translation to get the dot product from. + :return: Dot product of **tr1 and **tr2. + """ + return float() + + def get(self) -> np.array[float]: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + return np.array[float]() + + def __mul__(self, other) -> None: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + pass + + def __repr__(self) -> None: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + pass + + +class Orientation: + """ + Class representing an orientation/quaternion for the positional tracking module. + + sl.Orientation is a vector defined as ```[ox, oy, oz, ow]```. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def init_orientation(self, orient) -> None: + """ + Deep copy from another sl.Orientation. + :param orient: sl.Orientation to copy. + """ + pass + + def init_vector(self, v0, v1, v2, v3) -> None: + """ + Initializes the sl.Orientation with its components. + :param v0: ox component. + :param v1: oy component. + :param v2: oz component. + :param v3: ow component. + """ + pass + + def init_rotation(self, rotation) -> None: + """ + Initializes the sl.Orientation from an sl.Rotation. + + It converts the sl.Rotation representation to the sl.Orientation one. + :param rotation: sl.Rotation to be used. + """ + pass + + def init_translation(self, tr1, tr2) -> None: + """ + Initializes the sl.Orientation from a vector represented by two sl.Translation. + :param tr1: First point of the vector. + :param tr2: Second point of the vector. + """ + pass + + def set_rotation_matrix(self, py_rotation) -> None: + """ + Sets the rotation component of the current sl.Transform from an sl.Rotation. + :param py_rotation: sl.Rotation to be used. + """ + pass + + def get_rotation_matrix(self) -> Rotation: + """ + Returns the current sl.Orientation as an sl.Rotation. + :return: The rotation computed from the orientation data. + """ + return Rotation() + + def set_identity(self) -> None: + """ + Sets the current sl.Orientation to identity. + """ + pass + + def identity(self, orient = Orientation()) -> Orientation: + """ + Creates an sl.Orientation initialized to identity. + :return: Identity sl.Orientation. + """ + return Orientation() + + def set_zeros(self) -> None: + """ + Fills the current sl.Orientation with zeros. + """ + pass + + def zeros(self, orient = Orientation()) -> Orientation: + """ + Creates an sl.Orientation filled with zeros. + :return: sl.Orientation filled with zeros. + """ + return Orientation() + + def normalize(self) -> None: + """ + Normalizes the current sl.Orientation. + """ + pass + + def normalize_orientation(orient) -> Orientation: + """ + Gets the normalized sl.Orientation of a given sl.Orientation. + :param orient: sl.Orientation to be get the normalized orientation from. + :return: Another sl.Orientation object equal to [**orient.normalize()](normalize). + """ + return Orientation() + + def size(self) -> int: + """ + Gets the size of the sl.Orientation. 
+ :return: Size of the sl.Orientation. + """ + return int() + + def get(self) -> np.array[float]: + """ + Returns a numpy array of the Orientation . + :return: A numpy array of the Orientation . + """ + return np.array[float]() + + def __mul__(self, other) -> None: + """ + Returns a numpy array of the Orientation . + :return: A numpy array of the Orientation . + """ + pass + + def __repr__(self) -> None: + """ + Returns a numpy array of the Orientation . + :return: A numpy array of the Orientation . + """ + pass + + +class Transform(Matrix4f): + """ + Class representing a transformation (translation and rotation) for the positional tracking module. + + It can be used to create any type of Matrix4x4 or sl::Matrix4f that must be specifically used for handling a rotation and position information (OpenGL, Tracking, etc.). + \n It inherits from the generic sl::Matrix4f class. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def init_transform(self, motion: Transform) -> None: + """ + Deep copy from another sl.Transform. + :param motion: sl.Transform to copy. + """ + pass + + def init_matrix(self, matrix: Matrix4f) -> None: + """ + Initializes the sl.Transform from a sl.Matrix4f. + :param matrix: sl.Matrix4f to be used. + """ + pass + + def init_rotation_translation(self, rot: Rotation, tr: Translation) -> None: + """ + Initializes the sl.Transform from an sl.Rotation and a sl.Translation. + :param rot: sl.Rotation to be used. + :param tr: sl.Translation to be used. + """ + pass + + def init_orientation_translation(self, orient: Orientation, tr: Translation) -> None: + """ + Initializes the sl.Transform from an sl.Orientation and a sl.Translation. + :param orient: Orientation to be used + :param tr: Translation to be used + """ + pass + + def set_rotation_matrix(self, py_rotation: Rotation) -> None: + """ + Sets the rotation component of the current sl.Transform from an sl.Rotation. + :param py_rotation: sl.Rotation to be used. + """ + pass + + def get_rotation_matrix(self) -> Rotation: + """ + Returns the sl.Rotation corresponding to the current sl.Transform. + :return: sl.Rotation created from the sl.Transform values. + .. warning:: The given sl.Rotation contains a copy of the sl.Transform values. + """ + return Rotation() + + def set_translation(self, py_translation: Translation) -> None: + """ + Sets the translation component of the current sl.Transform from an sl.Translation. + :param py_translation: sl.Translation to be used. + """ + pass + + def get_translation(self) -> Translation: + """ + Returns the sl.Translation corresponding to the current sl.Transform. + :return: sl.Translation created from the sl.Transform values. + .. warning:: The given sl.Translation contains a copy of the sl.Transform values. + """ + return Translation() + + def set_orientation(self, py_orientation: Orientation) -> None: + """ + Sets the orientation component of the current sl.Transform from an sl.Orientation. + :param py_orientation: sl.Orientation to be used. + """ + pass + + def get_orientation(self) -> Orientation: + """ + Returns the sl.Orientation corresponding to the current sl.Transform. + :return: sl.Orientation created from the sl.Transform values. + .. warning:: The given sl.Orientation contains a copy of the sl.Transform values. + """ + return Orientation() + + def get_rotation_vector(self) -> np.array[float]: + """ + Returns the 3x1 rotation vector obtained from 3x3 rotation matrix using Rodrigues formula. 
+ :return: Rotation vector (NumPy array) created from the sl.Transform values. + """ + return np.array[float]() + + def set_rotation_vector(self, input0: float, input1: float, input2: float) -> None: + """ + Sets the rotation component of the sl.Transform with a 3x1 rotation vector (using Rodrigues' transformation). + :param input0: ```rx``` component of the rotation vector. + :param input1: ```ry``` component of the rotation vector. + :param input2: ```rz``` component of the rotation vector. + """ + pass + + def get_euler_angles(self, radian = True) -> np.array[float]: + """ + Converts the rotation component of the sl.Transform into Euler angles. + :param radian: Whether the angle will be returned in radian or degree. Default: True + :return: Euler angles (Numpy array) created from the sl.Transform values representing the rotations around the X, Y and Z axes using YZX convention. + """ + return np.array[float]() + + def set_euler_angles(self, input0: float, input1: float, input2: float, radian = True) -> None: + """ + Sets the rotation component of the sl.Transform from Euler angles. + :param input0: Roll value. + :param input1: Pitch value. + :param input2: Yaw value. + :param radian: Whether the angle is in radian or degree. Default: True + """ + pass + + +class MESH_FILE_FORMAT(enum.Enum): + """ + Lists available mesh file formats. + + | Enumerator | | + |:---:|:---:| + | PLY | Contains only vertices and faces. | + | PLY_BIN | Contains only vertices and faces encoded in binary. | + | OBJ | Contains vertices, normals, faces, and texture information (if possible). | + """ + PLY = enum.auto() + PLY_BIN = enum.auto() + OBJ = enum.auto() + LAST = enum.auto() + +class MESH_TEXTURE_FORMAT(enum.Enum): + """ + Lists available mesh texture formats. + + | Enumerator | | + |:---:|:---:| + | RGB | The texture will be on 3 channels. | + | RGBA | The texture will be on 4 channels. | + """ + RGB = enum.auto() + RGBA = enum.auto() + LAST = enum.auto() + +class MESH_FILTER(enum.Enum): + """ + Lists available mesh filtering intensities. + + | Enumerator | | + |:---:|:---:| + | LOW | Clean the mesh by closing small holes and removing isolated faces. | + | MEDIUM | Soft faces decimation and smoothing. | + | HIGH | Drastically reduce the number of faces and apply a soft smooth. | + """ + LOW = enum.auto() + MEDIUM = enum.auto() + HIGH = enum.auto() + +class PLANE_TYPE(enum.Enum): + """ + Lists the available plane types detected based on the orientation. + + + | Enumerator | | + |:---:|:---:| + | HORIZONTAL | Horizontal plane, such as a tabletop, floor, etc. | + | VERTICAL | Vertical plane, such as a wall. | + | UNKNOWN | Unknown plane orientation. | + """ + HORIZONTAL = enum.auto() + VERTICAL = enum.auto() + UNKNOWN = enum.auto() + LAST = enum.auto() + +class MeshFilterParameters: + """ + Class containing a set of parameters for the [mesh filtration](Mesh.filter) functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def set(self, filter = MESH_FILTER.LOW) -> None: + """ + Set the filtering intensity. + :param filter: Desired sl.MESH_FILTER. + """ + pass + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters. 
+        :return: True if the file was successfully saved, otherwise False.
+        .. warning:: For security reasons, the file must not already exist.
+        .. warning:: In case a file already exists, the method will return False and the existing file will not be updated.
+        """
+        return bool()
+
+    def load(self, filename: str) -> bool:
+        """
+        Loads a set of parameters from the values contained in a file previously saved with save().
+        :param filename: Path to the file from which the parameters will be loaded.
+        :return: True if the file was successfully loaded, otherwise False.
+        """
+        return bool()
+
+
+class PointCloudChunk:
+    """
+    Class representing a sub-point cloud containing local vertices and colors.
+
+    .. note::
+        vertices and normals have the same size.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def has_been_updated(self) -> bool:
+        """
+        Whether the point cloud chunk has been updated by an inner process.
+        """
+        return bool()
+
+    @property
+    def timestamp(self) -> int:
+        """
+        Timestamp of the latest update.
+        """
+        return int()
+
+    def vertices(self) -> np.array[float]:
+        """
+        NumPy array of vertices.
+        Vertices are defined by a colored 3D point ```[x, y, z, rgba]```.
+        """
+        return np.array[float]()
+
+    def normals(self) -> np.array[float]:
+        """
+        NumPy array of normals.
+        Normals are defined by three components ```[nx, ny, nz]```.
+        .. note::
+            A normal is defined for each vertex.
+        """
+        return np.array[float]()
+
+    def barycenter(self) -> np.array[float]:
+        """
+        3D centroid of the chunk.
+        """
+        return np.array[float]()
+
+    def clear(self) -> None:
+        """
+        Clears all data.
+        """
+        pass
+
+
+class Chunk:
+    """
+    Class representing a sub-mesh containing local vertices and triangles.
+
+    Vertices and normals have the same size and are linked by id stored in triangles.
+    .. note::
+        uv contains data only if your mesh has textures (by loading it or after calling sl.Mesh.apply_texture()).
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def has_been_updated(self) -> bool:
+        """
+        Whether the chunk has been updated by an inner process.
+        """
+        return bool()
+
+    @property
+    def timestamp(self) -> int:
+        """
+        Timestamp of the latest update.
+        """
+        return int()
+
+    def vertices(self) -> np.array[float]:
+        """
+        NumPy array of vertices.
+        Vertices are defined by a 3D point ```[x, y, z]```.
+        """
+        return np.array[float]()
+
+    def triangles(self) -> np.array[int]:
+        """
+        NumPy array of triangles/faces.
+        Triangles are defined as sets of three vertex indices ```[v1, v2, v3]```.
+        """
+        return np.array[int]()
+
+    def normals(self) -> np.array[float]:
+        """
+        NumPy array of normals.
+        Normals are defined by three components ```[nx, ny, nz]```.
+        .. note::
+            A normal is defined for each vertex.
+        """
+        return np.array[float]()
+
+    def colors(self) -> np.array[int]:
+        """
+        NumPy array of colors.
+        Colors are defined by three components ```[r, g, b]```.
+        .. note::
+            A color is defined for each vertex.
+        """
+        return np.array[int]()
+
+    def uv(self) -> np.array[float]:
+        """
+        UVs define the 2D projection of each vertex onto the texture.
+        Values are normalized [0, 1] and start from the bottom left corner of the texture (as requested by OpenGL).
+        \n In order to display a textured mesh you need to bind the texture and then draw each triangle by picking its uv values.
+        .. note::
+            Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()).
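+
+        Example (a minimal sketch; only meaningful after a successful sl.Mesh.apply_texture()):
+
+        .. code-block:: text
+
+            uvs = chunk.uv()          # one normalized (u, v) pair per vertex
+            tris = chunk.triangles()  # vertex indices used to look up positions and UVs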
+ """ + return np.array[float]() + + def barycenter(self) -> np.array[float]: + """ + 3D centroid of the chunk. + """ + return np.array[float]() + + def clear(self) -> None: + """ + Clears all data. + """ + pass + + +class FusedPointCloud: + """ + Class representing a fused point cloud and containing the geometric and color data of the scene captured by the spatial mapping module. + + By default the fused point cloud is defined as a set of point cloud chunks. + \n This way we update only the required data, avoiding a time consuming remapping process every time a small part of the sl.FusedPointCloud cloud is changed. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def chunks(self) -> list[PointCloudChunk]: + """ + List of chunks constituting the sl.FusedPointCloud. + """ + return list[PointCloudChunk]() + + def __dealloc__(self) -> None: + pass + + def __getitem__(self, x) -> PointCloudChunk: + """ + Gets a chunk from chunks. + """ + return PointCloudChunk() + + def vertices(self) -> np.array[float]: + """ + NumPy array of vertices. + Vertices are defined by a colored 3D point ```[x, y, z, rgba]```. + """ + return np.array[float]() + + def normals(self) -> np.array[float]: + """ + NumPy array of normals. + Normals are defined by three components ```[nx, ny, nz]```. + .. note:: + A normal is defined for each vertex. + """ + return np.array[float]() + + def save(self, filename: str, typeMesh = MESH_FILE_FORMAT.OBJ, id = []) -> bool: + """ + Saves the current sl.FusedPointCloud into a file. + :param filename: Path of the file to store the fused point cloud in. + :param typeMesh: File extension type. Default: sl.MESH_FILE_FORMAT.OBJ. + :param id: Set of chunks to be saved. Default: (empty) (all chunks are saved) + :return: True if the file was successfully saved, otherwise False. + + .. note:: + This method operates on the sl.FusedPointCloud not on chunks. + + .. note:: + This way you can save different parts of your sl.FusedPointCloud by updating it with update_from_chunklist(). + """ + return bool() + + def load(self, filename: str, update_chunk_only = False) -> bool: + """ + Loads the fused point cloud from a file. + :param filename: Path of the file to load the fused point cloud from. + :param update_chunk_only: Whether to only load data in chunks (and not vertices / normals).\n Default: False. + :return: True if the mesh was successfully loaded, otherwise False. + + .. note:: + Updating a sl.FusedPointCloud is time consuming. Consider using only chunks for better performances. + """ + return bool() + + def clear(self) -> None: + """ + Clears all the data. + """ + pass + + def update_from_chunklist(self, id = []) -> None: + """ + Updates vertices and normals from chunk data pointed by the given list of id. + :param id: Indices of chunks which will be concatenated. Default: (empty). + .. note:: + If the given list of id is empty, all chunks will be used to update the current sl.FusedPointCloud. + """ + pass + + def get_number_of_points(self) -> int: + """ + Computes the total number of points stored in all chunks. + :return: The number of points stored in all chunks. + """ + return int() + + +class Mesh: + """ + Class representing a mesh and containing the geometric (and optionally texture) data of the scene captured by the spatial mapping module. + + By default the mesh is defined as a set of chunks. + \n This way we update only the data that has to be updated avoiding a time consuming remapping process every time a small part of the sl.Mesh is updated. 
+ """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def chunks(self) -> list[Chunk]: + """ + List of chunks constituting the sl.Mesh. + """ + return list[Chunk]() + + @property + def texture(self) -> Mat: + """ + Texture of the sl.Mesh. + .. note:: + Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()). + """ + return Mat() + + def __dealloc__(self) -> None: + pass + + def __getitem__(self, x) -> Chunk: + """ + Gets a chunk from chunks. + """ + return Chunk() + + def filter(self, params = MeshFilterParameters(), update_chunk_only = False) -> bool: + """ + Filters the mesh. + The resulting mesh is smoothed, small holes are filled, and small blobs of non-connected triangles are deleted. + :param params: Filtering parameters. Default: a preset of sl.MeshFilterParameters. + :param update_chunk_only: Whether to only update chunks (and not vertices / normals / triangles).\n Default: False. + :return: True if the mesh was successfully filtered, otherwise False. + + .. note:: + The filtering is a costly operation. + + .. note:: + It is not recommended to call it every time you retrieve a mesh but only at the end of your spatial mapping process. + """ + return bool() + + def apply_texture(self, texture_format = MESH_TEXTURE_FORMAT.RGB) -> bool: + """ + Applies a texture to the mesh. + By using this method you will get access to uv, and texture. + \n The number of triangles in the mesh may slightly differ before and after calling this method due to missing texture information. + \n There is only one texture for the mesh, the uv of each chunk are expressed for it in its entirety. + \n NumPy arrays of vertices / normals and uv have now the same size. + :param texture_format: Number of channels desired for the computed texture.\n Default: sl.MESH_TEXTURE_FORMAT.RGB. + :return: True if the mesh was successfully textured, otherwise False. + + .. note:: + This method can be called as long as you do not start a new spatial mapping process (due to shared memory). + + .. note:: + This method can require a lot of computation time depending on the number of triangles in the mesh. + + .. note:: + It is recommended to call it once at the end of your spatial mapping process. + + + .. warning:: The sl.SpatialMappingParameters.save_texture parameter must be set to True when enabling the spatial mapping to be able to apply the textures. + .. warning:: The mesh should be filtered before calling this method since filter() will erase the textures. + .. warning:: The texturing is also significantly slower on non-filtered meshes. + """ + return bool() + + def save(self, filename: str, typeMesh = MESH_FILE_FORMAT.OBJ, id = []) -> bool: + """ + Saves the current sl.Mesh into a file. + :param filename: Path of the file to store the mesh in. + :param typeMesh: File extension type. Default: sl.MESH_FILE_FORMAT.OBJ. + :param id: Set of chunks to be saved. Default: (empty) (all chunks are saved) + :return: True if the file was successfully saved, otherwise False. + + .. note:: + Only sl.MESH_FILE_FORMAT.OBJ supports textures data. + + .. note:: + This method operates on the sl.Mesh not on chunks. + + .. note:: + This way you can save different parts of your sl.Mesh by updating it with update_mesh_from_chunklist(). + """ + return bool() + + def load(self, filename: str, update_mesh = False) -> bool: + """ + Loads the mesh from a file. + :param filename: Path of the file to load the mesh from. 
+ :param update_mesh: Whether to only load data in chunks (and not vertices / normals / triangles).\n Default: False. + :return: True if the mesh was successfully loaded, otherwise False. + + .. note:: + Updating a sl::Mesh is time consuming. Consider using only chunks for better performances. + """ + return bool() + + def clear(self) -> None: + """ + Clears all the data. + """ + pass + + def vertices(self) -> np.array[float]: + """ + NumPy array of vertices. + Vertices are defined by a 3D point ```[x, y, z]```. + """ + return np.array[float]() + + def triangles(self) -> np.array[int]: + """ + NumPy array of triangles/faces. + Triangle defined as a set of three vertices indexes ```[v1, v2, v3]```. + """ + return np.array[int]() + + def normals(self) -> np.array[float]: + """ + NumPy array of normals. + Normals are defined by three components ```[nx, ny, nz]```. + .. note:: + A normal is defined for each vertex. + """ + return np.array[float]() + + def colors(self) -> np.array[int]: + """ + NumPy array of colors. + Colors are defined by three components ```[r, g, b]```. + .. note:: + A color is defined for each vertex. + """ + return np.array[int]() + + def uv(self) -> np.array[float]: + """ + UVs defines the 2D projection of each vertices onto the texture. + Values are normalized [0, 1] and start from the bottom left corner of the texture (as requested by OpenGL). + In order to display a textured mesh you need to bind the texture and then draw each triangle by picking its uv values. + .. note:: + Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()). + """ + return np.array[float]() + + def get_number_of_triangles(self) -> int: + """ + Computes the total number of triangles stored in all chunks. + :return: The number of triangles stored in all chunks. + """ + return int() + + def get_boundaries(self) -> np.array[int]: + """ + Compute the indices of boundary vertices. + :return: The indices of boundary vertices. + """ + return np.array[int]() + + def merge_chunks(self, faces_per_chunk: int) -> None: + """ + Merges current chunks. + This method can be used to merge chunks into bigger sets to improve rendering process. + :param faces_per_chunk: Number of faces per chunk. + + .. note:: + This method is useful for Unity, which does not handle chunks with more than 65K vertices. + + .. warning:: This method should not be called during spatial mapping process since mesh updates will revert this changes. + """ + pass + + def get_gravity_estimate(self) -> np.array[float]: + """ + Estimates the gravity vector. + This method looks for a dominant plane in the whole mesh considering that it is the floor (or a horizontal plane). + :return: The estimated gravity vector (NumPy array). + + .. note:: + This can be used to find the gravity to create realistic physical interactions. + """ + return np.array[float]() + + def get_visible_list(self, camera_pose: Transform) -> list[int]: + """ + Computes the id list of visible chunks from a specific point of view. + :param camera_pose: Point of view (given in the same reference as the vertices). + :return: The list of id of visible chunks. + """ + return list[int]() + + def get_surrounding_list(self, camera_pose: Transform, radius: float) -> list[int]: + """ + Computes the id list of chunks close to a specific point of view. + :param camera_pose: Point of view (given in the same reference as the vertices). + :param radius: Radius determining closeness (given in the same unit as the mesh). 
+ :return: The list of id of chunks close to the given point. + """ + return list[int]() + + def update_mesh_from_chunklist(self, id = []) -> None: + """ + Updates vertices / normals / triangles / uv from chunk data pointed by the given list of id. + :param id: Indices of chunks which will be concatenated. Default: (empty). + .. note:: + If the given list of id is empty, all chunks will be used to update the current sl.Mesh. + """ + pass + + +class Plane: + """ + Class representing a plane defined by a point and a normal, or a plane equation. + + Other elements can be extracted such as the mesh, the 3D bounds, etc. + .. note:: + The plane measurements are expressed in reference defined by sl.RuntimeParameters.measure3D_reference_frame. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def type(self) -> PLANE_TYPE: + """ + Type of the plane defined by its orientation. + .. note:: + It is deduced from the gravity vector and is therefore not available with on sl.MODEL.ZED. + + .. note:: + sl.MODEL.ZED will give sl.PLANE_TYPE.UNKNOWN for every planes. + """ + return PLANE_TYPE() + + @type.setter + def type(self, type: Any) -> None: + pass + + def get_normal(self) -> np.array[float]: + """ + Gets the plane normal vector. + :return: sl.Plane normalized normal vector (NumPy array). + """ + return np.array[float]() + + def get_center(self) -> np.array[float]: + """ + Gets the plane center point + :return: sl.Plane center point + """ + return np.array[float]() + + def get_pose(self, py_pose = Transform()) -> Transform: + """ + Gets the plane pose relative to the global reference frame. + :param py_pose: sl.Transform to fill (or it creates one by default). + :return: Transformation matrix (rotation and translation) of the plane pose. + .. note:: + Can be used to transform the global reference frame center ```(0, 0, 0)``` to the plane center. + """ + return Transform() + + def get_extents(self) -> np.array[float]: + """ + Gets the width and height of the bounding rectangle around the plane contours. + :return: Width and height of the bounding plane contours (NumPy array). + .. warning:: This value is expressed in the plane reference frame. + """ + return np.array[float]() + + def get_plane_equation(self) -> np.array[float]: + """ + Gets the plane equation. + :return: Plane equation coefficients ```[a, b, c, d]``` (NumPy array). + .. note:: + The plane equation has the following form: ```ax + by + cz = d```. + """ + return np.array[float]() + + def get_bounds(self) -> np.array[float][float]: + """ + Gets the polygon bounds of the plane. + :return: Vector of 3D points forming a polygon bounds corresponding to the current visible limits of the plane (NumPy array). + """ + return np.array[float][float]() + + def extract_mesh(self) -> Mesh: + """ + Compute and return the mesh of the bounds polygon. + :return: sl::Mesh representing the plane delimited by the visible bounds. + """ + return Mesh() + + def get_closest_distance(self, point = [0, 0, 0]) -> float: + """ + Gets the distance between the input point and the projected point alongside the normal vector onto the plane (the closest point on the plane). + :param point: Point to project into the plane. + :return: The Euclidean distance between the input point and the projected point. + """ + return float() + + def clear(self) -> None: + """ + Clears all the data. + """ + pass + + +class MAPPING_RESOLUTION(enum.Enum): + """ + Lists the spatial mapping resolution presets. 
+ + | Enumerator | | + |:---:|:---:| + | HIGH | Creates a detailed geometry. Requires lots of memory. | + | MEDIUM | Small variations in the geometry will disappear. Useful for big objects. | + | LOW | Keeps only huge variations of the geometry. Useful for outdoor purposes. | + """ + HIGH = enum.auto() + MEDIUM = enum.auto() + LOW = enum.auto() + +class MAPPING_RANGE(enum.Enum): + """ + Lists the spatial mapping depth range presets. + + | Enumerator | | + |:---:|:---:| + | SHORT | Only depth close to the camera will be used during spatial mapping. | + | MEDIUM | Medium depth range. | + | LONG | Takes into account objects that are far. Useful for outdoor purposes. | + | AUTO | Depth range will be computed based on current sl.Camera state and parameters. | + """ + SHORT = enum.auto() + MEDIUM = enum.auto() + LONG = enum.auto() + AUTO = enum.auto() + +class SPATIAL_MAP_TYPE(enum.Enum): + """ + Lists the types of spatial maps that can be created. + + | Enumerator | | + |:---:|:---:| + | MESH | The geometry is represented by a set of vertices connected by edges and forming faces. No color information is available. | + | FUSED_POINT_CLOUD | The geometry is represented by a set of 3D colored points. | + """ + MESH = enum.auto() + FUSED_POINT_CLOUD = enum.auto() + +class BUS_TYPE(enum.Enum): + """ + Lists available LIVE input type in the ZED SDK. + + | Enumerator | | + |:---:|:---:| + | USB | USB input mode | + | GMSL | GMSL input mode Note: Only on NVIDIA Jetson. | + | AUTO | Automatically select the input type. Trying first for available USB cameras, then GMSL. | + """ + USB = enum.auto() + GMSL = enum.auto() + AUTO = enum.auto() + LAST = enum.auto() + +def generate_virtual_stereo_serial_number(serial_left, serial_right) -> "unsigned int": + """ + Generate a unique identifier for virtual stereo based on the serial numbers of the two ZED Ones + :param serial_l: Serial number of the left camera. + :param serial_r: Serial number of the right camera. + :return: A unique hash for the given pair of serial numbers, or 0 if an error occurred (e.g: same serial number). + """ + return "unsigned int"() + +class InputType: + """ + Class defining the input type used in the ZED SDK. + It can be used to select a specific camera with an id or serial number, or from a SVO file. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None: + """ + Set the input as the camera with specified id. + + .. note:: + The id is not related to the serial number of the camera. The id is assigned by the OS depending on the order the cameras are plugged. + + .. warning:: Using id is not recommended if you have multiple cameras plugged in the system, prefer using the serial number instead. + + :param id: Id of the camera to open. The default, -1, will open the first available camera. A number >= 0 will try to open the camera with the corresponding id. + :param bus_type: Whether the camera is a USB or a GMSL camera. + """ + pass + + def set_from_serial_number(self, serial_number) -> None: + """ + Set the input as the camera with specified serial number. + :param camera_serial_number: Serial number of the camera to open + """ + pass + + def set_virtual_stereo_from_camera_id(self, id_left, id_right, virtual_serial_number) -> bool: + """ + Set the input as a virtual stereo camera from two cameras with specified ids. + :param id_left: Id of the left camera. + :param id_right: Id of the right camera. 
+ :param virtual_serial_number: Serial number of the virtual stereo camera. + .. note:: + : The virtual serial number must fall within an interval that reflects the Product ID range. + + This is necessary to avoid, for instance, downloading calibration data from an unrelated product. + The valid range is 110000000 to 119999999. + A support function can be used, based on the ZED One serial number, to compute a valid virtual serial number: generate_virtual_stereo_serial_number + :return: False if there's no error and the camera was successfully created, otherwise True. + """ + return bool() + + def set_virtual_stereo_from_serial_numbers(self, camera_left_serial_number, camera_right_serial_number, virtual_serial_number) -> bool: + """ + Set the input as a virtual stereo camera from two cameras with specified serial numbers. + :param camera_left_serial_number: Serial number of the left camera. + :param camera_right_serial_number: Serial number of the right camera. + :param virtual_serial_number: Serial number of the virtual stereo camera. + .. note:: + : The virtual serial number must fall within an interval that reflects the Product ID range. + + This is necessary to avoid, for instance, downloading calibration data from an unrelated product. + The valid range is 110000000 to 119999999. + A support function can be used, based on the ZED One serial number, to compute a valid virtual serial number: generate_virtual_stereo_serial_number + :return: False if there's no error and the camera was successfully created, otherwise True. + """ + return bool() + + def set_from_svo_file(self, svo_input_filename) -> None: + """ + Set the input as the svo specified with the filename + :param svo_input_filename: The path to the desired SVO file + """ + pass + + def set_from_stream(self, sender_ip, port = 30000) -> None: + """ + Set the input to stream with the specified ip and port + :param sender_ip: The IP address of the streaming sender + :param port: The port on which to listen. Default: 30000 + .. note:: + The protocol used for the streaming module is based on RTP/RTCP. + + .. warning:: Port must be even number, since the port+1 is used for control data. + """ + pass + + def get_type(self) -> INPUT_TYPE: + """ + Returns the current input type. + """ + return INPUT_TYPE() + + def get_configuration(self) -> str: + """ + Returns the current input configuration as a string e.g: SVO name, serial number, streaming ip, etc. + """ + return str() + + def is_init(self) -> bool: + """ + Check whether the input is set. + """ + return bool() + + +class InitParameters: + """ + Class containing the options used to initialize the sl.Camera object. + + This class allows you to select multiple parameters for the sl.Camera such as the selected camera, resolution, depth mode, coordinate system, and units of measurement. + \n Once filled with the desired options, it should be passed to the sl.Camera.open() method. + + .. 
code-block:: text + + + import pyzed.sl as sl + + def main() : + zed = sl.Camera() # Create a ZED camera object + + init_params = sl.InitParameters() # Set initial parameters + init_params.sdk_verbose = 0 # Disable verbose mode + + # Use the camera in LIVE mode + init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode + init_params.camera_fps = 30 # Set fps at 30 + + # Or use the camera in SVO (offline) mode + #init_params.set_from_svo_file("xxxx.svo") + + # Or use the camera in STREAM mode + #init_params.set_from_stream("192.168.1.12", 30000) + + # Other parameters are left to their default values + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Close the camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + + + With its default values, it opens the camera in live mode at sl.RESOLUTION.HD720 + (or sl.RESOLUTION.HD1200 for the ZED X/X Mini) and sets the depth mode to sl.DEPTH_MODE.NEURAL + \n You can customize it to fit your application. + .. note:: + The parameters can also be saved and reloaded using its save() and load() methods. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_image_enhancement(self) -> bool: + """ + Enable the Enhanced Contrast Technology, to improve image quality. + + Default: True. + + \n If set to true, image enhancement will be activated in camera ISP. Otherwise, the image will not be enhanced by the IPS. + .. note:: + This only works for firmware version starting from 1523 and up. + """ + return bool() + + @enable_image_enhancement.setter + def enable_image_enhancement(self, enable_image_enhancement: Any) -> None: + pass + + @property + def camera_image_flip(self) -> FLIP_MODE: + """ + Defines if a flip of the images is needed. + + If you are using the camera upside down, setting this parameter to sl.FLIP_MODE.ON will cancel its rotation. + \n The images will be horizontally flipped. + \n Default: sl.FLIP_MODE.AUTO + .. note:: + From ZED SDK 3.2 a new sl.FLIP_MODE enum was introduced to add the automatic flip mode detection based on the IMU gravity detection. + + .. note:: + This does not work on sl.MODEL.ZED cameras since they do not have the necessary sensors. + """ + return FLIP_MODE() + + @camera_image_flip.setter + def camera_image_flip(self, camera_image_flip: Any) -> None: + pass + + @property + def maximum_working_resolution(self) -> Resolution: + """ + Set a maximum size for all SDK output, like retrieveImage and retrieveMeasure functions. + + This will override the default (0,0) and instead of outputting native image size sl::Mat, the ZED SDK will take this size as default. + A custom lower size can also be used at runtime, but not bigger. This is used for internal optimization of compute and memory allocations + + The default is similar to previous version with (0,0), meaning native image size + + .. note:: + : if maximum_working_resolution field are lower than 64, it will be interpreted as dividing scale factor; + + - maximum_working_resolution = sl::Resolution(1280, 16) -> 1280 x (image_height/2) = 1280 x half height + - maximum_working_resolution = sl::Resolution(4, 4) -> (image_width/4) x (image_height/4) = quarter size + """ + return Resolution() + + @maximum_working_resolution.setter + def maximum_working_resolution(self, maximum_working_resolution: Any) -> None: + pass + + @property + def sdk_gpu_id(self) -> int: + """ + NVIDIA graphics card id to use. 
+ + By default the SDK will use the most powerful NVIDIA graphics card found. + \n However, when running several applications, or using several cameras at the same time, splitting the load over available GPUs can be useful. + \n This parameter allows you to select the GPU used by the sl.Camera using an ID from 0 to n-1 GPUs in your PC. + \n Default: -1 + .. note:: + A non-positive value will search for all CUDA capable devices and select the most powerful. + """ + return int() + + @sdk_gpu_id.setter + def sdk_gpu_id(self, sdk_gpu_id: Any) -> None: + pass + + @property + def optional_settings_path(self) -> str: + """ + Optional path where the ZED SDK has to search for the settings file (SN.conf file). + + This file contains the calibration information of the camera. + \n Default: "" + + .. note:: + The settings file will be searched in the default directory: + + * **Linux**: /usr/local/zed/settings/ + * **Windows**: C:/ProgramData/stereolabs/settings + + .. note:: + If a path is specified and no file has been found, the ZED SDK will search the settings file in the default directory. + + .. note:: + An automatic download of the settings file (through **ZED Explorer** or the installer) will still download the files on the default path. + + + .. code-block:: text + + init_params = sl.InitParameters() # Set initial parameters + home = "/path/to/home" + path = home + "/Documents/settings/" # assuming /path/to/home/Documents/settings/SNXXXX.conf exists. Otherwise, it will be searched in /usr/local/zed/settings/ + init_params.optional_settings_path = path + """ + return str() + + @optional_settings_path.setter + def optional_settings_path(self, optional_settings_path: Any) -> None: + pass + + @property + def coordinate_system(self) -> COORDINATE_SYSTEM: + """ + sl.COORDINATE_SYSTEM to be used as reference for positional tracking, mesh, point clouds, etc. + + This parameter allows you to select the sl.COORDINATE_SYSTEM used by the sl.Camera object to return its measures. + \n This defines the order and the direction of the axis of the coordinate system. + \n Default: sl.COORDINATE_SYSTEM.IMAGE + """ + return COORDINATE_SYSTEM() + + @coordinate_system.setter + def coordinate_system(self, coordinate_system: Any) -> None: + pass + + @property + def grab_compute_capping_fps(self) -> float: + """ + Define a computation upper limit to the grab frequency. + + This can be useful to get a known constant fixed rate or limit the computation load while keeping a short exposure time by setting a high camera capture framerate. + \n The value should be inferior to the sl.InitParameters.camera_fps and strictly positive. + .. note:: + It has no effect when reading an SVO file. + + + This is an upper limit and won't make a difference if the computation is slower than the desired compute capping FPS. + .. note:: + Internally the sl.Camera.grab() method always tries to get the latest available image while respecting the desired FPS as much as possible. + """ + return float() + + @grab_compute_capping_fps.setter + def grab_compute_capping_fps(self, grab_compute_capping_fps: Any) -> None: + pass + + @property + def async_grab_camera_recovery(self) -> bool: + """ + Define the behavior of the automatic camera recovery during sl.Camera.grab() method call. + + When async is enabled and there's an issue with the communication with the sl.Camera object, + sl.Camera.grab() will exit after a short period and return the sl.ERROR_CODE.CAMERA_REBOOTING warning. 
+ \n The recovery will run in the background until the correct communication is restored. + \n When async_grab_camera_recovery is false, the sl.Camera.grab() method is blocking and will return + only once the camera communication is restored or the timeout is reached. + \n Default: False + """ + return bool() + + @async_grab_camera_recovery.setter + def async_grab_camera_recovery(self, async_grab_camera_recovery: Any) -> None: + pass + + @property + def enable_right_side_measure(self) -> bool: + """ + Enable the measurement computation on the right images. + + By default, the ZED SDK only computes a single depth map, aligned with the left camera image. + \n This parameter allows you to enable sl.MEASURE.DEPTH_RIGHT and other sl.MEASURE.XXX_RIGHT at the cost of additional computation time. + \n For example, mixed reality pass-through applications require one depth map per eye, so this parameter can be activated. + \n Default: False + """ + return bool() + + @enable_right_side_measure.setter + def enable_right_side_measure(self, enable_right_side_measure: Any) -> None: + pass + + @property + def svo_real_time_mode(self) -> bool: + """ + Defines if sl.Camera object return the frame in real time mode. + + When playing back an SVO file, each call to sl.Camera.grab() will extract a new frame and use it. + \n However, it ignores the real capture rate of the images saved in the SVO file. + \n Enabling this parameter will bring the SDK closer to a real simulation when playing back a file by using the images' timestamps. + \n Default: False + .. note:: + sl.Camera.grab() will return an error when trying to play too fast, and frames will be dropped when playing too slowly. + """ + return bool() + + @svo_real_time_mode.setter + def svo_real_time_mode(self, svo_real_time_mode: Any) -> None: + pass + + @property + def sdk_verbose_log_file(self) -> str: + """ + File path to store the ZED SDK logs (if sdk_verbose is enabled). + + The file will be created if it does not exist. + \n Default: "" + + .. note:: + Setting this parameter to any value will redirect all standard output print calls of the entire program. + + .. note:: + This means that your own standard output print calls will be redirected to the log file. + + .. warning:: The log file won't be cleared after successive executions of the application. + .. warning:: This means that it can grow indefinitely if not cleared. + """ + return str() + + @sdk_verbose_log_file.setter + def sdk_verbose_log_file(self, sdk_verbose_log_file: Any) -> None: + pass + + @property + def depth_minimum_distance(self) -> float: + """ + Minimum depth distance to be returned, measured in the sl.UNIT defined in coordinate_units. + + This parameter allows you to specify the minimum depth value (from the camera) that will be computed. + \n Setting this value to any negative or null value will select the default minimum depth distance available for the used ZED Camera (depending on the camera focal length and baseline). + \n Default: -1 + + \n When using deprecated depth modes ( sl.DEPTH_MODE.PERFORMANCE, sl.DEPTH_MODE.QUALITY or sl.DEPTH_MODE.ULTRA), + the default minimum depth distances are given by `this table `_. + + .. note:: + This value cannot be greater than 3 meters. + """ + return float() + + @depth_minimum_distance.setter + def depth_minimum_distance(self, depth_minimum_distance: Any) -> None: + pass + + @property + def coordinate_units(self) -> UNIT: + """ + Unit of spatial data (depth, point cloud, tracking, mesh, etc.) for retrieval. 
+
+ Default: sl.UNIT.MILLIMETER
+ """
+ return UNIT()
+
+ @coordinate_units.setter
+ def coordinate_units(self, coordinate_units: Any) -> None:
+ pass
+
+ @property
+ def open_timeout_sec(self) -> float:
+ """
+ Defines a timeout in seconds after which an error is reported if the sl.Camera.open() method fails.
+
+ Set to '-1' to try to open the camera endlessly without returning an error in case of failure.
+ \n Set to '0' to return an error if the first attempt fails.
+ \n Default: 5.0
+ .. note::
+ This parameter only impacts the LIVE mode.
+ """
+ return float()
+
+ @open_timeout_sec.setter
+ def open_timeout_sec(self, open_timeout_sec: Any) -> None:
+ pass
+
+ @property
+ def depth_stabilization(self) -> int:
+ return int()
+
+ @depth_stabilization.setter
+ def depth_stabilization(self, depth_stabilization: Any) -> None:
+ pass
+
+ @property
+ def depth_mode(self) -> DEPTH_MODE:
+ """
+ sl.DEPTH_MODE to be used.
+
+ The ZED SDK offers several sl.DEPTH_MODE, offering various levels of performance and accuracy.
+ \n This parameter allows you to set the sl.DEPTH_MODE that best matches your needs.
+ \n Default: sl.DEPTH_MODE.NEURAL
+ .. note::
+ Available depth modes are listed here: sl.DEPTH_MODE.
+ """
+ return DEPTH_MODE()
+
+ @depth_mode.setter
+ def depth_mode(self, depth_mode: Any) -> None:
+ pass
+
+ @property
+ def depth_maximum_distance(self) -> float:
+ """
+ Maximum depth distance to be returned, measured in the sl.UNIT defined in coordinate_units.
+
+ When estimating the depth, the ZED SDK uses this upper limit to turn higher values into **inf** ones.
+ \n Changing this value has no impact on performance and doesn't affect the positional tracking or the spatial mapping.
+ \n It only changes the values of the depth map, point cloud and normals.
+ \n Setting this value to any negative or null value will select the default maximum depth distance available.
+
+ \n Default: -1
+ """
+ return float()
+
+ @depth_maximum_distance.setter
+ def depth_maximum_distance(self, depth_maximum_distance: Any) -> None:
+ pass
+
+ @property
+ def enable_image_validity_check(self) -> int:
+ """
+ Enable or disable the image validity verification.
+ This will perform additional verification on the image to identify corrupted data. This verification is done in the sl.Camera.grab() method and requires some computations.
+ \n If an issue is found, the sl.Camera.grab() method will output a warning as sl.ERROR_CODE.CORRUPTED_FRAME.
+ \n This version doesn't detect frame tearing currently.
+ \n Default: 0 (disabled)
+ """
+ return int()
+
+ @enable_image_validity_check.setter
+ def enable_image_validity_check(self, enable_image_validity_check: Any) -> None:
+ pass
+
+ @property
+ def async_image_retrieval(self) -> bool:
+ """
+ Enable async image retrieval.
+
+ If set to True, camera images will be retrieved at a framerate different from the grab() application framerate. This is useful for recording an SVO or sending the camera stream at a different rate than the application.
+ \n Default: False
+ """
+ return bool()
+
+ @async_image_retrieval.setter
+ def async_image_retrieval(self, async_image_retrieval: Any) -> None:
+ pass
+
+ @property
+ def sensors_required(self) -> bool:
+ """
+ Requires the successful opening of the motion sensors before opening the camera.
+
+ Default: False.
+
+ .. note::
+ If set to False, the ZED SDK will try to **open and use** the IMU (second USB device on USB2.0) and will open the camera successfully even if the sensors failed to open.
+ + + This can be used for example when using a USB3.0 only extension cable (some fiber extension for example). + .. note:: + This parameter only impacts the LIVE mode. + + .. note:: + If set to true, sl.Camera.open() will fail if the sensors cannot be opened. + + .. note:: + This parameter should be used when the IMU data must be available, such as object detection module or when the gravity is needed. + + + \nNote: This setting is not taken into account for sl.MODEL.ZED camera since it does not include sensors. + """ + return bool() + + @sensors_required.setter + def sensors_required(self, sensors_required: Any) -> None: + pass + + @property + def camera_fps(self) -> int: + """ + Requested camera frame rate. + + If set to 0, the highest FPS of the specified camera_resolution will be used. + \n Default: 0 + \n\n See sl.RESOLUTION for a list of supported frame rates. + .. note:: + If the requested camera_fps is unsupported, the closest available FPS will be used. + """ + return int() + + @camera_fps.setter + def camera_fps(self, camera_fps: Any) -> None: + pass + + @property + def optional_opencv_calibration_file(self) -> str: + """ + Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. + + .. note:: + Using this will disable the factory calibration of the camera. + + .. note:: + The file must be in a XML/YAML/JSON formatting provided by OpenCV. + + .. note:: + It also must contain the following keys: Size, K_LEFT (intrinsic left), K_RIGHT (intrinsic right), + + D_LEFT (distortion left), D_RIGHT (distortion right), R (extrinsic rotation), T (extrinsic translation). + .. warning:: Erroneous calibration values can lead to poor accuracy in all ZED SDK modules. + """ + return str() + + @optional_opencv_calibration_file.setter + def optional_opencv_calibration_file(self, optional_opencv_calibration_file: Any) -> None: + pass + + @property + def camera_resolution(self) -> RESOLUTION: + """ + Desired camera resolution. + .. note:: + Small resolutions offer higher framerate and lower computation time. + + .. note:: + In most situations, sl.RESOLUTION.HD720 at 60 FPS is the best balance between image quality and framerate. + + + Default: + * ZED X/X Mini: sl.RESOLUTION.HD1200 + * other cameras: sl.RESOLUTION.HD720 + .. note:: + Available resolutions are listed here: sl.RESOLUTION. + """ + return RESOLUTION() + + @camera_resolution.setter + def camera_resolution(self, camera_resolution: Any) -> None: + pass + + @property + def camera_disable_self_calib(self) -> bool: + """ + Disables the self-calibration process at camera opening. + + At initialization, sl.Camera runs a self-calibration process that corrects small offsets from the device's factory calibration. + \n A drawback is that calibration parameters will slightly change from one (live) run to another, which can be an issue for repeatability. + \n If set to true, self-calibration will be disabled and calibration parameters won't be optimized, raw calibration parameters from the configuration file will be used. + \n Default: false + .. note:: + In most situations, self calibration should remain enabled. + + .. note:: + You can also trigger the self-calibration at anytime after sl.Camera.open() by calling sl.Camera.update_self_calibration(), even if this parameter is set to true. 
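+
+ A minimal sketch (values are illustrative, assuming a connected camera):
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters()
+ init_params.camera_disable_self_calib = True # use raw factory calibration only
+ zed = sl.Camera()
+ err = zed.open(init_params)
+ # Self-calibration can still be triggered manually later:
+ # zed.update_self_calibration()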
+ """ + return bool() + + @camera_disable_self_calib.setter + def camera_disable_self_calib(self, camera_disable_self_calib: Any) -> None: + pass + + @property + def sdk_verbose(self) -> int: + """ + Enable the ZED SDK verbose mode. + + This parameter allows you to enable the verbosity of the ZED SDK to get a variety of runtime information in the console. + \n When developing an application, enabling verbose (``sdk_verbose >= 1``) mode can help you understand the current ZED SDK behavior. + \n However, this might not be desirable in a shipped version. + \n Default: 1 (verbose messages enabled) + .. note:: + The verbose messages can also be exported into a log file. + + .. note:: + See sdk_verbose_log_file for more. + """ + return int() + + @sdk_verbose.setter + def sdk_verbose(self, sdk_verbose: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default and optimized values. + :param camera_resolution: Chosen camera_resolution + :param camera_fps: Chosen camera_fps + :param svo_real_time_mode: Activates svo_real_time_mode + :param depth_mode: Chosen depth_mode + :param coordinate_units: Chosen coordinate_units + :param coordinate_system: Chosen coordinate_system + :param sdk_verbose: Sets sdk_verbose + :param sdk_gpu_id: Chosen sdk_gpu_id + :param depth_minimum_distance: Chosen depth_minimum_distance + :param depth_maximum_distance: Chosen depth_maximum_distance + :param camera_disable_self_calib: Activates camera_disable_self_calib + :param camera_image_flip: Sets camera_image_flip + :param enable_right_side_measure: Activates enable_right_side_measure + :param sdk_verbose_log_file: Chosen sdk_verbose_log_file + :param depth_stabilization: Activates depth_stabilization + :param input_t: Chosen input_t (InputType ) + :param optional_settings_path: Chosen optional_settings_path + :param sensors_required: Activates sensors_required + :param enable_image_enhancement: Activates enable_image_enhancement + :param optional_opencv_calibration_file: Sets optional_opencv_calibration_file + :param open_timeout_sec: Sets open_timeout_sec + :param async_grab_camera_recovery: Sets async_grab_camera_recovery + :param grab_compute_capping_fps: Sets grab_compute_capping_fps + :param enable_image_validity_check: Sets enable_image_validity_check + :param maximum_working_resolution: Sets maximum_working_resolution + + .. code-block:: text + + params = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720, camera_fps=30, depth_mode=sl.DEPTH_MODE.NEURAL) + """ + pass + + def save(self, filename) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set). + :return: True if file was successfully saved, otherwise False. + .. warning:: For security reason, the file must not exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated + + .. code-block:: text + + init_params = sl.InitParameters() # Set initial parameters + init_params.sdk_verbose = 1 # Enable verbose mode + init_params.set_from_svo_file("/path/to/file.svo") # Selects the and SVO file to be read + init_params.save("initParameters.conf") # Export the parameters into a file + """ + return bool() + + def load(self, filename) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. 
+ :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not set).
+ :return: True if the file was successfully loaded, otherwise False.
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.load("initParameters.conf") # Load the init_params from a previously exported file
+ """
+ return bool()
+
+ def input(self, input_t) -> None:
+ """
+ The SDK can handle different input types:
+ - Select a camera by its ID (/dev/videoX on Linux, and 0 to N cameras connected on Windows)
+ - Select a camera by its serial number
+ - Open a recorded sequence in the SVO file format
+ - Open a streaming camera from its IP address and port
+
+ This parameter allows you to select the desired input. It should be used like this:
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_camera_id(0) # Selects the camera with ID = 0
+ init_params.input = input_t
+ init_params.set_from_camera_id(0) # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_serial_number(1010) # Selects the camera with serial number = 1010
+ init_params.input = input_t
+ init_params.set_from_serial_number(1010) # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_svo_file("/path/to/file.svo") # Selects the SVO file to be read
+ init_params.input = input_t
+ init_params.set_from_svo_file("/path/to/file.svo") # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_stream("192.168.1.42")
+ init_params.input = input_t
+ init_params.set_from_stream("192.168.1.42") # You can also use this
+
+
+ Available cameras and their ID/serial can be listed using get_device_list() and get_streaming_device_list().
+ Each sl.Camera will create its own memory (CPU and GPU); therefore, the number of ZEDs used at the same time can be limited by the configuration of your computer (GPU/CPU memory and capabilities).
+
+ Default: (empty)
+ See InputType for complementary information.
+
+ .. warning:: With the ZED SDK Python API, init_params.input.set_from_XXX won't work; use init_params.set_from_XXX instead.
+ """
+ pass
+
+ def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None:
+ """
+ Defines the input source with a camera id to initialize and open an sl.Camera object from.
+ :param cam_id: Id of the desired camera to open.
+ :param bus_type: sl.BUS_TYPE of the desired camera to open.
+ """
+ pass
+
+ def set_from_serial_number(self, serial_number) -> None:
+ """
+ Defines the input source with a serial number to initialize and open an sl.Camera object from.
+ :param serial_number: Serial number of the desired camera to open.
+ """
+ pass
+
+ def set_from_svo_file(self, svo_input_filename) -> None:
+ """
+ Defines the input source with an SVO file to initialize and open an sl.Camera object from.
+ :param svo_input_filename: Path to the desired SVO file to open.
+ """
+ pass
+
+ def set_from_stream(self, sender_ip, port = 30000) -> None:
+ """
+ Defines the input source from a stream to initialize and open an sl.Camera object from.
+ :param sender_ip: IP address of the streaming sender.
+ :param port: Port on which to listen. Default: 30000
+ """
+ pass
+
+
+ class RuntimeParameters:
+ """
+ Class containing parameters that define the behavior of sl.Camera.grab().
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def texture_confidence_threshold(self) -> int:
+ """
+ Threshold to reject depth values based on their texture confidence.
+
+ The texture confidence range is [1, 100].
+ \n Decreasing this value will remove depth data from image areas which are uniform.
+ \n Default: 100 (no depth pixel will be rejected)
+ .. note::
+ Pixels with a value close to 100 are not to be trusted. Accurate depth pixels tend to be closer to lower values.
+ """
+ return int()
+
+ @texture_confidence_threshold.setter
+ def texture_confidence_threshold(self, texture_confidence_threshold: Any) -> None:
+ pass
+
+ @property
+ def measure3D_reference_frame(self) -> REFERENCE_FRAME:
+ """
+ Reference frame in which the 3D measures (point cloud, normals, etc.) are provided.
+
+ Default: sl.REFERENCE_FRAME.CAMERA
+ """
+ return REFERENCE_FRAME()
+
+ @measure3D_reference_frame.setter
+ def measure3D_reference_frame(self, measure3D_reference_frame: Any) -> None:
+ pass
+
+ @property
+ def confidence_threshold(self) -> int:
+ """
+ Threshold to reject depth values based on their confidence.
+
+ Each depth pixel has a corresponding confidence (sl.MEASURE.CONFIDENCE) in the range [1, 100].
+ \n Decreasing this value will remove depth data from both object edges and low textured areas, to keep only confident depth estimation data.
+ \n Default: 95
+ .. note::
+ Pixels with a value close to 100 are not to be trusted. Accurate depth pixels tend to be closer to lower values.
+
+ .. note::
+ It can be seen as a probability of error, scaled to 100.
+ """
+ return int()
+
+ @confidence_threshold.setter
+ def confidence_threshold(self, confidence_threshold: Any) -> None:
+ pass
+
+ @property
+ def enable_fill_mode(self) -> bool:
+ """
+ Defines if the depth map should be completed or not.
+
+ Default: False
+ .. note::
+ It is similar to the removed sl.SENSING_MODE.FILL.
+
+ .. warning:: Enabling this will override the confidence values confidence_threshold and texture_confidence_threshold as well as remove_saturated_areas.
+ """
+ return bool()
+
+ @enable_fill_mode.setter
+ def enable_fill_mode(self, enable_fill_mode: Any) -> None:
+ pass
+
+ @property
+ def enable_depth(self) -> bool:
+ """
+ Defines if the depth map should be computed.
+
+ Default: True
+ .. note::
+ If set to False, only the images are available.
+ """
+ return bool()
+
+ @enable_depth.setter
+ def enable_depth(self, enable_depth: Any) -> None:
+ pass
+
+ @property
+ def remove_saturated_areas(self) -> bool:
+ """
+ Defines if saturated areas (luminance >= 255) must be removed from the depth map estimation.
+
+ Default: True
+ .. note::
+ It is recommended to keep this parameter at True because saturated areas can create false detections.
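+
+ A minimal usage sketch (threshold values are illustrative, assuming an opened sl.Camera named zed):
+
+ .. code-block:: text
+
+ runtime_params = sl.RuntimeParameters()
+ runtime_params.confidence_threshold = 50 # reject low-confidence depth pixels
+ runtime_params.remove_saturated_areas = True
+ if zed.grab(runtime_params) == sl.ERROR_CODE.SUCCESS:
+ pass # images and measures can now be retrieved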
+ """ + return bool() + + @remove_saturated_areas.setter + def remove_saturated_areas(self, remove_saturated_areas: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default values. + :param enable_depth: Activates enable_depth + :param enable_fill_mode: Activates enable_fill_mode + :param confidence_threshold: Chosen confidence_threshold + :param texture_confidence_threshold: Chosen texture_confidence_threshold + :param measure3D_reference_frame: Chosen measure3D_reference_frame + :param remove_saturated_areas: Activates remove_saturated_areas + """ + pass + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set). + :return: True if the file was successfully saved, otherwise False. + .. warning:: For security reasons, the file must not already exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated. + """ + return bool() + + def load(self, filename: str) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. + :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not detected). + :return: True if the file was successfully loaded, otherwise False. + """ + return bool() + + +class PositionalTrackingParameters: + """ + Class containing a set of parameters for the positional tracking module initialization. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_2d_ground_mode(self) -> bool: + """ + Whether to enable 2D localization mode + """ + return bool() + + @enable_2d_ground_mode.setter + def enable_2d_ground_mode(self, enable_2d_ground_mode: Any) -> None: + pass + + @property + def set_gravity_as_origin(self) -> bool: + """ + Whether to override 2 of the 3 rotations from initial_world_transform using the IMU gravity. + Default: True + .. note:: + This parameter does nothing on sl.ZED.MODEL since it does not have an IMU. + """ + return bool() + + @set_gravity_as_origin.setter + def set_gravity_as_origin(self, set_gravity_as_origin: Any) -> None: + pass + + @property + def enable_area_memory(self) -> bool: + """ + Whether the camera can remember its surroundings. + This helps correct positional tracking drift and can be helpful for positioning different cameras relative to one other in space. + \n Default: true + + .. warning:: This mode requires more resources to run, but greatly improves tracking accuracy. + .. warning:: We recommend leaving it on by default. + """ + return bool() + + @enable_area_memory.setter + def enable_area_memory(self, enable_area_memory: Any) -> None: + pass + + @property + def area_file_path(self) -> str: + """ + Path of an area localization file that describes the surroundings (saved from a previous tracking session). + Default: (empty) + .. note:: + Loading an area file will start a search phase, during which the camera will try to position itself in the previously learned area. + + .. warning:: The area file describes a specific location. 
If you are using an area file describing a different location, the tracking function will continuously search for a position and may not find a correct one. + .. warning:: The '.area' file can only be used with the same depth mode (sl.DEPTH_MODE) as the one used during area recording. + """ + return str() + + @area_file_path.setter + def area_file_path(self, area_file_path: Any) -> None: + pass + + @property + def mode(self) -> POSITIONAL_TRACKING_MODE: + """ + Positional tracking mode used. + Can be used to improve accuracy in some types of scene at the cost of longer runtime. + \n Default: sl.POSITIONAL_TRACKING_MODE.GEN_1 + """ + return POSITIONAL_TRACKING_MODE() + + @mode.setter + def mode(self, mode: Any) -> None: + pass + + @property + def set_floor_as_origin(self) -> bool: + """ + Initializes the tracking to be aligned with the floor plane to better position the camera in space. + Default: False + .. note:: + This launches floor plane detection in the background until a suitable floor plane is found. + + .. note:: + The tracking will start in sl.POSITIONAL_TRACKING_STATE.SEARCHING state. + + .. warning:: This features does not work with sl.MODEL.ZED since it needs an IMU to classify the floor. + .. warning:: The camera needs to look at the floor during initialization for optimum results. + """ + return bool() + + @set_floor_as_origin.setter + def set_floor_as_origin(self, set_floor_as_origin: Any) -> None: + pass + + @property + def set_as_static(self) -> bool: + """ + Whether to define the camera as static. + If true, it will not move in the environment. This allows you to set its position using initial_world_transform. + \n All ZED SDK functionalities requiring positional tracking will be enabled without additional computation. + \n sl.Camera.get_position() will return the value set as initial_world_transform. + Default: False + """ + return bool() + + @set_as_static.setter + def set_as_static(self, set_as_static: Any) -> None: + pass + + @property + def enable_imu_fusion(self) -> bool: + """ + Whether to enable the IMU fusion. + When set to False, only the optical odometry will be used. + \n Default: True + .. note:: + This setting has no impact on the tracking of a camera. + + .. note:: + sl.MODEL.ZED does not have an IMU. + """ + return bool() + + @enable_imu_fusion.setter + def enable_imu_fusion(self, enable_imu_fusion: Any) -> None: + pass + + @property + def enable_localization_only(self) -> bool: + """ + Whether to enable the area mode in localize only mode. + """ + return bool() + + @enable_localization_only.setter + def enable_localization_only(self, enable_localization_only: Any) -> None: + pass + + @property + def depth_min_range(self) -> float: + """ + Minimum depth used by the ZED SDK for positional tracking. + It may be useful for example if any steady objects are in front of the camera and may perturb the positional tracking algorithm. + \n Default: -1 (no minimum depth) + """ + return float() + + @depth_min_range.setter + def depth_min_range(self, depth_min_range: Any) -> None: + pass + + @property + def enable_pose_smoothing(self) -> bool: + """ + Whether to enable smooth pose correction for small drift correction. + Default: False + """ + return bool() + + @enable_pose_smoothing.setter + def enable_pose_smoothing(self, enable_pose_smoothing: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. 
+ :param _init_pos: Chosen initial camera position in the world frame (Transform) + :param _enable_memory: Activates enable_memory + :param _enable_pose_smoothing: Activates enable_pose_smoothing + :param _area_path: Chosen area_path + :param _set_floor_as_origin: Activates set_floor_as_origin + :param _enable_imu_fusion: Activates enable_imu_fusion + :param _set_as_static: Activates set_as_static + :param _depth_min_range: Activates depth_min_range + :param _set_gravity_as_origin: Activates set_gravity_as_origin + :param _mode: Chosen mode + + .. code-block:: text + + params = sl.PositionalTrackingParameters(init_pos=sl.Transform(), _enable_pose_smoothing=True) + """ + pass + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters. + :return: True if the file was successfully saved, otherwise False. + .. warning:: For security reasons, the file must not already exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated. + """ + return bool() + + def load(self, filename: str) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. + :param filename: Path to the file from which the parameters will be loaded. + :return: True if the file was successfully loaded, otherwise False. + """ + return bool() + + def initial_world_transform(self, init_pos = Transform()) -> Transform: + """ + Position of the camera in the world frame when the camera is started. + Use this sl.Transform to place the camera frame in the world frame. + \n Default: Identity matrix. + + .. note:: + The camera frame (which defines the reference frame for the camera) is by default positioned at the world frame when tracking is started. + """ + return Transform() + + def set_initial_world_transform(self, value: Transform) -> None: + """ + Set the position of the camera in the world frame when the camera is started. + :param value: Position of the camera in the world frame when the camera will start. + """ + pass + + +class STREAMING_CODEC(enum.Enum): + """ + Lists the different encoding types for image streaming. + + | Enumerator | | + |:---:|:---:| + | H264 | AVCHD/H264 encoding | + | H265 | HEVC/H265 encoding | + """ + H264 = enum.auto() + H265 = enum.auto() + LAST = enum.auto() + +class StreamingProperties: + """ + Class containing information about the properties of a streaming device. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def serial_number(self) -> int: + """ + Serial number of the streaming camera. + + Default: 0 + """ + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + @property + def ip(self) -> str: + """ + IP address of the streaming device. + + Default: "" + """ + return str() + + @ip.setter + def ip(self, ip: Any) -> None: + pass + + @property + def port(self) -> int: + """ + Streaming port of the streaming device. + + Default: 0 + """ + return int() + + @port.setter + def port(self, port: Any) -> None: + pass + + @property + def codec(self) -> STREAMING_CODEC: + """ + Current codec used for compression in streaming device. 
+
+ Default: sl.STREAMING_CODEC.H265
+ """
+ return STREAMING_CODEC()
+
+ @codec.setter
+ def codec(self, codec: Any) -> None:
+ pass
+
+ @property
+ def current_bitrate(self) -> int:
+ """
+ Current bitrate of encoding of the streaming device.
+
+ Default: 0
+ """
+ return int()
+
+ @current_bitrate.setter
+ def current_bitrate(self, current_bitrate: Any) -> None:
+ pass
+
+
+ class StreamingParameters:
+ """
+ Class containing the options used to stream with the ZED SDK.
+
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def gop_size(self) -> int:
+ """
+ GOP size in number of frames.
+
+ Default: -1 (the GOP size will last at maximum 2 seconds, depending on camera FPS)
+ .. note::
+ The GOP size determines the maximum distance between IDR/I-frames. A very high GOP size will result in slightly more efficient compression, especially on static scenes, but latency will increase.
+
+ .. note::
+ Maximum value: 256
+ """
+ return int()
+
+ @gop_size.setter
+ def gop_size(self, gop_size: Any) -> None:
+ pass
+
+ @property
+ def adaptative_bitrate(self) -> bool:
+ """
+ Defines whether the adaptive bitrate is enabled.
+
+ Default: False
+ .. note::
+ Bitrate will be adjusted depending on the number of packets dropped during streaming.
+
+ .. note::
+ If activated, the bitrate can vary between [bitrate/4, bitrate].
+
+ .. warning:: Currently, the adaptive bitrate only works when the "sending" device is an NVIDIA Jetson (X1, X2, Xavier, Nano).
+ """
+ return bool()
+
+ @adaptative_bitrate.setter
+ def adaptative_bitrate(self, adaptative_bitrate: Any) -> None:
+ pass
+
+ @property
+ def target_framerate(self) -> int:
+ """
+ Framerate for the streaming output.
+
+ Default: 0 (camera framerate will be taken)
+ .. warning:: This framerate must be below or equal to the camera framerate.
+ .. warning:: Allowed framerates are 15, 30, 60 or 100 if possible.
+ .. warning:: Any other values will be discarded and camera FPS will be taken.
+ """
+ return int()
+
+ @target_framerate.setter
+ def target_framerate(self, target_framerate: Any) -> None:
+ pass
+
+ @property
+ def chunk_size(self) -> int:
+ """
+ Size of a single chunk.
+
+ Default: 16084
+ .. note::
+ Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long.
+
+ .. note::
+ You can lower the chunk_size value if the network drops a lot of packets: this will generate more chunks for a single image, but each chunk sent will be lighter, avoiding inside-chunk corruption.
+ .. note::
+ Increasing this value can decrease latency.
+
+
+ \n Note: Available range: [1024 - 65000]
+ """
+ return int()
+
+ @chunk_size.setter
+ def chunk_size(self, chunk_size: Any) -> None:
+ pass
+
+ @property
+ def port(self) -> int:
+ """
+ Port used for streaming.
+ .. warning:: Port must be an even number. Any odd number will be rejected.
+ .. warning:: The port must be open.
+ """
+ return int()
+
+ @port.setter
+ def port(self, port: Any) -> None:
+ pass
+
+ @property
+ def codec(self) -> STREAMING_CODEC:
+ """
+ Encoding used for streaming.
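+
+ A minimal sender sketch (values are illustrative, assuming an opened sl.Camera named zed):
+
+ .. code-block:: text
+
+ stream_params = sl.StreamingParameters()
+ stream_params.codec = sl.STREAMING_CODEC.H265
+ stream_params.port = 30000 # must be an even port
+ err = zed.enable_streaming(stream_params)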
+ """ + return STREAMING_CODEC() + + @codec.setter + def codec(self, codec: Any) -> None: + pass + + @property + def bitrate(self) -> int: + """ + Defines the streaming bitrate in Kbits/s + | STREAMING_CODEC | RESOLUTION | FPS | Bitrate (kbps) | + |:---:|:---:|:---:|:---:| + | H264 | HD2K | 15 | 8500 | + | H264 | HD1080 | 30 | 12500 | + | H264 | HD720 | 60 | 7000 | + | H265 | HD2K | 15 | 7000 | + | H265 | HD1080 | 30 | 11000 | + | H265 | HD720 | 60 | 6000 | + + Default: 0 (it will be set to the best value depending on your resolution/FPS) + .. note:: + Available range: [1000 - 60000] + """ + return int() + + @bitrate.setter + def bitrate(self, bitrate: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default values. + :param codec: Chosen codec + :param port: Chosen port + :param bitrate: Chosen bitrate + :param gop_size: Chosen gop_size + :param adaptative_bitrate: Activtates adaptative_bitrate + :param chunk_size: Chosen chunk_size + :param target_framerate: Chosen target_framerate + + .. code-block:: text + + params = sl.StreamingParameters(port=30000) + """ + pass + + +class RecordingParameters: + """ + Class containing the options used to record. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def transcode_streaming_input(self) -> bool: + """ + Defines whether to decode and re-encode a streaming source. + + Default: False + .. note:: + If set to False, it will avoid decoding/re-encoding and convert directly streaming input into a SVO file. + + .. note:: + This saves a encoding session and can be especially useful on NVIDIA Geforce cards where the number of encoding session is limited. + + .. note:: + compression_mode, target_framerate and bitrate will be ignored in this mode. + """ + return bool() + + @transcode_streaming_input.setter + def transcode_streaming_input(self, transcode_streaming_input: Any) -> None: + pass + + @property + def target_framerate(self) -> int: + """ + Framerate for the recording file. + + Default: 0 (camera framerate will be taken) + .. warning:: This framerate must be below or equal to the camera framerate and camera framerate must be a multiple of the target framerate. + .. warning:: It means that it must respect `` camera_framerate%target_framerate == 0``. + .. warning:: Allowed framerates are 15,30, 60 or 100 if possible. + .. warning:: Any other values will be discarded and camera FPS will be taken. + """ + return int() + + @target_framerate.setter + def target_framerate(self, target_framerate: Any) -> None: + pass + + @property + def compression_mode(self) -> SVO_COMPRESSION_MODE: + """ + Compression mode the recording. + + Default: sl.SVO_COMPRESSION_MODE.H264 + """ + return SVO_COMPRESSION_MODE() + + @compression_mode.setter + def compression_mode(self, compression_mode: Any) -> None: + pass + + @property + def bitrate(self) -> int: + """ + Overrides the default bitrate of the SVO file, in kbits/s. + + Default: 0 (the default values associated with the resolution) + .. note:: + Only works if compression_mode is H264 or H265. + + .. note:: + Available range: 0 or [1000 - 60000] + """ + return int() + + @bitrate.setter + def bitrate(self, bitrate: Any) -> None: + pass + + @property + def video_filename(self) -> str: + """ + Filename of the file to save the recording into. 
+ """ + return str() + + @video_filename.setter + def video_filename(self, video_filename: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default values. + :param video_filename: Chosen video_filename + :param compression_mode: Chosen compression_mode + :param target_framerate: Chosen target_framerate + :param bitrate: Chosen bitrate + :param transcode_streaming_input: Enables transcode_streaming_input + + .. code-block:: text + + params = sl.RecordingParameters(video_filename="record.svo",compression_mode=SVO_COMPRESSION_MODE.H264) + """ + pass + + +class SpatialMappingParameters: + """ + Class containing a set of parameters for the spatial mapping module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def save_texture(self) -> bool: + """ + Whether to save the texture. + If set to true, you will be able to apply the texture to your mesh after it is created. + \n Default: False + .. note:: + This option will consume more memory. + + .. note:: + This option is only available for sl.SPATIAL_MAP_TYPE.MESH. + """ + return bool() + + @save_texture.setter + def save_texture(self, save_texture: Any) -> None: + pass + + @property + def range_meter(self) -> float: + """ + Depth range in meters. + Can be different from the value set by sl.InitParameters.depth_maximum_distance. + .. note:: + Set to 0 by default. In this case, the range is computed from resolution_meter + + and from the current internal parameters to fit your application. + """ + return float() + + @range_meter.setter + def range_meter(self, range_meter: Any) -> None: + pass + + @property + def map_type(self) -> SPATIAL_MAP_TYPE: + """ + The type of spatial map to be created. + This dictates the format that will be used for the mapping (e.g. mesh, point cloud). + \n See sl.SPATIAL_MAP_TYPE. + """ + return SPATIAL_MAP_TYPE() + + @map_type.setter + def map_type(self, map_type: Any) -> None: + pass + + @property + def use_chunk_only(self) -> bool: + """ + Whether to only use chunks. + If set to False, you will ensure consistency between the mesh and its inner chunk data. + \n Default: False + .. note:: + Updating the mesh is time-consuming. + + .. note:: + Setting this to True results in better performance. + """ + return bool() + + @use_chunk_only.setter + def use_chunk_only(self, use_chunk_only: Any) -> None: + pass + + @property + def reverse_vertex_order(self) -> bool: + """ + Whether to inverse the order of the vertices of the triangles. + If your display process does not handle front and back face culling, you can use this to correct it. + \n Default: False + .. note:: + This option is only available for sl.SPATIAL_MAP_TYPE.MESH. + """ + return bool() + + @reverse_vertex_order.setter + def reverse_vertex_order(self, reverse_vertex_order: Any) -> None: + pass + + @property + def stability_counter(self) -> int: + """ + Control the integration rate of the current depth into the mapping process. + This parameter controls how many times a stable 3D points should be seen before it is integrated into the spatial mapping. 
+ \n Default: 0 (this will define the stability counter based on the mesh resolution, the higher the resolution, the higher the stability counter)
+ """
+ return int()
+
+ @stability_counter.setter
+ def stability_counter(self, stability_counter: Any) -> None:
+ pass
+
+ @property
+ def resolution_meter(self) -> float:
+ """
+ Spatial mapping resolution in meters.
+ Default: 0.05
+ .. note::
+ It should fit allowed_resolution.
+ """
+ return float()
+
+ @resolution_meter.setter
+ def resolution_meter(self, resolution_meter: Any) -> None:
+ pass
+
+ @property
+ def max_memory_usage(self) -> int:
+ """
+ The maximum CPU memory (in MB) allocated for the meshing process.
+ Default: 2048
+ """
+ return int()
+
+ @max_memory_usage.setter
+ def max_memory_usage(self, max_memory_usage: Any) -> None:
+ pass
+
+ def __dealloc__(self) -> None:
+ """
+ Default constructor.
+ Sets all parameters to their default and optimized values.
+ :param resolution: Chosen MAPPING_RESOLUTION
+ :param mapping_range: Chosen MAPPING_RANGE
+ :param max_memory_usage: Chosen max_memory_usage
+ :param save_texture: Activates save_texture
+ :param use_chunk_only: Activates use_chunk_only
+ :param reverse_vertex_order: Activates reverse_vertex_order
+ :param map_type: Chosen map_type
+
+ .. code-block:: text
+
+ params = sl.SpatialMappingParameters(resolution=sl.MAPPING_RESOLUTION.HIGH)
+ """
+ pass
+
+ def set_resolution(self, resolution = MAPPING_RESOLUTION.HIGH) -> None:
+ """
+ Sets the resolution to a sl.MAPPING_RESOLUTION preset.
+ :param resolution: The desired sl.MAPPING_RESOLUTION. Default: sl.MAPPING_RESOLUTION.HIGH
+ """
+ pass
+
+ def set_range(self, mapping_range = MAPPING_RANGE.AUTO) -> None:
+ """
+ Sets the range to a sl.MAPPING_RANGE preset.
+ :param mapping_range: The desired sl.MAPPING_RANGE. Default: sl.MAPPING_RANGE.AUTO
+ """
+ pass
+
+ def get_range_preset(self, mapping_range = MAPPING_RANGE.AUTO) -> float:
+ """
+ Returns the value corresponding to a sl.MAPPING_RANGE preset in meters.
+ :param mapping_range: The desired sl.MAPPING_RANGE. Default: sl.MAPPING_RANGE.AUTO
+ :return: The value of **mapping_range** in meters.
+ """
+ return float()
+
+ def get_resolution_preset(self, resolution = MAPPING_RESOLUTION.HIGH) -> float:
+ """
+ Returns the value corresponding to a sl.MAPPING_RESOLUTION preset in meters.
+ :param resolution: The desired sl.MAPPING_RESOLUTION. Default: sl.MAPPING_RESOLUTION.HIGH
+ :return: The value of **resolution** in meters.
+ """
+ return float()
+
+ def get_recommended_range(self, resolution, py_cam: Camera) -> float:
+ """
+ Returns the recommended maximum depth value corresponding to a resolution.
+ :param resolution: The desired resolution, either defined by a sl.MAPPING_RESOLUTION preset or a resolution value in meters.
+ :param py_cam: The sl.Camera object which will run the spatial mapping.
+ :return: The maximum value of depth in meters.
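+
+ A minimal sketch (assuming an opened sl.Camera named zed):
+
+ .. code-block:: text
+
+ mapping_params = sl.SpatialMappingParameters(resolution=sl.MAPPING_RESOLUTION.MEDIUM)
+ mapping_params.range_meter = mapping_params.get_recommended_range(sl.MAPPING_RESOLUTION.MEDIUM, zed)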
+ """ + return float() + + def allowed_range(self) -> np.array[float]: + """ + The maximum depth allowed by spatial mapping: + - **allowed_range.first is the minimum value allowed + - **allowed_range.second is the maximum value allowed + """ + return np.array[float]() + + def allowed_resolution(self) -> np.array[float]: + """ + The resolution allowed by the spatial mapping: + - **allowed_resolution.first is the minimum value allowed + - **allowed_resolution.second is the maximum value allowed + """ + return np.array[float]() + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set). + :return: True if the file was successfully saved, otherwise False. + .. warning:: For security reasons, the file must not already exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated. + """ + return bool() + + def load(self, filename: str) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. + :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not detected). + :return: True if the file was successfully loaded, otherwise False. + """ + return bool() + + +class Pose: + """ + Class containing positional tracking data giving the position and orientation of the camera in 3D space. + + Different representations of position and orientation can be retrieved, along with timestamp and pose confidence. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @pose_covariance.setter + def pose_covariance(self, pose_covariance: Any) -> None: + pass + + @property + def pose_confidence(self) -> int: + """ + Confidence/quality of the pose estimation for the target frame. + A confidence metric of the tracking [0-100] with: + - 0: tracking is lost + - 100: tracking can be fully trusted + """ + return int() + + @pose_confidence.setter + def pose_confidence(self, pose_confidence: Any) -> None: + pass + + @property + def valid(self) -> bool: + """ + Whether the tracking is activated or not. + .. note:: + You should check that first if something is wrong. + """ + return bool() + + @valid.setter + def valid(self, valid: Any) -> None: + pass + + @twist.setter + def twist(self, twist: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + sl.Timestamp of the sl.Pose. + This timestamp should be compared with the camera timestamp for synchronization. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @twist_covariance.setter + def twist_covariance(self, twist_covariance: Any) -> None: + pass + + def init_pose(self, pose: Pose) -> None: + """ + Deep copy from another sl.Pose. + :param pose: sl.Pose to copy. + """ + pass + + def init_transform(self, pose_data: Transform, timestamp = 0, confidence = 0) -> None: + """ + Initializes the sl.Pose from a sl.Transform. + :param pose_data: sl.Transform containing pose data to copy. + :param timestamp: Timestamp of the pose data. + :param confidence: Confidence of the pose data. + """ + pass + + def get_translation(self, py_translation = Translation()) -> Translation: + """ + Returns the sl.Translation corresponding to the current sl.Pose. + :param py_translation: sl.Translation to be returned. 
It creates one by default.
+ :return: sl.Translation filled with values from the sl.Pose.
+ """
+ return Translation()
+
+ def get_orientation(self, py_orientation = Orientation()) -> Orientation:
+ """
+ Returns the sl.Orientation corresponding to the current sl.Pose.
+ :param py_orientation: sl.Orientation to be returned. It creates one by default.
+ :return: sl.Orientation filled with values from the sl.Pose.
+ """
+ return Orientation()
+
+ def get_rotation_matrix(self, py_rotation = Rotation()) -> Rotation:
+ """
+ Returns the sl.Rotation corresponding to the current sl.Pose.
+ :param py_rotation: sl.Rotation to be returned. It creates one by default.
+ :return: sl.Rotation filled with values from the sl.Pose.
+ """
+ return Rotation()
+
+ def get_rotation_vector(self) -> np.array[float]:
+ """
+ Returns the 3x1 rotation vector (obtained from the 3x3 rotation matrix using the Rodrigues formula) corresponding to the current sl.Pose.
+ :return: Rotation vector (NumPy array) created from the sl.Pose values.
+ """
+ return np.array[float]()
+
+ def get_euler_angles(self, radian = True) -> np.array[float]:
+ """
+ Converts the rotation component of the sl.Pose into Euler angles.
+ :param radian: Whether the angles will be returned in radians or degrees. Default: True
+ :return: Euler angles (NumPy array) created from the sl.Pose values representing the rotations around the X, Y and Z axes using YZX convention.
+ """
+ return np.array[float]()
+
+ def pose_data(self, pose_data = Transform()) -> Transform:
+ """
+ sl.Transform containing the rotation and translation data of the sl.Pose.
+ :param pose_data: sl.Transform to be returned. It creates one by default.
+ :return: sl.Transform containing the rotation and translation data of the sl.Pose.
+ """
+ return Transform()
+
+ def pose_covariance(self) -> np.array[float]:
+ """
+ 6x6 pose covariance matrix (NumPy array) of translation (the first 3 values) and rotation in so3 (the last 3 values).
+ .. note::
+ Computed only if PositionalTrackingParameters.enable_spatial_memory is disabled.
+ """
+ return np.array[float]()
+
+ def twist(self) -> np.array[float]:
+ """
+ Twist of the camera, expressed in the camera reference frame.
+ This expresses velocity in free space, broken into its linear and angular parts.
+ """
+ return np.array[float]()
+
+ def twist_covariance(self) -> np.array[float]:
+ """
+ Row-major representation of the 6x6 twist covariance matrix of the camera.
+ This expresses the uncertainty of the twist.
+ """
+ return np.array[float]()
+
+
+class CAMERA_MOTION_STATE(enum.Enum):
+ """
+ Lists different states of the camera motion.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | STATIC | The camera is static. |
+ | MOVING | The camera is moving. |
+ | FALLING | The camera is falling. |
+ """
+ STATIC = enum.auto()
+ MOVING = enum.auto()
+ FALLING = enum.auto()
+ LAST = enum.auto()
+
+class SENSOR_LOCATION(enum.Enum):
+ """
+ Lists possible locations of temperature sensors.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | IMU | The temperature sensor is in the IMU. |
+ | BAROMETER | The temperature sensor is in the barometer. |
+ | ONBOARD_LEFT | The temperature sensor is next to the left image sensor. |
+ | ONBOARD_RIGHT | The temperature sensor is next to the right image sensor. 
| + """ + IMU = enum.auto() + BAROMETER = enum.auto() + ONBOARD_LEFT = enum.auto() + ONBOARD_RIGHT = enum.auto() + LAST = enum.auto() + +class BarometerData: + """ + Class containing data from the barometer sensor. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def pressure(self) -> float: + """ + Ambient air pressure in hectopascal (hPa). + """ + return float() + + @pressure.setter + def pressure(self, pressure: Any) -> None: + pass + + @property + def effective_rate(self) -> float: + """ + Realtime data acquisition rate in hertz (Hz). + """ + return float() + + @effective_rate.setter + def effective_rate(self, effective_rate: Any) -> None: + pass + + @property + def relative_altitude(self) -> float: + """ + Relative altitude from first camera position (at sl.Camera.open() time). + """ + return float() + + @relative_altitude.setter + def relative_altitude(self, relative_altitude: Any) -> None: + pass + + @property + def is_available(self) -> bool: + """ + Whether the barometer sensor is available in your camera. + """ + return bool() + + @is_available.setter + def is_available(self, is_available: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + Data acquisition timestamp. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + +class TemperatureData: + """ + Class containing data from the temperature sensors. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def get(self, location) -> float: + """ + Gets the temperature value at a temperature sensor location. + :param location: Location of the temperature sensor to request. + :return: Temperature at the requested location. + """ + return float() + + +class HEADING_STATE(enum.Enum): + """ + Lists the different states of the magnetic heading. + + | Enumerator | | + |:---:|:---:| + | GOOD | The heading is reliable and not affected by iron interferences. | + | OK | The heading is reliable, but affected by slight iron interferences. | + | NOT_GOOD | The heading is not reliable because affected by strong iron interferences. | + | NOT_CALIBRATED | The magnetometer has not been calibrated. | + | MAG_NOT_AVAILABLE | The magnetometer sensor is not available. | + """ + GOOD = enum.auto() + OK = enum.auto() + NOT_GOOD = enum.auto() + NOT_CALIBRATED = enum.auto() + MAG_NOT_AVAILABLE = enum.auto() + LAST = enum.auto() + +class MagnetometerData: + """ + Class containing data from the magnetometer sensor. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def magnetic_heading_state(self) -> HEADING_STATE: + """ + State of magnetic_heading. + """ + return HEADING_STATE() + + @magnetic_heading_state.setter + def magnetic_heading_state(self, magnetic_heading_state: Any) -> None: + pass + + @property + def magnetic_heading_accuracy(self) -> float: + """ + Accuracy of magnetic_heading measure in the range [0.0, 1.0]. + .. note:: + A negative value means that the magnetometer must be calibrated using **ZED **Sensor **Viewer tool. + """ + return float() + + @magnetic_heading_accuracy.setter + def magnetic_heading_accuracy(self, magnetic_heading_accuracy: Any) -> None: + pass + + @property + def effective_rate(self) -> float: + """ + Realtime data acquisition rate in hertz (Hz). + """ + return float() + + @effective_rate.setter + def effective_rate(self, effective_rate: Any) -> None: + pass + + @property + def magnetic_heading(self) -> float: + """ + Camera heading in degrees relative to the magnetic North Pole. + .. 
note::
+ The magnetic North Pole has an offset with respect to the geographic North Pole, depending on the geographic position of the camera.
+
+ .. note::
+ To get a correct magnetic heading, the magnetometer sensor must be calibrated using the **ZED Sensor Viewer** tool.
+ """
+ return float()
+
+ @magnetic_heading.setter
+ def magnetic_heading(self, magnetic_heading: Any) -> None:
+ pass
+
+ @property
+ def timestamp(self) -> int:
+ """
+ Data acquisition timestamp.
+ """
+ return int()
+
+ @timestamp.setter
+ def timestamp(self, timestamp: Any) -> None:
+ pass
+
+ @property
+ def is_available(self) -> bool:
+ """
+ Whether the magnetometer sensor is available in your camera.
+ """
+ return bool()
+
+ @is_available.setter
+ def is_available(self, is_available: Any) -> None:
+ pass
+
+ def get_magnetic_field_uncalibrated(self) -> np.array[float]:
+ """
+ Gets the uncalibrated magnetic field local vector in microtesla (μT).
+ .. note::
+ The magnetometer raw values are affected by soft and hard iron interferences.
+
+ .. note::
+ The sensor must be calibrated by placing the camera in the working environment and using the **ZED Sensor Viewer** tool.
+
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return np.array[float]()
+
+ def get_magnetic_field_calibrated(self) -> np.array[float]:
+ """
+ Gets the magnetic field local vector in microtesla (μT).
+ .. note::
+ To calibrate the magnetometer sensor, please use the **ZED Sensor Viewer** tool after placing the camera in the final operating environment.
+ """
+ return np.array[float]()
+
+
+class SensorsData:
+ """
+ Class containing all sensors data (except image sensors) to be used for positional tracking or environment study.
+
+ .. note::
+ Some data are not available in SVO and streaming input modes.
+
+ .. note::
+ They are specified by the note "Not available in SVO or STREAM mode." in the documentation of a specific data.
+
+ .. note::
+ If nothing is mentioned in the documentation, they are available in all input modes.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def camera_moving_state(self) -> CAMERA_MOTION_STATE:
+ """
+ Motion state of the camera.
+ """
+ return CAMERA_MOTION_STATE()
+
+ @camera_moving_state.setter
+ def camera_moving_state(self, camera_moving_state: Any) -> None:
+ pass
+
+ @property
+ def image_sync_trigger(self) -> int:
+ """
+ Indicates if the sensors data has been taken during a frame capture on the image sensor.
+ If the value is 1, the data has been retrieved during a left sensor frame acquisition (the time precision is linked to the IMU rate, therefore 800Hz == 1.3ms).
+ \n If the value is 0, the data has not been taken during a frame acquisition.
+ """
+ return int()
+
+ @image_sync_trigger.setter
+ def image_sync_trigger(self, image_sync_trigger: Any) -> None:
+ pass
+
+ def init_sensorsData(self, sensorsData: SensorsData) -> None:
+ """
+ Copy constructor.
+ :param sensorsData: sl.SensorsData object to copy.
+ """
+ pass
+
+ def get_imu_data(self) -> IMUData:
+ """
+ Gets the IMU data.
+ :return: sl.IMUData containing the IMU data.
+ """
+ return IMUData()
+
+ def get_barometer_data(self) -> BarometerData:
+ """
+ Gets the barometer data.
+ :return: sl.BarometerData containing the barometer data.
+ """
+ return BarometerData()
+
+ def get_magnetometer_data(self) -> MagnetometerData:
+ """
+ Gets the magnetometer data.
+ :return: sl.MagnetometerData containing the magnetometer data.
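+
+ A minimal sketch (assuming an opened sl.Camera object named ``zed`` and that
+ sl.Camera.get_sensors_data() is available, as in the rest of the SDK):
+
+ .. code-block:: text
+
+ sensors_data = sl.SensorsData()
+ if zed.get_sensors_data(sensors_data, sl.TIME_REFERENCE.CURRENT) == sl.ERROR_CODE.SUCCESS:
+ mag_data = sensors_data.get_magnetometer_data()
+ if mag_data.is_available:
+ print("Magnetic heading:", mag_data.magnetic_heading, "deg, state:", mag_data.magnetic_heading_state)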
+ """ + return MagnetometerData() + + def get_temperature_data(self) -> TemperatureData: + """ + Gets the temperature data. + :return: sl.TemperatureData containing the temperature data. + """ + return TemperatureData() + + +class IMUData: + """ + Class containing data from the IMU sensor. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def is_available(self) -> bool: + """ + Whether the IMU sensor is available in your camera. + """ + return bool() + + @is_available.setter + def is_available(self, is_available: Any) -> None: + pass + + @property + def timestamp(self) -> int: + """ + Data acquisition timestamp. + """ + return int() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @property + def effective_rate(self) -> float: + """ + Realtime data acquisition rate in hertz (Hz). + """ + return float() + + @effective_rate.setter + def effective_rate(self, effective_rate: Any) -> None: + pass + + def get_angular_velocity_uncalibrated(self, angular_velocity_uncalibrated = [0, 0, 0]) -> list[float]: + """ + Gets the angular velocity vector (3x1) of the gyroscope in deg/s (uncorrected from the IMU calibration). + :param angular_velocity_uncalibrated: List to be returned. It creates one by default. + :return: List fill with the raw angular velocity vector. + .. note:: + The value is the exact raw values from the IMU. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_angular_velocity(self, angular_velocity = [0, 0, 0]) -> list[float]: + """ + Gets the angular velocity vector (3x1) of the gyroscope in deg/s. + The value is corrected from bias, scale and misalignment. + :param angular_velocity: List to be returned. It creates one by default. + :return: List fill with the angular velocity vector. + .. note:: + The value can be directly ingested in an IMU fusion algorithm to extract a quaternion. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_linear_acceleration(self, linear_acceleration = [0, 0, 0]) -> list[float]: + """ + Gets the linear acceleration vector (3x1) of the gyroscope in m/s². + The value is corrected from bias, scale and misalignment. + :param linear_acceleration: List to be returned. It creates one by default. + :return: List fill with the linear acceleration vector. + .. note:: + The value can be directly ingested in an IMU fusion algorithm to extract a quaternion. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_linear_acceleration_uncalibrated(self, linear_acceleration_uncalibrated = [0, 0, 0]) -> list[float]: + """ + Gets the linear acceleration vector (3x1) of the gyroscope in m/s² (uncorrected from the IMU calibration). + The value is corrected from bias, scale and misalignment. + :param linear_acceleration_uncalibrated: List to be returned. It creates one by default. + :return: List fill with the raw linear acceleration vector. + .. note:: + The value is the exact raw values from the IMU. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_angular_velocity_covariance(self, angular_velocity_covariance = Matrix3f()) -> Matrix3f: + """ + Gets the covariance matrix of the angular velocity of the gyroscope in deg/s (get_angular_velocity()). + :param angular_velocity_covariance: sl.Matrix3f to be returned. It creates one by default. + :return: sl.Matrix3f filled with the covariance matrix of the angular velocity. + .. 
note::
+ Not available in SVO or STREAM mode.
+ """
+ return Matrix3f()
+
+ def get_linear_acceleration_covariance(self, linear_acceleration_covariance = Matrix3f()) -> Matrix3f:
+ """
+ Gets the covariance matrix of the linear acceleration of the accelerometer in m/s² (get_linear_acceleration()).
+ :param linear_acceleration_covariance: sl.Matrix3f to be returned. It creates one by default.
+ :return: sl.Matrix3f filled with the covariance matrix of the linear acceleration.
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return Matrix3f()
+
+ def get_pose_covariance(self, pose_covariance = Matrix3f()) -> Matrix3f:
+ """
+ Covariance matrix of the IMU pose (get_pose()).
+ :param pose_covariance: sl.Matrix3f to be returned. It creates one by default.
+ :return: sl.Matrix3f filled with the covariance matrix.
+ """
+ return Matrix3f()
+
+ def get_pose(self, pose = Transform()) -> Transform:
+ """
+ IMU pose (IMU 6-DoF fusion).
+ :param pose: sl.Transform() to be returned. It creates one by default.
+ :return: sl.Transform filled with the IMU pose.
+ """
+ return Transform()
+
+
+class HealthStatus:
+ """
+ Structure containing the self-diagnostic results of the image/depth checks.
+ That information can be retrieved by sl.Camera.get_health_status(), and enabled by sl.InitParameters.enable_image_validity_check.
+ \n
+ The default value of sl.InitParameters.enable_image_validity_check enables the fastest setting;
+ the integer given can be increased to include more advanced and heavier processing to detect issues (up to 3).
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def low_depth_reliability(self) -> bool:
+ """
+ This status indicates low depth map reliability.
+ If the images are unreliable or if the scene conditions are very challenging, this status reports a warning.
+ This is based on the depth confidence and the general depth distribution. It is typically due to an obstructed lens (including very close objects or
+ strong occlusions) or degraded conditions like heavy fog/water on the optics.
+ """
+ return bool()
+
+ @low_depth_reliability.setter
+ def low_depth_reliability(self, low_depth_reliability: Any) -> None:
+ pass
+
+ @property
+ def enabled(self) -> bool:
+ """
+ Indicates if the health check is enabled.
+ """
+ return bool()
+
+ @enabled.setter
+ def enabled(self, enabled: Any) -> None:
+ pass
+
+ @property
+ def low_image_quality(self) -> bool:
+ """
+ This status indicates poor image quality.
+ It can indicate a camera issue, like incorrect manual video settings, damaged hardware, a corrupted video stream from the camera,
+ dirt or other partial or total occlusion, a stuck ISP (black/white/green/purple images, incorrect exposure, etc.), or blurry images.
+ It also includes widely different left and right images, which leads to unavailable depth information.
+ Very low light is reported both by this status and by the dedicated HealthStatus.low_lighting.
+
+ .. note::
+ Frame tearing is currently not detected. Advanced blur detection requires heavier processing and is enabled only when setting InitParameters.enable_image_validity_check to 3 and above.
+ """
+ return bool()
+
+ @low_image_quality.setter
+ def low_image_quality(self, low_image_quality: Any) -> None:
+ pass
+
+ @property
+ def low_motion_sensors_reliability(self) -> bool:
+ """
+ This status indicates a motion sensors data reliability issue.
+ This indicates the IMU is providing low-quality data. 
Possible underlying causes can be related to the data stream, like corrupted data,
+ timestamp inconsistency, resonance frequencies, saturated sensors / very high acceleration or rotation, or shocks.
+ """
+ return bool()
+
+ @low_motion_sensors_reliability.setter
+ def low_motion_sensors_reliability(self, low_motion_sensors_reliability: Any) -> None:
+ pass
+
+ @property
+ def low_lighting(self) -> bool:
+ """
+ This status indicates a low-light scene.
+ As the cameras are passive sensors working in the visible range, they require some external light to operate.
+ This status warns if the lighting conditions become suboptimal or worse.
+ This is based on the scene illuminance in lux for the ZED X camera series (available with VIDEO_SETTINGS.SCENE_ILLUMINANCE).
+ For other camera models or when using SVO files, this is based on computer vision processing of the image characteristics.
+ """
+ return bool()
+
+ @low_lighting.setter
+ def low_lighting(self, low_lighting: Any) -> None:
+ pass
+
+
+class RecordingStatus:
+ """
+ Class containing information about the status of the recording.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def average_compression_time(self) -> float:
+ """
+ Average compression time in milliseconds since beginning of recording.
+ """
+ return float()
+
+ @average_compression_time.setter
+ def average_compression_time(self, average_compression_time: Any) -> None:
+ pass
+
+ @property
+ def status(self) -> bool:
+ """
+ Status of current frame.
+
+ True for success or False if the frame could not be written in the SVO file.
+ """
+ return bool()
+
+ @status.setter
+ def status(self, status: Any) -> None:
+ pass
+
+ @property
+ def is_recording(self) -> bool:
+ """
+ Reports if the recording has been enabled.
+ """
+ return bool()
+
+ @is_recording.setter
+ def is_recording(self, is_recording: Any) -> None:
+ pass
+
+ @property
+ def is_paused(self) -> bool:
+ """
+ Reports if the recording has been paused.
+ """
+ return bool()
+
+ @is_paused.setter
+ def is_paused(self, is_paused: Any) -> None:
+ pass
+
+ @property
+ def number_frames_ingested(self) -> int:
+ """
+ Number of frames ingested in SVO encoding/writing.
+ """
+ return int()
+
+ @number_frames_ingested.setter
+ def number_frames_ingested(self, number_frames_ingested: Any) -> None:
+ pass
+
+ @property
+ def current_compression_time(self) -> float:
+ """
+ Compression time for the current frame in milliseconds.
+ """
+ return float()
+
+ @current_compression_time.setter
+ def current_compression_time(self, current_compression_time: Any) -> None:
+ pass
+
+ @property
+ def number_frames_encoded(self) -> int:
+ """
+ Number of frames effectively encoded and written. Might be different from the number of frames ingested; the difference shows the encoder latency.
+ """
+ return int()
+
+ @number_frames_encoded.setter
+ def number_frames_encoded(self, number_frames_encoded: Any) -> None:
+ pass
+
+ @property
+ def average_compression_ratio(self) -> float:
+ """
+ Average compression ratio (% of raw size) since beginning of recording.
+ """
+ return float()
+
+ @average_compression_ratio.setter
+ def average_compression_ratio(self, average_compression_ratio: Any) -> None:
+ pass
+
+ @property
+ def current_compression_ratio(self) -> float:
+ """
+ Compression ratio (% of raw size) for the current frame.
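+
+ A minimal sketch (assuming recording was enabled beforehand and that
+ sl.Camera.get_recording_status() is available, as in the C++ API):
+
+ .. code-block:: text
+
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ status = zed.get_recording_status()
+ print("Frame written:", status.status, "- compression ratio:", status.current_compression_ratio, "%")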
+ """ + return float() + + @current_compression_ratio.setter + def current_compression_ratio(self, current_compression_ratio: Any) -> None: + pass + + +class Camera: + """ + This class serves as the primary interface between the camera and the various features provided by the SDK. + It enables seamless integration and access to a wide array of capabilities, including video streaming, depth sensing, object tracking, mapping, and much more. + + A standard program will use the Camera class like this: + .. code-block:: text + + + import pyzed.sl as sl + + def main(): + # --- Initialize a Camera object and open the ZED + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode for USB cameras + # init_params.camera_resolution = sl.RESOLUTION.HD1200 # Use HD1200 video mode for GMSL cameras + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + runtime_param = sl.RuntimeParameters() + + # --- Main loop grabbing images and depth values + # Capture 50 frames and stop + i = 0 + image = sl.Mat() + depth = sl.Mat() + while i < 50 : + # Grab an image + if zed.grab(runtime_param) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + # Display a pixel color + zed.retrieve_image(image, sl.VIEW.LEFT) # Get the left image + err, center_rgb = image.get_value(image.get_width() / 2, image.get_height() / 2) + if err == sl.ERROR_CODE.SUCCESS: + print("Image ", i, " center pixel R:", int(center_rgb[0]), " G:", int(center_rgb[1]), " B:", int(center_rgb[2])) + else: + print("Image ", i, " error:", err) + + # Display a pixel depth + zed.retrieve_measure(depth, sl.MEASURE.DEPTH) # Get the depth map + err, center_depth = depth.get_value(depth.get_width() / 2, depth.get_height() /2) + if err == sl.ERROR_CODE.SUCCESS: + print("Image ", i," center depth:", center_depth) + else: + print("Image ", i, " error:", err) + + i = i+1 + + # --- Close the Camera + zed.close() + return 0 + + if __name__ == "__main__": + main() + + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def close(self) -> None: + """ + Close an opened camera. + + If open() has been called, this method will close the connection to the camera (or the SVO file) and free the corresponding memory. + + If open() wasn't called or failed, this method won't have any effect. + + .. note:: + If an asynchronous task is running within the Camera object, like save_area_map(), this method will wait for its completion. + + .. note:: + To apply a new InitParameters, you will need to close the camera first and then open it again with the new InitParameters values. + + .. warning:: If the CUDA context was created by open(), this method will destroy it. + .. warning:: Therefore you need to make sure to delete your GPU sl.Mat objects before the context is destroyed. + """ + pass + + def open(self, py_init = None) -> ERROR_CODE: + """ + Opens the ZED camera from the provided InitParameters. + The method will also check the hardware requirements and run a self-calibration. + :param py_init: A structure containing all the initial parameters. Default: a preset of InitParameters. + :return: An error code giving information about the internal process. If ERROR_CODE.SUCCESS is returned, the camera is ready to use. 
Every other code indicates an error and the program should be stopped.
+
+ Here is the proper way to call this function:
+
+ .. code-block:: text
+
+ zed = sl.Camera() # Create a ZED camera object
+
+ init_params = sl.InitParameters() # Set configuration parameters
+ init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode
+ init_params.camera_fps = 60 # Set fps at 60
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ print(repr(err)) # Display the error
+ exit(-1)
+
+
+ .. note::
+ If you are having issues opening a camera, the diagnostic tool provided in the SDK can help you identify the problem.
+
+ - **Windows:** C:\\Program Files (x86)\\ZED SDK\\tools\\ZED Diagnostic.exe
+ - **Linux:** /usr/local/zed/tools/ZED Diagnostic
+ .. note::
+ If this method is called on an already opened camera, close() will be called.
+ """
+ return ERROR_CODE()
+
+ def is_opened(self) -> bool:
+ """
+ Reports if the camera has been successfully opened.
+ It has the same behavior as checking if open() returns ERROR_CODE.SUCCESS.
+ :return: True if the ZED camera is already set up, otherwise False.
+ """
+ return bool()
+
+ def read(self) -> ERROR_CODE:
+ """
+ Reads the latest images and IMU data from the camera and rectifies the images.
+
+ This method is meant to be called frequently in the main loop of your application.
+
+ .. note::
+ If no new frame is available before the timeout is reached, read() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected.
+
+ .. note::
+ Returned errors can be displayed using ``str()``.
+
+
+ :return: ERROR_CODE.SUCCESS means that no problem was encountered.
+ """
+ return ERROR_CODE()
+
+ def grab(self, py_runtime = None) -> ERROR_CODE:
+ """
+ This method will grab the latest images from the camera, rectify them, and compute the measurements (see retrieve_measure()) based on the RuntimeParameters provided (depth, point cloud, tracking, etc.)
+
+ As measures are created in this method, its execution can last a few milliseconds, depending on your parameters and your hardware.
+ \n The exact duration will mostly depend on the following parameters:
+
+ - InitParameters.enable_right_side_measure : Activating this parameter increases computation time.
+ - InitParameters.camera_resolution : Lower resolutions are faster to compute.
+ - enable_positional_tracking() : Activating the tracking is an additional load.
+ - RuntimeParameters.enable_depth : Disabling the depth computation is faster. However, it is required by most SDK features (tracking, spatial mapping, plane estimation, etc.)
+ - InitParameters.depth_mode : DEPTH_MODE.PERFORMANCE will run faster than DEPTH_MODE.ULTRA.
+ - InitParameters.depth_stabilization : Stabilizing the depth requires an additional computation load as it enables tracking.
+
+ This method is meant to be called frequently in the main loop of your application.
+ .. note::
+ Since ZED SDK 3.0, this method is blocking. It means that grab() will wait until a new frame is detected and available.
+
+ .. note::
+ If no new frame is available before the timeout is reached, grab() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected.
+
+
+ :param py_runtime: A structure containing all the runtime parameters. Default: a preset of RuntimeParameters.
+ :return: ERROR_CODE.SUCCESS means that no problem was encountered.
+ .. note::
+ Returned errors can be displayed using ``str()``.
+
+
+ .. 
code-block:: text
+
+ # Set runtime parameters after opening the camera
+ runtime_param = sl.RuntimeParameters()
+
+ image = sl.Mat()
+ while True:
+ # Grab an image
+ if zed.grab(runtime_param) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
+ zed.retrieve_image(image, sl.VIEW.LEFT) # Get the left image
+ # Use the image for your application
+ """
+ return ERROR_CODE()
+
+ def retrieve_image(self, py_mat, view: VIEW = VIEW.LEFT, mem_type: MEM = MEM.CPU, resolution = None) -> ERROR_CODE:
+ """
+ Retrieves images from the camera (or SVO file).
+
+ Multiple images are available along with a view of various measures for display purposes.
+ \n Available images and views are listed here.
+ \n As an example, VIEW.DEPTH can be used to get a gray-scale version of the depth map, but the actual depth values can be retrieved using retrieve_measure().
+ \n
+ \n **Pixels**
+ \n Most VIEW modes output images with 4 channels as BGRA (Blue, Green, Red, Alpha); for more information see the VIEW enum.
+ \n
+ \n **Memory**
+ \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called.
+ \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy.
+ \n If the provided sl.Mat object is already allocated and matches the requested image format, memory won't be re-allocated.
+ \n
+ \n **Image size**
+ \n By default, images are returned in the resolution provided by get_camera_information().camera_configuration.resolution.
+ \n However, you can request custom resolutions. For example, requesting a smaller image can help you speed up your application.
+ .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested.
+
+ :param py_mat: The sl.Mat to store the image. (Direction: out)
+ :param view: Defines the image you want (see VIEW). Default: VIEW.LEFT. (Direction: in)
+ :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU. (Direction: in)
+ :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution(0,0), the camera resolution will be taken. Default: (0,0). (Direction: in)
+ :return: ERROR_CODE.SUCCESS if the method succeeded.
+ :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example).
+ :return: ERROR_CODE.INVALID_RESOLUTION if the resolution is higher than the one provided by get_camera_information().camera_configuration.resolution.
+ :return: ERROR_CODE.FAILURE if another error occurred.
+
+ .. note::
+ As this method retrieves the images grabbed by the grab() method, it should be called afterward.
+
+
+ .. 
code-block:: text
+
+ # Create sl.Mat objects to store the images
+ left_image = sl.Mat()
+ while True:
+ # Grab an image
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
+ zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image
+
+ # Display the center pixel colors
+ err, left_center = left_image.get_value(left_image.get_width() / 2, left_image.get_height() / 2)
+ if err == sl.ERROR_CODE.SUCCESS:
+ print("left_image center pixel R:", int(left_center[0]), " G:", int(left_center[1]), " B:", int(left_center[2]))
+ else:
+ print("error:", err)
+ """
+ return ERROR_CODE()
+
+ def retrieve_measure(self, py_mat, measure: MEASURE = MEASURE.DEPTH, mem_type: MEM = MEM.CPU, resolution = None) -> ERROR_CODE:
+ """
+ Computed measures, like depth, point cloud, or normals, can be retrieved using this method.
+
+ Multiple measures are available after a grab() call. A full list is available here.
+
+ \n **Memory**
+ \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called.
+ \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy.
+ \n If the provided Mat object is already allocated and matches the requested image format, memory won't be re-allocated.
+
+ \n **Measure size**
+ \n By default, measures are returned in the resolution provided by get_camera_information() in CameraInformation.camera_resolution.
+ \n However, custom resolutions can be requested. For example, requesting a smaller measure can help you speed up your application.
+ .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested.
+
+ :param py_mat: The sl.Mat to store the measures. (Direction: out)
+ :param measure: Defines the measure you want (see MEASURE). Default: MEASURE.DEPTH. (Direction: in)
+ :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU (you cannot change this default value). (Direction: in)
+ :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution(0,0), the camera resolution will be taken. Default: (0,0). (Direction: in)
+ :return: ERROR_CODE.SUCCESS if the method succeeded.
+ :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example).
+ :return: ERROR_CODE.INVALID_RESOLUTION if the resolution is higher than the one provided by get_camera_information().camera_configuration.resolution.
+ :return: ERROR_CODE.FAILURE if another error occurred.
+
+ .. note::
+ As this method retrieves the measures computed by the grab() method, it should be called afterward.
+
+
+ .. 
code-block:: text
+
+ depth_map = sl.Mat()
+ point_cloud = sl.Mat()
+ resolution = zed.get_camera_information().camera_configuration.resolution
+ x = int(resolution.width / 2) # Center coordinates
+ y = int(resolution.height / 2)
+
+ while True:
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image
+
+ zed.retrieve_measure(depth_map, sl.MEASURE.DEPTH) # Get the depth map
+
+ # Read a depth value
+ err, center_depth = depth_map.get_value(x, y) # each depth map pixel is a float value
+ if err == sl.ERROR_CODE.SUCCESS: # +Inf is "too far", -Inf is "too close", NaN is "unknown/occlusion"
+ print("Depth value at center:", center_depth, init_params.coordinate_units)
+ zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA) # Get the point cloud
+
+ # Read a point cloud value
+ err, pc_value = point_cloud.get_value(x, y) # each point cloud pixel contains 4 floats, so we are using a numpy array
+
+ # Get 3D coordinates
+ if err == sl.ERROR_CODE.SUCCESS:
+ print("Point cloud coordinates at center: X=", pc_value[0], ", Y=", pc_value[1], ", Z=", pc_value[2])
+
+ # Get color information using the Python struct package to unpack the unsigned char array containing RGBA values
+ import struct
+ packed = struct.pack('f', pc_value[3])
+ char_array = struct.unpack('BBBB', packed)
+ print("Color values at center: R=", char_array[0], ", G=", char_array[1], ", B=", char_array[2], ", A=", char_array[3])
+
+ """
+ return ERROR_CODE()
+
+ def set_region_of_interest(self, py_mat, modules = [MODULE.ALL]) -> ERROR_CODE:
+ """
+ Defines a region of interest to focus on for all the SDK, discarding other parts.
+ :param py_mat: The Mat defining the requested region of interest; pixels lower than 127 will be discarded from all modules: depth, positional tracking, etc.
+ If empty, set all pixels as valid. The mask can be either at lower or higher resolution than the current images.
+ :return: An ERROR_CODE if something went wrong.
+ .. note::
+ The method supports U8_C1, U8_C3 and U8_C4 image types (MAT_TYPE).
+ """
+ return ERROR_CODE()
+
+ def get_region_of_interest(self, py_mat, resolution = None, module: MODULE = MODULE.ALL) -> ERROR_CODE:
+ """
+ Gets the previously set or computed region of interest.
+ :param py_mat: The Mat returned.
+ :param resolution: The optional size of the returned mask.
+ :return: An ERROR_CODE if something went wrong.
+ """
+ return ERROR_CODE()
+
+ def start_region_of_interest_auto_detection(self, roi_param = None) -> ERROR_CODE:
+ """
+ Starts the auto-detection of a region of interest to focus on for all the SDK, discarding other parts.
+ This detection is based on the general motion of the camera combined with the motion in the scene.
+ The camera must move for this process; an internal motion detector is used, based on the Positional Tracking module.
+ It requires a few hundred frames of motion to compute the mask.
+ :param roi_param: The RegionOfInterestParameters defining parameters for the detection.
+
+ .. note::
+ This module expects a static portion in the image, typically a fairly close vehicle hood at the bottom of the image.
+
+ This module may not work correctly, or may detect an incorrect background area, especially with slow motion, if there is no static element.
+ This module works asynchronously; the status can be obtained using get_region_of_interest_auto_detection_status(). The result is either automatically applied,
+ or can be retrieved using the get_region_of_interest() method.
+ :return: An ERROR_CODE if something went wrong.
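+
+ A minimal polling sketch (the REGION_OF_INTEREST_AUTO_DETECTION_STATE member
+ names used below are assumptions based on the C++ API):
+
+ .. code-block:: text
+
+ zed.start_region_of_interest_auto_detection()
+ while zed.get_region_of_interest_auto_detection_status() == sl.REGION_OF_INTEREST_AUTO_DETECTION_STATE.RUNNING:
+ zed.grab() # keep grabbing (and moving the camera) while the mask is computed
+ roi_mask = sl.Mat()
+ zed.get_region_of_interest(roi_mask) # retrieve the computed mask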
+ """ + return ERROR_CODE() + + def get_region_of_interest_auto_detection_status(self) -> REGION_OF_INTEREST_AUTO_DETECTION_STATE: + """ + Return the status of the automatic Region of Interest Detection + The automatic Region of Interest Detection is enabled by using startRegionOfInterestAutoDetection + :return: REGION_OF_INTEREST_AUTO_DETECTION_STATE the status + """ + return REGION_OF_INTEREST_AUTO_DETECTION_STATE() + + def start_publishing(self, communication_parameters) -> ERROR_CODE: + """ + Set this camera as a data provider for the Fusion module. + + Metadata is exchanged with the Fusion. + :param communication_parameters: A structure containing all the initial parameters. Default: a preset of CommunicationParameters. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def stop_publishing(self) -> ERROR_CODE: + """ + Set this camera as normal camera (without data providing). + + Stop to send camera data to fusion. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_svo_position(self, frame_number) -> None: + """ + Sets the playback cursor to the desired frame number in the SVO file. + + This method allows you to move around within a played-back SVO file. After calling, the next call to grab() will read the provided frame number. + + :param frame_number: The number of the desired frame to be decoded. + + .. note:: + The method works only if the camera is open in SVO playback mode. + + + .. code-block:: text + + + import pyzed.sl as sl + + def main(): + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.set_from_svo_file("path/to/my/file.svo") + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + # Loop between frames 0 and 50 + left_image = sl.Mat() + while zed.get_svo_position() < zed.get_svo_number_of_frames() - 1: + + print("Current frame: ", zed.get_svo_position()) + + # Loop if we reached frame 50 + if zed.get_svo_position() == 50: + zed.set_svo_position(0) + + # Grab an image + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image + + # Use the image in your application + + # Close the Camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + """ + pass + + def pause_svo_reading(self, status) -> None: + """ + Pauses or resumes SVO reading when using SVO Real time mode + :param status: If true, the reading is paused. If false, the reading is resumed. + .. note:: + This is only relevant for SVO InitParameters::svo_real_time_mode + """ + pass + + def get_svo_position(self) -> int: + """ + Returns the current playback position in the SVO file. + + The position corresponds to the number of frames already read from the SVO file, starting from 0 to n. + + Each grab() call increases this value by one (except when using InitParameters.svo_real_time_mode). + :return: The current frame position in the SVO file. -1 if the SDK is not reading an SVO. + + .. note:: + The method works only if the camera is open in SVO playback mode. + + + See set_svo_position() for an example. + """ + return int() + + def get_svo_number_of_frames(self) -> int: + """ + Returns the number of frames in the SVO file. + + :return: The total number of frames in the SVO file. -1 if the SDK is not reading a SVO. 
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return int()
+
+ def ingest_data_into_svo(self, data) -> ERROR_CODE:
+ """
+ Ingests an SVOData object into the SVO file.
+
+ :return: An error code indicating success or failure.
+
+ The method works only if the camera is open in SVO recording mode.
+ """
+ return ERROR_CODE()
+
+ def get_svo_data_keys(self) -> list:
+ """
+ Gets the external channels that can be retrieved from the SVO file.
+
+ :return: A list of keys.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return []
+
+ def retrieve_svo_data(self, key, data, ts_begin, ts_end) -> ERROR_CODE:
+ """
+ Retrieves SVO data from the SVO file at the given channel key and in the given timestamp range.
+
+ :return: An error code indicating success or failure.
+ :param key: The channel key.
+ :param data: The dict to be filled with SVOData objects, with timestamps as keys.
+ :param ts_begin: The beginning of the range.
+ :param ts_end: The end of the range.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return ERROR_CODE()
+
+ def set_camera_settings(self, settings: VIDEO_SETTINGS, value = -1) -> ERROR_CODE:
+ """
+ Sets the value of the requested camera setting (gain, brightness, hue, exposure, etc.). See VIDEO_SETTINGS.
+
+ :param settings: The setting to be set.
+ :param value: The value to set. Default: -1 (automatic mode).
+ :return: ERROR_CODE to indicate if the method was successful.
+
+ .. note::
+ The method works only if the camera is open in LIVE or STREAM mode.
+ """
+ return ERROR_CODE()
+
+ def set_camera_settings_range(self, settings: VIDEO_SETTINGS, mini = -1, maxi = -1) -> ERROR_CODE:
+ """
+ Sets the value of the requested camera setting (VIDEO_SETTINGS) that supports two values (min/max).
+
+ This method only works with the following VIDEO_SETTINGS:
+ - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+ - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+ - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+ :param settings: The setting to be set.
+ :param mini: The minimum value that can be reached (-1 or 0 gives full range).
+ :param maxi: The maximum value that can be reached (-1 or 0 gives full range).
+ :return: ERROR_CODE to indicate if the method was successful.
+
+ .. warning:: If the VIDEO_SETTINGS setting is not supported or mini >= maxi, it will return ERROR_CODE.INVALID_FUNCTION_PARAMETERS.
+ .. note::
+ The method works only if the camera is open in LIVE or STREAM mode.
+
+
+ .. code-block:: text
+
+ # For ZED X based products, set the automatic exposure from 2ms to 5ms. The expected exposure time cannot go beyond those values
+ zed.set_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE, 2000, 5000)
+ """
+ return ERROR_CODE()
+
+ def set_camera_settings_roi(self, settings: VIDEO_SETTINGS, roi, eye: SIDE = SIDE.BOTH, reset = False) -> ERROR_CODE:
+ """
+ Overloaded method for VIDEO_SETTINGS.AEC_AGC_ROI which takes a Rect as parameter.
+
+ :param settings: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact.
+ :param roi: Rect that defines the target to be applied for AEC/AGC computation. Must be given according to camera resolution.
+ :param eye: SIDE on which to be applied for AEC/AGC computation. Default: SIDE.BOTH
+ :param reset: Cancel the manual ROI and reset it to the full image. Default: False
+
+ .. note::
+ The method works only if the camera is open in LIVE or STREAM mode.
+
+
+ .. 
code-block:: text
+
+ roi = sl.Rect(42, 56, 120, 15)
+ zed.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH)
+
+ """
+ return ERROR_CODE()
+
+ def get_camera_settings(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int]:
+ """
+ Returns the current value of the requested camera setting (gain, brightness, hue, exposure, etc.). See VIDEO_SETTINGS.
+
+ Possible values (range) of each setting are available here.
+
+ :param setting: The requested setting.
+ :return: ERROR_CODE to indicate if the method was successful.
+ :return: The current value for the corresponding setting.
+
+ .. code-block:: text
+
+ err, gain = zed.get_camera_settings(sl.VIDEO_SETTINGS.GAIN)
+ if err == sl.ERROR_CODE.SUCCESS:
+ print("Current gain value:", gain)
+ else:
+ print("error:", err)
+
+
+ .. note::
+ The method works only if the camera is open in LIVE or STREAM mode.
+
+ .. note::
+ Settings are not exported in the SVO file format.
+ """
+ return tuple[ERROR_CODE, int]()
+
+ def get_camera_settings_range(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int, int]:
+ """
+ Returns the values of the requested camera setting (VIDEO_SETTINGS) that supports two values (min/max).
+
+ This method only works with the following VIDEO_SETTINGS:
+ - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+ - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+ - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+ Possible values (range) of each setting are available here.
+ :param setting: The requested setting.
+ :return: ERROR_CODE to indicate if the method was successful.
+ :return: The current value of the minimum for the corresponding setting.
+ :return: The current value of the maximum for the corresponding setting.
+
+ .. code-block:: text
+
+ err, aec_range_min, aec_range_max = zed.get_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE)
+ if err == sl.ERROR_CODE.SUCCESS:
+ print("Current AUTO_EXPOSURE_TIME_RANGE range values ==> min:", aec_range_min, "max:", aec_range_max)
+ else:
+ print("error:", err)
+
+
+ .. note::
+ Works only with ZED X cameras, which support low-level controls.
+ """
+ return tuple[ERROR_CODE, int, int]()
+
+ def get_camera_settings_roi(self, setting: VIDEO_SETTINGS, roi, eye: SIDE = SIDE.BOTH) -> ERROR_CODE:
+ """
+ Returns the current value of the currently used ROI for the camera setting AEC_AGC_ROI.
+
+ :param setting: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. (Direction: in)
+ :param roi: Roi that will be filled. (Direction: out)
+ :param eye: The requested side. Default: SIDE.BOTH (Direction: in)
+ :return: ERROR_CODE to indicate if the method was successful.
+
+ .. code-block:: text
+
+ roi = sl.Rect()
+ err = zed.get_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH)
+ print("Current ROI for AEC_AGC: " + str(roi.x) + " " + str(roi.y) + " " + str(roi.width) + " " + str(roi.height))
+
+
+ .. note::
+ Works only if the camera is open in LIVE or STREAM mode with VIDEO_SETTINGS.AEC_AGC_ROI.
+
+ .. note::
+ It will return ERROR_CODE.INVALID_FUNCTION_CALL or ERROR_CODE.INVALID_FUNCTION_PARAMETERS otherwise.
+ """ + return ERROR_CODE() + + def is_camera_setting_supported(self, setting: VIDEO_SETTINGS) -> bool: + """ + Returns if the video setting is supported by the camera or not + + :param setting: the video setting to test (Direction: in) + :return: True if the VIDEO_SETTINGS is supported by the camera, False otherwise + """ + return bool() + + def get_current_fps(self) -> float: + """ + Returns the current framerate at which the grab() method is successfully called. + + The returned value is based on the difference of camera get_timestamp() "timestamps" between two successful grab() calls. + + :return: The current SDK framerate + + .. warning:: The returned framerate (number of images grabbed per second) can be lower than InitParameters.camera_fps if the grab() function runs slower than the image stream or is called too often. + + .. code-block:: text + + current_fps = zed.get_current_fps() + print("Current framerate: ", current_fps) + """ + return float() + + def get_timestamp(self, time_reference: TIME_REFERENCE) -> Timestamp: + """ + Returns the timestamp in the requested TIME_REFERENCE. + + - When requesting the TIME_REFERENCE.IMAGE timestamp, the UNIX nanosecond timestamp of the latest grab() "grabbed" image will be returned. + \n This value corresponds to the time at which the entire image was available in the PC memory. As such, it ignores the communication time that corresponds to 2 or 3 frame-time based on the fps (ex: 33.3ms to 50ms at 60fps). + + - When requesting the TIME_REFERENCE.CURRENT timestamp, the current UNIX nanosecond timestamp is returned. + + This function can also be used when playing back an SVO file. + + :param time_reference: The selected TIME_REFERENCE. + :return: The Timestamp in nanosecond. 0 if not available (SVO file without compression). + + .. note:: + As this function returns UNIX timestamps, the reference it uses is common across several Camera instances. + + \n This can help to organized the grabbed images in a multi-camera application. + + .. code-block:: text + + last_image_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.IMAGE) + current_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.CURRENT) + print("Latest image timestamp: ", last_image_timestamp.get_nanoseconds(), "ns from Epoch.") + print("Current timestamp: ", current_timestamp.get_nanoseconds(), "ns from Epoch.") + """ + return Timestamp() + + def get_frame_dropped_count(self) -> int: + """ + Returns the number of frames dropped since grab() was called for the first time. + + A dropped frame corresponds to a frame that never made it to the grab method. + \n This can happen if two frames were extracted from the camera when grab() is called. The older frame will be dropped so as to always use the latest (which minimizes latency). + + :return: The number of frames dropped since the first grab() call. + """ + return int() + + def get_current_min_max_depth(self) -> tuple[ERROR_CODE, float, float]: + """ + Gets the current range of perceived depth. + :param min: Minimum depth detected (in selected sl.UNIT). (Direction: out) + :param max: Maximum depth detected (in selected sl.UNIT). (Direction: out) + :return: ERROR_CODE.SUCCESS if values can be extracted, ERROR_CODE.FAILURE otherwise. + """ + return tuple[ERROR_CODE, float, float]() + + def get_camera_information(self, resizer = None) -> CameraInformation: + """ + Returns the CameraInformation associated the camera being used. 
+
+ To ensure accurate calibration, it is possible to specify a custom resolution as a parameter when obtaining scaled information, as calibration parameters are resolution-dependent.
+ \n When reading an SVO file, the parameters will correspond to the camera used for recording.
+
+ :param resizer: You can specify a size different from the default image size to get the scaled camera information.
+ Default = (0,0) meaning original image size (given by get_camera_information().camera_configuration.resolution).
+ :return: CameraInformation containing the calibration parameters of the ZED, as well as serial number and firmware version.
+
+ .. warning:: The returned parameters might vary between two executions due to the self-calibration (InitParameters.camera_disable_self_calib) being run in the open() method.
+ .. note::
+ The calibration file SNXXXX.conf can be found in:
+
+ - **Windows:** C:/ProgramData/Stereolabs/settings/
+ - **Linux:** /usr/local/zed/settings/
+ """
+ return CameraInformation()
+
+ def get_runtime_parameters(self) -> RuntimeParameters:
+ """
+ Returns the RuntimeParameters used.
+ It corresponds to the structure given as argument to the grab() method.
+
+ :return: RuntimeParameters containing the parameters that define the behavior of the grab method.
+ """
+ return RuntimeParameters()
+
+ def get_init_parameters(self) -> InitParameters:
+ """
+ Returns the InitParameters associated with the Camera object.
+ It corresponds to the structure given as argument to the open() method.
+
+ :return: InitParameters containing the parameters used to initialize the Camera object.
+ """
+ return InitParameters()
+
+ def get_positional_tracking_parameters(self) -> PositionalTrackingParameters:
+ """
+ Returns the PositionalTrackingParameters used.
+
+ It corresponds to the structure given as argument to the enable_positional_tracking() method.
+
+ :return: PositionalTrackingParameters containing the parameters used for positional tracking initialization.
+ """
+ return PositionalTrackingParameters()
+
+ def get_spatial_mapping_parameters(self) -> SpatialMappingParameters:
+ """
+ Returns the SpatialMappingParameters used.
+
+ It corresponds to the structure given as argument to the enable_spatial_mapping() method.
+
+ :return: SpatialMappingParameters containing the parameters used for spatial mapping initialization.
+ """
+ return SpatialMappingParameters()
+
+ def get_object_detection_parameters(self, instance_module_id = 0) -> ObjectDetectionParameters:
+ """
+ Returns the ObjectDetectionParameters used.
+
+ It corresponds to the structure given as argument to the enable_object_detection() method.
+ :return: ObjectDetectionParameters containing the parameters used for object detection initialization.
+ """
+ return ObjectDetectionParameters()
+
+ def get_body_tracking_parameters(self, instance_id = 0) -> BodyTrackingParameters:
+ """
+ Returns the BodyTrackingParameters used.
+
+ It corresponds to the structure given as argument to the enable_body_tracking() method.
+
+ :return: BodyTrackingParameters containing the parameters used for body tracking initialization.
+ """
+ return BodyTrackingParameters()
+
+ def get_streaming_parameters(self) -> StreamingParameters:
+ """
+ Returns the StreamingParameters used.
+
+ It corresponds to the structure given as argument to the enable_streaming() method.
+
+ :return: StreamingParameters containing the parameters used for streaming initialization.
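+
+ A minimal sketch (assuming enable_streaming() is available, as in the C++ API;
+ the codec and port fields mirror sl.StreamingParameters):
+
+ .. code-block:: text
+
+ stream_params = sl.StreamingParameters()
+ stream_params.codec = sl.STREAMING_CODEC.H264
+ stream_params.port = 30000
+ if zed.enable_streaming(stream_params) == sl.ERROR_CODE.SUCCESS:
+ print("Streaming on port", zed.get_streaming_parameters().port)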
+ """ + return StreamingParameters() + + def enable_positional_tracking(self, py_tracking = None) -> ERROR_CODE: + """ + Initializes and starts the positional tracking processes. + + This method allows you to enable the position estimation of the SDK. It only has to be called once in the camera's lifetime. + \n When enabled, the position will be update at each grab() call. + \n Tracking-specific parameters can be set by providing PositionalTrackingParameters to this method. + + :param py_tracking: A structure containing all the specific parameters for the positional tracking. Default: a preset of PositionalTrackingParameters. + :return: ERROR_CODE.FAILURE if the PositionalTrackingParameters.area_file_path file wasn't found, ERROR_CODE.SUCCESS otherwise. + + .. warning:: The positional tracking feature benefits from a high framerate. We found HD720@60fps to be the best compromise between image quality and framerate. + + .. code-block:: text + + + import pyzed.sl as sl + + def main() : + # --- Initialize a Camera object and open the ZED + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + # Set tracking parameters + track_params = sl.PositionalTrackingParameters() + + # Enable positional tracking + err = zed.enable_positional_tracking(track_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Tracking error: ", repr(err)) + exit(-1) + + # --- Main loop + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and computes the tracking + camera_pose = sl.Pose() + zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD) + translation = camera_pose.get_translation().get() + print("Camera position: X=", translation[0], " Y=", translation[1], " Z=", translation[2]) + + # --- Close the Camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + """ + return ERROR_CODE() + + def update_self_calibration(self) -> None: + """ + Performs a new self-calibration process. + In some cases, due to temperature changes or strong vibrations, the stereo calibration becomes less accurate. + \n Use this method to update the self-calibration data and get more reliable depth values. + .. note:: + The self-calibration will occur at the next grab() call. + + .. note:: + This method is similar to the previous reset_self_calibration() used in 2.X SDK versions. + + .. warning:: New values will then be available in get_camera_information(), be sure to get them to still have consistent 2D <-> 3D conversion. + """ + pass + + def enable_body_tracking(self, body_tracking_parameters = None) -> ERROR_CODE: + """ + Initializes and starts the body tracking module. + + The body tracking module currently supports multiple classes of human skeleton detection with the BODY_TRACKING_MODEL.HUMAN_BODY_FAST, + BODY_TRACKING_MODEL "BODY_TRACKING_MODEL::HUMAN_BODY_MEDIUM" or BODY_TRACKING_MODEL "BODY_TRACKING_MODEL::HUMAN_BODY_ACCURATE". + \n This model only detects humans but provides a full skeleton map for each person. + + \n Detected objects can be retrieved using the retrieve_bodies() method. + + .. note:: + - **This Deep Learning detection module is not available for MODEL.ZED cameras (first generation ZED cameras).** + + .. 
+ .. note::
+ - This feature uses AI to locate objects and requires a powerful GPU. A GPU with at least 3GB of memory is recommended.
+
+ :param body_tracking_parameters: A structure containing all the specific parameters for the body tracking. Default: a preset of BodyTrackingParameters.
+ :return: ERROR_CODE.SUCCESS if everything went fine.
+ :return: ERROR_CODE.OBJECT_DETECTION_NOT_AVAILABLE if the AI model is missing or corrupted. In this case, the SDK needs to be reinstalled.
+ :return: ERROR_CODE.OBJECT_DETECTION_MODULE_NOT_COMPATIBLE_WITH_CAMERA if the camera used does not have an IMU (MODEL.ZED).
+ :return: ERROR_CODE.SENSORS_NOT_DETECTED if the camera model is correct (not MODEL.ZED) but the IMU is missing. This probably happens because InitParameters.sensors_required was set to False and the IMU was not found.
+ :return: ERROR_CODE.INVALID_FUNCTION_CALL if one of the **body_tracking_parameters** parameters is not compatible with other modules' parameters (for example, **depth_mode** has been set to DEPTH_MODE.NONE).
+ :return: ERROR_CODE.FAILURE otherwise.
+
+ .. code-block:: text
+
+ import pyzed.sl as sl
+
+ def main() :
+ # Create a ZED camera object
+ zed = sl.Camera()
+
+ # Open the camera
+ err = zed.open()
+ if err != sl.ERROR_CODE.SUCCESS:
+ print("Opening camera error:", repr(err))
+ exit(-1)
+
+ # Enable positional tracking (mandatory for body tracking)
+ tracking_params = sl.PositionalTrackingParameters()
+ err = zed.enable_positional_tracking(tracking_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ print("Enabling Positional Tracking error:", repr(err))
+ exit(-1)
+
+ # Set the body tracking parameters
+ body_tracking_params = sl.BodyTrackingParameters()
+
+ # Enable the body tracking
+ err = zed.enable_body_tracking(body_tracking_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ print("Enabling Body Tracking error:", repr(err))
+ exit(-1)
+
+ # Grab an image and detect bodies on it
+ bodies = sl.Bodies()
+ while True :
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ zed.retrieve_bodies(bodies)
+ print(len(bodies.body_list), "bodies detected")
+ # Use the bodies in your application
+
+ # Close the camera
+ zed.disable_body_tracking()
+ zed.close()
+
+ if __name__ == "__main__":
+ main()
+ """
+ return ERROR_CODE()
+
+ def disable_body_tracking(self, instance_id = 0, force_disable_all_instances = False) -> None:
+ """
+ Disables the body tracking process.
+
+ The body tracking module immediately stops and frees its memory allocations.
+
+ :param instance_id: Id of the body tracking instance. Used when multiple instances of the body tracking module are enabled at the same time.
+ :param force_disable_all_instances: Should disable all instances of the body tracking module or just **instance_id**.
+
+ .. note::
+ If the body tracking has been enabled, this method will automatically be called by close().
+ """
+ pass
+
+ def retrieve_bodies(self, bodies, body_tracking_runtime_parameters = None, instance_id = 0) -> ERROR_CODE:
+ """
+ Retrieves body tracking data from the body tracking module.
+
+ This method returns the result of the body tracking, whether the module is running synchronously or asynchronously.
+
+ - **Asynchronous:** this method immediately returns the last bodies tracked. If the current tracking isn't done, the bodies from the last tracking will be returned, and Bodies.is_new will be set to False.
+ - **Synchronous:** this method executes tracking and waits for it to finish before returning the tracked bodies.
+
+ It is recommended to keep the same Bodies object as the input of all calls to this method. This will enable the identification and tracking of every detected body.
+
+ :param bodies: The detected bodies will be saved into this object. If the object already contains data from a previous tracking, it will be updated, keeping a unique ID for the same person.
+ :param body_tracking_runtime_parameters: Body tracking runtime settings, can be changed at each tracking. In async mode, the parameters update is applied on the next iteration. If None, the previously used parameters will be used.
+ :param instance_id: Id of the body tracking instance. Used when multiple instances of the body tracking module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+ .. code-block:: text
+
+ bodies = sl.Bodies() # Unique Bodies object to be updated after each grab
+ # Main loop
+ while True:
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image from the camera
+ zed.retrieve_bodies(bodies)
+ print(len(bodies.body_list), "bodies detected")
+ """
+ return ERROR_CODE()
+
+ def set_body_tracking_runtime_parameters(self, body_tracking_runtime_parameters, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Sets the body tracking runtime parameters.
+
+ :param body_tracking_runtime_parameters: The body tracking runtime parameters to apply.
+ :param instance_module_id: Id of the body tracking instance. Used when multiple instances of the body tracking module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+ """
+ return ERROR_CODE()
+
+ def is_body_tracking_enabled(self, instance_id = 0) -> bool:
+ """
+ Tells if the body tracking module is enabled.
+ """
+ return bool()
+
+ def get_sensors_data(self, py_sensor_data, time_reference = TIME_REFERENCE.CURRENT) -> ERROR_CODE:
+ """
+ Retrieves the SensorsData (IMU, magnetometer, barometer) at a specific time reference.
+
+ - Calling get_sensors_data with TIME_REFERENCE.CURRENT gives you the latest sensors data received. Getting all the data requires calling this method at 800Hz in a dedicated thread.
+ - Calling get_sensors_data with TIME_REFERENCE.IMAGE gives you the sensors data at the time of the latest grab() "grabbed" image.
+
+ The SensorsData object contains the IMUData structure that was previously used in ZED SDK v2.X:
+ \n For IMU data, the values are provided in 2 ways:
+
+ - **Time-fused** pose estimation that can be accessed using:
+ * IMUData.get_pose "data.get_imu_data().get_pose()"
+ - **Raw values** from the IMU sensor:
+ * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+ * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+ \n Both the gyroscope and accelerometer values are synchronized.
+
+ The delta time between previous and current values can be calculated using data.imu.timestamp.
+
+ .. note::
+ The IMU quaternion (fused data) is given in the specified COORDINATE_SYSTEM of InitParameters.
+
+ :param py_sensor_data: The SensorsData variable to store the data. (Direction: out)
+ :param time_reference: Defines the reference from which you want the data to be expressed. Default: TIME_REFERENCE.CURRENT. (Direction: in)
+ :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+ :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+ :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+ :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **time_reference** is not valid. See Warning.
+
+ .. warning:: In SVO reading mode, TIME_REFERENCE.CURRENT is currently not available (yielding ERROR_CODE.INVALID_FUNCTION_PARAMETERS).
+ .. warning:: Only the quaternion data and barometer data (if available) at TIME_REFERENCE.IMAGE are available. Other values will be set to 0.
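+
+ Example (a minimal sketch, assuming an opened camera zed with built-in sensors):
+
+ .. code-block:: text
+
+ sensors_data = sl.SensorsData()
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ if zed.get_sensors_data(sensors_data, sl.TIME_REFERENCE.IMAGE) == sl.ERROR_CODE.SUCCESS:
+ imu_data = sensors_data.get_imu_data()
+ print("IMU orientation: ", imu_data.get_pose().get_orientation().get())
+ print("Angular velocity: ", imu_data.get_angular_velocity())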
+ """
+ return ERROR_CODE()
+
+ def get_sensors_data_batch(self, py_sensor_data) -> ERROR_CODE:
+ """
+ Retrieves all the SensorsData associated with the most recent grabbed frame, in the specified COORDINATE_SYSTEM of InitParameters.
+
+ For IMU data, the values are provided in 2 ways:
+
+ - **Time-fused** pose estimation that can be accessed using:
+ * IMUData.get_pose "data.get_imu_data().get_pose()"
+ - **Raw values** from the IMU sensor:
+ * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+ * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+ \n Both the gyroscope and accelerometer values are synchronized.
+
+ The delta time between previous and current values can be calculated using data.imu.timestamp.
+
+ :param py_sensor_data: The SensorsData list to store the data. (Direction: out)
+ :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+ :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+ :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+
+ .. code-block:: text
+
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ sensors_data = []
+ if (zed.get_sensors_data_batch(sensors_data) == sl.ERROR_CODE.SUCCESS):
+ for data in sensors_data:
+ print("IMU data: ", data.imu.get_angular_velocity(), data.imu.get_linear_acceleration())
+ print("IMU pose: ", data.imu.get_pose().get_translation())
+ print("IMU orientation: ", data.imu.get_pose().get_orientation().get())
+ """
+ return ERROR_CODE()
+
+ def set_imu_prior(self, transfom) -> ERROR_CODE:
+ """
+ Sets an optional IMU orientation hint that will be used to assist the tracking during the next grab().
+
+ This method can be used to assist the positional tracking rotation.
+
+ .. note::
+ This method is only effective if the camera has a model other than a MODEL.ZED, which does not contain internal sensors.
+
+ .. warning:: It needs to be called before the grab() method.
+
+ :param transfom: Transform to be ingested into IMU fusion. Note that only the rotation is used.
+ :return: ERROR_CODE.SUCCESS if the transform has been passed, ERROR_CODE.INVALID_FUNCTION_CALL otherwise (e.g. when used with a ZED camera which doesn't have IMU data).
+ """
+ return ERROR_CODE()
+
+ def get_position(self, py_pose, reference_frame: REFERENCE_FRAME = REFERENCE_FRAME.WORLD) -> POSITIONAL_TRACKING_STATE:
+ """
+ Retrieves the estimated position and orientation of the camera in the specified REFERENCE_FRAME "reference frame".
+
+ - Using REFERENCE_FRAME.WORLD, the returned pose relates to the initial position of the camera (PositionalTrackingParameters.initial_world_transform).
+ - Using REFERENCE_FRAME.CAMERA, the returned pose relates to the previous position of the camera.
+
+ If the tracking has been initialized with PositionalTrackingParameters.enable_area_memory set to True (default), this method can return POSITIONAL_TRACKING_STATE.SEARCHING.
+ This means that the tracking lost its link to the initial referential and is currently trying to relocate the camera. However, it will keep on providing position estimations.
+
+ :param py_pose: The pose containing the position of the camera and other information (timestamp, confidence). (Direction: out)
+ :param reference_frame: Defines the reference from which you want the pose to be expressed. Default: REFERENCE_FRAME.WORLD. (Direction: in)
+ :return: The current state of the tracking process.
+
+ .. note::
+ Extract Rotation Matrix: Pose.get_rotation_matrix()
+
+ .. note::
+ Extract Translation Vector: Pose.get_translation()
+
+ .. note::
+ Extract Orientation / Quaternion: Pose.get_orientation()
+
+ .. warning:: This method requires the tracking to be enabled (see enable_positional_tracking()).
+
+ .. note::
+ The position is provided in the InitParameters.coordinate_system. See COORDINATE_SYSTEM for its physical origin.
+
+ .. code-block:: text
+
+ while True:
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and compute the tracking
+ camera_pose = sl.Pose()
+ zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD)
+
+ translation = camera_pose.get_translation().get()
+ print("Camera position: X=", translation[0], " Y=", translation[1], " Z=", translation[2])
+ print("Camera Euler rotation: X=", camera_pose.get_euler_angles()[0], " Y=", camera_pose.get_euler_angles()[1], " Z=", camera_pose.get_euler_angles()[2])
+ print("Camera Rodrigues rotation: X=", camera_pose.get_rotation_vector()[0], " Y=", camera_pose.get_rotation_vector()[1], " Z=", camera_pose.get_rotation_vector()[2])
+ orientation = camera_pose.get_orientation().get()
+ print("Camera quaternion orientation: X=", orientation[0], " Y=", orientation[1], " Z=", orientation[2], " W=", orientation[3])
+ """
+ return POSITIONAL_TRACKING_STATE()
+
+ def get_positional_tracking_landmarks(self, landmarks) -> ERROR_CODE:
+ """
+ Gets the current positional tracking landmarks.
+
+ :param landmarks: The dictionary of landmark ids and landmarks. (Direction: out)
+ :return: ERROR_CODE indicating whether the function succeeded.
+ """
+ return ERROR_CODE()
+
+ def get_positional_tracking_landmarks2d(self, landmark2d) -> ERROR_CODE:
+ """
+ Gets the current positional tracking 2D landmarks.
+
+ :param landmark2d: The 2D landmarks. (Direction: out)
+ :return: ERROR_CODE indicating whether the function succeeded.
+ """
+ return ERROR_CODE()
+
+ def get_positional_tracking_status(self) -> PositionalTrackingStatus:
+ """
+ Returns the current status of the positional tracking module.
+
+ :return: PositionalTrackingStatus, the current status of the positional tracking module.
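+
+ Example (a minimal sketch; the attribute names shown, odometry_status and spatial_memory_status, follow recent SDK versions and may differ in yours):
+
+ .. code-block:: text
+
+ status = zed.get_positional_tracking_status()
+ print("Odometry status: ", repr(status.odometry_status))
+ print("Spatial memory status: ", repr(status.spatial_memory_status))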
+ """
+ return PositionalTrackingStatus()
+
+ def get_area_export_state(self) -> AREA_EXPORTING_STATE:
+ """
+ Returns the state of the spatial memory export process.
+
+ As Camera.save_area_map() only starts the exportation, this method allows you to know when the exportation has finished or if it failed.
+
+ :return: The current state of the spatial memory export process.
+ """
+ return AREA_EXPORTING_STATE()
+
+ def save_area_map(self, area_file_path = "") -> ERROR_CODE:
+ """
+ Saves the current area learning file. The file will contain spatial memory data generated by the tracking.
+
+ If the tracking has been initialized with PositionalTrackingParameters.enable_area_memory set to True (default), the method allows you to export the spatial memory.
+ \n Reloading the exported file in a future session with PositionalTrackingParameters.area_file_path initializes the tracking within the same referential.
+ \n This method is asynchronous, and only triggers the file generation. You can use get_area_export_state() to get the export state.
+ The positional tracking keeps running while exporting.
+
+ :param area_file_path: Path of an '.area' file to save the spatial memory database in.
+ :return: ERROR_CODE.FAILURE if the **area_file_path** file wasn't found, ERROR_CODE.SUCCESS otherwise.
+
+ See get_area_export_state()
+
+ .. note::
+ Please note that this method will also flush the area database that was built/loaded.
+
+ .. warning:: If the camera wasn't moved during the tracking session, or not enough, the spatial memory won't be usable and the file won't be exported.
+ .. warning:: In that case, get_area_export_state() will return AREA_EXPORTING_STATE.FILE_EMPTY.
+ .. warning:: A few meters (~3m) of translation or a full rotation should be enough to get usable spatial memory.
+ .. warning:: However, as it should be used for relocation purposes, visiting a significant portion of the environment is recommended before exporting.
+
+ .. code-block:: text
+
+ while True :
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and compute the tracking
+ camera_pose = sl.Pose()
+ zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD)
+
+ # Export the spatial memory for future sessions
+ zed.save_area_map("office.area") # The actual file will be created asynchronously.
+ print(repr(zed.get_area_export_state()))
+
+ # Close the camera
+ zed.close()
+ """
+ return ERROR_CODE()
+
+ def disable_positional_tracking(self, area_file_path = "") -> None:
+ """
+ Disables the positional tracking.
+
+ The positional tracking is immediately stopped. If a file path is given, save_area_map() will be called asynchronously. See get_area_export_state() to get the exportation state.
+ If the tracking has been enabled, this method will automatically be called by close().
+
+ :param area_file_path: If set, saves the spatial memory into an '.area' file. Default: (empty)
+ \n **area_file_path** is the name and path of the database, e.g. "path/to/file/myArea1.area".
+ """
+ pass
+
+ def is_positional_tracking_enabled(self) -> bool:
+ """
+ Tells if the positional tracking module is enabled.
+ """
+ return bool()
+
+ def reset_positional_tracking(self, path) -> ERROR_CODE:
+ """
+ Resets the tracking, and re-initializes the position with the given transformation matrix.
+
+ :param path: Position of the camera in the world frame when the method is called.
+ :return: ERROR_CODE.SUCCESS if the tracking has been reset, ERROR_CODE.FAILURE otherwise.
+
+ .. note::
+ Please note that this method will also flush the accumulated or loaded spatial memory.
+ """
+ return ERROR_CODE()
+
+ def enable_spatial_mapping(self, py_spatial = None) -> ERROR_CODE:
+ """
+ Initializes and starts the spatial mapping processes.
+
+ The spatial mapping will create a geometric representation of the scene based on both tracking data and 3D point clouds.
+ The resulting output can be a Mesh or a FusedPointCloud. It can be obtained by calling extract_whole_spatial_map() or retrieve_spatial_map_async().
+ Note that retrieve_spatial_map_async() should be called after request_spatial_map_async().
+
+ :param py_spatial: A structure containing all the specific parameters for the spatial mapping.
+ Default: a balanced parameter preset between geometric fidelity and output file size. For more information, see the SpatialMappingParameters documentation.
+ :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+ .. warning:: The tracking (enable_positional_tracking()) and the depth (RuntimeParameters.enable_depth) need to be enabled to use the spatial mapping.
+ .. warning:: The performance greatly depends on the **py_spatial** parameters.
+ .. warning:: Lower SpatialMappingParameters.range_meter and SpatialMappingParameters.resolution_meter for higher performance.
+ If the mapping framerate is too slow in live mode, consider using an SVO file, or choose a lower mesh resolution.
+
+ .. note::
+ This feature uses host memory (RAM) to store the 3D map. The maximum amount of memory allowed can be tweaked using the SpatialMappingParameters.
+ \n Exceeding the maximum memory allowed immediately stops the mapping.
+
+ .. code-block:: text
+
+ import pyzed.sl as sl
+
+ def main() :
+ # Create a ZED camera object
+ zed = sl.Camera()
+
+ # Set initial parameters
+ init_params = sl.InitParameters()
+ init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+ init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP # Use a right-handed Y-up coordinate system (the OpenGL one)
+ init_params.coordinate_units = sl.UNIT.METER # Set units in meters
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ exit(-1)
+
+ # Positional tracking needs to be enabled before using spatial mapping
+ tracking_parameters = sl.PositionalTrackingParameters()
+ err = zed.enable_positional_tracking(tracking_parameters)
+ if err != sl.ERROR_CODE.SUCCESS:
+ exit(-1)
+
+ # Enable spatial mapping
+ mapping_parameters = sl.SpatialMappingParameters()
+ err = zed.enable_spatial_mapping(mapping_parameters)
+ if err != sl.ERROR_CODE.SUCCESS:
+ exit(-1)
+
+ # Grab data for 500 frames
+ i = 0
+ mesh = sl.Mesh() # Create a mesh object
+ while i < 500 :
+ # For each new grab, mesh data is updated
+ if zed.grab() == sl.ERROR_CODE.SUCCESS :
+ # In the background, the spatial mapping will use newly retrieved images, depth and pose to update the mesh
+ mapping_state = zed.get_spatial_mapping_state()
+
+ # Print spatial mapping state
+ print("Images captured: ", i, "/ 500 || Spatial mapping state: ", repr(mapping_state))
+ i = i + 1
+
+ # Extract, filter and save the mesh in an .obj file
+ print("Extracting Mesh ...")
+ zed.extract_whole_spatial_map(mesh) # Extract the whole mesh
+ print("Filtering Mesh ...")
+ mesh.filter(sl.MESH_FILTER.LOW) # Filter the mesh (remove unnecessary vertices and faces)
+ print("Saving Mesh in mesh.obj ...")
+ mesh.save("mesh.obj") # Save the mesh in an obj file
+
+ # Disable tracking and mapping and close the camera
+ zed.disable_spatial_mapping()
+ zed.disable_positional_tracking()
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+ """
+ return ERROR_CODE()
+
+ def pause_spatial_mapping(self, status) -> None:
+ """
+ Pauses or resumes the spatial mapping processes.
+
+ As spatial mapping runs asynchronously, using this method can pause its computation to free some processing power, and resume it again later.
+ \n For example, it can be used to avoid mapping a specific area or to pause the mapping when the camera is static.
+
+ :param status: If True, the integration is paused. If False, the spatial mapping is resumed.
+ """
+ pass
+
+ def get_spatial_mapping_state(self) -> SPATIAL_MAPPING_STATE:
+ """
+ Returns the current spatial mapping state.
+
+ As the spatial mapping runs asynchronously, this method allows you to get reported errors or status info.
+
+ :return: The current state of the spatial mapping process.
+
+ See also SPATIAL_MAPPING_STATE
+ """
+ return SPATIAL_MAPPING_STATE()
+
+ def request_spatial_map_async(self) -> None:
+ """
+ Starts the spatial map generation process in a non-blocking thread from the spatial mapping process.
+
+ The spatial map generation can take a long time depending on the mapping resolution and covered area. This method will trigger the generation of a mesh without blocking the program.
+ You can get info about the current generation using get_spatial_map_request_status_async(), and retrieve the mesh using retrieve_spatial_map_async().
+
+ .. note::
+ Only one mesh can be generated at a time. If the previous mesh generation is not over, new calls of the method will be ignored.
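+
+ Example (a minimal sketch of the asynchronous request/retrieve workflow, assuming spatial mapping is enabled and grab() is running):
+
+ .. code-block:: text
+
+ import time
+
+ zed.request_spatial_map_async()
+ while zed.get_spatial_map_request_status_async() != sl.ERROR_CODE.SUCCESS:
+ time.sleep(0.1) # The mesh is being generated in the background
+ mesh = sl.Mesh()
+ zed.retrieve_spatial_map_async(mesh)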
+ """ + pass + + def get_spatial_map_request_status_async(self) -> ERROR_CODE: + """ + Returns the spatial map generation status. + + This status allows you to know if the mesh can be retrieved by calling retrieve_spatial_map_async(). + :return: ERROR_CODE.SUCCESS if the mesh is ready and not yet retrieved, otherwise ERROR_CODE.FAILURE. + """ + return ERROR_CODE() + + def retrieve_spatial_map_async(self, py_mesh) -> ERROR_CODE: + """ + Retrieves the current generated spatial map. + + After calling request_spatial_map_async(), this method allows you to retrieve the generated mesh or fused point cloud. + \n The Mesh or FusedPointCloud will only be available when get_spatial_map_request_status_async() returns ERROR_CODE.SUCCESS. + + :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out) + :return: ERROR_CODE.SUCCESS if the mesh is retrieved, otherwise ERROR_CODE.FAILURE. + + .. note:: + This method only updates the necessary chunks and adds the new ones in order to improve update speed. + + .. warning:: You should not modify the mesh / fused point cloud between two calls of this method, otherwise it can lead to a corrupted mesh / fused point cloud. + See request_spatial_map_async() for an example. + """ + return ERROR_CODE() + + def extract_whole_spatial_map(self, py_mesh) -> ERROR_CODE: + """ + Extract the current spatial map from the spatial mapping process. + + If the object to be filled already contains a previous version of the mesh / fused point cloud, only changes will be updated, optimizing performance. + + :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out) + + :return: ERROR_CODE.SUCCESS if the mesh is filled and available, otherwise ERROR_CODE.FAILURE. + + .. warning:: This is a blocking function. You should either call it in a thread or at the end of the mapping process. + The extraction can be long, calling this function in the grab loop will block the depth and tracking computation giving bad results. + """ + return ERROR_CODE() + + def find_plane_at_hit(self, coord, py_plane: Plane, parameters = PlaneDetectionParameters()) -> ERROR_CODE: + """ + Checks the plane at the given left image coordinates. + + This method gives the 3D plane corresponding to a given pixel in the latest left image grab() "grabbed". + \n The pixel coordinates are expected to be contained x=[0;width-1] and y=[0;height-1], where width/height are defined by the input resolution. + + :param coord: The image coordinate. The coordinate must be taken from the full-size image (Direction: in) + :param plane: The detected plane if the method succeeded. (Direction: out) + :param parameters: A structure containing all the specific parameters for the plane detection. Default: a preset of PlaneDetectionParameters. (Direction: in) + :return: ERROR_CODE.SUCCESS if a plane is found otherwise ERROR_CODE.PLANE_NOT_FOUND. + + .. note:: + The reference frame is defined by the RuntimeParameters.measure3D_reference_frame given to the grab() method. + """ + return ERROR_CODE() + + def find_floor_plane(self, py_plane, reset_tracking_floor_frame, floor_height_prior = float('nan'), world_orientation_prior = Rotation(Matrix3f().zeros()), floor_height_prior_tolerance = float('nan')) -> ERROR_CODE: + """ + Detect the floor plane of the scene. + + This method analyses the latest image and depth to estimate the floor plane of the scene. 
+ """
+ return ERROR_CODE()
+
+ def find_floor_plane(self, py_plane, reset_tracking_floor_frame, floor_height_prior = float('nan'), world_orientation_prior = Rotation(Matrix3f().zeros()), floor_height_prior_tolerance = float('nan')) -> ERROR_CODE:
+ """
+ Detects the floor plane of the scene.
+
+ This method analyses the latest image and depth to estimate the floor plane of the scene.
+ \n It expects the floor plane to be visible and bigger than other candidate planes, like a table.
+
+ :param py_plane: The detected floor plane if the method succeeded. (Direction: out)
+ :param reset_tracking_floor_frame: The transform to align the tracking with the floor plane. (Direction: out)
+ \n The initial position will then be at ground height, with the axes aligned with gravity.
+ \n The positional tracking needs to be reset/enabled with this transform as a parameter (PositionalTrackingParameters.initial_world_transform).
+ :param floor_height_prior: Prior set to locate the floor plane depending on the known camera distance to the ground, expressed in the same unit as the ZED. (Direction: in)
+ \n If the prior is too far from the detected floor plane, the method will return ERROR_CODE.PLANE_NOT_FOUND.
+ :param world_orientation_prior: Prior set to locate the floor plane depending on the known camera orientation to the ground. (Direction: in)
+ \n If the prior is too far from the detected floor plane, the method will return ERROR_CODE.PLANE_NOT_FOUND.
+ :param floor_height_prior_tolerance: Prior height tolerance, absolute value. (Direction: in)
+ :return: ERROR_CODE.SUCCESS if the floor plane is found and matches the priors (if defined), otherwise ERROR_CODE.PLANE_NOT_FOUND.
+
+ .. note::
+ The reference frame is defined by the sl.RuntimeParameters (measure3D_reference_frame) given to the grab() method.
+
+ .. note::
+ The length unit is defined by sl.InitParameters (coordinate_units).
+
+ .. note::
+ With the ZED, the assumption is made that the floor plane is the dominant plane in the scene. The ZED Mini uses gravity as a prior.
+ """
+ return ERROR_CODE()
+
+ def disable_spatial_mapping(self) -> None:
+ """
+ Disables the spatial mapping process.
+
+ The spatial mapping is immediately stopped.
+ \n If the mapping has been enabled, this method will automatically be called by close().
+
+ .. note::
+ This method frees the memory allocated for the spatial mapping; consequently, meshes and fused point clouds cannot be retrieved after this call.
+ """
+ pass
+
+ def enable_streaming(self, streaming_parameters = None) -> ERROR_CODE:
+ """
+ Creates a streaming pipeline.
+
+ :param streaming_parameters: A structure containing all the specific parameters for the streaming. Default: a preset of StreamingParameters.
+ :return: ERROR_CODE.SUCCESS if the streaming was successfully started.
+ :return: ERROR_CODE.INVALID_FUNCTION_CALL if open() was not successfully called before.
+ :return: ERROR_CODE.FAILURE if the RTSP streaming protocol was not able to start.
+ :return: ERROR_CODE.NO_GPU_COMPATIBLE if the streaming codec is not supported (in this case, use the H264 codec, which is supported on all NVIDIA GPUs the ZED SDK supports).
+
+ .. code-block:: text
+
+ import pyzed.sl as sl
+
+ def main() :
+ # Create a ZED camera object
+ zed = sl.Camera()
+
+ # Set initial parameters
+ init_params = sl.InitParameters()
+ init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS :
+ print(repr(err))
+ exit(-1)
+
+ # Enable streaming
+ stream_params = sl.StreamingParameters()
+ stream_params.port = 30000
+ stream_params.bitrate = 8000
+ err = zed.enable_streaming(stream_params)
+ if err != sl.ERROR_CODE.SUCCESS :
+ print(repr(err))
+ exit(-1)
+
+ # Grab data for 500 frames
+ i = 0
+ while i < 500 :
+ if zed.grab() == sl.ERROR_CODE.SUCCESS :
+ i = i+1
+
+ zed.disable_streaming()
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+ """
+ return ERROR_CODE()
+
+ def disable_streaming(self) -> None:
+ """
+ Disables the streaming initiated by enable_streaming().
+
+ .. note::
+ This method will automatically be called by close() if enable_streaming() was called.
+
+ See enable_streaming() for an example.
+ """
+ pass
+
+ def is_streaming_enabled(self) -> bool:
+ """
+ Tells if the streaming is running.
+
+ :return: True if the stream is running, False otherwise.
+ """
+ return bool()
+
+ def enable_recording(self, record) -> ERROR_CODE:
+ """
+ Creates an SVO file that will be filled with the frames grabbed between enable_recording() and disable_recording().
+
+ \n SVO files are custom video files containing the un-rectified images from the camera along with some metadata like timestamps or IMU orientation (if applicable).
+ \n They can be used to simulate a live ZED and test a sequence with various SDK parameters.
+ \n Depending on the application, various compression modes are available. See SVO_COMPRESSION_MODE.
+
+ :param record: A structure containing all the specific parameters for the recording such as filename and compression mode. Default: a preset of RecordingParameters.
+ :return: An ERROR_CODE that defines if the SVO file was successfully created and can be filled with images.
+
+ .. warning:: This method can be called multiple times during a camera lifetime, but if **video_filename** already exists, the file will be erased.
+
+ .. code-block:: text
+
+ import pyzed.sl as sl
+
+ def main() :
+ # Create a ZED camera object
+ zed = sl.Camera()
+ # Set initial parameters
+ init_params = sl.InitParameters()
+ init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+ init_params.coordinate_units = sl.UNIT.METER # Set units in meters
+ # Open the camera
+ err = zed.open(init_params)
+ if (err != sl.ERROR_CODE.SUCCESS):
+ print(repr(err))
+ exit(-1)
+
+ # Enable video recording
+ record_params = sl.RecordingParameters("myVideoFile.svo")
+ err = zed.enable_recording(record_params)
+ if (err != sl.ERROR_CODE.SUCCESS):
+ print(repr(err))
+ exit(-1)
+
+ # Grab data for 500 frames
+ i = 0
+ while i < 500 :
+ # Grab a new frame
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ # The grabbed frame is automatically recorded in the video file
+ i = i + 1
+
+ zed.disable_recording()
+ print("Video has been saved ...")
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+ """
+ return ERROR_CODE()
+
+ def disable_recording(self) -> None:
+ """
+ Disables the recording initiated by enable_recording() and closes the generated file.
+
+ .. note::
+ This method will automatically be called by close() if enable_recording() was called.
+
+ See enable_recording() for an example.
+ """ + pass + + def get_recording_status(self) -> RecordingStatus: + """ + Get the recording information. + :return: The recording state structure. For more details, see RecordingStatus. + """ + return RecordingStatus() + + def pause_recording(self, value = True) -> None: + """ + Pauses or resumes the recording. + :param status: If True, the recording is paused. If False, the recording is resumed. + """ + pass + + def get_recording_parameters(self) -> RecordingParameters: + """ + Returns the RecordingParameters used. + + It corresponds to the structure given as argument to the enable_recording() method. + :return: RecordingParameters containing the parameters used for recording initialization. + """ + return RecordingParameters() + + def get_health_status(self) -> HealthStatus: + """ + Get the Health information. + :return: The health state structure. For more details, see HealthStatus. + """ + return HealthStatus() + + def get_retrieve_image_resolution(self, resolution = None) -> Resolution: + """ + Get the Health information. + :return: The health state structure. For more details, see HealthStatus. + """ + return Resolution() + + def get_retrieve_measure_resolution(self, resolution = None) -> Resolution: + """ + Get the Health information. + :return: The health state structure. For more details, see HealthStatus. + """ + return Resolution() + + def enable_object_detection(self, object_detection_parameters = None) -> ERROR_CODE: + """ + Initializes and starts object detection module. + + The object detection module currently support multiple StereoLabs' model for different purposes: "MULTI_CLASS", "PERSON_HEAD" + \n The full list of model is available through OBJECT_DETECTION_MODEL and the full list of detectable objects is available through OBJECT_CLASS and OBJECT_SUBCLASS. + + \n Detected objects can be retrieved using the retrieve_objects() method. + + \n Alternatively, the object detection module supports custom class of objects with the OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS (see ingestCustomBoxObjects or ingestCustomMaskObjects) + or OBJECT_DETECTION_MODEL.CUSTOM_YOLOLIKE_BOX_OBJECTS (see ObjectDetectionParameters.custom_onnx_file). + + \n Detected custom objects can be retrieved using the retrieve_custom_objects() method. + + .. note:: + - **This Depth Learning detection module is not available MODEL.ZED cameras.** + + .. note:: + - This feature uses AI to locate objects and requires a powerful GPU. A GPU with at least 3GB of memory is recommended. + + + :param object_detection_parameters: A structure containing all the specific parameters for the object detection. Default: a preset of ObjectDetectionParameters. + :return: ERROR_CODE.SUCCESS if everything went fine. + :return: ERROR_CODE.OBJECT_DETECTION_NOT_AVAILABLE if the AI model is missing or corrupted. In this case, the SDK needs to be reinstalled + :return: ERROR_CODE.OBJECT_DETECTION_MODULE_NOT_COMPATIBLE_WITH_CAMERA if the camera used does not have an IMU (MODEL.ZED). + :return: ERROR_CODE.SENSORS_NOT_DETECTED if the camera model is correct (not MODEL.ZED) but the IMU is missing. It probably happens because InitParameters.sensors_required was set to False and that IMU has not been found. + :return: ERROR_CODE.INVALID_FUNCTION_CALL if one of the **object_detection_parameters** parameter is not compatible with other modules parameters (for example, **depth_mode** has been set to DEPTH_MODE.NONE). + :return: ERROR_CODE.FAILURE otherwise. + + .. 
+
+ .. note::
+ The IMU gives the gravity vector that helps in the 3D box localization. Therefore, the object detection module is not available for the MODEL.ZED models.
+
+ .. code-block:: text
+
+ import pyzed.sl as sl
+
+ def main():
+ # Create a ZED camera object
+ zed = sl.Camera()
+
+ # Open the camera
+ err = zed.open()
+ if err != sl.ERROR_CODE.SUCCESS:
+ print("Opening camera error:", repr(err))
+ exit(-1)
+
+ # Enable positional tracking (mandatory for object detection)
+ tracking_params = sl.PositionalTrackingParameters()
+ err = zed.enable_positional_tracking(tracking_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ print("Enabling Positional Tracking error:", repr(err))
+ exit(-1)
+
+ # Set the object detection parameters
+ object_detection_params = sl.ObjectDetectionParameters()
+
+ # Enable the object detection
+ err = zed.enable_object_detection(object_detection_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ print("Enabling Object Detection error:", repr(err))
+ exit(-1)
+
+ # Grab an image and detect objects on it
+ objects = sl.Objects()
+ while True:
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ zed.retrieve_objects(objects)
+ print(len(objects.object_list), "objects detected")
+ # Use the objects in your application
+
+ # Close the camera
+ zed.disable_object_detection()
+ zed.close()
+
+ if __name__ == "__main__":
+ main()
+ """
+ return ERROR_CODE()
+
+ def disable_object_detection(self, instance_module_id = 0, force_disable_all_instances = False) -> None:
+ """
+ Disables the object detection process.
+
+ The object detection module immediately stops and frees its memory allocations.
+
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :param force_disable_all_instances: Should disable all instances of the object detection module or just **instance_module_id**.
+
+ .. note::
+ If the object detection has been enabled, this method will automatically be called by close().
+ """
+ pass
+
+ def set_object_detection_runtime_parameters(self, object_detection_parameters, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Sets the object detection runtime parameters.
+
+ :param object_detection_parameters: The object detection runtime parameters to apply.
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+ """
+ return ERROR_CODE()
+
+ def set_custom_object_detection_runtime_parameters(self, custom_object_detection_parameters, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Sets the custom object detection runtime parameters.
+
+ :param custom_object_detection_parameters: The custom object detection runtime parameters to apply.
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+ """
+ return ERROR_CODE()
+
+ def retrieve_objects(self, py_objects, py_object_detection_parameters = None, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Retrieves objects detected by the object detection module.
+
+ This method returns the result of the object detection, whether the module is running synchronously or asynchronously.
+
+ - **Asynchronous:** this method immediately returns the last objects detected. If the current detection isn't done, the objects from the last detection will be returned, and Objects.is_new will be set to False.
+ - **Synchronous:** this method executes detection and waits for it to finish before returning the detected objects.
+
+ It is recommended to keep the same Objects object as the input of all calls to this method. This will enable the identification and tracking of every object detected.
+
+ :param py_objects: The detected objects will be saved into this object. If the object already contains data from a previous detection, it will be updated, keeping a unique ID for the same object. (Direction: out)
+ :param py_object_detection_parameters: Object detection runtime settings, can be changed at each detection. In async mode, the parameters update is applied on the next iteration. If None, the previously passed parameters are used. (Direction: in)
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+ .. code-block:: text
+
+ objects = sl.Objects()
+ while True:
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ zed.retrieve_objects(objects)
+ object_list = objects.object_list
+ for i in range(len(object_list)):
+ print(repr(object_list[i].label))
+ """
+ return ERROR_CODE()
+
+ def retrieve_custom_objects(self, py_objects, custom_object_detection_parameters = None, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Retrieves custom objects detected by the object detection module.
+
+ If the object detection module is initialized with OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS, the objects retrieved will be the ones from ingest_custom_box_objects() or ingest_custom_mask_objects().
+ If the object detection module is initialized with OBJECT_DETECTION_MODEL.CUSTOM_YOLOLIKE_BOX_OBJECTS, the objects retrieved will be the ones detected using the optimized ObjectDetectionParameters.custom_onnx_file model.
+
+ When running the detection internally, this method returns the result of the object detection, whether the module is running synchronously or asynchronously.
+
+ - **Asynchronous:** this method immediately returns the last objects detected. If the current detection isn't done, the objects from the last detection will be returned, and Objects.is_new will be set to False.
+ - **Synchronous:** this method executes detection and waits for it to finish before returning the detected objects.
+
+ It is recommended to keep the same Objects object as the input of all calls to this method. This will enable the identification and tracking of every object detected.
+
+ :param py_objects: The detected objects will be saved into this object. If the object already contains data from a previous detection, it will be updated, keeping a unique ID for the same object.
+ :param custom_object_detection_parameters: Custom object detection runtime settings, can be changed at each detection. In async mode, the parameters update is applied on the next iteration. If None, the previously passed parameters are used.
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+ .. note::
+ The set_custom_object_detection_runtime_parameters() and retrieve_objects() methods should be used instead.
+
+ .. code-block:: text
+
+ objects = sl.Objects()
+ while True:
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ zed.retrieve_custom_objects(objects)
+ object_list = objects.object_list
+ for i in range(len(object_list)):
+ print(repr(object_list[i].label))
+ """
+ return ERROR_CODE()
+
+ def get_objects_batch(self, trajectories, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Gets a batch of detected objects.
+
+ .. warning:: This method needs to be called after retrieve_objects(), otherwise trajectories will be empty.
+ \n It is the retrieve_objects() method that ingests the current/live objects into the batching queue.
+
+ :param trajectories: List of sl.ObjectsBatch that will be filled by the batching queue process. An empty list should be passed to the function.
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine.
+ :return: ERROR_CODE.INVALID_FUNCTION_CALL if the batching module is not available (TensorRT!=7.1) or if object tracking was not enabled.
+
+ .. note::
+ Most of the time, the list will be empty and will be filled every BatchParameters.latency.
+
+ .. code-block:: text
+
+ objects = sl.Objects() # Unique Objects to be updated after each grab
+ while True: # Main loop
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image from the camera
+ zed.retrieve_objects(objects) # Call retrieve_objects so that objects are ingested in the batching system
+ trajectories = [] # Create an empty list of trajectories
+ zed.get_objects_batch(trajectories) # Get batch of objects
+ print("Size of batch: {}".format(len(trajectories)))
+ """
+ return ERROR_CODE()
+
+ def ingest_custom_box_objects(self, objects_in, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Feeds the 3D object tracking module with your own 2D bounding boxes from your own detection algorithm.
+
+ :param objects_in: List of CustomBoxObjectData to feed the object detection.
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine.
+
+ .. note::
+ The detection should be done on the current grabbed left image, as the internal process will use all currently available data to extract 3D information and perform object tracking.
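+
+ Example (a minimal sketch following the custom detector workflow; the box coordinates and label are placeholders, and numpy is assumed to be imported as np):
+
+ .. code-block:: text
+
+ objects_in = []
+ obj = sl.CustomBoxObjectData()
+ obj.unique_object_id = sl.generate_unique_id()
+ obj.label = 0
+ obj.probability = 0.9
+ obj.is_grounded = True
+ obj.bounding_box_2d = np.array([[50, 50], [250, 50], [250, 250], [50, 250]]) # 2D box corners, clockwise from top-left
+ objects_in.append(obj)
+ zed.ingest_custom_box_objects(objects_in)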
+ """
+ return ERROR_CODE()
+
+ def ingest_custom_mask_objects(self, objects_in, instance_module_id = 0) -> ERROR_CODE:
+ """
+ Feeds the 3D object tracking module with your own 2D bounding boxes with masks from your own detection algorithm.
+
+ :param objects_in: List of CustomMaskObjectData to feed the object detection.
+ :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+ :return: ERROR_CODE.SUCCESS if everything went fine.
+
+ .. note::
+ The detection should be done on the current grabbed left image, as the internal process will use all currently available data to extract 3D information and perform object tracking.
+ """
+ return ERROR_CODE()
+
+ def is_object_detection_enabled(self, instance_id = 0) -> bool:
+ """
+ Tells if the object detection module is enabled.
+ """
+ return bool()
+
+ @staticmethod
+ def get_sdk_version() -> str:
+ """
+ Returns the version of the currently installed ZED SDK.
+
+ :return: The ZED SDK version as a string with the following format: MAJOR.MINOR.PATCH
+
+ .. code-block:: text
+
+ print(sl.Camera.get_sdk_version())
+ """
+ return str()
+
+ @staticmethod
+ def get_device_list() -> list[DeviceProperties]:
+ """
+ Lists all the connected devices with their associated information.
+
+ This method lists all the cameras available and provides their serial number, models and other information.
+
+ :return: The device properties for each connected camera.
+ """
+ return list[DeviceProperties]()
+
+ @staticmethod
+ def get_streaming_device_list() -> list[StreamingProperties]:
+ """
+ Lists all the streaming devices with their associated information.
+
+ :return: The streaming properties for each connected camera.
+
+ .. warning:: This method takes around 2 seconds to make sure all network information has been captured. Make sure to run this method in a thread.
+ """
+ return list[StreamingProperties]()
+
+ @staticmethod
+ def reboot(sn : int, full_reboot: bool = True) -> ERROR_CODE:
+ """
+ Performs a hardware reset of the ZED 2 and newer camera models.
+
+ :param sn: Serial number of the camera to reset, or 0 to reset the first camera detected.
+ :param full_reboot: Perform a full reboot (sensors and video modules) if True, otherwise only the video module will be rebooted.
+ :return: ERROR_CODE.SUCCESS if everything went fine.
+ :return: ERROR_CODE.CAMERA_NOT_DETECTED if no camera was detected.
+ :return: ERROR_CODE.FAILURE otherwise.
+
+ .. note::
+ This method only works for ZED 2, ZED 2i, and newer camera models.
+
+ .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting.
+ """
+ return ERROR_CODE()
+
+ @staticmethod
+ def reboot_from_input(input_type: INPUT_TYPE) -> ERROR_CODE:
+ """
+ Performs a hardware reset of all devices matching the InputType.
+
+ :param input_type: Input type of the devices to reset.
+ :return: ERROR_CODE.SUCCESS if everything went fine.
+ :return: ERROR_CODE.CAMERA_NOT_DETECTED if no camera was detected.
+ :return: ERROR_CODE.FAILURE otherwise.
+ :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS for SVOs and streams.
+
+ .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting.
+ """
+ return ERROR_CODE()
+
+
+class COMM_TYPE(enum.Enum):
+ """
+ Lists the different types of communications available for the Fusion module.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | LOCAL_NETWORK | The sender and receiver are on the same local network and communicate by RTP. The communication can be affected by the local network load. |
+ | INTRA_PROCESS | Both sender and receiver are declared by the same process and can be in different threads. This type of communication is optimized. |
+ """
+ LOCAL_NETWORK = enum.auto()
+ INTRA_PROCESS = enum.auto()
+ LAST = enum.auto()
+
+class FUSION_ERROR_CODE(enum.Enum):
+ """
+ Lists the types of error that can be raised by the Fusion.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | GNSS_DATA_NEED_FIX | GNSS data needs a fix status in order to run fusion. |
+ | GNSS_DATA_COVARIANCE_MUST_VARY | Ingested covariance data must vary between ingests. |
+ | BODY_FORMAT_MISMATCH | The senders are using different body formats. Consider changing them. |
+ | MODULE_NOT_ENABLED | The required module was not enabled. |
+ | SOURCE_MISMATCH | Some sources are provided by SVO and others by LIVE stream. |
+ | CONNECTION_TIMED_OUT | Connection timed out. Unable to reach the sender. Verify the sender's IP/port. |
+ | MEMORY_ALREADY_USED | Intra-process shared memory allocation issue. Multiple connections to the same data. |
+ | INVALID_IP_ADDRESS | The provided IP address format is incorrect. Please provide the IP in the format 'a.b.c.d', where (a, b, c, d) are numbers between 0 and 255. |
+ | CONNECTION_ERROR | Something went wrong in the connection between the sender and the receiver. |
+ | FAILURE | Standard code for unsuccessful behavior. |
+ | SUCCESS | Standard code for successful behavior. |
+ | FUSION_INCONSISTENT_FPS | Significant differences observed between senders' FPS. |
+ | FUSION_FPS_TOO_LOW | At least one sender has an FPS lower than 10 FPS. |
+ | INVALID_TIMESTAMP | Problem detected with the ingested timestamp. Sample data will be ignored. |
+ | INVALID_COVARIANCE | Problem detected with the ingested covariance. Sample data will be ignored. |
+ | NO_NEW_DATA_AVAILABLE | All data from all sources has been consumed. No new data is available for processing. |
+ """
+ GNSS_DATA_NEED_FIX = enum.auto()
+ GNSS_DATA_COVARIANCE_MUST_VARY = enum.auto()
+ BODY_FORMAT_MISMATCH = enum.auto()
+ MODULE_NOT_ENABLED = enum.auto()
+ SOURCE_MISMATCH = enum.auto()
+ CONNECTION_TIMED_OUT = enum.auto()
+ MEMORY_ALREADY_USED = enum.auto()
+ INVALID_IP_ADDRESS = enum.auto()
+ FAILURE = enum.auto()
+ SUCCESS = enum.auto()
+ FUSION_INCONSISTENT_FPS = enum.auto()
+ FUSION_FPS_TOO_LOW = enum.auto()
+ INVALID_TIMESTAMP = enum.auto()
+ INVALID_COVARIANCE = enum.auto()
+ NO_NEW_DATA_AVAILABLE = enum.auto()
+ def __str__(self) -> str:
+ pass
+
+ def __repr__(self) -> str:
+ pass
+
+
+def _initialize_fusion_error_codes() -> None:
+ """
+ See FUSION_ERROR_CODE for the list of errors that can be raised by the Fusion.
+ """
+ pass
+
+class SENDER_ERROR_CODE(enum.Enum):
+ """
+ Lists the types of error that can be raised during the Fusion by senders.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | DISCONNECTED | The sender has been disconnected. |
+ | SUCCESS | Standard code for successful behavior. |
+ | GRAB_ERROR | The sender encountered a grab error. |
+ | INCONSISTENT_FPS | The sender does not run at a constant frame rate. |
+ | FPS_TOO_LOW | The frame rate of the sender is lower than 10 FPS. |
+ """
+ DISCONNECTED = enum.auto()
+ SUCCESS = enum.auto()
+ GRAB_ERROR = enum.auto()
+ INCONSISTENT_FPS = enum.auto()
+ FPS_TOO_LOW = enum.auto()
+ def __str__(self) -> str:
+ pass
+
+ def __repr__(self) -> str:
+ pass
+
+
+class POSITION_TYPE(enum.Enum):
+ """
+ Lists the types of possible position outputs.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | RAW | The output position will be the raw position data. |
+ | FUSION | The output position will be the fused position projected into the requested camera repository. |
+ """
+ RAW = enum.auto()
+ FUSION = enum.auto()
+ LAST = enum.auto()
+
+class FUSION_REFERENCE_FRAME(enum.Enum):
+ """
+ Enum to define the reference frame of the Fusion SDK.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | WORLD | The world frame is the reference frame of the world according to the fused positional tracking. |
+ | BASELINK | The base link frame is the reference frame where the camera calibration is given. |
+ """
+ WORLD = enum.auto()
+ BASELINK = enum.auto()
+
+class CommunicationParameters:
+ """
+ Holds the communication parameters used to configure the connection between senders and receiver.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def comm_type(self) -> COMM_TYPE:
+ """
+ The type of communication used.
+ """
+ return COMM_TYPE()
+
+ @property
+ def port(self) -> int:
+ """
+ The port used for streaming the data.
+ """
+ return int()
+
+ @property
+ def ip_address(self) -> str:
+ """
+ The IP address of the sender.
+ """
+ return str()
+
+ def __dealloc__(self) -> None:
+ """
+ Destructor.
+ """
+ pass
+
+ def set_for_shared_memory(self) -> None:
+ """
+ Sets up the communication to use shared memory for an intra-process workflow, with senders and receiver in different threads.
+ """
+ pass
+
+ def set_for_local_network(self, port : int, ip : str = "") -> None:
+ """
+ Sets up the local network connection information.
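+
+ Example (a minimal sketch; the IP and port values are placeholders):
+
+ .. code-block:: text
+
+ comm_params = sl.CommunicationParameters()
+ comm_params.set_for_local_network(30000, "192.168.1.100")
+ # The resulting parameters can then be used to connect a sender to the Fusion,
+ # e.g. when calling start_publishing() on the sender side.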
+ :param json_config_filename: The name of the JSON file containing the configuration. + :param serial_number: The serial number of the ZED Camera you want to retrieve. + :param coord_system: The COORDINATE_SYSTEM in which you want the World Pose to be in. + :param unit: The UNIT in which you want the World Pose to be in. + + :return: A FusionConfiguration for the requested camera. + .. note:: + Empty if no data were found for the requested camera. + """ + return FusionConfiguration() + +def read_fusion_configuration_file(json_config_filename : str, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> list[FusionConfiguration]: + """ + Read a Configuration JSON file to configure a fusion process. + :param json_config_filename: The name of the JSON file containing the configuration. + :param coord_sys: The COORDINATE_SYSTEM in which you want the World Pose to be in. + :param unit: The UNIT in which you want the World Pose to be in. + + :return: A list of FusionConfiguration for all the camera present in the file. + .. note:: + Empty if no data were found for the requested camera. + """ + return list[FusionConfiguration]() + +def read_fusion_configuration_json(fusion_configuration : dict, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> list[FusionConfiguration]: + """ + Read a Configuration JSON to configure a fusion process. + :param fusion_configuration: The JSON containing the configuration. + :param coord_sys: The COORDINATE_SYSTEM in which you want the World Pose to be in. + :param unit: The UNIT in which you want the World Pose to be in. + + :return: A list of FusionConfiguration for all the camera present in the file. + .. note:: + Empty if no data were found for the requested camera. + """ + return list[FusionConfiguration]() + +def write_configuration_file(json_config_filename : str, fusion_configurations : list, coord_sys : COORDINATE_SYSTEM, unit: UNIT) -> None: + """ + Write a Configuration JSON file to configure a fusion process. + :param json_config_filename: The name of the JSON that will contain the information. + :param conf: A list of FusionConfiguration listing all the camera configurations. + :param coord_sys: The COORDINATE_SYSTEM in which the World Pose is. + :param unit: The UNIT in which the World Pose is. + """ + pass + +class GNSSCalibrationParameters: + """ + Holds the options used for calibrating GNSS / VIO. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def target_translation_uncertainty(self) -> float: + """ + This parameter defines the target translation uncertainty at which the calibration process between GNSS and VIO concludes. + + Default: 10e-2 (10 centimeters) + # + """ + return float() + + @target_translation_uncertainty.setter + def target_translation_uncertainty(self, target_translation_uncertainty: Any) -> None: + pass + + @gnss_antenna_position.setter + def gnss_antenna_position(self, gnss_antenna_position: Any) -> None: + pass + + @property + def enable_reinitialization(self) -> bool: + """ + This parameter determines whether reinitialization should be performed between GNSS and VIO fusion when a significant disparity is detected between GNSS data and the current fusion data. + It becomes particularly crucial during prolonged GNSS signal loss scenarios. 
+ + Default: True + # + """ + return bool() + + @enable_reinitialization.setter + def enable_reinitialization(self, enable_reinitialization: Any) -> None: + pass + + @property + def gnss_vio_reinit_threshold(self) -> float: + """ + This parameter determines the threshold for GNSS/VIO reinitialization. + If the fused position deviates beyond out of the region defined by the product of the GNSS covariance and the gnss_vio_reinit_threshold, a reinitialization will be triggered. + + Default: 5 + # + """ + return float() + + @gnss_vio_reinit_threshold.setter + def gnss_vio_reinit_threshold(self, gnss_vio_reinit_threshold: Any) -> None: + pass + + @property + def target_yaw_uncertainty(self) -> float: + """ + This parameter defines the target yaw uncertainty at which the calibration process between GNSS and VIO concludes. + The unit of this parameter is in radian. + + Default: 0.1 radians + # + """ + return float() + + @target_yaw_uncertainty.setter + def target_yaw_uncertainty(self, target_yaw_uncertainty: Any) -> None: + pass + + @property + def enable_translation_uncertainty_target(self) -> bool: + """ + When this parameter is enabled (set to true), the calibration process between GNSS and VIO accounts for the uncertainty in the determined translation, thereby facilitating the calibration termination. + The maximum allowable uncertainty is controlled by the 'target_translation_uncertainty' parameter. + + Default: False + # + """ + return bool() + + @enable_translation_uncertainty_target.setter + def enable_translation_uncertainty_target(self, enable_translation_uncertainty_target: Any) -> None: + pass + + @property + def enable_rolling_calibration(self) -> bool: + """ + If this parameter is set to true, the fusion algorithm will used a rough VIO / GNSS calibration at first and then refine it. This allow you to quickly get a fused position. + + Default: True + # + """ + return bool() + + @enable_rolling_calibration.setter + def enable_rolling_calibration(self, enable_rolling_calibration: Any) -> None: + pass + + def gnss_antenna_position(self) -> np.array[float]: + """ + Define a transform between the GNSS antenna and the camera system for the VIO / GNSS calibration. + + Default value is [0,0,0], this position can be refined by the calibration if enabled + # + """ + return np.array[float]() + + +class PositionalTrackingFusionParameters: + """ + Holds the options used for initializing the positional tracking fusion module. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def base_footprint_to_world_transform(self) -> Transform: + """ + Position and orientation of the base footprint with respect to the user world. + This transform represents a basis change from base footprint coordinate frame to user world coordinate frame + """ + return Transform() + + @base_footprint_to_world_transform.setter + def base_footprint_to_world_transform(self, base_footprint_to_world_transform: Any) -> None: + pass + + @property + def set_gravity_as_origin(self) -> bool: + """ + Whether to override 2 of the 3 rotations from base_footprint_to_world_transform using the IMU gravity. + + Default: False + """ + return bool() + + @set_gravity_as_origin.setter + def set_gravity_as_origin(self, set_gravity_as_origin: Any) -> None: + pass + + @property + def enable_GNSS_fusion(self) -> bool: + """ + This attribute is responsible for enabling or not GNSS positional tracking fusion. 
+ + Default: False + """ + return bool() + + @enable_GNSS_fusion.setter + def enable_GNSS_fusion(self, enable_GNSS_fusion: Any) -> None: + pass + + @property + def tracking_camera_id(self) -> CameraIdentifier: + """ + ID of the camera used for positional tracking. If not specified, will use the first camera called with the subscribe() method. + """ + return CameraIdentifier() + + @tracking_camera_id.setter + def tracking_camera_id(self, tracking_camera_id: Any) -> None: + pass + + @property + def gnss_calibration_parameters(self) -> GNSSCalibrationParameters: + """ + Control the VIO / GNSS calibration process. + """ + return GNSSCalibrationParameters() + + @gnss_calibration_parameters.setter + def gnss_calibration_parameters(self, gnss_calibration_parameters: Any) -> None: + pass + + @property + def base_footprint_to_baselink_transform(self) -> Transform: + """ + Position and orientation of the base footprint with respect to the baselink. + This transform represents a basis change from base footprint coordinate frame to baselink coordinate frame + """ + return Transform() + + @base_footprint_to_baselink_transform.setter + def base_footprint_to_baselink_transform(self, base_footprint_to_baselink_transform: Any) -> None: + pass + + +class SpatialMappingFusionParameters: + """ + Holds the options used for initializing the positional tracking fusion module. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def range_meter(self) -> float: + """ + Depth range in meters. + + Can be different from the value set by sl::InitParameters::depth_maximum_distance. + + Default: 0. In this case, the range is computed from resolution_meter + and from the current internal parameters to fit your application. + """ + return float() + + @range_meter.setter + def range_meter(self, range_meter: Any) -> None: + pass + + @property + def decay(self) -> float: + """ + Adjust the weighting factor for the current depth during the integration process. + + Setting it to 0 discards all previous data and solely integrates the current depth. + + Default: 1, which results in the complete integration and fusion of the current depth with the previously integrated depth. + """ + return float() + + @decay.setter + def decay(self, decay: Any) -> None: + pass + + @property + def enable_forget_past(self) -> bool: + """ + Default: false + """ + return bool() + + @enable_forget_past.setter + def enable_forget_past(self, enable_forget_past: Any) -> None: + pass + + @property + def map_type(self) -> SPATIAL_MAP_TYPE: + """ + The type of spatial map to be created. This dictates the format that will be used for the mapping(e.g. mesh, point cloud). See SPATIAL_MAP_TYPE + + Default: SPATIAL_MAP_TYPE.MESH. + """ + return SPATIAL_MAP_TYPE() + + @map_type.setter + def map_type(self, map_type: Any) -> None: + pass + + @property + def use_chunk_only(self) -> bool: + """ + Set to false if you want to ensure consistency between the mesh and its inner chunk data. + + .. note:: + Updating the mesh is time-consuming. Setting this to true results in better performance. + + + Default: False + """ + return bool() + + @use_chunk_only.setter + def use_chunk_only(self, use_chunk_only: Any) -> None: + pass + + @property + def stability_counter(self) -> int: + """ + Control the integration rate of the current depth into the mapping process. + This parameter controls how many times a stable 3D points should be seen before it is integrated into the spatial mapping. 
+ Default: 0, this will define the stability counter based on the mesh resolution, the higher the resolution, the higher the stability counter. + """ + return int() + + @stability_counter.setter + def stability_counter(self, stability_counter: Any) -> None: + pass + + @property + def disparity_std(self) -> float: + """ + Control the disparity noise (standard deviation) in px. set a very small value (<0.1) if the depth map of the scene is accurate. Set a big value (>0.5) if the depth map is noisy. + + Default: 0.3 + """ + return float() + + @disparity_std.setter + def disparity_std(self, disparity_std: Any) -> None: + pass + + @property + def resolution_meter(self) -> float: + """ + Spatial mapping resolution in meters. + + Default: 0.05 m + """ + return float() + + @resolution_meter.setter + def resolution_meter(self, resolution_meter: Any) -> None: + pass + + @property + def max_memory_usage(self) -> int: + """ + The maximum CPU memory (in MB) allocated for the meshing process. + + Default: 2048 MB + """ + return int() + + @max_memory_usage.setter + def max_memory_usage(self, max_memory_usage: Any) -> None: + pass + + +class BodyTrackingFusionParameters: + """ + Holds the options used to initialize the body tracking module of the Fusion. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_body_fitting(self) -> bool: + """ + Defines if the body fitting will be applied. + + Default: False + .. note:: + If you enable it and the camera provides data as BODY_18 the fused body format will be BODY_34. + """ + return bool() + + @enable_body_fitting.setter + def enable_body_fitting(self, enable_body_fitting: Any) -> None: + pass + + @property + def enable_tracking(self) -> bool: + """ + Defines if the object detection will track objects across images flow. + + Default: True + """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + +class BodyTrackingFusionRuntimeParameters: + """ + Holds the options used to change the behavior of the body tracking module at runtime. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def skeleton_minimum_allowed_keypoints(self) -> int: + """ + If the fused skeleton has less than skeleton_minimum_allowed_keypoints keypoints, it will be discarded. + + Default: -1. + """ + return int() + + @skeleton_minimum_allowed_keypoints.setter + def skeleton_minimum_allowed_keypoints(self, skeleton_minimum_allowed_keypoints: Any) -> None: + pass + + @property + def skeleton_smoothing(self) -> float: + """ + This value controls the smoothing of the tracked or fitted fused skeleton. + + It is ranged from 0 (low smoothing) and 1 (high smoothing). + \n Default: 0. + """ + return float() + + @skeleton_smoothing.setter + def skeleton_smoothing(self, skeleton_smoothing: Any) -> None: + pass + + @property + def skeleton_minimum_allowed_camera(self) -> int: + """ + If a skeleton was detected in less than skeleton_minimum_allowed_camera cameras, it will be discarded. + + Default: -1. + """ + return int() + + @skeleton_minimum_allowed_camera.setter + def skeleton_minimum_allowed_camera(self, skeleton_minimum_allowed_camera: Any) -> None: + pass + + +class ObjectDetectionFusionParameters: + """ + Holds the options used to initialize the object detection module of the Fusion + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_tracking(self) -> bool: + """ + Defines if the object detection will track objects across images flow. + + Default: True. 
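+
+        A configuration sketch (fusion is assumed to be an already initialized sl.Fusion):
+
+        .. code-block:: text
+
+            od_params = sl.ObjectDetectionFusionParameters()
+            od_params.enable_tracking = True
+            fusion.enable_object_detection(od_params)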
+ """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + +class CameraMetrics: + """ + Holds the metrics of a sender in the fusion process. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def received_fps(self) -> float: + """ + FPS of the received data. + """ + return float() + + @received_fps.setter + def received_fps(self, received_fps: Any) -> None: + pass + + @property + def ratio_detection(self) -> float: + """ + Skeleton detection percent during the last second. + Number of frames with at least one detection / number of frames, over the last second. + A low value means few detections occured lately for this sender. + """ + return float() + + @ratio_detection.setter + def ratio_detection(self, ratio_detection: Any) -> None: + pass + + @property + def is_present(self) -> bool: + """ + Is set to false if no data in this batch of metrics. + """ + return bool() + + @is_present.setter + def is_present(self, is_present: Any) -> None: + pass + + @property + def received_latency(self) -> float: + """ + Latency (in second) of the received data. + Timestamp difference between the time when the data are sent and the time they are received (mostly introduced when using the local network workflow). + """ + return float() + + @received_latency.setter + def received_latency(self, received_latency: Any) -> None: + pass + + @property + def delta_ts(self) -> float: + """ + Average data acquisition timestamp difference. + Average standard deviation of sender's period since the start. + """ + return float() + + @delta_ts.setter + def delta_ts(self, delta_ts: Any) -> None: + pass + + @property + def synced_latency(self) -> float: + """ + Latency (in seconds) after Fusion synchronization. + Difference between the timestamp of the data received and the timestamp at the end of the Fusion synchronization. + """ + return float() + + @synced_latency.setter + def synced_latency(self, synced_latency: Any) -> None: + pass + + +class FusionMetrics: + """ + Holds the metrics of the fusion process. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def mean_stdev_between_camera(self) -> float: + """ + Standard deviation of the data timestamp fused, the lower the better. + """ + return float() + + @mean_stdev_between_camera.setter + def mean_stdev_between_camera(self, mean_stdev_between_camera: Any) -> None: + pass + + @property + def camera_individual_stats(self) -> dict: + """ + Sender metrics. + """ + return {} + + @camera_individual_stats.setter + def camera_individual_stats(self, camera_individual_stats: Any) -> None: + pass + + @property + def mean_camera_fused(self) -> float: + """ + Mean number of camera that provides data during the past second. + """ + return float() + + @mean_camera_fused.setter + def mean_camera_fused(self, mean_camera_fused: Any) -> None: + pass + + def reset(self) -> None: + """ + Reset the current metrics. + """ + pass + + +class CameraIdentifier: + """ + Used to identify a specific camera in the Fusion API + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def serial_number(self) -> int: + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + +class ECEF: + """ + Represents a world position in ECEF format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def z(self) -> double: + """ + z coordinate of ECEF. 
+ """ + return double() + + @z.setter + def z(self, z: Any) -> None: + pass + + @property + def y(self) -> double: + """ + y coordinate of ECEF. + """ + return double() + + @y.setter + def y(self, y: Any) -> None: + pass + + @property + def x(self) -> double: + """ + x coordinate of ECEF. + """ + return double() + + @x.setter + def x(self, x: Any) -> None: + pass + + +class LatLng: + """ + Represents a world position in LatLng format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def get_latitude(self, in_radian : bool = True) -> None: + """ + Get the latitude coordinate + + :param in_radian: Is the output should be in radian or degree. + :return: Latitude in radian or in degree depending in_radian parameter. + """ + pass + + def get_longitude(self, in_radian = True) -> None: + """ + Get the longitude coordinate + + :param in_radian: Is the output should be in radian or degree. + :return: Longitude in radian or in degree depending in_radian parameter. + """ + pass + + def get_altitude(self) -> None: + """ + Get the altitude coordinate + + :return: Altitude coordinate in meters. + """ + pass + + def get_coordinates(self, in_radian = True) -> None: + """ + Get the coordinates in radians (default) or in degrees. + + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param in_radian: Should the output be expressed in radians or degrees. + """ + pass + + def set_coordinates(self, latitude: double, longitude: double, altitude: double, in_radian = True) -> None: + """ + Set the coordinates in radians (default) or in degrees. + + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + \@param in_radian: Is input are in radians or in degrees. + """ + pass + + +class UTM: + """ + Represents a world position in UTM format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def easting(self) -> double: + """ + Easting coordinate. + """ + return double() + + @easting.setter + def easting(self, easting: Any) -> None: + pass + + @property + def gamma(self) -> double: + """ + Gamma coordinate. + """ + return double() + + @gamma.setter + def gamma(self, gamma: Any) -> None: + pass + + @property + def UTM_zone(self) -> str: + """ + UTMZone of the coordinate. + """ + return str() + + @UTM_zone.setter + def UTM_zone(self, UTM_zone: Any) -> None: + pass + + @property + def northing(self) -> double: + """ + Northing coordinate. + """ + return double() + + @northing.setter + def northing(self, northing: Any) -> None: + pass + + +class ENU: + """ + Represent a world position in ENU format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def east(self) -> double: + """ + East parameter + """ + return double() + + @east.setter + def east(self, east: Any) -> None: + pass + + @property + def up(self) -> double: + """ + Up parameter + """ + return double() + + @up.setter + def up(self, up: Any) -> None: + pass + + @property + def north(self) -> double: + """ + North parameter + """ + return double() + + @north.setter + def north(self, north: Any) -> None: + pass + + +class GeoConverter: + """ + Purely static class for Geo functions. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def ecef2latlng(input: ECEF) -> LatLng: + """ + Convert ECEF coordinates to Lat/Long coordinates. + """ + return LatLng() + + def ecef2utm(input: ECEF) -> UTM: + """ + Convert ECEF coordinates to UTM coordinates. 
+ """ + return UTM() + + def latlng2ecef(input: LatLng) -> ECEF: + """ + Convert Lat/Long coordinates to ECEF coordinates. + """ + return ECEF() + + def latlng2utm(input: LatLng) -> UTM: + """ + Convert Lat/Long coordinates to UTM coordinates. + """ + return UTM() + + def utm2ecef(input: UTM) -> ECEF: + """ + Convert UTM coordinates to ECEF coordinates. + """ + return ECEF() + + def utm2latlng(input: UTM) -> LatLng: + """ + Convert UTM coordinates to Lat/Long coordinates. + """ + return LatLng() + + +class GeoPose: + """ + Holds Geo reference position. + Holds geographic reference position information. + + This class represents a geographic pose, including position, orientation, and accuracy information. + It is used for storing and manipulating geographic data, such as latitude, longitude, altitude, + pose matrices, covariances, and timestamps. + + The pose data is defined in the East-North-Up (ENU) reference frame. The ENU frame is a local + Cartesian coordinate system commonly used in geodetic applications. In this frame, the X-axis + points towards the East, the Y-axis points towards the North, and the Z-axis points upwards. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def heading(self) -> double: + """ + The heading (orientation) of the pose in radians (rad). It indicates the direction in which the object or observer is facing, with 0 degrees corresponding to North and increasing in a counter-clockwise direction. + """ + return double() + + @heading.setter + def heading(self, heading: Any) -> None: + pass + + @property + def horizontal_accuracy(self) -> double: + """ + The horizontal accuracy of the pose in meters. + """ + return double() + + @horizontal_accuracy.setter + def horizontal_accuracy(self, horizontal_accuracy: Any) -> None: + pass + + @property + def pose_data(self) -> Transform: + """ + The 4x4 matrix defining the pose in the East-North-Up (ENU) coordinate system. + """ + return Transform() + + @pose_data.setter + def pose_data(self, pose_data: Any) -> None: + pass + + @property + def vertical_accuracy(self) -> double: + """ + The vertical accuracy of the pose in meters. + """ + return double() + + @vertical_accuracy.setter + def vertical_accuracy(self, vertical_accuracy: Any) -> None: + pass + + @pose_covariance.setter + def pose_covariance(self, pose_covariance: Any) -> None: + pass + + @property + def latlng_coordinates(self) -> LatLng: + """ + The latitude, longitude, and altitude coordinates of the pose. + """ + return LatLng() + + @latlng_coordinates.setter + def latlng_coordinates(self, latlng_coordinates: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + The timestamp associated with the GeoPose. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def pose_covariance(self) -> np.array[float]: + """ + The pose covariance matrix in ENU. + """ + return np.array[float]() + + +class GNSSData: + """ + Class containing GNSS data to be used for positional tracking as prior. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def gnss_mode(self) -> GNSS_MODE: + """ + Represents the current mode of GNSS. + """ + return GNSS_MODE() + + @gnss_mode.setter + def gnss_mode(self, gnss_mode: Any) -> None: + pass + + @property + def ts(self) -> Timestamp: + """ + Timestamp of the GNSS position (must be aligned with the camera time reference). 
+ """ + return Timestamp() + + @ts.setter + def ts(self, ts: Any) -> None: + pass + + @property + def gnss_status(self) -> GNSS_STATUS: + """ + Represents the current status of GNSS. + """ + return GNSS_STATUS() + + @gnss_status.setter + def gnss_status(self, gnss_status: Any) -> None: + pass + + @property + def latitude_std(self) -> float: + """ + Latitude standard deviation. + """ + return float() + + @latitude_std.setter + def latitude_std(self, latitude_std: Any) -> None: + pass + + @property + def position_covariances(self) -> list[float]: + """ + Covariance of the position in meter (must be expressed in the ENU coordinate system). + For eph, epv GNSS sensors, set it as follow: ```{eph*eph, 0, 0, 0, eph*eph, 0, 0, 0, epv*epv}```. + """ + return list[float]() + + @position_covariances.setter + def position_covariances(self, position_covariances: Any) -> None: + pass + + @property + def longitude_std(self) -> float: + """ + Longitude standard deviation. + """ + return float() + + @longitude_std.setter + def longitude_std(self, longitude_std: Any) -> None: + pass + + @property + def altitude_std(self) -> float: + """ + Altitude standard deviation + """ + return float() + + @altitude_std.setter + def altitude_std(self, altitude_std: Any) -> None: + pass + + def get_coordinates(self, in_radian = True) -> tuple[float, float, float]: + """ + Get the coordinates of the sl.GNSSData. + The sl.LatLng coordinates could be expressed in degrees or radians. + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param is_radian: Should the output be expressed in radians or degrees. + """ + return tuple[float, float, float]() + + def set_coordinates(self, latitude: double, longitude: double, altitude: double, in_radian = True) -> None: + """ + Set the sl.LatLng coordinates of sl.GNSSData. + The sl.LatLng coordinates could be expressed in degrees or radians. + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param is_radian: Are the inputs expressed in radians or in degrees. + """ + pass + + +class SynchronizationParameter: + """ + Class containing GNSS data to be used for positional tracking as prior. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def windows_size(self) -> double: + return double() + + @windows_size.setter + def windows_size(self, windows_size: Any) -> None: + pass + + @property + def data_source_timeout(self) -> double: + return double() + + @data_source_timeout.setter + def data_source_timeout(self, data_source_timeout: Any) -> None: + pass + + @property + def maximum_lateness(self) -> double: + return double() + + @maximum_lateness.setter + def maximum_lateness(self, maximum_lateness: Any) -> None: + pass + + @property + def keep_last_data(self) -> bool: + return bool() + + @keep_last_data.setter + def keep_last_data(self, keep_last_data: Any) -> None: + pass + + +class InitFusionParameters: + """ + Holds the options used to initialize the Fusion object. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def verbose(self) -> bool: + """ + Enable the verbosity mode of the SDK. + """ + return bool() + + @verbose.setter + def verbose(self, verbose: Any) -> None: + pass + + @property + def maximum_working_resolution(self) -> Resolution: + """ + Sets the maximum resolution for all Fusion outputs, such as images and measures. 
+ + The default value is (-1, -1), which allows the Fusion to automatically select the optimal resolution for the best quality/runtime ratio. + + - For images, the output resolution can be up to the native resolution of the camera. + - For measures involving depth, the output resolution can be up to the maximum working resolution. + + Setting this parameter to (-1, -1) will ensure the best balance between quality and performance for depth measures. + """ + return Resolution() + + @maximum_working_resolution.setter + def maximum_working_resolution(self, maximum_working_resolution: Any) -> None: + pass + + @property + def coordinate_units(self) -> UNIT: + """ + This parameter allows you to select the unit to be used for all metric values of the SDK (depth, point cloud, tracking, mesh, and others). + Default : UNIT "UNIT::MILLIMETER" + """ + return UNIT() + + @coordinate_units.setter + def coordinate_units(self, coordinate_units: Any) -> None: + pass + + @property + def timeout_period_number(self) -> int: + """ + If specified change the number of period necessary for a source to go in timeout without data. For example, if you set this to 5 then, if any source do not receive data during 5 period, these sources will go to timeout and will be ignored. + """ + return int() + + @timeout_period_number.setter + def timeout_period_number(self, timeout_period_number: Any) -> None: + pass + + @property + def sdk_gpu_id(self) -> int: + """ + NVIDIA graphics card id to use. + + By default the SDK will use the most powerful NVIDIA graphics card found. + \n However, when running several applications, or using several cameras at the same time, splitting the load over available GPUs can be useful. + \n This parameter allows you to select the GPU used by the sl.Camera using an ID from 0 to n-1 GPUs in your PC. + \n Default: -1 + .. note:: + A non-positive value will search for all CUDA capable devices and select the most powerful. + """ + return int() + + @sdk_gpu_id.setter + def sdk_gpu_id(self, sdk_gpu_id: Any) -> None: + pass + + @property + def coordinate_system(self) -> COORDINATE_SYSTEM: + """ + Positional tracking, point clouds and many other features require a given COORDINATE_SYSTEM to be used as reference. + This parameter allows you to select the COORDINATE_SYSTEM used by the Camera to return its measures. + \n This defines the order and the direction of the axis of the coordinate system. + \n Default : COORDINATE_SYSTEM "COORDINATE_SYSTEM::IMAGE" + """ + return COORDINATE_SYSTEM() + + @coordinate_system.setter + def coordinate_system(self, coordinate_system: Any) -> None: + pass + + @property + def output_performance_metrics(self) -> bool: + """ + It allows users to extract some stats of the Fusion API like drop frame of each camera, latency, etc... + """ + return bool() + + @output_performance_metrics.setter + def output_performance_metrics(self, output_performance_metrics: Any) -> None: + pass + + @property + def synchronization_parameters(self) -> SynchronizationParameter: + """ + Specifies the parameters used for data synchronization during fusion. + + The SynchronizationParameter struct encapsulates the synchronization parameters that control the data fusion process. 
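+
+        A configuration sketch (the values are illustrative, not recommended defaults):
+
+        .. code-block:: text
+
+            init_fusion_params = sl.InitFusionParameters()
+            sync_params = sl.SynchronizationParameter()
+            sync_params.windows_size = 0.03
+            sync_params.data_source_timeout = 2.0
+            sync_params.keep_last_data = False
+            init_fusion_params.synchronization_parameters = sync_params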
+ """ + return SynchronizationParameter() + + @synchronization_parameters.setter + def synchronization_parameters(self, synchronization_parameters: Any) -> None: + pass + + def __dealloc__(self) -> None: + pass + + +class Fusion: + """ + Holds Fusion process data and functions + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def init(self, init_fusion_parameters : InitFusionParameters) -> FUSION_ERROR_CODE: + """ + Initialize the fusion module with the requested parameters. + :param init_parameters: Initialization parameters. + :return: ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def close(self) -> None: + """ + Will deactivate all the fusion modules and free internal data. + """ + pass + + def subscribe(self, uuid : CameraIdentifier, communication_parameters: CommunicationParameters, pose: Transform) -> FUSION_ERROR_CODE: + """ + Set the specified camera as a data provider. + :param uuid: The requested camera identifier. + :param communication_parameters: The communication parameters to connect to the camera. + :param pose: The World position of the camera, regarding the other camera of the setup. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def unsubscribe(self, uuid : CameraIdentifier) -> FUSION_ERROR_CODE: + """ + Remove the specified camera from data provider. + :param uuid: The requested camera identifier. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def update_pose(self, uuid : CameraIdentifier, pose: Transform) -> FUSION_ERROR_CODE: + """ + Updates the specified camera position inside fusion WORLD. + :param uuid: The requested camera identifier. + :param pose: The World position of the camera, regarding the other camera of the setup. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def get_process_metrics(self) -> tuple[FUSION_ERROR_CODE, FusionMetrics]: + """ + Get the metrics of the Fusion process, for the fused data as well as individual camera provider data. + :param metrics: The process metrics. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + :return: The process metrics. + """ + return tuple[FUSION_ERROR_CODE, FusionMetrics]() + + def get_sender_state(self) -> dict: + """ + Returns the state of each connected data senders. + :return: The individual state of each connected senders. + """ + return {} + + def process(self) -> FUSION_ERROR_CODE: + """ + Runs the main function of the Fusion, this trigger the retrieve and synchronization of all connected senders and updates the enabled modules. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def enable_body_tracking(self, params : BodyTrackingFusionParameters) -> FUSION_ERROR_CODE: + """ + Enables the body tracking fusion module. + :param params: Structure containing all specific parameters for body tracking fusion. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. 
+ """ + return FUSION_ERROR_CODE() + + def retrieve_bodies(self, bodies : Bodies, parameters : BodyTrackingFusionRuntimeParameters, uuid : CameraIdentifier = CameraIdentifier(0), reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves the body data, can be the fused data (default), or the raw data provided by a specific sender. + :param bodies: The fused bodies will be saved into this objects. + :param parameters: Body detection runtime settings, can be changed at each detection. + :param uuid: The id of the sender. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def enable_object_detection(self, params = ObjectDetectionFusionParameters()) -> FUSION_ERROR_CODE: + """ + Enables the object detection fusion module. + :param params: Structure containing all specific parameters for object detection fusion. + \n For more information, see the ObjectDetectionFusionParameters documentation. + :return: SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_objects_all_od_groups(self, objs, reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves all the fused objects data. + :param objs: The fused objects will be saved into this dictionary of objects. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_objects_one_od_group(self, objs, fused_od_group_name, reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves the fused objects of a given fused OD group. + :param objs: The fused objects will be saved into this objects. + :param fused_od_group_name: The name of the fused objects group to retrieve. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_raw_objects_all_ids(self, objs, uuid) -> FUSION_ERROR_CODE: + """ + Retrieves all the raw objects data provided by a specific sender. + :param objs: The fused objects will be saved into this dictionary of objects. + :param uuid: Retrieve the raw data provided by this sender. + """ + return FUSION_ERROR_CODE() + + def retrieve_raw_objects_one_id(self, py_objects, uuid, instance_id) -> FUSION_ERROR_CODE: + """ + Retrieves the raw objects data provided by a specific sender and a specific instance id. + :param objs: The fused objects will be saved into this objects. + :param uuid: Retrieve the raw data provided by this sender. + :param instance_id: Retrieve the objects inferred by the model with this ID only. + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def disable_objects_detection(self) -> None: + """ + Disable the body fusion tracking module. 
+ """ + pass + + def retrieve_image(self, mat, uuid, resolution = Resolution(0, 0)) -> FUSION_ERROR_CODE: + """ + Returns the current sl.VIEW.LEFT of the specified camera, the data is synchronized. + :param mat: the CPU BGRA image of the requested camera. + :param resolution: the requested resolution of the output image, can be lower or equal (default) to the original image resolution. + :param uuid: If set to a sender serial number (different from 0), this will retrieve the raw data provided by this sender. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_measure(self, mat, uuid, measure: MEASURE, resolution = Resolution(0, 0), reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Returns the current measure of the specified camera, the data is synchronized. + :param mat: the CPU data of the requested camera. + :param uuid: The id of the sender. + :param measure: measure: the requested measure type, by default DEPTH (F32_C1). + :param resolution: the requested resolution of the output image, can be lower or equal (default) to the original image resolution. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def disable_body_tracking(self) -> None: + """ + Disable the body fusion tracking module. + """ + pass + + def enable_positionnal_tracking(self, parameters : PositionalTrackingFusionParameters) -> FUSION_ERROR_CODE: + """ + Enables positional tracking fusion module. + :param parameters: A structure containing all the PositionalTrackingFusionParameters that define positional tracking fusion module. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def ingest_gnss_data(self, gnss_data : GNSSData) -> FUSION_ERROR_CODE: + """ + Ingest GNSS data from an external sensor into the fusion module. + :param gnss_data: The current GNSS data to combine with the current positional tracking data. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def get_position(self, camera_pose : Pose, reference_frame : REFERENCE_FRAME = REFERENCE_FRAME.WORLD, uuid: CameraIdentifier = CameraIdentifier(), position_type : POSITION_TYPE = POSITION_TYPE.FUSION) -> POSITIONAL_TRACKING_STATE: + """ + Get the Fused Position referenced to the first camera subscribed. If uuid is specified then project position on the referenced camera. + :param camera_pose: Will contain the fused position referenced by default in world (world is given by the calibration of the cameras system). + :param reference_frame: Defines the reference from which you want the pose to be expressed. Default : REFERENCE_FRAME.WORLD. + :param uuid: If set to a sender serial number (different from 0), this will retrieve position projected on the requested camera if position_type is equal to POSITION_TYPE.FUSION or raw sender position if position_type is equal to POSITION_TYPE.RAW. + :param position_type: Select if the position should the fused position re-projected in the camera with uuid or if the position should be the raw position (without fusion) of camera with uui. 
+        :return: POSITIONAL_TRACKING_STATE is the current state of the tracking process.
+        """
+        return POSITIONAL_TRACKING_STATE()
+
+    def get_fused_positional_tracking_status(self) -> FusedPositionalTrackingStatus:
+        """
+        Get the current status of the fused positional tracking.
+        :return: The current FusedPositionalTrackingStatus.
+        """
+        return FusedPositionalTrackingStatus()
+
+    def get_current_gnss_data(self, gnss_data : GNSSData) -> POSITIONAL_TRACKING_STATE:
+        """
+        Returns the last synchronized GNSS data.
+        :param gnss_data: Last synchronized GNSS data. (Direction: out)
+        :return: POSITIONAL_TRACKING_STATE is the current state of the tracking process.
+        """
+        return POSITIONAL_TRACKING_STATE()
+
+    def get_geo_pose(self, pose : GeoPose) -> GNSS_FUSION_STATUS:
+        """
+        Returns the current GeoPose.
+        :param pose: The current GeoPose. (Direction: out)
+        :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+        """
+        return GNSS_FUSION_STATUS()
+
+    def geo_to_camera(self, input : LatLng, output : Pose) -> GNSS_FUSION_STATUS:
+        """
+        Convert latitude / longitude into a position in the sl.Fusion coordinate system.
+        :param input: The latitude / longitude to be converted into the sl.Fusion coordinate system. (Direction: in)
+        :param output: Converted position in the sl.Fusion coordinate system. (Direction: out)
+        :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+        """
+        return GNSS_FUSION_STATUS()
+
+    def camera_to_geo(self, input : Pose, output : GeoPose) -> GNSS_FUSION_STATUS:
+        """
+        Convert a position in the sl.Fusion coordinate system into global world coordinates.
+        :param input: Position to convert into global world coordinates. (Direction: in)
+        :param output: Converted position in global world coordinates. (Direction: out)
+        :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+        """
+        return GNSS_FUSION_STATUS()
+
+    def get_current_timestamp(self) -> Timestamp:
+        """
+        Return the current fusion timestamp, aligned with the synchronized GNSS and camera data.
+        :return: The current fusion timestamp.
+        """
+        return Timestamp()
+
+    def disable_positionnal_tracking(self) -> None:
+        """
+        Disable the fusion positional tracking module.
+
+        The positional tracking is immediately stopped.
+ """ + pass + + def ENU_to_geo(self, input: ENU, output: LatLng) -> FUSION_ERROR_CODE: + """ + Convert ENU to LatLng + + Concert an ENU position into LatLng + """ + return FUSION_ERROR_CODE() + + def geo_to_ENU(self, input : LatLng, out : ENU) -> FUSION_ERROR_CODE: + """ + Convert LatLng to ENU + + Convert am LatLng to ENU + """ + return FUSION_ERROR_CODE() + + def get_current_gnss_calibration_std(self) -> tuple[GNSS_FUSION_STATUS, float, np.array]: + """ + Get the current calibration uncertainty obtained during calibration process. + :return: sl.GNSS_FUSION_STATUS representing current initialisation status. + :return: Output yaw uncertainty. + :return: Output position uncertainty. + # + """ + return tuple[GNSS_FUSION_STATUS, float, np.array]() + + def get_geo_tracking_calibration(self) -> Transform: + """ + Get the calibration found between VIO and GNSS. + + :return: sl.Transform is the calibration found between VIO and GNSS during calibration process. + # + """ + return Transform() + + def enable_spatial_mapping(self, parameters) -> FUSION_ERROR_CODE: + """ + Initializes and starts the spatial mapping processes. + + The spatial mapping will create a geometric representation of the scene based on both tracking data and 3D point clouds. + + The resulting output can be a Mesh or a FusedPointCloud. It can be be obtained by calling retrieve_spatial_map_async(). + Note that retrieve_spatial_map_async() should be called after request_spatial_map_async(). + + \param parameters The structure containing all the specific parameters for the spatial mapping. Default: a balanced parameter preset between geometric fidelity and output file size. For more information, see the SpatialMappingParameters documentation. + :return: SUCCESS if everything went fine, FUSION_ERROR_CODE "FUSION_ERROR_CODE::FAILURE" otherwise. + + .. note:: + The tracking (enable_positional_tracking()) needs to be enabled to use the spatial mapping. + + .. note:: + Lower SpatialMappingParameters.range_meter and SpatialMappingParameters.resolution_meter for higher performance. + + .. warning:: This fuction is only available for INTRA_PROCESS communication type. + """ + return FUSION_ERROR_CODE() + + def request_spatial_map_async(self) -> None: + """ + Starts the spatial map generation process in a non blocking thread from the spatial mapping process. + + The spatial map generation can take a long time depending on the mapping resolution and covered area. This function will trigger the generation of a mesh without blocking the program. + You can get info about the current generation using get_spatial_map_request_status_async(), and retrieve the mesh using request_spatial_map_async(...) . + + .. note:: + Only one mesh can be generated at a time. If the previous mesh generation is not over, new calls of the function will be ignored. + + # + """ + pass + + def get_spatial_map_request_status_async(self) -> FUSION_ERROR_CODE: + """ + Returns the spatial map generation status. This status allows to know if the mesh can be retrieved by calling retrieve_spatial_map_async(). + :return: SUCCESS if the mesh is ready and not yet retrieved, otherwise FUSION_ERROR_CODE "FUSION_ERROR_CODE::FAILURE". + + \n See request_spatial_map_async() for an example. + # + """ + return FUSION_ERROR_CODE() + + def retrieve_spatial_map_async(self, py_mesh) -> FUSION_ERROR_CODE: + """ + Retrieves the current generated spatial map. + + After calling request_spatial_map_async(), this method allows you to retrieve the generated mesh or fused point cloud. 
+ \n The Mesh or FusedPointCloud will only be available when get_spatial_map_request_status_async() returns FUSION_ERROR_CODE.SUCCESS. + + :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out) + :return: FUSION_ERROR_CODE.SUCCESS if the mesh is retrieved, otherwise FUSION_ERROR_CODE.FAILURE. + + .. note:: + This method only updates the necessary chunks and adds the new ones in order to improve update speed. + + .. warning:: You should not modify the mesh / fused point cloud between two calls of this method, otherwise it can lead to a corrupted mesh / fused point cloud. + See request_spatial_map_async() for an example. + """ + return FUSION_ERROR_CODE() + + def disable_spatial_mapping(self) -> None: + """ + Disables the spatial mapping process. + + The spatial mapping is immediately stopped. + + If the mapping has been enabled, this function will automatically be called by close(). + + .. note:: + This function frees the memory allocated for the spatial mapping, consequently, the spatial map cannot be retrieved after this call. + """ + pass + + +class SVOData: + """ + Class containing SVO data to be ingested/retrieved to/from SVO. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def timestamp_ns(self) -> Timestamp: + """ + Timestamp of the data. + """ + return Timestamp() + + @timestamp_ns.setter + def timestamp_ns(self, timestamp_ns: Any) -> None: + pass + + @property + def key(self) -> str: + """ + Key of the data. + """ + return str() + + @key.setter + def key(self, key: Any) -> None: + pass + + def get_content_as_string(self) -> str: + """ + Get the content of the sl.SVOData as a string. + + :return: The content of the sl.SVOData as a string. + """ + return str() + + def set_string_content(self, data: str) -> str: + """ + Set the content of the sl.SVOData as a string. + + \param data The string data content to set. + """ + return str() + + +class CameraOneConfiguration: + """ + Structure containing information about the camera sensor. + + Information about the camera is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This object is meant to be used as a read-only container, editing any of its field won't impact the SDK. + + .. warning:: sl.CalibrationOneParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParametersOne.coordinate_system. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def calibration_parameters_raw(self) -> CameraParameters: + """ + Intrinsics and extrinsic stereo parameters for unrectified/distorted images. + """ + return CameraParameters() + + @property + def fps(self) -> float: + """ + FPS of the camera. + """ + return float() + + @property + def firmware_version(self) -> int: + """ + Internal firmware version of the camera. + """ + return int() + + @property + def calibration_parameters(self) -> CameraParameters: + """ + Intrinsics and extrinsic stereo parameters for rectified/undistorted images. + """ + return CameraParameters() + + @property + def resolution(self) -> Resolution: + """ + Resolution of the camera. + """ + return Resolution() + + +class CameraOneInformation: + """ + Structure containing information of a single camera (serial number, model, calibration, etc.) + That information about the camera will be returned by CameraOne.get_camera_information() + .. 
note:: + This object is meant to be used as a read-only container, editing any of its fields won't impact the SDK. + + .. warning:: CalibrationParameters are returned in COORDINATE_SYSTEM.IMAGE , they are not impacted by the InitParametersOne.coordinate_system + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera (see sl.MODEL). + """ + return MODEL() + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + """ + return int() + + @property + def camera_configuration(self) -> CameraOneConfiguration: + """ + Camera configuration parameters stored in a sl.CameraOneConfiguration. + """ + return CameraOneConfiguration() + + @property + def sensors_configuration(self) -> SensorsConfiguration: + """ + Sensors configuration parameters stored in a sl.SensorsConfiguration. + """ + return SensorsConfiguration() + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type used in the ZED SDK. + """ + return INPUT_TYPE() + + +class InitParametersOne: + """ + Class containing the options used to initialize the sl.CameraOne object. + + This class allows you to select multiple parameters for the sl.Camera such as the selected camera, resolution, depth mode, coordinate system, and units of measurement. + \n Once filled with the desired options, it should be passed to the sl.Camera.open() method. + + .. code-block:: text + + + import pyzed.sl as sl + + def main() : + zed = sl.CameraOne() # Create a ZED camera object + + init_params = sl.InitParametersOne() # Set initial parameters + init_params.sdk_verbose = 0 # Disable verbose mode + + # Use the camera in LIVE mode + init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode + init_params.camera_fps = 30 # Set fps at 30 + + # Or use the camera in SVO (offline) mode + #init_params.set_from_svo_file("xxxx.svo") + + # Or use the camera in STREAM mode + #init_params.set_from_stream("192.168.1.12", 30000) + + # Other parameters are left to their default values + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Close the camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + + + With its default values, it opens the camera in live mode at sl.RESOLUTION.HD720 + \n You can customize it to fit your application. + .. note:: + The parameters can also be saved and reloaded using its save() and load() methods. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def coordinate_units(self) -> UNIT: + """ + Unit of spatial data (depth, point cloud, tracking, mesh, etc.) for retrieval. + + Default: sl.UNIT.MILLIMETER + """ + return UNIT() + + @coordinate_units.setter + def coordinate_units(self, coordinate_units: Any) -> None: + pass + + @property + def optional_settings_path(self) -> str: + """ + Optional path where the ZED SDK has to search for the settings file (SN.conf file). + + This file contains the calibration information of the camera. + \n Default: "" + + .. note:: + The settings file will be searched in the default directory: + + * **Linux**: /usr/local/zed/settings/ + * **Windows**: C:/ProgramData/stereolabs/settings + + .. note:: + If a path is specified and no file has been found, the ZED SDK will search the settings file in the default directory. + + .. note:: + An automatic download of the settings file (through **ZED Explorer** or the installer) will still download the files on the default path. + + + .. 
code-block:: text + + init_params = sl.InitParametersOne() # Set initial parameters + home = "/path/to/home" + path = home + "/Documents/settings/" # assuming /path/to/home/Documents/settings/SNXXXX.conf exists. Otherwise, it will be searched in /usr/local/zed/settings/ + init_params.optional_settings_path = path + """ + return str() + + @optional_settings_path.setter + def optional_settings_path(self, optional_settings_path: Any) -> None: + pass + + @property + def coordinate_system(self) -> COORDINATE_SYSTEM: + """ + sl.COORDINATE_SYSTEM to be used as reference for positional tracking, mesh, point clouds, etc. + + This parameter allows you to select the sl.COORDINATE_SYSTEM used by the sl.Camera object to return its measures. + \n This defines the order and the direction of the axis of the coordinate system. + \n Default: sl.COORDINATE_SYSTEM.IMAGE + """ + return COORDINATE_SYSTEM() + + @coordinate_system.setter + def coordinate_system(self, coordinate_system: Any) -> None: + pass + + @property + def async_grab_camera_recovery(self) -> bool: + """ + Define the behavior of the automatic camera recovery during sl.Camera.grab() method call. + + When async is enabled and there's an issue with the communication with the sl.Camera object, + sl.Camera.grab() will exit after a short period and return the sl.ERROR_CODE.CAMERA_REBOOTING warning. + \n The recovery will run in the background until the correct communication is restored. + \n When async_grab_camera_recovery is false, the sl.Camera.grab() method is blocking and will return + only once the camera communication is restored or the timeout is reached. + \n Default: False + """ + return bool() + + @async_grab_camera_recovery.setter + def async_grab_camera_recovery(self, async_grab_camera_recovery: Any) -> None: + pass + + @property + def enable_hdr(self) -> bool: + """ + Activates HDR support for the current resolution/mode. Only active if the camera supports HDR for this resolution + + \n Default: False + """ + return bool() + + @enable_hdr.setter + def enable_hdr(self, enable_hdr: Any) -> None: + pass + + @property + def camera_fps(self) -> int: + """ + Requested camera frame rate. + + If set to 0, the highest FPS of the specified camera_resolution will be used. + \n Default: 0 + \n\n See sl.RESOLUTION for a list of supported frame rates. + .. note:: + If the requested camera_fps is unsupported, the closest available FPS will be used. + """ + return int() + + @camera_fps.setter + def camera_fps(self, camera_fps: Any) -> None: + pass + + @property + def svo_real_time_mode(self) -> bool: + """ + Defines if sl.Camera object return the frame in real time mode. + + When playing back an SVO file, each call to sl.Camera.grab() will extract a new frame and use it. + \n However, it ignores the real capture rate of the images saved in the SVO file. + \n Enabling this parameter will bring the SDK closer to a real simulation when playing back a file by using the images' timestamps. + \n Default: False + .. note:: + sl.Camera.grab() will return an error when trying to play too fast, and frames will be dropped when playing too slowly. + """ + return bool() + + @svo_real_time_mode.setter + def svo_real_time_mode(self, svo_real_time_mode: Any) -> None: + pass + + @property + def camera_resolution(self) -> RESOLUTION: + """ + Desired camera resolution. + .. note:: + Small resolutions offer higher framerate and lower computation time. + + .. note:: + In most situations, sl.RESOLUTION.HD720 at 60 FPS is the best balance between image quality and framerate. 
+
+        Default:
+            * ZED X/X Mini: sl.RESOLUTION.HD1200
+            * other cameras: sl.RESOLUTION.HD720
+        .. note::
+            Available resolutions are listed here: sl.RESOLUTION.
+        """
+        return RESOLUTION()
+
+    @camera_resolution.setter
+    def camera_resolution(self, camera_resolution: Any) -> None:
+        pass
+
+    @property
+    def sdk_verbose_log_file(self) -> str:
+        """
+        File path to store the ZED SDK logs (if sdk_verbose is enabled).
+
+        The file will be created if it does not exist.
+        \n Default: ""
+
+        .. note::
+            Setting this parameter to any value will redirect all standard output print calls of the entire program.
+
+        .. note::
+            This means that your own standard output print calls will be redirected to the log file.
+
+        .. warning:: The log file won't be cleared after successive executions of the application.
+        .. warning:: This means that it can grow indefinitely if not cleared.
+        """
+        return str()
+
+    @sdk_verbose_log_file.setter
+    def sdk_verbose_log_file(self, sdk_verbose_log_file: Any) -> None:
+        pass
+
+    @property
+    def sdk_verbose(self) -> int:
+        """
+        Enables the ZED SDK verbose mode.
+
+        This parameter allows you to enable the verbosity of the ZED SDK to get a variety of runtime information in the console.
+        \n When developing an application, enabling verbose mode (``sdk_verbose >= 1``) can help you understand the current ZED SDK behavior.
+        \n However, this might not be desirable in a shipped version.
+        \n Default: 0 (no verbose message)
+        .. note::
+            The verbose messages can also be exported into a log file.
+
+        .. note::
+            See sdk_verbose_log_file for more.
+        """
+        return int()
+
+    @sdk_verbose.setter
+    def sdk_verbose(self, sdk_verbose: Any) -> None:
+        pass
+
+    def input(self, input_t) -> None:
+        """
+        The SDK can handle different input types:
+            - Select a camera by its ID (/dev/videoX on Linux, and 0 to N cameras connected on Windows)
+            - Select a camera by its serial number
+            - Open a recorded sequence in the SVO file format
+            - Open a streaming camera from its IP address and port
+
+        This parameter allows you to select the desired input. It should be used like this:
+        .. code-block:: text
+
+            init_params = sl.InitParametersOne() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_camera_id(0) # Selects the camera with ID = 0
+            init_params.input = input_t
+            init_params.set_from_camera_id(0) # You can also use this
+
+
+        .. code-block:: text
+
+            init_params = sl.InitParametersOne() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_serial_number(1010) # Selects the camera with serial number = 1010
+            init_params.input = input_t
+            init_params.set_from_serial_number(1010) # You can also use this
+
+
+        .. code-block:: text
+
+            init_params = sl.InitParametersOne() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_svo_file("/path/to/file.svo") # Selects an SVO file to be read
+            init_params.input = input_t
+            init_params.set_from_svo_file("/path/to/file.svo") # You can also use this
+
+        .. code-block:: text
+
+            init_params = sl.InitParametersOne() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_stream("192.168.1.42")
+            init_params.input = input_t
+            init_params.set_from_stream("192.168.1.42") # You can also use this
+
+
+        Available cameras and their ID/serial number can be listed using get_device_list() and get_streaming_device_list().
+        Each Camera will create its own memory (CPU and GPU); therefore the number of ZED cameras used at the same time can be limited by the configuration of your computer (GPU/CPU memory and capabilities).
+
+        Default: empty
+        See InputType for complementary information.
+
+        .. warning:: Using the ZED SDK Python API, init_params.input.set_from_XXX won't work; use init_params.set_from_XXX instead.
+        @property
+        def input(self) -> InputType:
+            input_t = InputType()
+            input_t.input = self.init.input
+            return input_t
+        """
+        pass
+
+    def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None:
+        """
+        Defines the input source with a camera id to initialize and open an sl.CameraOne object from.
+        :param cam_id: Id of the desired camera to open.
+        :param bus_type: sl.BUS_TYPE of the desired camera to open.
+        """
+        pass
+
+    def set_from_serial_number(self, serial_number) -> None:
+        """
+        Defines the input source with a serial number to initialize and open an sl.CameraOne object from.
+        :param serial_number: Serial number of the desired camera to open.
+        """
+        pass
+
+    def set_from_svo_file(self, svo_input_filename) -> None:
+        """
+        Defines the input source with an SVO file to initialize and open an sl.CameraOne object from.
+        :param svo_input_filename: Path to the desired SVO file to open.
+        """
+        pass
+
+    def set_from_stream(self, sender_ip, port = 30000) -> None:
+        """
+        Defines the input source from a stream to initialize and open an sl.CameraOne object from.
+        :param sender_ip: IP address of the streaming sender.
+        :param port: Port on which to listen. Default: 30000
+        """
+        pass
+
+
+class CameraOne:
+    """
+    Class serving as the primary interface with a single-sensor (ZED One) camera.
+
+    To open the camera, fill an sl.InitParametersOne structure with the desired options (selected camera, resolution, coordinate system, ...) and pass it to the sl.CameraOne.open() method.
+
+    .. code-block:: text
+
+
+        import pyzed.sl as sl
+
+        def main() :
+            zed = sl.CameraOne() # Create a ZED camera object
+
+            init_params = sl.InitParametersOne() # Set initial parameters
+            init_params.sdk_verbose = 0 # Disable verbose mode
+
+            # Use the camera in LIVE mode
+            init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode
+            init_params.camera_fps = 30 # Set fps at 30
+
+            # Or use the camera in SVO (offline) mode
+            #init_params.set_from_svo_file("xxxx.svo")
+
+            # Or use the camera in STREAM mode
+            #init_params.set_from_stream("192.168.1.12", 30000)
+
+            # Other parameters are left to their default values
+
+            # Open the camera
+            err = zed.open(init_params)
+            if err != sl.ERROR_CODE.SUCCESS:
+                exit(-1)
+
+            # Close the camera
+            zed.close()
+            return 0
+
+        if __name__ == "__main__" :
+            main()
+
+
+    With the default values of sl.InitParametersOne, the camera is opened in live mode at sl.RESOLUTION.HD720.
+    \n You can customize the parameters to fit your application.
+    .. note::
+        The initial parameters can also be saved and reloaded using the save() and load() methods of sl.InitParametersOne.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
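+
+    # Illustrative sketch: a typical CameraOne acquisition loop, assuming a
+    # camera is connected and the default InitParametersOne are acceptable.
+    #
+    #     import pyzed.sl as sl
+    #
+    #     zed = sl.CameraOne()
+    #     if zed.open(sl.InitParametersOne()) == sl.ERROR_CODE.SUCCESS:
+    #         image = sl.Mat()
+    #         while zed.grab() == sl.ERROR_CODE.SUCCESS:
+    #             zed.retrieve_image(image, sl.VIEW.LEFT)
+    #         zed.close()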
+
+    def close(self) -> None:
+        """
+        Closes an opened camera.
+
+        If open() has been called, this method will close the connection to the camera (or the SVO file) and free the corresponding memory.
+
+        If open() wasn't called or failed, this method won't have any effect.
+
+        .. note::
+            If an asynchronous task is running within the Camera object, like save_area_map(), this method will wait for its completion.
+
+        .. note::
+            To apply a new InitParametersOne, you will need to close the camera first and then open it again with the new InitParametersOne values.
+
+        .. warning:: close() destroys the CUDA context; therefore you need to make sure to delete your GPU sl.Mat objects before closing the camera.
+        """
+        pass
+
+    def open(self, py_init : InitParametersOne = InitParametersOne()) -> ERROR_CODE:
+        """
+        Opens the ZED camera from the provided InitParametersOne.
+
+        The method will also check the hardware requirements and run a self-calibration.
+        :param py_init: A structure containing all the initial parameters. Default: a preset of InitParametersOne.
+        :return: An error code giving information about the internal process. If ERROR_CODE.SUCCESS is returned, the camera is ready to use. Every other code indicates an error and the program should be stopped.
+
+        Here is the proper way to call this function:
+
+        .. code-block:: text
+
+            zed = sl.CameraOne() # Create a ZED camera object
+
+            init_params = sl.InitParametersOne() # Set configuration parameters
+            init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode
+            init_params.camera_fps = 60 # Set fps at 60
+
+            # Open the camera
+            err = zed.open(init_params)
+            if (err != sl.ERROR_CODE.SUCCESS) :
+                print(repr(err)) # Display the error
+                exit(-1)
+
+
+        .. note::
+            If you are having issues opening a camera, the diagnostic tool provided in the SDK can help you identify the problem.
+
+            - **Windows:** C:\\Program Files (x86)\\ZED SDK\\tools\\ZED Diagnostic.exe
+            - **Linux:** /usr/local/zed/tools/ZED Diagnostic
+        .. note::
+            If this method is called on an already opened camera, close() will be called.
+        """
+        return ERROR_CODE()
+
+    def is_opened(self) -> bool:
+        """
+        Reports if the camera has been successfully opened.
+
+        It has the same behavior as checking if open() returns ERROR_CODE.SUCCESS.
+        :return: True if the ZED camera is already set up, otherwise False.
+        """
+        return bool()
+
+    def grab(self) -> ERROR_CODE:
+        """
+        This method will grab the latest images from the camera, rectify them, and compute the retrieve_measure() "measurements" based on the RuntimeParameters provided (depth, point cloud, tracking, etc.).
+
+        As measures are created in this method, its execution can last a few milliseconds, depending on your parameters and your hardware.
+        \n The exact duration will mostly depend on the following parameters:
+
+            - InitParametersOne.camera_resolution : Lower resolutions are faster to compute.
+
+        This method is meant to be called frequently in the main loop of your application.
+        .. note::
+            Since ZED SDK 3.0, this method is blocking. It means that grab() will wait until a new frame is detected and available.
+
+        .. note::
+            If no new frame is available until the timeout is reached, grab() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected.
+
+
+        :return: ERROR_CODE.SUCCESS means that no problem was encountered.
+        .. note::
+            Returned errors can be displayed using ``str()``.
+
+        .. code-block:: text
+
+            image = sl.Mat()
+            while True:
+                # Grab an image
+                if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
+                    zed.retrieve_image(image) # Get the left image
+                    # Use the image for your application
+        """
+        return ERROR_CODE()
+
+    def retrieve_image(self, py_mat, view = VIEW.LEFT, mem_type = MEM.CPU, resolution = Resolution(0, 0)) -> ERROR_CODE:
+        """
+        Retrieves images from the camera (or SVO file).
+
+        Multiple images are available along with a view of various measures for display purposes.
+        \n Available images and views are listed in VIEW.
+        \n As an example, VIEW.DEPTH can be used to get a gray-scale version of the depth map, but the actual depth values can be retrieved using retrieve_measure().
+        \n
+        \n **Pixels**
+        \n Most VIEW modes output images with 4 channels as BGRA (Blue, Green, Red, Alpha); for more information see enum VIEW.
+        \n
+        \n **Memory**
+        \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called.
+        \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy.
+        \n If the provided sl.Mat object is already allocated and matches the requested image format, memory won't be re-allocated.
+        \n
+        \n **Image size**
+        \n By default, images are returned in the resolution provided by Resolution "get_camera_information().camera_configuration.resolution".
+        \n However, you can request custom resolutions. For example, requesting a smaller image can help you speed up your application.
+        .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested.
+
+        :param py_mat: The sl.Mat to store the image. (Direction: out)
+        :param view: Defines the image you want (see VIEW). Default: VIEW.LEFT. (Direction: in)
+        :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU (you cannot change this default value). (Direction: in)
+        :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution "Resolution(0,0)", the camera resolution will be taken. Default: (0,0). (Direction: in)
+        :return: ERROR_CODE.SUCCESS if the method succeeded.
+        :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example).
+        :return: ERROR_CODE.FAILURE if another error occurred.
+
+        .. note::
+            As this method retrieves the images grabbed by the grab() method, it should be called afterward.
+
+
+        .. code-block:: text
+
+            # create sl.Mat objects to store the images
+            left_image = sl.Mat()
+            while True:
+                # Grab an image
+                if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
+                    zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image
+
+                    # Display the center pixel colors
+                    err, left_center = left_image.get_value(left_image.get_width() / 2, left_image.get_height() / 2)
+                    if err == sl.ERROR_CODE.SUCCESS:
+                        print("left_image center pixel R:", int(left_center[0]), " G:", int(left_center[1]), " B:", int(left_center[2]))
+                    else:
+                        print("error:", err)
+        """
+        return ERROR_CODE()
+
+    def set_svo_position(self, frame_number: int) -> None:
+        """
+        Sets the playback cursor to the desired frame number in the SVO file.
+
+        This method allows you to move around within a played-back SVO file. After calling, the next call to grab() will read the provided frame number.
+
+        :param frame_number: The number of the desired frame to be decoded.
+
+        .. note::
+            The method works only if the camera is open in SVO playback mode.
+
+
+        .. code-block:: text
+
+
+            import pyzed.sl as sl
+
+            def main():
+                # Create a ZED camera object
+                zed = sl.CameraOne()
+
+                # Set configuration parameters
+                init_params = sl.InitParametersOne()
+                init_params.set_from_svo_file("path/to/my/file.svo")
+
+                # Open the camera
+                err = zed.open(init_params)
+                if err != sl.ERROR_CODE.SUCCESS:
+                    print(repr(err))
+                    exit(-1)
+
+                # Loop between frames 0 and 50
+                left_image = sl.Mat()
+                while zed.get_svo_position() < zed.get_svo_number_of_frames() - 1:
+
+                    print("Current frame: ", zed.get_svo_position())
+
+                    # Loop if we reached frame 50
+                    if zed.get_svo_position() == 50:
+                        zed.set_svo_position(0)
+
+                    # Grab an image
+                    if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                        zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image
+
+                        # Use the image in your application
+
+                # Close the Camera
+                zed.close()
+                return 0
+
+            if __name__ == "__main__" :
+                main()
+
+        """
+        pass
+
+    def get_svo_position(self) -> int:
+        """
+        Returns the current playback position in the SVO file.
+
+        The position corresponds to the number of frames already read from the SVO file, starting from 0 to n.
+
+        Each grab() call increases this value by one (except when using InitParametersOne.svo_real_time_mode).
+        :return: The current frame position in the SVO file. -1 if the SDK is not reading an SVO.
+
+        .. note::
+            The method works only if the camera is open in SVO playback mode.
+
+
+        See set_svo_position() for an example.
+        """
+        return int()
+
+    def get_svo_number_of_frames(self) -> int:
+        """
+        Returns the number of frames in the SVO file.
+
+        :return: The total number of frames in the SVO file. -1 if the SDK is not reading an SVO.
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return int()
+
+    def ingest_data_into_svo(self, data: SVOData) -> ERROR_CODE:
+        """
+        Ingests an SVOData object into the SVO file.
+
+        :return: An error code stating the success or failure of the operation.
+
+        The method works only if the camera is open in SVO recording mode.
+        """
+        return ERROR_CODE()
+
+    def get_svo_data_keys(self) -> list:
+        """
+        Gets the external channels that can be retrieved from the SVO file.
+
+        :return: A list of keys.
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return []
+
+    def retrieve_svo_data(self, key: str, data: dict, ts_begin: Timestamp, ts_end: Timestamp) -> ERROR_CODE:
+        """
+        Retrieves SVO data from the SVO file for the given channel key and in the given timestamp range.
+
+        :return: An error code stating the success or failure of the operation.
+        :param key: The channel key.
+        :param data: The dict to be filled with SVOData objects, with timestamps as keys.
+        :param ts_begin: The beginning of the range.
+        :param ts_end: The end of the range.
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings(self, settings: VIDEO_SETTINGS, value = -1) -> ERROR_CODE:
+        """
+        Sets the value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+        :param settings: The setting to be set.
+        :param value: The value to set. Default: -1 (automatic mode).
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+        .. note::
+            Settings are not exported in the SVO file format.
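+
+        A minimal usage sketch (illustrative; valid value ranges depend on the setting and camera):
+        .. code-block:: text
+
+            # Request a fixed gain; passing -1 would restore automatic mode
+            err = zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, 50)
+            if err != sl.ERROR_CODE.SUCCESS:
+                print("error:", err)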
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings_range(self, settings: VIDEO_SETTINGS, value_min = -1, value_max = -1) -> ERROR_CODE:
+        """
+        Sets the value of the requested VIDEO_SETTINGS "camera setting" that supports two values (min/max).
+
+        This method only works with the following VIDEO_SETTINGS:
+            - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+            - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+            - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+        :param settings: The setting to be set.
+        :param value_min: The minimum value that can be reached (-1 or 0 gives full range).
+        :param value_max: The maximum value that can be reached (-1 or 0 gives full range).
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. warning:: If VIDEO_SETTINGS settings is not supported or value_min >= value_max, it will return ERROR_CODE.INVALID_FUNCTION_PARAMETERS.
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+
+        .. code-block:: text
+
+            # For ZED X based products, set the automatic exposure from 2 ms to 5 ms. The expected exposure time cannot go beyond those values
+            zed.set_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE, 2000, 5000)
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings_roi(self, settings: VIDEO_SETTINGS, roi: Rect, reset = False) -> ERROR_CODE:
+        """
+        Overloaded method for VIDEO_SETTINGS.AEC_AGC_ROI which takes a Rect as parameter.
+
+        :param settings: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact.
+        :param roi: Rect that defines the target to be applied for AEC/AGC computation. Must be given according to camera resolution.
+        :param reset: Cancel the manual ROI and reset it to the full image. Default: False
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+
+        .. code-block:: text
+
+            roi = sl.Rect(42, 56, 120, 15)
+            zed.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi)
+
+        """
+        return ERROR_CODE()
+
+    def get_camera_settings(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int]:
+        """
+        Returns the current value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+        Possible values (range) of each setting are available in VIDEO_SETTINGS.
+
+        :param setting: The requested setting.
+        :return: ERROR_CODE to indicate if the method was successful.
+        :return: The current value for the corresponding setting.
+
+        .. code-block:: text
+
+            err, gain = zed.get_camera_settings(sl.VIDEO_SETTINGS.GAIN)
+            if err == sl.ERROR_CODE.SUCCESS:
+                print("Current gain value:", gain)
+            else:
+                print("error:", err)
+
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+        .. note::
+            Settings are not exported in the SVO file format.
+        """
+        return tuple[ERROR_CODE, int]()
+
+    def get_camera_settings_range(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int, int]:
+        """
+        Returns the values of the requested VIDEO_SETTINGS "camera setting" that supports two values (min/max).
+
+        This method only works with the following VIDEO_SETTINGS:
+            - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+            - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+            - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+        Possible values (range) of each setting are available in VIDEO_SETTINGS.
+        :param setting: The requested setting.
+        :return: ERROR_CODE to indicate if the method was successful.
+        :return: The current value of the minimum for the corresponding setting.
+        :return: The current value of the maximum for the corresponding setting.
+
+        .. code-block:: text
+
+            err, aec_range_min, aec_range_max = zed.get_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE)
+            if err == sl.ERROR_CODE.SUCCESS:
+                print("Current AUTO_EXPOSURE_TIME_RANGE range values ==> min:", aec_range_min, "max:", aec_range_max)
+            else:
+                print("error:", err)
+
+
+        .. note::
+            Works only with ZED X cameras, which support low-level controls.
+        """
+        return tuple[ERROR_CODE, int, int]()
+
+    def get_camera_settings_roi(self, setting: VIDEO_SETTINGS, roi: Rect) -> ERROR_CODE:
+        """
+        Returns the current value of the currently used ROI for the camera setting AEC_AGC_ROI.
+
+        :param setting: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. (Direction: in)
+        :param roi: Roi that will be filled. (Direction: out)
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. code-block:: text
+
+            roi = sl.Rect()
+            err = zed.get_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi)
+            print("Current ROI for AEC_AGC: " + str(roi.x) + " " + str(roi.y) + " " + str(roi.width) + " " + str(roi.height))
+
+
+        .. note::
+            Works only if the camera is open in LIVE or STREAM mode with VIDEO_SETTINGS.AEC_AGC_ROI.
+
+        .. note::
+            It will return ERROR_CODE.INVALID_FUNCTION_CALL or ERROR_CODE.INVALID_FUNCTION_PARAMETERS otherwise.
+        """
+        return ERROR_CODE()
+
+    def is_camera_setting_supported(self, setting: VIDEO_SETTINGS) -> bool:
+        """
+        Returns whether the video setting is supported by the camera.
+
+        :param setting: The video setting to test. (Direction: in)
+        :return: True if the VIDEO_SETTINGS is supported by the camera, False otherwise.
+        """
+        return bool()
+
+    def get_current_fps(self) -> float:
+        """
+        Returns the current framerate at which the grab() method is successfully called.
+
+        The returned value is based on the difference of camera get_timestamp() "timestamps" between two successful grab() calls.
+
+        :return: The current SDK framerate.
+
+        .. warning:: The returned framerate (number of images grabbed per second) can be lower than InitParametersOne.camera_fps if the grab() function runs slower than the image stream or is called too often.
+
+        .. code-block:: text
+
+            current_fps = zed.get_current_fps()
+            print("Current framerate: ", current_fps)
+        """
+        return float()
+
+    def get_timestamp(self, time_reference: TIME_REFERENCE) -> Timestamp:
+        """
+        Returns the timestamp in the requested TIME_REFERENCE.
+
+        - When requesting the TIME_REFERENCE.IMAGE timestamp, the UNIX nanosecond timestamp of the latest grab() "grabbed" image will be returned.
+        \n This value corresponds to the time at which the entire image was available in the PC memory. As such, it ignores the communication time, which corresponds to 2 or 3 frame-times depending on the FPS (e.g. 33.3 ms to 50 ms at 60 FPS).
+
+        - When requesting the TIME_REFERENCE.CURRENT timestamp, the current UNIX nanosecond timestamp is returned.
+
+        This function can also be used when playing back an SVO file.
+
+        :param time_reference: The selected TIME_REFERENCE.
+        :return: The Timestamp in nanoseconds. 0 if not available (SVO file without compression).
+
+        .. note::
+            As this function returns UNIX timestamps, the reference it uses is common across several Camera instances.
+            \n This can help organize the grabbed images in a multi-camera application.
+
+        .. code-block:: text
+
+            last_image_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.IMAGE)
+            current_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.CURRENT)
+            print("Latest image timestamp: ", last_image_timestamp.get_nanoseconds(), "ns from Epoch.")
+            print("Current timestamp: ", current_timestamp.get_nanoseconds(), "ns from Epoch.")
+        """
+        return Timestamp()
+
+    def get_frame_dropped_count(self) -> int:
+        """
+        Returns the number of frames dropped since grab() was called for the first time.
+
+        A dropped frame corresponds to a frame that never made it to the grab method.
+        \n This can happen if two frames were extracted from the camera when grab() is called. The older frame will be dropped so as to always use the latest (which minimizes latency).
+
+        :return: The number of frames dropped since the first grab() call.
+        """
+        return int()
+
+    def get_camera_information(self, resizer = Resolution(0, 0)) -> CameraOneInformation:
+        """
+        Returns the CameraOneInformation associated with the camera.
+
+        As calibration parameters depend on the image resolution, you can provide a custom resolution as a parameter to get scaled information.
+        \n When reading an SVO file, the parameters will correspond to the camera used for recording.
+
+        :param resizer: You can specify a size different from the default image size to get the scaled camera information. Default: (0, 0) (meaning original image size).
+        :return: CameraOneInformation containing the calibration parameters of the ZED, as well as serial number and firmware version.
+        """
+        return CameraOneInformation()
+
+    def get_init_parameters(self) -> InitParametersOne:
+        """
+        Returns the InitParametersOne associated with the Camera object.
+
+        It corresponds to the structure given as argument to the open() method.
+
+        :return: InitParametersOne containing the parameters used to initialize the Camera object.
+        """
+        return InitParametersOne()
+
+    def get_streaming_parameters(self) -> StreamingParameters:
+        """
+        Returns the StreamingParameters used.
+
+        It corresponds to the structure given as argument to the enable_streaming() method.
+
+        :return: StreamingParameters containing the parameters used for streaming initialization.
+        """
+        return StreamingParameters()
+
+    def get_sensors_data(self, py_sensor_data, time_reference = TIME_REFERENCE.CURRENT) -> ERROR_CODE:
+        """
+        Retrieves the SensorsData (IMU, magnetometer, barometer) at a specific time reference.
+
+        - Calling get_sensors_data with TIME_REFERENCE.CURRENT gives you the latest sensors data received. Getting all the data requires calling this method at 800 Hz in a dedicated thread.
+        - Calling get_sensors_data with TIME_REFERENCE.IMAGE gives you the sensors data at the time of the latest image grab() "grabbed".
+
+        The SensorsData object contains the IMUData structure that was previously used in ZED SDK v2.X.
+        \n For IMU data, the values are provided in 2 ways:
+
+        - **Time-fused** pose estimation that can be accessed using:
+            * IMUData.get_pose "data.get_imu_data().get_pose()"
+        - **Raw values** from the IMU sensor:
+            * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+            * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+            Both the gyroscope and accelerometer are synchronized.
+
+        The delta time between previous and current values can be calculated using data.imu.timestamp
+
+        .. note::
+            The IMU quaternion (fused data) is given in the specified COORDINATE_SYSTEM of InitParametersOne.
+
+
+        :param py_sensor_data: The SensorsData variable to store the data. (Direction: out)
+        :param time_reference: Defines the time reference from which you want the data to be expressed. Default: TIME_REFERENCE.CURRENT. (Direction: in)
+        :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+        :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+        :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+        :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **time_reference** is not valid. See Warning.
+
+        .. warning:: In SVO reading mode, the TIME_REFERENCE.CURRENT is currently not available (yielding ERROR_CODE.INVALID_FUNCTION_PARAMETERS).
+        .. warning:: Only the quaternion data and barometer data (if available) at TIME_REFERENCE.IMAGE are available. Other values will be set to 0.
+        """
+        return ERROR_CODE()
+
+    def get_sensors_data_batch(self, py_sensor_data) -> ERROR_CODE:
+        """
+        Retrieves all SensorsData (IMU only) associated to the most recent grabbed frame, in the specified COORDINATE_SYSTEM of InitParametersOne.
+
+        For IMU data, the values are provided in 2 ways:
+
+        - **Time-fused** pose estimation that can be accessed using:
+            * IMUData.get_pose "data.get_imu_data().get_pose()"
+        - **Raw values** from the IMU sensor:
+            * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+            * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+            Both the gyroscope and accelerometer are synchronized.
+
+        The delta time between previous and current values can be calculated using data.imu.timestamp
+
+        :param py_sensor_data: The SensorsData list to store the data. (Direction: out)
+        :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+        :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+        :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+        :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **reference_time** is not valid. See Warning.
+
+        .. code-block:: text
+
+            if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                sensors_data = []
+                if (zed.get_sensors_data_batch(sensors_data) == sl.ERROR_CODE.SUCCESS):
+                    for data in sensors_data:
+                        print("IMU data: ", data.imu.get_angular_velocity(), data.imu.get_linear_acceleration())
+                        print("IMU pose: ", data.imu.get_pose().get_translation())
+                        print("IMU orientation: ", data.imu.get_orientation().get())
+        """
+        return ERROR_CODE()
+
+    def enable_streaming(self, streaming_parameters = StreamingParameters()) -> ERROR_CODE:
+        """
+        Creates a streaming pipeline.
+
+        :param streaming_parameters: A structure containing all the specific parameters for the streaming. Default: a preset of StreamingParameters.
+        :return: ERROR_CODE.SUCCESS if the streaming was successfully started.
+        :return: ERROR_CODE.INVALID_FUNCTION_CALL if open() was not successfully called before.
+        :return: ERROR_CODE.FAILURE if the streaming RTSP protocol was not able to start.
+        :return: ERROR_CODE.NO_GPU_COMPATIBLE if the streaming codec is not supported (in this case, use the H264 codec, which is supported on all NVIDIA GPUs the ZED SDK supports).
+
+        .. code-block:: text
+
+            import pyzed.sl as sl
+
+            def main() :
+                # Create a ZED camera object
+                zed = sl.CameraOne()
+
+                # Set initial parameters
+                init_params = sl.InitParametersOne()
+                init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+
+                # Open the camera
+                err = zed.open(init_params)
+                if err != sl.ERROR_CODE.SUCCESS :
+                    print(repr(err))
+                    exit(-1)
+
+                # Enable streaming
+                stream_params = sl.StreamingParameters()
+                stream_params.port = 30000
+                stream_params.bitrate = 8000
+                err = zed.enable_streaming(stream_params)
+                if err != sl.ERROR_CODE.SUCCESS :
+                    print(repr(err))
+                    exit(-1)
+
+                # Grab data during 500 frames
+                i = 0
+                while i < 500 :
+                    if zed.grab() == sl.ERROR_CODE.SUCCESS :
+                        i = i+1
+
+                zed.disable_streaming()
+                zed.close()
+                return 0
+
+            if __name__ == "__main__" :
+                main()
+        """
+        return ERROR_CODE()
+
+    def disable_streaming(self) -> None:
+        """
+        Disables the streaming initiated by enable_streaming().
+        .. note::
+            This method will automatically be called by close() if enable_streaming() was called.
+
+
+        See enable_streaming() for an example.
+        """
+        pass
+
+    def is_streaming_enabled(self) -> bool:
+        """
+        Tells if the streaming is running.
+        :return: True if the stream is running, False otherwise.
+        """
+        return bool()
+
+    def enable_recording(self, record: RecordingParameters) -> ERROR_CODE:
+        """
+        Creates an SVO file to be filled by enable_recording() and disable_recording().
+
+        \n SVO files are custom video files containing the un-rectified images from the camera along with some metadata like timestamps or IMU orientation (if applicable).
+        \n They can be used to simulate a live ZED and test a sequence with various SDK parameters.
+        \n Depending on the application, various compression modes are available. See SVO_COMPRESSION_MODE.
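+
+        For instance, a compression mode can be selected when building the RecordingParameters (illustrative sketch; codec availability depends on the hardware):
+        .. code-block:: text
+
+            record_params = sl.RecordingParameters("myVideoFile.svo", sl.SVO_COMPRESSION_MODE.H264)
+            err = zed.enable_recording(record_params)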
+
+        :param record: A structure containing all the specific parameters for the recording such as filename and compression mode. Default: a preset of RecordingParameters.
+        :return: An ERROR_CODE that defines if the SVO file was successfully created and can be filled with images.
+
+        .. warning:: This method can be called multiple times during a camera lifetime, but if **video_filename** already exists, the file will be erased.
+
+
+        .. code-block:: text
+
+            import pyzed.sl as sl
+
+            def main() :
+                # Create a ZED camera object
+                zed = sl.CameraOne()
+                # Set initial parameters
+                init_params = sl.InitParametersOne()
+                init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+                init_params.coordinate_units = sl.UNIT.METER # Set units in meters
+                # Open the camera
+                err = zed.open(init_params)
+                if (err != sl.ERROR_CODE.SUCCESS):
+                    print(repr(err))
+                    exit(-1)
+
+                # Enable video recording
+                record_params = sl.RecordingParameters("myVideoFile.svo")
+                err = zed.enable_recording(record_params)
+                if (err != sl.ERROR_CODE.SUCCESS):
+                    print(repr(err))
+                    exit(-1)
+
+                # Grab data during 500 frames
+                i = 0
+                while i < 500 :
+                    # Grab a new frame
+                    if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                        # Record the grabbed frame in the video file
+                        i = i + 1
+
+                zed.disable_recording()
+                print("Video has been saved ...")
+                zed.close()
+                return 0
+
+            if __name__ == "__main__" :
+                main()
+        """
+        return ERROR_CODE()
+
+    def disable_recording(self) -> None:
+        """
+        Disables the recording initiated by enable_recording() and closes the generated file.
+
+        .. note::
+            This method will automatically be called by close() if enable_recording() was called.
+
+
+        See enable_recording() for an example.
+        """
+        pass
+
+    def get_recording_status(self) -> RecordingStatus:
+        """
+        Gets the recording information.
+        :return: The recording state structure. For more details, see RecordingStatus.
+        """
+        return RecordingStatus()
+
+    def pause_recording(self, value = True) -> None:
+        """
+        Pauses or resumes the recording.
+        :param value: If True, the recording is paused. If False, the recording is resumed.
+        """
+        pass
+
+    def get_device_list() -> list[DeviceProperties]:
+        """
+        Lists all the connected devices with their associated information.
+
+        This method lists all the cameras available and provides their serial numbers, models and other information.
+        :return: The device properties for each connected camera.
+        """
+        return list[DeviceProperties]()
+
+    def reboot(sn : int, full_reboot: bool = True) -> ERROR_CODE:
+        """
+        Performs a hardware reset of the camera.
+
+        :param sn: Serial number of the camera to reset, or 0 to reset the first camera detected.
+        :param full_reboot: Perform a full reboot (sensors and video modules) if True, otherwise only the video module will be rebooted.
+        :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine.
+        :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected.
+        :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise.
+
+        .. note::
+            This method only works for ZED 2, ZED 2i, and newer camera models.
+
+
+        .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting.
+        """
+        return ERROR_CODE()
+
+    def reboot_from_input(input_type: INPUT_TYPE) -> ERROR_CODE:
+        """
+        Performs a hardware reset of all devices matching the InputType.
+
+        :param input_type: Input type of the devices to reset.
+        :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine.
+ :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected. + :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise. + :return: ERROR_CODE "ERROR_CODE::INVALID_FUNCTION_PARAMETERS" for SVOs and streams. + + .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting. + """ + return ERROR_CODE() + + diff --git a/py_workspace/libs/pyzed_pkg/pyproject.toml b/py_workspace/libs/pyzed_pkg/pyproject.toml new file mode 100644 index 0000000..54d6b34 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyproject.toml @@ -0,0 +1,17 @@ +[project] +name = "pyzed" +version = "0.1.0" +description = "Wrapper for ZED SDK" +requires-python = ">=3.12" +dependencies = [] + +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools] +packages = ["pyzed"] + +[tool.setuptools.package-data] +pyzed = ["*.so", "*.pyi"] + diff --git a/py_workspace/libs/pyzed_pkg/pyzed.egg-info/PKG-INFO b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/PKG-INFO new file mode 100644 index 0000000..66b3012 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/PKG-INFO @@ -0,0 +1,5 @@ +Metadata-Version: 2.4 +Name: pyzed +Version: 0.1.0 +Summary: Wrapper for ZED SDK +Requires-Python: >=3.12 diff --git a/py_workspace/libs/pyzed_pkg/pyzed.egg-info/SOURCES.txt b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/SOURCES.txt new file mode 100644 index 0000000..65dcf18 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/SOURCES.txt @@ -0,0 +1,8 @@ +pyproject.toml +pyzed/__init__.py +pyzed/sl.cpython-312-x86_64-linux-gnu.so +pyzed/sl.pyi +pyzed.egg-info/PKG-INFO +pyzed.egg-info/SOURCES.txt +pyzed.egg-info/dependency_links.txt +pyzed.egg-info/top_level.txt \ No newline at end of file diff --git a/py_workspace/libs/pyzed_pkg/pyzed.egg-info/dependency_links.txt b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/py_workspace/libs/pyzed_pkg/pyzed.egg-info/top_level.txt b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/top_level.txt new file mode 100644 index 0000000..247f07b --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed.egg-info/top_level.txt @@ -0,0 +1 @@ +pyzed diff --git a/py_workspace/libs/pyzed_pkg/pyzed/__init__.py b/py_workspace/libs/pyzed_pkg/pyzed/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/py_workspace/libs/pyzed_pkg/pyzed/__pycache__/__init__.cpython-312.pyc b/py_workspace/libs/pyzed_pkg/pyzed/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae21d2dd138e43d7e3db17e996c0724308db942c GIT binary patch literal 154 zcmX@j%ge<81j5PrnIQTxh(HIQS%4zb87dhx8U0o=6fpsLpFwJVS?ZS-7wPBZCnx6U z=VT`77gUyHwtE1ma>4<0CU8BV!RWkOctG$s`{D literal 0 HcmV?d00001 diff --git a/py_workspace/libs/pyzed_pkg/pyzed/build/lib/pyzed/__init__.py b/py_workspace/libs/pyzed_pkg/pyzed/build/lib/pyzed/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/py_workspace/libs/pyzed_pkg/pyzed/build/lib/pyzed/sl.pyi b/py_workspace/libs/pyzed_pkg/pyzed/build/lib/pyzed/sl.pyi new file mode 100644 index 0000000..33c405e --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed/build/lib/pyzed/sl.pyi @@ -0,0 +1,14672 @@ +import enum +import numpy as np +from typing import List, Tuple, Dict, Optional, Union, Any, overload, Mapping, MutableMapping + +class Timestamp(): + """ + Structure representing timestamps with 
utilities.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def data_ns(self) -> int:
+        """
+        Timestamp in nanoseconds.
+        """
+        return int()
+
+    @data_ns.setter
+    def data_ns(self, data_ns: Any) -> None:
+        pass
+
+    def get_nanoseconds(self) -> int:
+        """
+        Returns the timestamp in nanoseconds.
+        """
+        return int()
+
+    def get_microseconds(self) -> int:
+        """
+        Returns the timestamp in microseconds.
+        """
+        return int()
+
+    def get_milliseconds(self) -> int:
+        """
+        Returns the timestamp in milliseconds.
+        """
+        return int()
+
+    def get_seconds(self) -> int:
+        """
+        Returns the timestamp in seconds.
+        """
+        return int()
+
+    def set_nanoseconds(self, t_ns: int) -> None:
+        """
+        Sets the timestamp to a value in nanoseconds.
+        """
+        pass
+
+    def set_microseconds(self, t_us: int) -> None:
+        """
+        Sets the timestamp to a value in microseconds.
+        """
+        pass
+
+    def set_milliseconds(self, t_ms: int) -> None:
+        """
+        Sets the timestamp to a value in milliseconds.
+        """
+        pass
+
+    def set_seconds(self, t_s: int) -> None:
+        """
+        Sets the timestamp to a value in seconds.
+        """
+        pass
+
+
+class ERROR_CODE(enum.Enum):
+    """
+    Lists error codes in the ZED SDK.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | POTENTIAL_CALIBRATION_ISSUE | The camera has a potential calibration issue. |
+    | CONFIGURATION_FALLBACK | The operation could not proceed with the target configuration but did succeed with a fallback. |
+    | SENSORS_DATA_REQUIRED | The input data does not contain the high-frequency sensors data; this usually means a newer SVO/streaming input is required. In order to work, this module needs inertial data present in its input. |
+    | CORRUPTED_FRAME | The image could be corrupted. Enabled with the parameter InitParameters.enable_image_validity_check. |
+    | CAMERA_REBOOTING | The camera is currently rebooting. |
+    | SUCCESS | Standard code for successful behavior. |
+    | FAILURE | Standard code for unsuccessful behavior. |
+    | NO_GPU_COMPATIBLE | No GPU found or CUDA capability of the device is not supported. |
+    | NOT_ENOUGH_GPU_MEMORY | Not enough GPU memory for this depth mode. Try a different mode (such as PERFORMANCE), or increase the minimum depth value (see InitParameters.depth_minimum_distance). |
+    | CAMERA_NOT_DETECTED | No camera was detected. |
+    | SENSORS_NOT_INITIALIZED | The MCU that controls the sensors module has an invalid serial number. You can try to recover it by launching the **ZED Diagnostic** tool from the command line with the option ``-r``. |
+    | SENSORS_NOT_AVAILABLE | A camera with sensors is detected but the sensors (IMU, barometer, ...) cannot be opened. Only the MODEL.ZED does not have sensors. Unplugging/replugging is required. |
+    | INVALID_RESOLUTION | In case of invalid resolution parameter, such as an upsize beyond the original image size in Camera.retrieve_image. |
+    | LOW_USB_BANDWIDTH | Insufficient bandwidth for the correct use of the camera. This issue can occur when you use multiple cameras or a USB 2.0 port. |
+    | CALIBRATION_FILE_NOT_AVAILABLE | The calibration file of the camera is not found on the host machine. Use **ZED Explorer** or **ZED Calibration** to download the factory calibration file. |
+    | INVALID_CALIBRATION_FILE | The calibration file is not valid. Try to download the factory calibration file or recalibrate your camera using **ZED Calibration**. |
+    | INVALID_SVO_FILE | The provided SVO file is not valid. |
+    | SVO_RECORDING_ERROR | An error occurred while trying to record an SVO (not enough free storage, invalid file, ...). |
+    | SVO_UNSUPPORTED_COMPRESSION | An SVO-related error that occurs when NVIDIA-based compression cannot be loaded. |
+    | END_OF_SVOFILE_REACHED | SVO end of file has been reached. No frame will be available until the SVO position is reset. |
+    | INVALID_COORDINATE_SYSTEM | The requested coordinate system is not available. |
+    | INVALID_FIRMWARE | The firmware of the camera is out of date. Update to the latest version. |
+    | INVALID_FUNCTION_PARAMETERS | Invalid parameters have been given for the function. |
+    | CUDA_ERROR | A CUDA error has been detected in the process, in sl.Camera.grab() or sl.Camera.retrieve_xxx() only. Activate verbose in sl.Camera.open() for more info. |
+    | CAMERA_NOT_INITIALIZED | The ZED SDK is not initialized. Probably a missing call to sl.Camera.open(). |
+    | NVIDIA_DRIVER_OUT_OF_DATE | Your NVIDIA driver is too old and not compatible with your current CUDA version. |
+    | INVALID_FUNCTION_CALL | The call of the function is not valid in the current context. Could be a missing call of sl.Camera.open(). |
+    | CORRUPTED_SDK_INSTALLATION | The ZED SDK was not able to load its dependencies or some assets are missing. Reinstall the ZED SDK or check for missing dependencies (cuDNN, TensorRT). |
+    | INCOMPATIBLE_SDK_VERSION | The installed ZED SDK is incompatible with the one used to compile the program. |
+    | INVALID_AREA_FILE | The given area file does not exist. Check the path. |
+    | INCOMPATIBLE_AREA_FILE | The area file does not contain enough data to be used or the sl.DEPTH_MODE used during the creation of the area file is different from the one currently set. |
+    | CAMERA_FAILED_TO_SETUP | Failed to open the camera at the proper resolution. Try another resolution or make sure that the UVC driver is properly installed. |
+    | CAMERA_DETECTION_ISSUE | Your camera cannot be opened. Try replugging it to another port or flipping the USB-C connector (if there is one). |
+    | CANNOT_START_CAMERA_STREAM | Cannot start the camera stream. Make sure your camera is not already used by another process or blocked by a firewall or antivirus. |
+    | NO_GPU_DETECTED | No GPU found. CUDA is unable to list it. Can be a driver/reboot issue. |
+    | PLANE_NOT_FOUND | Plane not found. Either no plane is detected in the scene, at the location or corresponding to the floor, or the floor plane doesn't match the prior given. |
+    | MODULE_NOT_COMPATIBLE_WITH_CAMERA | The module you try to use is not compatible with your camera sl.MODEL. Note: sl.MODEL.ZED does not have an IMU and does not support the AI modules. |
+    | MOTION_SENSORS_REQUIRED | The module needs the sensors to be enabled (see InitParameters.sensors_required). |
+    | MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION | The module needs a newer version of CUDA. |
+    """
+    POTENTIAL_CALIBRATION_ISSUE = enum.auto()
+    CONFIGURATION_FALLBACK = enum.auto()
+    SENSORS_DATA_REQUIRED = enum.auto()
+    CORRUPTED_FRAME = enum.auto()
+    CAMERA_REBOOTING = enum.auto()
+    SUCCESS = enum.auto()
+    FAILURE = enum.auto()
+    NO_GPU_COMPATIBLE = enum.auto()
+    NOT_ENOUGH_GPU_MEMORY = enum.auto()
+    CAMERA_NOT_DETECTED = enum.auto()
+    SENSORS_NOT_INITIALIZED = enum.auto()
+    SENSORS_NOT_AVAILABLE = enum.auto()
+    INVALID_RESOLUTION = enum.auto()
+    LOW_USB_BANDWIDTH = enum.auto()
+    CALIBRATION_FILE_NOT_AVAILABLE = enum.auto()
+    INVALID_CALIBRATION_FILE = enum.auto()
+    INVALID_SVO_FILE = enum.auto()
+    SVO_RECORDING_ERROR = enum.auto()
+    END_OF_SVOFILE_REACHED = enum.auto()
+    SVO_UNSUPPORTED_COMPRESSION = enum.auto()
+    INVALID_COORDINATE_SYSTEM = enum.auto()
+    INVALID_FIRMWARE = enum.auto()
+    INVALID_FUNCTION_PARAMETERS = enum.auto()
+    CUDA_ERROR = enum.auto()
+    CAMERA_NOT_INITIALIZED = enum.auto()
+    NVIDIA_DRIVER_OUT_OF_DATE = enum.auto()
+    INVALID_FUNCTION_CALL = enum.auto()
+    CORRUPTED_SDK_INSTALLATION = enum.auto()
+    INCOMPATIBLE_SDK_VERSION = enum.auto()
+    INVALID_AREA_FILE = enum.auto()
+    INCOMPATIBLE_AREA_FILE = enum.auto()
+    CAMERA_FAILED_TO_SETUP = enum.auto()
+    CAMERA_DETECTION_ISSUE = enum.auto()
+    CANNOT_START_CAMERA_STREAM = enum.auto()
+    NO_GPU_DETECTED = enum.auto()
+    PLANE_NOT_FOUND = enum.auto()
+    MODULE_NOT_COMPATIBLE_WITH_CAMERA = enum.auto()
+    MOTION_SENSORS_REQUIRED = enum.auto()
+    MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION = enum.auto()
+    DRIVER_FAILURE = enum.auto()
+    LAST = enum.auto()
+    def __str__(self) -> None:
+        pass
+
+    def __repr__(self) -> None:
+        pass
+
+    def __lt__(self, other) -> None:
+        pass
+
+    def __le__(self, other) -> None:
+        pass
+
+    def __gt__(self, other) -> None:
+        pass
+
+    def __ge__(self, other) -> None:
+        pass
+
+
+def _initialize_error_codes() -> None:
+    """
+    Lists error codes in the ZED SDK. See ERROR_CODE for the full table of enumerators and their descriptions.
+    """
+    pass
+
+class MODEL(enum.Enum):
+    """
+    Lists ZED camera models.
+ + + | Enumerator | | + |:---:|:---:| + | ZED | ZED camera model | + | ZED_M | ZED Mini (ZED M) camera model | + | ZED2 | ZED 2 camera model | + | ZED2i | ZED 2i camera model | + | ZED_X | ZED X camera model | + | ZED_XM | ZED X Mini (ZED XM) camera model | + | ZED_X_HDR | ZED X HDR camera model | + | ZED_X_HDR_MINI | ZED X HDR Mini camera model | + | ZED_X_HDR_MAX | ZED X HDR Wide camera model | + | VIRTUAL_ZED_X | Virtual ZED X generated from 2 ZED X One | + | ZED_XONE_GS | ZED X One with global shutter AR0234 sensor | + | ZED_XONE_UHD | ZED X One with 4K rolling shutter IMX678 sensor | + | ZED_XONE_HDR | ZED X One HDR | + """ + ZED = enum.auto() + ZED_M = enum.auto() + ZED2 = enum.auto() + ZED2i = enum.auto() + ZED_X = enum.auto() + ZED_XM = enum.auto() + ZED_X_HDR = enum.auto() + ZED_X_HDR_MINI = enum.auto() + ZED_X_HDR_MAX = enum.auto() + VIRTUAL_ZED_X = enum.auto() + ZED_XONE_GS = enum.auto() + ZED_XONE_UHD = enum.auto() + ZED_XONE_HDR = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class INPUT_TYPE(enum.Enum): + """ + Lists available input types in the ZED SDK. + + + | Enumerator | | + |:---:|:---:| + | USB | USB input mode | + | SVO | SVO file input mode | + | STREAM | STREAM input mode (requires to use Camera.enable_streaming "enable_streaming()" / Camera.disable_streaming "disable_streaming()" on the "sender" side) | + | GMSL | GMSL input mode (only on NVIDIA Jetson) | + """ + USB = enum.auto() + SVO = enum.auto() + STREAM = enum.auto() + GMSL = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class AI_MODELS(enum.Enum): + """ + Lists available AI models. 
+ + | Enumerator | | + |:---:|:---:| + | MULTI_CLASS_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_FAST | + | MULTI_CLASS_MEDIUM_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_MEDIUM | + | MULTI_CLASS_ACCURATE_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE | + | HUMAN_BODY_FAST_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | HUMAN_BODY_MEDIUM_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_MEDIUM | + | HUMAN_BODY_ACCURATE_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE | + | HUMAN_BODY_38_FAST_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | HUMAN_BODY_38_MEDIUM_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | HUMAN_BODY_38_ACCURATE_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST | + | PERSON_HEAD_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.PERSON_HEAD_BOX_FAST | + | PERSON_HEAD_ACCURATE_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.PERSON_HEAD_BOX_ACCURATE | + | REID_ASSOCIATION | Related to sl.BatchParameters.enable | + | NEURAL_LIGHT_DEPTH | Related to sl.DEPTH_MODE.NEURAL_LIGHT_DEPTH | + | NEURAL_DEPTH | Related to sl.DEPTH_MODE.NEURAL | + | NEURAL_PLUS_DEPTH | Related to sl.DEPTH_MODE.NEURAL_PLUS_DEPTH | + """ + MULTI_CLASS_DETECTION = enum.auto() + MULTI_CLASS_MEDIUM_DETECTION = enum.auto() + MULTI_CLASS_ACCURATE_DETECTION = enum.auto() + HUMAN_BODY_FAST_DETECTION = enum.auto() + HUMAN_BODY_MEDIUM_DETECTION = enum.auto() + HUMAN_BODY_ACCURATE_DETECTION = enum.auto() + HUMAN_BODY_38_FAST_DETECTION = enum.auto() + HUMAN_BODY_38_MEDIUM_DETECTION = enum.auto() + HUMAN_BODY_38_ACCURATE_DETECTION = enum.auto() + PERSON_HEAD_DETECTION = enum.auto() + PERSON_HEAD_ACCURATE_DETECTION = enum.auto() + REID_ASSOCIATION = enum.auto() + NEURAL_LIGHT_DEPTH = enum.auto() + NEURAL_DEPTH = enum.auto() + NEURAL_PLUS_DEPTH = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_DETECTION_MODEL(enum.Enum): + """ + Lists available models for the object detection module. + + + | Enumerator | | + |:---:|:---:| + | MULTI_CLASS_BOX_FAST | Any objects, bounding box based. | + | MULTI_CLASS_BOX_ACCURATE | Any objects, bounding box based, more accurate but slower than the base model. | + | MULTI_CLASS_BOX_MEDIUM | Any objects, bounding box based, compromise between accuracy and speed. | + | PERSON_HEAD_BOX_FAST | Bounding box detector specialized in person heads particularly well suited for crowded environments. The person localization is also improved. | + | PERSON_HEAD_BOX_ACCURATE | Bounding box detector specialized in person heads, particularly well suited for crowded environments. The person localization is also improved, more accurate but slower than the base model. | + | CUSTOM_BOX_OBJECTS | For external inference, using your own custom model and/or frameworks. This mode disables the internal inference engine, the 2D bounding box detection must be provided. 
| + """ + MULTI_CLASS_BOX_FAST = enum.auto() + MULTI_CLASS_BOX_MEDIUM = enum.auto() + MULTI_CLASS_BOX_ACCURATE = enum.auto() + PERSON_HEAD_BOX_FAST = enum.auto() + PERSON_HEAD_BOX_ACCURATE = enum.auto() + CUSTOM_BOX_OBJECTS = enum.auto() + CUSTOM_YOLOLIKE_BOX_OBJECTS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class BODY_TRACKING_MODEL(enum.Enum): + """ + Lists available models for the body tracking module. + + + | Enumerator | | + |:---:|:---:| + | HUMAN_BODY_FAST | Keypoints based, specific to human skeleton, real time performance even on Jetson or low end GPU cards. | + | HUMAN_BODY_ACCURATE | Keypoints based, specific to human skeleton, state of the art accuracy, requires powerful GPU. | + | HUMAN_BODY_MEDIUM | Keypoints based, specific to human skeleton, compromise between accuracy and speed. | + """ + HUMAN_BODY_FAST = enum.auto() + HUMAN_BODY_ACCURATE = enum.auto() + HUMAN_BODY_MEDIUM = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_FILTERING_MODE(enum.Enum): + """ + Lists supported bounding box preprocessing. + + + | Enumerator | | + |:---:|:---:| + | NONE | The ZED SDK will not apply any preprocessing to the detected objects. | + | NMS3D | The ZED SDK will remove objects that are in the same 3D position as an already tracked object (independent of class id). | + | NMS3D_PER_CLASS | The ZED SDK will remove objects that are in the same 3D position as an already tracked object of the same class id. | + """ + NONE = enum.auto() + NMS3D = enum.auto() + NMS3D_PER_CLASS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_ACCELERATION_PRESET(enum.Enum): + """ + Lists supported presets for maximum acceleration allowed for a given tracked object. + + + | Enumerator | | + |:---:|:---:| + | DEFAULT | The ZED SDK will automatically determine the appropriate maximum acceleration. | + | LOW | Suitable for objects with relatively low maximum acceleration (e.g., a person walking). | + | MEDIUM | Suitable for objects with moderate maximum acceleration (e.g., a person running). | + | HIGH | Suitable for objects with high maximum acceleration (e.g., a car accelerating, a kicked sports ball). | + """ + DEFAULT = enum.auto() + LOW = enum.auto() + MEDIUM = enum.auto() + HIGH = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class CAMERA_STATE(enum.Enum): + """ + Lists possible camera states. + + + | Enumerator | | + |:---:|:---:| + | AVAILABLE | The camera can be opened by the ZED SDK. | + | NOT_AVAILABLE | The camera is already opened and unavailable. 
| + """ + AVAILABLE = enum.auto() + NOT_AVAILABLE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SIDE(enum.Enum): + """ + Lists possible sides on which to get data from. + + | Enumerator | | + |:---:|:---:| + | LEFT | Left side only. | + | RIGHT | Right side only. | + | BOTH | Left and right side. | + """ + LEFT = enum.auto() + RIGHT = enum.auto() + BOTH = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class RESOLUTION(enum.Enum): + """ + Lists available resolutions. + .. note:: + The VGA resolution does not respect the 640*480 standard to better fit the camera sensor (672*376 is used). + + .. warning:: All resolutions are not available for every camera. + .. warning:: You can find the available resolutions for each camera in `our documentation `_. + + | Enumerator | | + |:---:|:---:| + | HD4K | 3856x2180 for imx678 mono | + | QHDPLUS | 3800x1800 | + | HD2K | 2208*1242 (x2) Available FPS: 15 | + | HD1080 | 1920*1080 (x2) Available FPS: 15, 30 | + | HD1200 | 1920*1200 (x2) Available FPS: 15, 30, 60 | + | HD1536 | 1920*1536 (x2) Available FPS: 30 | + | HD720 | 1280*720 (x2) Available FPS: 15, 30, 60 | + | SVGA | 960*600 (x2) Available FPS: 15, 30, 60, 120 | + | VGA | 672*376 (x2) Available FPS: 15, 30, 60, 100 | + | AUTO | Select the resolution compatible with the camera: * ZED X/X Mini: HD1200* other cameras: HD720 | + """ + HD4K = enum.auto() + QHDPLUS = enum.auto() + HD2K = enum.auto() + HD1080 = enum.auto() + HD1200 = enum.auto() + HD1536 = enum.auto() + HD720 = enum.auto() + SVGA = enum.auto() + VGA = enum.auto() + AUTO = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +def sleep_ms(time: int) -> None: + """ + Blocks the execution of the current thread for **time milliseconds. + :param time: Number of milliseconds to wait. + """ + pass + +def sleep_us(time: int) -> None: + """ + Blocks the execution of the current thread for **time microseconds. + :param time: Number of microseconds to wait. + """ + pass + +def get_resolution(resolution: RESOLUTION) -> Resolution: + """ + Gets the corresponding sl.Resolution from an sl.RESOLUTION. + + :param resolution: The wanted sl.RESOLUTION. + :return: The sl.Resolution corresponding to sl.RESOLUTION given as argument. + """ + return Resolution() + +class DeviceProperties: + """ + Class containing information about the properties of a camera. + + .. note:: + A camera_model sl.MODEL.ZED_M with an id '-1' can be due to an inverted USB-C cable. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_state(self) -> CAMERA_STATE: + """ + State of the camera. + + Default: sl.CAMERA_STATE.NOT_AVAILABLE + """ + return CAMERA_STATE() + + @camera_state.setter + def camera_state(self, camera_state: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Id of the camera. 
+ + Default: -1 + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def camera_name(self) -> str: + """ + Name of Camera in DT (ZED_CAM1) + """ + return str() + + @camera_name.setter + def camera_name(self, camera_name: Any) -> None: + pass + + @property + def i2c_port(self) -> int: + """ + i2c port of the camera. + """ + return int() + + @i2c_port.setter + def i2c_port(self, i2c_port: Any) -> None: + pass + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera. + """ + return MODEL() + + @camera_model.setter + def camera_model(self, camera_model: Any) -> None: + pass + + @identifier.setter + def identifier(self, identifier: Any) -> None: + pass + + @property + def camera_sensor_model(self) -> str: + """ + Name of sensor (zedx) + """ + return str() + + @camera_sensor_model.setter + def camera_sensor_model(self, camera_sensor_model: Any) -> None: + pass + + @property + def path(self) -> str: + """ + System path of the camera. + """ + return str() + + @path.setter + def path(self, path: Any) -> None: + pass + + @property + def sensor_address_right(self) -> int: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return int() + + @sensor_address_right.setter + def sensor_address_right(self, sensor_address_right: Any) -> None: + pass + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + + Default: 0 + .. warning:: Not provided for Windows. + """ + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + @property + def sensor_address_left(self) -> int: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return int() + + @sensor_address_left.setter + def sensor_address_left(self, sensor_address_left: Any) -> None: + pass + + @property + def camera_badge(self) -> str: + """ + Badge name (zedx_ar0234) + """ + return str() + + @camera_badge.setter + def camera_badge(self, camera_badge: Any) -> None: + pass + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type of the camera. + """ + return INPUT_TYPE() + + @input_type.setter + def input_type(self, input_type: Any) -> None: + pass + + def identifier(self) -> np.numpy[np.uint8]: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return np.numpy[np.uint8]() + + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + +class Matrix3f: + """ + Class representing a generic 3*3 matrix. + + It is defined in a row-major order, it means that, in the value buffer, the entire first row is stored first, followed by the entire second row, and so on. + \n The data value of the matrix can be accessed with the r() method. + .. code-block:: text + + | r00 r01 r02 | + | r10 r11 r12 | + | r20 r21 r22 | + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def matrix_name(self) -> str: + """ + Name of the matrix (optional). + """ + return str() + + @matrix_name.setter + def matrix_name(self, matrix_name: Any) -> None: + pass + + @r.setter + def r(self, r: Any) -> None: + pass + + @property + def nbElem(self) -> int: + return int() + + def _initialize_from_input(self, input_data) -> None: + pass + + def __dealloc__(self) -> None: + pass + + def init_matrix(self, matrix) -> None: + """ + Copy the values from another sl.Matrix3f. + :param matrix: sl.Matrix3f to copy. + """ + pass + + def inverse(self) -> None: + """ + Sets the sl.Matrix3f to its inverse. 
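+
+ A minimal in-place sketch:
+
+ .. code-block:: python
+
+     rot = sl.Matrix3f()
+     rot.set_identity()  # start from a trivially invertible matrix
+     rot.inverse()       # rot now holds its own inverse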
+ """ + pass + + def inverse_mat(self, rotation) -> Matrix3f: + """ + Returns the inverse of a sl.Matrix3f. + :param rotation: sl.Matrix3f to compute the inverse from. + :return: The inverse of the sl.Matrix3f given as input. + """ + return Matrix3f() + + def transpose(self) -> None: + """ + Sets the sl.Matrix3f to its transpose. + """ + pass + + def transpose_mat(self, rotation) -> Matrix3f: + """ + Returns the transpose of a sl.Matrix3f. + :param rotation: sl.Matrix3f to compute the transpose from. + :return: The transpose of the sl.Matrix3f given as input. + """ + return Matrix3f() + + def set_identity(self) -> Matrix3f: + """ + Sets the sl.Matrix3f to identity. + :return: itself + """ + return Matrix3f() + + def identity(self) -> Matrix3f: + """ + Creates an identity sl.Matrix3f. + :return: A sl.Matrix3f set to identity. + """ + return Matrix3f() + + def set_zeros(self) -> None: + """ + Sets the sl.Matrix3f to zero. + """ + pass + + def zeros(self) -> Matrix3f: + """ + Creates a sl.Matrix3f filled with zeros. + :return: A sl.Matrix3f filled with zeros. + """ + return Matrix3f() + + def get_infos(self) -> str: + """ + Returns the components of the sl.Matrix3f in a string. + :return: A string containing the components of the current sl.Matrix3f. + """ + return str() + + def r(self) -> np.numpy[float][float]: + """ + 3*3 numpy array of inner data. + """ + return np.numpy[float][float]() + + def __mul__(self, other) -> None: + pass + + def __richcmp__(left, right, op) -> None: + pass + + def __getitem__(self, key) -> None: + pass + + def __setitem__(self, key, value) -> None: + pass + + def __repr__(self) -> None: + pass + + +class Matrix4f: + """ + Class representing a generic 4*4 matrix. + + It is defined in a row-major order, it means that, in the value buffer, the entire first row is stored first, followed by the entire second row, and so on. + \n The data value of the matrix can be accessed with the r() method. + .. code-block:: text + + | r00 r01 r02 tx | + | r10 r11 r12 ty | + | r20 r21 r22 tz | + | m30 m31 m32 m33 | + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def matrix_name(self) -> str: + """ + Returns the name of the matrix (optional). + """ + return str() + + @matrix_name.setter + def matrix_name(self, matrix_name: Any) -> None: + pass + + @m.setter + def m(self, m: Any) -> None: + pass + + def _initialize_from_input(self, input_data) -> None: + pass + + def __dealloc__(self) -> None: + pass + + def init_matrix(self, matrix: Matrix4f) -> None: + """ + Copy the values from another sl.Matrix4f. + :param matrix: sl.Matrix4f to copy. + """ + pass + + def inverse(self) -> ERROR_CODE: + """ + Sets the sl.Matrix4f to its inverse. + :return: sl.ERROR_CODE.SUCCESS if the inverse has been computed, sl.ERROR_CODE.FAILURE is not (det = 0). + """ + return ERROR_CODE() + + def inverse_mat(self, rotation: Matrix4f) -> Matrix4f: + """ + Returns the inverse of a sl.Matrix4f. + :param rotation: sl.Matrix4f to compute the inverse from. + :return: The inverse of the sl.Matrix4f given as input. + """ + return Matrix4f() + + def transpose(self) -> None: + """ + Sets the sl.Matrix4f to its transpose. + """ + pass + + def transpose_mat(self, rotation: Matrix4f) -> Matrix4f: + """ + Returns the transpose of a sl.Matrix4f. + :param rotation: sl.Matrix4f to compute the transpose from. + :return: The transpose of the sl.Matrix4f given as input. + """ + return Matrix4f() + + def set_identity(self) -> Matrix4f: + """ + Sets the sl.Matrix4f to identity. 
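+
+ A short sketch of building a pose (the translation values are arbitrary placeholders):
+
+ .. code-block:: python
+
+     pose = sl.Matrix4f()
+     pose.set_identity()
+     pose.set_sub_vector3f(0.1, 0.0, 0.3)  # write a translation into the last column
+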
+ :return: itself + """ + return Matrix4f() + + def identity(self) -> Matrix4f: + """ + Creates an identity sl.Matrix4f. + :return: A sl.Matrix3f set to identity. + """ + return Matrix4f() + + def set_zeros(self) -> None: + """ + Sets the sl.Matrix4f to zero. + """ + pass + + def zeros(self) -> Matrix4f: + """ + Creates a sl.Matrix4f filled with zeros. + :return: A sl.Matrix4f filled with zeros. + """ + return Matrix4f() + + def get_infos(self) -> str: + """ + Returns the components of the sl.Matrix4f in a string. + :return: A string containing the components of the current sl.Matrix4f. + """ + return str() + + def set_sub_matrix3f(self, input: Matrix3f, row = 0, column = 0) -> ERROR_CODE: + """ + Sets a sl.Matrix3f inside the sl.Matrix4f. + .. note:: + Can be used to set the rotation matrix when the sl.Matrix4f is a pose or an isometric matrix. + + :param input: Sub-matrix to put inside the sl.Matrix4f. + :param row: Index of the row to start the 3x3 block. Must be 0 or 1. + :param column: Index of the column to start the 3x3 block. Must be 0 or 1. + + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_sub_vector3f(self, input0: float, input1: float, input2: float, column = 3) -> ERROR_CODE: + """ + Sets a 3x1 Vector inside the sl.Matrix4f at the specified column index. + .. note:: + Can be used to set the translation/position matrix when the sl.Matrix4f is a pose or an isometry. + + :param input0: First value of the 3x1 Vector to put inside the sl.Matrix4f. + :param input1: Second value of the 3x1 Vector to put inside the sl.Matrix4f. + :param input2: Third value of the 3x1 Vector to put inside the sl.Matrix4f. + :param column: Index of the column to start the 3x3 block. By default, it is the last column (translation for a sl.Pose). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_sub_vector4f(self, input0: float, input1: float, input2: float, input3: float, column = 3) -> ERROR_CODE: + """ + Sets a 4x1 Vector inside the sl.Matrix4f at the specified column index. + :param input0: First value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input1: Second value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input2: Third value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input3: Fourth value of the 4x1 Vector to put inside the sl.Matrix4f. + :param column: Index of the column to start the 3x3 block. By default, it is the last column (translation for a sl.Pose). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def m(self) -> np.numpy[float][float]: + """ + 4*4 numpy array of inner data. + """ + return np.numpy[float][float]() + + def __mul__(self, other) -> None: + pass + + def __richcmp__(left, right, op) -> None: + pass + + def __getitem__(self, key) -> None: + pass + + def __setitem__(self, key, value) -> None: + pass + + def __repr__(self) -> None: + pass + + +class VIDEO_SETTINGS(enum.Enum): + """ + Lists available camera settings for the camera (contrast, hue, saturation, gain, ...). + + .. warning:: All VIDEO_SETTINGS are not supported for all camera models. You can find the supported VIDEO_SETTINGS for each ZED camera in our `documentation `_.\n\n + GAIN and EXPOSURE are linked in auto/default mode (see sl.Camera.set_camera_settings()). 
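+
+ For example (a minimal sketch; ``zed`` is assumed to be an already opened sl.Camera):
+
+ .. code-block:: python
+
+     zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, 50)    # manual gain, 0-100
+     zed.set_camera_settings(sl.VIDEO_SETTINGS.AEC_AGC, 1)  # hand exposure/gain back to auto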
+ + | Enumerator | | + |:---:|:---:| + | BRIGHTNESS | Brightness control Affected value should be between 0 and 8. Note: Not available for ZED X/X Mini cameras. | + | CONTRAST | Contrast control Affected value should be between 0 and 8. Note: Not available for ZED X/X Mini cameras. | + | HUE | Hue control Affected value should be between 0 and 11. Note: Not available for ZED X/X Mini cameras. | + | SATURATION | Saturation control Affected value should be between 0 and 8. | + | SHARPNESS | Digital sharpening control Affected value should be between 0 and 8. | + | GAMMA | ISP gamma control Affected value should be between 1 and 9. | + | GAIN | Gain control Affected value should be between 0 and 100 for manual control. Note: If EXPOSURE is set to -1 (automatic mode), then GAIN will be automatic as well. | + | EXPOSURE | Exposure control Affected value should be between 0 and 100 for manual control. The exposition is mapped linearly in a percentage of the following max values. Special case for ``EXPOSURE = 0`` that corresponds to 0.17072ms. The conversion to milliseconds depends on the framerate: * 15fps & ``EXPOSURE = 100`` -> 19.97ms* 30fps & ``EXPOSURE = 100`` -> 19.97ms* 60fps & ``EXPOSURE = 100`` -> 10.84072ms* 100fps & ``EXPOSURE = 100`` -> 10.106624ms | + | AEC_AGC | Defines if the GAIN and EXPOSURE are in automatic mode or not. Setting GAIN or EXPOSURE values will automatically set this value to 0. | + | AEC_AGC_ROI | Defines the region of interest for automatic exposure/gain computation. To be used with the dedicated Camera.set_camera_settings_roi "set_camera_settings_roi()" / Camera.get_camera_settings_roi "get_camera_settings_roi()" methods. | + | WHITEBALANCE_TEMPERATURE | Color temperature control Affected value should be between 2800 and 6500 with a step of 100.Note: Setting a value will automatically set WHITEBALANCE_AUTO to 0. | + | WHITEBALANCE_AUTO | Defines if the white balance is in automatic mode or not. | + | LED_STATUS | Status of the front LED of the camera. Set to 0 to disable the light, 1 to enable the light. Default value is on. Note: Requires camera firmware 1523 at least. | + | EXPOSURE_TIME | Real exposure time control in microseconds. Note: Only available for ZED X/X Mini cameras.Note: Replace EXPOSURE setting. | + | ANALOG_GAIN | Real analog gain (sensor) control in mDB. The range is defined by Jetson DTS and by default [1000-16000]. Note: Only available for ZED X/X Mini cameras.Note: Replace GAIN settings. | + | DIGITAL_GAIN | Real digital gain (ISP) as a factor. The range is defined by Jetson DTS and by default [1-256]. Note: Only available for ZED X/X Mini cameras.Note: Replace GAIN settings. | + | AUTO_EXPOSURE_TIME_RANGE | Range of exposure auto control in micro seconds. Used with Camera.set_camera_settings_range "set_camera_settings_range()". Min/max range between max range defined in DTS. By default: [28000 - or 19000] us. Note: Only available for ZED X/X Mini cameras. | + | AUTO_ANALOG_GAIN_RANGE | Range of sensor gain in automatic control. Used with Camera.set_camera_settings_range "set_camera_settings_range()". Min/max range between max range defined in DTS. By default: [1000 - 16000] mdB. Note: Only available for ZED X/X Mini cameras. | + | AUTO_DIGITAL_GAIN_RANGE | Range of digital ISP gain in automatic control. Used with Camera.set_camera_settings_range "set_camera_settings_range()". Min/max range between max range defined in DTS. By default: [1 - 256]. Note: Only available for ZED X/X Mini cameras. 
| + | EXPOSURE_COMPENSATION | Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Affected value should be between 0 and 100 (mapped between [-2.0,2.0]). Default value is 50, i.e. no compensation applied. Note: Only available for ZED X/X Mini cameras. | + | DENOISING | Level of denoising applied on both left and right images. Affected value should be between 0 and 100. Default value is 50. Note: Only available for ZED X/X Mini cameras. | + """ + BRIGHTNESS = enum.auto() + CONTRAST = enum.auto() + HUE = enum.auto() + SATURATION = enum.auto() + SHARPNESS = enum.auto() + GAMMA = enum.auto() + GAIN = enum.auto() + EXPOSURE = enum.auto() + AEC_AGC = enum.auto() + AEC_AGC_ROI = enum.auto() + WHITEBALANCE_TEMPERATURE = enum.auto() + WHITEBALANCE_AUTO = enum.auto() + LED_STATUS = enum.auto() + EXPOSURE_TIME = enum.auto() + ANALOG_GAIN = enum.auto() + DIGITAL_GAIN = enum.auto() + AUTO_EXPOSURE_TIME_RANGE = enum.auto() + AUTO_ANALOG_GAIN_RANGE = enum.auto() + AUTO_DIGITAL_GAIN_RANGE = enum.auto() + EXPOSURE_COMPENSATION = enum.auto() + DENOISING = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class DEPTH_MODE(enum.Enum): + """ + Lists available depth computation modes. + + | Enumerator | | + |:---:|:---:| + | NONE | No depth map computation. Only rectified stereo images will be available. | + | PERFORMANCE | Computation mode optimized for speed. | + | QUALITY | Computation mode designed for challenging areas with untextured surfaces. | + | ULTRA | Computation mode that favors edges and sharpness. Requires more GPU memory and computation power. | + | NEURAL_LIGHT | End to End Neural disparity estimation. Requires AI module. | + | NEURAL | End to End Neural disparity estimation. Requires AI module. | + | NEURAL_PLUS | End to End Neural disparity estimation. More precise but requires more GPU memory and computation power. Requires AI module. | + """ + NONE = enum.auto() + PERFORMANCE = enum.auto() + QUALITY = enum.auto() + ULTRA = enum.auto() + NEURAL_LIGHT = enum.auto() + NEURAL = enum.auto() + NEURAL_PLUS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class UNIT(enum.Enum): + """ + Lists available units for measures. + + | Enumerator | | + |:---:|:---:| + | MILLIMETER | International System (1/1000 meters) | + | CENTIMETER | International System (1/100 meters) | + | METER | International System (1 meter) | + | INCH | Imperial Unit (1/12 feet) | + | FOOT | Imperial Unit (1 foot) | + """ + MILLIMETER = enum.auto() + CENTIMETER = enum.auto() + METER = enum.auto() + INCH = enum.auto() + FOOT = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class COORDINATE_SYSTEM(enum.Enum): + """ + Lists available coordinates systems for positional tracking and 3D measures. + + | Enumerator | | + |:---:|:---:| + | IMAGE | Standard coordinates system in computer vision. Used in OpenCV: see `here `_. | + | LEFT_HANDED_Y_UP | Left-handed with Y up and Z forward. Used in Unity with DirectX. | + | RIGHT_HANDED_Y_UP | Right-handed with Y pointing up and Z backward. 
Used in OpenGL. |
+ | RIGHT_HANDED_Z_UP | Right-handed with Z pointing up and Y forward. Used in 3DSMax. |
+ | LEFT_HANDED_Z_UP | Left-handed with Z axis pointing up and X forward. Used in Unreal Engine. |
+ | RIGHT_HANDED_Z_UP_X_FWD | Right-handed with Z pointing up and X forward. Used in ROS (REP 103). |
+ """
+ IMAGE = enum.auto()
+ LEFT_HANDED_Y_UP = enum.auto()
+ RIGHT_HANDED_Y_UP = enum.auto()
+ RIGHT_HANDED_Z_UP = enum.auto()
+ LEFT_HANDED_Z_UP = enum.auto()
+ RIGHT_HANDED_Z_UP_X_FWD = enum.auto()
+ LAST = enum.auto()
+ def __str__(self) -> None:
+ pass
+
+ def __repr__(self) -> None:
+ pass
+
+ def __lt__(self, other) -> None:
+ pass
+
+ def __le__(self, other) -> None:
+ pass
+
+ def __gt__(self, other) -> None:
+ pass
+
+ def __ge__(self, other) -> None:
+ pass
+
+
+class MEASURE(enum.Enum):
+ """
+ Lists retrievable measures.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | DISPARITY | Disparity map. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | DEPTH | Depth map in sl.UNIT defined in sl.InitParameters.coordinate_units. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | CONFIDENCE | Certainty/confidence of the depth map. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | XYZ | Point cloud. Each pixel contains 4 float (X, Y, Z, not used). Type: sl.MAT_TYPE.F32_C4 |
+ | XYZRGBA | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the RGBA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZBGRA | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the BGRA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZARGB | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the ARGB color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZABGR | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the ABGR color. Type: sl.MAT_TYPE.F32_C4 |
+ | NORMALS | Normal vectors map. Each pixel contains 4 float (X, Y, Z, 0). Type: sl.MAT_TYPE.F32_C4 |
+ | DISPARITY_RIGHT | Disparity map for right sensor. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | DEPTH_RIGHT | Depth map for right sensor. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | XYZ_RIGHT | Point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, not used). Type: sl.MAT_TYPE.F32_C4 |
+ | XYZRGBA_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the RGBA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZBGRA_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the BGRA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZARGB_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the ARGB color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZABGR_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the ABGR color. Type: sl.MAT_TYPE.F32_C4 |
+ | NORMALS_RIGHT | Normal vectors map for right view. Each pixel contains 4 float (X, Y, Z, 0).
Type: sl.MAT_TYPE.F32_C4 | + | DEPTH_U16_MM | Depth map in millimeter whatever the sl.UNIT defined in sl.InitParameters.coordinate_units. Invalid values are set to 0 and depth values are clamped at 65000. Each pixel contains 1 unsigned short. Type: sl.MAT_TYPE.U16_C1 | + | DEPTH_U16_MM_RIGHT | Depth map in millimeter for right sensor. Each pixel contains 1 unsigned short. Type: sl.MAT_TYPE.U16_C1 | + """ + DISPARITY = enum.auto() + DEPTH = enum.auto() + CONFIDENCE = enum.auto() + XYZ = enum.auto() + XYZRGBA = enum.auto() + XYZBGRA = enum.auto() + XYZARGB = enum.auto() + XYZABGR = enum.auto() + NORMALS = enum.auto() + DISPARITY_RIGHT = enum.auto() + DEPTH_RIGHT = enum.auto() + XYZ_RIGHT = enum.auto() + XYZRGBA_RIGHT = enum.auto() + XYZBGRA_RIGHT = enum.auto() + XYZARGB_RIGHT = enum.auto() + XYZABGR_RIGHT = enum.auto() + NORMALS_RIGHT = enum.auto() + DEPTH_U16_MM = enum.auto() + DEPTH_U16_MM_RIGHT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class VIEW(enum.Enum): + """ + Lists available views. + + | Enumerator | | + |:---:|:---:| + | LEFT | Left BGRA image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | RIGHT | Right BGRA image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | LEFT_GRAY | Left gray image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1| + | RIGHT_GRAY | Right gray image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1| + | LEFT_UNRECTIFIED | Left BGRA unrectified image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | RIGHT_UNRECTIFIED | Right BGRA unrectified image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | LEFT_UNRECTIFIED_GRAY | Left gray unrectified image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 | + | RIGHT_UNRECTIFIED_GRAY | Right gray unrectified image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 | + | SIDE_BY_SIDE | Left and right image (the image width is therefore doubled). Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | DEPTH | Color rendering of the depth. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.DEPTH with sl.Camera.retrieve_measure() to get depth values. | + | CONFIDENCE | Color rendering of the depth confidence. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.CONFIDENCE with sl.Camera.retrieve_measure() to get confidence values. | + | NORMALS | Color rendering of the normals. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.NORMALS with sl.Camera.retrieve_measure() to get normal values. | + | DEPTH_RIGHT | Color rendering of the right depth mapped on right sensor. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.DEPTH_RIGHT with sl.Camera.retrieve_measure() to get depth right values. | + | NORMALS_RIGHT | Color rendering of the normals mapped on right sensor. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.NORMALS_RIGHT with sl.Camera.retrieve_measure() to get normal right values. 
| + """ + LEFT = enum.auto() + RIGHT = enum.auto() + LEFT_GRAY = enum.auto() + RIGHT_GRAY = enum.auto() + LEFT_UNRECTIFIED = enum.auto() + RIGHT_UNRECTIFIED = enum.auto() + LEFT_UNRECTIFIED_GRAY = enum.auto() + RIGHT_UNRECTIFIED_GRAY = enum.auto() + SIDE_BY_SIDE = enum.auto() + DEPTH = enum.auto() + CONFIDENCE = enum.auto() + NORMALS = enum.auto() + DEPTH_RIGHT = enum.auto() + NORMALS_RIGHT = enum.auto() + LEFT_BGRA = enum.auto() + LEFT_BGR = enum.auto() + RIGHT_BGRA = enum.auto() + RIGHT_BGR = enum.auto() + LEFT_UNRECTIFIED_BGRA = enum.auto() + LEFT_UNRECTIFIED_BGR = enum.auto() + RIGHT_UNRECTIFIED_BGRA = enum.auto() + RIGHT_UNRECTIFIED_BGR = enum.auto() + SIDE_BY_SIDE_BGRA = enum.auto() + SIDE_BY_SIDE_BGR = enum.auto() + SIDE_BY_SIDE_GRAY = enum.auto() + SIDE_BY_SIDE_UNRECTIFIED_BGRA = enum.auto() + SIDE_BY_SIDE_UNRECTIFIED_BGR = enum.auto() + SIDE_BY_SIDE_UNRECTIFIED_GRAY = enum.auto() + DEPTH_BGRA = enum.auto() + DEPTH_BGR = enum.auto() + DEPTH_GRAY = enum.auto() + CONFIDENCE_BGRA = enum.auto() + CONFIDENCE_BGR = enum.auto() + CONFIDENCE_GRAY = enum.auto() + NORMALS_BGRA = enum.auto() + NORMALS_BGR = enum.auto() + NORMALS_GRAY = enum.auto() + DEPTH_RIGHT_BGRA = enum.auto() + DEPTH_RIGHT_BGR = enum.auto() + DEPTH_RIGHT_GRAY = enum.auto() + NORMALS_RIGHT_BGRA = enum.auto() + NORMALS_RIGHT_BGR = enum.auto() + NORMALS_RIGHT_GRAY = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class POSITIONAL_TRACKING_STATE(enum.Enum): + """ + Lists the different states of positional tracking. + + | Enumerator | | + |:---:|:---:| + | SEARCHING | Warning: DEPRECATED: This state is no longer in use. | + | OK | The positional tracking is functioning normally. | + | OFF | The positional tracking is currently disabled. | + | FPS_TOO_LOW | The effective FPS is too low to provide accurate motion tracking results. Consider adjusting performance parameters (e.g., depth mode, camera resolution) to improve tracking quality.| + | SEARCHING_FLOOR_PLANE | The camera is currently searching for the floor plane to establish its position relative to it. The world reference frame will be set afterward. | + | UNAVAILABLE | The tracking module was unable to perform tracking from the previous frame to the current frame. | + """ + SEARCHING = enum.auto() + OK = enum.auto() + OFF = enum.auto() + FPS_TOO_LOW = enum.auto() + SEARCHING_FLOOR_PLANE = enum.auto() + UNAVAILABLE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class ODOMETRY_STATUS(enum.Enum): + """ + Report the status of current odom tracking. + + | Enumerator | | + |:----------:|:---------------------------| + | OK | The positional tracking module successfully tracked from the previous frame to the current frame. | + | UNAVAILABLE | The positional tracking module cannot track the current frame. | + | INSUFFICIENT_FEATURES | The positional tracking failed to track the current frame because it could not find enought features. 
| + """ + OK = enum.auto() + UNAVAILABLE = enum.auto() + INSUFFICIENT_FEATURES = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SPATIAL_MEMORY_STATUS(enum.Enum): + """ + Report the status of current map tracking. + + | Enumerator | | + |:-----------:|:---------------------------| + | OK | The positional tracking module is operating normally. | + | LOOP_CLOSED | The positional tracking module detected a loop and corrected its position. | + | SEARCHING | The positional tracking module is searching for recognizable areas in the global map to relocate. | + | INITIALIZING| Displayed until the cameras has acquired enough memory (Initial Area Mapping) or has found its first loop closure and is localized in the loaded area map (Lifelong Mapping/Localization). Users need to keep moving the camera for it to get updated. | + | MAP_UPDATE | Displayed when the robot is mapping (Initial Area Mapping) or when the robot is getting out of the area map bounds (Lifelong Mapping). Displayed as “Tracking” when in exploratory mode with SLAM engaged. | + | KNOWN_MAP | Displayed when the camera is localized within the loaded area map. | + | LOST | Displayed when localization cannot operate anymore (camera completely obstructed, sudden localization jumps after being localized) in Mapping/ Localization modes. It can also include the case where the camera jumps or is located out of map bounds in Localization mode. This should be an indicator for users to stop the robot. | + | OFF | Displayed when the spatial memory is turned off.| + """ + OK = enum.auto() + LOOP_CLOSED = enum.auto() + SEARCHING = enum.auto() + INITIALIZING = enum.auto() + MAP_UPDATE = enum.auto() + KNOWN_MAP = enum.auto() + LOST = enum.auto() + OFF = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class POSITIONAL_TRACKING_FUSION_STATUS(enum.Enum): + """ + Report the status of the positional tracking fusion. + + | Enumerator | | + |:----------:|:---------------------------| + | VISUAL_INERTIAL | The positional tracking module is fusing visual and inertial data. | + | VISUAL | The positional tracking module is fusing visual data only. | + | INERTIAL | The positional tracking module is fusing inertial data only. | + | GNSS | The positional tracking module is fusing GNSS data only. | + | VISUAL_INERTIAL_GNSS | The positional tracking module is fusing visual, inertial, and GNSS data. | + | VISUAL_GNSS | The positional tracking module is fusing visual and GNSS data. | + | INERTIAL_GNSS | The positional tracking module is fusing inertial and GNSS data. | + | UNAVAILABLE | The positional tracking module is unavailable. 
| + """ + VISUAL_INERTIAL = enum.auto() + VISUAL = enum.auto() + INERTIAL = enum.auto() + GNSS = enum.auto() + VISUAL_INERTIAL_GNSS = enum.auto() + VISUAL_GNSS = enum.auto() + INERTIAL_GNSS = enum.auto() + UNAVAILABLE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class GNSS_STATUS(enum.Enum): + """ + Lists that represents the status of the of GNSS signal. + + | Enumerator | | + |:---:|:---:| + | UNKNOWN | No GNSS fix data is available. | + | SINGLE | Single Point Positioning. | + | DGNSS | Differential GNSS. | + | PPS | Precise Positioning Service. | + | RTK_FLOAT | Real Time Kinematic Float. | + | RTK_FIX | Real Time Kinematic Fixed. | + """ + UNKNOWN = enum.auto() + SINGLE = enum.auto() + DGNSS = enum.auto() + PPS = enum.auto() + RTK_FLOAT = enum.auto() + RTK_FIX = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class GNSS_MODE(enum.Enum): + """ + Lists that represents the mode of GNSS signal. + + | Enumerator | | + |:---:|:---:| + | UNKNOWN | No GNSS fix data is available. | + | NO_FIX | No GNSS fix is available. | + | FIX_2D | 2D GNSS fix, providing latitude and longitude coordinates but without altitude information. | + | FIX_3D | 3D GNSS fix, providing latitude, longitude, and altitude coordinates. | + """ + UNKNOWN = enum.auto() + NO_FIX = enum.auto() + FIX_2D = enum.auto() + FIX_3D = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class GNSS_FUSION_STATUS(enum.Enum): + """ + Lists that represents the current GNSS fusion status + + | Enumerator | | + |:---:|:---:| + | OK | The GNSS fusion module is calibrated and working successfully. | + | OFF | The GNSS fusion module is not enabled. | + | CALIBRATION_IN_PROGRESS | Calibration of the GNSS/VIO fusion module is in progress. | + | RECALIBRATION_IN_PROGRESS | Re-alignment of GNSS/VIO data is in progress, leading to potentially inaccurate global position. | + """ + OK = enum.auto() + OFF = enum.auto() + CALIBRATION_IN_PROGRESS = enum.auto() + RECALIBRATION_IN_PROGRESS = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class Landmark: + """ + Represents a 3d landmark. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def position(self) -> list[float]: + """ + The position of the landmark. + """ + return list[float]() + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + The ID of the landmark. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + +class Landmark2D: + """ + Represents the projection of a 3d landmark in the image. 
+ """ + def __init__(self, *args, **kwargs) -> None: ... + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Unique identifier of the corresponding landmark. + """ + return int() + + @property + def dynamic_confidence(self) -> float: + """ + Confidence score indicating the likelihood that the landmark is associated with a dynamic object. + + The value ranges from 0 to 1, where a smaller value indicates greater confidence that the landmark + is owned by a dynamic object. + """ + return float() + + def position(self) -> np.array: + """ + The position of the landmark in the image. + """ + return np.array() + + +class PositionalTrackingStatus: + """ + Lists the different status of the positional tracking + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def odometry_status(self) -> ODOMETRY_STATUS: + """ + Represents the current state of Visual-Inertial Odometry (VIO) tracking between the previous frame and the current frame. + """ + return ODOMETRY_STATUS() + + @odometry_status.setter + def odometry_status(self, odometry_status: Any) -> None: + pass + + @property + def tracking_fusion_status(self) -> POSITIONAL_TRACKING_FUSION_STATUS: + """ + Represents the current state of the positional tracking fusion. + """ + return POSITIONAL_TRACKING_FUSION_STATUS() + + @tracking_fusion_status.setter + def tracking_fusion_status(self, tracking_fusion_status: Any) -> None: + pass + + @property + def spatial_memory_status(self) -> SPATIAL_MEMORY_STATUS: + """ + Represents the current state of camera tracking in the global map. + """ + return SPATIAL_MEMORY_STATUS() + + @spatial_memory_status.setter + def spatial_memory_status(self, spatial_memory_status: Any) -> None: + pass + + +class FusedPositionalTrackingStatus: + """ + Lists the different status of the positional tracking + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def odometry_status(self) -> ODOMETRY_STATUS: + return ODOMETRY_STATUS() + + @odometry_status.setter + def odometry_status(self, odometry_status: Any) -> None: + pass + + @property + def tracking_fusion_status(self) -> POSITIONAL_TRACKING_FUSION_STATUS: + return POSITIONAL_TRACKING_FUSION_STATUS() + + @tracking_fusion_status.setter + def tracking_fusion_status(self, tracking_fusion_status: Any) -> None: + pass + + @property + def gnss_mode(self) -> GNSS_MODE: + return GNSS_MODE() + + @gnss_mode.setter + def gnss_mode(self, gnss_mode: Any) -> None: + pass + + @property + def spatial_memory_status(self) -> SPATIAL_MEMORY_STATUS: + return SPATIAL_MEMORY_STATUS() + + @spatial_memory_status.setter + def spatial_memory_status(self, spatial_memory_status: Any) -> None: + pass + + @property + def gnss_status(self) -> GNSS_STATUS: + return GNSS_STATUS() + + @gnss_status.setter + def gnss_status(self, gnss_status: Any) -> None: + pass + + @property + def gnss_fusion_status(self) -> GNSS_FUSION_STATUS: + return GNSS_FUSION_STATUS() + + @gnss_fusion_status.setter + def gnss_fusion_status(self, gnss_fusion_status: Any) -> None: + pass + + +class POSITIONAL_TRACKING_MODE(enum.Enum): + """ + Lists the mode of positional tracking that can be used. + + | Enumerator | | + |:---:|:---:| + | GEN_1 | Default mode. Fast and stable mode. Requires depth computation. Less robust than GEN_3. | + | GEN_2 | Warning: DEPRECATED. | + | GEN_3 | Fast and accurate, in both exploratory mode and mapped environments. Note: Can be used even if depth_mode is set to DEPTH_MODE::NONE. 
| + """ + GEN_1 = enum.auto() + GEN_2 = enum.auto() + GEN_3 = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class AREA_EXPORTING_STATE(enum.Enum): + """ + Lists the different states of spatial memory area export. + + | Enumerator | | + |:---:|:---:| + | SUCCESS | The spatial memory file has been successfully created. | + | RUNNING | The spatial memory is currently being written. | + | NOT_STARTED | The spatial memory file exportation has not been called. | + | FILE_EMPTY | The spatial memory contains no data, the file is empty. | + | FILE_ERROR | The spatial memory file has not been written because of a wrong file name. | + | SPATIAL_MEMORY_DISABLED | The spatial memory learning is disabled. No file can be created. | + """ + SUCCESS = enum.auto() + RUNNING = enum.auto() + NOT_STARTED = enum.auto() + FILE_EMPTY = enum.auto() + FILE_ERROR = enum.auto() + SPATIAL_MEMORY_DISABLED = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class REFERENCE_FRAME(enum.Enum): + """ + Lists possible types of position matrix used to store camera path and pose. + + | Enumerator | | + |:---:|:---:| + | WORLD | The transform of sl.Pose will contain the motion with reference to the world frame (previously called sl.PATH). | + | CAMERA | The transform of sl.Pose will contain the motion with reference to the previous camera frame (previously called sl.POSE). | + """ + WORLD = enum.auto() + CAMERA = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class TIME_REFERENCE(enum.Enum): + """ + Lists possible time references for timestamps or data. + + + | Enumerator | | + |:---:|:---:| + | IMAGE | The requested timestamp or data will be at the time of the frame extraction. | + | CURRENT | The requested timestamp or data will be at the time of the function call. | + """ + IMAGE = enum.auto() + CURRENT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SPATIAL_MAPPING_STATE(enum.Enum): + """ + Lists the different states of spatial mapping. + + | Enumerator | | + |:---:|:---:| + | INITIALIZING | The spatial mapping is initializing. | + | OK | The depth and tracking data were correctly integrated in the mapping algorithm. | + | NOT_ENOUGH_MEMORY | The maximum memory dedicated to the scanning has been reached. The mesh will no longer be updated. | + | NOT_ENABLED | sl.Camera.enable_spatial_mapping() wasn't called or the scanning was stopped and not relaunched. | + | FPS_TOO_LOW | The effective FPS is too low to give proper results for spatial mapping. 
Consider using performance parameters (sl.DEPTH_MODE.PERFORMANCE, sl.MAPPING_RESOLUTION.LOW, low camera resolution (RESOLUTION "sl.RESOLUTION.VGA/SVGA" or sl.RESOLUTION.HD720). | + """ + INITIALIZING = enum.auto() + OK = enum.auto() + NOT_ENOUGH_MEMORY = enum.auto() + NOT_ENABLED = enum.auto() + FPS_TOO_LOW = enum.auto() + LAST = enum.auto() + +class REGION_OF_INTEREST_AUTO_DETECTION_STATE(enum.Enum): + """ + Lists the different states of region of interest auto detection. + + | Enumerator | | + |:---:|:---:| + | RUNNING | The region of interest auto detection is initializing. | + | READY | The region of interest mask is ready, if auto_apply was enabled, the region of interest mask is being used | + | NOT_ENABLED | The region of interest auto detection is not enabled | + """ + RUNNING = enum.auto() + READY = enum.auto() + NOT_ENABLED = enum.auto() + LAST = enum.auto() + +class SVO_COMPRESSION_MODE(enum.Enum): + """ + Lists available compression modes for SVO recording. + .. note:: + LOSSLESS is an improvement of previous lossless compression (used in ZED Explorer), even if size may be bigger, compression time is much faster. + + + | Enumerator | | + |:---:|:---:| + | LOSSLESS | PNG/ZSTD (lossless) CPU based compression. Average size: 42% of RAW | + | H264 | H264 (AVCHD) GPU based compression. Average size: 1% of RAW Note: Requires a NVIDIA GPU. | + | H265 | H265 (HEVC) GPU based compression. Average size: 1% of RAW Note: Requires a NVIDIA GPU. | + | H264_LOSSLESS | H264 Lossless GPU/Hardware based compression. Average size: 25% of RAW Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Note: Requires a NVIDIA GPU. | + | H265_LOSSLESS | H265 Lossless GPU/Hardware based compression. Average size: 25% of RAW Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Note: Requires a NVIDIA GPU. | + """ + LOSSLESS = enum.auto() + H264 = enum.auto() + H265 = enum.auto() + H264_LOSSLESS = enum.auto() + H265_LOSSLESS = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class MEM(enum.Enum): + """ + Lists available memory type. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + + + | Enumerator | | + |:---:|:---:| + | CPU | Data will be stored on the CPU (processor side). | + | GPU | Data will be stored on the GPU | + | BOTH | Data will be stored on both the CPU and GPU memory | + """ + CPU = enum.auto() + GPU = enum.auto() + BOTH = enum.auto() + +class COPY_TYPE(enum.Enum): + """ + Lists available copy operation on sl.Mat. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + + + | Enumerator | | + |:---:|:---:| + | CPU_CPU | Copy data from CPU to CPU. | + | GPU_CPU | Copy data from GPU to CPU. | + | CPU_GPU | Copy data from CPU to GPU. | + | GPU_GPU | Copy data from GPU to GPU. | + """ + CPU_CPU = enum.auto() + GPU_CPU = enum.auto() + CPU_GPU = enum.auto() + GPU_GPU = enum.auto() + +class MAT_TYPE(enum.Enum): + """ + Lists available sl.Mat formats. + .. note:: + sl.Mat type depends on image or measure type. + + .. note:: + For the dependencies, see sl.VIEW and sl.MEASURE. 
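+ The mapping matters when reading data back into numpy; a minimal sketch (``zed`` is assumed to be an already opened sl.Camera):
+
+ .. code-block:: python
+
+     mat = sl.Mat()
+     zed.retrieve_measure(mat, sl.MEASURE.DEPTH)  # DEPTH is documented as F32_C1
+     depth = mat.get_data()                       # float32 numpy array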
+ + + | Enumerator | | + |:---:|:---:| + | F32_C1 | 1-channel matrix of float | + | F32_C2 | 2-channel matrix of float | + | F32_C3 | 3-channel matrix of float | + | F32_C4 | 4-channel matrix of float | + | U8_C1 | 1-channel matrix of unsigned char | + | U8_C2 | 2-channel matrix of unsigned char | + | U8_C3 | 3-channel matrix of unsigned char | + | U8_C4 | 4-channel matrix of unsigned char | + | U16_C1 | 1-channel matrix of unsigned short | + | S8_C4 | 4-channel matrix of signed char | + """ + F32_C1 = enum.auto() + F32_C2 = enum.auto() + F32_C3 = enum.auto() + F32_C4 = enum.auto() + U8_C1 = enum.auto() + U8_C2 = enum.auto() + U8_C3 = enum.auto() + U8_C4 = enum.auto() + U16_C1 = enum.auto() + S8_C4 = enum.auto() + +class SENSOR_TYPE(enum.Enum): + """ + Lists available sensor types. + .. note:: + Sensors are not available on sl.MODEL.ZED. + + + | Enumerator | | + |:---:|:---:| + | ACCELEROMETER | Three-axis accelerometer sensor to measure the inertial accelerations. | + | GYROSCOPE | Three-axis gyroscope sensor to measure the angular velocities. | + | MAGNETOMETER | Three-axis magnetometer sensor to measure the orientation of the device with respect to the Earth's magnetic field. | + | BAROMETER | Barometer sensor to measure the atmospheric pressure. | + """ + ACCELEROMETER = enum.auto() + GYROSCOPE = enum.auto() + MAGNETOMETER = enum.auto() + BAROMETER = enum.auto() + +class SENSORS_UNIT(enum.Enum): + """ + Lists available measurement units of onboard sensors. + .. note:: + Sensors are not available on sl.MODEL.ZED. + + + | Enumerator | | + |:---:|:---:| + | M_SEC_2 | m/s² (acceleration) | + | DEG_SEC | deg/s (angular velocity) | + | U_T | μT (magnetic field) | + | HPA | hPa (atmospheric pressure) | + | CELSIUS | °C (temperature) | + | HERTZ | Hz (frequency) | + """ + M_SEC_2 = enum.auto() + DEG_SEC = enum.auto() + U_T = enum.auto() + HPA = enum.auto() + CELSIUS = enum.auto() + HERTZ = enum.auto() + +class MODULE(enum.Enum): + """ + Lists available module + + + | MODULE | Description | + |:---:|:---:| + | ALL | All modules | + | DEPTH | For the depth module (includes all 'measures' in retrieveMeasure) | + | POSITIONAL_TRACKING | For the positional tracking module | + | OBJECT_DETECTION | For the object detection module | + | BODY_TRACKING | For the body tracking module | + | SPATIAL_MAPPING | For the spatial mapping module | + """ + ALL = enum.auto() + DEPTH = enum.auto() + POSITIONAL_TRACKING = enum.auto() + OBJECT_DETECTION = enum.auto() + BODY_TRACKING = enum.auto() + SPATIAL_MAPPING = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_CLASS(enum.Enum): + """ + Lists available object classes. + + + | OBJECT_CLASS | Description | + |:---:|:---:| + | PERSON | For people detection | + | VEHICLE | For vehicle detection (cars, trucks, buses, motorcycles, etc.) | + | BAG | For bag detection (backpack, handbag, suitcase, etc.) | + | ANIMAL | For animal detection (cow, sheep, horse, dog, cat, bird, etc.) | + | ELECTRONICS | For electronic device detection (cellphone, laptop, etc.) | + | FRUIT_VEGETABLE | For fruit and vegetable detection (banana, apple, orange, carrot, etc.) | + | SPORT | For sport-related object detection (sport ball, etc.) 
| + """ + PERSON = enum.auto() + VEHICLE = enum.auto() + BAG = enum.auto() + ANIMAL = enum.auto() + ELECTRONICS = enum.auto() + FRUIT_VEGETABLE = enum.auto() + SPORT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_SUBCLASS(enum.Enum): + """ + List available object subclasses. + + Given as hint, when using object tracking an object can change of sl.OBJECT_SUBCLASS while keeping the same sl.OBJECT_CLASS + (i.e.: frame n: MOTORBIKE, frame n+1: BICYCLE). + + | OBJECT_SUBCLASS | OBJECT_CLASS | + |:---:|:---:| + | PERSON | PERSON | + | PERSON_HEAD | PERSON | + | BICYCLE | VEHICLE | + | CAR | VEHICLE | + | MOTORBIKE | VEHICLE | + | BUS | VEHICLE | + | TRUCK | VEHICLE | + | BOAT | VEHICLE | + | BACKPACK | BAG | + | HANDBAG | BAG | + | SUITCASE | BAG | + | BIRD | ANIMAL | + | CAT | ANIMAL | + | DOG | ANIMAL | + | HORSE | ANIMAL | + | SHEEP | ANIMAL | + | COW | ANIMAL | + | CELLPHONE | ELECTRONICS | + | LAPTOP | ELECTRONICS | + | BANANA | FRUIT_VEGETABLE | + | APPLE | FRUIT_VEGETABLE | + | ORANGE | FRUIT_VEGETABLE | + | CARROT | FRUIT_VEGETABLE | + | SPORTSBALL | SPORT | + | MACHINERY | VEHICLE | + """ + PERSON = enum.auto() + PERSON_HEAD = enum.auto() + BICYCLE = enum.auto() + CAR = enum.auto() + MOTORBIKE = enum.auto() + BUS = enum.auto() + TRUCK = enum.auto() + BOAT = enum.auto() + BACKPACK = enum.auto() + HANDBAG = enum.auto() + SUITCASE = enum.auto() + BIRD = enum.auto() + CAT = enum.auto() + DOG = enum.auto() + HORSE = enum.auto() + SHEEP = enum.auto() + COW = enum.auto() + CELLPHONE = enum.auto() + LAPTOP = enum.auto() + BANANA = enum.auto() + APPLE = enum.auto() + ORANGE = enum.auto() + CARROT = enum.auto() + SPORTSBALL = enum.auto() + MACHINERY = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_TRACKING_STATE(enum.Enum): + """ + Lists the different states of object tracking. + + + | Enumerator | | + |:---:|:---:| + | OFF | The tracking is not yet initialized. The object id is not usable. | + | OK | The object is tracked. | + | SEARCHING | The object could not be detected in the image and is potentially occluded. The trajectory is estimated. | + | TERMINATE | This is the last searching state of the track. The track will be deleted in the next sl.Camera.retrieve_objects(). | + """ + OFF = enum.auto() + OK = enum.auto() + SEARCHING = enum.auto() + TERMINATE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class FLIP_MODE(enum.Enum): + """ + Lists possible flip modes of the camera. + + + | Enumerator | | + |:---:|:---:| + | OFF | No flip applied. Default behavior. | + | ON | Images and camera sensors' data are flipped useful when your camera is mounted upside down. | + | AUTO | In LIVE mode, use the camera orientation (if an IMU is available) to set the flip mode. In SVO mode, read the state of this enum when recorded. 
| + """ + OFF = enum.auto() + ON = enum.auto() + AUTO = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_ACTION_STATE(enum.Enum): + """ + Lists the different states of an object's actions. + + + | Enumerator | | + |:---:|:---:| + | IDLE | The object is staying static. | + | MOVING | The object is moving. | + """ + IDLE = enum.auto() + MOVING = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class ObjectData: + """ + Class containing data of a detected object such as its bounding_box, label, id and its 3D position. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @head_position.setter + def head_position(self, head_position: Any) -> None: + pass + + @property + def confidence(self) -> float: + """ + Detection confidence value of the object. + From 0 to 100, a low value means the object might not be localized perfectly or the label (sl.OBJECT_CLASS) is uncertain. + """ + return float() + + @confidence.setter + def confidence(self, confidence: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Object tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @velocity.setter + def velocity(self, velocity: Any) -> None: + pass + + @property + def action_state(self) -> OBJECT_ACTION_STATE: + """ + Object action state. + """ + return OBJECT_ACTION_STATE() + + @action_state.setter + def action_state(self, action_state: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Object identification number. + It is used as a reference when tracking the object through the frames. + .. note:: + Only available if sl.ObjectDetectionParameters.enable_tracking is activated. + + .. note:: + Otherwise, it will be set to -1. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def sublabel(self) -> OBJECT_SUBCLASS: + """ + Object sub-class/sub-category to identify the object type. + """ + return OBJECT_SUBCLASS() + + @sublabel.setter + def sublabel(self, sublabel: Any) -> None: + pass + + @property + def mask(self) -> Mat: + """ + Mask defining which pixels which belong to the object (in bounding_box_2d and set to 255) and those of the background (set to 0). + .. warning:: The mask information is only available for tracked objects (sl.OBJECT_TRACKING_STATE.OK) that have a valid depth. + .. warning:: Otherwise, the mask will not be initialized (```mask.is_init() == False```). 
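+
+        A minimal usage sketch (the ``objects`` variable is hypothetical, assumed to be an sl.Objects already filled by sl.Camera.retrieve_objects()):
+
+        .. code-block:: python
+
+            for obj in objects.object_list:
+                # Masks are only filled for tracked objects with valid depth.
+                if obj.tracking_state == sl.OBJECT_TRACKING_STATE.OK and obj.mask.is_init():
+                    mask_np = obj.mask.get_data()  # NumPy array: 255 = object, 0 = background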
+ """ + return Mat() + + @mask.setter + def mask(self, mask: Any) -> None: + pass + + @position_covariance.setter + def position_covariance(self, position_covariance: Any) -> None: + pass + + @property + def raw_label(self) -> int: + """ + Object raw label. + It is forwarded from sl.CustomBoxObjectData when using sl.OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS. + """ + return int() + + @raw_label.setter + def raw_label(self, raw_label: Any) -> None: + pass + + @dimensions.setter + def dimensions(self, dimensions: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @bounding_box.setter + def bounding_box(self, bounding_box: Any) -> None: + pass + + @property + def label(self) -> OBJECT_CLASS: + """ + Object class/category to identify the object type. + """ + return OBJECT_CLASS() + + @label.setter + def label(self, label: Any) -> None: + pass + + def position(self) -> np.array[float]: + """ + Object 3D centroid. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def velocity(self) -> np.array[float]: + """ + Object 3D velocity. + .. note:: + It is defined in ```sl.InitParameters.coordinate_units / s``` and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the object represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float]() + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + def dimensions(self) -> np.array[float]: + """ + 3D object dimensions: width, height, length. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def head_bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the head of the object (a person) represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[float][float]() + + def head_bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the head of the object (a person) represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[int][int]() + + def head_position(self) -> np.array[float]: + """ + 3D centroid of the head of the object (a person). + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. 
warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[float]() + + def position_covariance(self) -> np.array[float]: + """ + Covariance matrix of the 3D position. + .. note:: + It is represented by its upper triangular matrix value + + .. code-block:: text + + = [p0, p1, p2] + [p1, p3, p4] + [p2, p4, p5] + + where pi is ```position_covariance[i]``` + """ + return np.array[float]() + + +class BodyData: + """ + Class containing data of a detected body/person such as its bounding_box, id and its 3D position. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @head_position.setter + def head_position(self, head_position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Body/person identification number. + It is used as a reference when tracking the body through the frames. + .. note:: + Only available if sl.BodyTrackingParameters.enable_tracking is activated. + + .. note:: + Otherwise, it will be set to -1. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def confidence(self) -> float: + """ + Detection confidence value of the body/person. + From 0 to 100, a low value means the body might not be localized perfectly. + """ + return float() + + @confidence.setter + def confidence(self, confidence: Any) -> None: + pass + + @position_covariance.setter + def position_covariance(self, position_covariance: Any) -> None: + pass + + @property + def mask(self) -> Mat: + """ + Mask defining which pixels which belong to the body/person (in bounding_box_2d and set to 255) and those of the background (set to 0). + .. warning:: The mask information is only available for tracked bodies (sl.OBJECT_TRACKING_STATE.OK) that have a valid depth. + .. warning:: Otherwise, the mask will not be initialized (```mask.is_init() == False```). + """ + return Mat() + + @mask.setter + def mask(self, mask: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Body/person tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @velocity.setter + def velocity(self, velocity: Any) -> None: + pass + + @keypoints_covariance.setter + def keypoints_covariance(self, keypoints_covariance: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @dimensions.setter + def dimensions(self, dimensions: Any) -> None: + pass + + @property + def action_state(self) -> OBJECT_ACTION_STATE: + """ + Body/person action state. + """ + return OBJECT_ACTION_STATE() + + @action_state.setter + def action_state(self, action_state: Any) -> None: + pass + + @bounding_box.setter + def bounding_box(self, bounding_box: Any) -> None: + pass + + def position(self) -> np.array[float]: + """ + Body/person 3D centroid. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def velocity(self) -> np.array[float]: + """ + Body/person 3D velocity. + .. 
note:: + It is defined in ```sl.InitParameters.coordinate_units / s``` and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the body/person represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float]() + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the body/person represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + def keypoints_covariance(self) -> np.array[float][float]: + """ + NumPy array of detection covariance for each keypoint. + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. Their covariances will be 0. + """ + return np.array[float][float]() + + def position_covariance(self) -> np.array[float]: + """ + Covariance matrix of the 3D position. + .. note:: + It is represented by its upper triangular matrix value + + .. code-block:: text + + = [p0, p1, p2] + [p1, p3, p4] + [p2, p4, p5] + + where pi is ```position_covariance[i]``` + """ + return np.array[float]() + + def dimensions(self) -> np.array[float]: + """ + 3D body/person dimensions: width, height, length. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def keypoint(self) -> np.array[float][float]: + """ + Set of useful points representing the human body in 3D. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. They will have non finite values. + """ + return np.array[float][float]() + + def keypoint_2d(self) -> np.array[int][int]: + """ + Set of useful points representing the human body in 2D. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. warning:: In some cases, eg. body partially out of the image, some keypoints can not be detected. They will have negatives coordinates. + """ + return np.array[int][int]() + + def head_bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the head of the body/person represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float][float]() + + def head_bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the head of the body/person represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + """ + return np.array[int][int]() + + def head_position(self) -> np.array[float]: + """ + 3D centroid of the head of the body/person. + .. 
note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def keypoint_confidence(self) -> np.array[float]: + """ + NumPy array of detection confidences for each keypoint. + .. note:: + They can not be lower than the sl.BodyTrackingRuntimeParameters.detection_confidence_threshold. + + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. They will have non finite values. + """ + return np.array[float]() + + def local_position_per_joint(self) -> np.array[float][float]: + """ + NumPy array of local position (position of the child keypoint with respect to its parent expressed in its parent coordinate frame) for each keypoint. + .. note:: + They are expressed in sl.REFERENCE_FRAME.CAMERA or sl.REFERENCE_FRAME.WORLD. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float][float]() + + def local_orientation_per_joint(self) -> np.array[float][float]: + """ + NumPy array of local orientation for each keypoint. + .. note:: + The orientation is represented by a quaternion. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float][float]() + + def global_root_orientation(self) -> np.array[float]: + """ + Global root orientation of the skeleton (NumPy array). + The orientation is also represented by a quaternion. + .. note:: + The global root position is already accessible in keypoint attribute by using the root index of a given sl.BODY_FORMAT. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float]() + + +def generate_unique_id() -> None: + """ + Generate a UUID like unique id to help identify and track AI detections. + """ + pass + +class CustomBoxObjectData: + """ + Class that store externally detected objects. + + The objects can be ingested with sl.Camera.ingest_custom_box_objects() to extract 3D and tracking information over time. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def tracking_max_dist(self) -> float: + """ + Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. + By default, do not discard tracked object based on distance. + Only valid for static object. + """ + return float() + + @tracking_max_dist.setter + def tracking_max_dist(self, tracking_max_dist: Any) -> None: + pass + + @property + def is_grounded(self) -> bool: + """ + Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking. + - true: 2 DoF projected alongside the floor plane. Case for object standing on the ground such as person, vehicle, etc. + \n The projection implies that the objects cannot be superposed on multiple horizontal levels. + - false: 6 DoF (full 3D movements are allowed). + + .. note:: + This parameter cannot be changed for a given object tracking id. + + .. note:: + It is advised to set it by labels to avoid issues. + """ + return bool() + + @is_grounded.setter + def is_grounded(self, is_grounded: Any) -> None: + pass + + @property + def tracking_timeout(self) -> float: + """ + Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time. + By default, let the tracker decide internally based on the internal sub class of the tracked object. 
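+
+        A minimal sketch of setting this field when ingesting an external detection (``zed`` and ``box_2d`` are assumed: an opened sl.Camera and a 4x2 list of pixel corners from your own detector):
+
+        .. code-block:: python
+
+            obj = sl.CustomBoxObjectData()
+            obj.unique_object_id = sl.generate_unique_id()
+            obj.bounding_box_2d = box_2d
+            obj.label = 0
+            obj.probability = 0.8
+            obj.tracking_timeout = 2.0  # drop the track after 2 s without re-detection
+            zed.ingest_custom_box_objects([obj])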
+ """ + return float() + + @tracking_timeout.setter + def tracking_timeout(self, tracking_timeout: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @property + def probability(self) -> float: + """ + Detection confidence value of the object. + .. note:: + The value should be in ```[0-1]```. + + .. note:: + It can be used to improve the object tracking. + """ + return float() + + @probability.setter + def probability(self, probability: Any) -> None: + pass + + @property + def is_static(self) -> bool: + """ + Provide hypothesis about the object staticity to improve the object tracking. + - true: the object will be assumed to never move nor being moved. + - false: the object will be assumed to be able to move or being moved. + """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @property + def label(self) -> int: + """ + Object label. + This information is passed-through and can be used to improve object tracking. + .. note:: + It should define an object class. This means that any similar object (in classification) should share the same label number. + """ + return int() + + @label.setter + def label(self, label: Any) -> None: + pass + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + +class CustomMaskObjectData: + """ + Class storing externally detected objects. + + The objects can be ingested with sl.Camera.ingest_custom_mask_objects() to extract 3D and tracking information over time. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def tracking_max_dist(self) -> float: + """ + Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. + By default, do not discard tracked object based on distance. + Only valid for static object. + """ + return float() + + @tracking_max_dist.setter + def tracking_max_dist(self, tracking_max_dist: Any) -> None: + pass + + @property + def is_grounded(self) -> bool: + """ + Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking. + - true: 2 DoF projected alongside the floor plane. Case for object standing on the ground such as person, vehicle, etc. + \n The projection implies that the objects cannot be superposed on multiple horizontal levels. + - false: 6 DoF (full 3D movements are allowed). + + .. note:: + This parameter cannot be changed for a given object tracking id. + + .. note:: + It is advised to set it by labels to avoid issues. + """ + return bool() + + @is_grounded.setter + def is_grounded(self, is_grounded: Any) -> None: + pass + + @property + def tracking_timeout(self) -> float: + """ + Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time. 
+ By default, let the tracker decide internally based on the internal sub class of the tracked object. + """ + return float() + + @tracking_timeout.setter + def tracking_timeout(self, tracking_timeout: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @property + def probability(self) -> float: + """ + Detection confidence value of the object. + .. note:: + The value should be in ```[0-1]```. + + .. note:: + It can be used to improve the object tracking. + """ + return float() + + @probability.setter + def probability(self, probability: Any) -> None: + pass + + @property + def is_static(self) -> bool: + """ + Provide hypothesis about the object staticity to improve the object tracking. + - true: the object will be assumed to never move nor being moved. + - false: the object will be assumed to be able to move or being moved. + """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @property + def label(self) -> int: + """ + Object label. + This information is passed-through and can be used to improve object tracking. + .. note:: + It should define an object class. This means that any similar object (in classification) should share the same label number. + """ + return int() + + @label.setter + def label(self, label: Any) -> None: + pass + + @property + def box_mask(self) -> Mat: + """ + Mask defining which pixels which belong to the object (in bounding_box_2d and set to 255) and those of the background (set to 0). + """ + return Mat() + + @box_mask.setter + def box_mask(self, box_mask: Any) -> None: + pass + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + +class BODY_18_PARTS(enum.Enum): + """ + Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_18. + + | BODY_18_PARTS | Keypoint number | + |:---:|:---:| + | NOSE | 0 | + | NECK | 1 | + | RIGHT_SHOULDER | 2 | + | RIGHT_ELBOW | 3 | + | RIGHT_WRIST | 4 | + | LEFT_SHOULDER | 5 | + | LEFT_ELBOW | 6 | + | LEFT_WRIST | 7 | + | RIGHT_HIP | 8 | + | RIGHT_KNEE | 9 | + | RIGHT_ANKLE | 10 | + | LEFT_HIP | 11 | + | LEFT_KNEE | 12 | + | LEFT_ANKLE | 13 | + | RIGHT_EYE | 14 | + | LEFT_EYE | 15 | + | RIGHT_EAR | 16 | + | LEFT_EAR | 17 | + """ + NOSE = enum.auto() + NECK = enum.auto() + RIGHT_SHOULDER = enum.auto() + RIGHT_ELBOW = enum.auto() + RIGHT_WRIST = enum.auto() + LEFT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + RIGHT_HIP = enum.auto() + RIGHT_KNEE = enum.auto() + RIGHT_ANKLE = enum.auto() + LEFT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + RIGHT_EYE = enum.auto() + LEFT_EYE = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_EAR = enum.auto() + LAST = enum.auto() + +class BODY_34_PARTS(enum.Enum): + """ + Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_34. 
+ + | BODY_34_PARTS | Keypoint number | + |:---:|:---:| + | PELVIS | 0 | + | NAVAL_SPINE | 1 | + | CHEST_SPINE | 2 | + | NECK | 3 | + | LEFT_CLAVICLE | 4 | + | LEFT_SHOULDER | 5 | + | LEFT_ELBOW | 6 | + | LEFT_WRIST | 7 | + | LEFT_HAND | 8 | + | LEFT_HANDTIP | 9 | + | LEFT_THUMB | 10 | + | RIGHT_CLAVICLE | 11 | + | RIGHT_SHOULDER | 12 | + | RIGHT_ELBOW | 13 | + | RIGHT_WRIST | 14 | + | RIGHT_HAND | 15 | + | RIGHT_HANDTIP | 16 | + | RIGHT_THUMB | 17 | + | LEFT_HIP | 18 | + | LEFT_KNEE | 19 | + | LEFT_ANKLE | 20 | + | LEFT_FOOT | 21 | + | RIGHT_HIP | 22 | + | RIGHT_KNEE | 23 | + | RIGHT_ANKLE | 24 | + | RIGHT_FOOT | 25 | + | HEAD | 26 | + | NOSE | 27 | + | LEFT_EYE | 28 | + | LEFT_EAR | 29 | + | RIGHT_EYE | 30 | + | RIGHT_EAR | 31 | + | LEFT_HEEL | 32 | + | RIGHT_HEEL | 33 | + """ + PELVIS = enum.auto() + NAVAL_SPINE = enum.auto() + CHEST_SPINE = enum.auto() + NECK = enum.auto() + LEFT_CLAVICLE = enum.auto() + LEFT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + LEFT_HAND = enum.auto() + LEFT_HANDTIP = enum.auto() + LEFT_THUMB = enum.auto() + RIGHT_CLAVICLE = enum.auto() + RIGHT_SHOULDER = enum.auto() + RIGHT_ELBOW = enum.auto() + RIGHT_WRIST = enum.auto() + RIGHT_HAND = enum.auto() + RIGHT_HANDTIP = enum.auto() + RIGHT_THUMB = enum.auto() + LEFT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + LEFT_FOOT = enum.auto() + RIGHT_HIP = enum.auto() + RIGHT_KNEE = enum.auto() + RIGHT_ANKLE = enum.auto() + RIGHT_FOOT = enum.auto() + HEAD = enum.auto() + NOSE = enum.auto() + LEFT_EYE = enum.auto() + LEFT_EAR = enum.auto() + RIGHT_EYE = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_HEEL = enum.auto() + RIGHT_HEEL = enum.auto() + LAST = enum.auto() + +class BODY_38_PARTS(enum.Enum): + """ + Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_38. 
+ + | BODY_38_PARTS | Keypoint number | + |:---:|:---:| + | PELVIS | 0 | + | SPINE_1 | 1 | + | SPINE_2 | 2 | + | SPINE_3 | 3 | + | NECK | 4 | + | NOSE | 5 | + | LEFT_EYE | 6 | + | RIGHT_EYE | 7 | + | LEFT_EAR | 8 | + | RIGHT_EAR | 9 | + | LEFT_CLAVICLE | 10 | + | RIGHT_CLAVICLE | 11 | + | LEFT_SHOULDER | 12 | + | RIGHT_SHOULDER | 13 | + | LEFT_ELBOW | 14 | + | RIGHT_ELBOW | 15 | + | LEFT_WRIST | 16 | + | RIGHT_WRIST | 17 | + | LEFT_HIP | 18 | + | RIGHT_HIP | 19 | + | LEFT_KNEE | 20 | + | RIGHT_KNEE | 21 | + | LEFT_ANKLE | 22 | + | RIGHT_ANKLE | 23 | + | LEFT_BIG_TOE | 24 | + | RIGHT_BIG_TOE | 25 | + | LEFT_SMALL_TOE | 26 | + | RIGHT_SMALL_TOE | 27 | + | LEFT_HEEL | 28 | + | RIGHT_HEEL | 29 | + | LEFT_HAND_THUMB_4 | 30 | + | RIGHT_HAND_THUMB_4 | 31 | + | LEFT_HAND_INDEX_1 | 32 | + | RIGHT_HAND_INDEX_1 | 33 | + | LEFT_HAND_MIDDLE_4 | 34 | + | RIGHT_HAND_MIDDLE_4 | 35 | + | LEFT_HAND_PINKY_1 | 36 | + | RIGHT_HAND_PINKY_1 | 37 | + """ + PELVIS = enum.auto() + SPINE_1 = enum.auto() + SPINE_2 = enum.auto() + SPINE_3 = enum.auto() + NECK = enum.auto() + NOSE = enum.auto() + LEFT_EYE = enum.auto() + RIGHT_EYE = enum.auto() + LEFT_EAR = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_CLAVICLE = enum.auto() + RIGHT_CLAVICLE = enum.auto() + LEFT_SHOULDER = enum.auto() + RIGHT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + RIGHT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + RIGHT_WRIST = enum.auto() + LEFT_HIP = enum.auto() + RIGHT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + RIGHT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + RIGHT_ANKLE = enum.auto() + LEFT_BIG_TOE = enum.auto() + RIGHT_BIG_TOE = enum.auto() + LEFT_SMALL_TOE = enum.auto() + RIGHT_SMALL_TOE = enum.auto() + LEFT_HEEL = enum.auto() + RIGHT_HEEL = enum.auto() + LEFT_HAND_THUMB_4 = enum.auto() + RIGHT_HAND_THUMB_4 = enum.auto() + LEFT_HAND_INDEX_1 = enum.auto() + RIGHT_HAND_INDEX_1 = enum.auto() + LEFT_HAND_MIDDLE_4 = enum.auto() + RIGHT_HAND_MIDDLE_4 = enum.auto() + LEFT_HAND_PINKY_1 = enum.auto() + RIGHT_HAND_PINKY_1 = enum.auto() + LAST = enum.auto() + +class INFERENCE_PRECISION(enum.Enum): + """ + Report the actual inference precision used + + | Enumerator | | + |:---:|:---:| + | FP32 | | + | FP16 | | + | INT8 | | + """ + FP32 = enum.auto() + FP16 = enum.auto() + INT8 = enum.auto() + LAST = enum.auto() + +class BODY_FORMAT(enum.Enum): + """ + Lists supported skeleton body models. + + | Enumerator | | + |:---:|:---:| + | BODY_18 | 18-keypoint model Basic body model | + | BODY_34 | 34-keypoint model Note: Requires body fitting enabled. | + | BODY_38 | 38-keypoint model Including simplified face, hands and feet.Note: Early Access | + """ + BODY_18 = enum.auto() + BODY_34 = enum.auto() + BODY_38 = enum.auto() + LAST = enum.auto() + +class BODY_KEYPOINTS_SELECTION(enum.Enum): + """ + Lists supported models for skeleton keypoints selection. + + | Enumerator | | + |:---:|:---:| + | FULL | Full keypoint model | + | UPPER_BODY | Upper body keypoint model Will output only upper body (from hip). | + """ + FULL = enum.auto() + UPPER_BODY = enum.auto() + LAST = enum.auto() + +def get_idx(part: BODY_18_PARTS) -> int: + """ + Return associated index of each sl.BODY_18_PARTS. + """ + return int() + +def get_idx_34(part: BODY_34_PARTS) -> int: + """ + Return associated index of each sl.BODY_34_PARTS. + """ + return int() + +def get_idx_38(part: BODY_38_PARTS) -> int: + """ + Return associated index of each sl.BODY_38_PARTS. 
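+
+    For example, to read the nose keypoint of a detected body (``body`` is assumed to be an sl.BodyData obtained with sl.BODY_FORMAT.BODY_38):
+
+    .. code-block:: python
+
+        nose_idx = get_idx_38(sl.BODY_38_PARTS.NOSE)
+        nose_3d = body.keypoint[nose_idx]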
+ """ + return int() + +class ObjectsBatch: + """ + Class containing batched data of a detected objects from the object detection module. + + This class can be used to store trajectories. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def id(self) -> int: + """ + Id of the batch. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def sublabel(self) -> OBJECT_SUBCLASS: + """ + Objects sub-class/sub-category to identify the object type. + """ + return OBJECT_SUBCLASS() + + @sublabel.setter + def sublabel(self, sublabel: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Objects tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @property + def timestamps(self) -> list[Timestamp]: + """ + List of timestamps for each object. + """ + return list[Timestamp]() + + @property + def label(self) -> OBJECT_CLASS: + """ + Objects class/category to identify the object type. + """ + return OBJECT_CLASS() + + @label.setter + def label(self, label: Any) -> None: + pass + + @property + def action_states(self) -> list[OBJECT_ACTION_STATE]: + """ + List of action states for each object. + """ + return list[OBJECT_ACTION_STATE]() + + def positions(self) -> np.array[float][float]: + """ + NumPy array of positions for each object. + """ + return np.array[float][float]() + + def position_covariances(self) -> np.array[float][float]: + """ + NumPy array of positions' covariances for each object. + """ + return np.array[float][float]() + + def velocities(self) -> np.array[float][float]: + """ + NumPy array of 3D velocities for each object. + """ + return np.array[float][float]() + + def bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D bounding boxes for each object. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float][float]() + + def bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 2D bounding boxes for each object. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int][int]() + + def confidences(self) -> np.array[float]: + """ + NumPy array of confidences for each object. + """ + return np.array[float]() + + def head_bounding_boxes_2d(self) -> np.array[int][int][int]: + return np.array[int][int][int]() + + def head_bounding_boxes(self) -> np.array[float][float][float]: + return np.array[float][float][float]() + + def head_positions(self) -> np.array[float][float]: + return np.array[float][float]() + + +class Objects: + """ + Class containing the results of the object detection module. + + The detected objects are listed in object_list. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_list(self) -> list[ObjectData]: + """ + List of detected objects. + """ + return list[ObjectData]() + + @object_list.setter + def object_list(self, object_list: Any) -> None: + pass + + @property + def is_new(self) -> bool: + """ + Whether object_list has already been retrieved or not. 
+ Default: False + """ + return bool() + + @is_new.setter + def is_new(self, is_new: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + Timestamp corresponding to the frame acquisition. + This value is especially useful for the async mode to synchronize the data. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @property + def is_tracked(self) -> bool: + """ + Whether both the object tracking and the world orientation has been setup. + Default: False + """ + return bool() + + @is_tracked.setter + def is_tracked(self, is_tracked: Any) -> None: + pass + + def get_object_data_from_id(self, py_object_data: ObjectData, object_data_id: int) -> bool: + """ + Method that looks for a given object id in the current objects list. + :param py_object_data: sl.ObjectData to fill if the search succeeded. (Direction: out) + :param object_data_id: Id of the sl.ObjectData to search. (Direction: in) + :return: True if found, otherwise False. + """ + return bool() + + +class BodiesBatch: + """ + Class containing batched data of a detected bodies/persons from the body tracking module. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def id(self) -> int: + """ + Id of the batch. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def action_states(self) -> list[OBJECT_ACTION_STATE]: + """ + List of action states for each body/person. + """ + return list[OBJECT_ACTION_STATE]() + + @property + def timestamps(self) -> list[Timestamp]: + """ + List of timestamps for each body/person. + """ + return list[Timestamp]() + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Bodies/persons tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + def positions(self) -> np.array[float][float]: + """ + NumPy array of positions for each body/person. + """ + return np.array[float][float]() + + def position_covariances(self) -> np.array[float][float]: + """ + NumPy array of positions' covariances for each body/person. + """ + return np.array[float][float]() + + def velocities(self) -> np.array[float][float]: + """ + NumPy array of 3D velocities for each body/person. + """ + return np.array[float][float]() + + def bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D bounding boxes for each body/person. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float][float]() + + def bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 2D bounding boxes for each body/person. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int][int]() + + def confidences(self) -> np.array[float]: + """ + NumPy array of confidences for each body/person. + """ + return np.array[float]() + + def keypoints_2d(self) -> np.array[int][int][int]: + return np.array[int][int][int]() + + def keypoints(self) -> np.array[float][float][float]: + """ + NumPy array of 3D keypoints for each body/person. 
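+
+        For example, to walk a batched trajectory (``batch`` is assumed to be an sl.BodiesBatch retrieved from the batching system):
+
+        .. code-block:: python
+
+            for ts, kp in zip(batch.timestamps, batch.keypoints()):
+                print(ts.get_milliseconds(), kp)  # one keypoint set per batched detection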
+        """
+        return np.array[float][float][float]()
+
+    def head_bounding_boxes_2d(self) -> np.array[int][int][int]:
+        """
+        NumPy array of 2D bounding boxes of the head for each body/person.
+        """
+        return np.array[int][int][int]()
+
+    def head_bounding_boxes(self) -> np.array[float][float][float]:
+        """
+        NumPy array of 3D bounding boxes of the head for each body/person.
+        """
+        return np.array[float][float][float]()
+
+    def head_positions(self) -> np.array[float][float]:
+        """
+        NumPy array of 3D positions of the head for each body/person.
+        """
+        return np.array[float][float]()
+
+    def keypoint_confidences(self) -> np.array[float][float]:
+        """
+        NumPy array of detection confidences for each keypoint of each body/person.
+        """
+        return np.array[float][float]()
+
+
+class Bodies:
+    """
+    Class containing the results of the body tracking module.
+
+    The detected bodies/persons are listed in body_list.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def body_list(self) -> list[BodyData]:
+        """
+        List of detected bodies/persons.
+        """
+        return list[BodyData]()
+
+    @body_list.setter
+    def body_list(self, body_list: Any) -> None:
+        pass
+
+    @property
+    def is_tracked(self) -> bool:
+        """
+        Whether both the body tracking and the world orientation have been set up.
+        Default: False
+        """
+        return bool()
+
+    @is_tracked.setter
+    def is_tracked(self, is_tracked: Any) -> None:
+        pass
+
+    @property
+    def inference_precision_mode(self) -> INFERENCE_PRECISION:
+        """
+        Status of the actual inference precision mode used to detect the bodies/persons.
+        .. note::
+            It depends on the GPU hardware support, the sl.BodyTrackingParameters.allow_reduced_precision_inference input parameter and the model support.
+        """
+        return INFERENCE_PRECISION()
+
+    @inference_precision_mode.setter
+    def inference_precision_mode(self, inference_precision_mode: Any) -> None:
+        pass
+
+    @property
+    def is_new(self) -> bool:
+        """
+        Whether body_list has already been retrieved or not.
+        Default: False
+        """
+        return bool()
+
+    @is_new.setter
+    def is_new(self, is_new: Any) -> None:
+        pass
+
+    @property
+    def body_format(self) -> BODY_FORMAT:
+        """
+        Body format used in sl.BodyTrackingParameters.body_format parameter.
+        """
+        return BODY_FORMAT()
+
+    @body_format.setter
+    def body_format(self, body_format: Any) -> None:
+        pass
+
+    @property
+    def timestamp(self) -> Timestamp:
+        """
+        Timestamp corresponding to the frame acquisition.
+        This value is especially useful for the async mode to synchronize the data.
+        """
+        return Timestamp()
+
+    @timestamp.setter
+    def timestamp(self, timestamp: Any) -> None:
+        pass
+
+    def get_body_data_from_id(self, py_body_data: BodyData, body_data_id: int) -> bool:
+        """
+        Method that looks for a given body id in the current bodies list.
+        :param py_body_data: sl.BodyData to fill if the search succeeded. (Direction: out)
+        :param body_data_id: Id of the sl.BodyData to search. (Direction: in)
+        :return: True if found, otherwise False.
+        """
+        return bool()
+
+
+class BatchParameters:
+    """
+    Class containing a set of parameters for batch object detection.
+
+    The default constructor sets all parameters to their default settings.
+    .. note::
+        Parameters can be adjusted by the user.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def latency(self) -> float:
+        """
+        Trajectories will be output in batch with the desired latency in seconds.
+        During this waiting time, re-identification of objects is done in the background.
+        ..
note:: + Specifying a short latency will limit the search (falling in timeout) for previously seen object ids but will be closer to real time output. + + .. note:: + Specifying a long latency will reduce the change of timeout in re-identification but increase difference with live output. + """ + return float() + + @latency.setter + def latency(self, latency: Any) -> None: + pass + + @property + def enable(self) -> bool: + """ + Whether to enable the batch option in the object detection module. + Batch queueing system provides: + - deep-learning based re-identification + - trajectory smoothing and filtering + + Default: False + .. note:: + To activate this option, enable must be set to True. + """ + return bool() + + @enable.setter + def enable(self, enable: Any) -> None: + pass + + @property + def id_retention_time(self) -> float: + """ + Max retention time in seconds of a detected object. + After this time, the same object will mostly have a different id. + """ + return float() + + @id_retention_time.setter + def id_retention_time(self, id_retention_time: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + param enable : Activates enable + param id_retention_time : Chosen id_retention_time + param batch_duration : Chosen latency + """ + pass + + +class ObjectDetectionParameters: + """ + Class containing a set of parameters for the object detection module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_segmentation(self) -> bool: + """ + Whether the object masks will be computed. + Default: False + """ + return bool() + + @enable_segmentation.setter + def enable_segmentation(self, enable_segmentation: Any) -> None: + pass + + @property + def allow_reduced_precision_inference(self) -> bool: + """ + Whether to allow inference to run at a lower precision to improve runtime and memory usage. + It might increase the initial optimization time and could include downloading calibration data or calibration cache and slightly reduce the accuracy. + .. note:: + The fp16 is automatically enabled if the GPU is compatible and provides a speed up of almost x2 and reduce memory usage by almost half, no precision loss. + + .. note:: + This setting allow int8 precision which can speed up by another x2 factor (compared to fp16, or x4 compared to fp32) and half the fp16 memory usage, however some accuracy could be lost. + + .. note:: + The accuracy loss should not exceed 1-2% on the compatible models. + + .. note:: + The current compatible models are all sl.AI_MODELS.HUMAN_BODY_XXXX. + """ + return bool() + + @allow_reduced_precision_inference.setter + def allow_reduced_precision_inference(self, allow_reduced_precision_inference: Any) -> None: + pass + + @property + def filtering_mode(self) -> OBJECT_FILTERING_MODE: + """ + Filtering mode that should be applied to raw detections. + Default: sl.OBJECT_FILTERING_MODE.NMS_3D (same behavior as previous ZED SDK version) + .. note:: + This parameter is only used in detection model sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX + + and sl.OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS. + .. note:: + For custom object, it is recommended to use sl.OBJECT_FILTERING_MODE.NMS_3D_PER_CLASS + + or sl.OBJECT_FILTERING_MODE.NONE. + .. 
note::
+            In this case, you might need to add your own NMS filter before ingesting the boxes into the object detection module.
+        """
+        return OBJECT_FILTERING_MODE()
+
+    @filtering_mode.setter
+    def filtering_mode(self, filtering_mode: Any) -> None:
+        pass
+
+    @property
+    def batch_parameters(self) -> BatchParameters:
+        """
+        Batching system parameters.
+        Batching system (introduced in 3.5) performs short-term re-identification with deep-learning and trajectory filtering.
+        \n sl.BatchParameters.enable must be set to True to use this feature (disabled by default).
+        """
+        return BatchParameters()
+
+    @batch_parameters.setter
+    def batch_parameters(self, batch_parameters: Any) -> None:
+        pass
+
+    @property
+    def instance_module_id(self) -> int:
+        """
+        Id of the module instance.
+        This is used to identify which object detection module instance is used.
+        """
+        return int()
+
+    @instance_module_id.setter
+    def instance_module_id(self, instance_module_id: Any) -> None:
+        pass
+
+    @property
+    def detection_model(self) -> OBJECT_DETECTION_MODEL:
+        """
+        sl.OBJECT_DETECTION_MODEL to use.
+        Default: sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_FAST
+        """
+        return OBJECT_DETECTION_MODEL()
+
+    @detection_model.setter
+    def detection_model(self, detection_model: Any) -> None:
+        pass
+
+    @property
+    def fused_objects_group_name(self) -> str:
+        """
+        In a multi camera setup, specify which group this model belongs to.
+
+        In a multi camera setup, multiple cameras can be used to detect objects, and multiple detectors having a similar output layout can see the same object.
+        Therefore, Fusion will fuse together the outputs received by multiple detectors only if they are part of the same fused_objects_group_name.
+
+        .. note::
+            This parameter is ignored in a single-camera setup but must be set in a multi camera setup.
+        """
+        return str()
+
+    @fused_objects_group_name.setter
+    def fused_objects_group_name(self, fused_objects_group_name: Any) -> None:
+        pass
+
+    @property
+    def enable_tracking(self) -> bool:
+        """
+        Whether the object detection system includes object tracking capabilities across a sequence of images.
+        Default: True
+        """
+        return bool()
+
+    @enable_tracking.setter
+    def enable_tracking(self, enable_tracking: Any) -> None:
+        pass
+
+    @property
+    def custom_onnx_file(self) -> str:
+        """
+        Path to the YOLO-like ONNX file for custom object detection run in the ZED SDK.
+
+        When `detection_model` is OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS, an ONNX model must be passed so that the ZED SDK can optimize it for your GPU and run inference on it.
+
+        The resulting optimized model will be saved for re-use in the future.
+
+        .. attention:: - The model must be a YOLO-like model.
+        .. attention:: - The caching uses the `custom_onnx_file` string along with your GPU specs to decide whether to use the cached optimized model or to optimize the passed ONNX model.
+            If you want to use a different model (i.e. an ONNX with different weights), you must use a different `custom_onnx_file` string or delete the cached optimized model in
+            /resources.
+
+        .. note::
+            This parameter is ignored when detection_model is not OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS.
+        """
+        return str()
+
+    @custom_onnx_file.setter
+    def custom_onnx_file(self, custom_onnx_file: Any) -> None:
+        pass
+
+    @property
+    def max_range(self) -> float:
+        """
+        Upper depth range for detections.
+        Default: -1 (value set in sl.InitParameters.depth_maximum_distance)
+        ..
note:: + The value cannot be greater than sl.InitParameters.depth_maximum_distance and its unit is defined in sl.InitParameters.coordinate_units. + """ + return float() + + @max_range.setter + def max_range(self, max_range: Any) -> None: + pass + + @property + def prediction_timeout_s(self) -> float: + """ + Prediction duration of the ZED SDK when an object is not detected anymore before switching its state to sl.OBJECT_TRACKING_STATE.SEARCHING. + It prevents the jittering of the object state when there is a short misdetection. + \n The user can define their own prediction time duration. + \n Default: 0.2 + .. note:: + During this time, the object will have sl.OBJECT_TRACKING_STATE.OK state even if it is not detected. + + .. note:: + The duration is expressed in seconds. + + .. warning:: prediction_timeout_s will be clamped to 1 second as the prediction is getting worse with time. + .. warning:: Setting this parameter to 0 disables the ZED SDK predictions. + """ + return float() + + @prediction_timeout_s.setter + def prediction_timeout_s(self, prediction_timeout_s: Any) -> None: + pass + + @property + def custom_onnx_dynamic_input_shape(self) -> Resolution: + """ + Resolution to the YOLO-like onnx file for custom object detection ran in the ZED SDK. This resolution defines the input tensor size for dynamic shape ONNX model only. The batch and channel dimensions are automatically handled, it assumes it's color images like default YOLO models. + + .. note:: + This parameter is only used when detection_model is OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS and the provided ONNX file is using dynamic shapes. + + .. attention:: - Multiple model only support squared images + + Default: Squared images 512x512 (input tensor will be 1x3x512x512) + """ + return Resolution() + + @custom_onnx_dynamic_input_shape.setter + def custom_onnx_dynamic_input_shape(self, custom_onnx_dynamic_input_shape: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param enable_tracking: Activates enable_tracking + :param enable_segmentation: Activates enable_segmentation + :param detection_model: Chosen detection_model + :param max_range: Chosen max_range + :param batch_trajectories_parameters: Chosen batch_parameters + :param filtering_mode: Chosen filtering_mode + :param prediction_timeout_s: Chosen prediction_timeout_s + :param allow_reduced_precision_inference: Activates allow_reduced_precision_inference + :param instance_module_id: Chosen instance_module_id + """ + pass + + +class ObjectDetectionRuntimeParameters: + """ + Class containing a set of runtime parameters for the object detection module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_class_filter(self) -> list[OBJECT_CLASS]: + """ + Defines which object types to detect and track. + Default: [] (all classes are tracked) + .. note:: + Fewer object types can slightly speed up the process since every object is tracked. + + .. note:: + Will output only the selected classes. + + + In order to get all the available classes, the filter list must be empty : + .. code-block:: text + + object_class_filter = {}; + + + To select a set of specific object classes, like vehicles, persons and animals for instance: + .. 
code-block:: text
+
+                object_class_filter = {sl.OBJECT_CLASS.VEHICLE, sl.OBJECT_CLASS.PERSON, sl.OBJECT_CLASS.ANIMAL};
+        """
+        return list[OBJECT_CLASS]()
+
+    @object_class_filter.setter
+    def object_class_filter(self, object_class_filter: Any) -> None:
+        pass
+
+    @property
+    def detection_confidence_threshold(self) -> float:
+        """
+        Confidence threshold.
+        From 1 to 100: a low value (e.g. 1) lets through many uncertain objects, while a high value (e.g. 99) keeps only a few, very precise objects.
+        \n Default: 20
+        .. note::
+            If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked.
+
+        .. note::
+            detection_confidence_threshold is used as a fallback when sl::ObjectDetectionRuntimeParameters.object_class_detection_confidence_threshold is partially set.
+        """
+        return float()
+
+    @detection_confidence_threshold.setter
+    def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None:
+        pass
+
+    @property
+    def object_class_detection_confidence_threshold(self) -> dict:
+        """
+        Dictionary of confidence thresholds for each class (can be empty for some classes).
+        .. note::
+            sl.ObjectDetectionRuntimeParameters.detection_confidence_threshold will be taken as fallback/default value.
+        """
+        return {}
+
+    @object_class_detection_confidence_threshold.setter
+    def object_class_detection_confidence_threshold(self, object_class_detection_confidence_threshold: Any) -> None:
+        pass
+
+    def __dealloc__(self) -> None:
+        """
+        Default constructor.
+        All the parameters are set to their default values.
+        :param detection_confidence_threshold: Chosen detection_confidence_threshold
+        :param object_class_filter: Chosen object_class_filter
+        :param object_class_detection_confidence_threshold: Chosen object_class_detection_confidence_threshold
+        """
+        pass
+
+
+class CustomObjectDetectionProperties:
+    """
+    Class containing a set of runtime properties of a certain class ID for the object detection module using a custom model.
+
+    The default constructor sets all parameters to their default settings.
+    .. note::
+        Parameters can be adjusted by the user.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def tracking_max_dist(self) -> float:
+        """
+        Maximum tracking distance threshold (in meters) before dropping the tracked object when it has been unseen over this distance.
+
+        By default, tracked objects are not discarded based on distance.
+        Only valid for static objects.
+        """
+        return float()
+
+    @tracking_max_dist.setter
+    def tracking_max_dist(self, tracking_max_dist: Any) -> None:
+        pass
+
+    @property
+    def is_grounded(self) -> bool:
+        """
+        Provide a hypothesis about the object's movements (degrees of freedom or DoF) to improve the object tracking.
+        - true: 2 DoF projected alongside the floor plane. Typical for objects standing on the ground, such as persons or vehicles.
+            The projection implies that the objects cannot be superposed on multiple horizontal levels.
+        - false: 6 DoF (full 3D movements are allowed).
+
+        .. note::
+            This parameter cannot be changed for a given object tracking id.
+
+        .. note::
+            It is advised to set it by labels to avoid issues.
+        """
+        return bool()
+
+    @is_grounded.setter
+    def is_grounded(self, is_grounded: Any) -> None:
+        pass
+
+    @property
+    def min_box_height_meters(self) -> float:
+        """
+        Minimum allowed 3D height.
+
+        Any prediction smaller than that will be either discarded (if the object is tracked and in SEARCHING state) or clamped.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @min_box_height_meters.setter
+    def min_box_height_meters(self, min_box_height_meters: Any) -> None:
+        pass
+
+    @property
+    def enabled(self) -> bool:
+        """
+        Whether objects of this class are kept or discarded.
+        """
+        return bool()
+
+    @enabled.setter
+    def enabled(self, enabled: Any) -> None:
+        pass
+
+    @property
+    def tracking_timeout(self) -> float:
+        """
+        Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time.
+
+        By default, let the tracker decide internally based on the internal sub class of the tracked object.
+        """
+        return float()
+
+    @tracking_timeout.setter
+    def tracking_timeout(self, tracking_timeout: Any) -> None:
+        pass
+
+    @property
+    def min_box_width_normalized(self) -> float:
+        """
+        Minimum allowed width normalized to the image size.
+
+        Any prediction smaller than that will be filtered out.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @min_box_width_normalized.setter
+    def min_box_width_normalized(self, min_box_width_normalized: Any) -> None:
+        pass
+
+    @property
+    def object_acceleration_preset(self) -> OBJECT_ACCELERATION_PRESET:
+        """
+        Preset defining the expected maximum acceleration of the tracked object.
+
+        Determines how the ZED SDK interprets object acceleration, affecting tracking behavior and predictions.
+        Default: the DEFAULT preset
+        """
+        return OBJECT_ACCELERATION_PRESET()
+
+    @object_acceleration_preset.setter
+    def object_acceleration_preset(self, object_acceleration_preset: Any) -> None:
+        pass
+
+    @property
+    def max_box_height_meters(self) -> float:
+        """
+        Maximum allowed 3D height.
+
+        Any prediction bigger than that will be either discarded (if the object is tracked and in SEARCHING state) or clamped.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @max_box_height_meters.setter
+    def max_box_height_meters(self, max_box_height_meters: Any) -> None:
+        pass
+
+    @property
+    def max_allowed_acceleration(self) -> float:
+        """
+        Manually override the acceleration preset.
+
+        If set, this value takes precedence over the selected preset, allowing for a custom maximum acceleration.
+        Unit is m/s^2.
+        """
+        return float()
+
+    @max_allowed_acceleration.setter
+    def max_allowed_acceleration(self, max_allowed_acceleration: Any) -> None:
+        pass
+
+    @property
+    def max_box_width_normalized(self) -> float:
+        """
+        Maximum allowed width normalized to the image size.
+
+        Any prediction bigger than that will be filtered out.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @max_box_width_normalized.setter
+    def max_box_width_normalized(self, max_box_width_normalized: Any) -> None:
+        pass
+
+    @property
+    def max_box_width_meters(self) -> float:
+        """
+        Maximum allowed 3D width.
+
+        Any prediction bigger than that will be either discarded (if the object is tracked and in SEARCHING state) or clamped.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @max_box_width_meters.setter
+    def max_box_width_meters(self, max_box_width_meters: Any) -> None:
+        pass
+
+    @property
+    def is_static(self) -> bool:
+        """
+        Provide a hypothesis about the object's staticity to improve the object tracking.
+        - true: the object will be assumed to never move nor be moved.
+        - false: the object will be assumed to be able to move or be moved.
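+
+        A small illustrative sketch combining the tracking hypotheses of this class (values are arbitrary):
+
+        .. code-block:: python
+
+            props = sl.CustomObjectDetectionProperties()
+            props.is_static = True        # e.g. wall-mounted equipment that never moves
+            props.is_grounded = False     # not constrained to the floor plane
+            props.tracking_timeout = 5.0  # keep the track alive for 5 s without detection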
+ """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @property + def native_mapped_class(self) -> OBJECT_SUBCLASS: + """ + For increased accuracy, the native sl::OBJECT_SUBCLASS mapping, if any. + + Native objects have refined internal parameters for better 3D projection and tracking accuracy. + If one of the custom objects can be mapped to one the native sl::OBJECT_SUBCLASS, this can help to boost the tracking accuracy. + Default: no mapping + """ + return OBJECT_SUBCLASS() + + @native_mapped_class.setter + def native_mapped_class(self, native_mapped_class: Any) -> None: + pass + + @property + def detection_confidence_threshold(self) -> float: + """ + Confidence threshold. + + From 1 to 100, with 1 meaning a low threshold, more uncertain objects and 99 very few but very precise objects. + Default: 20.f + + .. note:: + If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked. + """ + return float() + + @detection_confidence_threshold.setter + def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None: + pass + + @property + def min_box_width_meters(self) -> float: + """ + Minimum allowed 3D width. + + Any prediction smaller than that will be either discarded (if object is tracked and in SEARCHING state) or clamped. + Default: -1 (no filtering) + """ + return float() + + @min_box_width_meters.setter + def min_box_width_meters(self, min_box_width_meters: Any) -> None: + pass + + @property + def min_box_height_normalized(self) -> float: + """ + Minimum allowed height normalized to the image size. + + Any prediction smaller than that will be filtered out. + Default: -1 (no filtering) + """ + return float() + + @min_box_height_normalized.setter + def min_box_height_normalized(self, min_box_height_normalized: Any) -> None: + pass + + @property + def max_box_height_normalized(self) -> float: + """ + Maximum allowed height normalized to the image size. + + Any prediction bigger than that will be filtered out. + Default: -1 (no filtering) + """ + return float() + + @max_box_height_normalized.setter + def max_box_height_normalized(self, max_box_height_normalized: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param detection_confidence_threshold: Chosen detection_confidence_threshold + :param object_class_filter: Chosen object_class_filter + :param object_class_detection_confidence_threshold: Chosen object_class_detection_confidence_threshold + """ + pass + + +class CustomObjectDetectionRuntimeParameters: + """ + Class containing a set of runtime parameters for the object detection module using your own model ran by the SDK. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_detection_properties(self) -> CustomObjectDetectionProperties: + """ + Global object detection properties. + + .. note:: + object_detection_properties is used as a fallback when sl::CustomObjectDetectionRuntimeParameters.object_class_detection_properties is partially set. 
+ """ + return CustomObjectDetectionProperties() + + @object_detection_properties.setter + def object_detection_properties(self, object_detection_properties: Any) -> None: + pass + + @property + def object_class_detection_properties(self) -> dict: + """ + Per class object detection properties. + """ + return {} + + @object_class_detection_properties.setter + def object_class_detection_properties(self, object_class_detection_properties: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + """ + pass + + +class BodyTrackingParameters: + """ + Class containing a set of parameters for the body tracking module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_segmentation(self) -> bool: + """ + Whether the body/person masks will be computed. + Default: False + """ + return bool() + + @enable_segmentation.setter + def enable_segmentation(self, enable_segmentation: Any) -> None: + pass + + @property + def allow_reduced_precision_inference(self) -> bool: + """ + Whether to allow inference to run at a lower precision to improve runtime and memory usage. + It might increase the initial optimization time and could include downloading calibration data or calibration cache and slightly reduce the accuracy. + .. note:: + The fp16 is automatically enabled if the GPU is compatible and provides a speed up of almost x2 and reduce memory usage by almost half, no precision loss. + + .. note:: + This setting allow int8 precision which can speed up by another x2 factor (compared to fp16, or x4 compared to fp32) and half the fp16 memory usage, however some accuracy could be lost. + + .. note:: + The accuracy loss should not exceed 1-2% on the compatible models. + + .. note:: + The current compatible models are all sl.AI_MODELS.HUMAN_BODY_XXXX. + """ + return bool() + + @allow_reduced_precision_inference.setter + def allow_reduced_precision_inference(self, allow_reduced_precision_inference: Any) -> None: + pass + + @property + def instance_module_id(self) -> int: + """ + Id of the module instance. + This is used to identify which body tracking module instance is used. + """ + return int() + + @instance_module_id.setter + def instance_module_id(self, instance_module_id: Any) -> None: + pass + + @property + def detection_model(self) -> BODY_TRACKING_MODEL: + """ + sl.BODY_TRACKING_MODEL to use. + Default: sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE + """ + return BODY_TRACKING_MODEL() + + @detection_model.setter + def detection_model(self, detection_model: Any) -> None: + pass + + @property + def enable_body_fitting(self) -> bool: + """ + Whether to apply the body fitting. + Default: False + """ + return bool() + + @enable_body_fitting.setter + def enable_body_fitting(self, enable_body_fitting: Any) -> None: + pass + + @property + def body_format(self) -> BODY_FORMAT: + """ + Body format to be outputted by the ZED SDK with sl.Camera.retrieve_bodies(). + Default: sl.BODY_FORMAT.BODY_18 + """ + return BODY_FORMAT() + + @body_format.setter + def body_format(self, body_format: Any) -> None: + pass + + @property + def enable_tracking(self) -> bool: + """ + Whether the body tracking system includes body/person tracking capabilities across a sequence of images. 
+        """
+        return bool()
+
+    @enable_tracking.setter
+    def enable_tracking(self, enable_tracking: Any) -> None:
+        pass
+
+    @property
+    def max_range(self) -> float:
+        """
+        Upper depth range for detections.
+        Default: -1 (value set in sl.InitParameters.depth_maximum_distance)
+        .. note::
+            The value cannot be greater than sl.InitParameters.depth_maximum_distance and its unit is defined in sl.InitParameters.coordinate_units.
+        """
+        return float()
+
+    @max_range.setter
+    def max_range(self, max_range: Any) -> None:
+        pass
+
+    @property
+    def prediction_timeout_s(self) -> float:
+        """
+        Prediction duration of the ZED SDK when an object is not detected anymore before switching its state to sl.OBJECT_TRACKING_STATE.SEARCHING.
+        It prevents the jittering of the object state when there is a short misdetection.
+        \n The user can define their own prediction time duration.
+        \n Default: 0.2
+        .. note::
+            During this time, the object will have the sl.OBJECT_TRACKING_STATE.OK state even if it is not detected.
+
+        .. note::
+            The duration is expressed in seconds.
+
+        .. warning:: prediction_timeout_s will be clamped to 1 second as the prediction is getting worse with time.
+        .. warning:: Setting this parameter to 0 disables the ZED SDK predictions.
+        """
+        return float()
+
+    @prediction_timeout_s.setter
+    def prediction_timeout_s(self, prediction_timeout_s: Any) -> None:
+        pass
+
+    @property
+    def body_selection(self) -> BODY_KEYPOINTS_SELECTION:
+        """
+        Selection of keypoints to be outputted by the ZED SDK with sl.Camera.retrieve_bodies().
+        Default: sl.BODY_KEYPOINTS_SELECTION.FULL
+        """
+        return BODY_KEYPOINTS_SELECTION()
+
+    @body_selection.setter
+    def body_selection(self, body_selection: Any) -> None:
+        pass
+
+    def __dealloc__(self) -> None:
+        """
+        Destructor.
+        """
+        pass
+
+
+class BodyTrackingRuntimeParameters:
+    """
+    Class containing a set of runtime parameters for the body tracking module.
+
+    The default constructor sets all parameters to their default settings.
+    .. note::
+        Parameters can be adjusted by the user.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def skeleton_smoothing(self) -> float:
+        """
+        Control of the smoothing of the fitted fused skeleton.
+        It ranges from 0 (low smoothing) to 1 (high smoothing).
+        \n Default: 0
+        """
+        return float()
+
+    @skeleton_smoothing.setter
+    def skeleton_smoothing(self, skeleton_smoothing: Any) -> None:
+        pass
+
+    @property
+    def detection_confidence_threshold(self) -> float:
+        """
+        Confidence threshold.
+        From 1 to 100, where 1 is a low threshold (more, but more uncertain, objects) and 99 keeps only very few but very precise objects.
+        \n Default: 20
+        .. note::
+            If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked.
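+
+        A minimal illustrative sketch (assumes an opened sl.Camera named ``zed`` with body tracking enabled):
+
+        .. code-block:: text
+
+            rt = sl.BodyTrackingRuntimeParameters()
+            rt.detection_confidence_threshold = 40  # drop the most uncertain detections
+            bodies = sl.Bodies()
+            err = zed.retrieve_bodies(bodies, rt)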
+ """ + return float() + + @detection_confidence_threshold.setter + def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None: + pass + + @property + def minimum_keypoints_threshold(self) -> int: + """ + Minimum threshold for the keypoints. + The ZED SDK will only output the keypoints of the skeletons with threshold greater than this value. + \n Default: 0 + .. note:: + It is useful, for example, to remove unstable fitting results when a skeleton is partially occluded. + """ + return int() + + @minimum_keypoints_threshold.setter + def minimum_keypoints_threshold(self, minimum_keypoints_threshold: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param detection_confidence_threshold: Chosen detection_confidence_threshold + :param minimum_keypoints_threshold: Chosen minimum_keypoints_threshold + :param skeleton_smoothing: Chosen skeleton_smoothing + """ + pass + + +class PlaneDetectionParameters: + """ + Class containing a set of parameters for the plane detection functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def normal_similarity_threshold(self) -> float: + """ + Controls the spread of plane by checking the angle difference. + Default: 15 degrees + """ + return float() + + @normal_similarity_threshold.setter + def normal_similarity_threshold(self, normal_similarity_threshold: Any) -> None: + pass + + @property + def max_distance_threshold(self) -> float: + """ + Controls the spread of plane by checking the position difference. + Default: 0.15 meters + """ + return float() + + @max_distance_threshold.setter + def max_distance_threshold(self, max_distance_threshold: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + Values: + - max_distance_threshold : 0.15 meters + - normal_similarity_threshold : 15.0 degrees + """ + pass + + +class RegionOfInterestParameters: + """ + Class containing a set of parameters for the plane detection functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def image_height_ratio_cutoff(self) -> float: + """ + By default consider only the lower half of the image, can be useful to filter out the sky + Default: 0.5, correspond to the lower half of the image + """ + return float() + + @image_height_ratio_cutoff.setter + def image_height_ratio_cutoff(self, image_height_ratio_cutoff: Any) -> None: + pass + + @property + def auto_apply_module(self) -> set[MODULE]: + """ + Once computed the ROI computed will be automatically applied + Default: Enabled + """ + return set[MODULE]() + + @auto_apply_module.setter + def auto_apply_module(self, auto_apply_module: Any) -> None: + pass + + @property + def depth_far_threshold_meters(self) -> float: + """ + Filtering how far object in the ROI should be considered, this is useful for a vehicle for instance + Default: 2.5 meters + """ + return float() + + @depth_far_threshold_meters.setter + def depth_far_threshold_meters(self, depth_far_threshold_meters: Any) -> None: + pass + + def __dealloc__(self) -> None: + pass + + +def get_current_timestamp() -> Timestamp: + """ + Class containing a set of parameters for the plane detection functionality. 
+    """
+    return Timestamp()
+
+class Resolution:
+    """
+    Structure containing the width and height of an image.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def height(self) -> int:
+        """
+        Height of the image in pixels.
+        """
+        return int()
+
+    @height.setter
+    def height(self, height: Any) -> None:
+        pass
+
+    @property
+    def width(self) -> int:
+        """
+        Width of the image in pixels.
+        """
+        return int()
+
+    @width.setter
+    def width(self, width: Any) -> None:
+        pass
+
+    def area(self) -> int:
+        """
+        Area (width * height) of the image.
+        """
+        return int()
+
+    def __richcmp__(left, right, op) -> None:
+        pass
+
+
+class Rect:
+    """
+    Class defining a 2D rectangle with top-left corner coordinates and width/height in pixels.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def x(self) -> int:
+        """
+        x coordinate of the top-left corner.
+        """
+        return int()
+
+    @x.setter
+    def x(self, x: Any) -> None:
+        pass
+
+    @property
+    def y(self) -> int:
+        """
+        y coordinate of the top-left corner.
+        """
+        return int()
+
+    @y.setter
+    def y(self, y: Any) -> None:
+        pass
+
+    @property
+    def height(self) -> int:
+        """
+        Height of the rectangle in pixels.
+        """
+        return int()
+
+    @height.setter
+    def height(self, height: Any) -> None:
+        pass
+
+    @property
+    def width(self) -> int:
+        """
+        Width of the rectangle in pixels.
+        """
+        return int()
+
+    @width.setter
+    def width(self, width: Any) -> None:
+        pass
+
+    def area(self) -> int:
+        """
+        Returns the area of the rectangle.
+        """
+        return int()
+
+    def is_empty(self) -> bool:
+        """
+        Tests if the given sl.Rect is empty (width and/or height is zero).
+        """
+        return bool()
+
+    def contains(self, target: Rect, proper = False) -> bool:
+        """
+        Tests if this sl.Rect contains the **target** sl.Rect.
+        :return: True if this rectangle contains the **target** rectangle, otherwise False.
+        .. note::
+            This method only returns true if the target rectangle is entirely inside this rectangle (not on the edge).
+        """
+        return bool()
+
+    def is_contained(self, target: Rect, proper = False) -> bool:
+        """
+        Tests if this sl.Rect is contained inside the given **target** sl.Rect.
+        :return: True if this rectangle is inside the **target** rectangle, otherwise False.
+        .. note::
+            This method only returns True if this rectangle is entirely inside the **target** rectangle (not on the edge).
+        """
+        return bool()
+
+    def __richcmp__(left, right, op) -> None:
+        """
+        Comparison operator.
+        """
+        pass
+
+
+class CameraParameters:
+    """
+    Class containing the intrinsic parameters of a camera.
+    That information about the camera will be returned by sl.Camera.get_camera_information().
+    .. note::
+        Similar to the sl.CalibrationParameters, those parameters are taken from the settings file (SNXXX.conf) and may be modified during the sl.Camera.open() call when running a self-calibration.
+
+    .. note::
+        The parameters given after the sl.Camera.open() call represent the camera matrix corresponding to rectified or unrectified images.
+
+    .. note::
+        When filled with rectified parameters, fx, fy, cx, cy must be the same for left and right camera once sl.Camera.open() has been called.
+
+    .. note::
+        Since distortion is corrected during rectification, distortion should not be considered on rectified images.
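+
+    A minimal sketch of reading the rectified left-camera intrinsics (assumes an opened sl.Camera named ``zed``):
+
+    .. code-block:: text
+
+        info = zed.get_camera_information()
+        left = info.camera_configuration.calibration_parameters.left_cam
+        fx, fy, cx, cy = left.fx, left.fy, left.cx, left.cy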
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def d_fov(self) -> float:
+        """
+        Diagonal field of view, in degrees.
+        """
+        return float()
+
+    @d_fov.setter
+    def d_fov(self, d_fov: Any) -> None:
+        pass
+
+    @property
+    def cy(self) -> float:
+        """
+        Optical center along y axis, defined in pixels (usually close to height / 2).
+        """
+        return float()
+
+    @cy.setter
+    def cy(self, cy: Any) -> None:
+        pass
+
+    @property
+    def image_size(self) -> Resolution:
+        """
+        Size in pixels of the images given by the camera.
+        """
+        return Resolution()
+
+    @image_size.setter
+    def image_size(self, image_size: Any) -> None:
+        pass
+
+    @property
+    def focal_length_metric(self) -> float:
+        """
+        Real focal length in millimeters.
+        """
+        return float()
+
+    @focal_length_metric.setter
+    def focal_length_metric(self, focal_length_metric: Any) -> None:
+        pass
+
+    @property
+    def fy(self) -> float:
+        """
+        Focal length in pixels along y axis.
+        """
+        return float()
+
+    @fy.setter
+    def fy(self, fy: Any) -> None:
+        pass
+
+    @property
+    def v_fov(self) -> float:
+        """
+        Vertical field of view, in degrees.
+        """
+        return float()
+
+    @v_fov.setter
+    def v_fov(self, v_fov: Any) -> None:
+        pass
+
+    @property
+    def fx(self) -> float:
+        """
+        Focal length in pixels along x axis.
+        """
+        return float()
+
+    @fx.setter
+    def fx(self, fx: Any) -> None:
+        pass
+
+    @property
+    def disto(self) -> list[float]:
+        """
+        Distortion factors: [k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4].
+
+        Radial (k1, k2, k3, k4, k5, k6), tangential (p1, p2) and prism (s1, s2, s3, s4) distortion.
+        """
+        return list[float]()
+
+    @property
+    def h_fov(self) -> float:
+        """
+        Horizontal field of view, in degrees.
+        """
+        return float()
+
+    @h_fov.setter
+    def h_fov(self, h_fov: Any) -> None:
+        pass
+
+    @property
+    def cx(self) -> float:
+        """
+        Optical center along x axis, defined in pixels (usually close to width / 2).
+        """
+        return float()
+
+    @cx.setter
+    def cx(self, cx: Any) -> None:
+        pass
+
+    def set_disto(self, value1: float, value2: float, value3: float, value4: float, value5: float) -> None:
+        """
+        Sets the elements of the disto array.
+        :param value1: k1
+        :param value2: k2
+        :param value3: p1
+        :param value4: p2
+        :param value5: k3
+        """
+        pass
+
+    def set_up(self, fx_: float, fy_: float, cx_: float, cy_: float) -> None:
+        """
+        Sets up the parameters of a camera.
+        :param fx_: Horizontal focal length.
+        :param fy_: Vertical focal length.
+        :param cx_: Horizontal optical center.
+        :param cy_: Vertical optical center.
+        """
+        pass
+
+    def scale(self, resolution: Resolution) -> CameraParameters:
+        """
+        Return the sl.CameraParameters for another resolution.
+        :param resolution: Resolution in which to get the new sl.CameraParameters.
+        :return: The sl.CameraParameters for the resolution given as input.
+        """
+        return CameraParameters()
+
+
+class CalibrationParameters:
+    """
+    Class containing intrinsic and extrinsic parameters of the camera (translation and rotation).
+
+    That information about the camera will be returned by sl.Camera.get_camera_information().
+    .. note::
+        The calibration/rectification process, called during sl.Camera.open(), is using the raw parameters defined in the SNXXX.conf file, where XXX is the serial number of the camera.
+
+    .. note::
+        Those values may be adjusted or not by the self-calibration to get a proper image alignment.
+
+    .. note::
+        After sl.Camera.open() is done (with or without self-calibration activated), most of the stereo parameters (except baseline of course) should be 0 or very close to 0.
+
+    .. note::
+        It means that images after the rectification process (given by sl.Camera.retrieve_image()) are aligned as if they were taken by a "perfect" stereo camera, defined by the new sl.CalibrationParameters.
+
+    .. warning:: CalibrationParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParameters.coordinate_system.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def left_cam(self) -> CameraParameters:
+        """
+        Intrinsic sl.CameraParameters of the left camera.
+        """
+        return CameraParameters()
+
+    @left_cam.setter
+    def left_cam(self, left_cam: Any) -> None:
+        pass
+
+    @property
+    def stereo_transform(self) -> Transform:
+        """
+        Left to right camera transform, expressed in user coordinate system and unit (defined by sl.InitParameters.coordinate_system).
+        """
+        return Transform()
+
+    @property
+    def right_cam(self) -> CameraParameters:
+        """
+        Intrinsic sl.CameraParameters of the right camera.
+        """
+        return CameraParameters()
+
+    @right_cam.setter
+    def right_cam(self, right_cam: Any) -> None:
+        pass
+
+    def set(self) -> None:
+        pass
+
+    def get_camera_baseline(self) -> float:
+        """
+        Returns the baseline of the camera in the sl.UNIT defined in sl.InitParameters.coordinate_units.
+        """
+        return float()
+
+
+class SensorParameters:
+    """
+    Class containing information about a single sensor available in the current device.
+
+    Information about the camera sensors is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information().
+    .. note::
+        This class is meant to be used as a read-only container.
+
+    .. note::
+        Editing any of its fields will not impact the ZED SDK.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def sensor_unit(self) -> SENSORS_UNIT:
+        """
+        Unit of the sensor.
+        """
+        return SENSORS_UNIT()
+
+    @property
+    def random_walk(self) -> float:
+        """
+        Random walk derived from the Allan Variance given as continuous (frequency-independent).
+        .. note::
+            The units will be expressed in ```sensor_unit / √(Hz)```.
+
+        .. note::
+            `NAN` if the information is not available.
+        """
+        return float()
+
+    @random_walk.setter
+    def random_walk(self, random_walk: Any) -> None:
+        pass
+
+    @property
+    def noise_density(self) -> float:
+        """
+        White noise density given as continuous (frequency-independent).
+        .. note::
+            The units will be expressed in ```sensor_unit / √(Hz)```.
+
+        .. note::
+            `NAN` if the information is not available.
+        """
+        return float()
+
+    @noise_density.setter
+    def noise_density(self, noise_density: Any) -> None:
+        pass
+
+    @property
+    def sensor_type(self) -> SENSOR_TYPE:
+        """
+        Type of the sensor.
+        """
+        return SENSOR_TYPE()
+
+    @property
+    def sampling_rate(self) -> float:
+        """
+        Sampling rate (or ODR) of the sensor.
+        """
+        return float()
+
+    @sampling_rate.setter
+    def sampling_rate(self, sampling_rate: Any) -> None:
+        pass
+
+    @property
+    def resolution(self) -> float:
+        """
+        Resolution of the sensor.
+        """
+        return float()
+
+    @resolution.setter
+    def resolution(self, resolution: Any) -> None:
+        pass
+
+    @property
+    def is_available(self) -> bool:
+        """
+        Whether the sensor is available in your camera.
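+
+        A minimal illustrative sketch (assumes an opened sl.Camera named ``zed``):
+
+        .. code-block:: text
+
+            sensors = zed.get_camera_information().sensors_configuration
+            if sensors.accelerometer_parameters.is_available:
+                print(sensors.accelerometer_parameters.sampling_rate)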
+ """ + return bool() + + def set(self) -> None: + pass + + def sensor_range(self) -> np.array[float]: + """ + Range (NumPy array) of the sensor (minimum: `sensor_range[0]`, maximum: `sensor_range[1]`). + """ + return np.array[float]() + + def set_sensor_range(self, value1: float, value2: float) -> None: + """ + Sets the minimum and the maximum values of the sensor range. + \param float value1 : Minimum of the range to set. + \param float value2 : Maximum of the range to set. + """ + pass + + +class SensorsConfiguration: + """ + Class containing information about all the sensors available in the current device. + + Information about the camera sensors is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This class is meant to be used as a read-only container. + + .. note:: + Editing any of its fields will not impact the ZED SDK. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_imu_transform(self) -> Transform: + """ + IMU to left camera transform matrix. + .. note:: + It contains the rotation and translation between the IMU frame and camera frame. + """ + return Transform() + + @property + def barometer_parameters(self) -> SensorParameters: + """ + Configuration of the barometer. + """ + return SensorParameters() + + @property + def magnetometer_parameters(self) -> SensorParameters: + """ + Configuration of the magnetometer. + """ + return SensorParameters() + + @property + def imu_magnetometer_transform(self) -> Transform: + """ + Magnetometer to IMU transform matrix. + .. note:: + It contains rotation and translation between IMU frame and magnetometer frame. + """ + return Transform() + + @property + def firmware_version(self) -> int: + """ + Firmware version of the sensor module. + .. note:: + 0 if no sensors are available (sl.MODEL.ZED). + """ + return int() + + @property + def gyroscope_parameters(self) -> SensorParameters: + """ + Configuration of the gyroscope. + """ + return SensorParameters() + + @property + def accelerometer_parameters(self) -> SensorParameters: + """ + Configuration of the accelerometer. + """ + return SensorParameters() + + def __set_from_camera(self, py_camera, resizer = Resolution(0, 0)) -> None: + pass + + def __set_from_cameraone(self, py_camera, resizer = Resolution(0, 0)) -> None: + pass + + def is_sensor_available(self, sensor_type) -> bool: + """ + Checks if a sensor is available on the device. + :param sensor_type: Sensor type to check. + :return: True if the sensor is available on the device, otherwise False. + """ + return bool() + + +class CameraConfiguration: + """ + Structure containing information about the camera sensor. + + Information about the camera is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This object is meant to be used as a read-only container, editing any of its field won't impact the SDK. + + .. warning:: sl.CalibrationParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParameters.coordinate_system. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def calibration_parameters_raw(self) -> CalibrationParameters: + """ + Intrinsics and extrinsic stereo parameters for unrectified/distorted images. + """ + return CalibrationParameters() + + @property + def fps(self) -> float: + """ + FPS of the camera. + """ + return float() + + @property + def firmware_version(self) -> int: + """ + Internal firmware version of the camera. 
+ """ + return int() + + @property + def calibration_parameters(self) -> CalibrationParameters: + """ + Intrinsics and extrinsic stereo parameters for rectified/undistorted images. + """ + return CalibrationParameters() + + @property + def resolution(self) -> Resolution: + """ + Resolution of the camera. + """ + return Resolution() + + +class CameraInformation: + """ + Structure containing information of a single camera (serial number, model, calibration, etc.) + That information about the camera will be returned by Camera.get_camera_information() + .. note:: + This object is meant to be used as a read-only container, editing any of its fields won't impact the SDK. + + .. warning:: CalibrationParameters are returned in COORDINATE_SYSTEM.IMAGE , they are not impacted by the InitParameters.coordinate_system + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera (see sl.MODEL). + """ + return MODEL() + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + """ + return int() + + @property + def camera_configuration(self) -> CameraConfiguration: + """ + Camera configuration parameters stored in a sl.CameraConfiguration. + """ + return CameraConfiguration() + + @property + def sensors_configuration(self) -> SensorsConfiguration: + """ + Sensors configuration parameters stored in a sl.SensorsConfiguration. + """ + return SensorsConfiguration() + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type used in the ZED SDK. + """ + return INPUT_TYPE() + + +class Mat: + """ + Class representing 1 to 4-channel matrix of float or uchar, stored on CPU and/or GPU side. + + This class is defined in a row-major order, meaning that for an image buffer, the rows are stored consecutively from top to bottom. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def verbose(self) -> bool: + """ + Whether the sl.Mat can display information. + """ + return bool() + + @verbose.setter + def verbose(self, verbose: Any) -> None: + pass + + @property + def name(self) -> str: + """ + The name of the sl.Mat (optional). + In verbose mode, it iss used to indicate which sl.Mat is printing information. + \n Default set to "n/a" to avoid empty string if not filled. + """ + return str() + + @name.setter + def name(self, name: Any) -> None: + pass + + @property + def timestamp(self) -> int: + """ + Timestamp of the last manipulation of the data of the matrix by a method/function. + """ + return int() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def init_mat_type(self, width, height, mat_type, memory_type = MEM.CPU) -> None: + """ + Initilizes a new sl.Mat and allocates the requested memory by calling alloc_size(). + :param width: Width of the matrix in pixels. Default: 0 + :param height: Height of the matrix in pixels. Default: 0 + :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1 + :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value) + """ + pass + + def init_mat_cpu(self, width: int, height: int, mat_type: MAT_TYPE, ptr, step, memory_type = MEM.CPU) -> None: + """ + Initilizes a new sl.Mat from an existing data pointer. + This method does not allocate the memory. + :param width: Width of the matrix in pixels. 
+        """
+        pass
+
+    def init_mat_cpu(self, width: int, height: int, mat_type: MAT_TYPE, ptr, step, memory_type = MEM.CPU) -> None:
+        """
+        Initializes a new sl.Mat from an existing data pointer.
+        This method does not allocate the memory.
+        :param width: Width of the matrix in pixels.
+        :param height: Height of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param ptr: Pointer to the data array.
+        :param step: Step of the data array (byte size of one pixel row).
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat_resolution(self, resolution: Resolution, mat_type: MAT_TYPE, memory_type = MEM.CPU) -> None:
+        """
+        Initializes a new sl.Mat and allocates the requested memory by calling alloc_size().
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat_resolution_cpu(self, resolution: Resolution, mat_type, ptr, step, memory_type = MEM.CPU) -> None:
+        """
+        Initializes a new sl.Mat from an existing data pointer.
+        This method does not allocate the memory.
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param ptr: Pointer to the data array (CPU or GPU).
+        :param step: Step of the data array (byte size of one pixel row).
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat(self, matrix: Mat) -> None:
+        """
+        Initializes a new sl.Mat by copy (shallow copy).
+        This method does not allocate the memory.
+        :param matrix: sl.Mat to copy.
+        """
+        pass
+
+    def alloc_size(self, width, height, mat_type, memory_type = MEM.CPU) -> None:
+        """
+        Allocates the sl.Mat memory.
+        :param width: Width of the matrix in pixels.
+        :param height: Height of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+
+        .. warning:: It erases previously allocated memory.
+        """
+        pass
+
+    def alloc_resolution(self, resolution: Resolution, mat_type: MAT_TYPE, memory_type = MEM.CPU) -> None:
+        """
+        Allocates the sl.Mat memory.
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+
+        .. warning:: It erases previously allocated memory.
+        """
+        pass
+
+    def free(self, memory_type = MEM.CPU) -> None:
+        """
+        Frees the owned memory.
+        :param memory_type: Specifies which memory you wish to free. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def copy_to(self, dst: Mat, cpy_type = COPY_TYPE.CPU_CPU) -> ERROR_CODE:
+        """
+        Copies data to another sl.Mat (deep copy).
+
+        :param dst: sl.Mat where the data will be copied to.
+        :param cpy_type: Specifies the memory that will be used for the copy. Default: sl.COPY_TYPE.CPU_CPU (you cannot change this default value)
+        :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise.
+
+        .. note::
+            If the destination is not allocated or does not have a compatible sl.MAT_TYPE or sl.Resolution,
+            current memory is freed and new memory is directly allocated.
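+
+        A minimal illustrative sketch (assumes ``src`` is an already filled sl.Mat):
+
+        .. code-block:: text
+
+            dst = sl.Mat()
+            err = src.copy_to(dst)  # deep copy; dst is (re)allocated if needed
+            if err != sl.ERROR_CODE.SUCCESS:
+                print("copy failed:", err)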
+ """ + return ERROR_CODE() + + def update_cpu_from_gpu(self) -> ERROR_CODE: + """ + Downloads data from DEVICE (GPU) to HOST (CPU), if possible. + .. note:: + If no CPU or GPU memory are available for this sl::Mat, some are directly allocated. + + .. note:: + If verbose is set to true, you have information in case of failure. + """ + return ERROR_CODE() + + def update_gpu_from_cpu(self) -> ERROR_CODE: + """ + Uploads data from HOST (CPU) to DEVICE (GPU), if possible. + .. note:: + If no CPU or GPU memory are available for this sl::Mat, some are directly allocated. + + .. note:: + If verbose is set to true, you have information in case of failure. + """ + return ERROR_CODE() + + def set_from(self, src: Mat, cpy_type = COPY_TYPE.CPU_CPU) -> ERROR_CODE: + """ + Copies data from an other sl.Mat (deep copy). + :param src: sl.Mat where the data will be copied from. + :param cpy_type: Specifies the memory that will be used for the copy. Default: sl.COPY_TYPE.CPU_CPU (you cannot change this default value) + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + If the destination is not allocated or does not have a compatible sl.MAT_TYPE or sl.Resolution, + + current memory is freed and new memory is directly allocated. + """ + return ERROR_CODE() + + def read(self, filepath: str) -> ERROR_CODE: + """ + Reads an image from a file (only if sl.MEM.CPU is available on the current sl.Mat). + Supported input files format are PNG and JPEG. + :param filepath: Path of the file to read from (including the name and extension). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + Supported sl.MAT_TYPE are : + + - MAT_TYPE.F32_C1 for PNG/PFM/PGM + - MAT_TYPE.F32_C3 for PCD/PLY/VTK/XYZ + - MAT_TYPE.F32_C4 for PCD/PLY/VTK/WYZ + - MAT_TYPE.U8_C1 for PNG/JPG + - MAT_TYPE.U8_C3 for PNG/JPG + - MAT_TYPE.U8_C4 for PNG/JPG + """ + return ERROR_CODE() + + def write(self, filepath: str, memory_type = MEM.CPU, compression_level = -1) -> ERROR_CODE: + """ + Writes the sl.Mat (only if sl.MEM.CPU is available on the current sl.Mat) into a file as an image. + Supported output files format are PNG and JPEG. + :param filepath: Path of the file to write (including the name and extension). + :param memory_type: Memory type of the sl.Mat. Default: sl.MEM.CPU (you cannot change the default value) + :param compression_level: Level of compression between 0 (lowest compression == highest size == highest quality(jpg)) and 100 (highest compression == lowest size == lowest quality(jpg)). + .. note:: + Specific/default value for compression_level = -1 : This will set the default quality for PNG(30) or JPEG(5). + + .. note:: + compression_level is only supported for [U8_Cx] (MAT_TYPE). + + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + Supported sl.MAT_TYPE are : + + - MAT_TYPE.F32_C1 for PNG/PFM/PGM + - MAT_TYPE.F32_C3 for PCD/PLY/VTK/XYZ + - MAT_TYPE.F32_C4 for PCD/PLY/VTK/WYZ + - MAT_TYPE.U8_C1 for PNG/JPG + - MAT_TYPE.U8_C3 for PNG/JPG + - MAT_TYPE.U8_C4 for PNG/JPG + """ + return ERROR_CODE() + + def set_to(self, value, memory_type = MEM.CPU) -> ERROR_CODE: + """ + Fills the sl.Mat with the given value. + This method overwrites all the matrix. + :param value: Value to be copied all over the matrix. + :param memory_type: Which buffer to fill. 
+        """
+        return ERROR_CODE()
+
+    def set_value(self, x: int, y: int, value, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Sets a value to a specific point in the matrix.
+        :param x: Column of the point to change.
+        :param y: Row of the point to change.
+        :param value: Value to be set.
+        :param memory_type: Which memory will be updated.
+        :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise.
+
+        .. warning:: Not efficient for sl.MEM.GPU, use it on sparse data.
+        """
+        return ERROR_CODE()
+
+    def get_value(self, x: int, y: int, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Returns the value of a specific point in the matrix.
+        :param x: Column of the point to get the value from.
+        :param y: Row of the point to get the value from.
+        :param memory_type: Which memory should be read.
+        :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise.
+
+        .. warning:: Not efficient for sl.MEM.GPU, use it on sparse data.
+        """
+        return ERROR_CODE()
+
+    def get_width(self) -> int:
+        """
+        Returns the width of the matrix.
+        :return: Width of the matrix in pixels.
+        """
+        return int()
+
+    def get_height(self) -> int:
+        """
+        Returns the height of the matrix.
+        :return: Height of the matrix in pixels.
+        """
+        return int()
+
+    def get_resolution(self) -> Resolution:
+        """
+        Returns the resolution (width and height) of the matrix.
+        :return: Resolution of the matrix in pixels.
+        """
+        return Resolution()
+
+    def get_channels(self) -> int:
+        """
+        Returns the number of values stored in one pixel.
+        :return: Number of values in a pixel.
+        """
+        return int()
+
+    def get_data_type(self) -> MAT_TYPE:
+        """
+        Returns the format of the matrix.
+        :return: Format of the current sl.Mat.
+        """
+        return MAT_TYPE()
+
+    def get_memory_type(self) -> MEM:
+        """
+        Returns the type of memory (CPU and/or GPU).
+        :return: Type of allocated memory.
+        """
+        return MEM()
+
+    def numpy(self, force = False) -> np.array:
+        """
+        Returns the sl.Mat as a NumPy array.
+        This is for convenience to mimic the [PyTorch API](https://pytorch.org/docs/stable/generated/torch.Tensor.numpy.html).
+        \n This is like an alias of the get_data() method.
+        :param force: Whether the memory of the sl.Mat needs to be duplicated.
+        :return: NumPy array containing the sl.Mat data.
+        .. note::
+            The fastest is ``force=False``, but the sl.Mat memory must not be released while the NumPy array is in use.
+        """
+        return np.array()
+
+    def get_data(self, memory_type = MEM.CPU, deep_copy = False) -> np.array:
+        """
+        Casts the data of the sl.Mat into a NumPy array (with or without copy).
+        :param memory_type: Which memory should be read. If MEM.GPU, you should have CuPy installed. Default: MEM.CPU
+        :param deep_copy: Whether the memory of the sl.Mat needs to be duplicated.
+        :return: NumPy array containing the sl.Mat data.
+        .. note::
+            The fastest is ``deep_copy=False``, but the sl.Mat memory must not be released while the NumPy array is in use.
+        """
+        return np.array()
+
+    def get_step_bytes(self, memory_type = MEM.CPU) -> int:
+        """
+        Returns the memory step in bytes (size of one pixel row).
+        :param memory_type: Specifies whether you want the sl.MEM.CPU or sl.MEM.GPU step.\n Default: sl.MEM.CPU (you cannot change the default value)
+        :return: The step in bytes of the specified memory.
+        """
+        return int()
+
+    def get_step(self, memory_type = MEM.CPU) -> int:
+        """
+        Returns the memory step in number of elements (size of one pixel row).
+        :param memory_type: Specifies whether you want the sl.MEM.CPU or sl.MEM.GPU step.\n Default: sl.MEM.CPU (you cannot change the default value)
+        :return: The step in number of elements.
+        """
+        return int()
+
+    def get_pixel_bytes(self) -> int:
+        """
+        Returns the size of one pixel in bytes.
+        :return: Size of a pixel in bytes.
+        """
+        return int()
+
+    def get_width_bytes(self) -> int:
+        """
+        Returns the size of a row in bytes.
+        :return: Size of a row in bytes.
+        """
+        return int()
+
+    def get_infos(self) -> str:
+        """
+        Returns the information about the sl.Mat as a string.
+        :return: String containing the sl.Mat information.
+        """
+        return str()
+
+    def is_init(self) -> bool:
+        """
+        Returns whether the sl.Mat is initialized or not.
+        :return: True if the current sl.Mat has been allocated (by the constructor or afterwards).
+        """
+        return bool()
+
+    def is_memory_owner(self) -> bool:
+        """
+        Returns whether the sl.Mat is the owner of the memory it accesses.
+
+        If not, the memory won't be freed if the sl.Mat is destroyed.
+        :return: True if the sl.Mat is owning its memory, else False.
+        """
+        return bool()
+
+    def clone(self, py_mat: Mat) -> ERROR_CODE:
+        """
+        Duplicates a sl.Mat by copy (deep copy).
+        :param py_mat: sl.Mat to copy.
+
+        This method copies the data array(s) and it marks the new sl.Mat as the memory owner.
+        """
+        return ERROR_CODE()
+
+    def move(self, py_mat: Mat) -> ERROR_CODE:
+        """
+        Moves the data of the sl.Mat to another sl.Mat.
+
+        This method gives the attributes of the current sl.Mat to the specified one. (No copy.)
+        :param py_mat: sl.Mat to move to.
+        .. note::
+            The current sl.Mat is then no longer usable since it has lost its attributes.
+        """
+        return ERROR_CODE()
+
+    def convert_color_inplace(self, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Converts the color channels of the Mat (RGB<->BGR or RGBA<->BGRA).
+        This method works only on U8_C4 or U8_C3 matrices.
+        """
+        return ERROR_CODE()
+
+    def convert_color(mat1: Mat, mat2: Mat, swap_RB_channels: bool, remove_alpha_channels: bool, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Converts the color channels of the Mat into another Mat.
+        This method works only on U8_C4 if remove_alpha_channels is enabled, or on U8_C4 and U8_C3 if swap_RB_channels is enabled.
+        The in-place method convert_color_inplace() can be used to only swap the red and blue channels efficiently.
+        """
+        return ERROR_CODE()
+
+    def swap(mat1: Mat, mat2: Mat) -> None:
+        """
+        Swaps the content of the provided sl.Mat (only swaps the pointers, no data copy).
+        :param mat1: First matrix to swap.
+        :param mat2: Second matrix to swap.
+        """
+        pass
+
+    def get_pointer(self, memory_type = MEM.CPU) -> int:
+        """
+        Gets the pointer of the content of the sl.Mat.
+        :param memory_type: Which memory you want to get. Default: sl.MEM.CPU (you cannot change the default value)
+        :return: Pointer of the content of the sl.Mat.
+ """ + return int() + + def __repr__(self) -> None: + pass + + +def blob_from_image(mat1: Mat, mat2: Mat, resolution: Resolution, scale: float, mean: tuple, stdev: tuple, keep_aspect_ratio: bool, swap_RB_channels: bool) -> ERROR_CODE: + """ + Convert an image into a GPU Tensor in planar channel configuration (NCHW), ready to use for deep learning model + :param image_in: input image to convert + :param tensor_out: output GPU tensor + :param resolution_out: resolution of the output image, generally square, although not mandatory + :param scalefactor: Scale factor applied to each pixel value, typically to convert the char value into [0-1] float + :param mean: mean, statistic to normalized the pixel values, applied AFTER the scale. For instance for imagenet statistics the mean would be sl::float3(0.485, 0.456, 0.406) + :param stddev: standard deviation, statistic to normalized the pixel values, applied AFTER the scale. For instance for imagenet statistics the standard deviation would be sl::float3(0.229, 0.224, 0.225) + :param keep_aspect_ratio: indicates if the original width and height ratio should be kept using padding (sometimes refer to as letterboxing) or if the image should be stretched + :param swap_RB_channels: indicates if the Red and Blue channels should be swapped (RGB<->BGR or RGBA<->BGRA) + :return: ERROR_CODE : The error code gives information about the success of the function + + Example usage, for a 416x416 squared RGB image (letterboxed), with a scale factor of 1/255, and using the imagenet statistics for normalization: + .. code-block:: text + + + image = sl.Mat() + blob = sl.Mat() + resolution = sl.Resolution(416,416) + scale = 1.0/255.0 # Scale factor to apply to each pixel value + keep_aspect_ratio = True # Add padding to keep the aspect ratio + swap_RB_channels = True # ZED SDK outputs BGR images, so we need to swap the R and B channels + zed.retrieve_image(image, sl.VIEW.LEFT, type=sl.MEM.GPU) # Get the ZED image (GPU only is more efficient in that case) + err = sl.blob_from_image(image, blob, resolution, scale, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), keep_aspect_ratio, swap_RB_channels) + # By default the blob is in GPU memory, you can move it to CPU memory if needed + blob.update_cpu_from_gpu() + + """ + return ERROR_CODE() + +def is_camera_one(camera_model: MODEL) -> bool: + """ + Check if the camera is a ZED One (Monocular) or ZED (Stereo) + :param camera_model: The camera model to check + """ + return bool() + +def is_resolution_available(resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a resolution is available for a given camera model + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +def is_FPS_available(fps, resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a frame rate is available for a given resolution and camera model + :param fps: Frame rate to check + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +def is_HDR_available(resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a resolution for a given camera model is available for HDR + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +class Rotation(Matrix3f): + """ + Class representing a rotation for the positional tracking module. + + It inherits from the generic sl.Matrix3f class. + """ + def __init__(self, *args, **kwargs) -> None: ... 
+
+    def __dealloc__(self) -> None:
+        pass
+
+    def init_rotation(self, rot: Rotation) -> None:
+        """
+        Deep copy from another sl.Rotation.
+        :param rot: sl.Rotation to copy.
+        """
+        pass
+
+    def init_matrix(self, matrix: Matrix3f) -> None:
+        """
+        Initializes the sl.Rotation from a sl.Matrix3f.
+        :param matrix: sl.Matrix3f to be used.
+        """
+        pass
+
+    def init_orientation(self, orient: Orientation) -> None:
+        """
+        Initializes the sl.Rotation from an sl.Orientation.
+        :param orient: sl.Orientation to be used.
+        """
+        pass
+
+    def init_angle_translation(self, angle: float, axis: Translation) -> None:
+        """
+        Initializes the sl.Rotation from an angle and an axis.
+        :param angle: Rotation angle in radians.
+        :param axis: 3D axis to rotate around.
+        """
+        pass
+
+    def set_orientation(self, py_orientation: Orientation) -> None:
+        """
+        Sets the sl.Rotation from an sl.Orientation.
+        :param py_orientation: sl.Orientation containing the rotation to set.
+        """
+        pass
+
+    def get_orientation(self) -> Orientation:
+        """
+        Returns the sl.Orientation corresponding to the current sl.Rotation.
+        :return: sl.Orientation created from the current rotation.
+        """
+        return Orientation()
+
+    def get_rotation_vector(self) -> np.array[float]:
+        """
+        Returns the 3x1 rotation vector obtained from the 3x3 rotation matrix using the Rodrigues formula.
+        :return: Rotation vector (NumPy array) created from the sl.Rotation values.
+        """
+        return np.array[float]()
+
+    def set_rotation_vector(self, input0: float, input1: float, input2: float) -> None:
+        """
+        Sets the sl.Rotation from a rotation vector (using Rodrigues' transformation).
+        :param input0: ```rx``` component of the rotation vector.
+        :param input1: ```ry``` component of the rotation vector.
+        :param input2: ```rz``` component of the rotation vector.
+        """
+        pass
+
+    def get_euler_angles(self, radian = True) -> np.array[float]:
+        """
+        Converts the sl.Rotation into Euler angles.
+        :param radian: Whether the angles will be returned in radians or degrees. Default: True
+        :return: Euler angles (NumPy array) created from the sl.Rotation values representing the rotations around the X, Y and Z axes using the YZX convention.
+        """
+        return np.array[float]()
+
+    def set_euler_angles(self, input0: float, input1: float, input2: float, radian = True) -> None:
+        """
+        Sets the sl.Rotation from Euler angles.
+        :param input0: Roll value.
+        :param input1: Pitch value.
+        :param input2: Yaw value.
+        :param radian: Whether the angles are in radians or degrees. Default: True
+        """
+        pass
+
+
+class Translation:
+    """
+    Class representing a translation for the positional tracking module.
+
+    sl.Translation is a vector as ```[tx, ty, tz]```.
+    \n You can access the data with the get() method that returns a NumPy array.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    def init_translation(self, tr) -> None:
+        """
+        Deep copy from another sl.Translation.
+        :param tr: sl.Translation to copy.
+        """
+        pass
+
+    def init_vector(self, t1, t2, t3) -> None:
+        """
+        Initializes the sl.Translation with its components.
+        :param t1: First component.
+        :param t2: Second component.
+        :param t3: Third component.
+        """
+        pass
+
+    def normalize(self) -> None:
+        """
+        Normalizes the current sl.Translation.
+        """
+        pass
+
+    def normalize_translation(self, tr) -> Translation:
+        """
+        Gets the normalized sl.Translation of a given sl.Translation.
+        :param tr: sl.Translation to get the normalized translation from.
+        :return: Another sl.Translation object equal to the normalized **tr**.
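+
+        A minimal illustrative sketch (a 3-4-5 vector normalizes to unit length):
+
+        .. code-block:: text
+
+            tr = sl.Translation()
+            tr.init_vector(3.0, 0.0, 4.0)
+            unit = tr.normalize_translation(tr)
+            print(unit.get())  # approximately [0.6, 0.0, 0.8]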
+ """ + return Translation() + + def size(self) -> int: + """ + Gets the size of the sl.Translation. + :return: Size of the sl.Translation. + """ + return int() + + def dot_translation(tr1: Translation, tr2) -> float: + """ + Computes the dot product of two sl.Translation objects. + :param tr1: First sl.Translation to get the dot product from. + :param tr2: Sencond sl.Translation to get the dot product from. + :return: Dot product of **tr1 and **tr2. + """ + return float() + + def get(self) -> np.array[float]: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + return np.array[float]() + + def __mul__(self, other) -> None: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + pass + + def __repr__(self) -> None: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + pass + + +class Orientation: + """ + Class representing an orientation/quaternion for the positional tracking module. + + sl.Orientation is a vector defined as ```[ox, oy, oz, ow]```. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def init_orientation(self, orient) -> None: + """ + Deep copy from another sl.Orientation. + :param orient: sl.Orientation to copy. + """ + pass + + def init_vector(self, v0, v1, v2, v3) -> None: + """ + Initializes the sl.Orientation with its components. + :param v0: ox component. + :param v1: oy component. + :param v2: oz component. + :param v3: ow component. + """ + pass + + def init_rotation(self, rotation) -> None: + """ + Initializes the sl.Orientation from an sl.Rotation. + + It converts the sl.Rotation representation to the sl.Orientation one. + :param rotation: sl.Rotation to be used. + """ + pass + + def init_translation(self, tr1, tr2) -> None: + """ + Initializes the sl.Orientation from a vector represented by two sl.Translation. + :param tr1: First point of the vector. + :param tr2: Second point of the vector. + """ + pass + + def set_rotation_matrix(self, py_rotation) -> None: + """ + Sets the rotation component of the current sl.Transform from an sl.Rotation. + :param py_rotation: sl.Rotation to be used. + """ + pass + + def get_rotation_matrix(self) -> Rotation: + """ + Returns the current sl.Orientation as an sl.Rotation. + :return: The rotation computed from the orientation data. + """ + return Rotation() + + def set_identity(self) -> None: + """ + Sets the current sl.Orientation to identity. + """ + pass + + def identity(self, orient = Orientation()) -> Orientation: + """ + Creates an sl.Orientation initialized to identity. + :return: Identity sl.Orientation. + """ + return Orientation() + + def set_zeros(self) -> None: + """ + Fills the current sl.Orientation with zeros. + """ + pass + + def zeros(self, orient = Orientation()) -> Orientation: + """ + Creates an sl.Orientation filled with zeros. + :return: sl.Orientation filled with zeros. + """ + return Orientation() + + def normalize(self) -> None: + """ + Normalizes the current sl.Orientation. + """ + pass + + def normalize_orientation(orient) -> Orientation: + """ + Gets the normalized sl.Orientation of a given sl.Orientation. + :param orient: sl.Orientation to be get the normalized orientation from. + :return: Another sl.Orientation object equal to [**orient.normalize()](normalize). + """ + return Orientation() + + def size(self) -> int: + """ + Gets the size of the sl.Orientation. 
+        :return: Size of the sl.Orientation.
+        """
+        return int()
+
+    def get(self) -> np.array[float]:
+        """
+        Returns the sl.Orientation as a NumPy array.
+        :return: NumPy array containing the components of the sl.Orientation.
+        """
+        return np.array[float]()
+
+    def __mul__(self, other) -> None:
+        """
+        Multiplication operator.
+        """
+        pass
+
+    def __repr__(self) -> None:
+        """
+        String representation of the sl.Orientation.
+        """
+        pass
+
+
+class Transform(Matrix4f):
+    """
+    Class representing a transformation (translation and rotation) for the positional tracking module.
+
+    It can be used to create any type of Matrix4x4 or sl::Matrix4f that must be specifically used for handling rotation and position information (OpenGL, Tracking, etc.).
+    \n It inherits from the generic sl::Matrix4f class.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    def __dealloc__(self) -> None:
+        pass
+
+    def init_transform(self, motion: Transform) -> None:
+        """
+        Deep copy from another sl.Transform.
+        :param motion: sl.Transform to copy.
+        """
+        pass
+
+    def init_matrix(self, matrix: Matrix4f) -> None:
+        """
+        Initializes the sl.Transform from a sl.Matrix4f.
+        :param matrix: sl.Matrix4f to be used.
+        """
+        pass
+
+    def init_rotation_translation(self, rot: Rotation, tr: Translation) -> None:
+        """
+        Initializes the sl.Transform from an sl.Rotation and a sl.Translation.
+        :param rot: sl.Rotation to be used.
+        :param tr: sl.Translation to be used.
+        """
+        pass
+
+    def init_orientation_translation(self, orient: Orientation, tr: Translation) -> None:
+        """
+        Initializes the sl.Transform from an sl.Orientation and a sl.Translation.
+        :param orient: sl.Orientation to be used.
+        :param tr: sl.Translation to be used.
+        """
+        pass
+
+    def set_rotation_matrix(self, py_rotation: Rotation) -> None:
+        """
+        Sets the rotation component of the current sl.Transform from an sl.Rotation.
+        :param py_rotation: sl.Rotation to be used.
+        """
+        pass
+
+    def get_rotation_matrix(self) -> Rotation:
+        """
+        Returns the sl.Rotation corresponding to the current sl.Transform.
+        :return: sl.Rotation created from the sl.Transform values.
+        .. warning:: The given sl.Rotation contains a copy of the sl.Transform values.
+        """
+        return Rotation()
+
+    def set_translation(self, py_translation: Translation) -> None:
+        """
+        Sets the translation component of the current sl.Transform from an sl.Translation.
+        :param py_translation: sl.Translation to be used.
+        """
+        pass
+
+    def get_translation(self) -> Translation:
+        """
+        Returns the sl.Translation corresponding to the current sl.Transform.
+        :return: sl.Translation created from the sl.Transform values.
+        .. warning:: The given sl.Translation contains a copy of the sl.Transform values.
+        """
+        return Translation()
+
+    def set_orientation(self, py_orientation: Orientation) -> None:
+        """
+        Sets the orientation component of the current sl.Transform from an sl.Orientation.
+        :param py_orientation: sl.Orientation to be used.
+        """
+        pass
+
+    def get_orientation(self) -> Orientation:
+        """
+        Returns the sl.Orientation corresponding to the current sl.Transform.
+        :return: sl.Orientation created from the sl.Transform values.
+        .. warning:: The given sl.Orientation contains a copy of the sl.Transform values.
+        """
+        return Orientation()
+
+    def get_rotation_vector(self) -> np.array[float]:
+        """
+        Returns the 3x1 rotation vector obtained from the 3x3 rotation matrix using the Rodrigues formula.
+ :return: Rotation vector (NumPy array) created from the sl.Transform values. + """ + return np.array[float]() + + def set_rotation_vector(self, input0: float, input1: float, input2: float) -> None: + """ + Sets the rotation component of the sl.Transform with a 3x1 rotation vector (using Rodrigues' transformation). + :param input0: ```rx``` component of the rotation vector. + :param input1: ```ry``` component of the rotation vector. + :param input2: ```rz``` component of the rotation vector. + """ + pass + + def get_euler_angles(self, radian = True) -> np.array[float]: + """ + Converts the rotation component of the sl.Transform into Euler angles. + :param radian: Whether the angle will be returned in radian or degree. Default: True + :return: Euler angles (Numpy array) created from the sl.Transform values representing the rotations around the X, Y and Z axes using YZX convention. + """ + return np.array[float]() + + def set_euler_angles(self, input0: float, input1: float, input2: float, radian = True) -> None: + """ + Sets the rotation component of the sl.Transform from Euler angles. + :param input0: Roll value. + :param input1: Pitch value. + :param input2: Yaw value. + :param radian: Whether the angle is in radian or degree. Default: True + """ + pass + + +class MESH_FILE_FORMAT(enum.Enum): + """ + Lists available mesh file formats. + + | Enumerator | | + |:---:|:---:| + | PLY | Contains only vertices and faces. | + | PLY_BIN | Contains only vertices and faces encoded in binary. | + | OBJ | Contains vertices, normals, faces, and texture information (if possible). | + """ + PLY = enum.auto() + PLY_BIN = enum.auto() + OBJ = enum.auto() + LAST = enum.auto() + +class MESH_TEXTURE_FORMAT(enum.Enum): + """ + Lists available mesh texture formats. + + | Enumerator | | + |:---:|:---:| + | RGB | The texture will be on 3 channels. | + | RGBA | The texture will be on 4 channels. | + """ + RGB = enum.auto() + RGBA = enum.auto() + LAST = enum.auto() + +class MESH_FILTER(enum.Enum): + """ + Lists available mesh filtering intensities. + + | Enumerator | | + |:---:|:---:| + | LOW | Clean the mesh by closing small holes and removing isolated faces. | + | MEDIUM | Soft faces decimation and smoothing. | + | HIGH | Drastically reduce the number of faces and apply a soft smooth. | + """ + LOW = enum.auto() + MEDIUM = enum.auto() + HIGH = enum.auto() + +class PLANE_TYPE(enum.Enum): + """ + Lists the available plane types detected based on the orientation. + + + | Enumerator | | + |:---:|:---:| + | HORIZONTAL | Horizontal plane, such as a tabletop, floor, etc. | + | VERTICAL | Vertical plane, such as a wall. | + | UNKNOWN | Unknown plane orientation. | + """ + HORIZONTAL = enum.auto() + VERTICAL = enum.auto() + UNKNOWN = enum.auto() + LAST = enum.auto() + +class MeshFilterParameters: + """ + Class containing a set of parameters for the [mesh filtration](Mesh.filter) functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def set(self, filter = MESH_FILTER.LOW) -> None: + """ + Set the filtering intensity. + :param filter: Desired sl.MESH_FILTER. + """ + pass + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters. 
+        :return: True if the file was successfully saved, otherwise False.
+        .. warning:: For security reasons, the file must not already exist.
+        .. warning:: In case a file already exists, the method will return False and the existing file will not be updated.
+        """
+        return bool()
+
+    def load(self, filename: str) -> bool:
+        """
+        Loads a set of parameters from a file previously saved with save().
+        :param filename: Path to the file from which the parameters will be loaded.
+        :return: True if the file was successfully loaded, otherwise False.
+        """
+        return bool()
+
+
+class PointCloudChunk:
+    """
+    Class representing a sub-point cloud containing local vertices and colors.
+
+    .. note::
+        vertices and normals have the same size.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def has_been_updated(self) -> bool:
+        """
+        Whether the point cloud chunk has been updated by an inner process.
+        """
+        return bool()
+
+    @property
+    def timestamp(self) -> int:
+        """
+        Timestamp of the latest update.
+        """
+        return int()
+
+    def vertices(self) -> np.array[float]:
+        """
+        NumPy array of vertices.
+        Vertices are defined by a colored 3D point ```[x, y, z, rgba]```.
+        """
+        return np.array[float]()
+
+    def normals(self) -> np.array[float]:
+        """
+        NumPy array of normals.
+        Normals are defined by three components ```[nx, ny, nz]```.
+        .. note::
+            A normal is defined for each vertex.
+        """
+        return np.array[float]()
+
+    def barycenter(self) -> np.array[float]:
+        """
+        3D centroid of the chunk.
+        """
+        return np.array[float]()
+
+    def clear(self) -> None:
+        """
+        Clears all data.
+        """
+        pass
+
+
+class Chunk:
+    """
+    Class representing a sub-mesh containing local vertices and triangles.
+
+    Vertices and normals have the same size and are linked by the ids stored in triangles.
+    .. note::
+        uv contains data only if your mesh has textures (by loading it or after calling sl.Mesh.apply_texture()).
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def has_been_updated(self) -> bool:
+        """
+        Whether the chunk has been updated by an inner process.
+        """
+        return bool()
+
+    @property
+    def timestamp(self) -> int:
+        """
+        Timestamp of the latest update.
+        """
+        return int()
+
+    def vertices(self) -> np.array[float]:
+        """
+        NumPy array of vertices.
+        Vertices are defined by a 3D point ```[x, y, z]```.
+        """
+        return np.array[float]()
+
+    def triangles(self) -> np.array[int]:
+        """
+        NumPy array of triangles/faces.
+        Triangles are defined as a set of three vertex indices ```[v1, v2, v3]```.
+        """
+        return np.array[int]()
+
+    def normals(self) -> np.array[float]:
+        """
+        NumPy array of normals.
+        Normals are defined by three components ```[nx, ny, nz]```.
+        .. note::
+            A normal is defined for each vertex.
+        """
+        return np.array[float]()
+
+    def colors(self) -> np.array[int]:
+        """
+        NumPy array of colors.
+        Colors are defined by three components ```[r, g, b]```.
+        .. note::
+            A color is defined for each vertex.
+        """
+        return np.array[int]()
+
+    def uv(self) -> np.array[float]:
+        """
+        UVs define the 2D projection of each vertex onto the texture.
+        Values are normalized [0, 1] and start from the bottom left corner of the texture (as requested by OpenGL).
+        \n In order to display a textured mesh, you need to bind the texture and then draw each triangle by picking its uv values.
+        .. note::
+            Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()).
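+
+        A short reading sketch (hypothetical: assumes ```mesh``` is an sl.Mesh on which apply_texture() succeeded):
+
+        .. code-block:: text
+
+            chunk = mesh.chunks[0]      # first sub-mesh
+            uvs = chunk.uv()            # per-vertex texture coordinates, normalized [0, 1]
+            tris = chunk.triangles()    # vertex indices to draw each face with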
+ """ + return np.array[float]() + + def barycenter(self) -> np.array[float]: + """ + 3D centroid of the chunk. + """ + return np.array[float]() + + def clear(self) -> None: + """ + Clears all data. + """ + pass + + +class FusedPointCloud: + """ + Class representing a fused point cloud and containing the geometric and color data of the scene captured by the spatial mapping module. + + By default the fused point cloud is defined as a set of point cloud chunks. + \n This way we update only the required data, avoiding a time consuming remapping process every time a small part of the sl.FusedPointCloud cloud is changed. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def chunks(self) -> list[PointCloudChunk]: + """ + List of chunks constituting the sl.FusedPointCloud. + """ + return list[PointCloudChunk]() + + def __dealloc__(self) -> None: + pass + + def __getitem__(self, x) -> PointCloudChunk: + """ + Gets a chunk from chunks. + """ + return PointCloudChunk() + + def vertices(self) -> np.array[float]: + """ + NumPy array of vertices. + Vertices are defined by a colored 3D point ```[x, y, z, rgba]```. + """ + return np.array[float]() + + def normals(self) -> np.array[float]: + """ + NumPy array of normals. + Normals are defined by three components ```[nx, ny, nz]```. + .. note:: + A normal is defined for each vertex. + """ + return np.array[float]() + + def save(self, filename: str, typeMesh = MESH_FILE_FORMAT.OBJ, id = []) -> bool: + """ + Saves the current sl.FusedPointCloud into a file. + :param filename: Path of the file to store the fused point cloud in. + :param typeMesh: File extension type. Default: sl.MESH_FILE_FORMAT.OBJ. + :param id: Set of chunks to be saved. Default: (empty) (all chunks are saved) + :return: True if the file was successfully saved, otherwise False. + + .. note:: + This method operates on the sl.FusedPointCloud not on chunks. + + .. note:: + This way you can save different parts of your sl.FusedPointCloud by updating it with update_from_chunklist(). + """ + return bool() + + def load(self, filename: str, update_chunk_only = False) -> bool: + """ + Loads the fused point cloud from a file. + :param filename: Path of the file to load the fused point cloud from. + :param update_chunk_only: Whether to only load data in chunks (and not vertices / normals).\n Default: False. + :return: True if the mesh was successfully loaded, otherwise False. + + .. note:: + Updating a sl.FusedPointCloud is time consuming. Consider using only chunks for better performances. + """ + return bool() + + def clear(self) -> None: + """ + Clears all the data. + """ + pass + + def update_from_chunklist(self, id = []) -> None: + """ + Updates vertices and normals from chunk data pointed by the given list of id. + :param id: Indices of chunks which will be concatenated. Default: (empty). + .. note:: + If the given list of id is empty, all chunks will be used to update the current sl.FusedPointCloud. + """ + pass + + def get_number_of_points(self) -> int: + """ + Computes the total number of points stored in all chunks. + :return: The number of points stored in all chunks. + """ + return int() + + +class Mesh: + """ + Class representing a mesh and containing the geometric (and optionally texture) data of the scene captured by the spatial mapping module. + + By default the mesh is defined as a set of chunks. + \n This way we update only the data that has to be updated avoiding a time consuming remapping process every time a small part of the sl.Mesh is updated. 
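+
+    A minimal retrieval sketch (assumptions: an opened sl.Camera named ```zed``` with positional tracking and spatial mapping already enabled; illustrative only, not the only workflow):
+
+    .. code-block:: text
+
+        mesh = sl.Mesh()                          # empty mesh to fill
+        zed.extract_whole_spatial_map(mesh)       # copy the current map into it
+        mesh.filter(sl.MeshFilterParameters())    # smooth and remove small blobs
+        mesh.save("scene.obj", sl.MESH_FILE_FORMAT.OBJ)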
+ """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def chunks(self) -> list[Chunk]: + """ + List of chunks constituting the sl.Mesh. + """ + return list[Chunk]() + + @property + def texture(self) -> Mat: + """ + Texture of the sl.Mesh. + .. note:: + Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()). + """ + return Mat() + + def __dealloc__(self) -> None: + pass + + def __getitem__(self, x) -> Chunk: + """ + Gets a chunk from chunks. + """ + return Chunk() + + def filter(self, params = MeshFilterParameters(), update_chunk_only = False) -> bool: + """ + Filters the mesh. + The resulting mesh is smoothed, small holes are filled, and small blobs of non-connected triangles are deleted. + :param params: Filtering parameters. Default: a preset of sl.MeshFilterParameters. + :param update_chunk_only: Whether to only update chunks (and not vertices / normals / triangles).\n Default: False. + :return: True if the mesh was successfully filtered, otherwise False. + + .. note:: + The filtering is a costly operation. + + .. note:: + It is not recommended to call it every time you retrieve a mesh but only at the end of your spatial mapping process. + """ + return bool() + + def apply_texture(self, texture_format = MESH_TEXTURE_FORMAT.RGB) -> bool: + """ + Applies a texture to the mesh. + By using this method you will get access to uv, and texture. + \n The number of triangles in the mesh may slightly differ before and after calling this method due to missing texture information. + \n There is only one texture for the mesh, the uv of each chunk are expressed for it in its entirety. + \n NumPy arrays of vertices / normals and uv have now the same size. + :param texture_format: Number of channels desired for the computed texture.\n Default: sl.MESH_TEXTURE_FORMAT.RGB. + :return: True if the mesh was successfully textured, otherwise False. + + .. note:: + This method can be called as long as you do not start a new spatial mapping process (due to shared memory). + + .. note:: + This method can require a lot of computation time depending on the number of triangles in the mesh. + + .. note:: + It is recommended to call it once at the end of your spatial mapping process. + + + .. warning:: The sl.SpatialMappingParameters.save_texture parameter must be set to True when enabling the spatial mapping to be able to apply the textures. + .. warning:: The mesh should be filtered before calling this method since filter() will erase the textures. + .. warning:: The texturing is also significantly slower on non-filtered meshes. + """ + return bool() + + def save(self, filename: str, typeMesh = MESH_FILE_FORMAT.OBJ, id = []) -> bool: + """ + Saves the current sl.Mesh into a file. + :param filename: Path of the file to store the mesh in. + :param typeMesh: File extension type. Default: sl.MESH_FILE_FORMAT.OBJ. + :param id: Set of chunks to be saved. Default: (empty) (all chunks are saved) + :return: True if the file was successfully saved, otherwise False. + + .. note:: + Only sl.MESH_FILE_FORMAT.OBJ supports textures data. + + .. note:: + This method operates on the sl.Mesh not on chunks. + + .. note:: + This way you can save different parts of your sl.Mesh by updating it with update_mesh_from_chunklist(). + """ + return bool() + + def load(self, filename: str, update_mesh = False) -> bool: + """ + Loads the mesh from a file. + :param filename: Path of the file to load the mesh from. 
+ :param update_mesh: Whether to only load data in chunks (and not vertices / normals / triangles).\n Default: False. + :return: True if the mesh was successfully loaded, otherwise False. + + .. note:: + Updating a sl::Mesh is time consuming. Consider using only chunks for better performances. + """ + return bool() + + def clear(self) -> None: + """ + Clears all the data. + """ + pass + + def vertices(self) -> np.array[float]: + """ + NumPy array of vertices. + Vertices are defined by a 3D point ```[x, y, z]```. + """ + return np.array[float]() + + def triangles(self) -> np.array[int]: + """ + NumPy array of triangles/faces. + Triangle defined as a set of three vertices indexes ```[v1, v2, v3]```. + """ + return np.array[int]() + + def normals(self) -> np.array[float]: + """ + NumPy array of normals. + Normals are defined by three components ```[nx, ny, nz]```. + .. note:: + A normal is defined for each vertex. + """ + return np.array[float]() + + def colors(self) -> np.array[int]: + """ + NumPy array of colors. + Colors are defined by three components ```[r, g, b]```. + .. note:: + A color is defined for each vertex. + """ + return np.array[int]() + + def uv(self) -> np.array[float]: + """ + UVs defines the 2D projection of each vertices onto the texture. + Values are normalized [0, 1] and start from the bottom left corner of the texture (as requested by OpenGL). + In order to display a textured mesh you need to bind the texture and then draw each triangle by picking its uv values. + .. note:: + Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()). + """ + return np.array[float]() + + def get_number_of_triangles(self) -> int: + """ + Computes the total number of triangles stored in all chunks. + :return: The number of triangles stored in all chunks. + """ + return int() + + def get_boundaries(self) -> np.array[int]: + """ + Compute the indices of boundary vertices. + :return: The indices of boundary vertices. + """ + return np.array[int]() + + def merge_chunks(self, faces_per_chunk: int) -> None: + """ + Merges current chunks. + This method can be used to merge chunks into bigger sets to improve rendering process. + :param faces_per_chunk: Number of faces per chunk. + + .. note:: + This method is useful for Unity, which does not handle chunks with more than 65K vertices. + + .. warning:: This method should not be called during spatial mapping process since mesh updates will revert this changes. + """ + pass + + def get_gravity_estimate(self) -> np.array[float]: + """ + Estimates the gravity vector. + This method looks for a dominant plane in the whole mesh considering that it is the floor (or a horizontal plane). + :return: The estimated gravity vector (NumPy array). + + .. note:: + This can be used to find the gravity to create realistic physical interactions. + """ + return np.array[float]() + + def get_visible_list(self, camera_pose: Transform) -> list[int]: + """ + Computes the id list of visible chunks from a specific point of view. + :param camera_pose: Point of view (given in the same reference as the vertices). + :return: The list of id of visible chunks. + """ + return list[int]() + + def get_surrounding_list(self, camera_pose: Transform, radius: float) -> list[int]: + """ + Computes the id list of chunks close to a specific point of view. + :param camera_pose: Point of view (given in the same reference as the vertices). + :param radius: Radius determining closeness (given in the same unit as the mesh). 
+    :return: The list of ids of chunks close to the given point.
+    """
+    return list[int]()
+
+    def update_mesh_from_chunklist(self, id = []) -> None:
+        """
+        Updates vertices / normals / triangles / uv from chunk data pointed by the given list of id.
+        :param id: Indices of chunks which will be concatenated. Default: (empty).
+        .. note::
+            If the given list of id is empty, all chunks will be used to update the current sl.Mesh.
+        """
+        pass
+
+
+class Plane:
+    """
+    Class representing a plane defined by a point and a normal, or a plane equation.
+
+    Other elements can be extracted such as the mesh, the 3D bounds, etc.
+    .. note::
+        The plane measurements are expressed in the reference frame defined by sl.RuntimeParameters.measure3D_reference_frame.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def type(self) -> PLANE_TYPE:
+        """
+        Type of the plane defined by its orientation.
+        .. note::
+            It is deduced from the gravity vector and is therefore not available on sl.MODEL.ZED.
+
+        .. note::
+            sl.MODEL.ZED will give sl.PLANE_TYPE.UNKNOWN for every plane.
+        """
+        return PLANE_TYPE()
+
+    @type.setter
+    def type(self, type: Any) -> None:
+        pass
+
+    def get_normal(self) -> np.array[float]:
+        """
+        Gets the plane normal vector.
+        :return: sl.Plane normalized normal vector (NumPy array).
+        """
+        return np.array[float]()
+
+    def get_center(self) -> np.array[float]:
+        """
+        Gets the plane center point.
+        :return: sl.Plane center point.
+        """
+        return np.array[float]()
+
+    def get_pose(self, py_pose = Transform()) -> Transform:
+        """
+        Gets the plane pose relative to the global reference frame.
+        :param py_pose: sl.Transform to fill (or it creates one by default).
+        :return: Transformation matrix (rotation and translation) of the plane pose.
+        .. note::
+            Can be used to transform the global reference frame center ```(0, 0, 0)``` to the plane center.
+        """
+        return Transform()
+
+    def get_extents(self) -> np.array[float]:
+        """
+        Gets the width and height of the bounding rectangle around the plane contours.
+        :return: Width and height of the bounding plane contours (NumPy array).
+        .. warning:: This value is expressed in the plane reference frame.
+        """
+        return np.array[float]()
+
+    def get_plane_equation(self) -> np.array[float]:
+        """
+        Gets the plane equation.
+        :return: Plane equation coefficients ```[a, b, c, d]``` (NumPy array).
+        .. note::
+            The plane equation has the following form: ```ax + by + cz = d```.
+        """
+        return np.array[float]()
+
+    def get_bounds(self) -> np.array[float][float]:
+        """
+        Gets the polygon bounds of the plane.
+        :return: Vector of 3D points forming the polygon bounds corresponding to the current visible limits of the plane (NumPy array).
+        """
+        return np.array[float][float]()
+
+    def extract_mesh(self) -> Mesh:
+        """
+        Computes and returns the mesh of the bounds polygon.
+        :return: sl.Mesh representing the plane delimited by the visible bounds.
+        """
+        return Mesh()
+
+    def get_closest_distance(self, point = [0, 0, 0]) -> float:
+        """
+        Gets the distance between the input point and its projection along the normal vector onto the plane (the closest point on the plane).
+        :param point: Point to project into the plane.
+        :return: The Euclidean distance between the input point and the projected point.
+        """
+        return float()
+
+    def clear(self) -> None:
+        """
+        Clears all the data.
+        """
+        pass
+
+
+class MAPPING_RESOLUTION(enum.Enum):
+    """
+    Lists the spatial mapping resolution presets.
+
+    | Enumerator |     |
+    |:---:|:---:|
+    | HIGH | Creates a detailed geometry. Requires lots of memory. |
+    | MEDIUM | Small variations in the geometry will disappear. Useful for big objects. |
+    | LOW | Keeps only huge variations of the geometry. Useful for outdoor purposes. |
+    """
+    HIGH = enum.auto()
+    MEDIUM = enum.auto()
+    LOW = enum.auto()
+
+class MAPPING_RANGE(enum.Enum):
+    """
+    Lists the spatial mapping depth range presets.
+
+    | Enumerator |     |
+    |:---:|:---:|
+    | SHORT | Only depth close to the camera will be used during spatial mapping. |
+    | MEDIUM | Medium depth range. |
+    | LONG | Takes into account objects that are far. Useful for outdoor purposes. |
+    | AUTO | Depth range will be computed based on current sl.Camera state and parameters. |
+    """
+    SHORT = enum.auto()
+    MEDIUM = enum.auto()
+    LONG = enum.auto()
+    AUTO = enum.auto()
+
+class SPATIAL_MAP_TYPE(enum.Enum):
+    """
+    Lists the types of spatial maps that can be created.
+
+    | Enumerator |     |
+    |:---:|:---:|
+    | MESH | The geometry is represented by a set of vertices connected by edges and forming faces. No color information is available. |
+    | FUSED_POINT_CLOUD | The geometry is represented by a set of 3D colored points. |
+    """
+    MESH = enum.auto()
+    FUSED_POINT_CLOUD = enum.auto()
+
+class BUS_TYPE(enum.Enum):
+    """
+    Lists available LIVE input types in the ZED SDK.
+
+    | Enumerator |     |
+    |:---:|:---:|
+    | USB | USB input mode. |
+    | GMSL | GMSL input mode. Note: Only on NVIDIA Jetson. |
+    | AUTO | Automatically selects the input type, trying available USB cameras first, then GMSL. |
+    """
+    USB = enum.auto()
+    GMSL = enum.auto()
+    AUTO = enum.auto()
+    LAST = enum.auto()
+
+def generate_virtual_stereo_serial_number(serial_left, serial_right) -> int:
+    """
+    Generates a unique identifier for a virtual stereo camera based on the serial numbers of the two ZED Ones.
+    :param serial_left: Serial number of the left camera.
+    :param serial_right: Serial number of the right camera.
+    :return: A unique hash for the given pair of serial numbers, or 0 if an error occurred (e.g. the same serial number was given twice).
+    """
+    return int()
+
+class InputType:
+    """
+    Class defining the input type used in the ZED SDK.
+    It can be used to select a specific camera with an id or serial number, or from an SVO file.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    def __dealloc__(self) -> None:
+        pass
+
+    def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None:
+        """
+        Set the input as the camera with specified id.
+
+        .. note::
+            The id is not related to the serial number of the camera. The id is assigned by the OS depending on the order in which the cameras are plugged in.
+
+        .. warning:: Using an id is not recommended if you have multiple cameras plugged into the system; prefer using the serial number instead.
+
+        :param cam_id: Id of the camera to open. The default, -1, will open the first available camera. A number >= 0 will try to open the camera with the corresponding id.
+        :param bus_type: Whether the camera is a USB or a GMSL camera.
+        """
+        pass
+
+    def set_from_serial_number(self, serial_number) -> None:
+        """
+        Set the input as the camera with specified serial number.
+        :param serial_number: Serial number of the camera to open.
+        """
+        pass
+
+    def set_virtual_stereo_from_camera_id(self, id_left, id_right, virtual_serial_number) -> bool:
+        """
+        Set the input as a virtual stereo camera from two cameras with specified ids.
+        :param id_left: Id of the left camera.
+        :param id_right: Id of the right camera.
+ :param virtual_serial_number: Serial number of the virtual stereo camera. + .. note:: + : The virtual serial number must fall within an interval that reflects the Product ID range. + + This is necessary to avoid, for instance, downloading calibration data from an unrelated product. + The valid range is 110000000 to 119999999. + A support function can be used, based on the ZED One serial number, to compute a valid virtual serial number: generate_virtual_stereo_serial_number + :return: False if there's no error and the camera was successfully created, otherwise True. + """ + return bool() + + def set_virtual_stereo_from_serial_numbers(self, camera_left_serial_number, camera_right_serial_number, virtual_serial_number) -> bool: + """ + Set the input as a virtual stereo camera from two cameras with specified serial numbers. + :param camera_left_serial_number: Serial number of the left camera. + :param camera_right_serial_number: Serial number of the right camera. + :param virtual_serial_number: Serial number of the virtual stereo camera. + .. note:: + : The virtual serial number must fall within an interval that reflects the Product ID range. + + This is necessary to avoid, for instance, downloading calibration data from an unrelated product. + The valid range is 110000000 to 119999999. + A support function can be used, based on the ZED One serial number, to compute a valid virtual serial number: generate_virtual_stereo_serial_number + :return: False if there's no error and the camera was successfully created, otherwise True. + """ + return bool() + + def set_from_svo_file(self, svo_input_filename) -> None: + """ + Set the input as the svo specified with the filename + :param svo_input_filename: The path to the desired SVO file + """ + pass + + def set_from_stream(self, sender_ip, port = 30000) -> None: + """ + Set the input to stream with the specified ip and port + :param sender_ip: The IP address of the streaming sender + :param port: The port on which to listen. Default: 30000 + .. note:: + The protocol used for the streaming module is based on RTP/RTCP. + + .. warning:: Port must be even number, since the port+1 is used for control data. + """ + pass + + def get_type(self) -> INPUT_TYPE: + """ + Returns the current input type. + """ + return INPUT_TYPE() + + def get_configuration(self) -> str: + """ + Returns the current input configuration as a string e.g: SVO name, serial number, streaming ip, etc. + """ + return str() + + def is_init(self) -> bool: + """ + Check whether the input is set. + """ + return bool() + + +class InitParameters: + """ + Class containing the options used to initialize the sl.Camera object. + + This class allows you to select multiple parameters for the sl.Camera such as the selected camera, resolution, depth mode, coordinate system, and units of measurement. + \n Once filled with the desired options, it should be passed to the sl.Camera.open() method. + + .. 
code-block:: text
+
+        import pyzed.sl as sl
+
+        def main():
+            zed = sl.Camera() # Create a ZED camera object
+
+            init_params = sl.InitParameters() # Set initial parameters
+            init_params.sdk_verbose = 0 # Disable verbose mode
+
+            # Use the camera in LIVE mode
+            init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode
+            init_params.camera_fps = 30 # Set fps at 30
+
+            # Or use the camera in SVO (offline) mode
+            #init_params.set_from_svo_file("xxxx.svo")
+
+            # Or use the camera in STREAM mode
+            #init_params.set_from_stream("192.168.1.12", 30000)
+
+            # Other parameters are left to their default values
+
+            # Open the camera
+            err = zed.open(init_params)
+            if err != sl.ERROR_CODE.SUCCESS:
+                exit(-1)
+
+            # Close the camera
+            zed.close()
+            return 0
+
+        if __name__ == "__main__":
+            main()
+
+
+    With its default values, it opens the camera in live mode at sl.RESOLUTION.HD720
+    (or sl.RESOLUTION.HD1200 for the ZED X/X Mini) and sets the depth mode to sl.DEPTH_MODE.NEURAL.
+    \n You can customize it to fit your application.
+    .. note::
+        The parameters can also be saved and reloaded using its save() and load() methods.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def enable_image_enhancement(self) -> bool:
+        """
+        Enable the Enhanced Contrast Technology, to improve image quality.
+
+        Default: True.
+
+        \n If set to True, image enhancement will be activated in the camera ISP. Otherwise, the image will not be enhanced by the ISP.
+        .. note::
+            This only works for firmware versions 1523 and up.
+        """
+        return bool()
+
+    @enable_image_enhancement.setter
+    def enable_image_enhancement(self, enable_image_enhancement: Any) -> None:
+        pass
+
+    @property
+    def camera_image_flip(self) -> FLIP_MODE:
+        """
+        Defines whether a flip of the images is needed.
+
+        If you are using the camera upside down, setting this parameter to sl.FLIP_MODE.ON will cancel its rotation.
+        \n The images will be horizontally flipped.
+        \n Default: sl.FLIP_MODE.AUTO
+        .. note::
+            From ZED SDK 3.2 a new sl.FLIP_MODE enum was introduced to add the automatic flip mode detection based on the IMU gravity detection.
+
+        .. note::
+            This does not work on sl.MODEL.ZED cameras since they do not have the necessary sensors.
+        """
+        return FLIP_MODE()
+
+    @camera_image_flip.setter
+    def camera_image_flip(self, camera_image_flip: Any) -> None:
+        pass
+
+    @property
+    def maximum_working_resolution(self) -> Resolution:
+        """
+        Sets a maximum size for all SDK outputs, such as the retrieve_image and retrieve_measure functions.
+
+        This overrides the default (0, 0): instead of outputting sl.Mat at the native image size, the ZED SDK will use this size by default.
+        A custom lower size can also be used at runtime, but not a bigger one. This is used for internal optimization of compute and memory allocations.
+
+        The default, (0, 0), behaves like previous versions, meaning the native image size.
+
+        .. note::
+            If a maximum_working_resolution field is lower than 64, it will be interpreted as a dividing scale factor:
+
+            - maximum_working_resolution = sl.Resolution(1280, 2) -> 1280 x (image_height/2) = 1280 x half height
+            - maximum_working_resolution = sl.Resolution(4, 4) -> (image_width/4) x (image_height/4) = quarter size
+        """
+        return Resolution()
+
+    @maximum_working_resolution.setter
+    def maximum_working_resolution(self, maximum_working_resolution: Any) -> None:
+        pass
+
+    @property
+    def sdk_gpu_id(self) -> int:
+        """
+        NVIDIA graphics card id to use.
+ + By default the SDK will use the most powerful NVIDIA graphics card found. + \n However, when running several applications, or using several cameras at the same time, splitting the load over available GPUs can be useful. + \n This parameter allows you to select the GPU used by the sl.Camera using an ID from 0 to n-1 GPUs in your PC. + \n Default: -1 + .. note:: + A non-positive value will search for all CUDA capable devices and select the most powerful. + """ + return int() + + @sdk_gpu_id.setter + def sdk_gpu_id(self, sdk_gpu_id: Any) -> None: + pass + + @property + def optional_settings_path(self) -> str: + """ + Optional path where the ZED SDK has to search for the settings file (SN.conf file). + + This file contains the calibration information of the camera. + \n Default: "" + + .. note:: + The settings file will be searched in the default directory: + + * **Linux**: /usr/local/zed/settings/ + * **Windows**: C:/ProgramData/stereolabs/settings + + .. note:: + If a path is specified and no file has been found, the ZED SDK will search the settings file in the default directory. + + .. note:: + An automatic download of the settings file (through **ZED Explorer** or the installer) will still download the files on the default path. + + + .. code-block:: text + + init_params = sl.InitParameters() # Set initial parameters + home = "/path/to/home" + path = home + "/Documents/settings/" # assuming /path/to/home/Documents/settings/SNXXXX.conf exists. Otherwise, it will be searched in /usr/local/zed/settings/ + init_params.optional_settings_path = path + """ + return str() + + @optional_settings_path.setter + def optional_settings_path(self, optional_settings_path: Any) -> None: + pass + + @property + def coordinate_system(self) -> COORDINATE_SYSTEM: + """ + sl.COORDINATE_SYSTEM to be used as reference for positional tracking, mesh, point clouds, etc. + + This parameter allows you to select the sl.COORDINATE_SYSTEM used by the sl.Camera object to return its measures. + \n This defines the order and the direction of the axis of the coordinate system. + \n Default: sl.COORDINATE_SYSTEM.IMAGE + """ + return COORDINATE_SYSTEM() + + @coordinate_system.setter + def coordinate_system(self, coordinate_system: Any) -> None: + pass + + @property + def grab_compute_capping_fps(self) -> float: + """ + Define a computation upper limit to the grab frequency. + + This can be useful to get a known constant fixed rate or limit the computation load while keeping a short exposure time by setting a high camera capture framerate. + \n The value should be inferior to the sl.InitParameters.camera_fps and strictly positive. + .. note:: + It has no effect when reading an SVO file. + + + This is an upper limit and won't make a difference if the computation is slower than the desired compute capping FPS. + .. note:: + Internally the sl.Camera.grab() method always tries to get the latest available image while respecting the desired FPS as much as possible. + """ + return float() + + @grab_compute_capping_fps.setter + def grab_compute_capping_fps(self, grab_compute_capping_fps: Any) -> None: + pass + + @property + def async_grab_camera_recovery(self) -> bool: + """ + Define the behavior of the automatic camera recovery during sl.Camera.grab() method call. + + When async is enabled and there's an issue with the communication with the sl.Camera object, + sl.Camera.grab() will exit after a short period and return the sl.ERROR_CODE.CAMERA_REBOOTING warning. 
+ \n The recovery will run in the background until the correct communication is restored. + \n When async_grab_camera_recovery is false, the sl.Camera.grab() method is blocking and will return + only once the camera communication is restored or the timeout is reached. + \n Default: False + """ + return bool() + + @async_grab_camera_recovery.setter + def async_grab_camera_recovery(self, async_grab_camera_recovery: Any) -> None: + pass + + @property + def enable_right_side_measure(self) -> bool: + """ + Enable the measurement computation on the right images. + + By default, the ZED SDK only computes a single depth map, aligned with the left camera image. + \n This parameter allows you to enable sl.MEASURE.DEPTH_RIGHT and other sl.MEASURE.XXX_RIGHT at the cost of additional computation time. + \n For example, mixed reality pass-through applications require one depth map per eye, so this parameter can be activated. + \n Default: False + """ + return bool() + + @enable_right_side_measure.setter + def enable_right_side_measure(self, enable_right_side_measure: Any) -> None: + pass + + @property + def svo_real_time_mode(self) -> bool: + """ + Defines if sl.Camera object return the frame in real time mode. + + When playing back an SVO file, each call to sl.Camera.grab() will extract a new frame and use it. + \n However, it ignores the real capture rate of the images saved in the SVO file. + \n Enabling this parameter will bring the SDK closer to a real simulation when playing back a file by using the images' timestamps. + \n Default: False + .. note:: + sl.Camera.grab() will return an error when trying to play too fast, and frames will be dropped when playing too slowly. + """ + return bool() + + @svo_real_time_mode.setter + def svo_real_time_mode(self, svo_real_time_mode: Any) -> None: + pass + + @property + def sdk_verbose_log_file(self) -> str: + """ + File path to store the ZED SDK logs (if sdk_verbose is enabled). + + The file will be created if it does not exist. + \n Default: "" + + .. note:: + Setting this parameter to any value will redirect all standard output print calls of the entire program. + + .. note:: + This means that your own standard output print calls will be redirected to the log file. + + .. warning:: The log file won't be cleared after successive executions of the application. + .. warning:: This means that it can grow indefinitely if not cleared. + """ + return str() + + @sdk_verbose_log_file.setter + def sdk_verbose_log_file(self, sdk_verbose_log_file: Any) -> None: + pass + + @property + def depth_minimum_distance(self) -> float: + """ + Minimum depth distance to be returned, measured in the sl.UNIT defined in coordinate_units. + + This parameter allows you to specify the minimum depth value (from the camera) that will be computed. + \n Setting this value to any negative or null value will select the default minimum depth distance available for the used ZED Camera (depending on the camera focal length and baseline). + \n Default: -1 + + \n When using deprecated depth modes ( sl.DEPTH_MODE.PERFORMANCE, sl.DEPTH_MODE.QUALITY or sl.DEPTH_MODE.ULTRA), + the default minimum depth distances are given by `this table `_. + + .. note:: + This value cannot be greater than 3 meters. + """ + return float() + + @depth_minimum_distance.setter + def depth_minimum_distance(self, depth_minimum_distance: Any) -> None: + pass + + @property + def coordinate_units(self) -> UNIT: + """ + Unit of spatial data (depth, point cloud, tracking, mesh, etc.) for retrieval. 
+
+        Default: sl.UNIT.MILLIMETER
+        """
+        return UNIT()
+
+    @coordinate_units.setter
+    def coordinate_units(self, coordinate_units: Any) -> None:
+        pass
+
+    @property
+    def open_timeout_sec(self) -> float:
+        """
+        Define a timeout in seconds after which an error is reported if the sl.Camera.open() method fails.
+
+        Set to '-1' to try to open the camera endlessly without returning an error in case of failure.
+        \n Set to '0' to return an error in case of failure at the first attempt.
+        \n Default: 5.0
+        .. note::
+            This parameter only impacts the LIVE mode.
+        """
+        return float()
+
+    @open_timeout_sec.setter
+    def open_timeout_sec(self, open_timeout_sec: Any) -> None:
+        pass
+
+    @property
+    def depth_stabilization(self) -> int:
+        return int()
+
+    @depth_stabilization.setter
+    def depth_stabilization(self, depth_stabilization: Any) -> None:
+        pass
+
+    @property
+    def depth_mode(self) -> DEPTH_MODE:
+        """
+        sl.DEPTH_MODE to be used.
+
+        The ZED SDK offers several sl.DEPTH_MODE, offering various levels of performance and accuracy.
+        \n This parameter allows you to set the sl.DEPTH_MODE that best matches your needs.
+        \n Default: sl.DEPTH_MODE.NEURAL
+        .. note::
+            Available depth modes are listed here: sl.DEPTH_MODE.
+        """
+        return DEPTH_MODE()
+
+    @depth_mode.setter
+    def depth_mode(self, depth_mode: Any) -> None:
+        pass
+
+    @property
+    def depth_maximum_distance(self) -> float:
+        """
+        Maximum depth distance to be returned, measured in the sl.UNIT defined in coordinate_units.
+
+        When estimating the depth, the ZED SDK uses this upper limit to turn higher values into **inf** ones.
+        \n Changing this value has no impact on performance and does not affect the positional tracking or the spatial mapping.
+        \n It only changes the values of the depth, point cloud and normals.
+        \n Setting this value to any negative or null value will select the default maximum depth distance available.
+
+        \n Default: -1
+        """
+        return float()
+
+    @depth_maximum_distance.setter
+    def depth_maximum_distance(self, depth_maximum_distance: Any) -> None:
+        pass
+
+    @property
+    def enable_image_validity_check(self) -> int:
+        """
+        Enable or disable the image validity verification.
+        This will perform additional verification on the image to identify corrupted data. This verification is done in the sl.Camera.grab() method and requires some computations.
+        \n If an issue is found, the sl.Camera.grab() method will output a warning as sl.ERROR_CODE.CORRUPTED_FRAME.
+        \n This version does not currently detect frame tearing.
+        \n Default: False (disabled)
+        """
+        return int()
+
+    @enable_image_validity_check.setter
+    def enable_image_validity_check(self, enable_image_validity_check: Any) -> None:
+        pass
+
+    @property
+    def async_image_retrieval(self) -> bool:
+        """
+        Enable async image retrieval.
+
+        If set to True, camera images will be retrieved at a framerate independent of the grab() application framerate. This is useful for recording an SVO or sending the camera stream at a different rate than the application's.
+        \n Default: False
+        """
+        return bool()
+
+    @async_image_retrieval.setter
+    def async_image_retrieval(self, async_image_retrieval: Any) -> None:
+        pass
+
+    @property
+    def sensors_required(self) -> bool:
+        """
+        Requires the successful opening of the motion sensors before opening the camera.
+
+        Default: False.
+
+        .. note::
+            If set to False, the ZED SDK will try to **open and use** the IMU (second USB device on USB2.0) and will open the camera successfully even if the sensors failed to open.
+ + + This can be used for example when using a USB3.0 only extension cable (some fiber extension for example). + .. note:: + This parameter only impacts the LIVE mode. + + .. note:: + If set to true, sl.Camera.open() will fail if the sensors cannot be opened. + + .. note:: + This parameter should be used when the IMU data must be available, such as object detection module or when the gravity is needed. + + + \nNote: This setting is not taken into account for sl.MODEL.ZED camera since it does not include sensors. + """ + return bool() + + @sensors_required.setter + def sensors_required(self, sensors_required: Any) -> None: + pass + + @property + def camera_fps(self) -> int: + """ + Requested camera frame rate. + + If set to 0, the highest FPS of the specified camera_resolution will be used. + \n Default: 0 + \n\n See sl.RESOLUTION for a list of supported frame rates. + .. note:: + If the requested camera_fps is unsupported, the closest available FPS will be used. + """ + return int() + + @camera_fps.setter + def camera_fps(self, camera_fps: Any) -> None: + pass + + @property + def optional_opencv_calibration_file(self) -> str: + """ + Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. + + .. note:: + Using this will disable the factory calibration of the camera. + + .. note:: + The file must be in a XML/YAML/JSON formatting provided by OpenCV. + + .. note:: + It also must contain the following keys: Size, K_LEFT (intrinsic left), K_RIGHT (intrinsic right), + + D_LEFT (distortion left), D_RIGHT (distortion right), R (extrinsic rotation), T (extrinsic translation). + .. warning:: Erroneous calibration values can lead to poor accuracy in all ZED SDK modules. + """ + return str() + + @optional_opencv_calibration_file.setter + def optional_opencv_calibration_file(self, optional_opencv_calibration_file: Any) -> None: + pass + + @property + def camera_resolution(self) -> RESOLUTION: + """ + Desired camera resolution. + .. note:: + Small resolutions offer higher framerate and lower computation time. + + .. note:: + In most situations, sl.RESOLUTION.HD720 at 60 FPS is the best balance between image quality and framerate. + + + Default: + * ZED X/X Mini: sl.RESOLUTION.HD1200 + * other cameras: sl.RESOLUTION.HD720 + .. note:: + Available resolutions are listed here: sl.RESOLUTION. + """ + return RESOLUTION() + + @camera_resolution.setter + def camera_resolution(self, camera_resolution: Any) -> None: + pass + + @property + def camera_disable_self_calib(self) -> bool: + """ + Disables the self-calibration process at camera opening. + + At initialization, sl.Camera runs a self-calibration process that corrects small offsets from the device's factory calibration. + \n A drawback is that calibration parameters will slightly change from one (live) run to another, which can be an issue for repeatability. + \n If set to true, self-calibration will be disabled and calibration parameters won't be optimized, raw calibration parameters from the configuration file will be used. + \n Default: false + .. note:: + In most situations, self calibration should remain enabled. + + .. note:: + You can also trigger the self-calibration at anytime after sl.Camera.open() by calling sl.Camera.update_self_calibration(), even if this parameter is set to true. 
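+
+        A short sketch (illustrative only):
+
+        .. code-block:: text
+
+            init_params = sl.InitParameters()
+            init_params.camera_disable_self_calib = True  # keep raw factory calibration for repeatable runs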
+        """
+        return bool()
+
+    @camera_disable_self_calib.setter
+    def camera_disable_self_calib(self, camera_disable_self_calib: Any) -> None:
+        pass
+
+    @property
+    def sdk_verbose(self) -> int:
+        """
+        Enable the ZED SDK verbose mode.
+
+        This parameter allows you to enable the verbosity of the ZED SDK to get a variety of runtime information in the console.
+        \n When developing an application, enabling verbose (``sdk_verbose >= 1``) mode can help you understand the current ZED SDK behavior.
+        \n However, this might not be desirable in a shipped version.
+        \n Default: 1 (verbose messages enabled)
+        .. note::
+            The verbose messages can also be exported into a log file.
+
+        .. note::
+            See sdk_verbose_log_file for more.
+        """
+        return int()
+
+    @sdk_verbose.setter
+    def sdk_verbose(self, sdk_verbose: Any) -> None:
+        pass
+
+    def __dealloc__(self) -> None:
+        """
+        Default constructor.
+
+        All the parameters are set to their default and optimized values.
+        :param camera_resolution: Chosen camera_resolution
+        :param camera_fps: Chosen camera_fps
+        :param svo_real_time_mode: Activates svo_real_time_mode
+        :param depth_mode: Chosen depth_mode
+        :param coordinate_units: Chosen coordinate_units
+        :param coordinate_system: Chosen coordinate_system
+        :param sdk_verbose: Sets sdk_verbose
+        :param sdk_gpu_id: Chosen sdk_gpu_id
+        :param depth_minimum_distance: Chosen depth_minimum_distance
+        :param depth_maximum_distance: Chosen depth_maximum_distance
+        :param camera_disable_self_calib: Activates camera_disable_self_calib
+        :param camera_image_flip: Sets camera_image_flip
+        :param enable_right_side_measure: Activates enable_right_side_measure
+        :param sdk_verbose_log_file: Chosen sdk_verbose_log_file
+        :param depth_stabilization: Activates depth_stabilization
+        :param input_t: Chosen input_t (InputType)
+        :param optional_settings_path: Chosen optional_settings_path
+        :param sensors_required: Activates sensors_required
+        :param enable_image_enhancement: Activates enable_image_enhancement
+        :param optional_opencv_calibration_file: Sets optional_opencv_calibration_file
+        :param open_timeout_sec: Sets open_timeout_sec
+        :param async_grab_camera_recovery: Sets async_grab_camera_recovery
+        :param grab_compute_capping_fps: Sets grab_compute_capping_fps
+        :param enable_image_validity_check: Sets enable_image_validity_check
+        :param maximum_working_resolution: Sets maximum_working_resolution
+
+        .. code-block:: text
+
+            params = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720, camera_fps=30, depth_mode=sl.DEPTH_MODE.NEURAL)
+        """
+        pass
+
+    def save(self, filename) -> bool:
+        """
+        Saves the current set of parameters into a file to be reloaded with the load() method.
+        :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set).
+        :return: True if the file was successfully saved, otherwise False.
+        .. warning:: For security reasons, the file must not already exist.
+        .. warning:: In case a file already exists, the method will return False and the existing file will not be updated.
+
+        .. code-block:: text
+
+            init_params = sl.InitParameters() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            init_params.set_from_svo_file("/path/to/file.svo") # Selects the SVO file to be read
+            init_params.save("initParameters.conf") # Export the parameters into a file
+        """
+        return bool()
+
+    def load(self, filename) -> bool:
+        """
+        Loads a set of parameters from a file previously saved with save().
+        :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not set).
+        :return: True if the file was successfully loaded, otherwise False.
+
+        .. code-block:: text
+
+            init_params = sl.InitParameters() # Set initial parameters
+            init_params.load("initParameters.conf") # Load the init_params from a previously exported file
+        """
+        return bool()
+
+    def input(self, input_t) -> None:
+        """
+        The SDK can handle different input types:
+          - Select a camera by its ID (/dev/videoX on Linux, and 0 to N cameras connected on Windows)
+          - Select a camera by its serial number
+          - Open a recorded sequence in the SVO file format
+          - Open a streaming camera from its IP address and port
+
+        This parameter allows you to select the desired input. It should be used like this:
+        .. code-block:: text
+
+            init_params = sl.InitParameters() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_camera_id(0) # Selects the camera with ID = 0
+            init_params.input = input_t
+            init_params.set_from_camera_id(0) # You can also use this
+
+
+        .. code-block:: text
+
+            init_params = sl.InitParameters() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_serial_number(1010) # Selects the camera with serial number = 1010
+            init_params.input = input_t
+            init_params.set_from_serial_number(1010) # You can also use this
+
+
+        .. code-block:: text
+
+            init_params = sl.InitParameters() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_svo_file("/path/to/file.svo") # Selects the SVO file to be read
+            init_params.input = input_t
+            init_params.set_from_svo_file("/path/to/file.svo") # You can also use this
+
+
+        .. code-block:: text
+
+            init_params = sl.InitParameters() # Set initial parameters
+            init_params.sdk_verbose = 1 # Enable verbose mode
+            input_t = sl.InputType()
+            input_t.set_from_stream("192.168.1.42")
+            init_params.input = input_t
+            init_params.set_from_stream("192.168.1.42") # You can also use this
+
+
+        Available cameras and their ID/serial can be listed using get_device_list() and get_streaming_device_list().
+        Each sl.Camera will create its own memory (CPU and GPU); therefore, the number of ZEDs used at the same time can be limited by the configuration of your computer (GPU/CPU memory and capabilities).
+
+        Default: (empty)
+        See InputType for complementary information.
+
+        .. warning:: Using the ZED SDK Python API, init_params.input.set_from_XXX won't work; use init_params.set_from_XXX instead.
+        """
+        pass
+
+    def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None:
+        """
+        Defines the input source with a camera id to initialize and open an sl.Camera object from.
+        :param cam_id: Id of the desired camera to open.
+        :param bus_type: sl.BUS_TYPE of the desired camera to open.
+        """
+        pass
+
+    def set_from_serial_number(self, serial_number) -> None:
+        """
+        Defines the input source with a serial number to initialize and open an sl.Camera object from.
+        :param serial_number: Serial number of the desired camera to open.
+        """
+        pass
+
+    def set_from_svo_file(self, svo_input_filename) -> None:
+        """
+        Defines the input source with an SVO file to initialize and open an sl.Camera object from.
+        :param svo_input_filename: Path to the desired SVO file to open.
+        """
+        pass
+
+    def set_from_stream(self, sender_ip, port = 30000) -> None:
+        """
+        Defines the input source from a stream to initialize and open an sl.Camera object from.
+        :param sender_ip: IP address of the streaming sender.
+        :param port: Port on which to listen. Default: 30000
+        """
+        pass
+
+
+class RuntimeParameters:
+    """
+    Class containing parameters that define the behavior of sl.Camera.grab().
+    The default constructor sets all parameters to their default settings.
+    .. note::
+        Parameters can be adjusted by the user.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def texture_confidence_threshold(self) -> int:
+        """
+        Threshold to reject depth values based on their texture confidence.
+
+        The texture confidence range is [1, 100].
+        \n Decreasing this value will remove depth data from image areas which are uniform.
+        \n Default: 100 (no depth pixel will be rejected)
+        .. note::
+            Pixels with a value close to 100 are not to be trusted. Accurate depth pixels tend to be closer to lower values.
+        """
+        return int()
+
+    @texture_confidence_threshold.setter
+    def texture_confidence_threshold(self, texture_confidence_threshold: Any) -> None:
+        pass
+
+    @property
+    def measure3D_reference_frame(self) -> REFERENCE_FRAME:
+        """
+        Reference frame in which to provide the 3D measures (point cloud, normals, etc.).
+
+        Default: sl.REFERENCE_FRAME.CAMERA
+        """
+        return REFERENCE_FRAME()
+
+    @measure3D_reference_frame.setter
+    def measure3D_reference_frame(self, measure3D_reference_frame: Any) -> None:
+        pass
+
+    @property
+    def confidence_threshold(self) -> int:
+        """
+        Threshold to reject depth values based on their confidence.
+
+        Each depth pixel has a corresponding confidence (sl.MEASURE.CONFIDENCE) in the range [1, 100].
+        \n Decreasing this value will remove depth data from both object edges and low textured areas, to keep only confident depth estimation data.
+        \n Default: 95
+        .. note::
+            Pixels with a value close to 100 are not to be trusted. Accurate depth pixels tend to be closer to lower values.
+
+        .. note::
+            It can be seen as a probability of error, scaled to 100.
+        """
+        return int()
+
+    @confidence_threshold.setter
+    def confidence_threshold(self, confidence_threshold: Any) -> None:
+        pass
+
+    @property
+    def enable_fill_mode(self) -> bool:
+        """
+        Defines if the depth map should be completed or not.
+
+        Default: False
+        .. note::
+            It is similar to the removed sl.SENSING_MODE.FILL.
+
+        .. warning:: Enabling this will override the confidence values confidence_threshold and texture_confidence_threshold as well as remove_saturated_areas.
+        """
+        return bool()
+
+    @enable_fill_mode.setter
+    def enable_fill_mode(self, enable_fill_mode: Any) -> None:
+        pass
+
+    @property
+    def enable_depth(self) -> bool:
+        """
+        Defines if the depth map should be computed.
+
+        Default: True
+        .. note::
+            If set to False, only the images are available.
+        """
+        return bool()
+
+    @enable_depth.setter
+    def enable_depth(self, enable_depth: Any) -> None:
+        pass
+
+    @property
+    def remove_saturated_areas(self) -> bool:
+        """
+        Defines if saturated areas (luminance >= 255) must be removed from the depth map estimation.
+
+        Default: True
+        .. note::
+            It is recommended to keep this parameter at True because saturated areas can create false detections.
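+
+        A minimal per-grab sketch (assumes an opened sl.Camera named ```zed```; illustrative only):
+
+        .. code-block:: text
+
+            rt = sl.RuntimeParameters()
+            rt.remove_saturated_areas = True   # default value, shown for clarity
+            if zed.grab(rt) == sl.ERROR_CODE.SUCCESS:
+                pass  # retrieve images / measures here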
+        """
+        return bool()
+
+    @remove_saturated_areas.setter
+    def remove_saturated_areas(self, remove_saturated_areas: Any) -> None:
+        pass
+
+    def __dealloc__(self) -> None:
+        """
+        Default constructor.
+
+        All the parameters are set to their default values.
+        :param enable_depth: Activates enable_depth
+        :param enable_fill_mode: Activates enable_fill_mode
+        :param confidence_threshold: Chosen confidence_threshold
+        :param texture_confidence_threshold: Chosen texture_confidence_threshold
+        :param measure3D_reference_frame: Chosen measure3D_reference_frame
+        :param remove_saturated_areas: Activates remove_saturated_areas
+        """
+        pass
+
+    def save(self, filename: str) -> bool:
+        """
+        Saves the current set of parameters into a file to be reloaded with the load() method.
+        :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set).
+        :return: True if the file was successfully saved, otherwise False.
+        .. warning:: For security reasons, the file must not already exist.
+        .. warning:: In case a file already exists, the method will return False and the existing file will not be updated.
+        """
+        return bool()
+
+    def load(self, filename: str) -> bool:
+        """
+        Loads a set of parameters from a file previously saved with save().
+        :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not detected).
+        :return: True if the file was successfully loaded, otherwise False.
+        """
+        return bool()
+
+
+class PositionalTrackingParameters:
+    """
+    Class containing a set of parameters for the positional tracking module initialization.
+
+    The default constructor sets all parameters to their default settings.
+    .. note::
+        Parameters can be adjusted by the user.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def enable_2d_ground_mode(self) -> bool:
+        """
+        Whether to enable the 2D localization mode.
+        """
+        return bool()
+
+    @enable_2d_ground_mode.setter
+    def enable_2d_ground_mode(self, enable_2d_ground_mode: Any) -> None:
+        pass
+
+    @property
+    def set_gravity_as_origin(self) -> bool:
+        """
+        Whether to override 2 of the 3 rotations from initial_world_transform using the IMU gravity.
+        Default: True
+        .. note::
+            This parameter does nothing on sl.MODEL.ZED since it does not have an IMU.
+        """
+        return bool()
+
+    @set_gravity_as_origin.setter
+    def set_gravity_as_origin(self, set_gravity_as_origin: Any) -> None:
+        pass
+
+    @property
+    def enable_area_memory(self) -> bool:
+        """
+        Whether the camera can remember its surroundings.
+        This helps correct positional tracking drift and can be helpful for positioning different cameras relative to one another in space.
+        \n Default: True
+
+        .. warning:: This mode requires more resources to run, but greatly improves tracking accuracy.
+        .. warning:: We recommend leaving it on by default.
+        """
+        return bool()
+
+    @enable_area_memory.setter
+    def enable_area_memory(self, enable_area_memory: Any) -> None:
+        pass
+
+    @property
+    def area_file_path(self) -> str:
+        """
+        Path of an area localization file that describes the surroundings (saved from a previous tracking session).
+        Default: (empty)
+        .. note::
+            Loading an area file will start a search phase, during which the camera will try to position itself in the previously learned area.
+
+        .. warning:: The area file describes a specific location.
If you are using an area file describing a different location, the tracking function will continuously search for a position and may not find a correct one. + .. warning:: The '.area' file can only be used with the same depth mode (sl.DEPTH_MODE) as the one used during area recording. + """ + return str() + + @area_file_path.setter + def area_file_path(self, area_file_path: Any) -> None: + pass + + @property + def mode(self) -> POSITIONAL_TRACKING_MODE: + """ + Positional tracking mode used. + Can be used to improve accuracy in some types of scene at the cost of longer runtime. + \n Default: sl.POSITIONAL_TRACKING_MODE.GEN_1 + """ + return POSITIONAL_TRACKING_MODE() + + @mode.setter + def mode(self, mode: Any) -> None: + pass + + @property + def set_floor_as_origin(self) -> bool: + """ + Initializes the tracking to be aligned with the floor plane to better position the camera in space. + Default: False + .. note:: + This launches floor plane detection in the background until a suitable floor plane is found. + + .. note:: + The tracking will start in sl.POSITIONAL_TRACKING_STATE.SEARCHING state. + + .. warning:: This features does not work with sl.MODEL.ZED since it needs an IMU to classify the floor. + .. warning:: The camera needs to look at the floor during initialization for optimum results. + """ + return bool() + + @set_floor_as_origin.setter + def set_floor_as_origin(self, set_floor_as_origin: Any) -> None: + pass + + @property + def set_as_static(self) -> bool: + """ + Whether to define the camera as static. + If true, it will not move in the environment. This allows you to set its position using initial_world_transform. + \n All ZED SDK functionalities requiring positional tracking will be enabled without additional computation. + \n sl.Camera.get_position() will return the value set as initial_world_transform. + Default: False + """ + return bool() + + @set_as_static.setter + def set_as_static(self, set_as_static: Any) -> None: + pass + + @property + def enable_imu_fusion(self) -> bool: + """ + Whether to enable the IMU fusion. + When set to False, only the optical odometry will be used. + \n Default: True + .. note:: + This setting has no impact on the tracking of a camera. + + .. note:: + sl.MODEL.ZED does not have an IMU. + """ + return bool() + + @enable_imu_fusion.setter + def enable_imu_fusion(self, enable_imu_fusion: Any) -> None: + pass + + @property + def enable_localization_only(self) -> bool: + """ + Whether to enable the area mode in localize only mode. + """ + return bool() + + @enable_localization_only.setter + def enable_localization_only(self, enable_localization_only: Any) -> None: + pass + + @property + def depth_min_range(self) -> float: + """ + Minimum depth used by the ZED SDK for positional tracking. + It may be useful for example if any steady objects are in front of the camera and may perturb the positional tracking algorithm. + \n Default: -1 (no minimum depth) + """ + return float() + + @depth_min_range.setter + def depth_min_range(self, depth_min_range: Any) -> None: + pass + + @property + def enable_pose_smoothing(self) -> bool: + """ + Whether to enable smooth pose correction for small drift correction. + Default: False + """ + return bool() + + @enable_pose_smoothing.setter + def enable_pose_smoothing(self, enable_pose_smoothing: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. 
+ :param _init_pos: Chosen initial camera position in the world frame (Transform) + :param _enable_memory: Activates enable_memory + :param _enable_pose_smoothing: Activates enable_pose_smoothing + :param _area_path: Chosen area_path + :param _set_floor_as_origin: Activates set_floor_as_origin + :param _enable_imu_fusion: Activates enable_imu_fusion + :param _set_as_static: Activates set_as_static + :param _depth_min_range: Activates depth_min_range + :param _set_gravity_as_origin: Activates set_gravity_as_origin + :param _mode: Chosen mode + + .. code-block:: text + + params = sl.PositionalTrackingParameters(init_pos=sl.Transform(), _enable_pose_smoothing=True) + """ + pass + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters. + :return: True if the file was successfully saved, otherwise False. + .. warning:: For security reasons, the file must not already exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated. + """ + return bool() + + def load(self, filename: str) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. + :param filename: Path to the file from which the parameters will be loaded. + :return: True if the file was successfully loaded, otherwise False. + """ + return bool() + + def initial_world_transform(self, init_pos = Transform()) -> Transform: + """ + Position of the camera in the world frame when the camera is started. + Use this sl.Transform to place the camera frame in the world frame. + \n Default: Identity matrix. + + .. note:: + The camera frame (which defines the reference frame for the camera) is by default positioned at the world frame when tracking is started. + """ + return Transform() + + def set_initial_world_transform(self, value: Transform) -> None: + """ + Set the position of the camera in the world frame when the camera is started. + :param value: Position of the camera in the world frame when the camera will start. + """ + pass + + +class STREAMING_CODEC(enum.Enum): + """ + Lists the different encoding types for image streaming. + + | Enumerator | | + |:---:|:---:| + | H264 | AVCHD/H264 encoding | + | H265 | HEVC/H265 encoding | + """ + H264 = enum.auto() + H265 = enum.auto() + LAST = enum.auto() + +class StreamingProperties: + """ + Class containing information about the properties of a streaming device. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def serial_number(self) -> int: + """ + Serial number of the streaming camera. + + Default: 0 + """ + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + @property + def ip(self) -> str: + """ + IP address of the streaming device. + + Default: "" + """ + return str() + + @ip.setter + def ip(self, ip: Any) -> None: + pass + + @property + def port(self) -> int: + """ + Streaming port of the streaming device. + + Default: 0 + """ + return int() + + @port.setter + def port(self, port: Any) -> None: + pass + + @property + def codec(self) -> STREAMING_CODEC: + """ + Current codec used for compression in streaming device. 
+
+ Default: sl.STREAMING_CODEC.H265
+ """
+ return STREAMING_CODEC()
+
+ @codec.setter
+ def codec(self, codec: Any) -> None:
+ pass
+
+ @property
+ def current_bitrate(self) -> int:
+ """
+ Current bitrate of encoding of the streaming device.
+
+ Default: 0
+ """
+ return int()
+
+ @current_bitrate.setter
+ def current_bitrate(self, current_bitrate: Any) -> None:
+ pass
+
+
+class StreamingParameters:
+ """
+ Class containing the options used to stream with the ZED SDK.
+
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def gop_size(self) -> int:
+ """
+ GOP size in number of frames.
+
+ Default: -1 (the GOP size will last at maximum 2 seconds, depending on camera FPS)
+ .. note::
+ The GOP size determines the maximum distance between IDR/I-frames. A very high GOP size will result in slightly more efficient compression, especially on static scenes, but latency will increase.
+
+ .. note::
+ Maximum value: 256
+ """
+ return int()
+
+ @gop_size.setter
+ def gop_size(self, gop_size: Any) -> None:
+ pass
+
+ @property
+ def adaptative_bitrate(self) -> bool:
+ """
+ Defines whether the adaptive bitrate is enabled.
+
+ Default: False
+ .. note::
+ Bitrate will be adjusted depending on the number of packets dropped during streaming.
+
+ .. note::
+ If activated, the bitrate can vary between [bitrate/4, bitrate].
+
+ .. warning:: Currently, the adaptive bitrate only works when the "sending" device is an NVIDIA Jetson (X1, X2, Xavier, Nano).
+ """
+ return bool()
+
+ @adaptative_bitrate.setter
+ def adaptative_bitrate(self, adaptative_bitrate: Any) -> None:
+ pass
+
+ @property
+ def target_framerate(self) -> int:
+ """
+ Framerate for the streaming output.
+
+ Default: 0 (camera framerate will be taken)
+ .. warning:: This framerate must be below or equal to the camera framerate.
+ .. warning:: Allowed framerates are 15, 30, 60 or 100 if possible.
+ .. warning:: Any other values will be discarded and camera FPS will be taken.
+ """
+ return int()
+
+ @target_framerate.setter
+ def target_framerate(self, target_framerate: Any) -> None:
+ pass
+
+ @property
+ def chunk_size(self) -> int:
+ """
+ Size of a single chunk.
+
+ Default: 16084
+ .. note::
+ Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long.
+
+ .. note::
+ You can lower the chunk_size value if the network drops a lot of packets: this will generate more chunks for a single image, but each chunk sent will be lighter, avoiding inside-chunk corruption.
+ .. note::
+ Increasing this value can decrease latency.
+
+
+ \n Note: Available range: [1024 - 65000]
+ """
+ return int()
+
+ @chunk_size.setter
+ def chunk_size(self, chunk_size: Any) -> None:
+ pass
+
+ @property
+ def port(self) -> int:
+ """
+ Port used for streaming.
+ .. warning:: Port must be an even number. Any odd number will be rejected.
+ .. warning:: Port must be opened.
+ """
+ return int()
+
+ @port.setter
+ def port(self, port: Any) -> None:
+ pass
+
+ @property
+ def codec(self) -> STREAMING_CODEC:
+ """
+ Encoding used for streaming.
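+
+ A minimal sender sketch (illustrative; assumes an opened ``zed`` camera):
+
+ .. code-block:: text
+
+ stream_params = sl.StreamingParameters()
+ stream_params.codec = sl.STREAMING_CODEC.H265
+ stream_params.port = 30000
+ err = zed.enable_streaming(stream_params)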
+ """
+ return STREAMING_CODEC()
+
+ @codec.setter
+ def codec(self, codec: Any) -> None:
+ pass
+
+ @property
+ def bitrate(self) -> int:
+ """
+ Defines the streaming bitrate in Kbits/s.
+ | STREAMING_CODEC | RESOLUTION | FPS | Bitrate (kbps) |
+ |:---:|:---:|:---:|:---:|
+ | H264 | HD2K | 15 | 8500 |
+ | H264 | HD1080 | 30 | 12500 |
+ | H264 | HD720 | 60 | 7000 |
+ | H265 | HD2K | 15 | 7000 |
+ | H265 | HD1080 | 30 | 11000 |
+ | H265 | HD720 | 60 | 6000 |
+
+ Default: 0 (it will be set to the best value depending on your resolution/FPS)
+ .. note::
+ Available range: [1000 - 60000]
+ """
+ return int()
+
+ @bitrate.setter
+ def bitrate(self, bitrate: Any) -> None:
+ pass
+
+ def __dealloc__(self) -> None:
+ """
+ Default constructor.
+
+ All the parameters are set to their default values.
+ :param codec: Chosen codec
+ :param port: Chosen port
+ :param bitrate: Chosen bitrate
+ :param gop_size: Chosen gop_size
+ :param adaptative_bitrate: Activates adaptative_bitrate
+ :param chunk_size: Chosen chunk_size
+ :param target_framerate: Chosen target_framerate
+
+ .. code-block:: text
+
+ params = sl.StreamingParameters(port=30000)
+ """
+ pass
+
+
+class RecordingParameters:
+ """
+ Class containing the options used to record.
+
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def transcode_streaming_input(self) -> bool:
+ """
+ Defines whether to decode and re-encode a streaming source.
+
+ Default: False
+ .. note::
+ If set to False, it will avoid decoding/re-encoding and convert the streaming input directly into an SVO file.
+
+ .. note::
+ This saves an encoding session and can be especially useful on NVIDIA GeForce cards where the number of encoding sessions is limited.
+
+ .. note::
+ compression_mode, target_framerate and bitrate will be ignored in this mode.
+ """
+ return bool()
+
+ @transcode_streaming_input.setter
+ def transcode_streaming_input(self, transcode_streaming_input: Any) -> None:
+ pass
+
+ @property
+ def target_framerate(self) -> int:
+ """
+ Framerate for the recording file.
+
+ Default: 0 (camera framerate will be taken)
+ .. warning:: This framerate must be below or equal to the camera framerate, and the camera framerate must be a multiple of the target framerate.
+ .. warning:: It means that it must respect `` camera_framerate%target_framerate == 0``.
+ .. warning:: Allowed framerates are 15, 30, 60 or 100 if possible.
+ .. warning:: Any other values will be discarded and camera FPS will be taken.
+ """
+ return int()
+
+ @target_framerate.setter
+ def target_framerate(self, target_framerate: Any) -> None:
+ pass
+
+ @property
+ def compression_mode(self) -> SVO_COMPRESSION_MODE:
+ """
+ Compression mode of the recording.
+
+ Default: sl.SVO_COMPRESSION_MODE.H264
+ """
+ return SVO_COMPRESSION_MODE()
+
+ @compression_mode.setter
+ def compression_mode(self, compression_mode: Any) -> None:
+ pass
+
+ @property
+ def bitrate(self) -> int:
+ """
+ Overrides the default bitrate of the SVO file, in kbits/s.
+
+ Default: 0 (the default values associated with the resolution)
+ .. note::
+ Only works if compression_mode is H264 or H265.
+
+ .. note::
+ Available range: 0 or [1000 - 60000]
+ """
+ return int()
+
+ @bitrate.setter
+ def bitrate(self, bitrate: Any) -> None:
+ pass
+
+ @property
+ def video_filename(self) -> str:
+ """
+ Filename of the file to save the recording into.
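+
+ A minimal recording sketch (illustrative; assumes an opened ``zed`` camera):
+
+ .. code-block:: text
+
+ rec_params = sl.RecordingParameters()
+ rec_params.video_filename = "output.svo"
+ rec_params.compression_mode = sl.SVO_COMPRESSION_MODE.H264
+ err = zed.enable_recording(rec_params)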
+ """ + return str() + + @video_filename.setter + def video_filename(self, video_filename: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default values. + :param video_filename: Chosen video_filename + :param compression_mode: Chosen compression_mode + :param target_framerate: Chosen target_framerate + :param bitrate: Chosen bitrate + :param transcode_streaming_input: Enables transcode_streaming_input + + .. code-block:: text + + params = sl.RecordingParameters(video_filename="record.svo",compression_mode=SVO_COMPRESSION_MODE.H264) + """ + pass + + +class SpatialMappingParameters: + """ + Class containing a set of parameters for the spatial mapping module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def save_texture(self) -> bool: + """ + Whether to save the texture. + If set to true, you will be able to apply the texture to your mesh after it is created. + \n Default: False + .. note:: + This option will consume more memory. + + .. note:: + This option is only available for sl.SPATIAL_MAP_TYPE.MESH. + """ + return bool() + + @save_texture.setter + def save_texture(self, save_texture: Any) -> None: + pass + + @property + def range_meter(self) -> float: + """ + Depth range in meters. + Can be different from the value set by sl.InitParameters.depth_maximum_distance. + .. note:: + Set to 0 by default. In this case, the range is computed from resolution_meter + + and from the current internal parameters to fit your application. + """ + return float() + + @range_meter.setter + def range_meter(self, range_meter: Any) -> None: + pass + + @property + def map_type(self) -> SPATIAL_MAP_TYPE: + """ + The type of spatial map to be created. + This dictates the format that will be used for the mapping (e.g. mesh, point cloud). + \n See sl.SPATIAL_MAP_TYPE. + """ + return SPATIAL_MAP_TYPE() + + @map_type.setter + def map_type(self, map_type: Any) -> None: + pass + + @property + def use_chunk_only(self) -> bool: + """ + Whether to only use chunks. + If set to False, you will ensure consistency between the mesh and its inner chunk data. + \n Default: False + .. note:: + Updating the mesh is time-consuming. + + .. note:: + Setting this to True results in better performance. + """ + return bool() + + @use_chunk_only.setter + def use_chunk_only(self, use_chunk_only: Any) -> None: + pass + + @property + def reverse_vertex_order(self) -> bool: + """ + Whether to inverse the order of the vertices of the triangles. + If your display process does not handle front and back face culling, you can use this to correct it. + \n Default: False + .. note:: + This option is only available for sl.SPATIAL_MAP_TYPE.MESH. + """ + return bool() + + @reverse_vertex_order.setter + def reverse_vertex_order(self, reverse_vertex_order: Any) -> None: + pass + + @property + def stability_counter(self) -> int: + """ + Control the integration rate of the current depth into the mapping process. + This parameter controls how many times a stable 3D points should be seen before it is integrated into the spatial mapping. 
+ \n Default: 0 (this will define the stability counter based on the mesh resolution, the higher the resolution, the higher the stability counter)
+ """
+ return int()
+
+ @stability_counter.setter
+ def stability_counter(self, stability_counter: Any) -> None:
+ pass
+
+ @property
+ def resolution_meter(self) -> float:
+ """
+ Spatial mapping resolution in meters.
+ Default: 0.05
+ .. note::
+ It should fit allowed_resolution.
+ """
+ return float()
+
+ @resolution_meter.setter
+ def resolution_meter(self, resolution_meter: Any) -> None:
+ pass
+
+ @property
+ def max_memory_usage(self) -> int:
+ """
+ The maximum CPU memory (in MB) allocated for the meshing process.
+ Default: 2048
+ """
+ return int()
+
+ @max_memory_usage.setter
+ def max_memory_usage(self, max_memory_usage: Any) -> None:
+ pass
+
+ def __dealloc__(self) -> None:
+ """
+ Default constructor.
+ Sets all parameters to their default and optimized values.
+ :param resolution: Chosen MAPPING_RESOLUTION
+ :param mapping_range: Chosen MAPPING_RANGE
+ :param max_memory_usage: Chosen max_memory_usage
+ :param save_texture: Activates save_texture
+ :param use_chunk_only: Activates use_chunk_only
+ :param reverse_vertex_order: Activates reverse_vertex_order
+ :param map_type: Chosen map_type
+
+ .. code-block:: text
+
+ params = sl.SpatialMappingParameters(resolution=sl.MAPPING_RESOLUTION.HIGH)
+ """
+ pass
+
+ def set_resolution(self, resolution = MAPPING_RESOLUTION.HIGH) -> None:
+ """
+ Sets the resolution to a sl.MAPPING_RESOLUTION preset.
+ :param resolution: The desired sl.MAPPING_RESOLUTION. Default: sl.MAPPING_RESOLUTION.HIGH
+ """
+ pass
+
+ def set_range(self, mapping_range = MAPPING_RANGE.AUTO) -> None:
+ """
+ Sets the range to a sl.MAPPING_RANGE preset.
+ :param mapping_range: The desired sl.MAPPING_RANGE. Default: sl.MAPPING_RANGE.AUTO
+ """
+ pass
+
+ def get_range_preset(self, mapping_range = MAPPING_RANGE.AUTO) -> float:
+ """
+ Returns the value corresponding to a sl.MAPPING_RANGE preset in meters.
+ :param mapping_range: The desired sl.MAPPING_RANGE. Default: sl.MAPPING_RANGE.AUTO
+ :return: The value of mapping_range in meters.
+ """
+ return float()
+
+ def get_resolution_preset(self, resolution = MAPPING_RESOLUTION.HIGH) -> float:
+ """
+ Returns the value corresponding to a sl.MAPPING_RESOLUTION preset in meters.
+ :param resolution: The desired sl.MAPPING_RESOLUTION. Default: sl.MAPPING_RESOLUTION.HIGH
+ :return: The value of resolution in meters.
+ """
+ return float()
+
+ def get_recommended_range(self, resolution, py_cam: Camera) -> float:
+ """
+ Returns the recommended maximum depth value corresponding to a resolution.
+ :param resolution: The desired resolution, either defined by a sl.MAPPING_RESOLUTION preset or a resolution value in meters.
+ :param py_cam: The sl.Camera object which will run the spatial mapping.
+ :return: The maximum value of depth in meters.
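+
+ A usage sketch (illustrative; ``zed`` is an opened sl.Camera):
+
+ .. code-block:: text
+
+ mapping_params = sl.SpatialMappingParameters()
+ mapping_params.set_resolution(sl.MAPPING_RESOLUTION.HIGH)
+ mapping_params.range_meter = mapping_params.get_recommended_range(sl.MAPPING_RESOLUTION.HIGH, zed)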
+ """
+ return float()
+
+ def allowed_range(self) -> np.array[float]:
+ """
+ The maximum depth allowed by spatial mapping:
+ - allowed_range.first is the minimum value allowed
+ - allowed_range.second is the maximum value allowed
+ """
+ return np.array[float]()
+
+ def allowed_resolution(self) -> np.array[float]:
+ """
+ The resolution allowed by the spatial mapping:
+ - allowed_resolution.first is the minimum value allowed
+ - allowed_resolution.second is the maximum value allowed
+ """
+ return np.array[float]()
+
+ def save(self, filename: str) -> bool:
+ """
+ Saves the current set of parameters into a file to be reloaded with the load() method.
+ :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set).
+ :return: True if the file was successfully saved, otherwise False.
+ .. warning:: For security reasons, the file must not already exist.
+ .. warning:: In case a file already exists, the method will return False and the existing file will not be updated.
+ """
+ return bool()
+
+ def load(self, filename: str) -> bool:
+ """
+ Loads a set of parameters from the values contained in a previously saved file (see save()).
+ :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not detected).
+ :return: True if the file was successfully loaded, otherwise False.
+ """
+ return bool()
+
+
+class Pose:
+ """
+ Class containing positional tracking data giving the position and orientation of the camera in 3D space.
+
+ Different representations of position and orientation can be retrieved, along with timestamp and pose confidence.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @pose_covariance.setter
+ def pose_covariance(self, pose_covariance: Any) -> None:
+ pass
+
+ @property
+ def pose_confidence(self) -> int:
+ """
+ Confidence/quality of the pose estimation for the target frame.
+ A confidence metric of the tracking [0-100] with:
+ - 0: tracking is lost
+ - 100: tracking can be fully trusted
+ """
+ return int()
+
+ @pose_confidence.setter
+ def pose_confidence(self, pose_confidence: Any) -> None:
+ pass
+
+ @property
+ def valid(self) -> bool:
+ """
+ Whether the tracking is activated or not.
+ .. note::
+ You should check that first if something is wrong.
+ """
+ return bool()
+
+ @valid.setter
+ def valid(self, valid: Any) -> None:
+ pass
+
+ @twist.setter
+ def twist(self, twist: Any) -> None:
+ pass
+
+ @property
+ def timestamp(self) -> Timestamp:
+ """
+ sl.Timestamp of the sl.Pose.
+ This timestamp should be compared with the camera timestamp for synchronization.
+ """
+ return Timestamp()
+
+ @timestamp.setter
+ def timestamp(self, timestamp: Any) -> None:
+ pass
+
+ @twist_covariance.setter
+ def twist_covariance(self, twist_covariance: Any) -> None:
+ pass
+
+ def init_pose(self, pose: Pose) -> None:
+ """
+ Deep copy from another sl.Pose.
+ :param pose: sl.Pose to copy.
+ """
+ pass
+
+ def init_transform(self, pose_data: Transform, timestamp = 0, confidence = 0) -> None:
+ """
+ Initializes the sl.Pose from a sl.Transform.
+ :param pose_data: sl.Transform containing pose data to copy.
+ :param timestamp: Timestamp of the pose data.
+ :param confidence: Confidence of the pose data.
+ """
+ pass
+
+ def get_translation(self, py_translation = Translation()) -> Translation:
+ """
+ Returns the sl.Translation corresponding to the current sl.Pose.
+ :param py_translation: sl.Translation to be returned.
It creates one by default. + :return: sl.Translation filled with values from the sl.Pose. + """ + return Translation() + + def get_orientation(self, py_orientation = Orientation()) -> Orientation: + """ + Returns the sl.Orientation corresponding to the current sl.Pose. + :param py_orientation: sl.Orientation to be returned. It creates one by default. + :return: sl.Orientation filled with values from the sl.Pose. + """ + return Orientation() + + def get_rotation_matrix(self, py_rotation = Rotation()) -> Rotation: + """ + Returns the sl.Rotation corresponding to the current sl.Pose. + :param py_rotation: sl.Rotation to be returned. It creates one by default. + :return: sl.Rotation filled with values from the sl.Pose. + """ + return Rotation() + + def get_rotation_vector(self) -> np.array[float]: + """ + Returns the the 3x1 rotation vector (obtained from 3x3 rotation matrix using Rodrigues formula) corresponding to the current sl.Pose. + :param py_rotation: sl.Rotation to be returned. It creates one by default. + :return: Rotation vector (NumPy array) created from the sl.Pose values. + """ + return np.array[float]() + + def get_euler_angles(self, radian = True) -> np.array[float]: + """ + Converts the rotation component of the sl.Pose into Euler angles. + :param radian: Whether the angle will be returned in radian or degree. Default: True + :return: Euler angles (Numpy array) created from the sl.Pose values representing the rotations around the X, Y and Z axes using YZX convention. + """ + return np.array[float]() + + def pose_data(self, pose_data = Transform()) -> Transform: + """ + sl.Transform containing the rotation and translation data of the sl.Pose. + :param pose_data: sl.Transform to be returned. It creates one by default. + :return: sl.Transform containing the rotation and translation data of the sl.Pose. + """ + return Transform() + + def pose_covariance(self) -> np.array[float]: + """ + 6x6 pose covariance matrix (NumPy array) of translation (the first 3 values) and rotation in so3 (the last 3 values). + .. note:: + Computed only if PositionalTrackingParameters.enable_spatial_memory is disabled. + """ + return np.array[float]() + + def twist(self) -> np.array[float]: + """ + Twist of the camera available in reference camera. + This expresses velocity in free space, broken into its linear and angular parts. + """ + return np.array[float]() + + def twist_covariance(self) -> np.array[float]: + """ + Row-major representation of the 6x6 twist covariance matrix of the camera. + This expresses the uncertainty of the twist. + """ + return np.array[float]() + + +class CAMERA_MOTION_STATE(enum.Enum): + """ + Lists different states of the camera motion. + + | Enumerator | | + |:---:|:---:| + | STATIC | The camera is static. | + | MOVING | The camera is moving. | + | FALLING | The camera is falling. | + """ + STATIC = enum.auto() + MOVING = enum.auto() + FALLING = enum.auto() + LAST = enum.auto() + +class SENSOR_LOCATION(enum.Enum): + """ + Lists possible locations of temperature sensors. + + | Enumerator | | + |:---:|:---:| + | IMU | The temperature sensor is in the IMU. | + | BAROMETER | The temperature sensor is in the barometer. | + | ONBOARD_LEFT | The temperature sensor is next to the left image sensor. | + | ONBOARD_RIGHT | The temperature sensor is next to the right image sensor. 
|
+ """
+ IMU = enum.auto()
+ BAROMETER = enum.auto()
+ ONBOARD_LEFT = enum.auto()
+ ONBOARD_RIGHT = enum.auto()
+ LAST = enum.auto()
+
+class BarometerData:
+ """
+ Class containing data from the barometer sensor.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def pressure(self) -> float:
+ """
+ Ambient air pressure in hectopascal (hPa).
+ """
+ return float()
+
+ @pressure.setter
+ def pressure(self, pressure: Any) -> None:
+ pass
+
+ @property
+ def effective_rate(self) -> float:
+ """
+ Realtime data acquisition rate in hertz (Hz).
+ """
+ return float()
+
+ @effective_rate.setter
+ def effective_rate(self, effective_rate: Any) -> None:
+ pass
+
+ @property
+ def relative_altitude(self) -> float:
+ """
+ Relative altitude from first camera position (at sl.Camera.open() time).
+ """
+ return float()
+
+ @relative_altitude.setter
+ def relative_altitude(self, relative_altitude: Any) -> None:
+ pass
+
+ @property
+ def is_available(self) -> bool:
+ """
+ Whether the barometer sensor is available in your camera.
+ """
+ return bool()
+
+ @is_available.setter
+ def is_available(self, is_available: Any) -> None:
+ pass
+
+ @property
+ def timestamp(self) -> Timestamp:
+ """
+ Data acquisition timestamp.
+ """
+ return Timestamp()
+
+ @timestamp.setter
+ def timestamp(self, timestamp: Any) -> None:
+ pass
+
+
+class TemperatureData:
+ """
+ Class containing data from the temperature sensors.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ def get(self, location) -> float:
+ """
+ Gets the temperature value at a temperature sensor location.
+ :param location: Location of the temperature sensor to request.
+ :return: Temperature at the requested location.
+ """
+ return float()
+
+
+class HEADING_STATE(enum.Enum):
+ """
+ Lists the different states of the magnetic heading.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | GOOD | The heading is reliable and not affected by iron interferences. |
+ | OK | The heading is reliable, but affected by slight iron interferences. |
+ | NOT_GOOD | The heading is not reliable because it is affected by strong iron interferences. |
+ | NOT_CALIBRATED | The magnetometer has not been calibrated. |
+ | MAG_NOT_AVAILABLE | The magnetometer sensor is not available. |
+ """
+ GOOD = enum.auto()
+ OK = enum.auto()
+ NOT_GOOD = enum.auto()
+ NOT_CALIBRATED = enum.auto()
+ MAG_NOT_AVAILABLE = enum.auto()
+ LAST = enum.auto()
+
+class MagnetometerData:
+ """
+ Class containing data from the magnetometer sensor.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def magnetic_heading_state(self) -> HEADING_STATE:
+ """
+ State of magnetic_heading.
+ """
+ return HEADING_STATE()
+
+ @magnetic_heading_state.setter
+ def magnetic_heading_state(self, magnetic_heading_state: Any) -> None:
+ pass
+
+ @property
+ def magnetic_heading_accuracy(self) -> float:
+ """
+ Accuracy of the magnetic_heading measure in the range [0.0, 1.0].
+ .. note::
+ A negative value means that the magnetometer must be calibrated using the ZED Sensor Viewer tool.
+ """
+ return float()
+
+ @magnetic_heading_accuracy.setter
+ def magnetic_heading_accuracy(self, magnetic_heading_accuracy: Any) -> None:
+ pass
+
+ @property
+ def effective_rate(self) -> float:
+ """
+ Realtime data acquisition rate in hertz (Hz).
+ """
+ return float()
+
+ @effective_rate.setter
+ def effective_rate(self, effective_rate: Any) -> None:
+ pass
+
+ @property
+ def magnetic_heading(self) -> float:
+ """
+ Camera heading in degrees relative to the magnetic North Pole.
+ .. note::
+ The magnetic North Pole has an offset with respect to the geographic North Pole, depending on the geographic position of the camera.
+
+ .. note::
+ To get a correct magnetic heading, the magnetometer sensor must be calibrated using the ZED Sensor Viewer tool.
+ """
+ return float()
+
+ @magnetic_heading.setter
+ def magnetic_heading(self, magnetic_heading: Any) -> None:
+ pass
+
+ @property
+ def timestamp(self) -> int:
+ """
+ Data acquisition timestamp.
+ """
+ return int()
+
+ @timestamp.setter
+ def timestamp(self, timestamp: Any) -> None:
+ pass
+
+ @property
+ def is_available(self) -> bool:
+ """
+ Whether the magnetometer sensor is available in your camera.
+ """
+ return bool()
+
+ @is_available.setter
+ def is_available(self, is_available: Any) -> None:
+ pass
+
+ def get_magnetic_field_uncalibrated(self) -> np.array[float]:
+ """
+ Gets the uncalibrated magnetic field local vector in microtesla (μT).
+ .. note::
+ The magnetometer raw values are affected by soft and hard iron interferences.
+
+ .. note::
+ The sensor must be calibrated by placing the camera in the working environment and using the ZED Sensor Viewer tool.
+
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return np.array[float]()
+
+ def get_magnetic_field_calibrated(self) -> np.array[float]:
+ """
+ Gets the magnetic field local vector in microtesla (μT).
+ .. note::
+ To calibrate the magnetometer sensor, please use the ZED Sensor Viewer tool after placing the camera in the final operating environment.
+ """
+ return np.array[float]()
+
+
+class SensorsData:
+ """
+ Class containing all sensors data (except image sensors) to be used for positional tracking or environment study.
+
+ .. note::
+ Some data are not available in SVO and streaming input mode.
+
+ .. note::
+ They are specified by a note "Not available in SVO or STREAM mode." in the documentation of a specific data.
+
+ .. note::
+ If nothing is mentioned in the documentation, they are available in all input modes.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def camera_moving_state(self) -> CAMERA_MOTION_STATE:
+ """
+ Motion state of the camera.
+ """
+ return CAMERA_MOTION_STATE()
+
+ @camera_moving_state.setter
+ def camera_moving_state(self, camera_moving_state: Any) -> None:
+ pass
+
+ @property
+ def image_sync_trigger(self) -> int:
+ """
+ Indicates if the sensors data has been taken during a frame capture on sensor.
+ If the value is 1, the data has been retrieved during a left sensor frame acquisition (the time precision is linked to the IMU rate, therefore 800Hz == 1.3ms).
+ \n If the value is 0, the data has not been taken during a frame acquisition.
+ """
+ return int()
+
+ @image_sync_trigger.setter
+ def image_sync_trigger(self, image_sync_trigger: Any) -> None:
+ pass
+
+ def init_sensorsData(self, sensorsData: SensorsData) -> None:
+ """
+ Copy constructor.
+ :param sensorsData: sl.SensorsData object to copy.
+ """
+ pass
+
+ def get_imu_data(self) -> IMUData:
+ """
+ Gets the IMU data.
+ :return: sl.IMUData containing the IMU data.
+ """
+ return IMUData()
+
+ def get_barometer_data(self) -> BarometerData:
+ """
+ Gets the barometer data.
+ :return: sl.BarometerData containing the barometer data.
+ """
+ return BarometerData()
+
+ def get_magnetometer_data(self) -> MagnetometerData:
+ """
+ Gets the magnetometer data.
+ :return: sl.MagnetometerData containing the magnetometer data.
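+
+ A retrieval sketch (illustrative; assumes an opened ``zed`` camera):
+
+ .. code-block:: text
+
+ sensors = sl.SensorsData()
+ if zed.get_sensors_data(sensors, sl.TIME_REFERENCE.CURRENT) == sl.ERROR_CODE.SUCCESS:
+ mag = sensors.get_magnetometer_data()
+ print("Heading:", mag.magnetic_heading, "State:", mag.magnetic_heading_state)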
+ """
+ return MagnetometerData()
+
+ def get_temperature_data(self) -> TemperatureData:
+ """
+ Gets the temperature data.
+ :return: sl.TemperatureData containing the temperature data.
+ """
+ return TemperatureData()
+
+
+class IMUData:
+ """
+ Class containing data from the IMU sensor.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def is_available(self) -> bool:
+ """
+ Whether the IMU sensor is available in your camera.
+ """
+ return bool()
+
+ @is_available.setter
+ def is_available(self, is_available: Any) -> None:
+ pass
+
+ @property
+ def timestamp(self) -> int:
+ """
+ Data acquisition timestamp.
+ """
+ return int()
+
+ @timestamp.setter
+ def timestamp(self, timestamp: Any) -> None:
+ pass
+
+ @property
+ def effective_rate(self) -> float:
+ """
+ Realtime data acquisition rate in hertz (Hz).
+ """
+ return float()
+
+ @effective_rate.setter
+ def effective_rate(self, effective_rate: Any) -> None:
+ pass
+
+ def get_angular_velocity_uncalibrated(self, angular_velocity_uncalibrated = [0, 0, 0]) -> list[float]:
+ """
+ Gets the angular velocity vector (3x1) of the gyroscope in deg/s (uncorrected from the IMU calibration).
+ :param angular_velocity_uncalibrated: List to be returned. It creates one by default.
+ :return: List filled with the raw angular velocity vector.
+ .. note::
+ The value is the exact raw value from the IMU.
+
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return list[float]()
+
+ def get_angular_velocity(self, angular_velocity = [0, 0, 0]) -> list[float]:
+ """
+ Gets the angular velocity vector (3x1) of the gyroscope in deg/s.
+ The value is corrected from bias, scale and misalignment.
+ :param angular_velocity: List to be returned. It creates one by default.
+ :return: List filled with the angular velocity vector.
+ .. note::
+ The value can be directly ingested in an IMU fusion algorithm to extract a quaternion.
+
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return list[float]()
+
+ def get_linear_acceleration(self, linear_acceleration = [0, 0, 0]) -> list[float]:
+ """
+ Gets the linear acceleration vector (3x1) of the accelerometer in m/s².
+ The value is corrected from bias, scale and misalignment.
+ :param linear_acceleration: List to be returned. It creates one by default.
+ :return: List filled with the linear acceleration vector.
+ .. note::
+ The value can be directly ingested in an IMU fusion algorithm to extract a quaternion.
+
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return list[float]()
+
+ def get_linear_acceleration_uncalibrated(self, linear_acceleration_uncalibrated = [0, 0, 0]) -> list[float]:
+ """
+ Gets the linear acceleration vector (3x1) of the accelerometer in m/s² (uncorrected from the IMU calibration).
+ :param linear_acceleration_uncalibrated: List to be returned. It creates one by default.
+ :return: List filled with the raw linear acceleration vector.
+ .. note::
+ The value is the exact raw value from the IMU.
+
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return list[float]()
+
+ def get_angular_velocity_covariance(self, angular_velocity_covariance = Matrix3f()) -> Matrix3f:
+ """
+ Gets the covariance matrix of the angular velocity of the gyroscope in deg/s (get_angular_velocity()).
+ :param angular_velocity_covariance: sl.Matrix3f to be returned. It creates one by default.
+ :return: sl.Matrix3f filled with the covariance matrix of the angular velocity.
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return Matrix3f()
+
+ def get_linear_acceleration_covariance(self, linear_acceleration_covariance = Matrix3f()) -> Matrix3f:
+ """
+ Gets the covariance matrix of the linear acceleration of the accelerometer in m/s² (get_linear_acceleration()).
+ :param linear_acceleration_covariance: sl.Matrix3f to be returned. It creates one by default.
+ :return: sl.Matrix3f filled with the covariance matrix of the linear acceleration.
+ .. note::
+ Not available in SVO or STREAM mode.
+ """
+ return Matrix3f()
+
+ def get_pose_covariance(self, pose_covariance = Matrix3f()) -> Matrix3f:
+ """
+ Covariance matrix of the IMU pose (get_pose()).
+ :param pose_covariance: sl.Matrix3f to be returned. It creates one by default.
+ :return: sl.Matrix3f filled with the covariance matrix.
+ """
+ return Matrix3f()
+
+ def get_pose(self, pose = Transform()) -> Transform:
+ """
+ IMU pose (IMU 6-DoF fusion).
+ :param pose: sl.Transform() to be returned. It creates one by default.
+ :return: sl.Transform filled with the IMU pose.
+ """
+ return Transform()
+
+
+class HealthStatus:
+ """
+ Structure containing the self-diagnostic results of the image/depth modules.
+ This information can be retrieved by sl::Camera::get_health_status(), and enabled by sl::InitParameters::enable_image_validity_check.
+ \n
+ The default value of sl::InitParameters::enable_image_validity_check enables the fastest setting;
+ the integer given can be increased to include more advanced and heavier processing to detect issues (up to 3).
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def low_depth_reliability(self) -> bool:
+ """
+ This status indicates low depth map reliability.
+ If the images are unreliable or if the scene conditions are very challenging, this status reports a warning.
+ It is based on the depth confidence and the general depth distribution. Typical causes are an obstructed eye (including very close objects or
+ strong occlusions) or degraded conditions like heavy fog/water on the optics.
+ """
+ return bool()
+
+ @low_depth_reliability.setter
+ def low_depth_reliability(self, low_depth_reliability: Any) -> None:
+ pass
+
+ @property
+ def enabled(self) -> bool:
+ """
+ Indicates if the health check is enabled.
+ """
+ return bool()
+
+ @enabled.setter
+ def enabled(self, enabled: Any) -> None:
+ pass
+
+ @property
+ def low_image_quality(self) -> bool:
+ """
+ This status indicates poor image quality.
+ It can indicate a camera issue, like incorrect manual video settings, damaged hardware, a corrupted video stream from the camera,
+ dirt or other partial or total occlusion, or a stuck ISP (black/white/green/purple images, incorrect exposure, etc.), as well as blurry images.
+ It also includes widely different left and right images, which leads to unavailable depth information.
+ Very low light is reported both by this status and by the dedicated HealthStatus::low_lighting.
+
+ .. note::
+ Frame tearing is currently not detected. Advanced blur detection requires heavier processing and is enabled only when setting InitParameters::enable_image_validity_check to 3 and above.
+ """
+ return bool()
+
+ @low_image_quality.setter
+ def low_image_quality(self, low_image_quality: Any) -> None:
+ pass
+
+ @property
+ def low_motion_sensors_reliability(self) -> bool:
+ """
+ This status indicates a motion sensor data reliability issue.
+ This indicates the IMU is providing low quality data.
Possible underlying causes include data stream issues such as corrupted data,
+ timestamp inconsistency, resonance frequencies, saturated sensors / very high acceleration or rotation, and shocks.
+ """
+ return bool()
+
+ @low_motion_sensors_reliability.setter
+ def low_motion_sensors_reliability(self, low_motion_sensors_reliability: Any) -> None:
+ pass
+
+ @property
+ def low_lighting(self) -> bool:
+ """
+ This status indicates a low-light scene.
+ As the cameras are passive sensors working in the visible range, they require some external light to operate.
+ This status warns if the lighting conditions become suboptimal or worse.
+ It is based on the scene illuminance in lux for the ZED X camera series (available with VIDEO_SETTINGS::SCENE_ILLUMINANCE).
+ For other camera models or when using SVO files, it is based on computer vision processing of the image characteristics.
+ """
+ return bool()
+
+ @low_lighting.setter
+ def low_lighting(self, low_lighting: Any) -> None:
+ pass
+
+
+class RecordingStatus:
+ """
+ Class containing information about the status of the recording.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def average_compression_time(self) -> float:
+ """
+ Average compression time in milliseconds since the beginning of the recording.
+ """
+ return float()
+
+ @average_compression_time.setter
+ def average_compression_time(self, average_compression_time: Any) -> None:
+ pass
+
+ @property
+ def status(self) -> bool:
+ """
+ Status of the current frame.
+
+ True for success or False if the frame could not be written in the SVO file.
+ """
+ return bool()
+
+ @status.setter
+ def status(self, status: Any) -> None:
+ pass
+
+ @property
+ def is_recording(self) -> bool:
+ """
+ Reports if the recording has been enabled.
+ """
+ return bool()
+
+ @is_recording.setter
+ def is_recording(self, is_recording: Any) -> None:
+ pass
+
+ @property
+ def is_paused(self) -> bool:
+ """
+ Reports if the recording has been paused.
+ """
+ return bool()
+
+ @is_paused.setter
+ def is_paused(self, is_paused: Any) -> None:
+ pass
+
+ @property
+ def number_frames_ingested(self) -> int:
+ """
+ Number of frames ingested in SVO encoding/writing.
+ """
+ return int()
+
+ @number_frames_ingested.setter
+ def number_frames_ingested(self, number_frames_ingested: Any) -> None:
+ pass
+
+ @property
+ def current_compression_time(self) -> float:
+ """
+ Compression time for the current frame in milliseconds.
+ """
+ return float()
+
+ @current_compression_time.setter
+ def current_compression_time(self, current_compression_time: Any) -> None:
+ pass
+
+ @property
+ def number_frames_encoded(self) -> int:
+ """
+ Number of frames effectively encoded and written. Might be different from the number of frames ingested; the difference shows the encoder latency.
+ """
+ return int()
+
+ @number_frames_encoded.setter
+ def number_frames_encoded(self, number_frames_encoded: Any) -> None:
+ pass
+
+ @property
+ def average_compression_ratio(self) -> float:
+ """
+ Average compression ratio (% of raw size) since the beginning of the recording.
+ """
+ return float()
+
+ @average_compression_ratio.setter
+ def average_compression_ratio(self, average_compression_ratio: Any) -> None:
+ pass
+
+ @property
+ def current_compression_ratio(self) -> float:
+ """
+ Compression ratio (% of raw size) for the current frame.
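+
+ A polling sketch (illustrative; assumes recording was enabled on an opened ``zed``):
+
+ .. code-block:: text
+
+ status = zed.get_recording_status()
+ if status.is_recording and status.status:
+ print("Current ratio:", status.current_compression_ratio, "%")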
+ """ + return float() + + @current_compression_ratio.setter + def current_compression_ratio(self, current_compression_ratio: Any) -> None: + pass + + +class Camera: + """ + This class serves as the primary interface between the camera and the various features provided by the SDK. + It enables seamless integration and access to a wide array of capabilities, including video streaming, depth sensing, object tracking, mapping, and much more. + + A standard program will use the Camera class like this: + .. code-block:: text + + + import pyzed.sl as sl + + def main(): + # --- Initialize a Camera object and open the ZED + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode for USB cameras + # init_params.camera_resolution = sl.RESOLUTION.HD1200 # Use HD1200 video mode for GMSL cameras + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + runtime_param = sl.RuntimeParameters() + + # --- Main loop grabbing images and depth values + # Capture 50 frames and stop + i = 0 + image = sl.Mat() + depth = sl.Mat() + while i < 50 : + # Grab an image + if zed.grab(runtime_param) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + # Display a pixel color + zed.retrieve_image(image, sl.VIEW.LEFT) # Get the left image + err, center_rgb = image.get_value(image.get_width() / 2, image.get_height() / 2) + if err == sl.ERROR_CODE.SUCCESS: + print("Image ", i, " center pixel R:", int(center_rgb[0]), " G:", int(center_rgb[1]), " B:", int(center_rgb[2])) + else: + print("Image ", i, " error:", err) + + # Display a pixel depth + zed.retrieve_measure(depth, sl.MEASURE.DEPTH) # Get the depth map + err, center_depth = depth.get_value(depth.get_width() / 2, depth.get_height() /2) + if err == sl.ERROR_CODE.SUCCESS: + print("Image ", i," center depth:", center_depth) + else: + print("Image ", i, " error:", err) + + i = i+1 + + # --- Close the Camera + zed.close() + return 0 + + if __name__ == "__main__": + main() + + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def close(self) -> None: + """ + Close an opened camera. + + If open() has been called, this method will close the connection to the camera (or the SVO file) and free the corresponding memory. + + If open() wasn't called or failed, this method won't have any effect. + + .. note:: + If an asynchronous task is running within the Camera object, like save_area_map(), this method will wait for its completion. + + .. note:: + To apply a new InitParameters, you will need to close the camera first and then open it again with the new InitParameters values. + + .. warning:: If the CUDA context was created by open(), this method will destroy it. + .. warning:: Therefore you need to make sure to delete your GPU sl.Mat objects before the context is destroyed. + """ + pass + + def open(self, py_init = None) -> ERROR_CODE: + """ + Opens the ZED camera from the provided InitParameters. + The method will also check the hardware requirements and run a self-calibration. + :param py_init: A structure containing all the initial parameters. Default: a preset of InitParameters. + :return: An error code giving information about the internal process. If ERROR_CODE.SUCCESS is returned, the camera is ready to use. 
Every other code indicates an error and the program should be stopped. + + Here is the proper way to call this function: + + .. code-block:: text + + zed = sl.Camera() # Create a ZED camera object + + init_params = sl.InitParameters() # Set configuration parameters + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if (err != sl.ERROR_CODE.SUCCESS) : + print(repr(err)) # Display the error + exit(-1) + + + .. note:: + If you are having issues opening a camera, the diagnostic tool provided in the SDK can help you identify to problems. + + - **Windows:** C:\\Program Files (x86)\\ZED SDK\\tools\\ZED Diagnostic.exe + - **Linux:** /usr/local/zed/tools/ZED Diagnostic + .. note:: + If this method is called on an already opened camera, close() will be called. + """ + return ERROR_CODE() + + def is_opened(self) -> bool: + """ + Reports if the camera has been successfully opened. + It has the same behavior as checking if open() returns ERROR_CODE.SUCCESS. + :return: True if the ZED camera is already setup, otherwise false. + """ + return bool() + + def read(self) -> ERROR_CODE: + """ + Read the latest images and IMU from the camera and rectify the images. + + This method is meant to be called frequently in the main loop of your application. + + .. note:: + If no new frames is available until timeout is reached, read() will return ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" since the camera has probably been disconnected. + + .. note:: + Returned errors can be displayed using toString(). + + + :return: ERROR_CODE "ERROR_CODE::SUCCESS" means that no problem was encountered. + """ + return ERROR_CODE() + + def grab(self, py_runtime = None) -> ERROR_CODE: + """ + This method will grab the latest images from the camera, rectify them, and compute the retrieve_measure() "measurements" based on the RuntimeParameters provided (depth, point cloud, tracking, etc.) + + As measures are created in this method, its execution can last a few milliseconds, depending on your parameters and your hardware. + \n The exact duration will mostly depend on the following parameters: + + - InitParameters.enable_right_side_measure : Activating this parameter increases computation time. + - InitParameters.camera_resolution : Lower resolutions are faster to compute. + - enable_positional_tracking() : Activating the tracking is an additional load. + - RuntimeParameters.enable_depth : Avoiding the depth computation must be faster. However, it is required by most SDK features (tracking, spatial mapping, plane estimation, etc.) + - InitParameters.depth_mode : DEPTH_MODE.PERFORMANCE will run faster than DEPTH_MODE.ULTRA. + - InitParameters.depth_stabilization : Stabilizing the depth requires an additional computation load as it enables tracking. + + This method is meant to be called frequently in the main loop of your application. + .. note:: + Since ZED SDK 3.0, this method is blocking. It means that grab() will wait until a new frame is detected and available. + + .. note:: + If no new frames is available until timeout is reached, grab() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected. + + + :param py_runtime: A structure containing all the runtime parameters. Default: a preset of RuntimeParameters. + :return: ERROR_CODE.SUCCESS means that no problem was encountered. + .. note:: + Returned errors can be displayed using ``str()``. + + + .. 
code-block:: text + + # Set runtime parameters after opening the camera + runtime_param = sl.RuntimeParameters() + + image = sl.Mat() + while True: + # Grab an image + if zed.grab(runtime_param) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + zed.retrieve_image(image, sl.VIEW.LEFT) # Get the left image + # Use the image for your application + """ + return ERROR_CODE() + + def retrieve_image(self, py_mat, view: VIEW = VIEW.LEFT, mem_type: MEM = MEM.CPU, resolution = None) -> ERROR_CODE: + """ + Retrieves images from the camera (or SVO file). + + Multiple images are available along with a view of various measures for display purposes. + \n Available images and views are listed here. + \n As an example, VIEW.DEPTH can be used to get a gray-scale version of the depth map, but the actual depth values can be retrieved using retrieve_measure() . + \n + \n **Pixels** + \n Most VIEW modes output image with 4 channels as BGRA (Blue, Green, Red, Alpha), for more information see enum VIEW + \n + \n **Memory** + \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called. + \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy. + \n If the provided sl.Mat object is already allocated and matches the requested image format, memory won't be re-allocated. + \n + \n **Image size** + \n By default, images are returned in the resolution provided by Resolution "get_camera_information().camera_configuration.resolution". + \n However, you can request custom resolutions. For example, requesting a smaller image can help you speed up your application. + .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested. + + :param py_mat: The sl.Mat to store the image. (Direction: out) + :param view: Defines the image you want (see VIEW). Default: VIEW.LEFT. (Direction: in) + :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU. (Direction: in) + :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution "Resolution(0,0)", the camera resolution will be taken. Default: (0,0). (Direction: in) + :return: ERROR_CODE.SUCCESS if the method succeeded. + :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example). + :return: ERROR_CODE.INVALID_RESOLUTION if the resolution is higher than one provided by Resolution "get_camera_information().camera_configuration.resolution". + :return: ERROR_CODE.FAILURE if another error occurred. + + .. note:: + As this method retrieves the images grabbed by the grab() method, it should be called afterward. + + + .. 
code-block:: text
+
+ # create sl.Mat objects to store the images
+ left_image = sl.Mat()
+ while True:
+ # Grab an image
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
+ zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image
+
+ # Display the center pixel colors
+ err, left_center = left_image.get_value(left_image.get_width() / 2, left_image.get_height() / 2)
+ if err == sl.ERROR_CODE.SUCCESS:
+ print("left_image center pixel R:", int(left_center[0]), " G:", int(left_center[1]), " B:", int(left_center[2]))
+ else:
+ print("error:", err)
+ """
+ return ERROR_CODE()
+
+ def retrieve_measure(self, py_mat, measure: MEASURE = MEASURE.DEPTH, mem_type: MEM = MEM.CPU, resolution = None) -> ERROR_CODE:
+ """
+ Computed measures, like depth, point cloud, or normals, can be retrieved using this method.
+
+ Multiple measures are available after a grab() call. A full list is available here.
+
+ \n **Memory**
+ \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called.
+ \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy.
+ \n If the provided Mat object is already allocated and matches the requested image format, memory won't be re-allocated.
+
+ \n **Measure size**
+ \n By default, measures are returned in the resolution provided by get_camera_information() in CameraInformations.camera_resolution .
+ \n However, custom resolutions can be requested. For example, requesting a smaller measure can help you speed up your application.
+ .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested.
+
+ :param py_mat: The sl.Mat to store the measures. (Direction: out)
+ :param measure: Defines the measure you want (see MEASURE). Default: MEASURE.DEPTH. (Direction: in)
+ :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU (you cannot change this default value). (Direction: in)
+ :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution "Resolution(0,0)", the camera resolution will be taken. Default: (0,0). (Direction: in)
+ :return: ERROR_CODE.SUCCESS if the method succeeded.
+ :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example).
+ :return: ERROR_CODE.INVALID_RESOLUTION if the resolution is higher than one provided by Resolution "get_camera_information().camera_configuration.resolution".
+ :return: ERROR_CODE.FAILURE if another error occurred.
+
+ .. note::
+ As this method retrieves the measures computed by the grab() method, it should be called afterward.
+
+
+ ..
code-block:: text
+
+ depth_map = sl.Mat()
+ point_cloud = sl.Mat()
+ resolution = zed.get_camera_information().camera_configuration.resolution
+ x = int(resolution.width / 2) # Center coordinates
+ y = int(resolution.height / 2)
+
+ while True:
+ if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image
+
+ zed.retrieve_measure(depth_map, sl.MEASURE.DEPTH) # Get the depth map
+
+ # Read a depth value
+ err, center_depth = depth_map.get_value(x, y) # each depth map pixel is a float value
+ if err == sl.ERROR_CODE.SUCCESS: # + Inf is "too far", -Inf is "too close", Nan is "unknown/occlusion"
+ print("Depth value at center:", center_depth, init_params.coordinate_units)
+ zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA) # Get the point cloud
+
+ # Read a point cloud value
+ err, pc_value = point_cloud.get_value(x, y) # each point cloud pixel contains 4 floats, so we are using a numpy array
+
+ # Get 3D coordinates
+ if err == sl.ERROR_CODE.SUCCESS:
+ print("Point cloud coordinates at center: X=", pc_value[0], ", Y=", pc_value[1], ", Z=", pc_value[2])
+
+ # Get color information using Python struct package to unpack the unsigned char array containing RGBA values
+ import struct
+ packed = struct.pack('f', pc_value[3])
+ char_array = struct.unpack('BBBB', packed)
+ print("Color values at center: R=", char_array[0], ", G=", char_array[1], ", B=", char_array[2], ", A=", char_array[3])
+
+ """
+ return ERROR_CODE()
+
+ def set_region_of_interest(self, py_mat, modules = [MODULE.ALL]) -> ERROR_CODE:
+ """
+ Defines a region of interest to focus on for all the SDK, discarding other parts.
+ :param roi_mask: The Mat defining the requested region of interest, pixels lower than 127 will be discarded from all modules: depth, positional tracking, etc.
+ If empty, set all pixels as valid. The mask can be either at lower or higher resolution than the current images.
+ :return: An ERROR_CODE if something went wrong.
+ .. note::
+ The method supports MAT_TYPE "U8_C1/U8_C3/U8_C4" image types.
+ """
+ return ERROR_CODE()
+
+ def get_region_of_interest(self, py_mat, resolution = None, module: MODULE = MODULE.ALL) -> ERROR_CODE:
+ """
+ Gets the previously set or computed region of interest.
+ :param roi_mask: The Mat returned
+ :param image_size: The optional size of the returned mask
+ :return: An ERROR_CODE if something went wrong.
+ """
+ return ERROR_CODE()
+
+ def start_region_of_interest_auto_detection(self, roi_param = None) -> ERROR_CODE:
+ """
+ Starts the auto detection of a region of interest to focus on for all the SDK, discarding other parts.
+ This detection is based on the general motion of the camera combined with the motion in the scene.
+ The camera must move for this process; an internal motion detector is used, based on the Positional Tracking module.
+ It requires a few hundred frames of motion to compute the mask.
+ :param roi_param: The RegionOfInterestParameters defining parameters for the detection
+
+ .. note::
+ This module expects a static portion, typically a fairly close vehicle hood at the bottom of the image.
+
+ This module may not work correctly or may detect an incorrect background area, especially with slow motion, if there is no static element.
+ This module works asynchronously; the status can be obtained using get_region_of_interest_auto_detection_status(). The result is either applied automatically,
+ or can be retrieved using the get_region_of_interest() function.
+ :return: An ERROR_CODE if something went wrong.
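+
+ A polling sketch (illustrative; the RUNNING state name is assumed to follow REGION_OF_INTEREST_AUTO_DETECTION_STATE):
+
+ .. code-block:: text
+
+ zed.start_region_of_interest_auto_detection()
+ while zed.get_region_of_interest_auto_detection_status() == sl.REGION_OF_INTEREST_AUTO_DETECTION_STATE.RUNNING:
+ zed.grab() # keep feeding frames while the mask is being computed
+ roi_mask = sl.Mat()
+ zed.get_region_of_interest(roi_mask)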
+ """ + return ERROR_CODE() + + def get_region_of_interest_auto_detection_status(self) -> REGION_OF_INTEREST_AUTO_DETECTION_STATE: + """ + Return the status of the automatic Region of Interest Detection + The automatic Region of Interest Detection is enabled by using startRegionOfInterestAutoDetection + :return: REGION_OF_INTEREST_AUTO_DETECTION_STATE the status + """ + return REGION_OF_INTEREST_AUTO_DETECTION_STATE() + + def start_publishing(self, communication_parameters) -> ERROR_CODE: + """ + Set this camera as a data provider for the Fusion module. + + Metadata is exchanged with the Fusion. + :param communication_parameters: A structure containing all the initial parameters. Default: a preset of CommunicationParameters. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def stop_publishing(self) -> ERROR_CODE: + """ + Set this camera as normal camera (without data providing). + + Stop to send camera data to fusion. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_svo_position(self, frame_number) -> None: + """ + Sets the playback cursor to the desired frame number in the SVO file. + + This method allows you to move around within a played-back SVO file. After calling, the next call to grab() will read the provided frame number. + + :param frame_number: The number of the desired frame to be decoded. + + .. note:: + The method works only if the camera is open in SVO playback mode. + + + .. code-block:: text + + + import pyzed.sl as sl + + def main(): + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.set_from_svo_file("path/to/my/file.svo") + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + # Loop between frames 0 and 50 + left_image = sl.Mat() + while zed.get_svo_position() < zed.get_svo_number_of_frames() - 1: + + print("Current frame: ", zed.get_svo_position()) + + # Loop if we reached frame 50 + if zed.get_svo_position() == 50: + zed.set_svo_position(0) + + # Grab an image + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image + + # Use the image in your application + + # Close the Camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + """ + pass + + def pause_svo_reading(self, status) -> None: + """ + Pauses or resumes SVO reading when using SVO Real time mode + :param status: If true, the reading is paused. If false, the reading is resumed. + .. note:: + This is only relevant for SVO InitParameters::svo_real_time_mode + """ + pass + + def get_svo_position(self) -> int: + """ + Returns the current playback position in the SVO file. + + The position corresponds to the number of frames already read from the SVO file, starting from 0 to n. + + Each grab() call increases this value by one (except when using InitParameters.svo_real_time_mode). + :return: The current frame position in the SVO file. -1 if the SDK is not reading an SVO. + + .. note:: + The method works only if the camera is open in SVO playback mode. + + + See set_svo_position() for an example. + """ + return int() + + def get_svo_number_of_frames(self) -> int: + """ + Returns the number of frames in the SVO file. + + :return: The total number of frames in the SVO file. -1 if the SDK is not reading a SVO. 
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return int()
+
+    def ingest_data_into_svo(self, data) -> ERROR_CODE:
+        """
+        Ingests an SVOData object into the SVO file.
+
+        :return: An ERROR_CODE indicating success or failure.
+
+        The method works only if the camera is open in SVO recording mode.
+        """
+        return ERROR_CODE()
+
+    def get_svo_data_keys(self) -> list:
+        """
+        Gets the external channels that can be retrieved from the SVO file.
+
+        :return: A list of available channel keys.
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return []
+
+    def retrieve_svo_data(self, key, data, ts_begin, ts_end) -> ERROR_CODE:
+        """
+        Retrieves SVO data from the SVO file for the given channel key within the given timestamp range.
+
+        :return: An ERROR_CODE indicating success or failure.
+        :param key: The channel key.
+        :param data: The dict to be filled with SVOData objects, with timestamps as keys.
+        :param ts_begin: The beginning of the range.
+        :param ts_end: The end of the range.
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings(self, settings: VIDEO_SETTINGS, value = -1) -> ERROR_CODE:
+        """
+        Sets the value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+        This method only works with settings that take a single value; for min/max settings, see set_camera_settings_range().
+        Possible values (range) of each setting are available in VIDEO_SETTINGS.
+
+        :param settings: The setting to be set.
+        :param value: The value to set. Default: -1 (auto mode).
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings_range(self, settings: VIDEO_SETTINGS, mini = -1, maxi = -1) -> ERROR_CODE:
+        """
+        Sets the value of the requested VIDEO_SETTINGS "camera setting" that supports two values (min/max).
+
+        This method only works with the following VIDEO_SETTINGS:
+        - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+        :param settings: The setting to be set.
+        :param mini: The minimum value that can be reached (-1 or 0 gives full range).
+        :param maxi: The maximum value that can be reached (-1 or 0 gives full range).
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. warning:: If VIDEO_SETTINGS settings is not supported or min >= max, it will return ERROR_CODE.INVALID_FUNCTION_PARAMETERS.
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+        .. code-block:: text
+
+            # For ZED X based products, set the automatic exposure from 2ms to 5ms. The expected exposure time cannot go beyond those values.
+            zed.set_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE, 2000, 5000)
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings_roi(self, settings: VIDEO_SETTINGS, roi, eye: SIDE = SIDE.BOTH, reset = False) -> ERROR_CODE:
+        """
+        Overloaded method for VIDEO_SETTINGS.AEC_AGC_ROI which takes a Rect as parameter.
+
+        :param settings: Must be set to VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact.
+        :param roi: Rect that defines the target to be applied for AEC/AGC computation. Must be given according to camera resolution.
+        :param eye: SIDE on which to be applied for AEC/AGC computation. Default: SIDE.BOTH
+        :param reset: Cancel the manual ROI and reset it to the full image. Default: False
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+        .. code-block:: text
+
+            roi = sl.Rect(42, 56, 120, 15)
+            zed.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH)
+        """
+        return ERROR_CODE()
+
+    def get_camera_settings(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int]:
+        """
+        Returns the current value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+        Possible values (range) of each setting are available in VIDEO_SETTINGS.
+
+        :param setting: The requested setting.
+        :return: ERROR_CODE to indicate if the method was successful.
+        :return: The current value for the corresponding setting.
+
+        .. code-block:: text
+
+            err, gain = zed.get_camera_settings(sl.VIDEO_SETTINGS.GAIN)
+            if err == sl.ERROR_CODE.SUCCESS:
+                print("Current gain value:", gain)
+            else:
+                print("error:", err)
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+        .. note::
+            Settings are not exported in the SVO file format.
+        """
+        return tuple[ERROR_CODE, int]()
+
+    def get_camera_settings_range(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int, int]:
+        """
+        Returns the values of the requested settings for VIDEO_SETTINGS that support two values (min/max).
+
+        This method only works with the following VIDEO_SETTINGS:
+        - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+        Possible values (range) of each setting are available in VIDEO_SETTINGS.
+        :param setting: The requested setting.
+        :return: ERROR_CODE to indicate if the method was successful.
+        :return: The current value of the minimum for the corresponding setting.
+        :return: The current value of the maximum for the corresponding setting.
+
+        .. code-block:: text
+
+            err, aec_range_min, aec_range_max = zed.get_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE)
+            if err == sl.ERROR_CODE.SUCCESS:
+                print("Current AUTO_EXPOSURE_TIME_RANGE range values ==> min:", aec_range_min, "max:", aec_range_max)
+            else:
+                print("error:", err)
+
+        .. note::
+            Works only with the ZED X, which supports low-level controls.
+        """
+        return tuple[ERROR_CODE, int, int]()
+
+    def get_camera_settings_roi(self, setting: VIDEO_SETTINGS, roi, eye: SIDE = SIDE.BOTH) -> ERROR_CODE:
+        """
+        Returns the current value of the currently used ROI for the camera setting AEC_AGC_ROI.
+
+        :param setting: Must be set to VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. (Direction: in)
+        :param roi: Rect that will be filled. (Direction: out)
+        :param eye: The requested side. Default: SIDE.BOTH (Direction: in)
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. code-block:: text
+
+            roi = sl.Rect()
+            err = zed.get_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH)
+            print("Current ROI for AEC_AGC: " + str(roi.x) + " " + str(roi.y) + " " + str(roi.width) + " " + str(roi.height))
+
+        .. note::
+            Works only if the camera is open in LIVE or STREAM mode with VIDEO_SETTINGS.AEC_AGC_ROI.
+
+        .. note::
+            It will return ERROR_CODE.INVALID_FUNCTION_CALL or ERROR_CODE.INVALID_FUNCTION_PARAMETERS otherwise.
+ """ + return ERROR_CODE() + + def is_camera_setting_supported(self, setting: VIDEO_SETTINGS) -> bool: + """ + Returns if the video setting is supported by the camera or not + + :param setting: the video setting to test (Direction: in) + :return: True if the VIDEO_SETTINGS is supported by the camera, False otherwise + """ + return bool() + + def get_current_fps(self) -> float: + """ + Returns the current framerate at which the grab() method is successfully called. + + The returned value is based on the difference of camera get_timestamp() "timestamps" between two successful grab() calls. + + :return: The current SDK framerate + + .. warning:: The returned framerate (number of images grabbed per second) can be lower than InitParameters.camera_fps if the grab() function runs slower than the image stream or is called too often. + + .. code-block:: text + + current_fps = zed.get_current_fps() + print("Current framerate: ", current_fps) + """ + return float() + + def get_timestamp(self, time_reference: TIME_REFERENCE) -> Timestamp: + """ + Returns the timestamp in the requested TIME_REFERENCE. + + - When requesting the TIME_REFERENCE.IMAGE timestamp, the UNIX nanosecond timestamp of the latest grab() "grabbed" image will be returned. + \n This value corresponds to the time at which the entire image was available in the PC memory. As such, it ignores the communication time that corresponds to 2 or 3 frame-time based on the fps (ex: 33.3ms to 50ms at 60fps). + + - When requesting the TIME_REFERENCE.CURRENT timestamp, the current UNIX nanosecond timestamp is returned. + + This function can also be used when playing back an SVO file. + + :param time_reference: The selected TIME_REFERENCE. + :return: The Timestamp in nanosecond. 0 if not available (SVO file without compression). + + .. note:: + As this function returns UNIX timestamps, the reference it uses is common across several Camera instances. + + \n This can help to organized the grabbed images in a multi-camera application. + + .. code-block:: text + + last_image_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.IMAGE) + current_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.CURRENT) + print("Latest image timestamp: ", last_image_timestamp.get_nanoseconds(), "ns from Epoch.") + print("Current timestamp: ", current_timestamp.get_nanoseconds(), "ns from Epoch.") + """ + return Timestamp() + + def get_frame_dropped_count(self) -> int: + """ + Returns the number of frames dropped since grab() was called for the first time. + + A dropped frame corresponds to a frame that never made it to the grab method. + \n This can happen if two frames were extracted from the camera when grab() is called. The older frame will be dropped so as to always use the latest (which minimizes latency). + + :return: The number of frames dropped since the first grab() call. + """ + return int() + + def get_current_min_max_depth(self) -> tuple[ERROR_CODE, float, float]: + """ + Gets the current range of perceived depth. + :param min: Minimum depth detected (in selected sl.UNIT). (Direction: out) + :param max: Maximum depth detected (in selected sl.UNIT). (Direction: out) + :return: ERROR_CODE.SUCCESS if values can be extracted, ERROR_CODE.FAILURE otherwise. + """ + return tuple[ERROR_CODE, float, float]() + + def get_camera_information(self, resizer = None) -> CameraInformation: + """ + Returns the CameraInformation associated the camera being used. 
+ + To ensure accurate calibration, it is possible to specify a custom resolution as a parameter when obtaining scaled information, as calibration parameters are resolution-dependent. + \n When reading an SVO file, the parameters will correspond to the camera used for recording. + + :param resizer: You can specify a size different from the default image size to get the scaled camera information. + Default = (0,0) meaning original image size (given by CameraConfiguration.resolution "get_camera_information().camera_configuration.resolution"). + :return: CameraInformation containing the calibration parameters of the ZED, as well as serial number and firmware version. + + .. warning:: The returned parameters might vary between two execution due to the InitParameters.camera_disable_self_calib "self-calibration" being run in the open() method. + .. note:: + The calibration file SNXXXX.conf can be found in: + + - **Windows:** C:/ProgramData/Stereolabs/settings/ + - **Linux:** /usr/local/zed/settings/ + """ + return CameraInformation() + + def get_runtime_parameters(self) -> RuntimeParameters: + """ + Returns the RuntimeParameters used. + It corresponds to the structure given as argument to the grab() method. + + :return: RuntimeParameters containing the parameters that define the behavior of the grab method. + """ + return RuntimeParameters() + + def get_init_parameters(self) -> InitParameters: + """ + Returns the InitParameters associated with the Camera object. + It corresponds to the structure given as argument to open() method. + + :return: InitParameters containing the parameters used to initialize the Camera object. + """ + return InitParameters() + + def get_positional_tracking_parameters(self) -> PositionalTrackingParameters: + """ + Returns the PositionalTrackingParameters used. + + It corresponds to the structure given as argument to the enable_positional_tracking() method. + + :return: PositionalTrackingParameters containing the parameters used for positional tracking initialization. + """ + return PositionalTrackingParameters() + + def get_spatial_mapping_parameters(self) -> SpatialMappingParameters: + """ + Returns the SpatialMappingParameters used. + + It corresponds to the structure given as argument to the enable_spatial_mapping() method. + + :return: SpatialMappingParameters containing the parameters used for spatial mapping initialization. + """ + return SpatialMappingParameters() + + def get_object_detection_parameters(self, instance_module_id = 0) -> ObjectDetectionParameters: + """ + Returns the ObjectDetectionParameters used. + + It corresponds to the structure given as argument to the enable_object_detection() method. + :return: ObjectDetectionParameters containing the parameters used for object detection initialization. + """ + return ObjectDetectionParameters() + + def get_body_tracking_parameters(self, instance_id = 0) -> BodyTrackingParameters: + """ + Returns the BodyTrackingParameters used. + + It corresponds to the structure given as argument to the enable_body_tracking() method. + + :return: BodyTrackingParameters containing the parameters used for body tracking initialization. + """ + return BodyTrackingParameters() + + def get_streaming_parameters(self) -> StreamingParameters: + """ + Returns the StreamingParameters used. + + It corresponds to the structure given as argument to the enable_streaming() method. + + :return: StreamingParameters containing the parameters used for streaming initialization. 
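+
+        A minimal sketch (illustrative only; it assumes streaming was previously enabled with enable_streaming()):
+
+        .. code-block:: text
+
+            stream_params = zed.get_streaming_parameters()
+            print("Streaming on port", stream_params.port, "with bitrate", stream_params.bitrate)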
+ """ + return StreamingParameters() + + def enable_positional_tracking(self, py_tracking = None) -> ERROR_CODE: + """ + Initializes and starts the positional tracking processes. + + This method allows you to enable the position estimation of the SDK. It only has to be called once in the camera's lifetime. + \n When enabled, the position will be update at each grab() call. + \n Tracking-specific parameters can be set by providing PositionalTrackingParameters to this method. + + :param py_tracking: A structure containing all the specific parameters for the positional tracking. Default: a preset of PositionalTrackingParameters. + :return: ERROR_CODE.FAILURE if the PositionalTrackingParameters.area_file_path file wasn't found, ERROR_CODE.SUCCESS otherwise. + + .. warning:: The positional tracking feature benefits from a high framerate. We found HD720@60fps to be the best compromise between image quality and framerate. + + .. code-block:: text + + + import pyzed.sl as sl + + def main() : + # --- Initialize a Camera object and open the ZED + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + # Set tracking parameters + track_params = sl.PositionalTrackingParameters() + + # Enable positional tracking + err = zed.enable_positional_tracking(track_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Tracking error: ", repr(err)) + exit(-1) + + # --- Main loop + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and computes the tracking + camera_pose = sl.Pose() + zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD) + translation = camera_pose.get_translation().get() + print("Camera position: X=", translation[0], " Y=", translation[1], " Z=", translation[2]) + + # --- Close the Camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + """ + return ERROR_CODE() + + def update_self_calibration(self) -> None: + """ + Performs a new self-calibration process. + In some cases, due to temperature changes or strong vibrations, the stereo calibration becomes less accurate. + \n Use this method to update the self-calibration data and get more reliable depth values. + .. note:: + The self-calibration will occur at the next grab() call. + + .. note:: + This method is similar to the previous reset_self_calibration() used in 2.X SDK versions. + + .. warning:: New values will then be available in get_camera_information(), be sure to get them to still have consistent 2D <-> 3D conversion. + """ + pass + + def enable_body_tracking(self, body_tracking_parameters = None) -> ERROR_CODE: + """ + Initializes and starts the body tracking module. + + The body tracking module currently supports multiple classes of human skeleton detection with the BODY_TRACKING_MODEL.HUMAN_BODY_FAST, + BODY_TRACKING_MODEL "BODY_TRACKING_MODEL::HUMAN_BODY_MEDIUM" or BODY_TRACKING_MODEL "BODY_TRACKING_MODEL::HUMAN_BODY_ACCURATE". + \n This model only detects humans but provides a full skeleton map for each person. + + \n Detected objects can be retrieved using the retrieve_bodies() method. + + .. note:: + - **This Deep Learning detection module is not available for MODEL.ZED cameras (first generation ZED cameras).** + + .. 
note:: + - This feature uses AI to locate objects and requires a powerful GPU. A GPU with at least 3GB of memory is recommended. + + + :param body_tracking_parameters: A structure containing all the specific parameters for the object detection. Default: a preset of BodyTrackingParameters. + :return: ERROR_CODE.SUCCESS if everything went fine. + :return: ERROR_CODE.OBJECT_DETECTION_NOT_AVAILABLE if the AI model is missing or corrupted. In this case, the SDK needs to be reinstalled + :return: ERROR_CODE.OBJECT_DETECTION_MODULE_NOT_COMPATIBLE_WITH_CAMERA if the camera used does not have an IMU (MODEL.ZED). + :return: ERROR_CODE.SENSORS_NOT_DETECTED if the camera model is correct (not MODEL.ZED) but the IMU is missing. It probably happens because InitParameters.sensors_required was set to False and that IMU has not been found. + :return: ERROR_CODE.INVALID_FUNCTION_CALL if one of the **body_tracking_parameters** parameter is not compatible with other modules parameters (for example, **depth_mode** has been set to DEPTH_MODE.NONE). + :return: ERROR_CODE.FAILURE otherwise. + + .. code-block:: text + + import pyzed.sl as sl + + def main() : + # Create a ZED camera object + zed = sl.Camera() + + # Open the camera + err = zed.open() + if err != sl.ERROR_CODE.SUCCESS: + print("Opening camera error:", repr(err)) + exit(-1) + + # Enable position tracking (mandatory for object detection) + tracking_params = sl.PositionalTrackingParameters() + err = zed.enable_positional_tracking(tracking_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Positional Tracking error:", repr(err)) + exit(-1) + + # Set the body tracking parameters + body_tracking_params = sl.BodyTrackingParameters() + + # Enable the body tracking + err = zed.enable_body_tracking(body_tracking_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Body Tracking error:", repr(err)) + exit(-1) + + # Grab an image and detect bodies on it + bodies = sl.Bodies() + while True : + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_bodies(bodies) + print(len(bodies.body_list), "bodies detected") + # Use the bodies in your application + + # Close the camera + zed.disable_body_tracking() + zed.close() + + if __name__ == "__main__": + main() + """ + return ERROR_CODE() + + def disable_body_tracking(self, instance_id = 0, force_disable_all_instances = False) -> None: + """ + Disables the body tracking process. + + The body tracking module immediately stops and frees its memory allocations. + + :param instance_id: Id of the body tracking instance. Used when multiple instances of the body tracking module are enabled at the same time. + :param force_disable_all_instances: Should disable all instances of the body tracking module or just **instance_module_id**. + + .. note:: + If the body tracking has been enabled, this method will automatically be called by close(). + """ + pass + + def retrieve_bodies(self, bodies, body_tracking_runtime_parameters = None, instance_id = 0) -> ERROR_CODE: + """ + Retrieves body tracking data from the body tracking module. + + This method returns the result of the body tracking, whether the module is running synchronously or asynchronously. + + - **Asynchronous:** this method immediately returns the last bodies tracked. If the current tracking isn't done, the bodies from the last tracking will be returned, and Bodies.is_new will be set to False. + - **Synchronous:** this method executes tracking and waits for it to finish before returning the detected objects. 
+ + It is recommended to keep the same Bodies object as the input of all calls to this method. This will enable the identification and the tracking of every detected object. + + :param bodies: The detected bodies will be saved into this object. If the object already contains data from a previous tracking, it will be updated, keeping a unique ID for the same person. + :param body_tracking_runtime_parameters: Body tracking runtime settings, can be changed at each tracking. In async mode, the parameters update is applied on the next iteration. If None, the previously used parameters will be used. + :param instance_id: Id of the body tracking instance. Used when multiple instances of the body tracking module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + + .. code-block:: text + + bodies = sl.Bodies() # Unique Bodies to be updated after each grab + # Main loop + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image from the camera + zed.retrieve_bodies(bodies) + print(len(bodies.body_list), "bodies detected") + """ + return ERROR_CODE() + + def set_body_tracking_runtime_parameters(self, body_tracking_runtime_parameters, instance_module_id = 0) -> ERROR_CODE: + """ + Set the body tracking runtime parameters + """ + return ERROR_CODE() + + def is_body_tracking_enabled(self, instance_id = 0) -> bool: + """ + Tells if the body tracking module is enabled. + """ + return bool() + + def get_sensors_data(self, py_sensor_data, time_reference = TIME_REFERENCE.CURRENT) -> ERROR_CODE: + """ + Retrieves the SensorsData (IMU, magnetometer, barometer) at a specific time reference. + + - Calling get_sensors_data with TIME_REFERENCE.CURRENT gives you the latest sensors data received. Getting all the data requires to call this method at 800Hz in a thread. + - Calling get_sensors_data with TIME_REFERENCE.IMAGE gives you the sensors data at the time of the latest image grab() "grabbed". + + SensorsData object contains the previous IMUData structure that was used in ZED SDK v2.X: + \n For IMU data, the values are provided in 2 ways : + +
+        - **Time-fused** pose estimation that can be accessed using data.get_imu_data().get_pose() (see IMUData.get_pose).
+        - **Raw values** from the IMU sensor:
+            * data.get_imu_data().get_angular_velocity(), corresponding to the gyroscope (see IMUData.get_angular_velocity)
+            * data.get_imu_data().get_linear_acceleration(), corresponding to the accelerometer (see IMUData.get_linear_acceleration)
+
+        Both the gyroscope and accelerometer are synchronized.
+
+        The delta time between previous and current values can be calculated using data.imu.timestamp.
+
+        .. note::
+            The IMU quaternion (fused data) is given in the specified COORDINATE_SYSTEM of InitParameters.
+
+        :param py_sensor_data: The SensorsData variable to store the data. (Direction: out)
+        :param time_reference: Defines the reference from which you want the data to be expressed. Default: TIME_REFERENCE.CURRENT. (Direction: in)
+        :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+        :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+        :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+        :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **time_reference** is not valid. See Warning.
+
+        .. warning:: In SVO reading mode, the TIME_REFERENCE.CURRENT is currently not available (yielding ERROR_CODE.INVALID_FUNCTION_PARAMETERS).
+        .. warning:: Only the quaternion data and barometer data (if available) at TIME_REFERENCE.IMAGE are available. Other values will be set to 0.
+        """
+        return ERROR_CODE()
+
+    def get_sensors_data_batch(self, py_sensor_data) -> ERROR_CODE:
+        """
+        Retrieves all SensorsData associated with the most recent grabbed frame, in the specified COORDINATE_SYSTEM of InitParameters.
+
+        For IMU data, the values are provided in 2 ways:
+        - **Time-fused** pose estimation that can be accessed using data.get_imu_data().get_pose() (see IMUData.get_pose).
+        - **Raw values** from the IMU sensor:
+            * data.get_imu_data().get_angular_velocity(), corresponding to the gyroscope (see IMUData.get_angular_velocity)
+            * data.get_imu_data().get_linear_acceleration(), corresponding to the accelerometer (see IMUData.get_linear_acceleration)
+
+        Both the gyroscope and accelerometer are synchronized.
+
+        The delta time between previous and current values can be calculated using data.imu.timestamp.
+
+        :param py_sensor_data: The SensorsData list to store the data. (Direction: out)
+        :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+        :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+        :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+
+        .. code-block:: text
+
+            if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                sensors_data = []
+                if zed.get_sensors_data_batch(sensors_data) == sl.ERROR_CODE.SUCCESS:
+                    for data in sensors_data:
+                        print("IMU data: ", data.imu.get_angular_velocity(), data.imu.get_linear_acceleration())
+                        print("IMU pose: ", data.imu.get_pose().get_translation())
+                        print("IMU orientation: ", data.imu.get_orientation().get())
+        """
+        return ERROR_CODE()
+
+    def set_imu_prior(self, transfom) -> ERROR_CODE:
+        """
+        Sets an optional IMU orientation hint that will be used to assist the tracking during the next grab().
+
+        This method can be used to assist the positional tracking rotation.
+
+        .. note::
+            This method is only effective if the camera has a model other than a MODEL.ZED, which does not contain internal sensors.
+
+        .. warning:: It needs to be called before the grab() method.
+        :param transfom: Transform to be ingested into IMU fusion. Note that only the rotation is used.
+        :return: ERROR_CODE.SUCCESS if the transform has been passed, ERROR_CODE.INVALID_FUNCTION_CALL otherwise (e.g. when used with a ZED camera which doesn't have IMU data).
+        """
+        return ERROR_CODE()
+
+    def get_position(self, py_pose, reference_frame: REFERENCE_FRAME = REFERENCE_FRAME.WORLD) -> POSITIONAL_TRACKING_STATE:
+        """
+        Retrieves the estimated position and orientation of the camera in the specified reference frame (see REFERENCE_FRAME).
+
+        - Using REFERENCE_FRAME.WORLD, the returned pose relates to the initial position of the camera (PositionalTrackingParameters.initial_world_transform).
+        - Using REFERENCE_FRAME.CAMERA, the returned pose relates to the previous position of the camera.
+
+        If the tracking has been initialized with PositionalTrackingParameters.enable_area_memory set to True (default), this method can return POSITIONAL_TRACKING_STATE.SEARCHING.
+        This means that the tracking lost its link to the initial referential and is currently trying to relocate the camera. However, it will keep on providing position estimations.
+
+        :param py_pose: The pose containing the position of the camera and other information (timestamp, confidence). (Direction: out)
+        :param reference_frame: Defines the reference from which you want the pose to be expressed. Default: REFERENCE_FRAME.WORLD. (Direction: in)
+        :return: The current state of the tracking process.
+
+        .. note::
+            Extract Rotation Matrix: Pose.get_rotation_matrix()
+
+        .. note::
+            Extract Translation Vector: Pose.get_translation()
+
+        .. note::
+            Extract Orientation / Quaternion: Pose.get_orientation()
+
+        .. warning:: This method requires the tracking to be enabled. See enable_positional_tracking().
+
+        .. note::
+            The position is provided in the InitParameters.coordinate_system. See COORDINATE_SYSTEM for its physical origin.
+
+        .. code-block:: text
+
+            while True:
+                if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and compute the tracking
+                    camera_pose = sl.Pose()
+                    zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD)
+
+                    translation = camera_pose.get_translation().get()
+                    print("Camera position: X=", translation[0], " Y=", translation[1], " Z=", translation[2])
+                    print("Camera Euler rotation: X=", camera_pose.get_euler_angles()[0], " Y=", camera_pose.get_euler_angles()[1], " Z=", camera_pose.get_euler_angles()[2])
+                    print("Camera Rodrigues rotation: X=", camera_pose.get_rotation_vector()[0], " Y=", camera_pose.get_rotation_vector()[1], " Z=", camera_pose.get_rotation_vector()[2])
+                    orientation = camera_pose.get_orientation().get()
+                    print("Camera quaternion orientation: X=", orientation[0], " Y=", orientation[1], " Z=", orientation[2], " W=", orientation[3])
+        """
+        return POSITIONAL_TRACKING_STATE()
+
+    def get_positional_tracking_landmarks(self, landmarks) -> ERROR_CODE:
+        """
+        Gets the current positional tracking landmarks.
+        :param landmarks: The dictionary of landmark ids and landmarks.
+        :return: An ERROR_CODE indicating whether the method succeeded.
+        """
+        return ERROR_CODE()
+
+    def get_positional_tracking_landmarks2d(self, landmark2d) -> ERROR_CODE:
+        """
+        Gets the current positional tracking 2D landmark.
+        :param landmark2d: The landmark.
+        :return: An ERROR_CODE indicating whether the method succeeded.
+        """
+        return ERROR_CODE()
+
+    def get_positional_tracking_status(self) -> PositionalTrackingStatus:
+        """
+        Returns the current status of the positional tracking module.
+
+        :return: PositionalTrackingStatus, the current status of the positional tracking module.
+        """
+        return PositionalTrackingStatus()
+
+    def get_area_export_state(self) -> AREA_EXPORTING_STATE:
+        """
+        Returns the state of the spatial memory export process.
+
+        As Camera.save_area_map() only starts the export, this method allows you to know when the export has finished or if it failed.
+        :return: The current state of the spatial memory export process.
+        """
+        return AREA_EXPORTING_STATE()
+
+    def save_area_map(self, area_file_path = "") -> ERROR_CODE:
+        """
+        Saves the current area learning file. The file will contain spatial memory data generated by the tracking.
+
+        If the tracking has been initialized with PositionalTrackingParameters.enable_area_memory set to True (default), the method allows you to export the spatial memory.
+        \n Reloading the exported file in a future session with PositionalTrackingParameters.area_file_path initializes the tracking within the same referential.
+        \n This method is asynchronous, and only triggers the file generation. You can use get_area_export_state() to get the export state.
+        The positional tracking keeps running while exporting.
+
+        :param area_file_path: Path of an '.area' file to save the spatial memory database in.
+        :return: ERROR_CODE.FAILURE if the **area_file_path** file wasn't found, ERROR_CODE.SUCCESS otherwise.
+
+        See get_area_export_state()
+
+        .. note::
+            Please note that this method will also flush the area database that was built/loaded.
+
+        .. warning:: If the camera wasn't moved during the tracking session (or not enough), the spatial memory won't be usable and the file won't be exported.
+        .. warning:: get_area_export_state() will then return AREA_EXPORTING_STATE.FILE_EMPTY.
+        .. warning:: A few meters (~3m) of translation or a full rotation should be enough to get usable spatial memory.
+        .. warning:: However, as the spatial memory is used for relocation purposes, visiting a significant portion of the environment is recommended before exporting.
+
+        .. code-block:: text
+
+            while True:
+                if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and compute the tracking
+                    camera_pose = sl.Pose()
+                    zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD)
+
+                    # Export the spatial memory for future sessions
+                    zed.save_area_map("office.area") # The actual file will be created asynchronously.
+                    print(repr(zed.get_area_export_state()))
+
+            # Close the camera
+            zed.close()
+        """
+        return ERROR_CODE()
+
+    def disable_positional_tracking(self, area_file_path = "") -> None:
+        """
+        Disables the positional tracking.
+
+        The positional tracking is immediately stopped. If a file path is given, save_area_map() will be called asynchronously. See get_area_export_state() to get the export state.
+        If the tracking has been enabled, this function will automatically be called by close().
+
+        :param area_file_path: If set, saves the spatial memory into an '.area' file. Default: (empty)
+        \n **area_file_path** is the name and path of the database, e.g. "path/to/file/myArea1.area".
+        """
+        pass
+
+    def is_positional_tracking_enabled(self) -> bool:
+        """
+        Tells if the tracking module is enabled.
+        """
+        return bool()
+
+    def reset_positional_tracking(self, path) -> ERROR_CODE:
+        """
+        Resets the tracking, and re-initializes the position with the given transformation matrix.
+        :param path: Position of the camera in the world frame when the method is called.
+        :return: ERROR_CODE.SUCCESS if the tracking has been reset, ERROR_CODE.FAILURE otherwise.
+
+        .. note::
+            Please note that this method will also flush the accumulated or loaded spatial memory.
+        """
+        return ERROR_CODE()
+
+    def enable_spatial_mapping(self, py_spatial = None) -> ERROR_CODE:
+        """
+        Initializes and starts the spatial mapping processes.
+
+        The spatial mapping will create a geometric representation of the scene based on both tracking data and 3D point clouds.
+        The resulting output can be a Mesh or a FusedPointCloud. It can be obtained by calling extract_whole_spatial_map() or retrieve_spatial_map_async().
+        Note that retrieve_spatial_map_async() should be called after request_spatial_map_async().
+
+        :param py_spatial: A structure containing all the specific parameters for the spatial mapping.
+            Default: a balanced parameter preset between geometric fidelity and output file size. For more information, see the SpatialMappingParameters documentation.
+        :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+        .. warning:: The tracking (enable_positional_tracking()) and the depth (RuntimeParameters.enable_depth) need to be enabled to use the spatial mapping.
+        .. warning:: The performance greatly depends on the **py_spatial** parameters.
+        .. warning:: Lower SpatialMappingParameters.range_meter and SpatialMappingParameters.resolution_meter for higher performance.
+            If the mapping framerate is too slow in live mode, consider using an SVO file, or choose a lower mesh resolution.
+
+        .. note::
+            This feature uses host memory (RAM) to store the 3D map. The maximum amount of available memory allowed can be tweaked using the SpatialMappingParameters.
+
+        \n Exceeding the maximum memory allowed immediately stops the mapping.
+
+        ..
code-block:: text + + import pyzed.sl as sl + + def main() : + # Create a ZED camera object + zed = sl.Camera() + + # Set initial parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60) + init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP # Use a right-handed Y-up coordinate system (The OpenGL one) + init_params.coordinate_units = sl.UNIT.METER # Set units in meters + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Positional tracking needs to be enabled before using spatial mapping + tracking_parameters = sl.PositionalTrackingParameters() + err = zed.enable_positional_tracking(tracking_parameters) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Enable spatial mapping + mapping_parameters = sl.SpatialMappingParameters() + err = zed.enable_spatial_mapping(mapping_parameters) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Grab data during 500 frames + i = 0 + mesh = sl.Mesh() # Create a mesh object + while i < 500 : + # For each new grab, mesh data is updated + if zed.grab() == sl.ERROR_CODE.SUCCESS : + # In the background, the spatial mapping will use newly retrieved images, depth and pose to update the mesh + mapping_state = zed.get_spatial_mapping_state() + + # Print spatial mapping state + print("Images captured: ", i, "/ 500 || Spatial mapping state: ", repr(mapping_state)) + i = i + 1 + + # Extract, filter and save the mesh in a .obj file + print("Extracting Mesh ...") + zed.extract_whole_spatial_map(mesh) # Extract the whole mesh + print("Filtering Mesh ...") + mesh.filter(sl.MESH_FILTER.LOW) # Filter the mesh (remove unnecessary vertices and faces) + print("Saving Mesh in mesh.obj ...") + mesh.save("mesh.obj") # Save the mesh in an obj file + + # Disable tracking and mapping and close the camera + zed.disable_spatial_mapping() + zed.disable_positional_tracking() + zed.close() + return 0 + + if __name__ == "__main__" : + main() + """ + return ERROR_CODE() + + def pause_spatial_mapping(self, status) -> None: + """ + Pauses or resumes the spatial mapping processes. + + As spatial mapping runs asynchronously, using this method can pause its computation to free some processing power, and resume it again later. + \n For example, it can be used to avoid mapping a specific area or to pause the mapping when the camera is static. + :param status: If True, the integration is paused. If False, the spatial mapping is resumed. + """ + pass + + def get_spatial_mapping_state(self) -> SPATIAL_MAPPING_STATE: + """ + Returns the current spatial mapping state. + + As the spatial mapping runs asynchronously, this method allows you to get reported errors or status info. + :return: The current state of the spatial mapping process. + + See also SPATIAL_MAPPING_STATE + """ + return SPATIAL_MAPPING_STATE() + + def request_spatial_map_async(self) -> None: + """ + Starts the spatial map generation process in a non-blocking thread from the spatial mapping process. + + The spatial map generation can take a long time depending on the mapping resolution and covered area. This function will trigger the generation of a mesh without blocking the program. + You can get info about the current generation using get_spatial_map_request_status_async(), and retrieve the mesh using retrieve_spatial_map_async(). + + .. note:: + Only one mesh can be generated at a time. If the previous mesh generation is not over, new calls of the function will be ignored. 
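+
+        A minimal polling sketch (illustrative only; it assumes the camera is grabbing and spatial mapping is already enabled):
+
+        .. code-block:: text
+
+            mesh = sl.Mesh()
+            zed.request_spatial_map_async()  # trigger map generation in the background
+            while zed.get_spatial_map_request_status_async() != sl.ERROR_CODE.SUCCESS:
+                zed.grab()  # keep grabbing while the map is being generated
+            zed.retrieve_spatial_map_async(mesh)  # the generated map is now available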
+ """ + pass + + def get_spatial_map_request_status_async(self) -> ERROR_CODE: + """ + Returns the spatial map generation status. + + This status allows you to know if the mesh can be retrieved by calling retrieve_spatial_map_async(). + :return: ERROR_CODE.SUCCESS if the mesh is ready and not yet retrieved, otherwise ERROR_CODE.FAILURE. + """ + return ERROR_CODE() + + def retrieve_spatial_map_async(self, py_mesh) -> ERROR_CODE: + """ + Retrieves the current generated spatial map. + + After calling request_spatial_map_async(), this method allows you to retrieve the generated mesh or fused point cloud. + \n The Mesh or FusedPointCloud will only be available when get_spatial_map_request_status_async() returns ERROR_CODE.SUCCESS. + + :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out) + :return: ERROR_CODE.SUCCESS if the mesh is retrieved, otherwise ERROR_CODE.FAILURE. + + .. note:: + This method only updates the necessary chunks and adds the new ones in order to improve update speed. + + .. warning:: You should not modify the mesh / fused point cloud between two calls of this method, otherwise it can lead to a corrupted mesh / fused point cloud. + See request_spatial_map_async() for an example. + """ + return ERROR_CODE() + + def extract_whole_spatial_map(self, py_mesh) -> ERROR_CODE: + """ + Extract the current spatial map from the spatial mapping process. + + If the object to be filled already contains a previous version of the mesh / fused point cloud, only changes will be updated, optimizing performance. + + :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out) + + :return: ERROR_CODE.SUCCESS if the mesh is filled and available, otherwise ERROR_CODE.FAILURE. + + .. warning:: This is a blocking function. You should either call it in a thread or at the end of the mapping process. + The extraction can be long, calling this function in the grab loop will block the depth and tracking computation giving bad results. + """ + return ERROR_CODE() + + def find_plane_at_hit(self, coord, py_plane: Plane, parameters = PlaneDetectionParameters()) -> ERROR_CODE: + """ + Checks the plane at the given left image coordinates. + + This method gives the 3D plane corresponding to a given pixel in the latest left image grab() "grabbed". + \n The pixel coordinates are expected to be contained x=[0;width-1] and y=[0;height-1], where width/height are defined by the input resolution. + + :param coord: The image coordinate. The coordinate must be taken from the full-size image (Direction: in) + :param plane: The detected plane if the method succeeded. (Direction: out) + :param parameters: A structure containing all the specific parameters for the plane detection. Default: a preset of PlaneDetectionParameters. (Direction: in) + :return: ERROR_CODE.SUCCESS if a plane is found otherwise ERROR_CODE.PLANE_NOT_FOUND. + + .. note:: + The reference frame is defined by the RuntimeParameters.measure3D_reference_frame given to the grab() method. + """ + return ERROR_CODE() + + def find_floor_plane(self, py_plane, reset_tracking_floor_frame, floor_height_prior = float('nan'), world_orientation_prior = Rotation(Matrix3f().zeros()), floor_height_prior_tolerance = float('nan')) -> ERROR_CODE: + """ + Detect the floor plane of the scene. + + This method analyses the latest image and depth to estimate the floor plane of the scene. 
+        \n It expects the floor plane to be visible and bigger than other candidate planes, like a table.
+
+        :param py_plane: The detected floor plane if the method succeeded. (Direction: out)
+        :param reset_tracking_floor_frame: The transform to align the tracking with the floor plane. (Direction: out)
+        \n The initial position will then be at ground height, with the axes aligned with gravity.
+        \n The positional tracking needs to be reset/enabled with this transform as a parameter (PositionalTrackingParameters.initial_world_transform).
+        :param floor_height_prior: Prior set to locate the floor plane depending on the known camera distance to the ground, expressed in the same unit as the ZED. (Direction: in)
+        \n If the prior is too far from the detected floor plane, the method will return ERROR_CODE.PLANE_NOT_FOUND.
+        :param world_orientation_prior: Prior set to locate the floor plane depending on the known camera orientation to the ground. (Direction: in)
+        \n If the prior is too far from the detected floor plane, the method will return ERROR_CODE.PLANE_NOT_FOUND.
+        :param floor_height_prior_tolerance: Prior height tolerance, absolute value. (Direction: in)
+        :return: ERROR_CODE.SUCCESS if the floor plane is found and matches the priors (if defined), otherwise ERROR_CODE.PLANE_NOT_FOUND.
+
+        .. note::
+            The reference frame is defined by the sl.RuntimeParameters (measure3D_reference_frame) given to the grab() method.
+
+        .. note::
+            The length unit is defined by sl.InitParameters (coordinate_units).
+
+        .. note::
+            With the ZED, the assumption is made that the floor plane is the dominant plane in the scene. The ZED Mini uses gravity as a prior.
+
+        """
+        return ERROR_CODE()
+
+    def disable_spatial_mapping(self) -> None:
+        """
+        Disables the spatial mapping process.
+
+        The spatial mapping is immediately stopped.
+        \n If the mapping has been enabled, this method will automatically be called by close().
+        .. note::
+            This method frees the memory allocated for the spatial mapping; consequently, meshes and fused point clouds cannot be retrieved after this call.
+        """
+        pass
+
+    def enable_streaming(self, streaming_parameters = None) -> ERROR_CODE:
+        """
+        Creates a streaming pipeline.
+
+        :param streaming_parameters: A structure containing all the specific parameters for the streaming. Default: a preset of StreamingParameters.
+        :return: ERROR_CODE.SUCCESS if the streaming was successfully started.
+        :return: ERROR_CODE.INVALID_FUNCTION_CALL if open() was not successfully called before.
+        :return: ERROR_CODE.FAILURE if the streaming RTSP protocol was not able to start.
+        :return: ERROR_CODE.NO_GPU_COMPATIBLE if the streaming codec is not supported (in this case, use the H264 codec, which is supported on all NVIDIA GPUs the ZED SDK supports).
+
+        ..
code-block:: text + + import pyzed.sl as sl + + def main() : + # Create a ZED camera object + zed = sl.Camera() + + # Set initial parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60) + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS : + print(repr(err)) + exit(-1) + + # Enable streaming + stream_params = sl.StreamingParameters() + stream_params.port = 30000 + stream_params.bitrate = 8000 + err = zed.enable_streaming(stream_params) + if err != sl.ERROR_CODE.SUCCESS : + print(repr(err)) + exit(-1) + + # Grab data during 500 frames + i = 0 + while i < 500 : + if zed.grab() == sl.ERROR_CODE.SUCCESS : + i = i+1 + + zed.disable_streaming() + zed.close() + return 0 + + if __name__ == "__main__" : + main() + """ + return ERROR_CODE() + + def disable_streaming(self) -> None: + """ + Disables the streaming initiated by enable_streaming(). + .. note:: + This method will automatically be called by close() if enable_streaming() was called. + + + See enable_streaming() for an example. + """ + pass + + def is_streaming_enabled(self) -> bool: + """ + Tells if the streaming is running. + :return: True if the stream is running, False otherwise. + """ + return bool() + + def enable_recording(self, record) -> ERROR_CODE: + """ + Creates an SVO file to be filled by enable_recording() and disable_recording(). + + \n SVO files are custom video files containing the un-rectified images from the camera along with some meta-data like timestamps or IMU orientation (if applicable). + \n They can be used to simulate a live ZED and test a sequence with various SDK parameters. + \n Depending on the application, various compression modes are available. See SVO_COMPRESSION_MODE. + + :param record: A structure containing all the specific parameters for the recording such as filename and compression mode. Default: a reset of RecordingParameters . + :return: An ERROR_CODE that defines if the SVO file was successfully created and can be filled with images. + + .. warning:: This method can be called multiple times during a camera lifetime, but if **video_filename** is already existing, the file will be erased. + + + .. code-block:: text + + import pyzed.sl as sl + + def main() : + # Create a ZED camera object + zed = sl.Camera() + # Set initial parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60) + init_params.coordinate_units = sl.UNIT.METER # Set units in meters + # Open the camera + err = zed.open(init_params) + if (err != sl.ERROR_CODE.SUCCESS): + print(repr(err)) + exit(-1) + + # Enable video recording + record_params = sl.RecordingParameters("myVideoFile.svo") + err = zed.enable_recording(record_params) + if (err != sl.ERROR_CODE.SUCCESS): + print(repr(err)) + exit(-1) + + # Grab data during 500 frames + i = 0 + while i < 500 : + # Grab a new frame + if zed.grab() == sl.ERROR_CODE.SUCCESS: + # Record the grabbed frame in the video file + i = i + 1 + + zed.disable_recording() + print("Video has been saved ...") + zed.close() + return 0 + + if __name__ == "__main__" : + main() + """ + return ERROR_CODE() + + def disable_recording(self) -> None: + """ + Disables the recording initiated by enable_recording() and closes the generated file. + + .. note:: + This method will automatically be called by close() if enable_recording() was called. + + + See enable_recording() for an example. 
+ """ + pass + + def get_recording_status(self) -> RecordingStatus: + """ + Get the recording information. + :return: The recording state structure. For more details, see RecordingStatus. + """ + return RecordingStatus() + + def pause_recording(self, value = True) -> None: + """ + Pauses or resumes the recording. + :param status: If True, the recording is paused. If False, the recording is resumed. + """ + pass + + def get_recording_parameters(self) -> RecordingParameters: + """ + Returns the RecordingParameters used. + + It corresponds to the structure given as argument to the enable_recording() method. + :return: RecordingParameters containing the parameters used for recording initialization. + """ + return RecordingParameters() + + def get_health_status(self) -> HealthStatus: + """ + Get the Health information. + :return: The health state structure. For more details, see HealthStatus. + """ + return HealthStatus() + + def get_retrieve_image_resolution(self, resolution = None) -> Resolution: + """ + Get the Health information. + :return: The health state structure. For more details, see HealthStatus. + """ + return Resolution() + + def get_retrieve_measure_resolution(self, resolution = None) -> Resolution: + """ + Get the Health information. + :return: The health state structure. For more details, see HealthStatus. + """ + return Resolution() + + def enable_object_detection(self, object_detection_parameters = None) -> ERROR_CODE: + """ + Initializes and starts object detection module. + + The object detection module currently support multiple StereoLabs' model for different purposes: "MULTI_CLASS", "PERSON_HEAD" + \n The full list of model is available through OBJECT_DETECTION_MODEL and the full list of detectable objects is available through OBJECT_CLASS and OBJECT_SUBCLASS. + + \n Detected objects can be retrieved using the retrieve_objects() method. + + \n Alternatively, the object detection module supports custom class of objects with the OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS (see ingestCustomBoxObjects or ingestCustomMaskObjects) + or OBJECT_DETECTION_MODEL.CUSTOM_YOLOLIKE_BOX_OBJECTS (see ObjectDetectionParameters.custom_onnx_file). + + \n Detected custom objects can be retrieved using the retrieve_custom_objects() method. + + .. note:: + - **This Depth Learning detection module is not available MODEL.ZED cameras.** + + .. note:: + - This feature uses AI to locate objects and requires a powerful GPU. A GPU with at least 3GB of memory is recommended. + + + :param object_detection_parameters: A structure containing all the specific parameters for the object detection. Default: a preset of ObjectDetectionParameters. + :return: ERROR_CODE.SUCCESS if everything went fine. + :return: ERROR_CODE.OBJECT_DETECTION_NOT_AVAILABLE if the AI model is missing or corrupted. In this case, the SDK needs to be reinstalled + :return: ERROR_CODE.OBJECT_DETECTION_MODULE_NOT_COMPATIBLE_WITH_CAMERA if the camera used does not have an IMU (MODEL.ZED). + :return: ERROR_CODE.SENSORS_NOT_DETECTED if the camera model is correct (not MODEL.ZED) but the IMU is missing. It probably happens because InitParameters.sensors_required was set to False and that IMU has not been found. + :return: ERROR_CODE.INVALID_FUNCTION_CALL if one of the **object_detection_parameters** parameter is not compatible with other modules parameters (for example, **depth_mode** has been set to DEPTH_MODE.NONE). + :return: ERROR_CODE.FAILURE otherwise. + + .. 
note:: + The IMU gives the gravity vector that helps in the 3D box localization. Therefore the object detection module is not available for the MODEL.ZED models. + + + .. code-block:: text + + import pyzed.sl as sl + + def main(): + # Create a ZED camera object + zed = sl.Camera() + + # Open the camera + err = zed.open() + if err != sl.ERROR_CODE.SUCCESS: + print("Opening camera error:", repr(err)) + exit(-1) + + # Enable position tracking (mandatory for object detection) + tracking_params = sl.PositionalTrackingParameters() + err = zed.enable_positional_tracking(tracking_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Positional Tracking error:", repr(err)) + exit(-1) + + # Set the object detection parameters + object_detection_params = sl.ObjectDetectionParameters() + + # Enable the object detection + err = zed.enable_object_detection(object_detection_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Object Detection error:", repr(err)) + exit(-1) + + # Grab an image and detect objects on it + objects = sl.Objects() + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_objects(objects) + print(len(objects.object_list), "objects detected") + # Use the objects in your application + + # Close the camera + zed.disable_object_detection() + zed.close() + + if __name__ == "__main__": + main() + """ + return ERROR_CODE() + + def disable_object_detection(self, instance_module_id = 0, force_disable_all_instances = False) -> None: + """ + Disables the object detection process. + + The object detection module immediately stops and frees its memory allocations. + + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :param force_disable_all_instances: Should disable all instances of the object detection module or just **instance_module_id**. + + .. note:: + If the object detection has been enabled, this method will automatically be called by close(). + """ + pass + + def set_object_detection_runtime_parameters(self, object_detection_parameters, instance_module_id = 0) -> ERROR_CODE: + """ + Set the object detection runtime parameters + """ + return ERROR_CODE() + + def set_custom_object_detection_runtime_parameters(self, custom_object_detection_parameters, instance_module_id = 0) -> ERROR_CODE: + """ + Set the custom object detection runtime parameters + """ + return ERROR_CODE() + + def retrieve_objects(self, py_objects, py_object_detection_parameters = None, instance_module_id = 0) -> ERROR_CODE: + """ + Retrieve objects detected by the object detection module. + + This method returns the result of the object detection, whether the module is running synchronously or asynchronously. + + - **Asynchronous:** this method immediately returns the last objects detected. If the current detection isn't done, the objects from the last detection will be returned, and Objects.is_new will be set to False. + - **Synchronous:** this method executes detection and waits for it to finish before returning the detected objects. + + It is recommended to keep the same Objects object as the input of all calls to this method. This will enable the identification and tracking of every object detected. + + :param py_objects: The detected objects will be saved into this object. If the object already contains data from a previous detection, it will be updated, keeping a unique ID for the same person. 
(Direction: out) + :param py_object_detection_parameters: Object detection runtime settings, can be changed at each detection. In async mode, the parameters update is applied on the next iteration. If None, use the previously passed parameters. (Direction: in) + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + + .. code-block:: text + + objects = sl.Objects() + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_objects(objects) + object_list = objects.object_list + for i in range(len(object_list)): + print(repr(object_list[i].label)) + """ + return ERROR_CODE() + + def retrieve_custom_objects(self, py_objects, custom_object_detection_parameters = None, instance_module_id = 0) -> ERROR_CODE: + """ + Retrieve custom objects detected by the object detection module. + + If the object detection module is initialized with OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS, the objects retrieved will be the ones from ingest_custom_box_objects or ingest_custom_mask_objects. + If the object detection module is initialized with OBJECT_DETECTION_MODEL.CUSTOM_YOLOLIKE_BOX_OBJECTS, the objects retrieved will be the ones detected using the optimized ObjectDetectionParameters.custom_onnx_file model. + + When running the detection internally, this method returns the result of the object detection, whether the module is running synchronously or asynchronously. + + - **Asynchronous:** this method immediately returns the last objects detected. If the current detection isn't done, the objects from the last detection will be returned, and Objects::is_new will be set to false. + - **Synchronous:** this method executes detection and waits for it to finish before returning the detected objects. + + It is recommended to keep the same Objects object as the input of all calls to this method. This will enable the identification and tracking of every object detected. + + :param py_objects: The detected objects will be saved into this object. If the object already contains data from a previous detection, it will be updated, keeping a unique ID for the same person. + :param custom_object_detection_parameters: Custom object detection runtime settings, can be changed at each detection. In async mode, the parameters update is applied on the next iteration. If None, use the previously passed parameters. + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine, ERROR_CODE "ERROR_CODE::FAILURE" otherwise. + + set_custom_object_detection_runtime_parameters and retrieve_objects methods should be used instead. + + .. code-block:: text + + objects = sl.Objects() + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_custom_objects(objects) + object_list = objects.object_list + for i in range(len(object_list)): + print(repr(object_list[i].label)) + """ + return ERROR_CODE() + + def get_objects_batch(self, trajectories, instance_module_id = 0) -> ERROR_CODE: + """ + Get a batch of detected objects. + .. warning:: This method needs to be called after retrieve_objects, otherwise trajectories will be empty. + \n It is the retrieve_objects method that ingest the current/live objects into the batching queue. 
+ + :param trajectories: list of sl.ObjectsBatch that will be filled by the batching queue process. An empty list should be passed to the function + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine + :return: ERROR_CODE.INVALID_FUNCTION_CALL if batching module is not available (TensorRT!=7.1) or if object tracking was not enabled. + + .. note:: + Most of the time, the vector will be empty and will be filled every BatchParameters::latency. + + + .. code-block:: text + + objects = sl.Objects() # Unique Objects to be updated after each grab + while True: # Main loop + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image from the camera + zed.retrieve_objects(objects) # Call retrieve_objects so that objects are ingested in the batching system + trajectories = [] # Create an empty list of trajectories + zed.get_objects_batch(trajectories) # Get batch of objects + print("Size of batch: {}".format(len(trajectories))) + """ + return ERROR_CODE() + + def ingest_custom_box_objects(self, objects_in, instance_module_id = 0) -> ERROR_CODE: + """ + Feed the 3D Object tracking function with your own 2D bounding boxes from your own detection algorithm. + :param objects_in: List of CustomBoxObjectData to feed the object detection. + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine. + .. note:: + The detection should be done on the current grabbed left image as the internal process will use all currently available data to extract 3D information and perform object tracking. + """ + return ERROR_CODE() + + def ingest_custom_mask_objects(self, objects_in, instance_module_id = 0) -> ERROR_CODE: + """ + Feed the 3D Object tracking function with your own 2D bounding boxes with masks from your own detection algorithm. + :param objects_in: List of CustomMaskObjectData to feed the object detection. + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine. + .. note:: + The detection should be done on the current grabbed left image as the internal process will use all currently available data to extract 3D information and perform object tracking. + """ + return ERROR_CODE() + + def is_object_detection_enabled(self, instance_id = 0) -> bool: + """ + Tells if the object detection module is enabled. + """ + return bool() + + def get_sdk_version() -> str: + """ + Returns the version of the currently installed ZED SDK. + :return: The ZED SDK version as a string with the following format: MAJOR.MINOR.PATCH + + .. code-block:: text + + print(sl.Camera.get_sdk_version()) + """ + return str() + + def get_device_list() -> list[DeviceProperties]: + """ + List all the connected devices with their associated information. + + This method lists all the cameras available and provides their serial number, models and other information. + :return: The device properties for each connected camera. + """ + return list[DeviceProperties]() + + def get_streaming_device_list() -> list[StreamingProperties]: + """ + Lists all the streaming devices with their associated information. + + :return: The streaming properties for each connected camera. + .. 
warning:: This method takes around 2 seconds to make sure all network informations has been captured. Make sure to run this method in a thread. + """ + return list[StreamingProperties]() + + def reboot(sn : int, full_reboot: bool =True) -> ERROR_CODE: + """ + Performs a hardware reset of the ZED 2 and the ZED 2i. + + :param sn: Serial number of the camera to reset, or 0 to reset the first camera detected. + :param full_reboot: Perform a full reboot (sensors and video modules) if True, otherwise only the video module will be rebooted. + :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine. + :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected. + :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise. + + .. note:: + This method only works for ZED 2, ZED 2i, and newer camera models. + + + .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting. + """ + return ERROR_CODE() + + def reboot_from_input(input_type: INPUT_TYPE) -> ERROR_CODE: + """ + Performs a hardware reset of all devices matching the InputType. + + :param input_type: Input type of the devices to reset. + :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine. + :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected. + :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise. + :return: ERROR_CODE "ERROR_CODE::INVALID_FUNCTION_PARAMETERS" for SVOs and streams. + + .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting. + """ + return ERROR_CODE() + + +class COMM_TYPE(enum.Enum): + """ + Lists the different types of communications available for Fusion module. + + | Enumerator | | + |:---:|:---:| + | LOCAL_NETWORK | The sender and receiver are on the same local network and communicate by RTP. The communication can be affected by the local network load. | + | INTRA_PROCESS | Both sender and receiver are declared by the same process and can be in different threads. This type of communication is optimized. | + """ + LOCAL_NETWORK = enum.auto() + INTRA_PROCESS = enum.auto() + LAST = enum.auto() + +class FUSION_ERROR_CODE(enum.Enum): + """ + Lists the types of error that can be raised by the Fusion. + + + | Enumerator | | + |:---:|:---:| + | GNSS_DATA_NEED_FIX | GNSS Data need fix status in order to run fusion. | + | GNSS_DATA_COVARIANCE_MUST_VARY | Ingested covariance data must vary between ingest. | + | BODY_FORMAT_MISMATCH | The senders are using different body formats. Consider changing them. | + | NOT_ENABLED | The following module was not enabled. | + | SOURCE_MISMATCH | Some sources are provided by SVO and others by LIVE stream. | + | CONNECTION_TIMED_OUT | Connection timed out. Unable to reach the sender. Verify the sender's IP/port. | + | SHARED_MEMORY_LEAK | Intra-process shared memory allocation issue. Multiple connections to the same data. | + | INVALID_IP_ADDRESS | The provided IP address format is incorrect. Please provide the IP in the format 'a.b.c.d', where (a, b, c, d) are numbers between 0 and 255. | + | CONNECTION_ERROR | Something goes bad in the connection between sender and receiver. | + | FAILURE | Standard code for unsuccessful behavior. | + | SUCCESS | Standard code for successful behavior. | + | FUSION_INCONSISTENT_FPS | Significant differences observed between sender's FPS. | + | FUSION_FPS_TOO_LOW | At least one sender has an FPS lower than 10 FPS. | + | INVALID_TIMESTAMP | Problem detected with the ingested timestamp. Sample data will be ignored. 
| + | INVALID_COVARIANCE | Problem detected with the ingested covariance. Sample data will be ignored. | + | NO_NEW_DATA_AVAILABLE | All data from all sources has been consumed. No new data is available for processing. | + """ + GNSS_DATA_NEED_FIX = enum.auto() + GNSS_DATA_COVARIANCE_MUST_VARY = enum.auto() + BODY_FORMAT_MISMATCH = enum.auto() + MODULE_NOT_ENABLED = enum.auto() + SOURCE_MISMATCH = enum.auto() + CONNECTION_TIMED_OUT = enum.auto() + MEMORY_ALREADY_USED = enum.auto() + INVALID_IP_ADDRESS = enum.auto() + FAILURE = enum.auto() + SUCCESS = enum.auto() + FUSION_INCONSISTENT_FPS = enum.auto() + FUSION_FPS_TOO_LOW = enum.auto() + INVALID_TIMESTAMP = enum.auto() + INVALID_COVARIANCE = enum.auto() + NO_NEW_DATA_AVAILABLE = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + +def _initialize_fusion_error_codes() -> None: + """ + Lists the types of error that can be raised by the Fusion. + + + | Enumerator | | + |:---:|:---:| + | GNSS_DATA_NEED_FIX | GNSS Data need fix status in order to run fusion. | + | GNSS_DATA_COVARIANCE_MUST_VARY | Ingested covariance data must vary between ingest. | + | BODY_FORMAT_MISMATCH | The senders are using different body formats. Consider changing them. | + | NOT_ENABLED | The following module was not enabled. | + | SOURCE_MISMATCH | Some sources are provided by SVO and others by LIVE stream. | + | CONNECTION_TIMED_OUT | Connection timed out. Unable to reach the sender. Verify the sender's IP/port. | + | SHARED_MEMORY_LEAK | Intra-process shared memory allocation issue. Multiple connections to the same data. | + | INVALID_IP_ADDRESS | The provided IP address format is incorrect. Please provide the IP in the format 'a.b.c.d', where (a, b, c, d) are numbers between 0 and 255. | + | CONNECTION_ERROR | Something goes bad in the connection between sender and receiver. | + | FAILURE | Standard code for unsuccessful behavior. | + | SUCCESS | Standard code for successful behavior. | + | FUSION_INCONSISTENT_FPS | Significant differences observed between sender's FPS. | + | FUSION_FPS_TOO_LOW | At least one sender has an FPS lower than 10 FPS. | + | INVALID_TIMESTAMP | Problem detected with the ingested timestamp. Sample data will be ignored. | + | INVALID_COVARIANCE | Problem detected with the ingested covariance. Sample data will be ignored. | + | NO_NEW_DATA_AVAILABLE | All data from all sources has been consumed. No new data is available for processing. | + """ + pass + +class SENDER_ERROR_CODE(enum.Enum): + """ + Lists the types of error that can be raised during the Fusion by senders. + + + | Enumerator | | + |:---:|:---:| + | DISCONNECTED | The sender has been disconnected. | + | SUCCESS | Standard code for successful behavior. | + | GRAB_ERROR | The sender encountered a grab error. | + | INCONSISTENT_FPS | The sender does not run with a constant frame rate. | + | FPS_TOO_LOW | The frame rate of the sender is lower than 10 FPS. | + """ + DISCONNECTED = enum.auto() + SUCCESS = enum.auto() + GRAB_ERROR = enum.auto() + INCONSISTENT_FPS = enum.auto() + FPS_TOO_LOW = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + +class POSITION_TYPE(enum.Enum): + """ + Lists the types of possible position outputs. + + + | Enumerator | | + |:---:|:---:| + | RAW | The output position will be the raw position data. | + | FUSION | The output position will be the fused position projected into the requested camera repository. 
|
+ """
+ RAW = enum.auto()
+ FUSION = enum.auto()
+ LAST = enum.auto()
+
+class FUSION_REFERENCE_FRAME(enum.Enum):
+ """
+ Enum to define the reference frame of the fusion SDK.
+
+
+ | Enumerator | |
+ |:---:|:---:|
+ | WORLD | The world frame is the reference frame of the world according to the fused positional tracking. |
+ | BASELINK | The base link frame is the reference frame where the camera calibration is given. |
+ """
+ WORLD = enum.auto()
+ BASELINK = enum.auto()
+
+class CommunicationParameters:
+ """
+ Holds the communication parameters used to configure the connection between senders and receiver.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def comm_type(self) -> COMM_TYPE:
+ """
+ The type of communication used.
+ """
+ return COMM_TYPE()
+
+ @property
+ def port(self) -> int:
+ """
+ The port used for streaming the data.
+ """
+ return int()
+
+ @property
+ def ip_address(self) -> str:
+ """
+ The IP address of the sender.
+ """
+ return str()
+
+ def __dealloc__(self) -> None:
+ """
+ Destructor.
+ """
+ pass
+
+ def set_for_shared_memory(self) -> None:
+ """
+ Set up the communication to use shared memory for an intra-process workflow, with senders and receiver in different threads.
+ """
+ pass
+
+ def set_for_local_network(self, port : int, ip : str = "") -> None:
+ """
+ Set up the local network connection information.
+ """
+ pass
+
+
+class FusionConfiguration:
+ """
+ Useful struct to store the Fusion configuration, can be read from / written to a JSON file.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def pose(self) -> Transform:
+ """
+ The WORLD pose of the camera for Fusion, in the unit and coordinate system defined by the user in the InitFusionParameters.
+ """
+ return Transform()
+
+ @pose.setter
+ def pose(self, pose: Any) -> None:
+ pass
+
+ @property
+ def serial_number(self) -> int:
+ """
+ The serial number of the used ZED camera.
+ """
+ return int()
+
+ @serial_number.setter
+ def serial_number(self, serial_number: Any) -> None:
+ pass
+
+ @property
+ def communication_parameters(self) -> CommunicationParameters:
+ """
+ The communication parameters to connect this camera to the Fusion.
+ """
+ return CommunicationParameters()
+
+ @communication_parameters.setter
+ def communication_parameters(self, communication_parameters: Any) -> None:
+ pass
+
+ @property
+ def override_gravity(self) -> bool:
+ """
+ Indicates the behavior of the fusion with respect to the given calibration pose.
+ - If True: the calibration pose directly specifies the camera's absolute pose relative to a global reference frame.
+ - If False: the calibration pose (Pose_rel) is defined relative to the camera's IMU rotational pose. To determine the true absolute position, the Fusion process will compute Pose_abs = Pose_rel * Rot_IMU_camera.
+ """
+ return bool()
+
+ @override_gravity.setter
+ def override_gravity(self, override_gravity: Any) -> None:
+ pass
+
+ @property
+ def input_type(self) -> InputType:
+ """
+ The input type for the current camera.
+ """
+ return InputType()
+
+ @input_type.setter
+ def input_type(self, input_type: Any) -> None:
+ pass
+
+
+def read_fusion_configuration_file_from_serial(json_config_filename : str, serial_number : int, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> FusionConfiguration:
+ """
+ Read a configuration JSON file to configure a fusion process. 
+ :param json_config_filename: The name of the JSON file containing the configuration. + :param serial_number: The serial number of the ZED Camera you want to retrieve. + :param coord_system: The COORDINATE_SYSTEM in which you want the World Pose to be in. + :param unit: The UNIT in which you want the World Pose to be in. + + :return: A FusionConfiguration for the requested camera. + .. note:: + Empty if no data were found for the requested camera. + """ + return FusionConfiguration() + +def read_fusion_configuration_file(json_config_filename : str, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> list[FusionConfiguration]: + """ + Read a Configuration JSON file to configure a fusion process. + :param json_config_filename: The name of the JSON file containing the configuration. + :param coord_sys: The COORDINATE_SYSTEM in which you want the World Pose to be in. + :param unit: The UNIT in which you want the World Pose to be in. + + :return: A list of FusionConfiguration for all the camera present in the file. + .. note:: + Empty if no data were found for the requested camera. + """ + return list[FusionConfiguration]() + +def read_fusion_configuration_json(fusion_configuration : dict, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> list[FusionConfiguration]: + """ + Read a Configuration JSON to configure a fusion process. + :param fusion_configuration: The JSON containing the configuration. + :param coord_sys: The COORDINATE_SYSTEM in which you want the World Pose to be in. + :param unit: The UNIT in which you want the World Pose to be in. + + :return: A list of FusionConfiguration for all the camera present in the file. + .. note:: + Empty if no data were found for the requested camera. + """ + return list[FusionConfiguration]() + +def write_configuration_file(json_config_filename : str, fusion_configurations : list, coord_sys : COORDINATE_SYSTEM, unit: UNIT) -> None: + """ + Write a Configuration JSON file to configure a fusion process. + :param json_config_filename: The name of the JSON that will contain the information. + :param conf: A list of FusionConfiguration listing all the camera configurations. + :param coord_sys: The COORDINATE_SYSTEM in which the World Pose is. + :param unit: The UNIT in which the World Pose is. + """ + pass + +class GNSSCalibrationParameters: + """ + Holds the options used for calibrating GNSS / VIO. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def target_translation_uncertainty(self) -> float: + """ + This parameter defines the target translation uncertainty at which the calibration process between GNSS and VIO concludes. + + Default: 10e-2 (10 centimeters) + # + """ + return float() + + @target_translation_uncertainty.setter + def target_translation_uncertainty(self, target_translation_uncertainty: Any) -> None: + pass + + @gnss_antenna_position.setter + def gnss_antenna_position(self, gnss_antenna_position: Any) -> None: + pass + + @property + def enable_reinitialization(self) -> bool: + """ + This parameter determines whether reinitialization should be performed between GNSS and VIO fusion when a significant disparity is detected between GNSS data and the current fusion data. + It becomes particularly crucial during prolonged GNSS signal loss scenarios. 
+ + Default: True + # + """ + return bool() + + @enable_reinitialization.setter + def enable_reinitialization(self, enable_reinitialization: Any) -> None: + pass + + @property + def gnss_vio_reinit_threshold(self) -> float: + """ + This parameter determines the threshold for GNSS/VIO reinitialization. + If the fused position deviates beyond out of the region defined by the product of the GNSS covariance and the gnss_vio_reinit_threshold, a reinitialization will be triggered. + + Default: 5 + # + """ + return float() + + @gnss_vio_reinit_threshold.setter + def gnss_vio_reinit_threshold(self, gnss_vio_reinit_threshold: Any) -> None: + pass + + @property + def target_yaw_uncertainty(self) -> float: + """ + This parameter defines the target yaw uncertainty at which the calibration process between GNSS and VIO concludes. + The unit of this parameter is in radian. + + Default: 0.1 radians + # + """ + return float() + + @target_yaw_uncertainty.setter + def target_yaw_uncertainty(self, target_yaw_uncertainty: Any) -> None: + pass + + @property + def enable_translation_uncertainty_target(self) -> bool: + """ + When this parameter is enabled (set to true), the calibration process between GNSS and VIO accounts for the uncertainty in the determined translation, thereby facilitating the calibration termination. + The maximum allowable uncertainty is controlled by the 'target_translation_uncertainty' parameter. + + Default: False + # + """ + return bool() + + @enable_translation_uncertainty_target.setter + def enable_translation_uncertainty_target(self, enable_translation_uncertainty_target: Any) -> None: + pass + + @property + def enable_rolling_calibration(self) -> bool: + """ + If this parameter is set to true, the fusion algorithm will used a rough VIO / GNSS calibration at first and then refine it. This allow you to quickly get a fused position. + + Default: True + # + """ + return bool() + + @enable_rolling_calibration.setter + def enable_rolling_calibration(self, enable_rolling_calibration: Any) -> None: + pass + + def gnss_antenna_position(self) -> np.array[float]: + """ + Define a transform between the GNSS antenna and the camera system for the VIO / GNSS calibration. + + Default value is [0,0,0], this position can be refined by the calibration if enabled + # + """ + return np.array[float]() + + +class PositionalTrackingFusionParameters: + """ + Holds the options used for initializing the positional tracking fusion module. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def base_footprint_to_world_transform(self) -> Transform: + """ + Position and orientation of the base footprint with respect to the user world. + This transform represents a basis change from base footprint coordinate frame to user world coordinate frame + """ + return Transform() + + @base_footprint_to_world_transform.setter + def base_footprint_to_world_transform(self, base_footprint_to_world_transform: Any) -> None: + pass + + @property + def set_gravity_as_origin(self) -> bool: + """ + Whether to override 2 of the 3 rotations from base_footprint_to_world_transform using the IMU gravity. + + Default: False + """ + return bool() + + @set_gravity_as_origin.setter + def set_gravity_as_origin(self, set_gravity_as_origin: Any) -> None: + pass + + @property + def enable_GNSS_fusion(self) -> bool: + """ + This attribute is responsible for enabling or not GNSS positional tracking fusion. 
+
+ Default: False
+ """
+ return bool()
+
+ @enable_GNSS_fusion.setter
+ def enable_GNSS_fusion(self, enable_GNSS_fusion: Any) -> None:
+ pass
+
+ @property
+ def tracking_camera_id(self) -> CameraIdentifier:
+ """
+ ID of the camera used for positional tracking. If not specified, the first camera called with the subscribe() method will be used.
+ """
+ return CameraIdentifier()
+
+ @tracking_camera_id.setter
+ def tracking_camera_id(self, tracking_camera_id: Any) -> None:
+ pass
+
+ @property
+ def gnss_calibration_parameters(self) -> GNSSCalibrationParameters:
+ """
+ Control the VIO / GNSS calibration process.
+ """
+ return GNSSCalibrationParameters()
+
+ @gnss_calibration_parameters.setter
+ def gnss_calibration_parameters(self, gnss_calibration_parameters: Any) -> None:
+ pass
+
+ @property
+ def base_footprint_to_baselink_transform(self) -> Transform:
+ """
+ Position and orientation of the base footprint with respect to the baselink.
+ This transform represents a basis change from the base footprint coordinate frame to the baselink coordinate frame.
+ """
+ return Transform()
+
+ @base_footprint_to_baselink_transform.setter
+ def base_footprint_to_baselink_transform(self, base_footprint_to_baselink_transform: Any) -> None:
+ pass
+
+
+class SpatialMappingFusionParameters:
+ """
+ Holds the options used for initializing the spatial mapping fusion module.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def range_meter(self) -> float:
+ """
+ Depth range in meters.
+
+ Can be different from the value set by InitParameters.depth_maximum_distance.
+
+ Default: 0. In this case, the range is computed from resolution_meter
+ and from the current internal parameters to fit your application.
+ """
+ return float()
+
+ @range_meter.setter
+ def range_meter(self, range_meter: Any) -> None:
+ pass
+
+ @property
+ def decay(self) -> float:
+ """
+ Adjusts the weighting factor for the current depth during the integration process.
+
+ Setting it to 0 discards all previous data and solely integrates the current depth.
+
+ Default: 1, which results in the complete integration and fusion of the current depth with the previously integrated depth.
+ """
+ return float()
+
+ @decay.setter
+ def decay(self, decay: Any) -> None:
+ pass
+
+ @property
+ def enable_forget_past(self) -> bool:
+ """
+ Default: False
+ """
+ return bool()
+
+ @enable_forget_past.setter
+ def enable_forget_past(self, enable_forget_past: Any) -> None:
+ pass
+
+ @property
+ def map_type(self) -> SPATIAL_MAP_TYPE:
+ """
+ The type of spatial map to be created. This dictates the format that will be used for the mapping (e.g. mesh, point cloud). See SPATIAL_MAP_TYPE.
+
+ Default: SPATIAL_MAP_TYPE.MESH.
+ """
+ return SPATIAL_MAP_TYPE()
+
+ @map_type.setter
+ def map_type(self, map_type: Any) -> None:
+ pass
+
+ @property
+ def use_chunk_only(self) -> bool:
+ """
+ Set to False if you want to ensure consistency between the mesh and its inner chunk data.
+
+ .. note::
+ Updating the mesh is time-consuming. Setting this to True results in better performance.
+
+
+ Default: False
+ """
+ return bool()
+
+ @use_chunk_only.setter
+ def use_chunk_only(self, use_chunk_only: Any) -> None:
+ pass
+
+ @property
+ def stability_counter(self) -> int:
+ """
+ Controls the integration rate of the current depth into the mapping process.
+ This parameter controls how many times a stable 3D point should be seen before it is integrated into the spatial mapping. 
+ Default: 0, this will define the stability counter based on the mesh resolution, the higher the resolution, the higher the stability counter. + """ + return int() + + @stability_counter.setter + def stability_counter(self, stability_counter: Any) -> None: + pass + + @property + def disparity_std(self) -> float: + """ + Control the disparity noise (standard deviation) in px. set a very small value (<0.1) if the depth map of the scene is accurate. Set a big value (>0.5) if the depth map is noisy. + + Default: 0.3 + """ + return float() + + @disparity_std.setter + def disparity_std(self, disparity_std: Any) -> None: + pass + + @property + def resolution_meter(self) -> float: + """ + Spatial mapping resolution in meters. + + Default: 0.05 m + """ + return float() + + @resolution_meter.setter + def resolution_meter(self, resolution_meter: Any) -> None: + pass + + @property + def max_memory_usage(self) -> int: + """ + The maximum CPU memory (in MB) allocated for the meshing process. + + Default: 2048 MB + """ + return int() + + @max_memory_usage.setter + def max_memory_usage(self, max_memory_usage: Any) -> None: + pass + + +class BodyTrackingFusionParameters: + """ + Holds the options used to initialize the body tracking module of the Fusion. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_body_fitting(self) -> bool: + """ + Defines if the body fitting will be applied. + + Default: False + .. note:: + If you enable it and the camera provides data as BODY_18 the fused body format will be BODY_34. + """ + return bool() + + @enable_body_fitting.setter + def enable_body_fitting(self, enable_body_fitting: Any) -> None: + pass + + @property + def enable_tracking(self) -> bool: + """ + Defines if the object detection will track objects across images flow. + + Default: True + """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + +class BodyTrackingFusionRuntimeParameters: + """ + Holds the options used to change the behavior of the body tracking module at runtime. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def skeleton_minimum_allowed_keypoints(self) -> int: + """ + If the fused skeleton has less than skeleton_minimum_allowed_keypoints keypoints, it will be discarded. + + Default: -1. + """ + return int() + + @skeleton_minimum_allowed_keypoints.setter + def skeleton_minimum_allowed_keypoints(self, skeleton_minimum_allowed_keypoints: Any) -> None: + pass + + @property + def skeleton_smoothing(self) -> float: + """ + This value controls the smoothing of the tracked or fitted fused skeleton. + + It is ranged from 0 (low smoothing) and 1 (high smoothing). + \n Default: 0. + """ + return float() + + @skeleton_smoothing.setter + def skeleton_smoothing(self, skeleton_smoothing: Any) -> None: + pass + + @property + def skeleton_minimum_allowed_camera(self) -> int: + """ + If a skeleton was detected in less than skeleton_minimum_allowed_camera cameras, it will be discarded. + + Default: -1. + """ + return int() + + @skeleton_minimum_allowed_camera.setter + def skeleton_minimum_allowed_camera(self, skeleton_minimum_allowed_camera: Any) -> None: + pass + + +class ObjectDetectionFusionParameters: + """ + Holds the options used to initialize the object detection module of the Fusion + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_tracking(self) -> bool: + """ + Defines if the object detection will track objects across images flow. + + Default: True. 
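+
+ Example (an illustrative sketch; assumes a sl.Fusion instance named ``fusion`` whose senders already run object detection):
+
+ .. code-block:: text
+
+     od_fusion_params = sl.ObjectDetectionFusionParameters()
+     od_fusion_params.enable_tracking = True
+     err = fusion.enable_object_detection(od_fusion_params)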
+ """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + +class CameraMetrics: + """ + Holds the metrics of a sender in the fusion process. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def received_fps(self) -> float: + """ + FPS of the received data. + """ + return float() + + @received_fps.setter + def received_fps(self, received_fps: Any) -> None: + pass + + @property + def ratio_detection(self) -> float: + """ + Skeleton detection percent during the last second. + Number of frames with at least one detection / number of frames, over the last second. + A low value means few detections occured lately for this sender. + """ + return float() + + @ratio_detection.setter + def ratio_detection(self, ratio_detection: Any) -> None: + pass + + @property + def is_present(self) -> bool: + """ + Is set to false if no data in this batch of metrics. + """ + return bool() + + @is_present.setter + def is_present(self, is_present: Any) -> None: + pass + + @property + def received_latency(self) -> float: + """ + Latency (in second) of the received data. + Timestamp difference between the time when the data are sent and the time they are received (mostly introduced when using the local network workflow). + """ + return float() + + @received_latency.setter + def received_latency(self, received_latency: Any) -> None: + pass + + @property + def delta_ts(self) -> float: + """ + Average data acquisition timestamp difference. + Average standard deviation of sender's period since the start. + """ + return float() + + @delta_ts.setter + def delta_ts(self, delta_ts: Any) -> None: + pass + + @property + def synced_latency(self) -> float: + """ + Latency (in seconds) after Fusion synchronization. + Difference between the timestamp of the data received and the timestamp at the end of the Fusion synchronization. + """ + return float() + + @synced_latency.setter + def synced_latency(self, synced_latency: Any) -> None: + pass + + +class FusionMetrics: + """ + Holds the metrics of the fusion process. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def mean_stdev_between_camera(self) -> float: + """ + Standard deviation of the data timestamp fused, the lower the better. + """ + return float() + + @mean_stdev_between_camera.setter + def mean_stdev_between_camera(self, mean_stdev_between_camera: Any) -> None: + pass + + @property + def camera_individual_stats(self) -> dict: + """ + Sender metrics. + """ + return {} + + @camera_individual_stats.setter + def camera_individual_stats(self, camera_individual_stats: Any) -> None: + pass + + @property + def mean_camera_fused(self) -> float: + """ + Mean number of camera that provides data during the past second. + """ + return float() + + @mean_camera_fused.setter + def mean_camera_fused(self, mean_camera_fused: Any) -> None: + pass + + def reset(self) -> None: + """ + Reset the current metrics. + """ + pass + + +class CameraIdentifier: + """ + Used to identify a specific camera in the Fusion API + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def serial_number(self) -> int: + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + +class ECEF: + """ + Represents a world position in ECEF format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def z(self) -> double: + """ + z coordinate of ECEF. 
+ """ + return double() + + @z.setter + def z(self, z: Any) -> None: + pass + + @property + def y(self) -> double: + """ + y coordinate of ECEF. + """ + return double() + + @y.setter + def y(self, y: Any) -> None: + pass + + @property + def x(self) -> double: + """ + x coordinate of ECEF. + """ + return double() + + @x.setter + def x(self, x: Any) -> None: + pass + + +class LatLng: + """ + Represents a world position in LatLng format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def get_latitude(self, in_radian : bool = True) -> None: + """ + Get the latitude coordinate + + :param in_radian: Is the output should be in radian or degree. + :return: Latitude in radian or in degree depending in_radian parameter. + """ + pass + + def get_longitude(self, in_radian = True) -> None: + """ + Get the longitude coordinate + + :param in_radian: Is the output should be in radian or degree. + :return: Longitude in radian or in degree depending in_radian parameter. + """ + pass + + def get_altitude(self) -> None: + """ + Get the altitude coordinate + + :return: Altitude coordinate in meters. + """ + pass + + def get_coordinates(self, in_radian = True) -> None: + """ + Get the coordinates in radians (default) or in degrees. + + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param in_radian: Should the output be expressed in radians or degrees. + """ + pass + + def set_coordinates(self, latitude: double, longitude: double, altitude: double, in_radian = True) -> None: + """ + Set the coordinates in radians (default) or in degrees. + + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + \@param in_radian: Is input are in radians or in degrees. + """ + pass + + +class UTM: + """ + Represents a world position in UTM format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def easting(self) -> double: + """ + Easting coordinate. + """ + return double() + + @easting.setter + def easting(self, easting: Any) -> None: + pass + + @property + def gamma(self) -> double: + """ + Gamma coordinate. + """ + return double() + + @gamma.setter + def gamma(self, gamma: Any) -> None: + pass + + @property + def UTM_zone(self) -> str: + """ + UTMZone of the coordinate. + """ + return str() + + @UTM_zone.setter + def UTM_zone(self, UTM_zone: Any) -> None: + pass + + @property + def northing(self) -> double: + """ + Northing coordinate. + """ + return double() + + @northing.setter + def northing(self, northing: Any) -> None: + pass + + +class ENU: + """ + Represent a world position in ENU format. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def east(self) -> double: + """ + East parameter + """ + return double() + + @east.setter + def east(self, east: Any) -> None: + pass + + @property + def up(self) -> double: + """ + Up parameter + """ + return double() + + @up.setter + def up(self, up: Any) -> None: + pass + + @property + def north(self) -> double: + """ + North parameter + """ + return double() + + @north.setter + def north(self, north: Any) -> None: + pass + + +class GeoConverter: + """ + Purely static class for Geo functions. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def ecef2latlng(input: ECEF) -> LatLng: + """ + Convert ECEF coordinates to Lat/Long coordinates. + """ + return LatLng() + + def ecef2utm(input: ECEF) -> UTM: + """ + Convert ECEF coordinates to UTM coordinates. 
+ """ + return UTM() + + def latlng2ecef(input: LatLng) -> ECEF: + """ + Convert Lat/Long coordinates to ECEF coordinates. + """ + return ECEF() + + def latlng2utm(input: LatLng) -> UTM: + """ + Convert Lat/Long coordinates to UTM coordinates. + """ + return UTM() + + def utm2ecef(input: UTM) -> ECEF: + """ + Convert UTM coordinates to ECEF coordinates. + """ + return ECEF() + + def utm2latlng(input: UTM) -> LatLng: + """ + Convert UTM coordinates to Lat/Long coordinates. + """ + return LatLng() + + +class GeoPose: + """ + Holds Geo reference position. + Holds geographic reference position information. + + This class represents a geographic pose, including position, orientation, and accuracy information. + It is used for storing and manipulating geographic data, such as latitude, longitude, altitude, + pose matrices, covariances, and timestamps. + + The pose data is defined in the East-North-Up (ENU) reference frame. The ENU frame is a local + Cartesian coordinate system commonly used in geodetic applications. In this frame, the X-axis + points towards the East, the Y-axis points towards the North, and the Z-axis points upwards. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def heading(self) -> double: + """ + The heading (orientation) of the pose in radians (rad). It indicates the direction in which the object or observer is facing, with 0 degrees corresponding to North and increasing in a counter-clockwise direction. + """ + return double() + + @heading.setter + def heading(self, heading: Any) -> None: + pass + + @property + def horizontal_accuracy(self) -> double: + """ + The horizontal accuracy of the pose in meters. + """ + return double() + + @horizontal_accuracy.setter + def horizontal_accuracy(self, horizontal_accuracy: Any) -> None: + pass + + @property + def pose_data(self) -> Transform: + """ + The 4x4 matrix defining the pose in the East-North-Up (ENU) coordinate system. + """ + return Transform() + + @pose_data.setter + def pose_data(self, pose_data: Any) -> None: + pass + + @property + def vertical_accuracy(self) -> double: + """ + The vertical accuracy of the pose in meters. + """ + return double() + + @vertical_accuracy.setter + def vertical_accuracy(self, vertical_accuracy: Any) -> None: + pass + + @pose_covariance.setter + def pose_covariance(self, pose_covariance: Any) -> None: + pass + + @property + def latlng_coordinates(self) -> LatLng: + """ + The latitude, longitude, and altitude coordinates of the pose. + """ + return LatLng() + + @latlng_coordinates.setter + def latlng_coordinates(self, latlng_coordinates: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + The timestamp associated with the GeoPose. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def pose_covariance(self) -> np.array[float]: + """ + The pose covariance matrix in ENU. + """ + return np.array[float]() + + +class GNSSData: + """ + Class containing GNSS data to be used for positional tracking as prior. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def gnss_mode(self) -> GNSS_MODE: + """ + Represents the current mode of GNSS. + """ + return GNSS_MODE() + + @gnss_mode.setter + def gnss_mode(self, gnss_mode: Any) -> None: + pass + + @property + def ts(self) -> Timestamp: + """ + Timestamp of the GNSS position (must be aligned with the camera time reference). 
+ """ + return Timestamp() + + @ts.setter + def ts(self, ts: Any) -> None: + pass + + @property + def gnss_status(self) -> GNSS_STATUS: + """ + Represents the current status of GNSS. + """ + return GNSS_STATUS() + + @gnss_status.setter + def gnss_status(self, gnss_status: Any) -> None: + pass + + @property + def latitude_std(self) -> float: + """ + Latitude standard deviation. + """ + return float() + + @latitude_std.setter + def latitude_std(self, latitude_std: Any) -> None: + pass + + @property + def position_covariances(self) -> list[float]: + """ + Covariance of the position in meter (must be expressed in the ENU coordinate system). + For eph, epv GNSS sensors, set it as follow: ```{eph*eph, 0, 0, 0, eph*eph, 0, 0, 0, epv*epv}```. + """ + return list[float]() + + @position_covariances.setter + def position_covariances(self, position_covariances: Any) -> None: + pass + + @property + def longitude_std(self) -> float: + """ + Longitude standard deviation. + """ + return float() + + @longitude_std.setter + def longitude_std(self, longitude_std: Any) -> None: + pass + + @property + def altitude_std(self) -> float: + """ + Altitude standard deviation + """ + return float() + + @altitude_std.setter + def altitude_std(self, altitude_std: Any) -> None: + pass + + def get_coordinates(self, in_radian = True) -> tuple[float, float, float]: + """ + Get the coordinates of the sl.GNSSData. + The sl.LatLng coordinates could be expressed in degrees or radians. + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param is_radian: Should the output be expressed in radians or degrees. + """ + return tuple[float, float, float]() + + def set_coordinates(self, latitude: double, longitude: double, altitude: double, in_radian = True) -> None: + """ + Set the sl.LatLng coordinates of sl.GNSSData. + The sl.LatLng coordinates could be expressed in degrees or radians. + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param is_radian: Are the inputs expressed in radians or in degrees. + """ + pass + + +class SynchronizationParameter: + """ + Class containing GNSS data to be used for positional tracking as prior. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def windows_size(self) -> double: + return double() + + @windows_size.setter + def windows_size(self, windows_size: Any) -> None: + pass + + @property + def data_source_timeout(self) -> double: + return double() + + @data_source_timeout.setter + def data_source_timeout(self, data_source_timeout: Any) -> None: + pass + + @property + def maximum_lateness(self) -> double: + return double() + + @maximum_lateness.setter + def maximum_lateness(self, maximum_lateness: Any) -> None: + pass + + @property + def keep_last_data(self) -> bool: + return bool() + + @keep_last_data.setter + def keep_last_data(self, keep_last_data: Any) -> None: + pass + + +class InitFusionParameters: + """ + Holds the options used to initialize the Fusion object. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def verbose(self) -> bool: + """ + Enable the verbosity mode of the SDK. + """ + return bool() + + @verbose.setter + def verbose(self, verbose: Any) -> None: + pass + + @property + def maximum_working_resolution(self) -> Resolution: + """ + Sets the maximum resolution for all Fusion outputs, such as images and measures. 
+ + The default value is (-1, -1), which allows the Fusion to automatically select the optimal resolution for the best quality/runtime ratio. + + - For images, the output resolution can be up to the native resolution of the camera. + - For measures involving depth, the output resolution can be up to the maximum working resolution. + + Setting this parameter to (-1, -1) will ensure the best balance between quality and performance for depth measures. + """ + return Resolution() + + @maximum_working_resolution.setter + def maximum_working_resolution(self, maximum_working_resolution: Any) -> None: + pass + + @property + def coordinate_units(self) -> UNIT: + """ + This parameter allows you to select the unit to be used for all metric values of the SDK (depth, point cloud, tracking, mesh, and others). + Default : UNIT "UNIT::MILLIMETER" + """ + return UNIT() + + @coordinate_units.setter + def coordinate_units(self, coordinate_units: Any) -> None: + pass + + @property + def timeout_period_number(self) -> int: + """ + If specified change the number of period necessary for a source to go in timeout without data. For example, if you set this to 5 then, if any source do not receive data during 5 period, these sources will go to timeout and will be ignored. + """ + return int() + + @timeout_period_number.setter + def timeout_period_number(self, timeout_period_number: Any) -> None: + pass + + @property + def sdk_gpu_id(self) -> int: + """ + NVIDIA graphics card id to use. + + By default the SDK will use the most powerful NVIDIA graphics card found. + \n However, when running several applications, or using several cameras at the same time, splitting the load over available GPUs can be useful. + \n This parameter allows you to select the GPU used by the sl.Camera using an ID from 0 to n-1 GPUs in your PC. + \n Default: -1 + .. note:: + A non-positive value will search for all CUDA capable devices and select the most powerful. + """ + return int() + + @sdk_gpu_id.setter + def sdk_gpu_id(self, sdk_gpu_id: Any) -> None: + pass + + @property + def coordinate_system(self) -> COORDINATE_SYSTEM: + """ + Positional tracking, point clouds and many other features require a given COORDINATE_SYSTEM to be used as reference. + This parameter allows you to select the COORDINATE_SYSTEM used by the Camera to return its measures. + \n This defines the order and the direction of the axis of the coordinate system. + \n Default : COORDINATE_SYSTEM "COORDINATE_SYSTEM::IMAGE" + """ + return COORDINATE_SYSTEM() + + @coordinate_system.setter + def coordinate_system(self, coordinate_system: Any) -> None: + pass + + @property + def output_performance_metrics(self) -> bool: + """ + It allows users to extract some stats of the Fusion API like drop frame of each camera, latency, etc... + """ + return bool() + + @output_performance_metrics.setter + def output_performance_metrics(self, output_performance_metrics: Any) -> None: + pass + + @property + def synchronization_parameters(self) -> SynchronizationParameter: + """ + Specifies the parameters used for data synchronization during fusion. + + The SynchronizationParameter struct encapsulates the synchronization parameters that control the data fusion process. 
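+
+ Example (an illustrative sketch; the values are placeholders, not recommended settings):
+
+ .. code-block:: text
+
+     init_fusion_params = sl.InitFusionParameters()
+     sync_params = sl.SynchronizationParameter()
+     sync_params.keep_last_data = True
+     init_fusion_params.synchronization_parameters = sync_params
+     fusion = sl.Fusion()
+     err = fusion.init(init_fusion_params)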
+ """ + return SynchronizationParameter() + + @synchronization_parameters.setter + def synchronization_parameters(self, synchronization_parameters: Any) -> None: + pass + + def __dealloc__(self) -> None: + pass + + +class Fusion: + """ + Holds Fusion process data and functions + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def init(self, init_fusion_parameters : InitFusionParameters) -> FUSION_ERROR_CODE: + """ + Initialize the fusion module with the requested parameters. + :param init_parameters: Initialization parameters. + :return: ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def close(self) -> None: + """ + Will deactivate all the fusion modules and free internal data. + """ + pass + + def subscribe(self, uuid : CameraIdentifier, communication_parameters: CommunicationParameters, pose: Transform) -> FUSION_ERROR_CODE: + """ + Set the specified camera as a data provider. + :param uuid: The requested camera identifier. + :param communication_parameters: The communication parameters to connect to the camera. + :param pose: The World position of the camera, regarding the other camera of the setup. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def unsubscribe(self, uuid : CameraIdentifier) -> FUSION_ERROR_CODE: + """ + Remove the specified camera from data provider. + :param uuid: The requested camera identifier. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def update_pose(self, uuid : CameraIdentifier, pose: Transform) -> FUSION_ERROR_CODE: + """ + Updates the specified camera position inside fusion WORLD. + :param uuid: The requested camera identifier. + :param pose: The World position of the camera, regarding the other camera of the setup. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def get_process_metrics(self) -> tuple[FUSION_ERROR_CODE, FusionMetrics]: + """ + Get the metrics of the Fusion process, for the fused data as well as individual camera provider data. + :param metrics: The process metrics. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + :return: The process metrics. + """ + return tuple[FUSION_ERROR_CODE, FusionMetrics]() + + def get_sender_state(self) -> dict: + """ + Returns the state of each connected data senders. + :return: The individual state of each connected senders. + """ + return {} + + def process(self) -> FUSION_ERROR_CODE: + """ + Runs the main function of the Fusion, this trigger the retrieve and synchronization of all connected senders and updates the enabled modules. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def enable_body_tracking(self, params : BodyTrackingFusionParameters) -> FUSION_ERROR_CODE: + """ + Enables the body tracking fusion module. + :param params: Structure containing all specific parameters for body tracking fusion. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. 
+ """ + return FUSION_ERROR_CODE() + + def retrieve_bodies(self, bodies : Bodies, parameters : BodyTrackingFusionRuntimeParameters, uuid : CameraIdentifier = CameraIdentifier(0), reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves the body data, can be the fused data (default), or the raw data provided by a specific sender. + :param bodies: The fused bodies will be saved into this objects. + :param parameters: Body detection runtime settings, can be changed at each detection. + :param uuid: The id of the sender. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def enable_object_detection(self, params = ObjectDetectionFusionParameters()) -> FUSION_ERROR_CODE: + """ + Enables the object detection fusion module. + :param params: Structure containing all specific parameters for object detection fusion. + \n For more information, see the ObjectDetectionFusionParameters documentation. + :return: SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_objects_all_od_groups(self, objs, reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves all the fused objects data. + :param objs: The fused objects will be saved into this dictionary of objects. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_objects_one_od_group(self, objs, fused_od_group_name, reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves the fused objects of a given fused OD group. + :param objs: The fused objects will be saved into this objects. + :param fused_od_group_name: The name of the fused objects group to retrieve. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_raw_objects_all_ids(self, objs, uuid) -> FUSION_ERROR_CODE: + """ + Retrieves all the raw objects data provided by a specific sender. + :param objs: The fused objects will be saved into this dictionary of objects. + :param uuid: Retrieve the raw data provided by this sender. + """ + return FUSION_ERROR_CODE() + + def retrieve_raw_objects_one_id(self, py_objects, uuid, instance_id) -> FUSION_ERROR_CODE: + """ + Retrieves the raw objects data provided by a specific sender and a specific instance id. + :param objs: The fused objects will be saved into this objects. + :param uuid: Retrieve the raw data provided by this sender. + :param instance_id: Retrieve the objects inferred by the model with this ID only. + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def disable_objects_detection(self) -> None: + """ + Disable the body fusion tracking module. 
+ """ + pass + + def retrieve_image(self, mat, uuid, resolution = Resolution(0, 0)) -> FUSION_ERROR_CODE: + """ + Returns the current sl.VIEW.LEFT of the specified camera, the data is synchronized. + :param mat: the CPU BGRA image of the requested camera. + :param resolution: the requested resolution of the output image, can be lower or equal (default) to the original image resolution. + :param uuid: If set to a sender serial number (different from 0), this will retrieve the raw data provided by this sender. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_measure(self, mat, uuid, measure: MEASURE, resolution = Resolution(0, 0), reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Returns the current measure of the specified camera, the data is synchronized. + :param mat: the CPU data of the requested camera. + :param uuid: The id of the sender. + :param measure: measure: the requested measure type, by default DEPTH (F32_C1). + :param resolution: the requested resolution of the output image, can be lower or equal (default) to the original image resolution. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def disable_body_tracking(self) -> None: + """ + Disable the body fusion tracking module. + """ + pass + + def enable_positionnal_tracking(self, parameters : PositionalTrackingFusionParameters) -> FUSION_ERROR_CODE: + """ + Enables positional tracking fusion module. + :param parameters: A structure containing all the PositionalTrackingFusionParameters that define positional tracking fusion module. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def ingest_gnss_data(self, gnss_data : GNSSData) -> FUSION_ERROR_CODE: + """ + Ingest GNSS data from an external sensor into the fusion module. + :param gnss_data: The current GNSS data to combine with the current positional tracking data. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def get_position(self, camera_pose : Pose, reference_frame : REFERENCE_FRAME = REFERENCE_FRAME.WORLD, uuid: CameraIdentifier = CameraIdentifier(), position_type : POSITION_TYPE = POSITION_TYPE.FUSION) -> POSITIONAL_TRACKING_STATE: + """ + Get the Fused Position referenced to the first camera subscribed. If uuid is specified then project position on the referenced camera. + :param camera_pose: Will contain the fused position referenced by default in world (world is given by the calibration of the cameras system). + :param reference_frame: Defines the reference from which you want the pose to be expressed. Default : REFERENCE_FRAME.WORLD. + :param uuid: If set to a sender serial number (different from 0), this will retrieve position projected on the requested camera if position_type is equal to POSITION_TYPE.FUSION or raw sender position if position_type is equal to POSITION_TYPE.RAW. + :param position_type: Select if the position should the fused position re-projected in the camera with uuid or if the position should be the raw position (without fusion) of camera with uui. 
+ :return: POSITIONAL_TRACKING_STATE is the current state of the tracking process.
+ """
+ return POSITIONAL_TRACKING_STATE()
+
+ def get_fused_positional_tracking_status(self) -> FusedPositionalTrackingStatus:
+ """
+ Returns the current status of the fused positional tracking.
+ :return: The current FusedPositionalTrackingStatus of the fusion process.
+ """
+ return FusedPositionalTrackingStatus()
+
+ def get_current_gnss_data(self, gnss_data : GNSSData) -> POSITIONAL_TRACKING_STATE:
+ """
+ Returns the last synchronized GNSS data.
+ :param gnss_data: Last synchronized GNSS data. (Direction: out)
+ :return: POSITIONAL_TRACKING_STATE is the current state of the tracking process.
+ """
+ return POSITIONAL_TRACKING_STATE()
+
+ def get_geo_pose(self, pose : GeoPose) -> GNSS_FUSION_STATUS:
+ """
+ Returns the current GeoPose.
+ :param pose: The current GeoPose. (Direction: out)
+ :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+ """
+ return GNSS_FUSION_STATUS()
+
+ def geo_to_camera(self, input : LatLng, output : Pose) -> GNSS_FUSION_STATUS:
+ """
+ Converts a latitude / longitude into a position in the sl.Fusion coordinate system.
+ :param input: The latitude / longitude to be converted into the sl.Fusion coordinate system. (Direction: in)
+ :param output: Converted position in the sl.Fusion coordinate system. (Direction: out)
+ :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+ """
+ return GNSS_FUSION_STATUS()
+
+ def camera_to_geo(self, input : Pose, output : GeoPose) -> GNSS_FUSION_STATUS:
+ """
+ Converts a position in the sl.Fusion coordinate system into global world coordinates.
+ :param input: Position to convert into global world coordinates. (Direction: in)
+ :param output: Converted position in global world coordinates. (Direction: out)
+ :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+ """
+ return GNSS_FUSION_STATUS()
+
+ def get_current_timestamp(self) -> Timestamp:
+ """
+ Returns the current fusion timestamp, aligned with the synchronized GNSS and camera data.
+ :return: The current fusion timestamp.
+ """
+ return Timestamp()
+
+ def disable_positionnal_tracking(self) -> None:
+ """
+ Disables the positional tracking fusion module.
+
+ The positional tracking is immediately stopped. If a file path is given, saveAreaMap(area_file_path) will be called asynchronously. See getAreaExportState() to get the exportation state.
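+
+ A minimal sketch of the positional tracking fusion lifecycle (illustrative only; ``fusion`` is assumed to be an sl.Fusion instance with already-subscribed senders):
+
+ .. code-block:: text
+
+ params = sl.PositionalTrackingFusionParameters()
+ fusion.enable_positionnal_tracking(params)
+ pose = sl.Pose()
+ state = fusion.get_position(pose) # fused position, expressed in REFERENCE_FRAME.WORLD by default
+ fusion.disable_positionnal_tracking()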
+ """ + pass + + def ENU_to_geo(self, input: ENU, output: LatLng) -> FUSION_ERROR_CODE: + """ + Convert ENU to LatLng + + Concert an ENU position into LatLng + """ + return FUSION_ERROR_CODE() + + def geo_to_ENU(self, input : LatLng, out : ENU) -> FUSION_ERROR_CODE: + """ + Convert LatLng to ENU + + Convert am LatLng to ENU + """ + return FUSION_ERROR_CODE() + + def get_current_gnss_calibration_std(self) -> tuple[GNSS_FUSION_STATUS, float, np.array]: + """ + Get the current calibration uncertainty obtained during calibration process. + :return: sl.GNSS_FUSION_STATUS representing current initialisation status. + :return: Output yaw uncertainty. + :return: Output position uncertainty. + # + """ + return tuple[GNSS_FUSION_STATUS, float, np.array]() + + def get_geo_tracking_calibration(self) -> Transform: + """ + Get the calibration found between VIO and GNSS. + + :return: sl.Transform is the calibration found between VIO and GNSS during calibration process. + # + """ + return Transform() + + def enable_spatial_mapping(self, parameters) -> FUSION_ERROR_CODE: + """ + Initializes and starts the spatial mapping processes. + + The spatial mapping will create a geometric representation of the scene based on both tracking data and 3D point clouds. + + The resulting output can be a Mesh or a FusedPointCloud. It can be be obtained by calling retrieve_spatial_map_async(). + Note that retrieve_spatial_map_async() should be called after request_spatial_map_async(). + + \param parameters The structure containing all the specific parameters for the spatial mapping. Default: a balanced parameter preset between geometric fidelity and output file size. For more information, see the SpatialMappingParameters documentation. + :return: SUCCESS if everything went fine, FUSION_ERROR_CODE "FUSION_ERROR_CODE::FAILURE" otherwise. + + .. note:: + The tracking (enable_positional_tracking()) needs to be enabled to use the spatial mapping. + + .. note:: + Lower SpatialMappingParameters.range_meter and SpatialMappingParameters.resolution_meter for higher performance. + + .. warning:: This fuction is only available for INTRA_PROCESS communication type. + """ + return FUSION_ERROR_CODE() + + def request_spatial_map_async(self) -> None: + """ + Starts the spatial map generation process in a non blocking thread from the spatial mapping process. + + The spatial map generation can take a long time depending on the mapping resolution and covered area. This function will trigger the generation of a mesh without blocking the program. + You can get info about the current generation using get_spatial_map_request_status_async(), and retrieve the mesh using request_spatial_map_async(...) . + + .. note:: + Only one mesh can be generated at a time. If the previous mesh generation is not over, new calls of the function will be ignored. + + # + """ + pass + + def get_spatial_map_request_status_async(self) -> FUSION_ERROR_CODE: + """ + Returns the spatial map generation status. This status allows to know if the mesh can be retrieved by calling retrieve_spatial_map_async(). + :return: SUCCESS if the mesh is ready and not yet retrieved, otherwise FUSION_ERROR_CODE "FUSION_ERROR_CODE::FAILURE". + + \n See request_spatial_map_async() for an example. + # + """ + return FUSION_ERROR_CODE() + + def retrieve_spatial_map_async(self, py_mesh) -> FUSION_ERROR_CODE: + """ + Retrieves the current generated spatial map. + + After calling request_spatial_map_async(), this method allows you to retrieve the generated mesh or fused point cloud. 
+ \n The Mesh or FusedPointCloud will only be available when get_spatial_map_request_status_async() returns FUSION_ERROR_CODE.SUCCESS.
+
+ :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out)
+ :return: FUSION_ERROR_CODE.SUCCESS if the mesh is retrieved, otherwise FUSION_ERROR_CODE.FAILURE.
+
+ .. note::
+ This method only updates the necessary chunks and adds the new ones in order to improve update speed.
+
+ .. warning:: You should not modify the mesh / fused point cloud between two calls of this method, otherwise it can lead to a corrupted mesh / fused point cloud.
+ See request_spatial_map_async() for an example.
+ """
+ return FUSION_ERROR_CODE()
+
+ def disable_spatial_mapping(self) -> None:
+ """
+ Disables the spatial mapping process.
+
+ The spatial mapping is immediately stopped.
+
+ If the mapping has been enabled, this function will automatically be called by close().
+
+ .. note::
+ This function frees the memory allocated for the spatial mapping, consequently, the spatial map cannot be retrieved after this call.
+ """
+ pass
+
+
+class SVOData:
+ """
+ Class containing SVO data to be ingested/retrieved to/from SVO.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def timestamp_ns(self) -> Timestamp:
+ """
+ Timestamp of the data.
+ """
+ return Timestamp()
+
+ @timestamp_ns.setter
+ def timestamp_ns(self, timestamp_ns: Any) -> None:
+ pass
+
+ @property
+ def key(self) -> str:
+ """
+ Key of the data.
+ """
+ return str()
+
+ @key.setter
+ def key(self, key: Any) -> None:
+ pass
+
+ def get_content_as_string(self) -> str:
+ """
+ Get the content of the sl.SVOData as a string.
+
+ :return: The content of the sl.SVOData as a string.
+ """
+ return str()
+
+ def set_string_content(self, data: str) -> str:
+ """
+ Set the content of the sl.SVOData as a string.
+
+ :param data: The string content to set.
+ """
+ return str()
+
+
+class CameraOneConfiguration:
+ """
+ Structure containing information about the camera sensor.
+
+ Information about the camera is available in the sl.CameraOneInformation structure returned by sl.CameraOne.get_camera_information().
+ .. note::
+ This object is meant to be used as a read-only container, editing any of its fields won't impact the SDK.
+
+ .. warning:: sl.CalibrationParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParametersOne.coordinate_system.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def calibration_parameters_raw(self) -> CameraParameters:
+ """
+ Intrinsic and extrinsic stereo parameters for unrectified/distorted images.
+ """
+ return CameraParameters()
+
+ @property
+ def fps(self) -> float:
+ """
+ FPS of the camera.
+ """
+ return float()
+
+ @property
+ def firmware_version(self) -> int:
+ """
+ Internal firmware version of the camera.
+ """
+ return int()
+
+ @property
+ def calibration_parameters(self) -> CameraParameters:
+ """
+ Intrinsic and extrinsic stereo parameters for rectified/undistorted images.
+ """
+ return CameraParameters()
+
+ @property
+ def resolution(self) -> Resolution:
+ """
+ Resolution of the camera.
+ """
+ return Resolution()
+
+
+class CameraOneInformation:
+ """
+ Structure containing information of a single camera (serial number, model, calibration, etc.).
+ This information is returned by CameraOne.get_camera_information().
+ .. note::
+ This object is meant to be used as a read-only container, editing any of its fields won't impact the SDK.
+
+ .. warning:: CalibrationParameters are returned in COORDINATE_SYSTEM.IMAGE, they are not impacted by the InitParametersOne.coordinate_system
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def camera_model(self) -> MODEL:
+ """
+ Model of the camera (see sl.MODEL).
+ """
+ return MODEL()
+
+ @property
+ def serial_number(self) -> int:
+ """
+ Serial number of the camera.
+ """
+ return int()
+
+ @property
+ def camera_configuration(self) -> CameraOneConfiguration:
+ """
+ Camera configuration parameters stored in a sl.CameraOneConfiguration.
+ """
+ return CameraOneConfiguration()
+
+ @property
+ def sensors_configuration(self) -> SensorsConfiguration:
+ """
+ Sensors configuration parameters stored in a sl.SensorsConfiguration.
+ """
+ return SensorsConfiguration()
+
+ @property
+ def input_type(self) -> INPUT_TYPE:
+ """
+ Input type used in the ZED SDK.
+ """
+ return INPUT_TYPE()
+
+
+class InitParametersOne:
+ """
+ Class containing the options used to initialize the sl.CameraOne object.
+
+ This class allows you to select multiple parameters for the sl.CameraOne such as the selected camera, resolution, coordinate system, and units of measurement.
+ \n Once filled with the desired options, it should be passed to the sl.CameraOne.open() method.
+
+ .. code-block:: text
+
+
+ import pyzed.sl as sl
+
+ def main() :
+ zed = sl.CameraOne() # Create a ZED camera object
+
+ init_params = sl.InitParametersOne() # Set initial parameters
+ init_params.sdk_verbose = 0 # Disable verbose mode
+
+ # Use the camera in LIVE mode
+ init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode
+ init_params.camera_fps = 30 # Set fps at 30
+
+ # Or use the camera in SVO (offline) mode
+ #init_params.set_from_svo_file("xxxx.svo")
+
+ # Or use the camera in STREAM mode
+ #init_params.set_from_stream("192.168.1.12", 30000)
+
+ # Other parameters are left to their default values
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ exit(-1)
+
+ # Close the camera
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+
+
+
+ With its default values, it opens the camera in live mode at sl.RESOLUTION.HD720
+ \n You can customize it to fit your application.
+ .. note::
+ The parameters can also be saved and reloaded using its save() and load() methods.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def coordinate_units(self) -> UNIT:
+ """
+ Unit of spatial data (depth, point cloud, tracking, mesh, etc.) for retrieval.
+
+ Default: sl.UNIT.MILLIMETER
+ """
+ return UNIT()
+
+ @coordinate_units.setter
+ def coordinate_units(self, coordinate_units: Any) -> None:
+ pass
+
+ @property
+ def optional_settings_path(self) -> str:
+ """
+ Optional path where the ZED SDK has to search for the settings file (SNXXXX.conf file).
+
+ This file contains the calibration information of the camera.
+ \n Default: ""
+
+ .. note::
+ The settings file will be searched in the default directory:
+
+ * **Linux**: /usr/local/zed/settings/
+ * **Windows**: C:/ProgramData/stereolabs/settings
+
+ .. note::
+ If a path is specified and no file has been found, the ZED SDK will search the settings file in the default directory.
+
+ .. note::
+ An automatic download of the settings file (through **ZED Explorer** or the installer) will still download the files on the default path.
+
+ .. code-block:: text
+
+ init_params = sl.InitParametersOne() # Set initial parameters
+ home = "/path/to/home"
+ path = home + "/Documents/settings/" # assuming /path/to/home/Documents/settings/SNXXXX.conf exists. Otherwise, it will be searched in /usr/local/zed/settings/
+ init_params.optional_settings_path = path
+ """
+ return str()
+
+ @optional_settings_path.setter
+ def optional_settings_path(self, optional_settings_path: Any) -> None:
+ pass
+
+ @property
+ def coordinate_system(self) -> COORDINATE_SYSTEM:
+ """
+ sl.COORDINATE_SYSTEM to be used as reference for positional tracking, mesh, point clouds, etc.
+
+ This parameter allows you to select the sl.COORDINATE_SYSTEM used by the sl.Camera object to return its measures.
+ \n This defines the order and the direction of the axis of the coordinate system.
+ \n Default: sl.COORDINATE_SYSTEM.IMAGE
+ """
+ return COORDINATE_SYSTEM()
+
+ @coordinate_system.setter
+ def coordinate_system(self, coordinate_system: Any) -> None:
+ pass
+
+ @property
+ def async_grab_camera_recovery(self) -> bool:
+ """
+ Defines the behavior of the automatic camera recovery during the sl.Camera.grab() method call.
+
+ When async is enabled and there's an issue with the communication with the sl.Camera object,
+ sl.Camera.grab() will exit after a short period and return the sl.ERROR_CODE.CAMERA_REBOOTING warning.
+ \n The recovery will run in the background until the correct communication is restored.
+ \n When async_grab_camera_recovery is false, the sl.Camera.grab() method is blocking and will return
+ only once the camera communication is restored or the timeout is reached.
+ \n Default: False
+ """
+ return bool()
+
+ @async_grab_camera_recovery.setter
+ def async_grab_camera_recovery(self, async_grab_camera_recovery: Any) -> None:
+ pass
+
+ @property
+ def enable_hdr(self) -> bool:
+ """
+ Activates HDR support for the current resolution/mode. Only active if the camera supports HDR for this resolution.
+
+ \n Default: False
+ """
+ return bool()
+
+ @enable_hdr.setter
+ def enable_hdr(self, enable_hdr: Any) -> None:
+ pass
+
+ @property
+ def camera_fps(self) -> int:
+ """
+ Requested camera frame rate.
+
+ If set to 0, the highest FPS of the specified camera_resolution will be used.
+ \n Default: 0
+ \n\n See sl.RESOLUTION for a list of supported frame rates.
+ .. note::
+ If the requested camera_fps is unsupported, the closest available FPS will be used.
+ """
+ return int()
+
+ @camera_fps.setter
+ def camera_fps(self, camera_fps: Any) -> None:
+ pass
+
+ @property
+ def svo_real_time_mode(self) -> bool:
+ """
+ Defines whether the sl.Camera object returns frames in real-time mode.
+
+ When playing back an SVO file, each call to sl.Camera.grab() will extract a new frame and use it.
+ \n However, it ignores the real capture rate of the images saved in the SVO file.
+ \n Enabling this parameter will bring the SDK closer to a real simulation when playing back a file by using the images' timestamps.
+ \n Default: False
+ .. note::
+ sl.Camera.grab() will return an error when trying to play too fast, and frames will be dropped when playing too slowly.
+ """
+ return bool()
+
+ @svo_real_time_mode.setter
+ def svo_real_time_mode(self, svo_real_time_mode: Any) -> None:
+ pass
+
+ @property
+ def camera_resolution(self) -> RESOLUTION:
+ """
+ Desired camera resolution.
+ .. note::
+ Small resolutions offer higher framerate and lower computation time.
+
+ .. note::
+ In most situations, sl.RESOLUTION.HD720 at 60 FPS is the best balance between image quality and framerate.
+
+
+ Default:
+ * ZED X/X Mini: sl.RESOLUTION.HD1200
+ * other cameras: sl.RESOLUTION.HD720
+ .. note::
+ Available resolutions are listed here: sl.RESOLUTION.
+ """
+ return RESOLUTION()
+
+ @camera_resolution.setter
+ def camera_resolution(self, camera_resolution: Any) -> None:
+ pass
+
+ @property
+ def sdk_verbose_log_file(self) -> str:
+ """
+ File path to store the ZED SDK logs (if sdk_verbose is enabled).
+
+ The file will be created if it does not exist.
+ \n Default: ""
+
+ .. note::
+ Setting this parameter to any value will redirect all standard output print calls of the entire program.
+
+ .. note::
+ This means that your own standard output print calls will be redirected to the log file.
+
+ .. warning:: The log file won't be cleared after successive executions of the application.
+ .. warning:: This means that it can grow indefinitely if not cleared.
+ """
+ return str()
+
+ @sdk_verbose_log_file.setter
+ def sdk_verbose_log_file(self, sdk_verbose_log_file: Any) -> None:
+ pass
+
+ @property
+ def sdk_verbose(self) -> int:
+ """
+ Enable the ZED SDK verbose mode.
+
+ This parameter allows you to enable the verbosity of the ZED SDK to get a variety of runtime information in the console.
+ \n When developing an application, enabling verbose mode (``sdk_verbose >= 1``) can help you understand the current ZED SDK behavior.
+ \n However, this might not be desirable in a shipped version.
+ \n Default: 0 (no verbose message)
+ .. note::
+ The verbose messages can also be exported into a log file.
+
+ .. note::
+ See sdk_verbose_log_file for more.
+ """
+ return int()
+
+ @sdk_verbose.setter
+ def sdk_verbose(self, sdk_verbose: Any) -> None:
+ pass
+
+ def input(self, input_t) -> None:
+ """
+ The SDK can handle different input types:
+ - Select a camera by its ID (/dev/videoX on Linux, and 0 to N cameras connected on Windows)
+ - Select a camera by its serial number
+ - Open a recorded sequence in the SVO file format
+ - Open a streaming camera from its IP address and port
+
+ This parameter allows you to select the desired input. It should be used like this:
+ .. code-block:: text
+
+ init_params = sl.InitParametersOne() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_camera_id(0) # Selects the camera with ID = 0
+ init_params.input = input_t
+ init_params.set_from_camera_id(0) # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParametersOne() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_serial_number(1010) # Selects the camera with serial number = 1010
+ init_params.input = input_t
+ init_params.set_from_serial_number(1010) # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParametersOne() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_svo_file("/path/to/file.svo") # Selects an SVO file to be read
+ init_params.input = input_t
+ init_params.set_from_svo_file("/path/to/file.svo") # You can also use this
+
+ .. code-block:: text
+
+ init_params = sl.InitParametersOne() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_stream("192.168.1.42")
+ init_params.input = input_t
+ init_params.set_from_stream("192.168.1.42") # You can also use this
+
+
+ Available cameras and their ID/serial number can be listed using get_device_list() and get_streaming_device_list().
+ Each Camera will create its own memory (CPU and GPU), therefore the number of ZED cameras used at the same time can be limited by the configuration of your computer (GPU/CPU memory and capabilities).
+
+ Default: empty
+ See InputType for complementary information.
+
+ .. warning:: Using the ZED SDK Python API, init_params.input.set_from_XXX won't work; use init_params.set_from_XXX instead.
+ @property
+ def input(self) -> InputType:
+ input_t = InputType()
+ input_t.input = self.init.input
+ return input_t
+ """
+ pass
+
+ def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None:
+ """
+ Defines the input source with a camera id to initialize and open an sl.CameraOne object from.
+ :param cam_id: Id of the desired camera to open.
+ :param bus_type: sl.BUS_TYPE of the desired camera to open.
+ """
+ pass
+
+ def set_from_serial_number(self, serial_number) -> None:
+ """
+ Defines the input source with a serial number to initialize and open an sl.CameraOne object from.
+ :param serial_number: Serial number of the desired camera to open.
+ """
+ pass
+
+ def set_from_svo_file(self, svo_input_filename) -> None:
+ """
+ Defines the input source with an SVO file to initialize and open an sl.CameraOne object from.
+ :param svo_input_filename: Path to the desired SVO file to open.
+ """
+ pass
+
+ def set_from_stream(self, sender_ip, port = 30000) -> None:
+ """
+ Defines the input source from a stream to initialize and open an sl.CameraOne object from.
+ :param sender_ip: IP address of the streaming sender.
+ :param port: Port on which to listen. Default: 30000
+ """
+ pass
+
+
+class CameraOne:
+ """
+ Class serving as the main interface with a ZED One camera.
+
+ An sl.CameraOne object is opened with the options set in sl.InitParametersOne via its open() method; images are then acquired with grab() and retrieved with retrieve_image().
+
+ .. code-block:: text
+
+
+ import pyzed.sl as sl
+
+ def main() :
+ zed = sl.CameraOne() # Create a ZED camera object
+
+ init_params = sl.InitParametersOne() # Set initial parameters
+ init_params.sdk_verbose = 0 # Disable verbose mode
+
+ # Use the camera in LIVE mode
+ init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode
+ init_params.camera_fps = 30 # Set fps at 30
+
+ # Or use the camera in SVO (offline) mode
+ #init_params.set_from_svo_file("xxxx.svo")
+
+ # Or use the camera in STREAM mode
+ #init_params.set_from_stream("192.168.1.12", 30000)
+
+ # Other parameters are left to their default values
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ exit(-1)
+
+ # Close the camera
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+
+
+
+ With the default sl.InitParametersOne values, the camera is opened in live mode at sl.RESOLUTION.HD720.
+ \n You can customize the parameters to fit your application.
+ .. note::
+ The parameters can also be saved and reloaded using the save() and load() methods of sl.InitParametersOne.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ def close(self) -> None:
+ """
+ Close an opened camera.
+
+ If open() has been called, this method will close the connection to the camera (or the SVO file) and free the corresponding memory.
+
+ If open() wasn't called or failed, this method won't have any effect.
+
+ .. note::
+ If an asynchronous task is running within the Camera object, like save_area_map(), this method will wait for its completion.
+
+ .. note::
+ To apply a new InitParametersOne, you will need to close the camera first and then open it again with the new InitParametersOne values.
+
+ .. warning:: If the CUDA context was created by open(), this method will destroy it. Therefore you need to make sure to delete your GPU sl.Mat objects before the context is destroyed.
+ """
+ pass
+
+ def open(self, py_init : InitParametersOne = InitParametersOne()) -> ERROR_CODE:
+ """
+ Opens the ZED camera from the provided InitParametersOne.
+ The method will also check the hardware requirements and run a self-calibration.
+ :param py_init: A structure containing all the initial parameters. Default: a preset of InitParametersOne.
+ :return: An error code giving information about the internal process. If ERROR_CODE.SUCCESS is returned, the camera is ready to use. Every other code indicates an error and the program should be stopped.
+
+ Here is the proper way to call this function:
+
+ .. code-block:: text
+
+ zed = sl.CameraOne() # Create a ZED camera object
+
+ init_params = sl.InitParametersOne() # Set configuration parameters
+ init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode
+ init_params.camera_fps = 60 # Set fps at 60
+
+ # Open the camera
+ err = zed.open(init_params)
+ if (err != sl.ERROR_CODE.SUCCESS) :
+ print(repr(err)) # Display the error
+ exit(-1)
+
+
+ .. note::
+ If you are having issues opening a camera, the diagnostic tool provided in the SDK can help you identify the problem.
+
+ - **Windows:** C:\\Program Files (x86)\\ZED SDK\\tools\\ZED Diagnostic.exe
+ - **Linux:** /usr/local/zed/tools/ZED Diagnostic
+ .. note::
+ If this method is called on an already opened camera, close() will be called.
+ """
+ return ERROR_CODE()
+
+ def is_opened(self) -> bool:
+ """
+ Reports if the camera has been successfully opened.
+ It has the same behavior as checking if open() returns ERROR_CODE.SUCCESS.
+ :return: True if the ZED camera is already set up, otherwise False.
+ """
+ return bool()
+
+ def grab(self) -> ERROR_CODE:
+ """
+ This method will grab the latest image from the camera, rectify it, and prepare the data to be retrieved.
+
+ As the data is prepared in this method, its execution can last a few milliseconds, depending on your parameters and your hardware.
+ \n The exact duration will mostly depend on the following parameters:
+
+ - InitParametersOne.camera_resolution : Lower resolutions are faster to compute.
+
+ This method is meant to be called frequently in the main loop of your application.
+ .. note::
+ Since ZED SDK 3.0, this method is blocking. It means that grab() will wait until a new frame is detected and available.
+
+ .. note::
+ If no new frame is available until the timeout is reached, grab() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected.
+
+
+ :return: ERROR_CODE.SUCCESS means that no problem was encountered.
+ .. note::
+ Returned errors can be displayed using ``str()``.
+
+
+ ..
code-block:: text + + image = sl.Mat() + while True: + # Grab an image + if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + zed.retrieve_image(image) # Get the left image + # Use the image for your application + """ + return ERROR_CODE() + + def retrieve_image(self, py_mat, view = VIEW.LEFT, mem_type = MEM.CPU, resolution = Resolution(0, 0)) -> ERROR_CODE: + """ + Retrieves images from the camera (or SVO file). + + Multiple images are available along with a view of various measures for display purposes. + \n Available images and views are listed here. + \n As an example, VIEW.DEPTH can be used to get a gray-scale version of the depth map, but the actual depth values can be retrieved using retrieve_measure() . + \n + \n **Pixels** + \n Most VIEW modes output image with 4 channels as BGRA (Blue, Green, Red, Alpha), for more information see enum VIEW + \n + \n **Memory** + \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called. + \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy. + \n If the provided sl.Mat object is already allocated and matches the requested image format, memory won't be re-allocated. + \n + \n **Image size** + \n By default, images are returned in the resolution provided by Resolution "get_camera_information().camera_configuration.resolution". + \n However, you can request custom resolutions. For example, requesting a smaller image can help you speed up your application. + .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested. + + :param py_mat: The sl.Mat to store the image. (Direction: out) + :param view: Defines the image you want (see VIEW). Default: VIEW.LEFT. (Direction: in) + :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU (you cannot change this default value). (Direction: in) + :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution "Resolution(0,0)", the camera resolution will be taken. Default: (0,0). (Direction: in) + :return: ERROR_CODE.SUCCESS if the method succeeded. + :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example). + :return: ERROR_CODE.FAILURE if another error occurred. + + .. note:: + As this method retrieves the images grabbed by the grab() method, it should be called afterward. + + + .. code-block:: text + + # create sl.Mat objects to store the images + left_image = sl.Mat() + while True: + # Grab an image + if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image + + # Display the center pixel colors + err, left_center = left_image.get_value(left_image.get_width() / 2, left_image.get_height() / 2) + if err == sl.ERROR_CODE.SUCCESS: + print("left_image center pixel R:", int(left_center[0]), " G:", int(left_center[1]), " B:", int(left_center[2])) + else: + print("error:", err) + """ + return ERROR_CODE() + + def set_svo_position(self, frame_number: int) -> None: + """ + Sets the playback cursor to the desired frame number in the SVO file. + + This method allows you to move around within a played-back SVO file. After calling, the next call to grab() will read the provided frame number. + + :param frame_number: The number of the desired frame to be decoded. + + .. 
note::
+ The method works only if the camera is open in SVO playback mode.
+
+
+ .. code-block:: text
+
+
+ import pyzed.sl as sl
+
+ def main():
+ # Create a ZED camera object
+ zed = sl.CameraOne()
+
+ # Set configuration parameters
+ init_params = sl.InitParametersOne()
+ init_params.set_from_svo_file("path/to/my/file.svo")
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ print(repr(err))
+ exit(-1)
+
+ # Loop between frames 0 and 50
+ left_image = sl.Mat()
+ while zed.get_svo_position() < zed.get_svo_number_of_frames() - 1:
+
+ print("Current frame: ", zed.get_svo_position())
+
+ # Loop if we reached frame 50
+ if zed.get_svo_position() == 50:
+ zed.set_svo_position(0)
+
+ # Grab an image
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image
+
+ # Use the image in your application
+
+ # Close the Camera
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+
+ """
+ pass
+
+ def get_svo_position(self) -> int:
+ """
+ Returns the current playback position in the SVO file.
+
+ The position corresponds to the number of frames already read from the SVO file, starting from 0 to n.
+
+ Each grab() call increases this value by one (except when using InitParametersOne.svo_real_time_mode).
+ :return: The current frame position in the SVO file. -1 if the SDK is not reading an SVO.
+
+ .. note::
+ The method works only if the camera is open in SVO playback mode.
+
+
+ See set_svo_position() for an example.
+ """
+ return int()
+
+ def get_svo_number_of_frames(self) -> int:
+ """
+ Returns the number of frames in the SVO file.
+
+ :return: The total number of frames in the SVO file. -1 if the SDK is not reading an SVO.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return int()
+
+ def ingest_data_into_svo(self, data: SVOData) -> ERROR_CODE:
+ """
+ Ingests an SVOData object into the SVO file.
+
+ :return: An error code stating the success, or not.
+
+ The method works only if the camera is open in SVO recording mode.
+ """
+ return ERROR_CODE()
+
+ def get_svo_data_keys(self) -> list:
+ """
+ Get the external channels that can be retrieved from the SVO file.
+
+ :return: A list of keys.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return []
+
+ def retrieve_svo_data(self, key: str, data: dict, ts_begin: Timestamp, ts_end: Timestamp) -> ERROR_CODE:
+ """
+ Retrieves SVO data from the SVO file for the given channel key in the given timestamp range.
+
+ :return: An error code stating the success, or not.
+ :param key: The channel key.
+ :param data: The dict to be filled with SVOData objects, with timestamps as keys.
+ :param ts_begin: The beginning of the range.
+ :param ts_end: The end of the range.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return ERROR_CODE()
+
+ def set_camera_settings(self, settings: VIDEO_SETTINGS, value = -1) -> ERROR_CODE:
+ """
+ Sets the value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+ :param settings: The setting to be set.
+ :param value: The value to set. Default: -1
+ :return: ERROR_CODE to indicate if the method was successful.
+
+ .. note::
+ The method works only if the camera is open in LIVE or STREAM mode.
+
+ .. note::
+ Settings are not exported in the SVO file format.
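+
+ Illustrative usage (GAIN is just one example setting; the value 50 is arbitrary, and -1 is the default, assumed here to hand control back to the automatic mode):
+
+ .. code-block:: text
+
+ err = zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, 50) # manual gain
+ err = zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, -1) # back to the default behavior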
+ """ + return ERROR_CODE() + + def set_camera_settings_range(self, settings: VIDEO_SETTINGS, value_min = -1, value_max = -1) -> ERROR_CODE: + """ + Sets the value of the requested VIDEO_SETTINGS "camera setting" that supports two values (min/max). + + This method only works with the following VIDEO_SETTINGS: + - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE + - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE + - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE + + :param settings: The setting to be set. + :param min: The minimum value that can be reached (-1 or 0 gives full range). + :param max: The maximum value that can be reached (-1 or 0 gives full range). + :return: ERROR_CODE to indicate if the method was successful. + + .. warning:: If VIDEO_SETTINGS settings is not supported or min >= max, it will return ERROR_CODE.INVALID_FUNCTION_PARAMETERS. + .. note:: + The method works only if the camera is open in LIVE or STREAM mode. + + + .. code-block:: text + + # For ZED X based product, set the automatic exposure from 2ms to 5ms. Expected exposure time cannot go beyond those values + zed.set_camera_settings_range(sl.VIDEO_SETTINGS.AEC_RANGE, 2000, 5000); + """ + return ERROR_CODE() + + def set_camera_settings_roi(self, settings: VIDEO_SETTINGS, roi: Rect, reset = False) -> ERROR_CODE: + """ + Overloaded method for VIDEO_SETTINGS.AEC_AGC_ROI which takes a Rect as parameter. + + :param settings: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. + :param roi: Rect that defines the target to be applied for AEC/AGC computation. Must be given according to camera resolution. + :param eye: SIDE on which to be applied for AEC/AGC computation. Default: SIDE.BOTH + :param reset: Cancel the manual ROI and reset it to the full image. Default: False + + .. note:: + The method works only if the camera is open in LIVE or STREAM mode. + + + .. code-block:: text + + roi = sl.Rect(42, 56, 120, 15) + zed.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH) + + """ + return ERROR_CODE() + + def get_camera_settings(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int]: + """ + Returns the current value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.). + + Possible values (range) of each setting are available here. + + :param setting: The requested setting. + :return: ERROR_CODE to indicate if the method was successful. + :return: The current value for the corresponding setting. + + .. code-block:: text + + err, gain = zed.get_camera_settings(sl.VIDEO_SETTINGS.GAIN) + if err == sl.ERROR_CODE.SUCCESS: + print("Current gain value:", gain) + else: + print("error:", err) + + + .. note:: + The method works only if the camera is open in LIVE or STREAM mode. + + .. note:: + Settings are not exported in the SVO file format. + """ + return tuple[ERROR_CODE, int]() + + def get_camera_settings_range(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int, int]: + """ + Returns the values of the requested settings for VIDEO_SETTINGS that supports two values (min/max). + + This method only works with the following VIDEO_SETTINGS: + - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE + - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE + - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE + + Possible values (range) of each setting are available here. + :param setting: The requested setting. + :return: ERROR_CODE to indicate if the method was successful. + :return: The current value of the minimum for the corresponding setting. 
+ :return: The current value of the maximum for the corresponding setting.
+
+ .. code-block:: text
+
+ err, aec_range_min, aec_range_max = zed.get_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE)
+ if err == sl.ERROR_CODE.SUCCESS:
+ print("Current AUTO_EXPOSURE_TIME_RANGE range values ==> min:", aec_range_min, "max:", aec_range_max)
+ else:
+ print("error:", err)
+
+
+ .. note::
+ Works only with the ZED X, which supports low-level controls.
+ """
+ return tuple[ERROR_CODE, int, int]()
+
+ def get_camera_settings_roi(self, setting: VIDEO_SETTINGS, roi: Rect) -> ERROR_CODE:
+ """
+ Returns the current value of the currently used ROI for the camera setting AEC_AGC_ROI.
+
+ :param setting: Must be set to VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. (Direction: in)
+ :param roi: Roi that will be filled. (Direction: out)
+ :return: ERROR_CODE to indicate if the method was successful.
+
+ .. code-block:: text
+
+ roi = sl.Rect()
+ err = zed.get_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi)
+ print("Current ROI for AEC_AGC: " + str(roi.x) + " " + str(roi.y) + " " + str(roi.width) + " " + str(roi.height))
+
+
+ .. note::
+ Works only if the camera is open in LIVE or STREAM mode with VIDEO_SETTINGS.AEC_AGC_ROI.
+
+ .. note::
+ It will return ERROR_CODE.INVALID_FUNCTION_CALL or ERROR_CODE.INVALID_FUNCTION_PARAMETERS otherwise.
+ """
+ return ERROR_CODE()
+
+ def is_camera_setting_supported(self, setting: VIDEO_SETTINGS) -> bool:
+ """
+ Returns whether the video setting is supported by the camera.
+
+ :param setting: The video setting to test. (Direction: in)
+ :return: True if the VIDEO_SETTINGS is supported by the camera, False otherwise.
+ """
+ return bool()
+
+ def get_current_fps(self) -> float:
+ """
+ Returns the current framerate at which the grab() method is successfully called.
+
+ The returned value is based on the difference of camera get_timestamp() "timestamps" between two successful grab() calls.
+
+ :return: The current SDK framerate.
+
+ .. warning:: The returned framerate (number of images grabbed per second) can be lower than InitParametersOne.camera_fps if the grab() function runs slower than the image stream or is called too often.
+
+ .. code-block:: text
+
+ current_fps = zed.get_current_fps()
+ print("Current framerate: ", current_fps)
+ """
+ return float()
+
+ def get_timestamp(self, time_reference: TIME_REFERENCE) -> Timestamp:
+ """
+ Returns the timestamp in the requested TIME_REFERENCE.
+
+ - When requesting the TIME_REFERENCE.IMAGE timestamp, the UNIX nanosecond timestamp of the latest grab() "grabbed" image will be returned.
+ \n This value corresponds to the time at which the entire image was available in the PC memory. As such, it ignores the communication time that corresponds to 2 or 3 frame-times based on the fps (ex: 33.3ms to 50ms at 60fps).
+
+ - When requesting the TIME_REFERENCE.CURRENT timestamp, the current UNIX nanosecond timestamp is returned.
+
+ This function can also be used when playing back an SVO file.
+
+ :param time_reference: The selected TIME_REFERENCE.
+ :return: The Timestamp in nanoseconds. 0 if not available (SVO file without compression).
+
+ .. note::
+ As this function returns UNIX timestamps, the reference it uses is common across several Camera instances.
+
+ \n This can help to organize the grabbed images in a multi-camera application.
+ .. code-block:: text
+
+ last_image_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.IMAGE)
+ current_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.CURRENT)
+ print("Latest image timestamp: ", last_image_timestamp.get_nanoseconds(), "ns from Epoch.")
+ print("Current timestamp: ", current_timestamp.get_nanoseconds(), "ns from Epoch.")
+ """
+ return Timestamp()
+
+ def get_frame_dropped_count(self) -> int:
+ """
+ Returns the number of frames dropped since grab() was called for the first time.
+
+ A dropped frame corresponds to a frame that never made it to the grab method.
+ \n This can happen if two frames were extracted from the camera when grab() is called. The older frame will be dropped so as to always use the latest (which minimizes latency).
+
+ :return: The number of frames dropped since the first grab() call.
+ """
+ return int()
+
+ def get_camera_information(self, resizer = Resolution(0, 0)) -> CameraOneInformation:
+ """
+ Returns the CameraOneInformation associated with the camera.
+
+ :param resizer: You can specify a size different from the default image size to get the scaled camera information. Default: (0, 0), meaning original image size.
+ :return: CameraOneInformation containing the information about the camera.
+ """
+ return CameraOneInformation()
+
+ def get_init_parameters(self) -> InitParametersOne:
+ """
+ Returns the InitParametersOne associated with the Camera object.
+ It corresponds to the structure given as argument to the open() method.
+
+ :return: InitParametersOne containing the parameters used to initialize the Camera object.
+ """
+ return InitParametersOne()
+
+ def get_streaming_parameters(self) -> StreamingParameters:
+ """
+ Returns the StreamingParameters used.
+
+ It corresponds to the structure given as argument to the enable_streaming() method.
+
+ :return: StreamingParameters containing the parameters used for streaming initialization.
+ """
+ return StreamingParameters()
+
+ def get_sensors_data(self, py_sensor_data, time_reference = TIME_REFERENCE.CURRENT) -> ERROR_CODE:
+ """
+ Retrieves the SensorsData (IMU, magnetometer, barometer) at a specific time reference.
+
+ - Calling get_sensors_data with TIME_REFERENCE.CURRENT gives you the latest sensors data received. Getting all the data requires calling this method at 800Hz in a thread.
+ - Calling get_sensors_data with TIME_REFERENCE.IMAGE gives you the sensors data at the time of the latest image grab() "grabbed".
+
+ The SensorsData object contains the previous IMUData structure that was used in ZED SDK v2.X:
+ \n For IMU data, the values are provided in 2 ways:
+
+ - **Time-fused** pose estimation that can be accessed using:
+   * IMUData.get_pose "data.get_imu_data().get_pose()"
+ - **Raw values** from the IMU sensor:
+   * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+   * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+   Both the gyroscope and accelerometer are synchronized.
+
+ The delta time between previous and current values can be calculated using data.imu.timestamp.
+
+ .. note::
+ The IMU quaternion (fused data) is given in the specified COORDINATE_SYSTEM of InitParametersOne.
+
+
+ :param py_sensor_data: The SensorsData variable to store the data. (Direction: out)
+ :param time_reference: Defines the time reference from which you want the data to be expressed. Default: TIME_REFERENCE.CURRENT. (Direction: in)
+ :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+ :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+ :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+ :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **time_reference** is not valid. See Warning.
+
+ .. warning:: In SVO reading mode, the TIME_REFERENCE.CURRENT is currently not available (yielding ERROR_CODE.INVALID_FUNCTION_PARAMETERS).
+ .. warning:: Only the quaternion data and barometer data (if available) at TIME_REFERENCE.IMAGE are available. Other values will be set to 0.
+ """
+ return ERROR_CODE()
+
+ def get_sensors_data_batch(self, py_sensor_data) -> ERROR_CODE:
+ """
+ Retrieves all SensorsData (IMU only) associated to the most recent grabbed frame in the specified COORDINATE_SYSTEM of InitParameters.
+
+ For IMU data, the values are provided in 2 ways:
+
+ - **Time-fused** pose estimation that can be accessed using:
+   * IMUData.get_pose "data.get_imu_data().get_pose()"
+ - **Raw values** from the IMU sensor:
+   * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+   * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+   Both the gyroscope and accelerometer are synchronized.
+
+ The delta time between previous and current values can be calculated using data.imu.timestamp.
+
+ :param py_sensor_data: The SensorsData list to store the data. (Direction: out)
+ :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+ :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+ :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+
+ .. code-block:: text
+
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ sensors_data = []
+ if (zed.get_sensors_data_batch(sensors_data) == sl.ERROR_CODE.SUCCESS):
+ for data in sensors_data:
+ print("IMU data: ", data.imu.get_angular_velocity(), data.imu.get_linear_acceleration())
+ print("IMU pose: ", data.imu.get_pose().get_translation())
+ print("IMU orientation: ", data.imu.get_orientation().get())
+ """
+ return ERROR_CODE()
+
+ def enable_streaming(self, streaming_parameters = StreamingParameters()) -> ERROR_CODE:
+ """
+ Creates a streaming pipeline.
+
+ :param streaming_parameters: A structure containing all the specific parameters for the streaming. Default: a preset of StreamingParameters.
+ :return: ERROR_CODE.SUCCESS if the streaming was successfully started.
+ :return: ERROR_CODE.INVALID_FUNCTION_CALL if open() was not successfully called before.
+ :return: ERROR_CODE.FAILURE if the streaming RTSP protocol was not able to start.
+ :return: ERROR_CODE.NO_GPU_COMPATIBLE if the streaming codec is not supported (in this case, use the H264 codec, which is supported on all NVIDIA GPUs the ZED SDK supports).
+
+ .. code-block:: text
+
+ import pyzed.sl as sl
+
+ def main() :
+ # Create a ZED camera object
+ zed = sl.CameraOne()
+
+ # Set initial parameters
+ init_params = sl.InitParametersOne()
+ init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS :
+ print(repr(err))
+ exit(-1)
+
+ # Enable streaming
+ stream_params = sl.StreamingParameters()
+ stream_params.port = 30000
+ stream_params.bitrate = 8000
+ err = zed.enable_streaming(stream_params)
+ if err != sl.ERROR_CODE.SUCCESS :
+ print(repr(err))
+ exit(-1)
+
+ # Grab data during 500 frames
+ i = 0
+ while i < 500 :
+ if zed.grab() == sl.ERROR_CODE.SUCCESS :
+ i = i+1
+
+ zed.disable_streaming()
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+ """
+ return ERROR_CODE()
+
+ def disable_streaming(self) -> None:
+ """
+ Disables the streaming initiated by enable_streaming().
+ .. note::
+ This method will automatically be called by close() if enable_streaming() was called.
+
+
+ See enable_streaming() for an example.
+ """
+ pass
+
+ def is_streaming_enabled(self) -> bool:
+ """
+ Tells if the streaming is running.
+ :return: True if the stream is running, False otherwise.
+ """
+ return bool()
+
+ def enable_recording(self, record: RecordingParameters) -> ERROR_CODE:
+ """
+ Creates an SVO file to be filled by enable_recording() and disable_recording().
+
+ \n SVO files are custom video files containing the un-rectified images from the camera along with some meta-data like timestamps or IMU orientation (if applicable).
+ \n They can be used to simulate a live ZED and test a sequence with various SDK parameters.
+ \n Depending on the application, various compression modes are available. See SVO_COMPRESSION_MODE.
+
+ :param record: A structure containing all the specific parameters for the recording such as filename and compression mode. Default: a preset of RecordingParameters.
+ :return: An ERROR_CODE that defines if the SVO file was successfully created and can be filled with images.
+
+ .. warning:: This method can be called multiple times during a camera lifetime, but if **video_filename** already exists, the file will be erased.
+
+
+ .. code-block:: text
+
+ import pyzed.sl as sl
+
+ def main() :
+ # Create a ZED camera object
+ zed = sl.CameraOne()
+ # Set initial parameters
+ init_params = sl.InitParametersOne()
+ init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+ init_params.coordinate_units = sl.UNIT.METER # Set units in meters
+ # Open the camera
+ err = zed.open(init_params)
+ if (err != sl.ERROR_CODE.SUCCESS):
+ print(repr(err))
+ exit(-1)
+
+ # Enable video recording
+ record_params = sl.RecordingParameters("myVideoFile.svo")
+ err = zed.enable_recording(record_params)
+ if (err != sl.ERROR_CODE.SUCCESS):
+ print(repr(err))
+ exit(-1)
+
+ # Grab data during 500 frames
+ i = 0
+ while i < 500 :
+ # Grab a new frame
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ # Record the grabbed frame in the video file
+ i = i + 1
+
+ zed.disable_recording()
+ print("Video has been saved ...")
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+ """
+ return ERROR_CODE()
+
+ def disable_recording(self) -> None:
+ """
+ Disables the recording initiated by enable_recording() and closes the generated file.
+
+ .. note::
+ This method will automatically be called by close() if enable_recording() was called.
+
+
+ See enable_recording() for an example.
+ """
+ pass
+
+ def get_recording_status(self) -> RecordingStatus:
+ """
+ Get the recording information.
+ :return: The recording state structure. For more details, see RecordingStatus.
+ """
+ return RecordingStatus()
+
+ def pause_recording(self, value = True) -> None:
+ """
+ Pauses or resumes the recording.
+ :param value: If True, the recording is paused. If False, the recording is resumed.
+ """
+ pass
+
+ def get_device_list() -> list[DeviceProperties]:
+ """
+ List all the connected devices with their associated information.
+
+ This method lists all the cameras available and provides their serial numbers, models, and other information.
+ :return: The device properties for each connected camera.
+ """
+ return list[DeviceProperties]()
+
+ def reboot(sn : int, full_reboot: bool =True) -> ERROR_CODE:
+ """
+ Performs a hardware reset of the ZED 2 and the ZED 2i.
+
+ :param sn: Serial number of the camera to reset, or 0 to reset the first camera detected.
+ :param full_reboot: Perform a full reboot (sensors and video modules) if True, otherwise only the video module will be rebooted.
+ :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine.
+ :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected.
+ :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise.
+
+ .. note::
+ This method only works for ZED 2, ZED 2i, and newer camera models.
+
+
+ .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting.
+ """
+ return ERROR_CODE()
+
+ def reboot_from_input(input_type: INPUT_TYPE) -> ERROR_CODE:
+ """
+ Performs a hardware reset of all devices matching the InputType.
+
+ :param input_type: Input type of the devices to reset.
+ :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine.
+ :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected. + :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise. + :return: ERROR_CODE "ERROR_CODE::INVALID_FUNCTION_PARAMETERS" for SVOs and streams. + + .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting. + """ + return ERROR_CODE() + + diff --git a/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/PKG-INFO b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/PKG-INFO new file mode 100644 index 0000000..66b3012 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/PKG-INFO @@ -0,0 +1,5 @@ +Metadata-Version: 2.4 +Name: pyzed +Version: 0.1.0 +Summary: Wrapper for ZED SDK +Requires-Python: >=3.12 diff --git a/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/SOURCES.txt b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/SOURCES.txt new file mode 100644 index 0000000..a3d4a05 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/SOURCES.txt @@ -0,0 +1,9 @@ +__init__.py +pyproject.toml +sl.pyi +./__init__.py +./sl.pyi +pyzed.egg-info/PKG-INFO +pyzed.egg-info/SOURCES.txt +pyzed.egg-info/dependency_links.txt +pyzed.egg-info/top_level.txt \ No newline at end of file diff --git a/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/dependency_links.txt b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/top_level.txt b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/top_level.txt new file mode 100644 index 0000000..247f07b --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed/pyzed.egg-info/top_level.txt @@ -0,0 +1 @@ +pyzed diff --git a/py_workspace/libs/pyzed_pkg/pyzed/sl.cpython-312-x86_64-linux-gnu.so b/py_workspace/libs/pyzed_pkg/pyzed/sl.cpython-312-x86_64-linux-gnu.so new file mode 100755 index 0000000..039b885 --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed/sl.cpython-312-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:730a4c2adc33129b4ed503cb04a75b0baaa2538fcd254781830541a0a8a14e1c +size 5161120 diff --git a/py_workspace/libs/pyzed_pkg/pyzed/sl.pyi b/py_workspace/libs/pyzed_pkg/pyzed/sl.pyi new file mode 100644 index 0000000..33c405e --- /dev/null +++ b/py_workspace/libs/pyzed_pkg/pyzed/sl.pyi @@ -0,0 +1,14672 @@ +import enum +import numpy as np +from typing import List, Tuple, Dict, Optional, Union, Any, overload, Mapping, MutableMapping + +class Timestamp(): + """ + Structure representing timestamps with utilities. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def data_ns(self) -> int: + """ + Timestamp in nanoseconds. + """ + return int() + + @data_ns.setter + def data_ns(self, data_ns: Any) -> None: + pass + + def get_nanoseconds(self) -> int: + """ + Returns the timestamp in nanoseconds. + """ + return int() + + def get_microseconds(self) -> int: + """ + Returns the timestamp in microseconds. + """ + return int() + + def get_milliseconds(self) -> int: + """ + Returns the timestamp in milliseconds. + """ + return int() + + def get_seconds(self) -> int: + """ + Returns the timestamp in seconds. + """ + return int() + + def set_nanoseconds(self, t_ns: int) -> None: + """ + Sets the timestamp to a value in nanoseconds. + """ + pass + + def set_microseconds(self, t_us: int) -> None: + """ + Sets the timestamp to a value in microseconds. 
+ """
+ pass
+ 
+ def set_milliseconds(self, t_ms: int) -> None:
+ """
+ Sets the timestamp to a value in milliseconds.
+ """
+ pass
+ 
+ def set_seconds(self, t_s: int) -> None:
+ """
+ Sets the timestamp to a value in seconds.
+ """
+ pass
+ 
+ 
+class ERROR_CODE(enum.Enum):
+ """
+ Lists error codes in the ZED SDK.
+ 
+ | Enumerator | |
+ |:---:|:---:|
+ | POTENTIAL_CALIBRATION_ISSUE | The camera has a potential calibration issue. |
+ | CONFIGURATION_FALLBACK | The operation could not proceed with the target configuration but did succeed with a fallback. |
+ | SENSORS_DATA_REQUIRED | The input data does not contain the high-frequency sensor data; this usually means a newer SVO/stream input is required. This module needs inertial data present in its input to work. |
+ | CORRUPTED_FRAME | The image could be corrupted. Enabled with the parameter InitParameters.enable_image_validity_check. |
+ | CAMERA_REBOOTING | The camera is currently rebooting. |
+ | SUCCESS | Standard code for successful behavior. |
+ | FAILURE | Standard code for unsuccessful behavior. |
+ | NO_GPU_COMPATIBLE | No GPU found or CUDA capability of the device is not supported. |
+ | NOT_ENOUGH_GPU_MEMORY | Not enough GPU memory for this depth mode. Try a different mode (such as PERFORMANCE), or increase the minimum depth value (see InitParameters.depth_minimum_distance). |
+ | CAMERA_NOT_DETECTED | No camera was detected. |
+ | SENSORS_NOT_INITIALIZED | The MCU that controls the sensors module has an invalid serial number. You can try to recover it by launching the **ZED Diagnostic** tool from the command line with the option ``-r``. |
+ | SENSORS_NOT_AVAILABLE | A camera with sensors is detected but the sensors (IMU, barometer, ...) cannot be opened. Only MODEL.ZED has no sensors. Unplugging/replugging is required. |
+ | INVALID_RESOLUTION | In case of invalid resolution parameter, such as an upsize beyond the original image size in Camera.retrieve_image. |
+ | LOW_USB_BANDWIDTH | Insufficient bandwidth for the correct use of the camera. This issue can occur when you use multiple cameras or a USB 2.0 port. |
+ | CALIBRATION_FILE_NOT_AVAILABLE | The calibration file of the camera is not found on the host machine. Use **ZED Explorer** or **ZED Calibration** to download the factory calibration file. |
+ | INVALID_CALIBRATION_FILE | The calibration file is not valid. Try to download the factory calibration file or recalibrate your camera using **ZED Calibration**. |
+ | INVALID_SVO_FILE | The provided SVO file is not valid. |
+ | SVO_RECORDING_ERROR | An error occurred while trying to record an SVO (not enough free storage, invalid file, ...). |
+ | SVO_UNSUPPORTED_COMPRESSION | An SVO-related error; occurs when NVIDIA-based compression cannot be loaded. |
+ | END_OF_SVOFILE_REACHED | SVO end of file has been reached. No frame will be available until the SVO position is reset. |
+ | INVALID_COORDINATE_SYSTEM | The requested coordinate system is not available. |
+ | INVALID_FIRMWARE | The firmware of the camera is out of date. Update to the latest version. |
+ | INVALID_FUNCTION_PARAMETERS | Invalid parameters have been given for the function. |
+ | CUDA_ERROR | A CUDA error has been detected in the process, in sl.Camera.grab() or sl.Camera.retrieve_xxx() only. Activate verbose in sl.Camera.open() for more info. |
+ | CAMERA_NOT_INITIALIZED | The ZED SDK is not initialized. Probably a missing call to sl.Camera.open(). |
+ | NVIDIA_DRIVER_OUT_OF_DATE | Your NVIDIA driver is too old and not compatible with your current CUDA version. |
+ | INVALID_FUNCTION_CALL | The call of the function is not valid in the current context. Could be a missing call of sl.Camera.open(). |
+ | CORRUPTED_SDK_INSTALLATION | The ZED SDK was not able to load its dependencies or some assets are missing. Reinstall the ZED SDK or check for missing dependencies (cuDNN, TensorRT). |
+ | INCOMPATIBLE_SDK_VERSION | The installed ZED SDK is incompatible with the one used to compile the program. |
+ | INVALID_AREA_FILE | The given area file does not exist. Check the path. |
+ | INCOMPATIBLE_AREA_FILE | The area file does not contain enough data to be used, or the sl.DEPTH_MODE used during the creation of the area file is different from the one currently set. |
+ | CAMERA_FAILED_TO_SETUP | Failed to open the camera at the proper resolution. Try another resolution or make sure that the UVC driver is properly installed. |
+ | CAMERA_DETECTION_ISSUE | Your camera cannot be opened. Try replugging it to another port or flipping the USB-C connector (if there is one). |
+ | CANNOT_START_CAMERA_STREAM | Cannot start the camera stream. Make sure your camera is not already used by another process or blocked by firewall or antivirus. |
+ | NO_GPU_DETECTED | No GPU found. CUDA is unable to list it. Can be a driver/reboot issue. |
+ | PLANE_NOT_FOUND | Plane not found. Either no plane is detected in the scene, at the location or corresponding to the floor, or the floor plane doesn't match the prior given. |
+ | MODULE_NOT_COMPATIBLE_WITH_CAMERA | The module you are trying to use is not compatible with your camera sl.MODEL. Note: sl.MODEL.ZED does not have an IMU and does not support the AI modules. |
+ | MOTION_SENSORS_REQUIRED | The module needs the sensors to be enabled (see InitParameters.sensors_required). |
+ | MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION | The module needs a newer version of CUDA. |
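+ 
+ A minimal error-handling sketch (illustrative; assumes a camera is connected):
+ 
+ .. code-block:: text
+ 
+     import pyzed.sl as sl
+ 
+     zed = sl.Camera()
+     err = zed.open(sl.InitParameters())
+     if err != sl.ERROR_CODE.SUCCESS:
+         print(repr(err))   # repr() gives the enumerator name
+     else:
+         zed.close()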
+ """
+ POTENTIAL_CALIBRATION_ISSUE = enum.auto()
+ CONFIGURATION_FALLBACK = enum.auto()
+ SENSORS_DATA_REQUIRED = enum.auto()
+ CORRUPTED_FRAME = enum.auto()
+ CAMERA_REBOOTING = enum.auto()
+ SUCCESS = enum.auto()
+ FAILURE = enum.auto()
+ NO_GPU_COMPATIBLE = enum.auto()
+ NOT_ENOUGH_GPU_MEMORY = enum.auto()
+ CAMERA_NOT_DETECTED = enum.auto()
+ SENSORS_NOT_INITIALIZED = enum.auto()
+ SENSORS_NOT_AVAILABLE = enum.auto()
+ INVALID_RESOLUTION = enum.auto()
+ LOW_USB_BANDWIDTH = enum.auto()
+ CALIBRATION_FILE_NOT_AVAILABLE = enum.auto()
+ INVALID_CALIBRATION_FILE = enum.auto()
+ INVALID_SVO_FILE = enum.auto()
+ SVO_RECORDING_ERROR = enum.auto()
+ END_OF_SVOFILE_REACHED = enum.auto()
+ SVO_UNSUPPORTED_COMPRESSION = enum.auto()
+ INVALID_COORDINATE_SYSTEM = enum.auto()
+ INVALID_FIRMWARE = enum.auto()
+ INVALID_FUNCTION_PARAMETERS = enum.auto()
+ CUDA_ERROR = enum.auto()
+ CAMERA_NOT_INITIALIZED = enum.auto()
+ NVIDIA_DRIVER_OUT_OF_DATE = enum.auto()
+ INVALID_FUNCTION_CALL = enum.auto()
+ CORRUPTED_SDK_INSTALLATION = enum.auto()
+ INCOMPATIBLE_SDK_VERSION = enum.auto()
+ INVALID_AREA_FILE = enum.auto()
+ INCOMPATIBLE_AREA_FILE = enum.auto()
+ CAMERA_FAILED_TO_SETUP = enum.auto()
+ CAMERA_DETECTION_ISSUE = enum.auto()
+ CANNOT_START_CAMERA_STREAM = enum.auto()
+ NO_GPU_DETECTED = enum.auto()
+ PLANE_NOT_FOUND = enum.auto()
+ MODULE_NOT_COMPATIBLE_WITH_CAMERA = enum.auto()
+ MOTION_SENSORS_REQUIRED = enum.auto()
+ MODULE_NOT_COMPATIBLE_WITH_CUDA_VERSION = enum.auto()
+ DRIVER_FAILURE = enum.auto()
+ LAST = enum.auto()
+ def __str__(self) -> None:
+ pass
+ 
+ def __repr__(self) -> None:
+ pass
+ 
+ def __lt__(self, other) -> None:
+ pass
+ 
+ def __le__(self, other) -> None:
+ pass
+ 
+ def __gt__(self, other) -> None:
+ pass
+ 
+ def __ge__(self, other) -> None:
+ pass
+ 
+ 
+def _initialize_error_codes() -> None:
+ """
+ Module-level initializer for the ERROR_CODE enumeration. See ERROR_CODE for the full list of error codes and their meanings.
+ """
+ pass
+ 
+class MODEL(enum.Enum):
+ """
+ Lists ZED camera models.
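+ 
+ A minimal enumeration sketch (illustrative; assumes get_device_list() is exposed statically, as in the stub above):
+ 
+ .. code-block:: text
+ 
+     import pyzed.sl as sl
+ 
+     for dev in sl.Camera.get_device_list():
+         print(dev.serial_number, dev.camera_model, dev.camera_state)
+ 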
+ + + | Enumerator | | + |:---:|:---:| + | ZED | ZED camera model | + | ZED_M | ZED Mini (ZED M) camera model | + | ZED2 | ZED 2 camera model | + | ZED2i | ZED 2i camera model | + | ZED_X | ZED X camera model | + | ZED_XM | ZED X Mini (ZED XM) camera model | + | ZED_X_HDR | ZED X HDR camera model | + | ZED_X_HDR_MINI | ZED X HDR Mini camera model | + | ZED_X_HDR_MAX | ZED X HDR Wide camera model | + | VIRTUAL_ZED_X | Virtual ZED X generated from 2 ZED X One | + | ZED_XONE_GS | ZED X One with global shutter AR0234 sensor | + | ZED_XONE_UHD | ZED X One with 4K rolling shutter IMX678 sensor | + | ZED_XONE_HDR | ZED X One HDR | + """ + ZED = enum.auto() + ZED_M = enum.auto() + ZED2 = enum.auto() + ZED2i = enum.auto() + ZED_X = enum.auto() + ZED_XM = enum.auto() + ZED_X_HDR = enum.auto() + ZED_X_HDR_MINI = enum.auto() + ZED_X_HDR_MAX = enum.auto() + VIRTUAL_ZED_X = enum.auto() + ZED_XONE_GS = enum.auto() + ZED_XONE_UHD = enum.auto() + ZED_XONE_HDR = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class INPUT_TYPE(enum.Enum): + """ + Lists available input types in the ZED SDK. + + + | Enumerator | | + |:---:|:---:| + | USB | USB input mode | + | SVO | SVO file input mode | + | STREAM | STREAM input mode (requires to use Camera.enable_streaming "enable_streaming()" / Camera.disable_streaming "disable_streaming()" on the "sender" side) | + | GMSL | GMSL input mode (only on NVIDIA Jetson) | + """ + USB = enum.auto() + SVO = enum.auto() + STREAM = enum.auto() + GMSL = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class AI_MODELS(enum.Enum): + """ + Lists available AI models. 
+ 
+ | Enumerator | |
+ |:---:|:---:|
+ | MULTI_CLASS_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_FAST |
+ | MULTI_CLASS_MEDIUM_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_MEDIUM |
+ | MULTI_CLASS_ACCURATE_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE |
+ | HUMAN_BODY_FAST_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST |
+ | HUMAN_BODY_MEDIUM_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_MEDIUM |
+ | HUMAN_BODY_ACCURATE_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE |
+ | HUMAN_BODY_38_FAST_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST |
+ | HUMAN_BODY_38_MEDIUM_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_MEDIUM |
+ | HUMAN_BODY_38_ACCURATE_DETECTION | Related to sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE |
+ | PERSON_HEAD_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.PERSON_HEAD_BOX_FAST |
+ | PERSON_HEAD_ACCURATE_DETECTION | Related to sl.OBJECT_DETECTION_MODEL.PERSON_HEAD_BOX_ACCURATE |
+ | REID_ASSOCIATION | Related to sl.BatchParameters.enable |
+ | NEURAL_LIGHT_DEPTH | Related to sl.DEPTH_MODE.NEURAL_LIGHT |
+ | NEURAL_DEPTH | Related to sl.DEPTH_MODE.NEURAL |
+ | NEURAL_PLUS_DEPTH | Related to sl.DEPTH_MODE.NEURAL_PLUS |
+ """
+ MULTI_CLASS_DETECTION = enum.auto()
+ MULTI_CLASS_MEDIUM_DETECTION = enum.auto()
+ MULTI_CLASS_ACCURATE_DETECTION = enum.auto()
+ HUMAN_BODY_FAST_DETECTION = enum.auto()
+ HUMAN_BODY_MEDIUM_DETECTION = enum.auto()
+ HUMAN_BODY_ACCURATE_DETECTION = enum.auto()
+ HUMAN_BODY_38_FAST_DETECTION = enum.auto()
+ HUMAN_BODY_38_MEDIUM_DETECTION = enum.auto()
+ HUMAN_BODY_38_ACCURATE_DETECTION = enum.auto()
+ PERSON_HEAD_DETECTION = enum.auto()
+ PERSON_HEAD_ACCURATE_DETECTION = enum.auto()
+ REID_ASSOCIATION = enum.auto()
+ NEURAL_LIGHT_DEPTH = enum.auto()
+ NEURAL_DEPTH = enum.auto()
+ NEURAL_PLUS_DEPTH = enum.auto()
+ LAST = enum.auto()
+ def __lt__(self, other) -> None:
+ pass
+ 
+ def __le__(self, other) -> None:
+ pass
+ 
+ def __gt__(self, other) -> None:
+ pass
+ 
+ def __ge__(self, other) -> None:
+ pass
+ 
+ 
+class OBJECT_DETECTION_MODEL(enum.Enum):
+ """
+ Lists available models for the object detection module.
+ 
+ 
+ | Enumerator | |
+ |:---:|:---:|
+ | MULTI_CLASS_BOX_FAST | Any objects, bounding box based. |
+ | MULTI_CLASS_BOX_ACCURATE | Any objects, bounding box based, more accurate but slower than the base model. |
+ | MULTI_CLASS_BOX_MEDIUM | Any objects, bounding box based, compromise between accuracy and speed. |
+ | PERSON_HEAD_BOX_FAST | Bounding box detector specialized in person heads, particularly well suited for crowded environments. The person localization is also improved. |
+ | PERSON_HEAD_BOX_ACCURATE | Bounding box detector specialized in person heads, particularly well suited for crowded environments. The person localization is also improved, more accurate but slower than the base model. |
+ | CUSTOM_BOX_OBJECTS | For external inference, using your own custom model and/or frameworks. This mode disables the internal inference engine; the 2D bounding box detection must be provided. |
+ | CUSTOM_YOLOLIKE_BOX_OBJECTS | For internal inference, using your own custom YOLO-like model.
| + """ + MULTI_CLASS_BOX_FAST = enum.auto() + MULTI_CLASS_BOX_MEDIUM = enum.auto() + MULTI_CLASS_BOX_ACCURATE = enum.auto() + PERSON_HEAD_BOX_FAST = enum.auto() + PERSON_HEAD_BOX_ACCURATE = enum.auto() + CUSTOM_BOX_OBJECTS = enum.auto() + CUSTOM_YOLOLIKE_BOX_OBJECTS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class BODY_TRACKING_MODEL(enum.Enum): + """ + Lists available models for the body tracking module. + + + | Enumerator | | + |:---:|:---:| + | HUMAN_BODY_FAST | Keypoints based, specific to human skeleton, real time performance even on Jetson or low end GPU cards. | + | HUMAN_BODY_ACCURATE | Keypoints based, specific to human skeleton, state of the art accuracy, requires powerful GPU. | + | HUMAN_BODY_MEDIUM | Keypoints based, specific to human skeleton, compromise between accuracy and speed. | + """ + HUMAN_BODY_FAST = enum.auto() + HUMAN_BODY_ACCURATE = enum.auto() + HUMAN_BODY_MEDIUM = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_FILTERING_MODE(enum.Enum): + """ + Lists supported bounding box preprocessing. + + + | Enumerator | | + |:---:|:---:| + | NONE | The ZED SDK will not apply any preprocessing to the detected objects. | + | NMS3D | The ZED SDK will remove objects that are in the same 3D position as an already tracked object (independent of class id). | + | NMS3D_PER_CLASS | The ZED SDK will remove objects that are in the same 3D position as an already tracked object of the same class id. | + """ + NONE = enum.auto() + NMS3D = enum.auto() + NMS3D_PER_CLASS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_ACCELERATION_PRESET(enum.Enum): + """ + Lists supported presets for maximum acceleration allowed for a given tracked object. + + + | Enumerator | | + |:---:|:---:| + | DEFAULT | The ZED SDK will automatically determine the appropriate maximum acceleration. | + | LOW | Suitable for objects with relatively low maximum acceleration (e.g., a person walking). | + | MEDIUM | Suitable for objects with moderate maximum acceleration (e.g., a person running). | + | HIGH | Suitable for objects with high maximum acceleration (e.g., a car accelerating, a kicked sports ball). | + """ + DEFAULT = enum.auto() + LOW = enum.auto() + MEDIUM = enum.auto() + HIGH = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class CAMERA_STATE(enum.Enum): + """ + Lists possible camera states. + + + | Enumerator | | + |:---:|:---:| + | AVAILABLE | The camera can be opened by the ZED SDK. | + | NOT_AVAILABLE | The camera is already opened and unavailable. 
| + """ + AVAILABLE = enum.auto() + NOT_AVAILABLE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SIDE(enum.Enum): + """ + Lists possible sides on which to get data from. + + | Enumerator | | + |:---:|:---:| + | LEFT | Left side only. | + | RIGHT | Right side only. | + | BOTH | Left and right side. | + """ + LEFT = enum.auto() + RIGHT = enum.auto() + BOTH = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class RESOLUTION(enum.Enum): + """ + Lists available resolutions. + .. note:: + The VGA resolution does not respect the 640*480 standard to better fit the camera sensor (672*376 is used). + + .. warning:: All resolutions are not available for every camera. + .. warning:: You can find the available resolutions for each camera in `our documentation `_. + + | Enumerator | | + |:---:|:---:| + | HD4K | 3856x2180 for imx678 mono | + | QHDPLUS | 3800x1800 | + | HD2K | 2208*1242 (x2) Available FPS: 15 | + | HD1080 | 1920*1080 (x2) Available FPS: 15, 30 | + | HD1200 | 1920*1200 (x2) Available FPS: 15, 30, 60 | + | HD1536 | 1920*1536 (x2) Available FPS: 30 | + | HD720 | 1280*720 (x2) Available FPS: 15, 30, 60 | + | SVGA | 960*600 (x2) Available FPS: 15, 30, 60, 120 | + | VGA | 672*376 (x2) Available FPS: 15, 30, 60, 100 | + | AUTO | Select the resolution compatible with the camera: * ZED X/X Mini: HD1200* other cameras: HD720 | + """ + HD4K = enum.auto() + QHDPLUS = enum.auto() + HD2K = enum.auto() + HD1080 = enum.auto() + HD1200 = enum.auto() + HD1536 = enum.auto() + HD720 = enum.auto() + SVGA = enum.auto() + VGA = enum.auto() + AUTO = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +def sleep_ms(time: int) -> None: + """ + Blocks the execution of the current thread for **time milliseconds. + :param time: Number of milliseconds to wait. + """ + pass + +def sleep_us(time: int) -> None: + """ + Blocks the execution of the current thread for **time microseconds. + :param time: Number of microseconds to wait. + """ + pass + +def get_resolution(resolution: RESOLUTION) -> Resolution: + """ + Gets the corresponding sl.Resolution from an sl.RESOLUTION. + + :param resolution: The wanted sl.RESOLUTION. + :return: The sl.Resolution corresponding to sl.RESOLUTION given as argument. + """ + return Resolution() + +class DeviceProperties: + """ + Class containing information about the properties of a camera. + + .. note:: + A camera_model sl.MODEL.ZED_M with an id '-1' can be due to an inverted USB-C cable. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_state(self) -> CAMERA_STATE: + """ + State of the camera. + + Default: sl.CAMERA_STATE.NOT_AVAILABLE + """ + return CAMERA_STATE() + + @camera_state.setter + def camera_state(self, camera_state: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Id of the camera. 
+ + Default: -1 + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def camera_name(self) -> str: + """ + Name of Camera in DT (ZED_CAM1) + """ + return str() + + @camera_name.setter + def camera_name(self, camera_name: Any) -> None: + pass + + @property + def i2c_port(self) -> int: + """ + i2c port of the camera. + """ + return int() + + @i2c_port.setter + def i2c_port(self, i2c_port: Any) -> None: + pass + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera. + """ + return MODEL() + + @camera_model.setter + def camera_model(self, camera_model: Any) -> None: + pass + + @identifier.setter + def identifier(self, identifier: Any) -> None: + pass + + @property + def camera_sensor_model(self) -> str: + """ + Name of sensor (zedx) + """ + return str() + + @camera_sensor_model.setter + def camera_sensor_model(self, camera_sensor_model: Any) -> None: + pass + + @property + def path(self) -> str: + """ + System path of the camera. + """ + return str() + + @path.setter + def path(self, path: Any) -> None: + pass + + @property + def sensor_address_right(self) -> int: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return int() + + @sensor_address_right.setter + def sensor_address_right(self, sensor_address_right: Any) -> None: + pass + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + + Default: 0 + .. warning:: Not provided for Windows. + """ + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + @property + def sensor_address_left(self) -> int: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return int() + + @sensor_address_left.setter + def sensor_address_left(self, sensor_address_left: Any) -> None: + pass + + @property + def camera_badge(self) -> str: + """ + Badge name (zedx_ar0234) + """ + return str() + + @camera_badge.setter + def camera_badge(self, camera_badge: Any) -> None: + pass + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type of the camera. + """ + return INPUT_TYPE() + + @input_type.setter + def input_type(self, input_type: Any) -> None: + pass + + def identifier(self) -> np.numpy[np.uint8]: + """ + sensor_address when available (ZED-X HDR/XOne HDR only) + """ + return np.numpy[np.uint8]() + + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + +class Matrix3f: + """ + Class representing a generic 3*3 matrix. + + It is defined in a row-major order, it means that, in the value buffer, the entire first row is stored first, followed by the entire second row, and so on. + \n The data value of the matrix can be accessed with the r() method. + .. code-block:: text + + | r00 r01 r02 | + | r10 r11 r12 | + | r20 r21 r22 | + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def matrix_name(self) -> str: + """ + Name of the matrix (optional). + """ + return str() + + @matrix_name.setter + def matrix_name(self, matrix_name: Any) -> None: + pass + + @r.setter + def r(self, r: Any) -> None: + pass + + @property + def nbElem(self) -> int: + return int() + + def _initialize_from_input(self, input_data) -> None: + pass + + def __dealloc__(self) -> None: + pass + + def init_matrix(self, matrix) -> None: + """ + Copy the values from another sl.Matrix3f. + :param matrix: sl.Matrix3f to copy. + """ + pass + + def inverse(self) -> None: + """ + Sets the sl.Matrix3f to its inverse. 
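+ 
+ A small sketch of the in-place and returning variants (illustrative):
+ 
+ .. code-block:: text
+ 
+     import pyzed.sl as sl
+ 
+     m = sl.Matrix3f()
+     m.set_identity()     # identity is its own inverse
+     m.inverse()          # in-place
+     inv = sl.Matrix3f().inverse_mat(m)   # returning variant
+ 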
+ """ + pass + + def inverse_mat(self, rotation) -> Matrix3f: + """ + Returns the inverse of a sl.Matrix3f. + :param rotation: sl.Matrix3f to compute the inverse from. + :return: The inverse of the sl.Matrix3f given as input. + """ + return Matrix3f() + + def transpose(self) -> None: + """ + Sets the sl.Matrix3f to its transpose. + """ + pass + + def transpose_mat(self, rotation) -> Matrix3f: + """ + Returns the transpose of a sl.Matrix3f. + :param rotation: sl.Matrix3f to compute the transpose from. + :return: The transpose of the sl.Matrix3f given as input. + """ + return Matrix3f() + + def set_identity(self) -> Matrix3f: + """ + Sets the sl.Matrix3f to identity. + :return: itself + """ + return Matrix3f() + + def identity(self) -> Matrix3f: + """ + Creates an identity sl.Matrix3f. + :return: A sl.Matrix3f set to identity. + """ + return Matrix3f() + + def set_zeros(self) -> None: + """ + Sets the sl.Matrix3f to zero. + """ + pass + + def zeros(self) -> Matrix3f: + """ + Creates a sl.Matrix3f filled with zeros. + :return: A sl.Matrix3f filled with zeros. + """ + return Matrix3f() + + def get_infos(self) -> str: + """ + Returns the components of the sl.Matrix3f in a string. + :return: A string containing the components of the current sl.Matrix3f. + """ + return str() + + def r(self) -> np.numpy[float][float]: + """ + 3*3 numpy array of inner data. + """ + return np.numpy[float][float]() + + def __mul__(self, other) -> None: + pass + + def __richcmp__(left, right, op) -> None: + pass + + def __getitem__(self, key) -> None: + pass + + def __setitem__(self, key, value) -> None: + pass + + def __repr__(self) -> None: + pass + + +class Matrix4f: + """ + Class representing a generic 4*4 matrix. + + It is defined in a row-major order, it means that, in the value buffer, the entire first row is stored first, followed by the entire second row, and so on. + \n The data value of the matrix can be accessed with the r() method. + .. code-block:: text + + | r00 r01 r02 tx | + | r10 r11 r12 ty | + | r20 r21 r22 tz | + | m30 m31 m32 m33 | + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def matrix_name(self) -> str: + """ + Returns the name of the matrix (optional). + """ + return str() + + @matrix_name.setter + def matrix_name(self, matrix_name: Any) -> None: + pass + + @m.setter + def m(self, m: Any) -> None: + pass + + def _initialize_from_input(self, input_data) -> None: + pass + + def __dealloc__(self) -> None: + pass + + def init_matrix(self, matrix: Matrix4f) -> None: + """ + Copy the values from another sl.Matrix4f. + :param matrix: sl.Matrix4f to copy. + """ + pass + + def inverse(self) -> ERROR_CODE: + """ + Sets the sl.Matrix4f to its inverse. + :return: sl.ERROR_CODE.SUCCESS if the inverse has been computed, sl.ERROR_CODE.FAILURE is not (det = 0). + """ + return ERROR_CODE() + + def inverse_mat(self, rotation: Matrix4f) -> Matrix4f: + """ + Returns the inverse of a sl.Matrix4f. + :param rotation: sl.Matrix4f to compute the inverse from. + :return: The inverse of the sl.Matrix4f given as input. + """ + return Matrix4f() + + def transpose(self) -> None: + """ + Sets the sl.Matrix4f to its transpose. + """ + pass + + def transpose_mat(self, rotation: Matrix4f) -> Matrix4f: + """ + Returns the transpose of a sl.Matrix4f. + :param rotation: sl.Matrix4f to compute the transpose from. + :return: The transpose of the sl.Matrix4f given as input. + """ + return Matrix4f() + + def set_identity(self) -> Matrix4f: + """ + Sets the sl.Matrix4f to identity. 
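+ For example (illustrative):
+ 
+ .. code-block:: text
+ 
+     pose = sl.Matrix4f()
+     pose.set_identity()   # pose is now the 4x4 identity
+ 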
+ :return: itself + """ + return Matrix4f() + + def identity(self) -> Matrix4f: + """ + Creates an identity sl.Matrix4f. + :return: A sl.Matrix3f set to identity. + """ + return Matrix4f() + + def set_zeros(self) -> None: + """ + Sets the sl.Matrix4f to zero. + """ + pass + + def zeros(self) -> Matrix4f: + """ + Creates a sl.Matrix4f filled with zeros. + :return: A sl.Matrix4f filled with zeros. + """ + return Matrix4f() + + def get_infos(self) -> str: + """ + Returns the components of the sl.Matrix4f in a string. + :return: A string containing the components of the current sl.Matrix4f. + """ + return str() + + def set_sub_matrix3f(self, input: Matrix3f, row = 0, column = 0) -> ERROR_CODE: + """ + Sets a sl.Matrix3f inside the sl.Matrix4f. + .. note:: + Can be used to set the rotation matrix when the sl.Matrix4f is a pose or an isometric matrix. + + :param input: Sub-matrix to put inside the sl.Matrix4f. + :param row: Index of the row to start the 3x3 block. Must be 0 or 1. + :param column: Index of the column to start the 3x3 block. Must be 0 or 1. + + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_sub_vector3f(self, input0: float, input1: float, input2: float, column = 3) -> ERROR_CODE: + """ + Sets a 3x1 Vector inside the sl.Matrix4f at the specified column index. + .. note:: + Can be used to set the translation/position matrix when the sl.Matrix4f is a pose or an isometry. + + :param input0: First value of the 3x1 Vector to put inside the sl.Matrix4f. + :param input1: Second value of the 3x1 Vector to put inside the sl.Matrix4f. + :param input2: Third value of the 3x1 Vector to put inside the sl.Matrix4f. + :param column: Index of the column to start the 3x3 block. By default, it is the last column (translation for a sl.Pose). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_sub_vector4f(self, input0: float, input1: float, input2: float, input3: float, column = 3) -> ERROR_CODE: + """ + Sets a 4x1 Vector inside the sl.Matrix4f at the specified column index. + :param input0: First value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input1: Second value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input2: Third value of the 4x1 Vector to put inside the sl.Matrix4f. + :param input3: Fourth value of the 4x1 Vector to put inside the sl.Matrix4f. + :param column: Index of the column to start the 3x3 block. By default, it is the last column (translation for a sl.Pose). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def m(self) -> np.numpy[float][float]: + """ + 4*4 numpy array of inner data. + """ + return np.numpy[float][float]() + + def __mul__(self, other) -> None: + pass + + def __richcmp__(left, right, op) -> None: + pass + + def __getitem__(self, key) -> None: + pass + + def __setitem__(self, key, value) -> None: + pass + + def __repr__(self) -> None: + pass + + +class VIDEO_SETTINGS(enum.Enum): + """ + Lists available camera settings for the camera (contrast, hue, saturation, gain, ...). + + .. warning:: All VIDEO_SETTINGS are not supported for all camera models. You can find the supported VIDEO_SETTINGS for each ZED camera in our `documentation `_.\n\n + GAIN and EXPOSURE are linked in auto/default mode (see sl.Camera.set_camera_settings()). 
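+ 
+ A short toggle sketch (illustrative; assumes an opened sl.Camera named ``zed``):
+ 
+ .. code-block:: text
+ 
+     # Manual exposure at 50%, then back to automatic exposure/gain
+     zed.set_camera_settings(sl.VIDEO_SETTINGS.EXPOSURE, 50)
+     zed.set_camera_settings(sl.VIDEO_SETTINGS.AEC_AGC, 1)
+ 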
+ 
+ | Enumerator | |
+ |:---:|:---:|
+ | BRIGHTNESS | Brightness control. Affected value should be between 0 and 8. Note: Not available for ZED X/X Mini cameras. |
+ | CONTRAST | Contrast control. Affected value should be between 0 and 8. Note: Not available for ZED X/X Mini cameras. |
+ | HUE | Hue control. Affected value should be between 0 and 11. Note: Not available for ZED X/X Mini cameras. |
+ | SATURATION | Saturation control. Affected value should be between 0 and 8. |
+ | SHARPNESS | Digital sharpening control. Affected value should be between 0 and 8. |
+ | GAMMA | ISP gamma control. Affected value should be between 1 and 9. |
+ | GAIN | Gain control. Affected value should be between 0 and 100 for manual control. Note: If EXPOSURE is set to -1 (automatic mode), then GAIN will be automatic as well. |
+ | EXPOSURE | Exposure control. Affected value should be between 0 and 100 for manual control. The exposure is mapped linearly to a percentage of the following max values. Special case for ``EXPOSURE = 0`` that corresponds to 0.17072ms. The conversion to milliseconds depends on the framerate: 15fps & ``EXPOSURE = 100`` -> 19.97ms; 30fps & ``EXPOSURE = 100`` -> 19.97ms; 60fps & ``EXPOSURE = 100`` -> 10.84072ms; 100fps & ``EXPOSURE = 100`` -> 10.106624ms. |
+ | AEC_AGC | Defines if the GAIN and EXPOSURE are in automatic mode or not. Setting GAIN or EXPOSURE values will automatically set this value to 0. |
+ | AEC_AGC_ROI | Defines the region of interest for automatic exposure/gain computation. To be used with the dedicated Camera.set_camera_settings_roi() / Camera.get_camera_settings_roi() methods. |
+ | WHITEBALANCE_TEMPERATURE | Color temperature control. Affected value should be between 2800 and 6500 with a step of 100. Note: Setting a value will automatically set WHITEBALANCE_AUTO to 0. |
+ | WHITEBALANCE_AUTO | Defines if the white balance is in automatic mode or not. |
+ | LED_STATUS | Status of the front LED of the camera. Set to 0 to disable the light, 1 to enable the light. Default value is on. Note: Requires camera firmware 1523 at least. |
+ | EXPOSURE_TIME | Real exposure time control in microseconds. Note: Only available for ZED X/X Mini cameras. Note: Replaces the EXPOSURE setting. |
+ | ANALOG_GAIN | Real analog gain (sensor) control in mDB. The range is defined by Jetson DTS and by default [1000-16000]. Note: Only available for ZED X/X Mini cameras. Note: Replaces the GAIN setting. |
+ | DIGITAL_GAIN | Real digital gain (ISP) as a factor. The range is defined by Jetson DTS and by default [1-256]. Note: Only available for ZED X/X Mini cameras. Note: Replaces the GAIN setting. |
+ | AUTO_EXPOSURE_TIME_RANGE | Range of exposure auto control in microseconds. Used with Camera.set_camera_settings_range(). Min/max range between max range defined in DTS. By default: [28000 - or 19000] us. Note: Only available for ZED X/X Mini cameras. |
+ | AUTO_ANALOG_GAIN_RANGE | Range of sensor gain in automatic control. Used with Camera.set_camera_settings_range(). Min/max range between max range defined in DTS. By default: [1000 - 16000] mdB. Note: Only available for ZED X/X Mini cameras. |
+ | AUTO_DIGITAL_GAIN_RANGE | Range of digital ISP gain in automatic control. Used with Camera.set_camera_settings_range(). Min/max range between max range defined in DTS. By default: [1 - 256]. Note: Only available for ZED X/X Mini cameras.
| + | EXPOSURE_COMPENSATION | Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Affected value should be between 0 and 100 (mapped between [-2.0,2.0]). Default value is 50, i.e. no compensation applied. Note: Only available for ZED X/X Mini cameras. | + | DENOISING | Level of denoising applied on both left and right images. Affected value should be between 0 and 100. Default value is 50. Note: Only available for ZED X/X Mini cameras. | + """ + BRIGHTNESS = enum.auto() + CONTRAST = enum.auto() + HUE = enum.auto() + SATURATION = enum.auto() + SHARPNESS = enum.auto() + GAMMA = enum.auto() + GAIN = enum.auto() + EXPOSURE = enum.auto() + AEC_AGC = enum.auto() + AEC_AGC_ROI = enum.auto() + WHITEBALANCE_TEMPERATURE = enum.auto() + WHITEBALANCE_AUTO = enum.auto() + LED_STATUS = enum.auto() + EXPOSURE_TIME = enum.auto() + ANALOG_GAIN = enum.auto() + DIGITAL_GAIN = enum.auto() + AUTO_EXPOSURE_TIME_RANGE = enum.auto() + AUTO_ANALOG_GAIN_RANGE = enum.auto() + AUTO_DIGITAL_GAIN_RANGE = enum.auto() + EXPOSURE_COMPENSATION = enum.auto() + DENOISING = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class DEPTH_MODE(enum.Enum): + """ + Lists available depth computation modes. + + | Enumerator | | + |:---:|:---:| + | NONE | No depth map computation. Only rectified stereo images will be available. | + | PERFORMANCE | Computation mode optimized for speed. | + | QUALITY | Computation mode designed for challenging areas with untextured surfaces. | + | ULTRA | Computation mode that favors edges and sharpness. Requires more GPU memory and computation power. | + | NEURAL_LIGHT | End to End Neural disparity estimation. Requires AI module. | + | NEURAL | End to End Neural disparity estimation. Requires AI module. | + | NEURAL_PLUS | End to End Neural disparity estimation. More precise but requires more GPU memory and computation power. Requires AI module. | + """ + NONE = enum.auto() + PERFORMANCE = enum.auto() + QUALITY = enum.auto() + ULTRA = enum.auto() + NEURAL_LIGHT = enum.auto() + NEURAL = enum.auto() + NEURAL_PLUS = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class UNIT(enum.Enum): + """ + Lists available units for measures. + + | Enumerator | | + |:---:|:---:| + | MILLIMETER | International System (1/1000 meters) | + | CENTIMETER | International System (1/100 meters) | + | METER | International System (1 meter) | + | INCH | Imperial Unit (1/12 feet) | + | FOOT | Imperial Unit (1 foot) | + """ + MILLIMETER = enum.auto() + CENTIMETER = enum.auto() + METER = enum.auto() + INCH = enum.auto() + FOOT = enum.auto() + LAST = enum.auto() + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class COORDINATE_SYSTEM(enum.Enum): + """ + Lists available coordinates systems for positional tracking and 3D measures. + + | Enumerator | | + |:---:|:---:| + | IMAGE | Standard coordinates system in computer vision. Used in OpenCV: see `here `_. | + | LEFT_HANDED_Y_UP | Left-handed with Y up and Z forward. Used in Unity with DirectX. | + | RIGHT_HANDED_Y_UP | Right-handed with Y pointing up and Z backward. 
Used in OpenGL. |
+ | RIGHT_HANDED_Z_UP | Right-handed with Z pointing up and Y forward. Used in 3DSMax. |
+ | LEFT_HANDED_Z_UP | Left-handed with Z axis pointing up and X forward. Used in Unreal Engine. |
+ | RIGHT_HANDED_Z_UP_X_FWD | Right-handed with Z pointing up and X forward. Used in ROS (REP 103). |
+ """
+ IMAGE = enum.auto()
+ LEFT_HANDED_Y_UP = enum.auto()
+ RIGHT_HANDED_Y_UP = enum.auto()
+ RIGHT_HANDED_Z_UP = enum.auto()
+ LEFT_HANDED_Z_UP = enum.auto()
+ RIGHT_HANDED_Z_UP_X_FWD = enum.auto()
+ LAST = enum.auto()
+ def __str__(self) -> None:
+ pass
+ 
+ def __repr__(self) -> None:
+ pass
+ 
+ def __lt__(self, other) -> None:
+ pass
+ 
+ def __le__(self, other) -> None:
+ pass
+ 
+ def __gt__(self, other) -> None:
+ pass
+ 
+ def __ge__(self, other) -> None:
+ pass
+ 
+ 
+class MEASURE(enum.Enum):
+ """
+ Lists retrievable measures.
+ 
+ | Enumerator | |
+ |:---:|:---:|
+ | DISPARITY | Disparity map. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | DEPTH | Depth map in sl.UNIT defined in sl.InitParameters.coordinate_units. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | CONFIDENCE | Certainty/confidence of the depth map. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | XYZ | Point cloud. Each pixel contains 4 float (X, Y, Z, not used). Type: sl.MAT_TYPE.F32_C4 |
+ | XYZRGBA | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the RGBA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZBGRA | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the BGRA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZARGB | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the ARGB color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZABGR | Colored point cloud. Each pixel contains 4 float (X, Y, Z, color). The color should be read as an unsigned char[4] representing the ABGR color. Type: sl.MAT_TYPE.F32_C4 |
+ | NORMALS | Normal vectors map. Each pixel contains 4 float (X, Y, Z, 0). Type: sl.MAT_TYPE.F32_C4 |
+ | DISPARITY_RIGHT | Disparity map for right sensor. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | DEPTH_RIGHT | Depth map for right sensor. Each pixel contains 1 float. Type: sl.MAT_TYPE.F32_C1 |
+ | XYZ_RIGHT | Point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, not used). Type: sl.MAT_TYPE.F32_C4 |
+ | XYZRGBA_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the RGBA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZBGRA_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the BGRA color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZARGB_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the ARGB color. Type: sl.MAT_TYPE.F32_C4 |
+ | XYZABGR_RIGHT | Colored point cloud for right sensor. Each pixel contains 4 float (X, Y, Z, color). The color needs to be read as an unsigned char[4] representing the ABGR color. Type: sl.MAT_TYPE.F32_C4 |
+ | NORMALS_RIGHT | Normal vectors map for right view. Each pixel contains 4 float (X, Y, Z, 0).
Type: sl.MAT_TYPE.F32_C4 | + | DEPTH_U16_MM | Depth map in millimeter whatever the sl.UNIT defined in sl.InitParameters.coordinate_units. Invalid values are set to 0 and depth values are clamped at 65000. Each pixel contains 1 unsigned short. Type: sl.MAT_TYPE.U16_C1 | + | DEPTH_U16_MM_RIGHT | Depth map in millimeter for right sensor. Each pixel contains 1 unsigned short. Type: sl.MAT_TYPE.U16_C1 | + """ + DISPARITY = enum.auto() + DEPTH = enum.auto() + CONFIDENCE = enum.auto() + XYZ = enum.auto() + XYZRGBA = enum.auto() + XYZBGRA = enum.auto() + XYZARGB = enum.auto() + XYZABGR = enum.auto() + NORMALS = enum.auto() + DISPARITY_RIGHT = enum.auto() + DEPTH_RIGHT = enum.auto() + XYZ_RIGHT = enum.auto() + XYZRGBA_RIGHT = enum.auto() + XYZBGRA_RIGHT = enum.auto() + XYZARGB_RIGHT = enum.auto() + XYZABGR_RIGHT = enum.auto() + NORMALS_RIGHT = enum.auto() + DEPTH_U16_MM = enum.auto() + DEPTH_U16_MM_RIGHT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class VIEW(enum.Enum): + """ + Lists available views. + + | Enumerator | | + |:---:|:---:| + | LEFT | Left BGRA image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | RIGHT | Right BGRA image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | LEFT_GRAY | Left gray image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1| + | RIGHT_GRAY | Right gray image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1| + | LEFT_UNRECTIFIED | Left BGRA unrectified image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | RIGHT_UNRECTIFIED | Right BGRA unrectified image. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | LEFT_UNRECTIFIED_GRAY | Left gray unrectified image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 | + | RIGHT_UNRECTIFIED_GRAY | Right gray unrectified image. Each pixel contains 1 unsigned char. Type: sl.MAT_TYPE.U8_C1 | + | SIDE_BY_SIDE | Left and right image (the image width is therefore doubled). Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 | + | DEPTH | Color rendering of the depth. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.DEPTH with sl.Camera.retrieve_measure() to get depth values. | + | CONFIDENCE | Color rendering of the depth confidence. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.CONFIDENCE with sl.Camera.retrieve_measure() to get confidence values. | + | NORMALS | Color rendering of the normals. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.NORMALS with sl.Camera.retrieve_measure() to get normal values. | + | DEPTH_RIGHT | Color rendering of the right depth mapped on right sensor. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.DEPTH_RIGHT with sl.Camera.retrieve_measure() to get depth right values. | + | NORMALS_RIGHT | Color rendering of the normals mapped on right sensor. Each pixel contains 4 unsigned char (B, G, R, A). Type: sl.MAT_TYPE.U8_C4 Note: Use sl.MEASURE.NORMALS_RIGHT with sl.Camera.retrieve_measure() to get normal right values. 
| + """ + LEFT = enum.auto() + RIGHT = enum.auto() + LEFT_GRAY = enum.auto() + RIGHT_GRAY = enum.auto() + LEFT_UNRECTIFIED = enum.auto() + RIGHT_UNRECTIFIED = enum.auto() + LEFT_UNRECTIFIED_GRAY = enum.auto() + RIGHT_UNRECTIFIED_GRAY = enum.auto() + SIDE_BY_SIDE = enum.auto() + DEPTH = enum.auto() + CONFIDENCE = enum.auto() + NORMALS = enum.auto() + DEPTH_RIGHT = enum.auto() + NORMALS_RIGHT = enum.auto() + LEFT_BGRA = enum.auto() + LEFT_BGR = enum.auto() + RIGHT_BGRA = enum.auto() + RIGHT_BGR = enum.auto() + LEFT_UNRECTIFIED_BGRA = enum.auto() + LEFT_UNRECTIFIED_BGR = enum.auto() + RIGHT_UNRECTIFIED_BGRA = enum.auto() + RIGHT_UNRECTIFIED_BGR = enum.auto() + SIDE_BY_SIDE_BGRA = enum.auto() + SIDE_BY_SIDE_BGR = enum.auto() + SIDE_BY_SIDE_GRAY = enum.auto() + SIDE_BY_SIDE_UNRECTIFIED_BGRA = enum.auto() + SIDE_BY_SIDE_UNRECTIFIED_BGR = enum.auto() + SIDE_BY_SIDE_UNRECTIFIED_GRAY = enum.auto() + DEPTH_BGRA = enum.auto() + DEPTH_BGR = enum.auto() + DEPTH_GRAY = enum.auto() + CONFIDENCE_BGRA = enum.auto() + CONFIDENCE_BGR = enum.auto() + CONFIDENCE_GRAY = enum.auto() + NORMALS_BGRA = enum.auto() + NORMALS_BGR = enum.auto() + NORMALS_GRAY = enum.auto() + DEPTH_RIGHT_BGRA = enum.auto() + DEPTH_RIGHT_BGR = enum.auto() + DEPTH_RIGHT_GRAY = enum.auto() + NORMALS_RIGHT_BGRA = enum.auto() + NORMALS_RIGHT_BGR = enum.auto() + NORMALS_RIGHT_GRAY = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class POSITIONAL_TRACKING_STATE(enum.Enum): + """ + Lists the different states of positional tracking. + + | Enumerator | | + |:---:|:---:| + | SEARCHING | Warning: DEPRECATED: This state is no longer in use. | + | OK | The positional tracking is functioning normally. | + | OFF | The positional tracking is currently disabled. | + | FPS_TOO_LOW | The effective FPS is too low to provide accurate motion tracking results. Consider adjusting performance parameters (e.g., depth mode, camera resolution) to improve tracking quality.| + | SEARCHING_FLOOR_PLANE | The camera is currently searching for the floor plane to establish its position relative to it. The world reference frame will be set afterward. | + | UNAVAILABLE | The tracking module was unable to perform tracking from the previous frame to the current frame. | + """ + SEARCHING = enum.auto() + OK = enum.auto() + OFF = enum.auto() + FPS_TOO_LOW = enum.auto() + SEARCHING_FLOOR_PLANE = enum.auto() + UNAVAILABLE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class ODOMETRY_STATUS(enum.Enum): + """ + Report the status of current odom tracking. + + | Enumerator | | + |:----------:|:---------------------------| + | OK | The positional tracking module successfully tracked from the previous frame to the current frame. | + | UNAVAILABLE | The positional tracking module cannot track the current frame. | + | INSUFFICIENT_FEATURES | The positional tracking failed to track the current frame because it could not find enought features. 
| + """ + OK = enum.auto() + UNAVAILABLE = enum.auto() + INSUFFICIENT_FEATURES = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SPATIAL_MEMORY_STATUS(enum.Enum): + """ + Report the status of current map tracking. + + | Enumerator | | + |:-----------:|:---------------------------| + | OK | The positional tracking module is operating normally. | + | LOOP_CLOSED | The positional tracking module detected a loop and corrected its position. | + | SEARCHING | The positional tracking module is searching for recognizable areas in the global map to relocate. | + | INITIALIZING| Displayed until the cameras has acquired enough memory (Initial Area Mapping) or has found its first loop closure and is localized in the loaded area map (Lifelong Mapping/Localization). Users need to keep moving the camera for it to get updated. | + | MAP_UPDATE | Displayed when the robot is mapping (Initial Area Mapping) or when the robot is getting out of the area map bounds (Lifelong Mapping). Displayed as “Tracking” when in exploratory mode with SLAM engaged. | + | KNOWN_MAP | Displayed when the camera is localized within the loaded area map. | + | LOST | Displayed when localization cannot operate anymore (camera completely obstructed, sudden localization jumps after being localized) in Mapping/ Localization modes. It can also include the case where the camera jumps or is located out of map bounds in Localization mode. This should be an indicator for users to stop the robot. | + | OFF | Displayed when the spatial memory is turned off.| + """ + OK = enum.auto() + LOOP_CLOSED = enum.auto() + SEARCHING = enum.auto() + INITIALIZING = enum.auto() + MAP_UPDATE = enum.auto() + KNOWN_MAP = enum.auto() + LOST = enum.auto() + OFF = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class POSITIONAL_TRACKING_FUSION_STATUS(enum.Enum): + """ + Report the status of the positional tracking fusion. + + | Enumerator | | + |:----------:|:---------------------------| + | VISUAL_INERTIAL | The positional tracking module is fusing visual and inertial data. | + | VISUAL | The positional tracking module is fusing visual data only. | + | INERTIAL | The positional tracking module is fusing inertial data only. | + | GNSS | The positional tracking module is fusing GNSS data only. | + | VISUAL_INERTIAL_GNSS | The positional tracking module is fusing visual, inertial, and GNSS data. | + | VISUAL_GNSS | The positional tracking module is fusing visual and GNSS data. | + | INERTIAL_GNSS | The positional tracking module is fusing inertial and GNSS data. | + | UNAVAILABLE | The positional tracking module is unavailable. 
| + """ + VISUAL_INERTIAL = enum.auto() + VISUAL = enum.auto() + INERTIAL = enum.auto() + GNSS = enum.auto() + VISUAL_INERTIAL_GNSS = enum.auto() + VISUAL_GNSS = enum.auto() + INERTIAL_GNSS = enum.auto() + UNAVAILABLE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class GNSS_STATUS(enum.Enum): + """ + Lists that represents the status of the of GNSS signal. + + | Enumerator | | + |:---:|:---:| + | UNKNOWN | No GNSS fix data is available. | + | SINGLE | Single Point Positioning. | + | DGNSS | Differential GNSS. | + | PPS | Precise Positioning Service. | + | RTK_FLOAT | Real Time Kinematic Float. | + | RTK_FIX | Real Time Kinematic Fixed. | + """ + UNKNOWN = enum.auto() + SINGLE = enum.auto() + DGNSS = enum.auto() + PPS = enum.auto() + RTK_FLOAT = enum.auto() + RTK_FIX = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class GNSS_MODE(enum.Enum): + """ + Lists that represents the mode of GNSS signal. + + | Enumerator | | + |:---:|:---:| + | UNKNOWN | No GNSS fix data is available. | + | NO_FIX | No GNSS fix is available. | + | FIX_2D | 2D GNSS fix, providing latitude and longitude coordinates but without altitude information. | + | FIX_3D | 3D GNSS fix, providing latitude, longitude, and altitude coordinates. | + """ + UNKNOWN = enum.auto() + NO_FIX = enum.auto() + FIX_2D = enum.auto() + FIX_3D = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class GNSS_FUSION_STATUS(enum.Enum): + """ + Lists that represents the current GNSS fusion status + + | Enumerator | | + |:---:|:---:| + | OK | The GNSS fusion module is calibrated and working successfully. | + | OFF | The GNSS fusion module is not enabled. | + | CALIBRATION_IN_PROGRESS | Calibration of the GNSS/VIO fusion module is in progress. | + | RECALIBRATION_IN_PROGRESS | Re-alignment of GNSS/VIO data is in progress, leading to potentially inaccurate global position. | + """ + OK = enum.auto() + OFF = enum.auto() + CALIBRATION_IN_PROGRESS = enum.auto() + RECALIBRATION_IN_PROGRESS = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class Landmark: + """ + Represents a 3d landmark. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def position(self) -> list[float]: + """ + The position of the landmark. + """ + return list[float]() + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + The ID of the landmark. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + +class Landmark2D: + """ + Represents the projection of a 3d landmark in the image. 
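+ 
+ Field access sketch (illustrative; ``lm`` is assumed to be a Landmark2D obtained elsewhere):
+ 
+ .. code-block:: text
+ 
+     u, v = lm.position()[0], lm.position()[1]
+     if lm.dynamic_confidence < 0.5:
+         print("landmark", lm.id, "likely belongs to a dynamic object")
+ 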
+ """ + def __init__(self, *args, **kwargs) -> None: ... + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Unique identifier of the corresponding landmark. + """ + return int() + + @property + def dynamic_confidence(self) -> float: + """ + Confidence score indicating the likelihood that the landmark is associated with a dynamic object. + + The value ranges from 0 to 1, where a smaller value indicates greater confidence that the landmark + is owned by a dynamic object. + """ + return float() + + def position(self) -> np.array: + """ + The position of the landmark in the image. + """ + return np.array() + + +class PositionalTrackingStatus: + """ + Lists the different status of the positional tracking + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def odometry_status(self) -> ODOMETRY_STATUS: + """ + Represents the current state of Visual-Inertial Odometry (VIO) tracking between the previous frame and the current frame. + """ + return ODOMETRY_STATUS() + + @odometry_status.setter + def odometry_status(self, odometry_status: Any) -> None: + pass + + @property + def tracking_fusion_status(self) -> POSITIONAL_TRACKING_FUSION_STATUS: + """ + Represents the current state of the positional tracking fusion. + """ + return POSITIONAL_TRACKING_FUSION_STATUS() + + @tracking_fusion_status.setter + def tracking_fusion_status(self, tracking_fusion_status: Any) -> None: + pass + + @property + def spatial_memory_status(self) -> SPATIAL_MEMORY_STATUS: + """ + Represents the current state of camera tracking in the global map. + """ + return SPATIAL_MEMORY_STATUS() + + @spatial_memory_status.setter + def spatial_memory_status(self, spatial_memory_status: Any) -> None: + pass + + +class FusedPositionalTrackingStatus: + """ + Lists the different status of the positional tracking + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def odometry_status(self) -> ODOMETRY_STATUS: + return ODOMETRY_STATUS() + + @odometry_status.setter + def odometry_status(self, odometry_status: Any) -> None: + pass + + @property + def tracking_fusion_status(self) -> POSITIONAL_TRACKING_FUSION_STATUS: + return POSITIONAL_TRACKING_FUSION_STATUS() + + @tracking_fusion_status.setter + def tracking_fusion_status(self, tracking_fusion_status: Any) -> None: + pass + + @property + def gnss_mode(self) -> GNSS_MODE: + return GNSS_MODE() + + @gnss_mode.setter + def gnss_mode(self, gnss_mode: Any) -> None: + pass + + @property + def spatial_memory_status(self) -> SPATIAL_MEMORY_STATUS: + return SPATIAL_MEMORY_STATUS() + + @spatial_memory_status.setter + def spatial_memory_status(self, spatial_memory_status: Any) -> None: + pass + + @property + def gnss_status(self) -> GNSS_STATUS: + return GNSS_STATUS() + + @gnss_status.setter + def gnss_status(self, gnss_status: Any) -> None: + pass + + @property + def gnss_fusion_status(self) -> GNSS_FUSION_STATUS: + return GNSS_FUSION_STATUS() + + @gnss_fusion_status.setter + def gnss_fusion_status(self, gnss_fusion_status: Any) -> None: + pass + + +class POSITIONAL_TRACKING_MODE(enum.Enum): + """ + Lists the mode of positional tracking that can be used. + + | Enumerator | | + |:---:|:---:| + | GEN_1 | Default mode. Fast and stable mode. Requires depth computation. Less robust than GEN_3. | + | GEN_2 | Warning: DEPRECATED. | + | GEN_3 | Fast and accurate, in both exploratory mode and mapped environments. Note: Can be used even if depth_mode is set to DEPTH_MODE::NONE. 
| + """ + GEN_1 = enum.auto() + GEN_2 = enum.auto() + GEN_3 = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class AREA_EXPORTING_STATE(enum.Enum): + """ + Lists the different states of spatial memory area export. + + | Enumerator | | + |:---:|:---:| + | SUCCESS | The spatial memory file has been successfully created. | + | RUNNING | The spatial memory is currently being written. | + | NOT_STARTED | The spatial memory file exportation has not been called. | + | FILE_EMPTY | The spatial memory contains no data, the file is empty. | + | FILE_ERROR | The spatial memory file has not been written because of a wrong file name. | + | SPATIAL_MEMORY_DISABLED | The spatial memory learning is disabled. No file can be created. | + """ + SUCCESS = enum.auto() + RUNNING = enum.auto() + NOT_STARTED = enum.auto() + FILE_EMPTY = enum.auto() + FILE_ERROR = enum.auto() + SPATIAL_MEMORY_DISABLED = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class REFERENCE_FRAME(enum.Enum): + """ + Lists possible types of position matrix used to store camera path and pose. + + | Enumerator | | + |:---:|:---:| + | WORLD | The transform of sl.Pose will contain the motion with reference to the world frame (previously called sl.PATH). | + | CAMERA | The transform of sl.Pose will contain the motion with reference to the previous camera frame (previously called sl.POSE). | + """ + WORLD = enum.auto() + CAMERA = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class TIME_REFERENCE(enum.Enum): + """ + Lists possible time references for timestamps or data. + + + | Enumerator | | + |:---:|:---:| + | IMAGE | The requested timestamp or data will be at the time of the frame extraction. | + | CURRENT | The requested timestamp or data will be at the time of the function call. | + """ + IMAGE = enum.auto() + CURRENT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class SPATIAL_MAPPING_STATE(enum.Enum): + """ + Lists the different states of spatial mapping. + + | Enumerator | | + |:---:|:---:| + | INITIALIZING | The spatial mapping is initializing. | + | OK | The depth and tracking data were correctly integrated in the mapping algorithm. | + | NOT_ENOUGH_MEMORY | The maximum memory dedicated to the scanning has been reached. The mesh will no longer be updated. | + | NOT_ENABLED | sl.Camera.enable_spatial_mapping() wasn't called or the scanning was stopped and not relaunched. | + | FPS_TOO_LOW | The effective FPS is too low to give proper results for spatial mapping. 
Consider using performance parameters (sl.DEPTH_MODE.PERFORMANCE, sl.MAPPING_RESOLUTION.LOW, low camera resolution (RESOLUTION "sl.RESOLUTION.VGA/SVGA" or sl.RESOLUTION.HD720). | + """ + INITIALIZING = enum.auto() + OK = enum.auto() + NOT_ENOUGH_MEMORY = enum.auto() + NOT_ENABLED = enum.auto() + FPS_TOO_LOW = enum.auto() + LAST = enum.auto() + +class REGION_OF_INTEREST_AUTO_DETECTION_STATE(enum.Enum): + """ + Lists the different states of region of interest auto detection. + + | Enumerator | | + |:---:|:---:| + | RUNNING | The region of interest auto detection is initializing. | + | READY | The region of interest mask is ready, if auto_apply was enabled, the region of interest mask is being used | + | NOT_ENABLED | The region of interest auto detection is not enabled | + """ + RUNNING = enum.auto() + READY = enum.auto() + NOT_ENABLED = enum.auto() + LAST = enum.auto() + +class SVO_COMPRESSION_MODE(enum.Enum): + """ + Lists available compression modes for SVO recording. + .. note:: + LOSSLESS is an improvement of previous lossless compression (used in ZED Explorer), even if size may be bigger, compression time is much faster. + + + | Enumerator | | + |:---:|:---:| + | LOSSLESS | PNG/ZSTD (lossless) CPU based compression. Average size: 42% of RAW | + | H264 | H264 (AVCHD) GPU based compression. Average size: 1% of RAW Note: Requires a NVIDIA GPU. | + | H265 | H265 (HEVC) GPU based compression. Average size: 1% of RAW Note: Requires a NVIDIA GPU. | + | H264_LOSSLESS | H264 Lossless GPU/Hardware based compression. Average size: 25% of RAW Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Note: Requires a NVIDIA GPU. | + | H265_LOSSLESS | H265 Lossless GPU/Hardware based compression. Average size: 25% of RAW Provides a SSIM/PSNR result (vs RAW) >= 99.9%. Note: Requires a NVIDIA GPU. | + """ + LOSSLESS = enum.auto() + H264 = enum.auto() + H265 = enum.auto() + H264_LOSSLESS = enum.auto() + H265_LOSSLESS = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class MEM(enum.Enum): + """ + Lists available memory type. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + + + | Enumerator | | + |:---:|:---:| + | CPU | Data will be stored on the CPU (processor side). | + | GPU | Data will be stored on the GPU | + | BOTH | Data will be stored on both the CPU and GPU memory | + """ + CPU = enum.auto() + GPU = enum.auto() + BOTH = enum.auto() + +class COPY_TYPE(enum.Enum): + """ + Lists available copy operation on sl.Mat. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + + + | Enumerator | | + |:---:|:---:| + | CPU_CPU | Copy data from CPU to CPU. | + | GPU_CPU | Copy data from GPU to CPU. | + | CPU_GPU | Copy data from CPU to GPU. | + | GPU_GPU | Copy data from GPU to GPU. | + """ + CPU_CPU = enum.auto() + GPU_CPU = enum.auto() + CPU_GPU = enum.auto() + GPU_GPU = enum.auto() + +class MAT_TYPE(enum.Enum): + """ + Lists available sl.Mat formats. + .. note:: + sl.Mat type depends on image or measure type. + + .. note:: + For the dependencies, see sl.VIEW and sl.MEASURE. 
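+
+     .. note::
+         A rough usage sketch (assuming ``zed`` is an opened sl.Camera; the supported formats are listed in the table below):
+
+     .. code-block:: python
+
+         import pyzed.sl as sl
+
+         depth = sl.Mat()
+         if zed.grab() == sl.ERROR_CODE.SUCCESS:
+             zed.retrieve_measure(depth, sl.MEASURE.DEPTH)  # depth measures use a 1-channel float mat (F32_C1)
+             data = depth.get_data()  # NumPy view of the CPU-side buffer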
+ + + | Enumerator | | + |:---:|:---:| + | F32_C1 | 1-channel matrix of float | + | F32_C2 | 2-channel matrix of float | + | F32_C3 | 3-channel matrix of float | + | F32_C4 | 4-channel matrix of float | + | U8_C1 | 1-channel matrix of unsigned char | + | U8_C2 | 2-channel matrix of unsigned char | + | U8_C3 | 3-channel matrix of unsigned char | + | U8_C4 | 4-channel matrix of unsigned char | + | U16_C1 | 1-channel matrix of unsigned short | + | S8_C4 | 4-channel matrix of signed char | + """ + F32_C1 = enum.auto() + F32_C2 = enum.auto() + F32_C3 = enum.auto() + F32_C4 = enum.auto() + U8_C1 = enum.auto() + U8_C2 = enum.auto() + U8_C3 = enum.auto() + U8_C4 = enum.auto() + U16_C1 = enum.auto() + S8_C4 = enum.auto() + +class SENSOR_TYPE(enum.Enum): + """ + Lists available sensor types. + .. note:: + Sensors are not available on sl.MODEL.ZED. + + + | Enumerator | | + |:---:|:---:| + | ACCELEROMETER | Three-axis accelerometer sensor to measure the inertial accelerations. | + | GYROSCOPE | Three-axis gyroscope sensor to measure the angular velocities. | + | MAGNETOMETER | Three-axis magnetometer sensor to measure the orientation of the device with respect to the Earth's magnetic field. | + | BAROMETER | Barometer sensor to measure the atmospheric pressure. | + """ + ACCELEROMETER = enum.auto() + GYROSCOPE = enum.auto() + MAGNETOMETER = enum.auto() + BAROMETER = enum.auto() + +class SENSORS_UNIT(enum.Enum): + """ + Lists available measurement units of onboard sensors. + .. note:: + Sensors are not available on sl.MODEL.ZED. + + + | Enumerator | | + |:---:|:---:| + | M_SEC_2 | m/s² (acceleration) | + | DEG_SEC | deg/s (angular velocity) | + | U_T | μT (magnetic field) | + | HPA | hPa (atmospheric pressure) | + | CELSIUS | °C (temperature) | + | HERTZ | Hz (frequency) | + """ + M_SEC_2 = enum.auto() + DEG_SEC = enum.auto() + U_T = enum.auto() + HPA = enum.auto() + CELSIUS = enum.auto() + HERTZ = enum.auto() + +class MODULE(enum.Enum): + """ + Lists available module + + + | MODULE | Description | + |:---:|:---:| + | ALL | All modules | + | DEPTH | For the depth module (includes all 'measures' in retrieveMeasure) | + | POSITIONAL_TRACKING | For the positional tracking module | + | OBJECT_DETECTION | For the object detection module | + | BODY_TRACKING | For the body tracking module | + | SPATIAL_MAPPING | For the spatial mapping module | + """ + ALL = enum.auto() + DEPTH = enum.auto() + POSITIONAL_TRACKING = enum.auto() + OBJECT_DETECTION = enum.auto() + BODY_TRACKING = enum.auto() + SPATIAL_MAPPING = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_CLASS(enum.Enum): + """ + Lists available object classes. + + + | OBJECT_CLASS | Description | + |:---:|:---:| + | PERSON | For people detection | + | VEHICLE | For vehicle detection (cars, trucks, buses, motorcycles, etc.) | + | BAG | For bag detection (backpack, handbag, suitcase, etc.) | + | ANIMAL | For animal detection (cow, sheep, horse, dog, cat, bird, etc.) | + | ELECTRONICS | For electronic device detection (cellphone, laptop, etc.) | + | FRUIT_VEGETABLE | For fruit and vegetable detection (banana, apple, orange, carrot, etc.) | + | SPORT | For sport-related object detection (sport ball, etc.) 
| + """ + PERSON = enum.auto() + VEHICLE = enum.auto() + BAG = enum.auto() + ANIMAL = enum.auto() + ELECTRONICS = enum.auto() + FRUIT_VEGETABLE = enum.auto() + SPORT = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_SUBCLASS(enum.Enum): + """ + List available object subclasses. + + Given as hint, when using object tracking an object can change of sl.OBJECT_SUBCLASS while keeping the same sl.OBJECT_CLASS + (i.e.: frame n: MOTORBIKE, frame n+1: BICYCLE). + + | OBJECT_SUBCLASS | OBJECT_CLASS | + |:---:|:---:| + | PERSON | PERSON | + | PERSON_HEAD | PERSON | + | BICYCLE | VEHICLE | + | CAR | VEHICLE | + | MOTORBIKE | VEHICLE | + | BUS | VEHICLE | + | TRUCK | VEHICLE | + | BOAT | VEHICLE | + | BACKPACK | BAG | + | HANDBAG | BAG | + | SUITCASE | BAG | + | BIRD | ANIMAL | + | CAT | ANIMAL | + | DOG | ANIMAL | + | HORSE | ANIMAL | + | SHEEP | ANIMAL | + | COW | ANIMAL | + | CELLPHONE | ELECTRONICS | + | LAPTOP | ELECTRONICS | + | BANANA | FRUIT_VEGETABLE | + | APPLE | FRUIT_VEGETABLE | + | ORANGE | FRUIT_VEGETABLE | + | CARROT | FRUIT_VEGETABLE | + | SPORTSBALL | SPORT | + | MACHINERY | VEHICLE | + """ + PERSON = enum.auto() + PERSON_HEAD = enum.auto() + BICYCLE = enum.auto() + CAR = enum.auto() + MOTORBIKE = enum.auto() + BUS = enum.auto() + TRUCK = enum.auto() + BOAT = enum.auto() + BACKPACK = enum.auto() + HANDBAG = enum.auto() + SUITCASE = enum.auto() + BIRD = enum.auto() + CAT = enum.auto() + DOG = enum.auto() + HORSE = enum.auto() + SHEEP = enum.auto() + COW = enum.auto() + CELLPHONE = enum.auto() + LAPTOP = enum.auto() + BANANA = enum.auto() + APPLE = enum.auto() + ORANGE = enum.auto() + CARROT = enum.auto() + SPORTSBALL = enum.auto() + MACHINERY = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_TRACKING_STATE(enum.Enum): + """ + Lists the different states of object tracking. + + + | Enumerator | | + |:---:|:---:| + | OFF | The tracking is not yet initialized. The object id is not usable. | + | OK | The object is tracked. | + | SEARCHING | The object could not be detected in the image and is potentially occluded. The trajectory is estimated. | + | TERMINATE | This is the last searching state of the track. The track will be deleted in the next sl.Camera.retrieve_objects(). | + """ + OFF = enum.auto() + OK = enum.auto() + SEARCHING = enum.auto() + TERMINATE = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class FLIP_MODE(enum.Enum): + """ + Lists possible flip modes of the camera. + + + | Enumerator | | + |:---:|:---:| + | OFF | No flip applied. Default behavior. | + | ON | Images and camera sensors' data are flipped useful when your camera is mounted upside down. | + | AUTO | In LIVE mode, use the camera orientation (if an IMU is available) to set the flip mode. In SVO mode, read the state of this enum when recorded. 
| + """ + OFF = enum.auto() + ON = enum.auto() + AUTO = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class OBJECT_ACTION_STATE(enum.Enum): + """ + Lists the different states of an object's actions. + + + | Enumerator | | + |:---:|:---:| + | IDLE | The object is staying static. | + | MOVING | The object is moving. | + """ + IDLE = enum.auto() + MOVING = enum.auto() + LAST = enum.auto() + def __str__(self) -> None: + pass + + def __repr__(self) -> None: + pass + + def __lt__(self, other) -> None: + pass + + def __le__(self, other) -> None: + pass + + def __gt__(self, other) -> None: + pass + + def __ge__(self, other) -> None: + pass + + +class ObjectData: + """ + Class containing data of a detected object such as its bounding_box, label, id and its 3D position. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @head_position.setter + def head_position(self, head_position: Any) -> None: + pass + + @property + def confidence(self) -> float: + """ + Detection confidence value of the object. + From 0 to 100, a low value means the object might not be localized perfectly or the label (sl.OBJECT_CLASS) is uncertain. + """ + return float() + + @confidence.setter + def confidence(self, confidence: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Object tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @velocity.setter + def velocity(self, velocity: Any) -> None: + pass + + @property + def action_state(self) -> OBJECT_ACTION_STATE: + """ + Object action state. + """ + return OBJECT_ACTION_STATE() + + @action_state.setter + def action_state(self, action_state: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Object identification number. + It is used as a reference when tracking the object through the frames. + .. note:: + Only available if sl.ObjectDetectionParameters.enable_tracking is activated. + + .. note:: + Otherwise, it will be set to -1. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def sublabel(self) -> OBJECT_SUBCLASS: + """ + Object sub-class/sub-category to identify the object type. + """ + return OBJECT_SUBCLASS() + + @sublabel.setter + def sublabel(self, sublabel: Any) -> None: + pass + + @property + def mask(self) -> Mat: + """ + Mask defining which pixels which belong to the object (in bounding_box_2d and set to 255) and those of the background (set to 0). + .. warning:: The mask information is only available for tracked objects (sl.OBJECT_TRACKING_STATE.OK) that have a valid depth. + .. warning:: Otherwise, the mask will not be initialized (```mask.is_init() == False```). 
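+
+     .. note::
+         A guarded-access sketch (assuming ``obj`` is a sl.ObjectData retrieved via sl.Camera.retrieve_objects()):
+
+     .. code-block:: python
+
+         if obj.tracking_state == sl.OBJECT_TRACKING_STATE.OK and obj.mask.is_init():
+             mask_np = obj.mask.get_data()  # 255 inside the object's 2D bounding box, 0 for background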
+ """ + return Mat() + + @mask.setter + def mask(self, mask: Any) -> None: + pass + + @position_covariance.setter + def position_covariance(self, position_covariance: Any) -> None: + pass + + @property + def raw_label(self) -> int: + """ + Object raw label. + It is forwarded from sl.CustomBoxObjectData when using sl.OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS. + """ + return int() + + @raw_label.setter + def raw_label(self, raw_label: Any) -> None: + pass + + @dimensions.setter + def dimensions(self, dimensions: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @bounding_box.setter + def bounding_box(self, bounding_box: Any) -> None: + pass + + @property + def label(self) -> OBJECT_CLASS: + """ + Object class/category to identify the object type. + """ + return OBJECT_CLASS() + + @label.setter + def label(self, label: Any) -> None: + pass + + def position(self) -> np.array[float]: + """ + Object 3D centroid. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def velocity(self) -> np.array[float]: + """ + Object 3D velocity. + .. note:: + It is defined in ```sl.InitParameters.coordinate_units / s``` and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the object represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float]() + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + def dimensions(self) -> np.array[float]: + """ + 3D object dimensions: width, height, length. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def head_bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the head of the object (a person) represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[float][float]() + + def head_bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the head of the object (a person) represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[int][int]() + + def head_position(self) -> np.array[float]: + """ + 3D centroid of the head of the object (a person). + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. 
warning:: Not available with sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX. + """ + return np.array[float]() + + def position_covariance(self) -> np.array[float]: + """ + Covariance matrix of the 3D position. + .. note:: + It is represented by its upper triangular matrix value + + .. code-block:: text + + = [p0, p1, p2] + [p1, p3, p4] + [p2, p4, p5] + + where pi is ```position_covariance[i]``` + """ + return np.array[float]() + + +class BodyData: + """ + Class containing data of a detected body/person such as its bounding_box, id and its 3D position. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @head_position.setter + def head_position(self, head_position: Any) -> None: + pass + + @property + def id(self) -> int: + """ + Body/person identification number. + It is used as a reference when tracking the body through the frames. + .. note:: + Only available if sl.BodyTrackingParameters.enable_tracking is activated. + + .. note:: + Otherwise, it will be set to -1. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def confidence(self) -> float: + """ + Detection confidence value of the body/person. + From 0 to 100, a low value means the body might not be localized perfectly. + """ + return float() + + @confidence.setter + def confidence(self, confidence: Any) -> None: + pass + + @position_covariance.setter + def position_covariance(self, position_covariance: Any) -> None: + pass + + @property + def mask(self) -> Mat: + """ + Mask defining which pixels which belong to the body/person (in bounding_box_2d and set to 255) and those of the background (set to 0). + .. warning:: The mask information is only available for tracked bodies (sl.OBJECT_TRACKING_STATE.OK) that have a valid depth. + .. warning:: Otherwise, the mask will not be initialized (```mask.is_init() == False```). + """ + return Mat() + + @mask.setter + def mask(self, mask: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @position.setter + def position(self, position: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Body/person tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @velocity.setter + def velocity(self, velocity: Any) -> None: + pass + + @keypoints_covariance.setter + def keypoints_covariance(self, keypoints_covariance: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @dimensions.setter + def dimensions(self, dimensions: Any) -> None: + pass + + @property + def action_state(self) -> OBJECT_ACTION_STATE: + """ + Body/person action state. + """ + return OBJECT_ACTION_STATE() + + @action_state.setter + def action_state(self, action_state: Any) -> None: + pass + + @bounding_box.setter + def bounding_box(self, bounding_box: Any) -> None: + pass + + def position(self) -> np.array[float]: + """ + Body/person 3D centroid. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def velocity(self) -> np.array[float]: + """ + Body/person 3D velocity. + .. 
note:: + It is defined in ```sl.InitParameters.coordinate_units / s``` and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the body/person represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float]() + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the body/person represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + def keypoints_covariance(self) -> np.array[float][float]: + """ + NumPy array of detection covariance for each keypoint. + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. Their covariances will be 0. + """ + return np.array[float][float]() + + def position_covariance(self) -> np.array[float]: + """ + Covariance matrix of the 3D position. + .. note:: + It is represented by its upper triangular matrix value + + .. code-block:: text + + = [p0, p1, p2] + [p1, p3, p4] + [p2, p4, p5] + + where pi is ```position_covariance[i]``` + """ + return np.array[float]() + + def dimensions(self) -> np.array[float]: + """ + 3D body/person dimensions: width, height, length. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def keypoint(self) -> np.array[float][float]: + """ + Set of useful points representing the human body in 3D. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. They will have non finite values. + """ + return np.array[float][float]() + + def keypoint_2d(self) -> np.array[int][int]: + """ + Set of useful points representing the human body in 2D. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. warning:: In some cases, eg. body partially out of the image, some keypoints can not be detected. They will have negatives coordinates. + """ + return np.array[int][int]() + + def head_bounding_box(self) -> np.array[float][float]: + """ + 3D bounding box of the head of the body/person represented as eight 3D points. + .. note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float][float]() + + def head_bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the head of the body/person represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + """ + return np.array[int][int]() + + def head_position(self) -> np.array[float]: + """ + 3D centroid of the head of the body/person. + .. 
note:: + It is defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + """ + return np.array[float]() + + def keypoint_confidence(self) -> np.array[float]: + """ + NumPy array of detection confidences for each keypoint. + .. note:: + They can not be lower than the sl.BodyTrackingRuntimeParameters.detection_confidence_threshold. + + .. warning:: In some cases, eg. body partially out of the image or missing depth data, some keypoints can not be detected. They will have non finite values. + """ + return np.array[float]() + + def local_position_per_joint(self) -> np.array[float][float]: + """ + NumPy array of local position (position of the child keypoint with respect to its parent expressed in its parent coordinate frame) for each keypoint. + .. note:: + They are expressed in sl.REFERENCE_FRAME.CAMERA or sl.REFERENCE_FRAME.WORLD. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float][float]() + + def local_orientation_per_joint(self) -> np.array[float][float]: + """ + NumPy array of local orientation for each keypoint. + .. note:: + The orientation is represented by a quaternion. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float][float]() + + def global_root_orientation(self) -> np.array[float]: + """ + Global root orientation of the skeleton (NumPy array). + The orientation is also represented by a quaternion. + .. note:: + The global root position is already accessible in keypoint attribute by using the root index of a given sl.BODY_FORMAT. + + .. warning:: Not available with sl.BODY_FORMAT.BODY_18. + """ + return np.array[float]() + + +def generate_unique_id() -> None: + """ + Generate a UUID like unique id to help identify and track AI detections. + """ + pass + +class CustomBoxObjectData: + """ + Class that store externally detected objects. + + The objects can be ingested with sl.Camera.ingest_custom_box_objects() to extract 3D and tracking information over time. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def tracking_max_dist(self) -> float: + """ + Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. + By default, do not discard tracked object based on distance. + Only valid for static object. + """ + return float() + + @tracking_max_dist.setter + def tracking_max_dist(self, tracking_max_dist: Any) -> None: + pass + + @property + def is_grounded(self) -> bool: + """ + Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking. + - true: 2 DoF projected alongside the floor plane. Case for object standing on the ground such as person, vehicle, etc. + \n The projection implies that the objects cannot be superposed on multiple horizontal levels. + - false: 6 DoF (full 3D movements are allowed). + + .. note:: + This parameter cannot be changed for a given object tracking id. + + .. note:: + It is advised to set it by labels to avoid issues. + """ + return bool() + + @is_grounded.setter + def is_grounded(self, is_grounded: Any) -> None: + pass + + @property + def tracking_timeout(self) -> float: + """ + Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time. + By default, let the tracker decide internally based on the internal sub class of the tracked object. 
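+
+     .. note::
+         A minimal ingestion sketch for this class (assuming ``zed`` is an opened sl.Camera and ``box_abcd`` is a hypothetical 4x2 array of 2D corners produced by an external detector):
+
+     .. code-block:: python
+
+         obj = sl.CustomBoxObjectData()
+         obj.unique_object_id = sl.generate_unique_id()
+         obj.label = 0                  # external class id, passed through by the SDK
+         obj.probability = 0.75         # detector confidence in [0-1]
+         obj.bounding_box_2d = box_abcd
+         obj.is_grounded = True         # person/vehicle-like: constrain tracking to the floor plane
+         zed.ingest_custom_box_objects([obj])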
+ """ + return float() + + @tracking_timeout.setter + def tracking_timeout(self, tracking_timeout: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @property + def probability(self) -> float: + """ + Detection confidence value of the object. + .. note:: + The value should be in ```[0-1]```. + + .. note:: + It can be used to improve the object tracking. + """ + return float() + + @probability.setter + def probability(self, probability: Any) -> None: + pass + + @property + def is_static(self) -> bool: + """ + Provide hypothesis about the object staticity to improve the object tracking. + - true: the object will be assumed to never move nor being moved. + - false: the object will be assumed to be able to move or being moved. + """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @property + def label(self) -> int: + """ + Object label. + This information is passed-through and can be used to improve object tracking. + .. note:: + It should define an object class. This means that any similar object (in classification) should share the same label number. + """ + return int() + + @label.setter + def label(self, label: Any) -> None: + pass + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + +class CustomMaskObjectData: + """ + Class storing externally detected objects. + + The objects can be ingested with sl.Camera.ingest_custom_mask_objects() to extract 3D and tracking information over time. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def tracking_max_dist(self) -> float: + """ + Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. + By default, do not discard tracked object based on distance. + Only valid for static object. + """ + return float() + + @tracking_max_dist.setter + def tracking_max_dist(self, tracking_max_dist: Any) -> None: + pass + + @property + def is_grounded(self) -> bool: + """ + Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking. + - true: 2 DoF projected alongside the floor plane. Case for object standing on the ground such as person, vehicle, etc. + \n The projection implies that the objects cannot be superposed on multiple horizontal levels. + - false: 6 DoF (full 3D movements are allowed). + + .. note:: + This parameter cannot be changed for a given object tracking id. + + .. note:: + It is advised to set it by labels to avoid issues. + """ + return bool() + + @is_grounded.setter + def is_grounded(self, is_grounded: Any) -> None: + pass + + @property + def tracking_timeout(self) -> float: + """ + Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time. 
+ By default, let the tracker decide internally based on the internal sub class of the tracked object. + """ + return float() + + @tracking_timeout.setter + def tracking_timeout(self, tracking_timeout: Any) -> None: + pass + + @property + def unique_object_id(self) -> str: + """ + Unique id to help identify and track AI detections. + It can be either generated externally, or by using generate_unique_id() or left empty. + """ + return str() + + @unique_object_id.setter + def unique_object_id(self, unique_object_id: Any) -> None: + pass + + @property + def probability(self) -> float: + """ + Detection confidence value of the object. + .. note:: + The value should be in ```[0-1]```. + + .. note:: + It can be used to improve the object tracking. + """ + return float() + + @probability.setter + def probability(self, probability: Any) -> None: + pass + + @property + def is_static(self) -> bool: + """ + Provide hypothesis about the object staticity to improve the object tracking. + - true: the object will be assumed to never move nor being moved. + - false: the object will be assumed to be able to move or being moved. + """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @bounding_box_2d.setter + def bounding_box_2d(self, bounding_box_2d: Any) -> None: + pass + + @property + def label(self) -> int: + """ + Object label. + This information is passed-through and can be used to improve object tracking. + .. note:: + It should define an object class. This means that any similar object (in classification) should share the same label number. + """ + return int() + + @label.setter + def label(self, label: Any) -> None: + pass + + @property + def box_mask(self) -> Mat: + """ + Mask defining which pixels which belong to the object (in bounding_box_2d and set to 255) and those of the background (set to 0). + """ + return Mat() + + @box_mask.setter + def box_mask(self, box_mask: Any) -> None: + pass + + def bounding_box_2d(self) -> np.array[int][int]: + """ + 2D bounding box of the object represented as four 2D points starting at the top left corner and rotation clockwise. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int]() + + +class BODY_18_PARTS(enum.Enum): + """ + Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_18. + + | BODY_18_PARTS | Keypoint number | + |:---:|:---:| + | NOSE | 0 | + | NECK | 1 | + | RIGHT_SHOULDER | 2 | + | RIGHT_ELBOW | 3 | + | RIGHT_WRIST | 4 | + | LEFT_SHOULDER | 5 | + | LEFT_ELBOW | 6 | + | LEFT_WRIST | 7 | + | RIGHT_HIP | 8 | + | RIGHT_KNEE | 9 | + | RIGHT_ANKLE | 10 | + | LEFT_HIP | 11 | + | LEFT_KNEE | 12 | + | LEFT_ANKLE | 13 | + | RIGHT_EYE | 14 | + | LEFT_EYE | 15 | + | RIGHT_EAR | 16 | + | LEFT_EAR | 17 | + """ + NOSE = enum.auto() + NECK = enum.auto() + RIGHT_SHOULDER = enum.auto() + RIGHT_ELBOW = enum.auto() + RIGHT_WRIST = enum.auto() + LEFT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + RIGHT_HIP = enum.auto() + RIGHT_KNEE = enum.auto() + RIGHT_ANKLE = enum.auto() + LEFT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + RIGHT_EYE = enum.auto() + LEFT_EYE = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_EAR = enum.auto() + LAST = enum.auto() + +class BODY_34_PARTS(enum.Enum): + """ + Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_34. 
+ + | BODY_34_PARTS | Keypoint number | + |:---:|:---:| + | PELVIS | 0 | + | NAVAL_SPINE | 1 | + | CHEST_SPINE | 2 | + | NECK | 3 | + | LEFT_CLAVICLE | 4 | + | LEFT_SHOULDER | 5 | + | LEFT_ELBOW | 6 | + | LEFT_WRIST | 7 | + | LEFT_HAND | 8 | + | LEFT_HANDTIP | 9 | + | LEFT_THUMB | 10 | + | RIGHT_CLAVICLE | 11 | + | RIGHT_SHOULDER | 12 | + | RIGHT_ELBOW | 13 | + | RIGHT_WRIST | 14 | + | RIGHT_HAND | 15 | + | RIGHT_HANDTIP | 16 | + | RIGHT_THUMB | 17 | + | LEFT_HIP | 18 | + | LEFT_KNEE | 19 | + | LEFT_ANKLE | 20 | + | LEFT_FOOT | 21 | + | RIGHT_HIP | 22 | + | RIGHT_KNEE | 23 | + | RIGHT_ANKLE | 24 | + | RIGHT_FOOT | 25 | + | HEAD | 26 | + | NOSE | 27 | + | LEFT_EYE | 28 | + | LEFT_EAR | 29 | + | RIGHT_EYE | 30 | + | RIGHT_EAR | 31 | + | LEFT_HEEL | 32 | + | RIGHT_HEEL | 33 | + """ + PELVIS = enum.auto() + NAVAL_SPINE = enum.auto() + CHEST_SPINE = enum.auto() + NECK = enum.auto() + LEFT_CLAVICLE = enum.auto() + LEFT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + LEFT_HAND = enum.auto() + LEFT_HANDTIP = enum.auto() + LEFT_THUMB = enum.auto() + RIGHT_CLAVICLE = enum.auto() + RIGHT_SHOULDER = enum.auto() + RIGHT_ELBOW = enum.auto() + RIGHT_WRIST = enum.auto() + RIGHT_HAND = enum.auto() + RIGHT_HANDTIP = enum.auto() + RIGHT_THUMB = enum.auto() + LEFT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + LEFT_FOOT = enum.auto() + RIGHT_HIP = enum.auto() + RIGHT_KNEE = enum.auto() + RIGHT_ANKLE = enum.auto() + RIGHT_FOOT = enum.auto() + HEAD = enum.auto() + NOSE = enum.auto() + LEFT_EYE = enum.auto() + LEFT_EAR = enum.auto() + RIGHT_EYE = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_HEEL = enum.auto() + RIGHT_HEEL = enum.auto() + LAST = enum.auto() + +class BODY_38_PARTS(enum.Enum): + """ + Semantic of human body parts and order of sl.BodyData.keypoint for sl.BODY_FORMAT.BODY_38. 
+ + | BODY_38_PARTS | Keypoint number | + |:---:|:---:| + | PELVIS | 0 | + | SPINE_1 | 1 | + | SPINE_2 | 2 | + | SPINE_3 | 3 | + | NECK | 4 | + | NOSE | 5 | + | LEFT_EYE | 6 | + | RIGHT_EYE | 7 | + | LEFT_EAR | 8 | + | RIGHT_EAR | 9 | + | LEFT_CLAVICLE | 10 | + | RIGHT_CLAVICLE | 11 | + | LEFT_SHOULDER | 12 | + | RIGHT_SHOULDER | 13 | + | LEFT_ELBOW | 14 | + | RIGHT_ELBOW | 15 | + | LEFT_WRIST | 16 | + | RIGHT_WRIST | 17 | + | LEFT_HIP | 18 | + | RIGHT_HIP | 19 | + | LEFT_KNEE | 20 | + | RIGHT_KNEE | 21 | + | LEFT_ANKLE | 22 | + | RIGHT_ANKLE | 23 | + | LEFT_BIG_TOE | 24 | + | RIGHT_BIG_TOE | 25 | + | LEFT_SMALL_TOE | 26 | + | RIGHT_SMALL_TOE | 27 | + | LEFT_HEEL | 28 | + | RIGHT_HEEL | 29 | + | LEFT_HAND_THUMB_4 | 30 | + | RIGHT_HAND_THUMB_4 | 31 | + | LEFT_HAND_INDEX_1 | 32 | + | RIGHT_HAND_INDEX_1 | 33 | + | LEFT_HAND_MIDDLE_4 | 34 | + | RIGHT_HAND_MIDDLE_4 | 35 | + | LEFT_HAND_PINKY_1 | 36 | + | RIGHT_HAND_PINKY_1 | 37 | + """ + PELVIS = enum.auto() + SPINE_1 = enum.auto() + SPINE_2 = enum.auto() + SPINE_3 = enum.auto() + NECK = enum.auto() + NOSE = enum.auto() + LEFT_EYE = enum.auto() + RIGHT_EYE = enum.auto() + LEFT_EAR = enum.auto() + RIGHT_EAR = enum.auto() + LEFT_CLAVICLE = enum.auto() + RIGHT_CLAVICLE = enum.auto() + LEFT_SHOULDER = enum.auto() + RIGHT_SHOULDER = enum.auto() + LEFT_ELBOW = enum.auto() + RIGHT_ELBOW = enum.auto() + LEFT_WRIST = enum.auto() + RIGHT_WRIST = enum.auto() + LEFT_HIP = enum.auto() + RIGHT_HIP = enum.auto() + LEFT_KNEE = enum.auto() + RIGHT_KNEE = enum.auto() + LEFT_ANKLE = enum.auto() + RIGHT_ANKLE = enum.auto() + LEFT_BIG_TOE = enum.auto() + RIGHT_BIG_TOE = enum.auto() + LEFT_SMALL_TOE = enum.auto() + RIGHT_SMALL_TOE = enum.auto() + LEFT_HEEL = enum.auto() + RIGHT_HEEL = enum.auto() + LEFT_HAND_THUMB_4 = enum.auto() + RIGHT_HAND_THUMB_4 = enum.auto() + LEFT_HAND_INDEX_1 = enum.auto() + RIGHT_HAND_INDEX_1 = enum.auto() + LEFT_HAND_MIDDLE_4 = enum.auto() + RIGHT_HAND_MIDDLE_4 = enum.auto() + LEFT_HAND_PINKY_1 = enum.auto() + RIGHT_HAND_PINKY_1 = enum.auto() + LAST = enum.auto() + +class INFERENCE_PRECISION(enum.Enum): + """ + Report the actual inference precision used + + | Enumerator | | + |:---:|:---:| + | FP32 | | + | FP16 | | + | INT8 | | + """ + FP32 = enum.auto() + FP16 = enum.auto() + INT8 = enum.auto() + LAST = enum.auto() + +class BODY_FORMAT(enum.Enum): + """ + Lists supported skeleton body models. + + | Enumerator | | + |:---:|:---:| + | BODY_18 | 18-keypoint model Basic body model | + | BODY_34 | 34-keypoint model Note: Requires body fitting enabled. | + | BODY_38 | 38-keypoint model Including simplified face, hands and feet.Note: Early Access | + """ + BODY_18 = enum.auto() + BODY_34 = enum.auto() + BODY_38 = enum.auto() + LAST = enum.auto() + +class BODY_KEYPOINTS_SELECTION(enum.Enum): + """ + Lists supported models for skeleton keypoints selection. + + | Enumerator | | + |:---:|:---:| + | FULL | Full keypoint model | + | UPPER_BODY | Upper body keypoint model Will output only upper body (from hip). | + """ + FULL = enum.auto() + UPPER_BODY = enum.auto() + LAST = enum.auto() + +def get_idx(part: BODY_18_PARTS) -> int: + """ + Return associated index of each sl.BODY_18_PARTS. + """ + return int() + +def get_idx_34(part: BODY_34_PARTS) -> int: + """ + Return associated index of each sl.BODY_34_PARTS. + """ + return int() + +def get_idx_38(part: BODY_38_PARTS) -> int: + """ + Return associated index of each sl.BODY_38_PARTS. 
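+
+     .. note::
+         A lookup sketch (assuming ``body`` is a sl.BodyData detected with sl.BODY_FORMAT.BODY_38):
+
+     .. code-block:: python
+
+         idx = sl.get_idx_38(sl.BODY_38_PARTS.LEFT_WRIST)
+         left_wrist_3d = body.keypoint()[idx]  # expressed in InitParameters.coordinate_units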
+ """ + return int() + +class ObjectsBatch: + """ + Class containing batched data of a detected objects from the object detection module. + + This class can be used to store trajectories. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def id(self) -> int: + """ + Id of the batch. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def sublabel(self) -> OBJECT_SUBCLASS: + """ + Objects sub-class/sub-category to identify the object type. + """ + return OBJECT_SUBCLASS() + + @sublabel.setter + def sublabel(self, sublabel: Any) -> None: + pass + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Objects tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + @property + def timestamps(self) -> list[Timestamp]: + """ + List of timestamps for each object. + """ + return list[Timestamp]() + + @property + def label(self) -> OBJECT_CLASS: + """ + Objects class/category to identify the object type. + """ + return OBJECT_CLASS() + + @label.setter + def label(self, label: Any) -> None: + pass + + @property + def action_states(self) -> list[OBJECT_ACTION_STATE]: + """ + List of action states for each object. + """ + return list[OBJECT_ACTION_STATE]() + + def positions(self) -> np.array[float][float]: + """ + NumPy array of positions for each object. + """ + return np.array[float][float]() + + def position_covariances(self) -> np.array[float][float]: + """ + NumPy array of positions' covariances for each object. + """ + return np.array[float][float]() + + def velocities(self) -> np.array[float][float]: + """ + NumPy array of 3D velocities for each object. + """ + return np.array[float][float]() + + def bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D bounding boxes for each object. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float][float]() + + def bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 2D bounding boxes for each object. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int][int]() + + def confidences(self) -> np.array[float]: + """ + NumPy array of confidences for each object. + """ + return np.array[float]() + + def head_bounding_boxes_2d(self) -> np.array[int][int][int]: + return np.array[int][int][int]() + + def head_bounding_boxes(self) -> np.array[float][float][float]: + return np.array[float][float][float]() + + def head_positions(self) -> np.array[float][float]: + return np.array[float][float]() + + +class Objects: + """ + Class containing the results of the object detection module. + + The detected objects are listed in object_list. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_list(self) -> list[ObjectData]: + """ + List of detected objects. + """ + return list[ObjectData]() + + @object_list.setter + def object_list(self, object_list: Any) -> None: + pass + + @property + def is_new(self) -> bool: + """ + Whether object_list has already been retrieved or not. 
+ Default: False + """ + return bool() + + @is_new.setter + def is_new(self, is_new: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + Timestamp corresponding to the frame acquisition. + This value is especially useful for the async mode to synchronize the data. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @property + def is_tracked(self) -> bool: + """ + Whether both the object tracking and the world orientation has been setup. + Default: False + """ + return bool() + + @is_tracked.setter + def is_tracked(self, is_tracked: Any) -> None: + pass + + def get_object_data_from_id(self, py_object_data: ObjectData, object_data_id: int) -> bool: + """ + Method that looks for a given object id in the current objects list. + :param py_object_data: sl.ObjectData to fill if the search succeeded. (Direction: out) + :param object_data_id: Id of the sl.ObjectData to search. (Direction: in) + :return: True if found, otherwise False. + """ + return bool() + + +class BodiesBatch: + """ + Class containing batched data of a detected bodies/persons from the body tracking module. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def id(self) -> int: + """ + Id of the batch. + """ + return int() + + @id.setter + def id(self, id: Any) -> None: + pass + + @property + def action_states(self) -> list[OBJECT_ACTION_STATE]: + """ + List of action states for each body/person. + """ + return list[OBJECT_ACTION_STATE]() + + @property + def timestamps(self) -> list[Timestamp]: + """ + List of timestamps for each body/person. + """ + return list[Timestamp]() + + @property + def tracking_state(self) -> OBJECT_TRACKING_STATE: + """ + Bodies/persons tracking state. + """ + return OBJECT_TRACKING_STATE() + + @tracking_state.setter + def tracking_state(self, tracking_state: Any) -> None: + pass + + def positions(self) -> np.array[float][float]: + """ + NumPy array of positions for each body/person. + """ + return np.array[float][float]() + + def position_covariances(self) -> np.array[float][float]: + """ + NumPy array of positions' covariances for each body/person. + """ + return np.array[float][float]() + + def velocities(self) -> np.array[float][float]: + """ + NumPy array of 3D velocities for each body/person. + """ + return np.array[float][float]() + + def bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D bounding boxes for each body/person. + .. note:: + They are defined in sl.InitParameters.coordinate_units and expressed in sl.RuntimeParameters.measure3D_reference_frame. + + .. code-block:: text + + 1 ------ 2 + / /| + 0 ------ 3 | + | Object | 6 + | |/ + 4 ------ 7 + """ + return np.array[float][float][float]() + + def bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 2D bounding boxes for each body/person. + .. note:: + Expressed in pixels on the original image resolution, ```[0, 0]``` is the top left corner. + + .. code-block:: text + + A ------ B + | Object | + D ------ C + """ + return np.array[int][int][int]() + + def confidences(self) -> np.array[float]: + """ + NumPy array of confidences for each body/person. + """ + return np.array[float]() + + def keypoints_2d(self) -> np.array[int][int][int]: + return np.array[int][int][int]() + + def keypoints(self) -> np.array[float][float][float]: + """ + NumPy array of 3D keypoints for each body/person. 
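+
+     .. note::
+         A consumption sketch (assuming ``batch`` is a sl.BodiesBatch delivered by the batching system):
+
+     .. code-block:: python
+
+         for ts, kp_set in zip(batch.timestamps, batch.keypoints()):
+             print(ts.get_milliseconds(), kp_set)  # one 3D keypoint set per batched sample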
+ """ + return np.array[float][float][float]() + + def head_bounding_boxes_2d(self) -> np.array[int][int][int]: + """ + NumPy array of 2D bounding boxes of the head for each body/person. + """ + return np.array[int][int][int]() + + def head_bounding_boxes(self) -> np.array[float][float][float]: + """ + NumPy array of 3D bounding boxes of the head for each body/person. + """ + return np.array[float][float][float]() + + def head_positions(self) -> np.array[float][float]: + """ + NumPy array of 3D head positions for each body/person. + """ + return np.array[float][float]() + + def keypoint_confidences(self) -> np.array[float][float]: + """ + NumPy array of detection confidences for each keypoint of each body/person. + """ + return np.array[float][float]() + + + class Bodies: + """ + Class containing the results of the body tracking module. + + The detected bodies/persons are listed in body_list. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def body_list(self) -> list[BodyData]: + """ + List of detected bodies/persons. + """ + return list[BodyData]() + + @body_list.setter + def body_list(self, body_list: Any) -> None: + pass + + @property + def is_tracked(self) -> bool: + """ + Whether both the body tracking and the world orientation have been set up. + Default: False + """ + return bool() + + @is_tracked.setter + def is_tracked(self, is_tracked: Any) -> None: + pass + + @property + def inference_precision_mode(self) -> INFERENCE_PRECISION: + """ + Status of the actual inference precision mode used to detect the bodies/persons. + .. note:: + It depends on the GPU hardware support, the sl.BodyTrackingParameters.allow_reduced_precision_inference input parameter and the model support. + """ + return INFERENCE_PRECISION() + + @inference_precision_mode.setter + def inference_precision_mode(self, inference_precision_mode: Any) -> None: + pass + + @property + def is_new(self) -> bool: + """ + Whether body_list has already been retrieved or not. + Default: False + """ + return bool() + + @is_new.setter + def is_new(self, is_new: Any) -> None: + pass + + @property + def body_format(self) -> BODY_FORMAT: + """ + Body format used in sl.BodyTrackingParameters.body_format parameter. + """ + return BODY_FORMAT() + + @body_format.setter + def body_format(self, body_format: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + Timestamp corresponding to the frame acquisition. + This value is especially useful for the async mode to synchronize the data. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def get_body_data_from_id(self, py_body_data: BodyData, body_data_id: int) -> bool: + """ + Method that looks for a given body id in the current bodies list. + :param py_body_data: sl.BodyData to fill if the search succeeded. (Direction: out) + :param body_data_id: Id of the sl.BodyData to search. (Direction: in) + :return: True if found, otherwise False. + """ + return bool() + + + class BatchParameters: + """ + Class containing a set of parameters for batch object detection. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def latency(self) -> float: + """ + Trajectories will be output in batch with the desired latency in seconds. + During this waiting time, re-identification of objects is done in the background. + ..
note:: + Specifying a short latency will limit the search (falling in timeout) for previously seen object ids but will be closer to real time output. + + .. note:: + Specifying a long latency will reduce the change of timeout in re-identification but increase difference with live output. + """ + return float() + + @latency.setter + def latency(self, latency: Any) -> None: + pass + + @property + def enable(self) -> bool: + """ + Whether to enable the batch option in the object detection module. + Batch queueing system provides: + - deep-learning based re-identification + - trajectory smoothing and filtering + + Default: False + .. note:: + To activate this option, enable must be set to True. + """ + return bool() + + @enable.setter + def enable(self, enable: Any) -> None: + pass + + @property + def id_retention_time(self) -> float: + """ + Max retention time in seconds of a detected object. + After this time, the same object will mostly have a different id. + """ + return float() + + @id_retention_time.setter + def id_retention_time(self, id_retention_time: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + param enable : Activates enable + param id_retention_time : Chosen id_retention_time + param batch_duration : Chosen latency + """ + pass + + +class ObjectDetectionParameters: + """ + Class containing a set of parameters for the object detection module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_segmentation(self) -> bool: + """ + Whether the object masks will be computed. + Default: False + """ + return bool() + + @enable_segmentation.setter + def enable_segmentation(self, enable_segmentation: Any) -> None: + pass + + @property + def allow_reduced_precision_inference(self) -> bool: + """ + Whether to allow inference to run at a lower precision to improve runtime and memory usage. + It might increase the initial optimization time and could include downloading calibration data or calibration cache and slightly reduce the accuracy. + .. note:: + The fp16 is automatically enabled if the GPU is compatible and provides a speed up of almost x2 and reduce memory usage by almost half, no precision loss. + + .. note:: + This setting allow int8 precision which can speed up by another x2 factor (compared to fp16, or x4 compared to fp32) and half the fp16 memory usage, however some accuracy could be lost. + + .. note:: + The accuracy loss should not exceed 1-2% on the compatible models. + + .. note:: + The current compatible models are all sl.AI_MODELS.HUMAN_BODY_XXXX. + """ + return bool() + + @allow_reduced_precision_inference.setter + def allow_reduced_precision_inference(self, allow_reduced_precision_inference: Any) -> None: + pass + + @property + def filtering_mode(self) -> OBJECT_FILTERING_MODE: + """ + Filtering mode that should be applied to raw detections. + Default: sl.OBJECT_FILTERING_MODE.NMS_3D (same behavior as previous ZED SDK version) + .. note:: + This parameter is only used in detection model sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_XXX + + and sl.OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS. + .. note:: + For custom object, it is recommended to use sl.OBJECT_FILTERING_MODE.NMS_3D_PER_CLASS + + or sl.OBJECT_FILTERING_MODE.NONE. + .. 
note:: + In this case, you might need to add your own NMS filter before ingesting the boxes into the object detection module. + """ + return OBJECT_FILTERING_MODE() + + @filtering_mode.setter + def filtering_mode(self, filtering_mode: Any) -> None: + pass + + @property + def batch_parameters(self) -> BatchParameters: + """ + Batching system parameters. + Batching system (introduced in 3.5) performs short-term re-identification with deep-learning and trajectories filtering. + \n sl.BatchParameters.enable must to be true to use this feature (by default disabled). + """ + return BatchParameters() + + @batch_parameters.setter + def batch_parameters(self, batch_parameters: Any) -> None: + pass + + @property + def instance_module_id(self) -> int: + """ + Id of the module instance. + This is used to identify which object detection module instance is used. + """ + return int() + + @instance_module_id.setter + def instance_module_id(self, instance_module_id: Any) -> None: + pass + + @property + def detection_model(self) -> OBJECT_DETECTION_MODEL: + """ + sl.OBJECT_DETECTION_MODEL to use. + Default: sl.OBJECT_DETECTION_MODEL.MULTI_CLASS_BOX_FAST + """ + return OBJECT_DETECTION_MODEL() + + @detection_model.setter + def detection_model(self, detection_model: Any) -> None: + pass + + @property + def fused_objects_group_name(self) -> str: + """ + In a multi camera setup, specify which group this model belongs to. + + In a multi camera setup, multiple cameras can be used to detect objects and multiple detector having similar output layout can see the same object. + Therefore, Fusion will fuse together the outputs received by multiple detectors only if they are part of the same fused_objects_group_name. + + .. note:: + This parameter is not used when not using a multi-camera setup and must be set in a multi camera setup. + """ + return str() + + @fused_objects_group_name.setter + def fused_objects_group_name(self, fused_objects_group_name: Any) -> None: + pass + + @property + def enable_tracking(self) -> bool: + """ + Whether the object detection system includes object tracking capabilities across a sequence of images. + Default: True + """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + @property + def custom_onnx_file(self) -> str: + """ + Path to the YOLO-like onnx file for custom object detection ran in the ZED SDK. + + When `detection_model` is OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS, a onnx model must be passed so that the ZED SDK can optimize it for your GPU and run inference on it. + + The resulting optimized model will be saved for re-use in the future. + + .. attention:: - The model must be a YOLO-like model. + .. attention:: - The caching uses the `custom_onnx_file` string along with your GPU specs to decide whether to use the cached optmized model or to optimize the passed onnx model. + If you want to use a different model (i.e. an onnx with different weights), you must use a different `custom_onnx_file` string or delete the cached optimized model in + /resources. + + .. note:: + This parameter is useless when detection_model is not OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS. + """ + return str() + + @custom_onnx_file.setter + def custom_onnx_file(self, custom_onnx_file: Any) -> None: + pass + + @property + def max_range(self) -> float: + """ + Upper depth range for detections. + Default: -1 (value set in sl.InitParameters.depth_maximum_distance) + .. 
note:: + The value cannot be greater than sl.InitParameters.depth_maximum_distance and its unit is defined in sl.InitParameters.coordinate_units. + """ + return float() + + @max_range.setter + def max_range(self, max_range: Any) -> None: + pass + + @property + def prediction_timeout_s(self) -> float: + """ + Prediction duration of the ZED SDK when an object is not detected anymore before switching its state to sl.OBJECT_TRACKING_STATE.SEARCHING. + It prevents the jittering of the object state when there is a short misdetection. + \n The user can define their own prediction time duration. + \n Default: 0.2 + .. note:: + During this time, the object will have sl.OBJECT_TRACKING_STATE.OK state even if it is not detected. + + .. note:: + The duration is expressed in seconds. + + .. warning:: prediction_timeout_s will be clamped to 1 second as the prediction is getting worse with time. + .. warning:: Setting this parameter to 0 disables the ZED SDK predictions. + """ + return float() + + @prediction_timeout_s.setter + def prediction_timeout_s(self, prediction_timeout_s: Any) -> None: + pass + + @property + def custom_onnx_dynamic_input_shape(self) -> Resolution: + """ + Resolution to the YOLO-like onnx file for custom object detection ran in the ZED SDK. This resolution defines the input tensor size for dynamic shape ONNX model only. The batch and channel dimensions are automatically handled, it assumes it's color images like default YOLO models. + + .. note:: + This parameter is only used when detection_model is OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS and the provided ONNX file is using dynamic shapes. + + .. attention:: - Multiple model only support squared images + + Default: Squared images 512x512 (input tensor will be 1x3x512x512) + """ + return Resolution() + + @custom_onnx_dynamic_input_shape.setter + def custom_onnx_dynamic_input_shape(self, custom_onnx_dynamic_input_shape: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param enable_tracking: Activates enable_tracking + :param enable_segmentation: Activates enable_segmentation + :param detection_model: Chosen detection_model + :param max_range: Chosen max_range + :param batch_trajectories_parameters: Chosen batch_parameters + :param filtering_mode: Chosen filtering_mode + :param prediction_timeout_s: Chosen prediction_timeout_s + :param allow_reduced_precision_inference: Activates allow_reduced_precision_inference + :param instance_module_id: Chosen instance_module_id + """ + pass + + +class ObjectDetectionRuntimeParameters: + """ + Class containing a set of runtime parameters for the object detection module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_class_filter(self) -> list[OBJECT_CLASS]: + """ + Defines which object types to detect and track. + Default: [] (all classes are tracked) + .. note:: + Fewer object types can slightly speed up the process since every object is tracked. + + .. note:: + Will output only the selected classes. + + + In order to get all the available classes, the filter list must be empty : + .. code-block:: text + + object_class_filter = {}; + + + To select a set of specific object classes, like vehicles, persons and animals for instance: + .. 
code-block:: text + + object_class_filter = {sl.OBJECT_CLASS.VEHICLE, sl.OBJECT_CLASS.PERSON, sl.OBJECT_CLASS.ANIMAL}; + """ + return list[OBJECT_CLASS]() + + @object_class_filter.setter + def object_class_filter(self, object_class_filter: Any) -> None: + pass + + @property + def detection_confidence_threshold(self) -> float: + """ + Confidence threshold. + From 1 to 100, with 1 meaning a low threshold, more uncertain objects and 99 very few but very precise objects. + \n Default: 20 + .. note:: + If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked. + + .. note:: + detection_confidence_threshold is used as a fallback when sl::ObjectDetectionRuntimeParameters.object_class_detection_confidence_threshold is partially set. + """ + return float() + + @detection_confidence_threshold.setter + def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None: + pass + + @property + def object_class_detection_confidence_threshold(self) -> dict: + """ + Dictonary of confidence thresholds for each class (can be empty for some classes). + .. note:: + sl.ObjectDetectionRuntimeParameters.detection_confidence_threshold will be taken as fallback/default value. + """ + return {} + + @object_class_detection_confidence_threshold.setter + def object_class_detection_confidence_threshold(self, object_class_detection_confidence_threshold: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param detection_confidence_threshold: Chosen detection_confidence_threshold + :param object_class_filter: Chosen object_class_filter + :param object_class_detection_confidence_threshold: Chosen object_class_detection_confidence_threshold + """ + pass + + +class CustomObjectDetectionProperties: + """ + Class containing a set of runtime properties of a certain class ID for the object detection module using a custom model. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def tracking_max_dist(self) -> float: + """ + Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. + + By default, do not discard tracked object based on distance. + Only valid for static object. + """ + return float() + + @tracking_max_dist.setter + def tracking_max_dist(self, tracking_max_dist: Any) -> None: + pass + + @property + def is_grounded(self) -> bool: + """ + Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking. + - true: 2 DoF projected alongside the floor plane. Case for object standing on the ground such as person, vehicle, etc. + The projection implies that the objects cannot be superposed on multiple horizontal levels. + - false: 6 DoF (full 3D movements are allowed). + + .. note:: + This parameter cannot be changed for a given object tracking id. + + .. note:: + It is advised to set it by labels to avoid issues. + """ + return bool() + + @is_grounded.setter + def is_grounded(self, is_grounded: Any) -> None: + pass + + @property + def min_box_height_meters(self) -> float: + """ + Minimum allowed 3D height. + + Any prediction smaller than that will be either discarded (if object is tracked and in SEARCHING state) or clamped. 
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @min_box_height_meters.setter
+    def min_box_height_meters(self, min_box_height_meters: Any) -> None:
+        pass
+
+    @property
+    def enabled(self) -> bool:
+        """
+        Whether the object is kept or not.
+        """
+        return bool()
+
+    @enabled.setter
+    def enabled(self, enabled: Any) -> None:
+        pass
+
+    @property
+    def tracking_timeout(self) -> float:
+        """
+        Maximum time (in seconds) a tracked object can remain unseen before it is dropped.
+
+        By default, let the tracker decide internally based on the internal sub class of the tracked object.
+        """
+        return float()
+
+    @tracking_timeout.setter
+    def tracking_timeout(self, tracking_timeout: Any) -> None:
+        pass
+
+    @property
+    def min_box_width_normalized(self) -> float:
+        """
+        Minimum allowed width normalized to the image size.
+
+        Any prediction smaller than that will be filtered out.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @min_box_width_normalized.setter
+    def min_box_width_normalized(self, min_box_width_normalized: Any) -> None:
+        pass
+
+    @property
+    def object_acceleration_preset(self) -> OBJECT_ACCELERATION_PRESET:
+        """
+        Preset defining the expected maximum acceleration of the tracked object.
+
+        Determines how the ZED SDK interprets object acceleration, affecting tracking behavior and predictions.
+        Default: sl.OBJECT_ACCELERATION_PRESET.DEFAULT
+        """
+        return OBJECT_ACCELERATION_PRESET()
+
+    @object_acceleration_preset.setter
+    def object_acceleration_preset(self, object_acceleration_preset: Any) -> None:
+        pass
+
+    @property
+    def max_box_height_meters(self) -> float:
+        """
+        Maximum allowed 3D height.
+
+        Any prediction bigger than that will be either discarded (if object is tracked and in SEARCHING state) or clamped.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @max_box_height_meters.setter
+    def max_box_height_meters(self, max_box_height_meters: Any) -> None:
+        pass
+
+    @property
+    def max_allowed_acceleration(self) -> float:
+        """
+        Manually override the acceleration preset.
+
+        If set, this value takes precedence over the selected preset, allowing for a custom maximum acceleration.
+        Unit is m/s^2.
+        """
+        return float()
+
+    @max_allowed_acceleration.setter
+    def max_allowed_acceleration(self, max_allowed_acceleration: Any) -> None:
+        pass
+
+    @property
+    def max_box_width_normalized(self) -> float:
+        """
+        Maximum allowed width normalized to the image size.
+
+        Any prediction bigger than that will be filtered out.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @max_box_width_normalized.setter
+    def max_box_width_normalized(self, max_box_width_normalized: Any) -> None:
+        pass
+
+    @property
+    def max_box_width_meters(self) -> float:
+        """
+        Maximum allowed 3D width.
+
+        Any prediction bigger than that will be either discarded (if object is tracked and in SEARCHING state) or clamped.
+        Default: -1 (no filtering)
+        """
+        return float()
+
+    @max_box_width_meters.setter
+    def max_box_width_meters(self, max_box_width_meters: Any) -> None:
+        pass
+
+    @property
+    def is_static(self) -> bool:
+        """
+        Provides a hypothesis about whether the object is static, to improve the object tracking.
+        - True: the object will be assumed to never move nor be moved.
+        - False: the object will be assumed to be able to move or be moved.
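+
+        A hedged configuration sketch (the "wall-mounted sign" scenario is illustrative):
+
+        .. code-block:: text
+
+            props = sl.CustomObjectDetectionProperties()
+            props.is_static = True     # the sign never moves
+            props.is_grounded = False  # it does not stand on the floor plane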
+ """ + return bool() + + @is_static.setter + def is_static(self, is_static: Any) -> None: + pass + + @property + def native_mapped_class(self) -> OBJECT_SUBCLASS: + """ + For increased accuracy, the native sl::OBJECT_SUBCLASS mapping, if any. + + Native objects have refined internal parameters for better 3D projection and tracking accuracy. + If one of the custom objects can be mapped to one the native sl::OBJECT_SUBCLASS, this can help to boost the tracking accuracy. + Default: no mapping + """ + return OBJECT_SUBCLASS() + + @native_mapped_class.setter + def native_mapped_class(self, native_mapped_class: Any) -> None: + pass + + @property + def detection_confidence_threshold(self) -> float: + """ + Confidence threshold. + + From 1 to 100, with 1 meaning a low threshold, more uncertain objects and 99 very few but very precise objects. + Default: 20.f + + .. note:: + If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked. + """ + return float() + + @detection_confidence_threshold.setter + def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None: + pass + + @property + def min_box_width_meters(self) -> float: + """ + Minimum allowed 3D width. + + Any prediction smaller than that will be either discarded (if object is tracked and in SEARCHING state) or clamped. + Default: -1 (no filtering) + """ + return float() + + @min_box_width_meters.setter + def min_box_width_meters(self, min_box_width_meters: Any) -> None: + pass + + @property + def min_box_height_normalized(self) -> float: + """ + Minimum allowed height normalized to the image size. + + Any prediction smaller than that will be filtered out. + Default: -1 (no filtering) + """ + return float() + + @min_box_height_normalized.setter + def min_box_height_normalized(self, min_box_height_normalized: Any) -> None: + pass + + @property + def max_box_height_normalized(self) -> float: + """ + Maximum allowed height normalized to the image size. + + Any prediction bigger than that will be filtered out. + Default: -1 (no filtering) + """ + return float() + + @max_box_height_normalized.setter + def max_box_height_normalized(self, max_box_height_normalized: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param detection_confidence_threshold: Chosen detection_confidence_threshold + :param object_class_filter: Chosen object_class_filter + :param object_class_detection_confidence_threshold: Chosen object_class_detection_confidence_threshold + """ + pass + + +class CustomObjectDetectionRuntimeParameters: + """ + Class containing a set of runtime parameters for the object detection module using your own model ran by the SDK. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def object_detection_properties(self) -> CustomObjectDetectionProperties: + """ + Global object detection properties. + + .. note:: + object_detection_properties is used as a fallback when sl::CustomObjectDetectionRuntimeParameters.object_class_detection_properties is partially set. 
+ """ + return CustomObjectDetectionProperties() + + @object_detection_properties.setter + def object_detection_properties(self, object_detection_properties: Any) -> None: + pass + + @property + def object_class_detection_properties(self) -> dict: + """ + Per class object detection properties. + """ + return {} + + @object_class_detection_properties.setter + def object_class_detection_properties(self, object_class_detection_properties: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + """ + pass + + +class BodyTrackingParameters: + """ + Class containing a set of parameters for the body tracking module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_segmentation(self) -> bool: + """ + Whether the body/person masks will be computed. + Default: False + """ + return bool() + + @enable_segmentation.setter + def enable_segmentation(self, enable_segmentation: Any) -> None: + pass + + @property + def allow_reduced_precision_inference(self) -> bool: + """ + Whether to allow inference to run at a lower precision to improve runtime and memory usage. + It might increase the initial optimization time and could include downloading calibration data or calibration cache and slightly reduce the accuracy. + .. note:: + The fp16 is automatically enabled if the GPU is compatible and provides a speed up of almost x2 and reduce memory usage by almost half, no precision loss. + + .. note:: + This setting allow int8 precision which can speed up by another x2 factor (compared to fp16, or x4 compared to fp32) and half the fp16 memory usage, however some accuracy could be lost. + + .. note:: + The accuracy loss should not exceed 1-2% on the compatible models. + + .. note:: + The current compatible models are all sl.AI_MODELS.HUMAN_BODY_XXXX. + """ + return bool() + + @allow_reduced_precision_inference.setter + def allow_reduced_precision_inference(self, allow_reduced_precision_inference: Any) -> None: + pass + + @property + def instance_module_id(self) -> int: + """ + Id of the module instance. + This is used to identify which body tracking module instance is used. + """ + return int() + + @instance_module_id.setter + def instance_module_id(self, instance_module_id: Any) -> None: + pass + + @property + def detection_model(self) -> BODY_TRACKING_MODEL: + """ + sl.BODY_TRACKING_MODEL to use. + Default: sl.BODY_TRACKING_MODEL.HUMAN_BODY_ACCURATE + """ + return BODY_TRACKING_MODEL() + + @detection_model.setter + def detection_model(self, detection_model: Any) -> None: + pass + + @property + def enable_body_fitting(self) -> bool: + """ + Whether to apply the body fitting. + Default: False + """ + return bool() + + @enable_body_fitting.setter + def enable_body_fitting(self, enable_body_fitting: Any) -> None: + pass + + @property + def body_format(self) -> BODY_FORMAT: + """ + Body format to be outputted by the ZED SDK with sl.Camera.retrieve_bodies(). + Default: sl.BODY_FORMAT.BODY_18 + """ + return BODY_FORMAT() + + @body_format.setter + def body_format(self, body_format: Any) -> None: + pass + + @property + def enable_tracking(self) -> bool: + """ + Whether the body tracking system includes body/person tracking capabilities across a sequence of images. 
+ Default: True + """ + return bool() + + @enable_tracking.setter + def enable_tracking(self, enable_tracking: Any) -> None: + pass + + @property + def max_range(self) -> float: + """ + Upper depth range for detections. + Default: -1 (value set in sl.InitParameters.depth_maximum_distance) + .. note:: + The value cannot be greater than sl.InitParameters.depth_maximum_distance and its unit is defined in sl.InitParameters.coordinate_units. + """ + return float() + + @max_range.setter + def max_range(self, max_range: Any) -> None: + pass + + @property + def prediction_timeout_s(self) -> float: + """ + Prediction duration of the ZED SDK when an object is not detected anymore before switching its state to sl.OBJECT_TRACKING_STATE.SEARCHING. + It prevents the jittering of the object state when there is a short misdetection. + \n The user can define their own prediction time duration. + \n Default: 0.2 + .. note:: + During this time, the object will have sl.OBJECT_TRACKING_STATE.OK state even if it is not detected. + + .. note:: + The duration is expressed in seconds. + + .. warning:: prediction_timeout_s will be clamped to 1 second as the prediction is getting worse with time. + .. warning:: Setting this parameter to 0 disables the ZED SDK predictions. + """ + return float() + + @prediction_timeout_s.setter + def prediction_timeout_s(self, prediction_timeout_s: Any) -> None: + pass + + @property + def body_selection(self) -> BODY_KEYPOINTS_SELECTION: + """ + Selection of keypoints to be outputted by the ZED SDK with sl.Camera.retrieve_bodies(). + Default: sl.BODY_KEYPOINTS_SELECTION.FULL + """ + return BODY_KEYPOINTS_SELECTION() + + @body_selection.setter + def body_selection(self, body_selection: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param enable_tracking: Activates enable_tracking + :param enable_segmentation: Activates enable_segmentation + :param detection_model: Chosen detection_model + :param enable_body_fitting: Activates enable_body_fitting + :param max_range: Chosen max_range + :param body_format: Chosen body_format + :param body_selection: Chosen body_selection + :param prediction_timeout_s: Chosen prediction_timeout_s + :param allow_reduced_precision_inference: Activates allow_reduced_precision_inference + :param instance_module_id: Chosen instance_module_id + """ + pass + + +class BodyTrackingRuntimeParameters: + """ + Class containing a set of runtime parameters for the body tracking module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def skeleton_smoothing(self) -> float: + """ + Control of the smoothing of the fitted fused skeleton. + It is ranged from 0 (low smoothing) and 1 (high smoothing). + \n Default: 0 + """ + return float() + + @skeleton_smoothing.setter + def skeleton_smoothing(self, skeleton_smoothing: Any) -> None: + pass + + @property + def detection_confidence_threshold(self) -> float: + """ + Confidence threshold. + From 1 to 100, with 1 meaning a low threshold, more uncertain objects and 99 very few but very precise objects. + \n Default: 20 + .. note:: + If the scene contains a lot of objects, increasing the confidence can slightly speed up the process, since every object instance is tracked. 
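+
+        A minimal usage sketch (hedged; it assumes body tracking was enabled on the
+        camera beforehand):
+
+        .. code-block:: text
+
+            rt = sl.BodyTrackingRuntimeParameters()
+            rt.detection_confidence_threshold = 40  # keep only fairly confident detections
+            bodies = sl.Bodies()
+            zed.retrieve_bodies(bodies, rt)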
+ """ + return float() + + @detection_confidence_threshold.setter + def detection_confidence_threshold(self, detection_confidence_threshold: Any) -> None: + pass + + @property + def minimum_keypoints_threshold(self) -> int: + """ + Minimum threshold for the keypoints. + The ZED SDK will only output the keypoints of the skeletons with threshold greater than this value. + \n Default: 0 + .. note:: + It is useful, for example, to remove unstable fitting results when a skeleton is partially occluded. + """ + return int() + + @minimum_keypoints_threshold.setter + def minimum_keypoints_threshold(self, minimum_keypoints_threshold: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + All the parameters are set to their default values. + :param detection_confidence_threshold: Chosen detection_confidence_threshold + :param minimum_keypoints_threshold: Chosen minimum_keypoints_threshold + :param skeleton_smoothing: Chosen skeleton_smoothing + """ + pass + + +class PlaneDetectionParameters: + """ + Class containing a set of parameters for the plane detection functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def normal_similarity_threshold(self) -> float: + """ + Controls the spread of plane by checking the angle difference. + Default: 15 degrees + """ + return float() + + @normal_similarity_threshold.setter + def normal_similarity_threshold(self, normal_similarity_threshold: Any) -> None: + pass + + @property + def max_distance_threshold(self) -> float: + """ + Controls the spread of plane by checking the position difference. + Default: 0.15 meters + """ + return float() + + @max_distance_threshold.setter + def max_distance_threshold(self, max_distance_threshold: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + Values: + - max_distance_threshold : 0.15 meters + - normal_similarity_threshold : 15.0 degrees + """ + pass + + +class RegionOfInterestParameters: + """ + Class containing a set of parameters for the plane detection functionality. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def image_height_ratio_cutoff(self) -> float: + """ + By default consider only the lower half of the image, can be useful to filter out the sky + Default: 0.5, correspond to the lower half of the image + """ + return float() + + @image_height_ratio_cutoff.setter + def image_height_ratio_cutoff(self, image_height_ratio_cutoff: Any) -> None: + pass + + @property + def auto_apply_module(self) -> set[MODULE]: + """ + Once computed the ROI computed will be automatically applied + Default: Enabled + """ + return set[MODULE]() + + @auto_apply_module.setter + def auto_apply_module(self, auto_apply_module: Any) -> None: + pass + + @property + def depth_far_threshold_meters(self) -> float: + """ + Filtering how far object in the ROI should be considered, this is useful for a vehicle for instance + Default: 2.5 meters + """ + return float() + + @depth_far_threshold_meters.setter + def depth_far_threshold_meters(self, depth_far_threshold_meters: Any) -> None: + pass + + def __dealloc__(self) -> None: + pass + + +def get_current_timestamp() -> Timestamp: + """ + Class containing a set of parameters for the plane detection functionality. 
+    """
+    return Timestamp()
+
+class Resolution:
+    """
+    Structure containing the width and height of an image.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def height(self) -> int:
+        """
+        Height of the image in pixels.
+        """
+        return int()
+
+    @height.setter
+    def height(self, height: Any) -> None:
+        pass
+
+    @property
+    def width(self) -> int:
+        """
+        Width of the image in pixels.
+        """
+        return int()
+
+    @width.setter
+    def width(self, width: Any) -> None:
+        pass
+
+    def area(self) -> int:
+        """
+        Area (width * height) of the image.
+        """
+        return int()
+
+    def __richcmp__(left, right, op) -> None:
+        pass
+
+
+class Rect:
+    """
+    Class defining a 2D rectangle with top-left corner coordinates and width/height in pixels.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def x(self) -> int:
+        """
+        x coordinate of the top-left corner.
+        """
+        return int()
+
+    @x.setter
+    def x(self, x: Any) -> None:
+        pass
+
+    @property
+    def y(self) -> int:
+        """
+        y coordinate of the top-left corner.
+        """
+        return int()
+
+    @y.setter
+    def y(self, y: Any) -> None:
+        pass
+
+    @property
+    def height(self) -> int:
+        """
+        Height of the rectangle in pixels.
+        """
+        return int()
+
+    @height.setter
+    def height(self, height: Any) -> None:
+        pass
+
+    @property
+    def width(self) -> int:
+        """
+        Width of the rectangle in pixels.
+        """
+        return int()
+
+    @width.setter
+    def width(self, width: Any) -> None:
+        pass
+
+    def area(self) -> int:
+        """
+        Returns the area of the rectangle.
+        """
+        return int()
+
+    def is_empty(self) -> bool:
+        """
+        Tests if the given sl.Rect is empty (width and/or height is zero).
+        """
+        return bool()
+
+    def contains(self, target: Rect, proper = False) -> bool:
+        """
+        Tests if this sl.Rect contains the **target** sl.Rect.
+        :return: True if this rectangle contains the rectangle, otherwise False.
+        .. note::
+            This method only returns True if the target rectangle is entirely inside this rectangle (not on the edge).
+        """
+        return bool()
+
+    def is_contained(self, target: Rect, proper = False) -> bool:
+        """
+        Tests if this sl.Rect is contained inside the given **target** sl.Rect.
+        :return: True if this rectangle is inside the **target** sl.Rect, otherwise False.
+        .. note::
+            This method only returns True if this rectangle is entirely inside the target rectangle (not on the edge).
+        """
+        return bool()
+
+    def __richcmp__(left, right, op) -> None:
+        pass
+
+
+class CameraParameters:
+    """
+    Class containing the intrinsic parameters of a camera.
+    That information about the camera will be returned by sl.Camera.get_camera_information().
+    .. note::
+        Similar to the sl.CalibrationParameters, those parameters are taken from the settings file (SNXXX.conf) and are modified during the sl.Camera.open() call when running a self-calibration.
+
+    .. note::
+        Those parameters, given after the sl.Camera.open() call, represent the camera matrix corresponding to rectified or unrectified images.
+
+    .. note::
+        When filled with rectified parameters, fx, fy, cx, cy must be the same for left and right camera once sl.Camera.open() has been called.
+ + .. note:: + Since distortion is corrected during rectification, distortion should not be considered on rectified images. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def d_fov(self) -> float: + """ + Diagonal field of view, in degrees. + """ + return float() + + @d_fov.setter + def d_fov(self, d_fov: Any) -> None: + pass + + @property + def cy(self) -> float: + """ + Optical center along y axis, defined in pixels (usually close to height / 2). + """ + return float() + + @cy.setter + def cy(self, cy: Any) -> None: + pass + + @property + def image_size(self) -> Resolution: + """ + Size in pixels of the images given by the camera. + """ + return Resolution() + + @image_size.setter + def image_size(self, image_size: Any) -> None: + pass + + @property + def focal_length_metric(self) -> float: + """ + Real focal length in millimeters. + """ + return float() + + @focal_length_metric.setter + def focal_length_metric(self, focal_length_metric: Any) -> None: + pass + + @property + def fy(self) -> float: + """ + Focal length in pixels along y axis. + """ + return float() + + @fy.setter + def fy(self, fy: Any) -> None: + pass + + @property + def v_fov(self) -> float: + """ + Vertical field of view, in degrees. + """ + return float() + + @v_fov.setter + def v_fov(self, v_fov: Any) -> None: + pass + + @property + def fx(self) -> float: + """ + Focal length in pixels along x axis. + """ + return float() + + @fx.setter + def fx(self, fx: Any) -> None: + pass + + @property + def disto(self) -> list[float]: + """ + Distortion factor : [k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4]. + + Radial (k1, k2, k3, k4, k5, k6), Tangential (p1,p2) and Prism (s1, s2, s3, s4) distortion. + """ + return list[float]() + + @property + def h_fov(self) -> float: + """ + Horizontal field of view, in degrees. + """ + return float() + + @h_fov.setter + def h_fov(self, h_fov: Any) -> None: + pass + + @property + def cx(self) -> float: + """ + Optical center along x axis, defined in pixels (usually close to width / 2). + """ + return float() + + @cx.setter + def cx(self, cx: Any) -> None: + pass + + def set_disto(self, value1: float, value2: float, value3: float, value4: float, value5: float) -> None: + """ + Sets the elements of the disto array. + :param value1: k1 + :param value2: k2 + :param value3: p1 + :param value4: p2 + :param value5: k3 + """ + pass + + def set_up(self, fx_: float, fy_: float, cx_: float, cy_: float) -> None: + """ + Setups the parameters of a camera. + :param fx_: Horizontal focal length + :param fy_: Vertical focal length + :param cx_: Horizontal optical center + :param cx_: Vertical optical center. + """ + pass + + def scale(self, resolution: Resolution) -> CameraParameters: + """ + Return the sl.CameraParameters for another resolution. + :param resolution: Resolution in which to get the new sl.CameraParameters. + :return: The sl.CameraParameters for the resolution given as input. + """ + return CameraParameters() + + +class CalibrationParameters: + """ + Class containing intrinsic and extrinsic parameters of the camera (translation and rotation). + + That information about the camera will be returned by sl.Camera.get_camera_information(). + .. note:: + The calibration/rectification process, called during sl.Camera.open(), is using the raw parameters defined in the SNXXX.conf file, where XXX is the serial number of the camera. + + .. note:: + Those values may be adjusted or not by the self-calibration to get a proper image alignment. + + .. 
note:: + After sl.Camera.open() is done (with or without self-calibration activated), most of the stereo parameters (except baseline of course) should be 0 or very close to 0. + + .. note:: + It means that images after rectification process (given by sl.Camera.retrieve_image()) are aligned as if they were taken by a "perfect" stereo camera, defined by the new sl.CalibrationParameters. + + .. warning:: CalibrationParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParameters.coordinate_system. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def left_cam(self) -> CameraParameters: + """ + Intrinsic sl.CameraParameters of the left camera. + """ + return CameraParameters() + + @left_cam.setter + def left_cam(self, left_cam: Any) -> None: + pass + + @property + def stereo_transform(self) -> Transform: + """ + Left to right camera transform, expressed in user coordinate system and unit (defined by sl.InitParameters.coordinate_system). + """ + return Transform() + + @property + def right_cam(self) -> CameraParameters: + """ + Intrinsic sl.CameraParameters of the right camera. + """ + return CameraParameters() + + @right_cam.setter + def right_cam(self, right_cam: Any) -> None: + pass + + def set(self) -> None: + pass + + def get_camera_baseline(self) -> float: + """ + Returns the baseline of the camera in the sl.UNIT defined in sl.InitParameters.coordinate_units. + """ + return float() + + +class SensorParameters: + """ + Class containing information about a single sensor available in the current device. + + Information about the camera sensors is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This class is meant to be used as a read-only container. + + .. note:: + Editing any of its fields will not impact the ZED SDK. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def sensor_unit(self) -> SENSORS_UNIT: + """ + Unit of the sensor. + """ + return SENSORS_UNIT() + + @property + def random_walk(self) -> float: + """ + Random walk derived from the Allan Variance given as continuous (frequency-independent). + .. note:: + The units will be expressed in ```sensor_unit / √(Hz)```. + + .. note:: + `NAN` if the information is not available. + """ + return float() + + @random_walk.setter + def random_walk(self, random_walk: Any) -> None: + pass + + @property + def noise_density(self) -> float: + """ + White noise density given as continuous (frequency-independent). + .. note:: + The units will be expressed in ```sensor_unit / √(Hz)```. + + .. note:: + `NAN` if the information is not available. + """ + return float() + + @noise_density.setter + def noise_density(self, noise_density: Any) -> None: + pass + + @property + def sensor_type(self) -> SENSOR_TYPE: + """ + Type of the sensor. + """ + return SENSOR_TYPE() + + @property + def sampling_rate(self) -> float: + """ + Sampling rate (or ODR) of the sensor. + """ + return float() + + @sampling_rate.setter + def sampling_rate(self, sampling_rate: Any) -> None: + pass + + @property + def resolution(self) -> float: + """ + Resolution of the sensor. + """ + return float() + + @resolution.setter + def resolution(self, resolution: Any) -> None: + pass + + @property + def is_available(self) -> bool: + """ + Whether the sensor is available in your camera. 
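+
+        A hedged sketch (``config`` is assumed to be the sl.SensorsConfiguration
+        returned through sl.Camera.get_camera_information()):
+
+        .. code-block:: text
+
+            params = config.accelerometer_parameters
+            if params.is_available:
+                rng = params.sensor_range()  # [minimum, maximum]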
+ """ + return bool() + + def set(self) -> None: + pass + + def sensor_range(self) -> np.array[float]: + """ + Range (NumPy array) of the sensor (minimum: `sensor_range[0]`, maximum: `sensor_range[1]`). + """ + return np.array[float]() + + def set_sensor_range(self, value1: float, value2: float) -> None: + """ + Sets the minimum and the maximum values of the sensor range. + \param float value1 : Minimum of the range to set. + \param float value2 : Maximum of the range to set. + """ + pass + + +class SensorsConfiguration: + """ + Class containing information about all the sensors available in the current device. + + Information about the camera sensors is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This class is meant to be used as a read-only container. + + .. note:: + Editing any of its fields will not impact the ZED SDK. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_imu_transform(self) -> Transform: + """ + IMU to left camera transform matrix. + .. note:: + It contains the rotation and translation between the IMU frame and camera frame. + """ + return Transform() + + @property + def barometer_parameters(self) -> SensorParameters: + """ + Configuration of the barometer. + """ + return SensorParameters() + + @property + def magnetometer_parameters(self) -> SensorParameters: + """ + Configuration of the magnetometer. + """ + return SensorParameters() + + @property + def imu_magnetometer_transform(self) -> Transform: + """ + Magnetometer to IMU transform matrix. + .. note:: + It contains rotation and translation between IMU frame and magnetometer frame. + """ + return Transform() + + @property + def firmware_version(self) -> int: + """ + Firmware version of the sensor module. + .. note:: + 0 if no sensors are available (sl.MODEL.ZED). + """ + return int() + + @property + def gyroscope_parameters(self) -> SensorParameters: + """ + Configuration of the gyroscope. + """ + return SensorParameters() + + @property + def accelerometer_parameters(self) -> SensorParameters: + """ + Configuration of the accelerometer. + """ + return SensorParameters() + + def __set_from_camera(self, py_camera, resizer = Resolution(0, 0)) -> None: + pass + + def __set_from_cameraone(self, py_camera, resizer = Resolution(0, 0)) -> None: + pass + + def is_sensor_available(self, sensor_type) -> bool: + """ + Checks if a sensor is available on the device. + :param sensor_type: Sensor type to check. + :return: True if the sensor is available on the device, otherwise False. + """ + return bool() + + +class CameraConfiguration: + """ + Structure containing information about the camera sensor. + + Information about the camera is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This object is meant to be used as a read-only container, editing any of its field won't impact the SDK. + + .. warning:: sl.CalibrationParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParameters.coordinate_system. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def calibration_parameters_raw(self) -> CalibrationParameters: + """ + Intrinsics and extrinsic stereo parameters for unrectified/distorted images. + """ + return CalibrationParameters() + + @property + def fps(self) -> float: + """ + FPS of the camera. + """ + return float() + + @property + def firmware_version(self) -> int: + """ + Internal firmware version of the camera. 
+ """ + return int() + + @property + def calibration_parameters(self) -> CalibrationParameters: + """ + Intrinsics and extrinsic stereo parameters for rectified/undistorted images. + """ + return CalibrationParameters() + + @property + def resolution(self) -> Resolution: + """ + Resolution of the camera. + """ + return Resolution() + + +class CameraInformation: + """ + Structure containing information of a single camera (serial number, model, calibration, etc.) + That information about the camera will be returned by Camera.get_camera_information() + .. note:: + This object is meant to be used as a read-only container, editing any of its fields won't impact the SDK. + + .. warning:: CalibrationParameters are returned in COORDINATE_SYSTEM.IMAGE , they are not impacted by the InitParameters.coordinate_system + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera (see sl.MODEL). + """ + return MODEL() + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + """ + return int() + + @property + def camera_configuration(self) -> CameraConfiguration: + """ + Camera configuration parameters stored in a sl.CameraConfiguration. + """ + return CameraConfiguration() + + @property + def sensors_configuration(self) -> SensorsConfiguration: + """ + Sensors configuration parameters stored in a sl.SensorsConfiguration. + """ + return SensorsConfiguration() + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type used in the ZED SDK. + """ + return INPUT_TYPE() + + +class Mat: + """ + Class representing 1 to 4-channel matrix of float or uchar, stored on CPU and/or GPU side. + + This class is defined in a row-major order, meaning that for an image buffer, the rows are stored consecutively from top to bottom. + .. note:: + The ZED SDK Python wrapper does not support GPU data storage/access. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def verbose(self) -> bool: + """ + Whether the sl.Mat can display information. + """ + return bool() + + @verbose.setter + def verbose(self, verbose: Any) -> None: + pass + + @property + def name(self) -> str: + """ + The name of the sl.Mat (optional). + In verbose mode, it iss used to indicate which sl.Mat is printing information. + \n Default set to "n/a" to avoid empty string if not filled. + """ + return str() + + @name.setter + def name(self, name: Any) -> None: + pass + + @property + def timestamp(self) -> int: + """ + Timestamp of the last manipulation of the data of the matrix by a method/function. + """ + return int() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def init_mat_type(self, width, height, mat_type, memory_type = MEM.CPU) -> None: + """ + Initilizes a new sl.Mat and allocates the requested memory by calling alloc_size(). + :param width: Width of the matrix in pixels. Default: 0 + :param height: Height of the matrix in pixels. Default: 0 + :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1 + :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value) + """ + pass + + def init_mat_cpu(self, width: int, height: int, mat_type: MAT_TYPE, ptr, step, memory_type = MEM.CPU) -> None: + """ + Initilizes a new sl.Mat from an existing data pointer. + This method does not allocate the memory. + :param width: Width of the matrix in pixels. 
+        :param height: Height of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param ptr: Pointer to the data array.
+        :param step: Step of the data array (bytes size of one pixel row).
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat_resolution(self, resolution: Resolution, mat_type: MAT_TYPE, memory_type = MEM.CPU) -> None:
+        """
+        Initializes a new sl.Mat and allocates the requested memory by calling alloc_size().
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat_resolution_cpu(self, resolution: Resolution, mat_type, ptr, step, memory_type = MEM.CPU) -> None:
+        """
+        Initializes a new sl.Mat from an existing data pointer.
+        This method does not allocate the memory.
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param ptr: Pointer to the data array (CPU or GPU).
+        :param step: Step of the data array (bytes size of one pixel row).
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def init_mat(self, matrix: Mat) -> None:
+        """
+        Initializes a new sl.Mat by copy (shallow copy).
+        This method does not allocate the memory.
+        :param matrix: sl.Mat to copy.
+        """
+        pass
+
+    def alloc_size(self, width, height, mat_type, memory_type = MEM.CPU) -> None:
+        """
+        Allocates the sl.Mat memory.
+        :param width: Width of the matrix in pixels.
+        :param height: Height of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+
+        .. warning:: It erases previously allocated memory.
+        """
+        pass
+
+    def alloc_resolution(self, resolution: Resolution, mat_type: MAT_TYPE, memory_type = MEM.CPU) -> None:
+        """
+        Allocates the sl.Mat memory.
+        :param resolution: Size of the matrix in pixels.
+        :param mat_type: Type of the matrix (sl.MAT_TYPE.F32_C1, sl.MAT_TYPE.U8_C4, etc.).\n Default: sl.MAT_TYPE.F32_C1
+        :param memory_type: Where the buffer will be stored. Default: sl.MEM.CPU (you cannot change this default value)
+
+        .. warning:: It erases previously allocated memory.
+        """
+        pass
+
+    def free(self, memory_type = MEM.CPU) -> None:
+        """
+        Frees the owned memory.
+        :param memory_type: Specifies which memory you wish to free. Default: sl.MEM.CPU (you cannot change this default value)
+        """
+        pass
+
+    def copy_to(self, dst: Mat, cpy_type = COPY_TYPE.CPU_CPU) -> ERROR_CODE:
+        """
+        Copies data to another sl.Mat (deep copy).
+
+        :param dst: sl.Mat where the data will be copied to.
+        :param cpy_type: Specifies the memory that will be used for the copy. Default: sl.COPY_TYPE.CPU_CPU (you cannot change this default value)
+        :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise.
+
+        .. note::
+            If the destination is not allocated or does not have a compatible sl.MAT_TYPE or sl.Resolution,
+            current memory is freed and new memory is directly allocated.
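+
+        A minimal usage sketch:
+
+        .. code-block:: text
+
+            src = sl.Mat()
+            src.init_mat_type(1280, 720, sl.MAT_TYPE.U8_C4)
+            dst = sl.Mat()
+            err = src.copy_to(dst)  # dst is (re)allocated if needed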
+ """ + return ERROR_CODE() + + def update_cpu_from_gpu(self) -> ERROR_CODE: + """ + Downloads data from DEVICE (GPU) to HOST (CPU), if possible. + .. note:: + If no CPU or GPU memory are available for this sl::Mat, some are directly allocated. + + .. note:: + If verbose is set to true, you have information in case of failure. + """ + return ERROR_CODE() + + def update_gpu_from_cpu(self) -> ERROR_CODE: + """ + Uploads data from HOST (CPU) to DEVICE (GPU), if possible. + .. note:: + If no CPU or GPU memory are available for this sl::Mat, some are directly allocated. + + .. note:: + If verbose is set to true, you have information in case of failure. + """ + return ERROR_CODE() + + def set_from(self, src: Mat, cpy_type = COPY_TYPE.CPU_CPU) -> ERROR_CODE: + """ + Copies data from an other sl.Mat (deep copy). + :param src: sl.Mat where the data will be copied from. + :param cpy_type: Specifies the memory that will be used for the copy. Default: sl.COPY_TYPE.CPU_CPU (you cannot change this default value) + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + If the destination is not allocated or does not have a compatible sl.MAT_TYPE or sl.Resolution, + + current memory is freed and new memory is directly allocated. + """ + return ERROR_CODE() + + def read(self, filepath: str) -> ERROR_CODE: + """ + Reads an image from a file (only if sl.MEM.CPU is available on the current sl.Mat). + Supported input files format are PNG and JPEG. + :param filepath: Path of the file to read from (including the name and extension). + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + Supported sl.MAT_TYPE are : + + - MAT_TYPE.F32_C1 for PNG/PFM/PGM + - MAT_TYPE.F32_C3 for PCD/PLY/VTK/XYZ + - MAT_TYPE.F32_C4 for PCD/PLY/VTK/WYZ + - MAT_TYPE.U8_C1 for PNG/JPG + - MAT_TYPE.U8_C3 for PNG/JPG + - MAT_TYPE.U8_C4 for PNG/JPG + """ + return ERROR_CODE() + + def write(self, filepath: str, memory_type = MEM.CPU, compression_level = -1) -> ERROR_CODE: + """ + Writes the sl.Mat (only if sl.MEM.CPU is available on the current sl.Mat) into a file as an image. + Supported output files format are PNG and JPEG. + :param filepath: Path of the file to write (including the name and extension). + :param memory_type: Memory type of the sl.Mat. Default: sl.MEM.CPU (you cannot change the default value) + :param compression_level: Level of compression between 0 (lowest compression == highest size == highest quality(jpg)) and 100 (highest compression == lowest size == lowest quality(jpg)). + .. note:: + Specific/default value for compression_level = -1 : This will set the default quality for PNG(30) or JPEG(5). + + .. note:: + compression_level is only supported for [U8_Cx] (MAT_TYPE). + + :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise. + + .. note:: + Supported sl.MAT_TYPE are : + + - MAT_TYPE.F32_C1 for PNG/PFM/PGM + - MAT_TYPE.F32_C3 for PCD/PLY/VTK/XYZ + - MAT_TYPE.F32_C4 for PCD/PLY/VTK/WYZ + - MAT_TYPE.U8_C1 for PNG/JPG + - MAT_TYPE.U8_C3 for PNG/JPG + - MAT_TYPE.U8_C4 for PNG/JPG + """ + return ERROR_CODE() + + def set_to(self, value, memory_type = MEM.CPU) -> ERROR_CODE: + """ + Fills the sl.Mat with the given value. + This method overwrites all the matrix. + :param value: Value to be copied all over the matrix. + :param memory_type: Which buffer to fill. 
+        """
+        return ERROR_CODE()
+
+    def set_value(self, x: int, y: int, value, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Sets a value to a specific point in the matrix.
+        :param x: Column of the point to change.
+        :param y: Row of the point to change.
+        :param value: Value to be set.
+        :param memory_type: Which memory will be updated.
+        :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise.
+
+        .. warning:: Not efficient for sl.MEM.GPU, use it on sparse data.
+        """
+        return ERROR_CODE()
+
+    def get_value(self, x: int, y: int, memory_type = MEM.CPU) -> ERROR_CODE:
+        """
+        Returns the value of a specific point in the matrix.
+        :param x: Column of the point to get the value from.
+        :param y: Row of the point to get the value from.
+        :param memory_type: Which memory should be read.
+        :return: sl.ERROR_CODE.SUCCESS if everything went well, sl.ERROR_CODE.FAILURE otherwise.
+
+        .. warning:: Not efficient for sl.MEM.GPU, use it on sparse data.
+        """
+        return ERROR_CODE()
+
+    def get_width(self) -> int:
+        """
+        Returns the width of the matrix.
+        :return: Width of the matrix in pixels.
+        """
+        return int()
+
+    def get_height(self) -> int:
+        """
+        Returns the height of the matrix.
+        :return: Height of the matrix in pixels.
+        """
+        return int()
+
+    def get_resolution(self) -> Resolution:
+        """
+        Returns the resolution (width and height) of the matrix.
+        :return: Resolution of the matrix in pixels.
+        """
+        return Resolution()
+
+    def get_channels(self) -> int:
+        """
+        Returns the number of values stored in one pixel.
+        :return: Number of values in a pixel.
+        """
+        return int()
+
+    def get_data_type(self) -> MAT_TYPE:
+        """
+        Returns the format of the matrix.
+        :return: Format of the current sl.Mat.
+        """
+        return MAT_TYPE()
+
+    def get_memory_type(self) -> MEM:
+        """
+        Returns the type of memory (CPU and/or GPU).
+        :return: Type of allocated memory.
+        """
+        return MEM()
+
+    def numpy(self, force = False) -> np.array:
+        """
+        Returns the sl.Mat as a NumPy array.
+        This is for convenience to mimic the [PyTorch API](https://pytorch.org/docs/stable/generated/torch.Tensor.numpy.html).
+        \n This is an alias of the get_data() method.
+        :param force: Whether the memory of the sl.Mat needs to be duplicated.
+        :return: NumPy array containing the sl.Mat data.
+        .. note::
+            The fastest option is **force** set to False, but then the sl.Mat memory must not be released while the NumPy array is in use.
+        """
+        return np.array()
+
+    def get_data(self, memory_type = MEM.CPU, deep_copy = False) -> np.array:
+        """
+        Casts the data of the sl.Mat into a NumPy array (with or without copy).
+        :param memory_type: Which memory should be read. If MEM.GPU, you should have CuPy installed. Default: MEM.CPU
+        :param deep_copy: Whether the memory of the sl.Mat needs to be duplicated.
+        :return: NumPy array containing the sl.Mat data.
+        .. note::
+            The fastest option is **deep_copy** set to False, but then the sl.Mat memory must not be released while the NumPy array is in use.
+        """
+        return np.array()
+
+    def get_step_bytes(self, memory_type = MEM.CPU) -> int:
+        """
+        Returns the memory step in bytes (size of one pixel row).
+        :param memory_type: Specifies whether you want sl.MEM.CPU or sl.MEM.GPU step.\n Default: sl.MEM.CPU (you cannot change the default value)
+        :return: The step in bytes of the specified memory.
+        """
+        return int()
+
+    def get_step(self, memory_type = MEM.CPU) -> int:
+        """
+        Returns the memory step in number of elements (size of one pixel row).
+ :param memory_type: Specifies whether you want the sl.MEM.CPU or sl.MEM.GPU step.\n Default: sl.MEM.CPU (you cannot change the default value)
+ :return: The step in number of elements.
+ """
+ return int()
+
+ def get_pixel_bytes(self) -> int:
+ """
+ Returns the size of one pixel in bytes.
+ :return: Size of a pixel in bytes.
+ """
+ return int()
+
+ def get_width_bytes(self) -> int:
+ """
+ Returns the size of a row in bytes.
+ :return: Size of a row in bytes.
+ """
+ return int()
+
+ def get_infos(self) -> str:
+ """
+ Returns the information about the sl.Mat as a string.
+ :return: String containing the sl.Mat information.
+ """
+ return str()
+
+ def is_init(self) -> bool:
+ """
+ Returns whether the sl.Mat is initialized or not.
+ :return: True if the current sl.Mat has been allocated (by the constructor or afterwards).
+ """
+ return bool()
+
+ def is_memory_owner(self) -> bool:
+ """
+ Returns whether the sl.Mat is the owner of the memory it accesses.
+
+ If not, the memory won't be freed if the sl.Mat is destroyed.
+ :return: True if the sl.Mat is owning its memory, else False.
+ """
+ return bool()
+
+ def clone(self, py_mat: Mat) -> ERROR_CODE:
+ """
+ Duplicates a sl.Mat by copy (deep copy).
+ :param py_mat: sl.Mat to copy.
+
+ This method copies the data array(s) and marks the new sl.Mat as the memory owner.
+ """
+ return ERROR_CODE()
+
+ def move(self, py_mat: Mat) -> ERROR_CODE:
+ """
+ Moves the data of the sl.Mat to another sl.Mat.
+
+ This method gives the attributes of the current sl.Mat to the specified one. (No copy.)
+ :param py_mat: sl.Mat to move to.
+ .. note::
+ The current sl.Mat is then no longer usable since it loses its attributes.
+ """
+ return ERROR_CODE()
+
+ def convert_color_inplace(self, memory_type = MEM.CPU) -> ERROR_CODE:
+ """
+ Converts the color channels of the sl.Mat (RGB<->BGR or RGBA<->BGRA) in place.
+ This method works only on 8U_C4 or 8U_C3 matrices.
+ """
+ return ERROR_CODE()
+
+ def convert_color(mat1: Mat, mat2: Mat, swap_RB_channels: bool, remove_alpha_channels: bool, memory_type = MEM.CPU) -> ERROR_CODE:
+ """
+ Converts the color channels of a sl.Mat into another sl.Mat.
+ This method works only on 8U_C4 if remove_alpha_channels is enabled, or on 8U_C4 and 8U_C3 if swap_RB_channels is enabled.
+ The in-place method convert_color_inplace() can be used to only swap the red and blue channels efficiently.
+ """
+ return ERROR_CODE()
+
+ def swap(mat1: Mat, mat2: Mat) -> None:
+ """
+ Swaps the content of the provided sl.Mat objects (only swaps the pointers, no data copy).
+ :param mat1: First matrix to swap.
+ :param mat2: Second matrix to swap.
+ """
+ pass
+
+ def get_pointer(self, memory_type = MEM.CPU) -> int:
+ """
+ Gets the pointer of the content of the sl.Mat.
+ :param memory_type: Which memory you want to get. Default: sl.MEM.CPU (you cannot change the default value)
+ :return: Pointer of the content of the sl.Mat.
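+
+ A minimal usage sketch (assuming ``zed`` is an opened sl.Camera):
+ .. code-block:: text
+
+ image = sl.Mat()
+ zed.retrieve_image(image, sl.VIEW.LEFT)
+ ptr = image.get_pointer(sl.MEM.CPU) # raw address of the CPU buffer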
+ """ + return int() + + def __repr__(self) -> None: + pass + + +def blob_from_image(mat1: Mat, mat2: Mat, resolution: Resolution, scale: float, mean: tuple, stdev: tuple, keep_aspect_ratio: bool, swap_RB_channels: bool) -> ERROR_CODE: + """ + Convert an image into a GPU Tensor in planar channel configuration (NCHW), ready to use for deep learning model + :param image_in: input image to convert + :param tensor_out: output GPU tensor + :param resolution_out: resolution of the output image, generally square, although not mandatory + :param scalefactor: Scale factor applied to each pixel value, typically to convert the char value into [0-1] float + :param mean: mean, statistic to normalized the pixel values, applied AFTER the scale. For instance for imagenet statistics the mean would be sl::float3(0.485, 0.456, 0.406) + :param stddev: standard deviation, statistic to normalized the pixel values, applied AFTER the scale. For instance for imagenet statistics the standard deviation would be sl::float3(0.229, 0.224, 0.225) + :param keep_aspect_ratio: indicates if the original width and height ratio should be kept using padding (sometimes refer to as letterboxing) or if the image should be stretched + :param swap_RB_channels: indicates if the Red and Blue channels should be swapped (RGB<->BGR or RGBA<->BGRA) + :return: ERROR_CODE : The error code gives information about the success of the function + + Example usage, for a 416x416 squared RGB image (letterboxed), with a scale factor of 1/255, and using the imagenet statistics for normalization: + .. code-block:: text + + + image = sl.Mat() + blob = sl.Mat() + resolution = sl.Resolution(416,416) + scale = 1.0/255.0 # Scale factor to apply to each pixel value + keep_aspect_ratio = True # Add padding to keep the aspect ratio + swap_RB_channels = True # ZED SDK outputs BGR images, so we need to swap the R and B channels + zed.retrieve_image(image, sl.VIEW.LEFT, type=sl.MEM.GPU) # Get the ZED image (GPU only is more efficient in that case) + err = sl.blob_from_image(image, blob, resolution, scale, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), keep_aspect_ratio, swap_RB_channels) + # By default the blob is in GPU memory, you can move it to CPU memory if needed + blob.update_cpu_from_gpu() + + """ + return ERROR_CODE() + +def is_camera_one(camera_model: MODEL) -> bool: + """ + Check if the camera is a ZED One (Monocular) or ZED (Stereo) + :param camera_model: The camera model to check + """ + return bool() + +def is_resolution_available(resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a resolution is available for a given camera model + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +def is_FPS_available(fps, resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a frame rate is available for a given resolution and camera model + :param fps: Frame rate to check + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +def is_HDR_available(resolution: RESOLUTION, camera_model: MODEL) -> bool: + """ + Check if a resolution for a given camera model is available for HDR + :param resolution: Resolution to check + :param camera_model: The camera model to check + """ + return bool() + +class Rotation(Matrix3f): + """ + Class representing a rotation for the positional tracking module. + + It inherits from the generic sl.Matrix3f class. + """ + def __init__(self, *args, **kwargs) -> None: ... 
+
+ def __dealloc__(self) -> None:
+ pass
+
+ def init_rotation(self, rot: Rotation) -> None:
+ """
+ Deep copy from another sl.Rotation.
+ :param rot: sl.Rotation to copy.
+ """
+ pass
+
+ def init_matrix(self, matrix: Matrix3f) -> None:
+ """
+ Initializes the sl.Rotation from a sl.Matrix3f.
+ :param matrix: sl.Matrix3f to be used.
+ """
+ pass
+
+ def init_orientation(self, orient: Orientation) -> None:
+ """
+ Initializes the sl.Rotation from an sl.Orientation.
+ :param orient: sl.Orientation to be used.
+ """
+ pass
+
+ def init_angle_translation(self, angle: float, axis: Translation) -> None:
+ """
+ Initializes the sl.Rotation from an angle and an axis.
+ :param angle: Rotation angle in radians.
+ :param axis: 3D axis to rotate around.
+ """
+ pass
+
+ def set_orientation(self, py_orientation: Orientation) -> None:
+ """
+ Sets the sl.Rotation from an sl.Orientation.
+ :param py_orientation: sl.Orientation containing the rotation to set.
+ """
+ pass
+
+ def get_orientation(self) -> Orientation:
+ """
+ Returns the sl.Orientation corresponding to the current sl.Rotation.
+ :return: Orientation of the current rotation.
+ """
+ return Orientation()
+
+ def get_rotation_vector(self) -> np.array[float]:
+ """
+ Returns the 3x1 rotation vector obtained from the 3x3 rotation matrix using the Rodrigues formula.
+ :return: Rotation vector (NumPy array) created from the sl.Rotation values.
+ """
+ return np.array[float]()
+
+ def set_rotation_vector(self, input0: float, input1: float, input2: float) -> None:
+ """
+ Sets the sl.Rotation from a rotation vector (using Rodrigues' transformation).
+ :param input0: ```rx``` component of the rotation vector.
+ :param input1: ```ry``` component of the rotation vector.
+ :param input2: ```rz``` component of the rotation vector.
+ """
+ pass
+
+ def get_euler_angles(self, radian = True) -> np.array[float]:
+ """
+ Converts the sl.Rotation into Euler angles.
+ :param radian: Whether the angles will be returned in radians or degrees. Default: True
+ :return: Euler angles (NumPy array) created from the sl.Rotation values representing the rotations around the X, Y and Z axes using the YZX convention.
+ """
+ return np.array[float]()
+
+ def set_euler_angles(self, input0: float, input1: float, input2: float, radian = True) -> None:
+ """
+ Sets the sl.Rotation from Euler angles.
+ :param input0: Roll value.
+ :param input1: Pitch value.
+ :param input2: Yaw value.
+ :param radian: Whether the angles are in radians or degrees. Default: True
+ """
+ pass
+
+
+class Translation:
+ """
+ Class representing a translation for the positional tracking module.
+
+ sl.Translation is a vector as ```[tx, ty, tz]```.
+ \n You can access the data with the get() method that returns a NumPy array.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ def init_translation(self, tr) -> None:
+ """
+ Deep copy from another sl.Translation.
+ :param tr: sl.Translation to copy.
+ """
+ pass
+
+ def init_vector(self, t1, t2, t3) -> None:
+ """
+ Initializes the sl.Translation with its components.
+ :param t1: First component.
+ :param t2: Second component.
+ :param t3: Third component.
+ """
+ pass
+
+ def normalize(self) -> None:
+ """
+ Normalizes the current sl.Translation.
+ """
+ pass
+
+ def normalize_translation(self, tr) -> Translation:
+ """
+ Gets the normalized sl.Translation of a given sl.Translation.
+ :param tr: sl.Translation to get the normalized translation from.
+ :return: Another sl.Translation object equal to tr.normalize().
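+
+ A minimal sketch (the component values are arbitrary):
+ .. code-block:: text
+
+ tr = sl.Translation()
+ tr.init_vector(1.0, 2.0, 2.0)
+ unit_tr = tr.normalize_translation(tr) # normalized copy, tr is unchanged
+ print(unit_tr.get()) # NumPy array with norm 1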
+ """ + return Translation() + + def size(self) -> int: + """ + Gets the size of the sl.Translation. + :return: Size of the sl.Translation. + """ + return int() + + def dot_translation(tr1: Translation, tr2) -> float: + """ + Computes the dot product of two sl.Translation objects. + :param tr1: First sl.Translation to get the dot product from. + :param tr2: Sencond sl.Translation to get the dot product from. + :return: Dot product of **tr1 and **tr2. + """ + return float() + + def get(self) -> np.array[float]: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + return np.array[float]() + + def __mul__(self, other) -> None: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + pass + + def __repr__(self) -> None: + """ + Gets the sl.Translation as an NumPy array. + :return: NumPy array containing the components of the sl.Translation. + """ + pass + + +class Orientation: + """ + Class representing an orientation/quaternion for the positional tracking module. + + sl.Orientation is a vector defined as ```[ox, oy, oz, ow]```. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def init_orientation(self, orient) -> None: + """ + Deep copy from another sl.Orientation. + :param orient: sl.Orientation to copy. + """ + pass + + def init_vector(self, v0, v1, v2, v3) -> None: + """ + Initializes the sl.Orientation with its components. + :param v0: ox component. + :param v1: oy component. + :param v2: oz component. + :param v3: ow component. + """ + pass + + def init_rotation(self, rotation) -> None: + """ + Initializes the sl.Orientation from an sl.Rotation. + + It converts the sl.Rotation representation to the sl.Orientation one. + :param rotation: sl.Rotation to be used. + """ + pass + + def init_translation(self, tr1, tr2) -> None: + """ + Initializes the sl.Orientation from a vector represented by two sl.Translation. + :param tr1: First point of the vector. + :param tr2: Second point of the vector. + """ + pass + + def set_rotation_matrix(self, py_rotation) -> None: + """ + Sets the rotation component of the current sl.Transform from an sl.Rotation. + :param py_rotation: sl.Rotation to be used. + """ + pass + + def get_rotation_matrix(self) -> Rotation: + """ + Returns the current sl.Orientation as an sl.Rotation. + :return: The rotation computed from the orientation data. + """ + return Rotation() + + def set_identity(self) -> None: + """ + Sets the current sl.Orientation to identity. + """ + pass + + def identity(self, orient = Orientation()) -> Orientation: + """ + Creates an sl.Orientation initialized to identity. + :return: Identity sl.Orientation. + """ + return Orientation() + + def set_zeros(self) -> None: + """ + Fills the current sl.Orientation with zeros. + """ + pass + + def zeros(self, orient = Orientation()) -> Orientation: + """ + Creates an sl.Orientation filled with zeros. + :return: sl.Orientation filled with zeros. + """ + return Orientation() + + def normalize(self) -> None: + """ + Normalizes the current sl.Orientation. + """ + pass + + def normalize_orientation(orient) -> Orientation: + """ + Gets the normalized sl.Orientation of a given sl.Orientation. + :param orient: sl.Orientation to be get the normalized orientation from. + :return: Another sl.Orientation object equal to [**orient.normalize()](normalize). + """ + return Orientation() + + def size(self) -> int: + """ + Gets the size of the sl.Orientation. 
+ :return: Size of the sl.Orientation.
+ """
+ return int()
+
+ def get(self) -> np.array[float]:
+ """
+ Returns a NumPy array of the sl.Orientation.
+ :return: A NumPy array of the sl.Orientation.
+ """
+ return np.array[float]()
+
+ def __mul__(self, other) -> None:
+ """
+ Multiplication operator.
+ """
+ pass
+
+ def __repr__(self) -> None:
+ """
+ Returns the string representation of the sl.Orientation.
+ """
+ pass
+
+
+class Transform(Matrix4f):
+ """
+ Class representing a transformation (translation and rotation) for the positional tracking module.
+
+ It can be used to create any type of Matrix4x4 or sl.Matrix4f that must be specifically used for handling rotation and position information (OpenGL, tracking, etc.).
+ \n It inherits from the generic sl.Matrix4f class.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ def __dealloc__(self) -> None:
+ pass
+
+ def init_transform(self, motion: Transform) -> None:
+ """
+ Deep copy from another sl.Transform.
+ :param motion: sl.Transform to copy.
+ """
+ pass
+
+ def init_matrix(self, matrix: Matrix4f) -> None:
+ """
+ Initializes the sl.Transform from a sl.Matrix4f.
+ :param matrix: sl.Matrix4f to be used.
+ """
+ pass
+
+ def init_rotation_translation(self, rot: Rotation, tr: Translation) -> None:
+ """
+ Initializes the sl.Transform from an sl.Rotation and a sl.Translation.
+ :param rot: sl.Rotation to be used.
+ :param tr: sl.Translation to be used.
+ """
+ pass
+
+ def init_orientation_translation(self, orient: Orientation, tr: Translation) -> None:
+ """
+ Initializes the sl.Transform from an sl.Orientation and a sl.Translation.
+ :param orient: sl.Orientation to be used.
+ :param tr: sl.Translation to be used.
+ """
+ pass
+
+ def set_rotation_matrix(self, py_rotation: Rotation) -> None:
+ """
+ Sets the rotation component of the current sl.Transform from an sl.Rotation.
+ :param py_rotation: sl.Rotation to be used.
+ """
+ pass
+
+ def get_rotation_matrix(self) -> Rotation:
+ """
+ Returns the sl.Rotation corresponding to the current sl.Transform.
+ :return: sl.Rotation created from the sl.Transform values.
+ .. warning:: The given sl.Rotation contains a copy of the sl.Transform values.
+ """
+ return Rotation()
+
+ def set_translation(self, py_translation: Translation) -> None:
+ """
+ Sets the translation component of the current sl.Transform from an sl.Translation.
+ :param py_translation: sl.Translation to be used.
+ """
+ pass
+
+ def get_translation(self) -> Translation:
+ """
+ Returns the sl.Translation corresponding to the current sl.Transform.
+ :return: sl.Translation created from the sl.Transform values.
+ .. warning:: The given sl.Translation contains a copy of the sl.Transform values.
+ """
+ return Translation()
+
+ def set_orientation(self, py_orientation: Orientation) -> None:
+ """
+ Sets the orientation component of the current sl.Transform from an sl.Orientation.
+ :param py_orientation: sl.Orientation to be used.
+ """
+ pass
+
+ def get_orientation(self) -> Orientation:
+ """
+ Returns the sl.Orientation corresponding to the current sl.Transform.
+ :return: sl.Orientation created from the sl.Transform values.
+ .. warning:: The given sl.Orientation contains a copy of the sl.Transform values.
+ """
+ return Orientation()
+
+ def get_rotation_vector(self) -> np.array[float]:
+ """
+ Returns the 3x1 rotation vector obtained from the 3x3 rotation matrix using the Rodrigues formula.
+ :return: Rotation vector (NumPy array) created from the sl.Transform values.
+ """
+ return np.array[float]()
+
+ def set_rotation_vector(self, input0: float, input1: float, input2: float) -> None:
+ """
+ Sets the rotation component of the sl.Transform with a 3x1 rotation vector (using Rodrigues' transformation).
+ :param input0: ```rx``` component of the rotation vector.
+ :param input1: ```ry``` component of the rotation vector.
+ :param input2: ```rz``` component of the rotation vector.
+ """
+ pass
+
+ def get_euler_angles(self, radian = True) -> np.array[float]:
+ """
+ Converts the rotation component of the sl.Transform into Euler angles.
+ :param radian: Whether the angles will be returned in radians or degrees. Default: True
+ :return: Euler angles (NumPy array) created from the sl.Transform values representing the rotations around the X, Y and Z axes using the YZX convention.
+ """
+ return np.array[float]()
+
+ def set_euler_angles(self, input0: float, input1: float, input2: float, radian = True) -> None:
+ """
+ Sets the rotation component of the sl.Transform from Euler angles.
+ :param input0: Roll value.
+ :param input1: Pitch value.
+ :param input2: Yaw value.
+ :param radian: Whether the angles are in radians or degrees. Default: True
+ """
+ pass
+
+
+class MESH_FILE_FORMAT(enum.Enum):
+ """
+ Lists available mesh file formats.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | PLY | Contains only vertices and faces. |
+ | PLY_BIN | Contains only vertices and faces encoded in binary. |
+ | OBJ | Contains vertices, normals, faces, and texture information (if possible). |
+ """
+ PLY = enum.auto()
+ PLY_BIN = enum.auto()
+ OBJ = enum.auto()
+ LAST = enum.auto()
+
+class MESH_TEXTURE_FORMAT(enum.Enum):
+ """
+ Lists available mesh texture formats.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | RGB | The texture will be on 3 channels. |
+ | RGBA | The texture will be on 4 channels. |
+ """
+ RGB = enum.auto()
+ RGBA = enum.auto()
+ LAST = enum.auto()
+
+class MESH_FILTER(enum.Enum):
+ """
+ Lists available mesh filtering intensities.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | LOW | Cleans the mesh by closing small holes and removing isolated faces. |
+ | MEDIUM | Soft face decimation and smoothing. |
+ | HIGH | Drastically reduces the number of faces and applies a soft smoothing. |
+ """
+ LOW = enum.auto()
+ MEDIUM = enum.auto()
+ HIGH = enum.auto()
+
+class PLANE_TYPE(enum.Enum):
+ """
+ Lists the available plane types detected based on the orientation.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | HORIZONTAL | Horizontal plane, such as a tabletop, floor, etc. |
+ | VERTICAL | Vertical plane, such as a wall. |
+ | UNKNOWN | Unknown plane orientation. |
+ """
+ HORIZONTAL = enum.auto()
+ VERTICAL = enum.auto()
+ UNKNOWN = enum.auto()
+ LAST = enum.auto()
+
+class MeshFilterParameters:
+ """
+ Class containing a set of parameters for the mesh filtering functionality (see Mesh.filter()).
+
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ def __dealloc__(self) -> None:
+ pass
+
+ def set(self, filter = MESH_FILTER.LOW) -> None:
+ """
+ Sets the filtering intensity.
+ :param filter: Desired sl.MESH_FILTER.
+ """
+ pass
+
+ def save(self, filename: str) -> bool:
+ """
+ Saves the current set of parameters into a file to be reloaded with the load() method.
+ :param filename: Name of the file which will be created to store the parameters.
+ :return: True if the file was successfully saved, otherwise False.
+ .. warning:: For security reasons, the file must not already exist.
+ .. warning:: In case a file already exists, the method will return False and the existing file will not be updated.
+ """
+ return bool()
+
+ def load(self, filename: str) -> bool:
+ """
+ Loads a set of parameters from the values contained in a previously saved file (see save()).
+ :param filename: Path to the file from which the parameters will be loaded.
+ :return: True if the file was successfully loaded, otherwise False.
+ """
+ return bool()
+
+
+class PointCloudChunk:
+ """
+ Class representing a sub-point cloud containing local vertices and colors.
+
+ .. note::
+ vertices and normals have the same size.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def has_been_updated(self) -> bool:
+ """
+ Whether the point cloud chunk has been updated by an inner process.
+ """
+ return bool()
+
+ @property
+ def timestamp(self) -> int:
+ """
+ Timestamp of the latest update.
+ """
+ return int()
+
+ def vertices(self) -> np.array[float]:
+ """
+ NumPy array of vertices.
+ Vertices are defined by a colored 3D point ```[x, y, z, rgba]```.
+ """
+ return np.array[float]()
+
+ def normals(self) -> np.array[float]:
+ """
+ NumPy array of normals.
+ Normals are defined by three components ```[nx, ny, nz]```.
+ .. note::
+ A normal is defined for each vertex.
+ """
+ return np.array[float]()
+
+ def barycenter(self) -> np.array[float]:
+ """
+ 3D centroid of the chunk.
+ """
+ return np.array[float]()
+
+ def clear(self) -> None:
+ """
+ Clears all data.
+ """
+ pass
+
+
+class Chunk:
+ """
+ Class representing a sub-mesh containing local vertices and triangles.
+
+ Vertices and normals have the same size and are linked by ids stored in triangles.
+ .. note::
+ uv contains data only if your mesh has textures (by loading it or after calling sl.Mesh.apply_texture()).
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def has_been_updated(self) -> bool:
+ """
+ Whether the chunk has been updated by an inner process.
+ """
+ return bool()
+
+ @property
+ def timestamp(self) -> int:
+ """
+ Timestamp of the latest update.
+ """
+ return int()
+
+ def vertices(self) -> np.array[float]:
+ """
+ NumPy array of vertices.
+ Vertices are defined by a 3D point ```[x, y, z]```.
+ """
+ return np.array[float]()
+
+ def triangles(self) -> np.array[int]:
+ """
+ NumPy array of triangles/faces.
+ A triangle is defined as a set of three vertex indexes ```[v1, v2, v3]```.
+ """
+ return np.array[int]()
+
+ def normals(self) -> np.array[float]:
+ """
+ NumPy array of normals.
+ Normals are defined by three components ```[nx, ny, nz]```.
+ .. note::
+ A normal is defined for each vertex.
+ """
+ return np.array[float]()
+
+ def colors(self) -> np.array[int]:
+ """
+ NumPy array of colors.
+ Colors are defined by three components ```[r, g, b]```.
+ .. note::
+ A color is defined for each vertex.
+ """
+ return np.array[int]()
+
+ def uv(self) -> np.array[float]:
+ """
+ UVs define the 2D projection of each vertex onto the texture.
+ Values are normalized [0, 1] and start from the bottom left corner of the texture (as expected by OpenGL).
+ \n In order to display a textured mesh you need to bind the texture and then draw each triangle by picking its UV values.
+ .. note::
+ Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()).
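+
+ A short sketch of per-chunk access (assuming ``mesh`` is an sl.Mesh produced by the spatial mapping module):
+ .. code-block:: text
+
+ for chunk in mesh.chunks:
+ verts = chunk.vertices() # [x, y, z] per vertex
+ uvs = chunk.uv() # empty unless a texture was loaded or applied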
+ """ + return np.array[float]() + + def barycenter(self) -> np.array[float]: + """ + 3D centroid of the chunk. + """ + return np.array[float]() + + def clear(self) -> None: + """ + Clears all data. + """ + pass + + +class FusedPointCloud: + """ + Class representing a fused point cloud and containing the geometric and color data of the scene captured by the spatial mapping module. + + By default the fused point cloud is defined as a set of point cloud chunks. + \n This way we update only the required data, avoiding a time consuming remapping process every time a small part of the sl.FusedPointCloud cloud is changed. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def chunks(self) -> list[PointCloudChunk]: + """ + List of chunks constituting the sl.FusedPointCloud. + """ + return list[PointCloudChunk]() + + def __dealloc__(self) -> None: + pass + + def __getitem__(self, x) -> PointCloudChunk: + """ + Gets a chunk from chunks. + """ + return PointCloudChunk() + + def vertices(self) -> np.array[float]: + """ + NumPy array of vertices. + Vertices are defined by a colored 3D point ```[x, y, z, rgba]```. + """ + return np.array[float]() + + def normals(self) -> np.array[float]: + """ + NumPy array of normals. + Normals are defined by three components ```[nx, ny, nz]```. + .. note:: + A normal is defined for each vertex. + """ + return np.array[float]() + + def save(self, filename: str, typeMesh = MESH_FILE_FORMAT.OBJ, id = []) -> bool: + """ + Saves the current sl.FusedPointCloud into a file. + :param filename: Path of the file to store the fused point cloud in. + :param typeMesh: File extension type. Default: sl.MESH_FILE_FORMAT.OBJ. + :param id: Set of chunks to be saved. Default: (empty) (all chunks are saved) + :return: True if the file was successfully saved, otherwise False. + + .. note:: + This method operates on the sl.FusedPointCloud not on chunks. + + .. note:: + This way you can save different parts of your sl.FusedPointCloud by updating it with update_from_chunklist(). + """ + return bool() + + def load(self, filename: str, update_chunk_only = False) -> bool: + """ + Loads the fused point cloud from a file. + :param filename: Path of the file to load the fused point cloud from. + :param update_chunk_only: Whether to only load data in chunks (and not vertices / normals).\n Default: False. + :return: True if the mesh was successfully loaded, otherwise False. + + .. note:: + Updating a sl.FusedPointCloud is time consuming. Consider using only chunks for better performances. + """ + return bool() + + def clear(self) -> None: + """ + Clears all the data. + """ + pass + + def update_from_chunklist(self, id = []) -> None: + """ + Updates vertices and normals from chunk data pointed by the given list of id. + :param id: Indices of chunks which will be concatenated. Default: (empty). + .. note:: + If the given list of id is empty, all chunks will be used to update the current sl.FusedPointCloud. + """ + pass + + def get_number_of_points(self) -> int: + """ + Computes the total number of points stored in all chunks. + :return: The number of points stored in all chunks. + """ + return int() + + +class Mesh: + """ + Class representing a mesh and containing the geometric (and optionally texture) data of the scene captured by the spatial mapping module. + + By default the mesh is defined as a set of chunks. + \n This way we update only the data that has to be updated avoiding a time consuming remapping process every time a small part of the sl.Mesh is updated. 
+ """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def chunks(self) -> list[Chunk]: + """ + List of chunks constituting the sl.Mesh. + """ + return list[Chunk]() + + @property + def texture(self) -> Mat: + """ + Texture of the sl.Mesh. + .. note:: + Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()). + """ + return Mat() + + def __dealloc__(self) -> None: + pass + + def __getitem__(self, x) -> Chunk: + """ + Gets a chunk from chunks. + """ + return Chunk() + + def filter(self, params = MeshFilterParameters(), update_chunk_only = False) -> bool: + """ + Filters the mesh. + The resulting mesh is smoothed, small holes are filled, and small blobs of non-connected triangles are deleted. + :param params: Filtering parameters. Default: a preset of sl.MeshFilterParameters. + :param update_chunk_only: Whether to only update chunks (and not vertices / normals / triangles).\n Default: False. + :return: True if the mesh was successfully filtered, otherwise False. + + .. note:: + The filtering is a costly operation. + + .. note:: + It is not recommended to call it every time you retrieve a mesh but only at the end of your spatial mapping process. + """ + return bool() + + def apply_texture(self, texture_format = MESH_TEXTURE_FORMAT.RGB) -> bool: + """ + Applies a texture to the mesh. + By using this method you will get access to uv, and texture. + \n The number of triangles in the mesh may slightly differ before and after calling this method due to missing texture information. + \n There is only one texture for the mesh, the uv of each chunk are expressed for it in its entirety. + \n NumPy arrays of vertices / normals and uv have now the same size. + :param texture_format: Number of channels desired for the computed texture.\n Default: sl.MESH_TEXTURE_FORMAT.RGB. + :return: True if the mesh was successfully textured, otherwise False. + + .. note:: + This method can be called as long as you do not start a new spatial mapping process (due to shared memory). + + .. note:: + This method can require a lot of computation time depending on the number of triangles in the mesh. + + .. note:: + It is recommended to call it once at the end of your spatial mapping process. + + + .. warning:: The sl.SpatialMappingParameters.save_texture parameter must be set to True when enabling the spatial mapping to be able to apply the textures. + .. warning:: The mesh should be filtered before calling this method since filter() will erase the textures. + .. warning:: The texturing is also significantly slower on non-filtered meshes. + """ + return bool() + + def save(self, filename: str, typeMesh = MESH_FILE_FORMAT.OBJ, id = []) -> bool: + """ + Saves the current sl.Mesh into a file. + :param filename: Path of the file to store the mesh in. + :param typeMesh: File extension type. Default: sl.MESH_FILE_FORMAT.OBJ. + :param id: Set of chunks to be saved. Default: (empty) (all chunks are saved) + :return: True if the file was successfully saved, otherwise False. + + .. note:: + Only sl.MESH_FILE_FORMAT.OBJ supports textures data. + + .. note:: + This method operates on the sl.Mesh not on chunks. + + .. note:: + This way you can save different parts of your sl.Mesh by updating it with update_mesh_from_chunklist(). + """ + return bool() + + def load(self, filename: str, update_mesh = False) -> bool: + """ + Loads the mesh from a file. + :param filename: Path of the file to load the mesh from. 
+ :param update_mesh: Whether to only load data in chunks (and not vertices / normals / triangles).\n Default: False.
+ :return: True if the mesh was successfully loaded, otherwise False.
+
+ .. note::
+ Updating a sl.Mesh is time consuming. Consider using only chunks for better performance.
+ """
+ return bool()
+
+ def clear(self) -> None:
+ """
+ Clears all the data.
+ """
+ pass
+
+ def vertices(self) -> np.array[float]:
+ """
+ NumPy array of vertices.
+ Vertices are defined by a 3D point ```[x, y, z]```.
+ """
+ return np.array[float]()
+
+ def triangles(self) -> np.array[int]:
+ """
+ NumPy array of triangles/faces.
+ A triangle is defined as a set of three vertex indexes ```[v1, v2, v3]```.
+ """
+ return np.array[int]()
+
+ def normals(self) -> np.array[float]:
+ """
+ NumPy array of normals.
+ Normals are defined by three components ```[nx, ny, nz]```.
+ .. note::
+ A normal is defined for each vertex.
+ """
+ return np.array[float]()
+
+ def colors(self) -> np.array[int]:
+ """
+ NumPy array of colors.
+ Colors are defined by three components ```[r, g, b]```.
+ .. note::
+ A color is defined for each vertex.
+ """
+ return np.array[int]()
+
+ def uv(self) -> np.array[float]:
+ """
+ UVs define the 2D projection of each vertex onto the texture.
+ Values are normalized [0, 1] and start from the bottom left corner of the texture (as expected by OpenGL).
+ In order to display a textured mesh you need to bind the texture and then draw each triangle by picking its UV values.
+ .. note::
+ Contains data only if your mesh has textures (by loading it or calling sl.Mesh.apply_texture()).
+ """
+ return np.array[float]()
+
+ def get_number_of_triangles(self) -> int:
+ """
+ Computes the total number of triangles stored in all chunks.
+ :return: The number of triangles stored in all chunks.
+ """
+ return int()
+
+ def get_boundaries(self) -> np.array[int]:
+ """
+ Computes the indices of boundary vertices.
+ :return: The indices of boundary vertices.
+ """
+ return np.array[int]()
+
+ def merge_chunks(self, faces_per_chunk: int) -> None:
+ """
+ Merges current chunks.
+ This method can be used to merge chunks into bigger sets to improve the rendering process.
+ :param faces_per_chunk: Number of faces per chunk.
+
+ .. note::
+ This method is useful for Unity, which does not handle chunks with more than 65K vertices.
+
+ .. warning:: This method should not be called during the spatial mapping process since mesh updates will revert these changes.
+ """
+ pass
+
+ def get_gravity_estimate(self) -> np.array[float]:
+ """
+ Estimates the gravity vector.
+ This method looks for a dominant plane in the whole mesh considering that it is the floor (or a horizontal plane).
+ :return: The estimated gravity vector (NumPy array).
+
+ .. note::
+ This can be used to find the gravity to create realistic physical interactions.
+ """
+ return np.array[float]()
+
+ def get_visible_list(self, camera_pose: Transform) -> list[int]:
+ """
+ Computes the list of ids of chunks visible from a specific point of view.
+ :param camera_pose: Point of view (given in the same reference as the vertices).
+ :return: The list of ids of visible chunks.
+ """
+ return list[int]()
+
+ def get_surrounding_list(self, camera_pose: Transform, radius: float) -> list[int]:
+ """
+ Computes the list of ids of chunks close to a specific point of view.
+ :param camera_pose: Point of view (given in the same reference as the vertices).
+ :param radius: Radius determining closeness (given in the same unit as the mesh).
+ :return: The list of ids of chunks close to the given point.
+ """
+ return list[int]()
+
+ def update_mesh_from_chunklist(self, id = []) -> None:
+ """
+ Updates vertices / normals / triangles / uv from the chunk data pointed to by the given list of ids.
+ :param id: Indices of chunks which will be concatenated. Default: (empty).
+ .. note::
+ If the given list of ids is empty, all chunks will be used to update the current sl.Mesh.
+ """
+ pass
+
+
+class Plane:
+ """
+ Class representing a plane defined by a point and a normal, or a plane equation.
+
+ Other elements can be extracted such as the mesh, the 3D bounds, etc.
+ .. note::
+ The plane measurements are expressed in the reference defined by sl.RuntimeParameters.measure3D_reference_frame.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def type(self) -> PLANE_TYPE:
+ """
+ Type of the plane defined by its orientation.
+ .. note::
+ It is deduced from the gravity vector and is therefore not available on sl.MODEL.ZED.
+
+ .. note::
+ sl.MODEL.ZED will give sl.PLANE_TYPE.UNKNOWN for every plane.
+ """
+ return PLANE_TYPE()
+
+ @type.setter
+ def type(self, type: Any) -> None:
+ pass
+
+ def get_normal(self) -> np.array[float]:
+ """
+ Gets the plane normal vector.
+ :return: sl.Plane normalized normal vector (NumPy array).
+ """
+ return np.array[float]()
+
+ def get_center(self) -> np.array[float]:
+ """
+ Gets the plane center point.
+ :return: sl.Plane center point.
+ """
+ return np.array[float]()
+
+ def get_pose(self, py_pose = Transform()) -> Transform:
+ """
+ Gets the plane pose relative to the global reference frame.
+ :param py_pose: sl.Transform to fill (or it creates one by default).
+ :return: Transformation matrix (rotation and translation) of the plane pose.
+ .. note::
+ Can be used to transform the global reference frame center ```(0, 0, 0)``` to the plane center.
+ """
+ return Transform()
+
+ def get_extents(self) -> np.array[float]:
+ """
+ Gets the width and height of the bounding rectangle around the plane contours.
+ :return: Width and height of the bounding plane contours (NumPy array).
+ .. warning:: This value is expressed in the plane reference frame.
+ """
+ return np.array[float]()
+
+ def get_plane_equation(self) -> np.array[float]:
+ """
+ Gets the plane equation.
+ :return: Plane equation coefficients ```[a, b, c, d]``` (NumPy array).
+ .. note::
+ The plane equation has the following form: ```ax + by + cz = d```.
+ """
+ return np.array[float]()
+
+ def get_bounds(self) -> np.array[float][float]:
+ """
+ Gets the polygon bounds of the plane.
+ :return: Vector of 3D points forming a polygon corresponding to the current visible limits of the plane (NumPy array).
+ """
+ return np.array[float][float]()
+
+ def extract_mesh(self) -> Mesh:
+ """
+ Computes and returns the mesh of the bounds polygon.
+ :return: sl.Mesh representing the plane delimited by the visible bounds.
+ """
+ return Mesh()
+
+ def get_closest_distance(self, point = [0, 0, 0]) -> float:
+ """
+ Gets the distance between the input point and its projection along the normal vector onto the plane (the closest point on the plane).
+ :param point: Point to project onto the plane.
+ :return: The Euclidean distance between the input point and the projected point.
+ """
+ return float()
+
+ def clear(self) -> None:
+ """
+ Clears all the data.
+ """
+ pass
+
+
+class MAPPING_RESOLUTION(enum.Enum):
+ """
+ Lists the spatial mapping resolution presets.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | HIGH | Creates a detailed geometry. Requires lots of memory. |
+ | MEDIUM | Small variations in the geometry will disappear. Useful for big objects. |
+ | LOW | Keeps only huge variations of the geometry. Useful for outdoor purposes. |
+ """
+ HIGH = enum.auto()
+ MEDIUM = enum.auto()
+ LOW = enum.auto()
+
+class MAPPING_RANGE(enum.Enum):
+ """
+ Lists the spatial mapping depth range presets.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | SHORT | Only depth close to the camera will be used during spatial mapping. |
+ | MEDIUM | Medium depth range. |
+ | LONG | Takes into account objects that are far away. Useful for outdoor purposes. |
+ | AUTO | Depth range will be computed based on the current sl.Camera state and parameters. |
+ """
+ SHORT = enum.auto()
+ MEDIUM = enum.auto()
+ LONG = enum.auto()
+ AUTO = enum.auto()
+
+class SPATIAL_MAP_TYPE(enum.Enum):
+ """
+ Lists the types of spatial maps that can be created.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | MESH | The geometry is represented by a set of vertices connected by edges and forming faces. No color information is available. |
+ | FUSED_POINT_CLOUD | The geometry is represented by a set of 3D colored points. |
+ """
+ MESH = enum.auto()
+ FUSED_POINT_CLOUD = enum.auto()
+
+class BUS_TYPE(enum.Enum):
+ """
+ Lists available LIVE input types in the ZED SDK.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | USB | USB input mode. |
+ | GMSL | GMSL input mode. Note: Only on NVIDIA Jetson. |
+ | AUTO | Automatically selects the input type. Tries first for available USB cameras, then GMSL. |
+ """
+ USB = enum.auto()
+ GMSL = enum.auto()
+ AUTO = enum.auto()
+ LAST = enum.auto()
+
+def generate_virtual_stereo_serial_number(serial_left, serial_right) -> "unsigned int":
+ """
+ Generates a unique identifier for a virtual stereo camera based on the serial numbers of the two ZED Ones.
+ :param serial_left: Serial number of the left camera.
+ :param serial_right: Serial number of the right camera.
+ :return: A unique hash for the given pair of serial numbers, or 0 if an error occurred (e.g. the same serial number was given twice).
+ """
+ return "unsigned int"()
+
+class InputType:
+ """
+ Class defining the input type used in the ZED SDK.
+ It can be used to select a specific camera with an id or serial number, or from an SVO file.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ def __dealloc__(self) -> None:
+ pass
+
+ def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None:
+ """
+ Sets the input as the camera with the specified id.
+
+ .. note::
+ The id is not related to the serial number of the camera. The id is assigned by the OS depending on the order the cameras are plugged in.
+
+ .. warning:: Using an id is not recommended if you have multiple cameras plugged into the system; prefer using the serial number instead.
+
+ :param cam_id: Id of the camera to open. The default, -1, will open the first available camera. A number >= 0 will try to open the camera with the corresponding id.
+ :param bus_type: Whether the camera is a USB or a GMSL camera.
+ """
+ pass
+
+ def set_from_serial_number(self, serial_number) -> None:
+ """
+ Sets the input as the camera with the specified serial number.
+ :param serial_number: Serial number of the camera to open.
+ """
+ pass
+
+ def set_virtual_stereo_from_camera_id(self, id_left, id_right, virtual_serial_number) -> bool:
+ """
+ Sets the input as a virtual stereo camera from two cameras with the specified ids.
+ :param id_left: Id of the left camera.
+ :param id_right: Id of the right camera.
+ :param virtual_serial_number: Serial number of the virtual stereo camera.
+ .. note::
+ The virtual serial number must fall within an interval that reflects the Product ID range.
+ This is necessary to avoid, for instance, downloading calibration data from an unrelated product.
+ The valid range is 110000000 to 119999999.
+ A support function, generate_virtual_stereo_serial_number(), can be used to compute a valid virtual serial number based on the ZED One serial numbers.
+ :return: False if the virtual camera was successfully created (no error), otherwise True.
+ """
+ return bool()
+
+ def set_virtual_stereo_from_serial_numbers(self, camera_left_serial_number, camera_right_serial_number, virtual_serial_number) -> bool:
+ """
+ Sets the input as a virtual stereo camera from two cameras with the specified serial numbers.
+ :param camera_left_serial_number: Serial number of the left camera.
+ :param camera_right_serial_number: Serial number of the right camera.
+ :param virtual_serial_number: Serial number of the virtual stereo camera.
+ .. note::
+ The virtual serial number must fall within an interval that reflects the Product ID range.
+ This is necessary to avoid, for instance, downloading calibration data from an unrelated product.
+ The valid range is 110000000 to 119999999.
+ A support function, generate_virtual_stereo_serial_number(), can be used to compute a valid virtual serial number based on the ZED One serial numbers.
+ :return: False if the virtual camera was successfully created (no error), otherwise True.
+ """
+ return bool()
+
+ def set_from_svo_file(self, svo_input_filename) -> None:
+ """
+ Sets the input as the SVO file specified by the filename.
+ :param svo_input_filename: The path to the desired SVO file.
+ """
+ pass
+
+ def set_from_stream(self, sender_ip, port = 30000) -> None:
+ """
+ Sets the input to stream with the specified ip and port.
+ :param sender_ip: The IP address of the streaming sender.
+ :param port: The port on which to listen. Default: 30000
+ .. note::
+ The protocol used for the streaming module is based on RTP/RTCP.
+
+ .. warning:: The port must be an even number, since port+1 is used for control data.
+ """
+ pass
+
+ def get_type(self) -> INPUT_TYPE:
+ """
+ Returns the current input type.
+ """
+ return INPUT_TYPE()
+
+ def get_configuration(self) -> str:
+ """
+ Returns the current input configuration as a string, e.g. SVO name, serial number, streaming ip, etc.
+ """
+ return str()
+
+ def is_init(self) -> bool:
+ """
+ Checks whether the input is set.
+ """
+ return bool()
+
+
+class InitParameters:
+ """
+ Class containing the options used to initialize the sl.Camera object.
+
+ This class allows you to select multiple parameters for the sl.Camera such as the selected camera, resolution, depth mode, coordinate system, and units of measurement.
+ \n Once filled with the desired options, it should be passed to the sl.Camera.open() method.
+
+ .. code-block:: text
+
+
+ import pyzed.sl as sl
+
+ def main() :
+ zed = sl.Camera() # Create a ZED camera object
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 0 # Disable verbose mode
+
+ # Use the camera in LIVE mode
+ init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode
+ init_params.camera_fps = 30 # Set fps at 30
+
+ # Or use the camera in SVO (offline) mode
+ #init_params.set_from_svo_file("xxxx.svo")
+
+ # Or use the camera in STREAM mode
+ #init_params.set_from_stream("192.168.1.12", 30000)
+
+ # Other parameters are left to their default values
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ exit(-1)
+
+ # Close the camera
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+
+
+ With its default values, it opens the camera in live mode at sl.RESOLUTION.HD720
+ (or sl.RESOLUTION.HD1200 for the ZED X/X Mini) and sets the depth mode to sl.DEPTH_MODE.NEURAL.
+ \n You can customize it to fit your application.
+ .. note::
+ The parameters can also be saved and reloaded using its save() and load() methods.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def enable_image_enhancement(self) -> bool:
+ """
+ Enable the Enhanced Contrast Technology, to improve image quality.
+
+ Default: True.
+
+ \n If set to true, image enhancement will be activated in the camera ISP. Otherwise, the image will not be enhanced by the ISP.
+ .. note::
+ This only works for firmware versions 1523 and up.
+ """
+ return bool()
+
+ @enable_image_enhancement.setter
+ def enable_image_enhancement(self, enable_image_enhancement: Any) -> None:
+ pass
+
+ @property
+ def camera_image_flip(self) -> FLIP_MODE:
+ """
+ Defines if a flip of the images is needed.
+
+ If you are using the camera upside down, setting this parameter to sl.FLIP_MODE.ON will cancel its rotation.
+ \n The images will be horizontally flipped.
+ \n Default: sl.FLIP_MODE.AUTO
+ .. note::
+ From ZED SDK 3.2 a new sl.FLIP_MODE enum was introduced to add automatic flip mode detection based on the IMU gravity detection.
+
+ .. note::
+ This does not work on sl.MODEL.ZED cameras since they do not have the necessary sensors.
+ """
+ return FLIP_MODE()
+
+ @camera_image_flip.setter
+ def camera_image_flip(self, camera_image_flip: Any) -> None:
+ pass
+
+ @property
+ def maximum_working_resolution(self) -> Resolution:
+ """
+ Sets a maximum size for all SDK outputs, such as the retrieve_image() and retrieve_measure() functions.
+
+ This overrides the default (0, 0); instead of outputting sl.Mat at the native image size, the ZED SDK will use this size by default. A custom lower size can also be used at runtime, but not a bigger one. This is used for internal optimization of compute and memory allocations.
+
+ The default, (0, 0), means the native image size is used (as in previous versions).
+
+ .. note::
+ If a maximum_working_resolution field is lower than 64, it is interpreted as a dividing scale factor:
+
+ - maximum_working_resolution = sl.Resolution(1280, 16) -> 1280 x (image_height/2) = 1280 x half height
+ - maximum_working_resolution = sl.Resolution(4, 4) -> (image_width/4) x (image_height/4) = quarter size
+ """
+ return Resolution()
+
+ @maximum_working_resolution.setter
+ def maximum_working_resolution(self, maximum_working_resolution: Any) -> None:
+ pass
+
+ @property
+ def sdk_gpu_id(self) -> int:
+ """
+ NVIDIA graphics card id to use.
+
+ By default the SDK will use the most powerful NVIDIA graphics card found.
+ \n However, when running several applications, or using several cameras at the same time, splitting the load over available GPUs can be useful.
+ \n This parameter allows you to select the GPU used by the sl.Camera using an ID from 0 to n-1 GPUs in your PC.
+ \n Default: -1
+ .. note::
+ A negative value will search for all CUDA capable devices and select the most powerful.
+ """
+ return int()
+
+ @sdk_gpu_id.setter
+ def sdk_gpu_id(self, sdk_gpu_id: Any) -> None:
+ pass
+
+ @property
+ def optional_settings_path(self) -> str:
+ """
+ Optional path where the ZED SDK has to search for the settings file (SN.conf file).
+
+ This file contains the calibration information of the camera.
+ \n Default: ""
+
+ .. note::
+ The settings file will be searched for in the default directory:
+
+ * **Linux**: /usr/local/zed/settings/
+ * **Windows**: C:/ProgramData/stereolabs/settings
+
+ .. note::
+ If a path is specified and no file has been found, the ZED SDK will search for the settings file in the default directory.
+
+ .. note::
+ An automatic download of the settings file (through **ZED Explorer** or the installer) will still download the files to the default path.
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ home = "/path/to/home"
+ path = home + "/Documents/settings/" # assuming /path/to/home/Documents/settings/SNXXXX.conf exists. Otherwise, it will be searched for in /usr/local/zed/settings/
+ init_params.optional_settings_path = path
+ """
+ return str()
+
+ @optional_settings_path.setter
+ def optional_settings_path(self, optional_settings_path: Any) -> None:
+ pass
+
+ @property
+ def coordinate_system(self) -> COORDINATE_SYSTEM:
+ """
+ sl.COORDINATE_SYSTEM to be used as the reference for positional tracking, mesh, point clouds, etc.
+
+ This parameter allows you to select the sl.COORDINATE_SYSTEM used by the sl.Camera object to return its measures.
+ \n This defines the order and the direction of the axes of the coordinate system.
+ \n Default: sl.COORDINATE_SYSTEM.IMAGE
+ """
+ return COORDINATE_SYSTEM()
+
+ @coordinate_system.setter
+ def coordinate_system(self, coordinate_system: Any) -> None:
+ pass
+
+ @property
+ def grab_compute_capping_fps(self) -> float:
+ """
+ Defines a computation upper limit to the grab frequency.
+
+ This can be useful to get a known constant fixed rate or to limit the computation load while keeping a short exposure time by setting a high camera capture framerate.
+ \n The value should be lower than sl.InitParameters.camera_fps and strictly positive.
+ .. note::
+ It has no effect when reading an SVO file.
+
+
+ This is an upper limit and won't make a difference if the computation is slower than the desired compute capping FPS.
+ .. note::
+ Internally the sl.Camera.grab() method always tries to get the latest available image while respecting the desired FPS as much as possible.
+ """
+ return float()
+
+ @grab_compute_capping_fps.setter
+ def grab_compute_capping_fps(self, grab_compute_capping_fps: Any) -> None:
+ pass
+
+ @property
+ def async_grab_camera_recovery(self) -> bool:
+ """
+ Defines the behavior of the automatic camera recovery during the sl.Camera.grab() method call.
+
+ When async is enabled and there's an issue with the communication with the sl.Camera object,
+ sl.Camera.grab() will exit after a short period and return the sl.ERROR_CODE.CAMERA_REBOOTING warning.
+ \n The recovery will run in the background until the correct communication is restored.
+ \n When async_grab_camera_recovery is false, the sl.Camera.grab() method is blocking and will return
+ only once the camera communication is restored or the timeout is reached.
+ \n Default: False
+ """
+ return bool()
+
+ @async_grab_camera_recovery.setter
+ def async_grab_camera_recovery(self, async_grab_camera_recovery: Any) -> None:
+ pass
+
+ @property
+ def enable_right_side_measure(self) -> bool:
+ """
+ Enable the measurement computation on the right images.
+
+ By default, the ZED SDK only computes a single depth map, aligned with the left camera image.
+ \n This parameter allows you to enable sl.MEASURE.DEPTH_RIGHT and other sl.MEASURE.XXX_RIGHT at the cost of additional computation time.
+ \n For example, mixed reality pass-through applications require one depth map per eye, so this parameter can be activated.
+ \n Default: False
+ """
+ return bool()
+
+ @enable_right_side_measure.setter
+ def enable_right_side_measure(self, enable_right_side_measure: Any) -> None:
+ pass
+
+ @property
+ def svo_real_time_mode(self) -> bool:
+ """
+ Defines if the sl.Camera object returns frames in real-time mode.
+
+ When playing back an SVO file, each call to sl.Camera.grab() will extract a new frame and use it.
+ \n However, this ignores the real capture rate of the images saved in the SVO file.
+ \n Enabling this parameter will bring the SDK closer to a real simulation when playing back a file by using the images' timestamps.
+ \n Default: False
+ .. note::
+ sl.Camera.grab() will return an error when trying to play too fast, and frames will be dropped when playing too slowly.
+ """
+ return bool()
+
+ @svo_real_time_mode.setter
+ def svo_real_time_mode(self, svo_real_time_mode: Any) -> None:
+ pass
+
+ @property
+ def sdk_verbose_log_file(self) -> str:
+ """
+ File path to store the ZED SDK logs (if sdk_verbose is enabled).
+
+ The file will be created if it does not exist.
+ \n Default: ""
+
+ .. note::
+ Setting this parameter to any value will redirect all standard output print calls of the entire program.
+
+ .. note::
+ This means that your own standard output print calls will be redirected to the log file.
+
+ .. warning:: The log file won't be cleared after successive executions of the application.
+ .. warning:: This means that it can grow indefinitely if not cleared.
+ """
+ return str()
+
+ @sdk_verbose_log_file.setter
+ def sdk_verbose_log_file(self, sdk_verbose_log_file: Any) -> None:
+ pass
+
+ @property
+ def depth_minimum_distance(self) -> float:
+ """
+ Minimum depth distance to be returned, measured in the sl.UNIT defined in coordinate_units.
+
+ This parameter allows you to specify the minimum depth value (from the camera) that will be computed.
+ \n Setting this value to any negative or null value will select the default minimum depth distance available for the used ZED camera (depending on the camera focal length and baseline).
+ \n Default: -1
+
+ \n When using deprecated depth modes (sl.DEPTH_MODE.PERFORMANCE, sl.DEPTH_MODE.QUALITY or sl.DEPTH_MODE.ULTRA),
+ the default minimum depth distances are given by the corresponding table in the documentation.
+
+ .. note::
+ This value cannot be greater than 3 meters.
+ """
+ return float()
+
+ @depth_minimum_distance.setter
+ def depth_minimum_distance(self, depth_minimum_distance: Any) -> None:
+ pass
+
+ @property
+ def coordinate_units(self) -> UNIT:
+ """
+ Unit of spatial data (depth, point cloud, tracking, mesh, etc.) for retrieval.
+
+ Default: sl.UNIT.MILLIMETER
+ """
+ return UNIT()
+
+ @coordinate_units.setter
+ def coordinate_units(self, coordinate_units: Any) -> None:
+ pass
+
+ @property
+ def open_timeout_sec(self) -> float:
+ """
+ Defines a timeout in seconds after which an error is reported if the sl.Camera.open() method fails.
+
+ Set to '-1' to try to open the camera endlessly without returning an error in case of failure.
+ \n Set to '0' to return an error in case of failure at the first attempt.
+ \n Default: 5.0
+ .. note::
+ This parameter only impacts the LIVE mode.
+ """
+ return float()
+
+ @open_timeout_sec.setter
+ def open_timeout_sec(self, open_timeout_sec: Any) -> None:
+ pass
+
+ @property
+ def depth_stabilization(self) -> int:
+ return int()
+
+ @depth_stabilization.setter
+ def depth_stabilization(self, depth_stabilization: Any) -> None:
+ pass
+
+ @property
+ def depth_mode(self) -> DEPTH_MODE:
+ """
+ sl.DEPTH_MODE to be used.
+
+ The ZED SDK offers several sl.DEPTH_MODE, offering various levels of performance and accuracy.
+ \n This parameter allows you to set the sl.DEPTH_MODE that best matches your needs.
+ \n Default: sl.DEPTH_MODE.NEURAL
+ .. note::
+ Available depth modes are listed here: sl.DEPTH_MODE.
+ """
+ return DEPTH_MODE()
+
+ @depth_mode.setter
+ def depth_mode(self, depth_mode: Any) -> None:
+ pass
+
+ @property
+ def depth_maximum_distance(self) -> float:
+ """
+ Maximum depth distance to be returned, measured in the sl.UNIT defined in coordinate_units.
+
+ When estimating the depth, the ZED SDK uses this upper limit to turn higher values into **inf** ones.
+ \n Changing this value has no impact on performance and doesn't affect the positional tracking nor the spatial mapping.
+ \n It only changes the values of the depth, point cloud and normals.
+ \n Setting this value to any negative or null value will select the default maximum depth distance available.
+
+ \n Default: -1
+ """
+ return float()
+
+ @depth_maximum_distance.setter
+ def depth_maximum_distance(self, depth_maximum_distance: Any) -> None:
+ pass
+
+ @property
+ def enable_image_validity_check(self) -> int:
+ """
+ Enables or disables the image validity verification.
+ This will perform additional verification on the image to identify corrupted data. This verification is done in the sl.Camera.grab() method and requires some computations.
+ \n If an issue is found, the sl.Camera.grab() method will output a warning as sl.ERROR_CODE.CORRUPTED_FRAME.
+ \n This version doesn't currently detect frame tearing.
+ \n Default: False (disabled)
+ """
+ return int()
+
+ @enable_image_validity_check.setter
+ def enable_image_validity_check(self, enable_image_validity_check: Any) -> None:
+ pass
+
+ @property
+ def async_image_retrieval(self) -> bool:
+ """
+ Enables async image retrieval.
+
+ If set to true, the camera images are retrieved at a framerate that can differ from the grab() application framerate. This is useful for recording an SVO or sending the camera stream at a different rate than the application runs at.
+ \n Default: False
+ """
+ return bool()
+
+ @async_image_retrieval.setter
+ def async_image_retrieval(self, async_image_retrieval: Any) -> None:
+ pass
+
+ @property
+ def sensors_required(self) -> bool:
+ """
+ Requires the successful opening of the motion sensors before opening the camera.
+
+ Default: False.
+
+ .. note::
+ If set to false, the ZED SDK will try to **open and use** the IMU (second USB device on USB2.0) and will open the camera successfully even if the sensors failed to open.
+ + + This can be used for example when using a USB3.0 only extension cable (some fiber extension for example). + .. note:: + This parameter only impacts the LIVE mode. + + .. note:: + If set to true, sl.Camera.open() will fail if the sensors cannot be opened. + + .. note:: + This parameter should be used when the IMU data must be available, such as object detection module or when the gravity is needed. + + + \nNote: This setting is not taken into account for sl.MODEL.ZED camera since it does not include sensors. + """ + return bool() + + @sensors_required.setter + def sensors_required(self, sensors_required: Any) -> None: + pass + + @property + def camera_fps(self) -> int: + """ + Requested camera frame rate. + + If set to 0, the highest FPS of the specified camera_resolution will be used. + \n Default: 0 + \n\n See sl.RESOLUTION for a list of supported frame rates. + .. note:: + If the requested camera_fps is unsupported, the closest available FPS will be used. + """ + return int() + + @camera_fps.setter + def camera_fps(self, camera_fps: Any) -> None: + pass + + @property + def optional_opencv_calibration_file(self) -> str: + """ + Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. + + .. note:: + Using this will disable the factory calibration of the camera. + + .. note:: + The file must be in a XML/YAML/JSON formatting provided by OpenCV. + + .. note:: + It also must contain the following keys: Size, K_LEFT (intrinsic left), K_RIGHT (intrinsic right), + + D_LEFT (distortion left), D_RIGHT (distortion right), R (extrinsic rotation), T (extrinsic translation). + .. warning:: Erroneous calibration values can lead to poor accuracy in all ZED SDK modules. + """ + return str() + + @optional_opencv_calibration_file.setter + def optional_opencv_calibration_file(self, optional_opencv_calibration_file: Any) -> None: + pass + + @property + def camera_resolution(self) -> RESOLUTION: + """ + Desired camera resolution. + .. note:: + Small resolutions offer higher framerate and lower computation time. + + .. note:: + In most situations, sl.RESOLUTION.HD720 at 60 FPS is the best balance between image quality and framerate. + + + Default: + * ZED X/X Mini: sl.RESOLUTION.HD1200 + * other cameras: sl.RESOLUTION.HD720 + .. note:: + Available resolutions are listed here: sl.RESOLUTION. + """ + return RESOLUTION() + + @camera_resolution.setter + def camera_resolution(self, camera_resolution: Any) -> None: + pass + + @property + def camera_disable_self_calib(self) -> bool: + """ + Disables the self-calibration process at camera opening. + + At initialization, sl.Camera runs a self-calibration process that corrects small offsets from the device's factory calibration. + \n A drawback is that calibration parameters will slightly change from one (live) run to another, which can be an issue for repeatability. + \n If set to true, self-calibration will be disabled and calibration parameters won't be optimized, raw calibration parameters from the configuration file will be used. + \n Default: false + .. note:: + In most situations, self calibration should remain enabled. + + .. note:: + You can also trigger the self-calibration at anytime after sl.Camera.open() by calling sl.Camera.update_self_calibration(), even if this parameter is set to true. 
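+
+ A minimal usage sketch (not part of the upstream documentation; assumes a connected camera):
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters()
+ init_params.camera_disable_self_calib = True # Use only the raw factory calibration
+ zed = sl.Camera()
+ err = zed.open(init_params)
+ # Self-calibration can still be triggered later with zed.update_self_calibration()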
+ """ + return bool() + + @camera_disable_self_calib.setter + def camera_disable_self_calib(self, camera_disable_self_calib: Any) -> None: + pass + + @property + def sdk_verbose(self) -> int: + """ + Enable the ZED SDK verbose mode. + + This parameter allows you to enable the verbosity of the ZED SDK to get a variety of runtime information in the console. + \n When developing an application, enabling verbose (``sdk_verbose >= 1``) mode can help you understand the current ZED SDK behavior. + \n However, this might not be desirable in a shipped version. + \n Default: 1 (verbose messages enabled) + .. note:: + The verbose messages can also be exported into a log file. + + .. note:: + See sdk_verbose_log_file for more. + """ + return int() + + @sdk_verbose.setter + def sdk_verbose(self, sdk_verbose: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default and optimized values. + :param camera_resolution: Chosen camera_resolution + :param camera_fps: Chosen camera_fps + :param svo_real_time_mode: Activates svo_real_time_mode + :param depth_mode: Chosen depth_mode + :param coordinate_units: Chosen coordinate_units + :param coordinate_system: Chosen coordinate_system + :param sdk_verbose: Sets sdk_verbose + :param sdk_gpu_id: Chosen sdk_gpu_id + :param depth_minimum_distance: Chosen depth_minimum_distance + :param depth_maximum_distance: Chosen depth_maximum_distance + :param camera_disable_self_calib: Activates camera_disable_self_calib + :param camera_image_flip: Sets camera_image_flip + :param enable_right_side_measure: Activates enable_right_side_measure + :param sdk_verbose_log_file: Chosen sdk_verbose_log_file + :param depth_stabilization: Activates depth_stabilization + :param input_t: Chosen input_t (InputType ) + :param optional_settings_path: Chosen optional_settings_path + :param sensors_required: Activates sensors_required + :param enable_image_enhancement: Activates enable_image_enhancement + :param optional_opencv_calibration_file: Sets optional_opencv_calibration_file + :param open_timeout_sec: Sets open_timeout_sec + :param async_grab_camera_recovery: Sets async_grab_camera_recovery + :param grab_compute_capping_fps: Sets grab_compute_capping_fps + :param enable_image_validity_check: Sets enable_image_validity_check + :param maximum_working_resolution: Sets maximum_working_resolution + + .. code-block:: text + + params = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720, camera_fps=30, depth_mode=sl.DEPTH_MODE.NEURAL) + """ + pass + + def save(self, filename) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set). + :return: True if file was successfully saved, otherwise False. + .. warning:: For security reason, the file must not exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated + + .. code-block:: text + + init_params = sl.InitParameters() # Set initial parameters + init_params.sdk_verbose = 1 # Enable verbose mode + init_params.set_from_svo_file("/path/to/file.svo") # Selects the and SVO file to be read + init_params.save("initParameters.conf") # Export the parameters into a file + """ + return bool() + + def load(self, filename) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. 
+ :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not set).
+ :return: True if the file was successfully loaded, otherwise False.
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.load("initParameters.conf") # Load the init_params from a previously exported file
+ """
+ return bool()
+
+ def input(self, input_t) -> None:
+ """
+ The SDK can handle different input types:
+ - Select a camera by its ID (/dev/videoX on Linux, and 0 to N cameras connected on Windows)
+ - Select a camera by its serial number
+ - Open a recorded sequence in the SVO file format
+ - Open a streaming camera from its IP address and port
+
+ This parameter allows you to select the desired input. It should be used like this:
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_camera_id(0) # Selects the camera with ID = 0
+ init_params.input = input_t
+ init_params.set_from_camera_id(0) # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_serial_number(1010) # Selects the camera with serial number = 1010
+ init_params.input = input_t
+ init_params.set_from_serial_number(1010) # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_svo_file("/path/to/file.svo") # Selects the SVO file to be read
+ init_params.input = input_t
+ init_params.set_from_svo_file("/path/to/file.svo") # You can also use this
+
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters() # Set initial parameters
+ init_params.sdk_verbose = 1 # Enable verbose mode
+ input_t = sl.InputType()
+ input_t.set_from_stream("192.168.1.42")
+ init_params.input = input_t
+ init_params.set_from_stream("192.168.1.42") # You can also use this
+
+
+ Available cameras and their ID/serial can be listed using get_device_list() and get_streaming_device_list().
+ Each Camera will create its own memory (CPU and GPU), therefore the number of ZEDs used at the same time can be limited by the configuration of your computer (GPU/CPU memory and capabilities).
+
+ Default: empty
+ See InputType for complementary information.
+
+ .. warning:: Using the ZED SDK Python API, using init_params.input.set_from_XXX won't work, use init_params.set_from_XXX instead
+ @property
+ def input(self) -> InputType:
+ input_t = InputType()
+ input_t.input = self.init.input
+ return input_t
+ """
+ pass
+
+ def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None:
+ """
+ Defines the input source with a camera id to initialize and open an sl.Camera object from.
+ :param id: Id of the desired camera to open.
+ :param bus_type: sl.BUS_TYPE of the desired camera to open.
+ """
+ pass
+
+ def set_from_serial_number(self, serial_number) -> None:
+ """
+ Defines the input source with a serial number to initialize and open an sl.Camera object from.
+ :param serial_number: Serial number of the desired camera to open.
+ """
+ pass
+
+ def set_from_svo_file(self, svo_input_filename) -> None:
+ """
+ Defines the input source with an SVO file to initialize and open an sl.Camera object from.
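+
+ Minimal playback sketch (the path is illustrative; assumes the SVO file exists):
+
+ .. code-block:: text
+
+ init_params = sl.InitParameters()
+ init_params.set_from_svo_file("/path/to/file.svo")
+ init_params.svo_real_time_mode = False # Grab frames as fast as possible
+ zed = sl.Camera()
+ err = zed.open(init_params)
+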
+ :param svo_input_filename: Path to the desired SVO file to open.
+ """
+ pass
+
+ def set_from_stream(self, sender_ip, port = 30000) -> None:
+ """
+ Defines the input source from a stream to initialize and open an sl.Camera object from.
+ :param sender_ip: IP address of the streaming sender.
+ :param port: Port on which to listen. Default: 30000
+ """
+ pass
+
+
+class RuntimeParameters:
+ """
+ Class containing parameters that define the behavior of sl.Camera.grab().
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def texture_confidence_threshold(self) -> int:
+ """
+ Threshold to reject depth values based on their texture confidence.
+
+ The texture confidence range is [1, 100].
+ \n Decreasing this value will remove depth data from image areas which are uniform.
+ \n Default: 100 (no depth pixel will be rejected)
+ .. note::
+ Pixels with a value close to 100 are not to be trusted. Accurate depth pixels tend to have lower values.
+ """
+ return int()
+
+ @texture_confidence_threshold.setter
+ def texture_confidence_threshold(self, texture_confidence_threshold: Any) -> None:
+ pass
+
+ @property
+ def measure3D_reference_frame(self) -> REFERENCE_FRAME:
+ """
+ Reference frame in which to provide the 3D measures (point cloud, normals, etc.).
+
+ Default: sl.REFERENCE_FRAME.CAMERA
+ """
+ return REFERENCE_FRAME()
+
+ @measure3D_reference_frame.setter
+ def measure3D_reference_frame(self, measure3D_reference_frame: Any) -> None:
+ pass
+
+ @property
+ def confidence_threshold(self) -> int:
+ """
+ Threshold to reject depth values based on their confidence.
+
+ Each depth pixel has a corresponding confidence (sl.MEASURE.CONFIDENCE) in the range [1, 100].
+ \n Decreasing this value will remove depth data from both objects edges and low textured areas, to keep only confident depth estimation data.
+ \n Default: 95
+ .. note::
+ Pixels with a value close to 100 are not to be trusted. Accurate depth pixels tend to have lower values.
+
+ .. note::
+ It can be seen as a probability of error, scaled to 100.
+ """
+ return int()
+
+ @confidence_threshold.setter
+ def confidence_threshold(self, confidence_threshold: Any) -> None:
+ pass
+
+ @property
+ def enable_fill_mode(self) -> bool:
+ """
+ Defines if the depth map should be completed or not.
+
+ Default: False
+ .. note::
+ It is similar to the removed sl.SENSING_MODE.FILL.
+
+ .. warning:: Enabling this will override the confidence values confidence_threshold and texture_confidence_threshold as well as remove_saturated_areas.
+ """
+ return bool()
+
+ @enable_fill_mode.setter
+ def enable_fill_mode(self, enable_fill_mode: Any) -> None:
+ pass
+
+ @property
+ def enable_depth(self) -> bool:
+ """
+ Defines if the depth map should be computed.
+
+ Default: True
+ .. note::
+ If set to False, only the images are available.
+ """
+ return bool()
+
+ @enable_depth.setter
+ def enable_depth(self, enable_depth: Any) -> None:
+ pass
+
+ @property
+ def remove_saturated_areas(self) -> bool:
+ """
+ Defines if saturated areas (luminance >= 255) must be removed from the depth map estimation.
+
+ Default: True
+ .. note::
+ It is recommended to keep this parameter at True because saturated areas can create false detections.
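+
+ Sketch of a grab loop combining this flag with the confidence filters above (threshold values are illustrative; zed and depth are assumed to be an opened sl.Camera and an sl.Mat):
+
+ .. code-block:: text
+
+ runtime = sl.RuntimeParameters()
+ runtime.confidence_threshold = 50 # Keep only confident depth pixels
+ runtime.texture_confidence_threshold = 90
+ runtime.remove_saturated_areas = True
+ if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
+ zed.retrieve_measure(depth, sl.MEASURE.DEPTH)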
+ """ + return bool() + + @remove_saturated_areas.setter + def remove_saturated_areas(self, remove_saturated_areas: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default values. + :param enable_depth: Activates enable_depth + :param enable_fill_mode: Activates enable_fill_mode + :param confidence_threshold: Chosen confidence_threshold + :param texture_confidence_threshold: Chosen texture_confidence_threshold + :param measure3D_reference_frame: Chosen measure3D_reference_frame + :param remove_saturated_areas: Activates remove_saturated_areas + """ + pass + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set). + :return: True if the file was successfully saved, otherwise False. + .. warning:: For security reasons, the file must not already exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated. + """ + return bool() + + def load(self, filename: str) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. + :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not detected). + :return: True if the file was successfully loaded, otherwise False. + """ + return bool() + + +class PositionalTrackingParameters: + """ + Class containing a set of parameters for the positional tracking module initialization. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def enable_2d_ground_mode(self) -> bool: + """ + Whether to enable 2D localization mode + """ + return bool() + + @enable_2d_ground_mode.setter + def enable_2d_ground_mode(self, enable_2d_ground_mode: Any) -> None: + pass + + @property + def set_gravity_as_origin(self) -> bool: + """ + Whether to override 2 of the 3 rotations from initial_world_transform using the IMU gravity. + Default: True + .. note:: + This parameter does nothing on sl.ZED.MODEL since it does not have an IMU. + """ + return bool() + + @set_gravity_as_origin.setter + def set_gravity_as_origin(self, set_gravity_as_origin: Any) -> None: + pass + + @property + def enable_area_memory(self) -> bool: + """ + Whether the camera can remember its surroundings. + This helps correct positional tracking drift and can be helpful for positioning different cameras relative to one other in space. + \n Default: true + + .. warning:: This mode requires more resources to run, but greatly improves tracking accuracy. + .. warning:: We recommend leaving it on by default. + """ + return bool() + + @enable_area_memory.setter + def enable_area_memory(self, enable_area_memory: Any) -> None: + pass + + @property + def area_file_path(self) -> str: + """ + Path of an area localization file that describes the surroundings (saved from a previous tracking session). + Default: (empty) + .. note:: + Loading an area file will start a search phase, during which the camera will try to position itself in the previously learned area. + + .. warning:: The area file describes a specific location. 
If you are using an area file describing a different location, the tracking function will continuously search for a position and may not find a correct one. + .. warning:: The '.area' file can only be used with the same depth mode (sl.DEPTH_MODE) as the one used during area recording. + """ + return str() + + @area_file_path.setter + def area_file_path(self, area_file_path: Any) -> None: + pass + + @property + def mode(self) -> POSITIONAL_TRACKING_MODE: + """ + Positional tracking mode used. + Can be used to improve accuracy in some types of scene at the cost of longer runtime. + \n Default: sl.POSITIONAL_TRACKING_MODE.GEN_1 + """ + return POSITIONAL_TRACKING_MODE() + + @mode.setter + def mode(self, mode: Any) -> None: + pass + + @property + def set_floor_as_origin(self) -> bool: + """ + Initializes the tracking to be aligned with the floor plane to better position the camera in space. + Default: False + .. note:: + This launches floor plane detection in the background until a suitable floor plane is found. + + .. note:: + The tracking will start in sl.POSITIONAL_TRACKING_STATE.SEARCHING state. + + .. warning:: This features does not work with sl.MODEL.ZED since it needs an IMU to classify the floor. + .. warning:: The camera needs to look at the floor during initialization for optimum results. + """ + return bool() + + @set_floor_as_origin.setter + def set_floor_as_origin(self, set_floor_as_origin: Any) -> None: + pass + + @property + def set_as_static(self) -> bool: + """ + Whether to define the camera as static. + If true, it will not move in the environment. This allows you to set its position using initial_world_transform. + \n All ZED SDK functionalities requiring positional tracking will be enabled without additional computation. + \n sl.Camera.get_position() will return the value set as initial_world_transform. + Default: False + """ + return bool() + + @set_as_static.setter + def set_as_static(self, set_as_static: Any) -> None: + pass + + @property + def enable_imu_fusion(self) -> bool: + """ + Whether to enable the IMU fusion. + When set to False, only the optical odometry will be used. + \n Default: True + .. note:: + This setting has no impact on the tracking of a camera. + + .. note:: + sl.MODEL.ZED does not have an IMU. + """ + return bool() + + @enable_imu_fusion.setter + def enable_imu_fusion(self, enable_imu_fusion: Any) -> None: + pass + + @property + def enable_localization_only(self) -> bool: + """ + Whether to enable the area mode in localize only mode. + """ + return bool() + + @enable_localization_only.setter + def enable_localization_only(self, enable_localization_only: Any) -> None: + pass + + @property + def depth_min_range(self) -> float: + """ + Minimum depth used by the ZED SDK for positional tracking. + It may be useful for example if any steady objects are in front of the camera and may perturb the positional tracking algorithm. + \n Default: -1 (no minimum depth) + """ + return float() + + @depth_min_range.setter + def depth_min_range(self, depth_min_range: Any) -> None: + pass + + @property + def enable_pose_smoothing(self) -> bool: + """ + Whether to enable smooth pose correction for small drift correction. + Default: False + """ + return bool() + + @enable_pose_smoothing.setter + def enable_pose_smoothing(self, enable_pose_smoothing: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. 
+ :param _init_pos: Chosen initial camera position in the world frame (Transform) + :param _enable_memory: Activates enable_memory + :param _enable_pose_smoothing: Activates enable_pose_smoothing + :param _area_path: Chosen area_path + :param _set_floor_as_origin: Activates set_floor_as_origin + :param _enable_imu_fusion: Activates enable_imu_fusion + :param _set_as_static: Activates set_as_static + :param _depth_min_range: Activates depth_min_range + :param _set_gravity_as_origin: Activates set_gravity_as_origin + :param _mode: Chosen mode + + .. code-block:: text + + params = sl.PositionalTrackingParameters(init_pos=sl.Transform(), _enable_pose_smoothing=True) + """ + pass + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters. + :return: True if the file was successfully saved, otherwise False. + .. warning:: For security reasons, the file must not already exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated. + """ + return bool() + + def load(self, filename: str) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. + :param filename: Path to the file from which the parameters will be loaded. + :return: True if the file was successfully loaded, otherwise False. + """ + return bool() + + def initial_world_transform(self, init_pos = Transform()) -> Transform: + """ + Position of the camera in the world frame when the camera is started. + Use this sl.Transform to place the camera frame in the world frame. + \n Default: Identity matrix. + + .. note:: + The camera frame (which defines the reference frame for the camera) is by default positioned at the world frame when tracking is started. + """ + return Transform() + + def set_initial_world_transform(self, value: Transform) -> None: + """ + Set the position of the camera in the world frame when the camera is started. + :param value: Position of the camera in the world frame when the camera will start. + """ + pass + + +class STREAMING_CODEC(enum.Enum): + """ + Lists the different encoding types for image streaming. + + | Enumerator | | + |:---:|:---:| + | H264 | AVCHD/H264 encoding | + | H265 | HEVC/H265 encoding | + """ + H264 = enum.auto() + H265 = enum.auto() + LAST = enum.auto() + +class StreamingProperties: + """ + Class containing information about the properties of a streaming device. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def serial_number(self) -> int: + """ + Serial number of the streaming camera. + + Default: 0 + """ + return int() + + @serial_number.setter + def serial_number(self, serial_number: Any) -> None: + pass + + @property + def ip(self) -> str: + """ + IP address of the streaming device. + + Default: "" + """ + return str() + + @ip.setter + def ip(self, ip: Any) -> None: + pass + + @property + def port(self) -> int: + """ + Streaming port of the streaming device. + + Default: 0 + """ + return int() + + @port.setter + def port(self, port: Any) -> None: + pass + + @property + def codec(self) -> STREAMING_CODEC: + """ + Current codec used for compression in streaming device. 
+
+ Default: sl.STREAMING_CODEC.H265
+ """
+ return STREAMING_CODEC()
+
+ @codec.setter
+ def codec(self, codec: Any) -> None:
+ pass
+
+ @property
+ def current_bitrate(self) -> int:
+ """
+ Current encoding bitrate of the streaming device.
+
+ Default: 0
+ """
+ return int()
+
+ @current_bitrate.setter
+ def current_bitrate(self, current_bitrate: Any) -> None:
+ pass
+
+
+class StreamingParameters:
+ """
+ Class containing the options used to stream with the ZED SDK.
+
+ The default constructor sets all parameters to their default settings.
+ .. note::
+ Parameters can be adjusted by the user.
+ """
+ def __init__(self, *args, **kwargs) -> None: ...
+
+ @property
+ def gop_size(self) -> int:
+ """
+ GOP size in number of frames.
+
+ Default: -1 (the GOP size will last at maximum 2 seconds, depending on camera FPS)
+ .. note::
+ The GOP size determines the maximum distance between IDR/I-frames. A very high GOP size will result in slightly more efficient compression, especially on static scenes, but latency will increase.
+
+ .. note::
+ Maximum value: 256
+ """
+ return int()
+
+ @gop_size.setter
+ def gop_size(self, gop_size: Any) -> None:
+ pass
+
+ @property
+ def adaptative_bitrate(self) -> bool:
+ """
+ Defines whether the adaptive bitrate is enabled.
+
+ Default: False
+ .. note::
+ The bitrate will be adjusted depending on the number of packets dropped during streaming.
+
+ .. note::
+ If activated, the bitrate can vary between [bitrate/4, bitrate].
+
+ .. warning:: Currently, the adaptive bitrate only works when the "sending" device is an NVIDIA Jetson (X1, X2, Xavier, Nano).
+ """
+ return bool()
+
+ @adaptative_bitrate.setter
+ def adaptative_bitrate(self, adaptative_bitrate: Any) -> None:
+ pass
+
+ @property
+ def target_framerate(self) -> int:
+ """
+ Framerate for the streaming output.
+
+ Default: 0 (camera framerate will be taken)
+ .. warning:: This framerate must be below or equal to the camera framerate.
+ .. warning:: Allowed framerates are 15, 30, 60 or 100 if possible.
+ .. warning:: Any other values will be discarded and camera FPS will be taken.
+ """
+ return int()
+
+ @target_framerate.setter
+ def target_framerate(self, target_framerate: Any) -> None:
+ pass
+
+ @property
+ def chunk_size(self) -> int:
+ """
+ Size of a single chunk.
+
+ Default: 16084
+ .. note::
+ Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long.
+
+ .. note::
+ You can lower the chunk_size value if the network drops a lot of packets: this will
+
+ generate more chunks for a single image, but each chunk sent will be lighter, which avoids inside-chunk corruption.
+ .. note::
+ Increasing this value can decrease latency.
+
+
+ \n Note: Available range: [1024 - 65000]
+ """
+ return int()
+
+ @chunk_size.setter
+ def chunk_size(self, chunk_size: Any) -> None:
+ pass
+
+ @property
+ def port(self) -> int:
+ """
+ Port used for streaming.
+ .. warning:: Port must be an even number. Any odd number will be rejected.
+ .. warning:: The port must be open.
+ """
+ return int()
+
+ @port.setter
+ def port(self, port: Any) -> None:
+ pass
+
+ @property
+ def codec(self) -> STREAMING_CODEC:
+ """
+ Encoding used for streaming.
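+
+ Sketch of a full sender-side configuration (port and bitrate values are illustrative; zed is an opened sl.Camera):
+
+ .. code-block:: text
+
+ stream_params = sl.StreamingParameters()
+ stream_params.codec = sl.STREAMING_CODEC.H265
+ stream_params.port = 30000
+ stream_params.bitrate = 12000
+ err = zed.enable_streaming(stream_params)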
+ """ + return STREAMING_CODEC() + + @codec.setter + def codec(self, codec: Any) -> None: + pass + + @property + def bitrate(self) -> int: + """ + Defines the streaming bitrate in Kbits/s + | STREAMING_CODEC | RESOLUTION | FPS | Bitrate (kbps) | + |:---:|:---:|:---:|:---:| + | H264 | HD2K | 15 | 8500 | + | H264 | HD1080 | 30 | 12500 | + | H264 | HD720 | 60 | 7000 | + | H265 | HD2K | 15 | 7000 | + | H265 | HD1080 | 30 | 11000 | + | H265 | HD720 | 60 | 6000 | + + Default: 0 (it will be set to the best value depending on your resolution/FPS) + .. note:: + Available range: [1000 - 60000] + """ + return int() + + @bitrate.setter + def bitrate(self, bitrate: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default values. + :param codec: Chosen codec + :param port: Chosen port + :param bitrate: Chosen bitrate + :param gop_size: Chosen gop_size + :param adaptative_bitrate: Activtates adaptative_bitrate + :param chunk_size: Chosen chunk_size + :param target_framerate: Chosen target_framerate + + .. code-block:: text + + params = sl.StreamingParameters(port=30000) + """ + pass + + +class RecordingParameters: + """ + Class containing the options used to record. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def transcode_streaming_input(self) -> bool: + """ + Defines whether to decode and re-encode a streaming source. + + Default: False + .. note:: + If set to False, it will avoid decoding/re-encoding and convert directly streaming input into a SVO file. + + .. note:: + This saves a encoding session and can be especially useful on NVIDIA Geforce cards where the number of encoding session is limited. + + .. note:: + compression_mode, target_framerate and bitrate will be ignored in this mode. + """ + return bool() + + @transcode_streaming_input.setter + def transcode_streaming_input(self, transcode_streaming_input: Any) -> None: + pass + + @property + def target_framerate(self) -> int: + """ + Framerate for the recording file. + + Default: 0 (camera framerate will be taken) + .. warning:: This framerate must be below or equal to the camera framerate and camera framerate must be a multiple of the target framerate. + .. warning:: It means that it must respect `` camera_framerate%target_framerate == 0``. + .. warning:: Allowed framerates are 15,30, 60 or 100 if possible. + .. warning:: Any other values will be discarded and camera FPS will be taken. + """ + return int() + + @target_framerate.setter + def target_framerate(self, target_framerate: Any) -> None: + pass + + @property + def compression_mode(self) -> SVO_COMPRESSION_MODE: + """ + Compression mode the recording. + + Default: sl.SVO_COMPRESSION_MODE.H264 + """ + return SVO_COMPRESSION_MODE() + + @compression_mode.setter + def compression_mode(self, compression_mode: Any) -> None: + pass + + @property + def bitrate(self) -> int: + """ + Overrides the default bitrate of the SVO file, in kbits/s. + + Default: 0 (the default values associated with the resolution) + .. note:: + Only works if compression_mode is H264 or H265. + + .. note:: + Available range: 0 or [1000 - 60000] + """ + return int() + + @bitrate.setter + def bitrate(self, bitrate: Any) -> None: + pass + + @property + def video_filename(self) -> str: + """ + Filename of the file to save the recording into. 
+ """ + return str() + + @video_filename.setter + def video_filename(self, video_filename: Any) -> None: + pass + + def __dealloc__(self) -> None: + """ + Default constructor. + + All the parameters are set to their default values. + :param video_filename: Chosen video_filename + :param compression_mode: Chosen compression_mode + :param target_framerate: Chosen target_framerate + :param bitrate: Chosen bitrate + :param transcode_streaming_input: Enables transcode_streaming_input + + .. code-block:: text + + params = sl.RecordingParameters(video_filename="record.svo",compression_mode=SVO_COMPRESSION_MODE.H264) + """ + pass + + +class SpatialMappingParameters: + """ + Class containing a set of parameters for the spatial mapping module. + + The default constructor sets all parameters to their default settings. + .. note:: + Parameters can be adjusted by the user. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def save_texture(self) -> bool: + """ + Whether to save the texture. + If set to true, you will be able to apply the texture to your mesh after it is created. + \n Default: False + .. note:: + This option will consume more memory. + + .. note:: + This option is only available for sl.SPATIAL_MAP_TYPE.MESH. + """ + return bool() + + @save_texture.setter + def save_texture(self, save_texture: Any) -> None: + pass + + @property + def range_meter(self) -> float: + """ + Depth range in meters. + Can be different from the value set by sl.InitParameters.depth_maximum_distance. + .. note:: + Set to 0 by default. In this case, the range is computed from resolution_meter + + and from the current internal parameters to fit your application. + """ + return float() + + @range_meter.setter + def range_meter(self, range_meter: Any) -> None: + pass + + @property + def map_type(self) -> SPATIAL_MAP_TYPE: + """ + The type of spatial map to be created. + This dictates the format that will be used for the mapping (e.g. mesh, point cloud). + \n See sl.SPATIAL_MAP_TYPE. + """ + return SPATIAL_MAP_TYPE() + + @map_type.setter + def map_type(self, map_type: Any) -> None: + pass + + @property + def use_chunk_only(self) -> bool: + """ + Whether to only use chunks. + If set to False, you will ensure consistency between the mesh and its inner chunk data. + \n Default: False + .. note:: + Updating the mesh is time-consuming. + + .. note:: + Setting this to True results in better performance. + """ + return bool() + + @use_chunk_only.setter + def use_chunk_only(self, use_chunk_only: Any) -> None: + pass + + @property + def reverse_vertex_order(self) -> bool: + """ + Whether to inverse the order of the vertices of the triangles. + If your display process does not handle front and back face culling, you can use this to correct it. + \n Default: False + .. note:: + This option is only available for sl.SPATIAL_MAP_TYPE.MESH. + """ + return bool() + + @reverse_vertex_order.setter + def reverse_vertex_order(self, reverse_vertex_order: Any) -> None: + pass + + @property + def stability_counter(self) -> int: + """ + Control the integration rate of the current depth into the mapping process. + This parameter controls how many times a stable 3D points should be seen before it is integrated into the spatial mapping. 
+ \n Default: 0 (this will define the stability counter based on the mesh resolution, the higher the resolution, the higher the stability counter)
+ """
+ return int()
+
+ @stability_counter.setter
+ def stability_counter(self, stability_counter: Any) -> None:
+ pass
+
+ @property
+ def resolution_meter(self) -> float:
+ """
+ Spatial mapping resolution in meters.
+ Default: 0.05
+ .. note::
+ It should fit allowed_resolution.
+ """
+ return float()
+
+ @resolution_meter.setter
+ def resolution_meter(self, resolution_meter: Any) -> None:
+ pass
+
+ @property
+ def max_memory_usage(self) -> int:
+ """
+ The maximum CPU memory (in MB) allocated for the meshing process.
+ Default: 2048
+ """
+ return int()
+
+ @max_memory_usage.setter
+ def max_memory_usage(self, max_memory_usage: Any) -> None:
+ pass
+
+ def __dealloc__(self) -> None:
+ """
+ Default constructor.
+ Sets all parameters to their default and optimized values.
+ :param resolution: Chosen MAPPING_RESOLUTION
+ :param mapping_range: Chosen MAPPING_RANGE
+ :param max_memory_usage: Chosen max_memory_usage
+ :param save_texture: Activates save_texture
+ :param use_chunk_only: Activates use_chunk_only
+ :param reverse_vertex_order: Activates reverse_vertex_order
+ :param map_type: Chosen map_type
+
+ .. code-block:: text
+
+ params = sl.SpatialMappingParameters(resolution=sl.MAPPING_RESOLUTION.HIGH)
+ """
+ pass
+
+ def set_resolution(self, resolution = MAPPING_RESOLUTION.HIGH) -> None:
+ """
+ Sets the resolution to a sl.MAPPING_RESOLUTION preset.
+ :param resolution: The desired sl.MAPPING_RESOLUTION. Default: sl.MAPPING_RESOLUTION.HIGH
+ """
+ pass
+
+ def set_range(self, mapping_range = MAPPING_RANGE.AUTO) -> None:
+ """
+ Sets the range to a sl.MAPPING_RANGE preset.
+ :param mapping_range: The desired sl.MAPPING_RANGE. Default: sl.MAPPING_RANGE.AUTO
+ """
+ pass
+
+ def get_range_preset(self, mapping_range = MAPPING_RANGE.AUTO) -> float:
+ """
+ Returns the value corresponding to a sl.MAPPING_RANGE preset in meters.
+ :param mapping_range: The desired sl.MAPPING_RANGE. Default: sl.MAPPING_RANGE.AUTO
+ :return: The value of **mapping_range** in meters.
+ """
+ return float()
+
+ def get_resolution_preset(self, resolution = MAPPING_RESOLUTION.HIGH) -> float:
+ """
+ Returns the value corresponding to a sl.MAPPING_RESOLUTION preset in meters.
+ :param resolution: The desired sl.MAPPING_RESOLUTION. Default: sl.MAPPING_RESOLUTION.HIGH
+ :return: The value of **resolution** in meters.
+ """
+ return float()
+
+ def get_recommended_range(self, resolution, py_cam: Camera) -> float:
+ """
+ Returns the recommended maximum depth value corresponding to a resolution.
+ :param resolution: The desired resolution, either defined by a sl.MAPPING_RESOLUTION preset or a resolution value in meters.
+ :param py_cam: The sl.Camera object which will run the spatial mapping.
+ :return: The maximum value of depth in meters.
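+
+ Sketch (assumes an opened sl.Camera named zed; presets are those defined by this class):
+
+ .. code-block:: text
+
+ mapping_params = sl.SpatialMappingParameters()
+ mapping_params.resolution_meter = mapping_params.get_resolution_preset(sl.MAPPING_RESOLUTION.MEDIUM)
+ mapping_params.range_meter = mapping_params.get_recommended_range(sl.MAPPING_RESOLUTION.MEDIUM, zed)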
+ """ + return float() + + def allowed_range(self) -> np.array[float]: + """ + The maximum depth allowed by spatial mapping: + - **allowed_range.first is the minimum value allowed + - **allowed_range.second is the maximum value allowed + """ + return np.array[float]() + + def allowed_resolution(self) -> np.array[float]: + """ + The resolution allowed by the spatial mapping: + - **allowed_resolution.first is the minimum value allowed + - **allowed_resolution.second is the maximum value allowed + """ + return np.array[float]() + + def save(self, filename: str) -> bool: + """ + Saves the current set of parameters into a file to be reloaded with the load() method. + :param filename: Name of the file which will be created to store the parameters (extension '.yml' will be added if not set). + :return: True if the file was successfully saved, otherwise False. + .. warning:: For security reasons, the file must not already exist. + .. warning:: In case a file already exists, the method will return False and existing file will not be updated. + """ + return bool() + + def load(self, filename: str) -> bool: + """ + Loads a set of parameters from the values contained in a previously save() "saved" file. + :param filename: Path to the file from which the parameters will be loaded (extension '.yml' will be added at the end of the filename if not detected). + :return: True if the file was successfully loaded, otherwise False. + """ + return bool() + + +class Pose: + """ + Class containing positional tracking data giving the position and orientation of the camera in 3D space. + + Different representations of position and orientation can be retrieved, along with timestamp and pose confidence. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @pose_covariance.setter + def pose_covariance(self, pose_covariance: Any) -> None: + pass + + @property + def pose_confidence(self) -> int: + """ + Confidence/quality of the pose estimation for the target frame. + A confidence metric of the tracking [0-100] with: + - 0: tracking is lost + - 100: tracking can be fully trusted + """ + return int() + + @pose_confidence.setter + def pose_confidence(self, pose_confidence: Any) -> None: + pass + + @property + def valid(self) -> bool: + """ + Whether the tracking is activated or not. + .. note:: + You should check that first if something is wrong. + """ + return bool() + + @valid.setter + def valid(self, valid: Any) -> None: + pass + + @twist.setter + def twist(self, twist: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + sl.Timestamp of the sl.Pose. + This timestamp should be compared with the camera timestamp for synchronization. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @twist_covariance.setter + def twist_covariance(self, twist_covariance: Any) -> None: + pass + + def init_pose(self, pose: Pose) -> None: + """ + Deep copy from another sl.Pose. + :param pose: sl.Pose to copy. + """ + pass + + def init_transform(self, pose_data: Transform, timestamp = 0, confidence = 0) -> None: + """ + Initializes the sl.Pose from a sl.Transform. + :param pose_data: sl.Transform containing pose data to copy. + :param timestamp: Timestamp of the pose data. + :param confidence: Confidence of the pose data. + """ + pass + + def get_translation(self, py_translation = Translation()) -> Translation: + """ + Returns the sl.Translation corresponding to the current sl.Pose. + :param py_translation: sl.Translation to be returned. 
It creates one by default.
+ :return: sl.Translation filled with values from the sl.Pose.
+ """
+ return Translation()
+
+ def get_orientation(self, py_orientation = Orientation()) -> Orientation:
+ """
+ Returns the sl.Orientation corresponding to the current sl.Pose.
+ :param py_orientation: sl.Orientation to be returned. It creates one by default.
+ :return: sl.Orientation filled with values from the sl.Pose.
+ """
+ return Orientation()
+
+ def get_rotation_matrix(self, py_rotation = Rotation()) -> Rotation:
+ """
+ Returns the sl.Rotation corresponding to the current sl.Pose.
+ :param py_rotation: sl.Rotation to be returned. It creates one by default.
+ :return: sl.Rotation filled with values from the sl.Pose.
+ """
+ return Rotation()
+
+ def get_rotation_vector(self) -> np.array[float]:
+ """
+ Returns the 3x1 rotation vector (obtained from the 3x3 rotation matrix using the Rodrigues formula) corresponding to the current sl.Pose.
+ :return: Rotation vector (NumPy array) created from the sl.Pose values.
+ """
+ return np.array[float]()
+
+ def get_euler_angles(self, radian = True) -> np.array[float]:
+ """
+ Converts the rotation component of the sl.Pose into Euler angles.
+ :param radian: Whether the angles will be returned in radians or degrees. Default: True
+ :return: Euler angles (NumPy array) created from the sl.Pose values representing the rotations around the X, Y and Z axes using YZX convention.
+ """
+ return np.array[float]()
+
+ def pose_data(self, pose_data = Transform()) -> Transform:
+ """
+ sl.Transform containing the rotation and translation data of the sl.Pose.
+ :param pose_data: sl.Transform to be returned. It creates one by default.
+ :return: sl.Transform containing the rotation and translation data of the sl.Pose.
+ """
+ return Transform()
+
+ def pose_covariance(self) -> np.array[float]:
+ """
+ 6x6 pose covariance matrix (NumPy array) of translation (the first 3 values) and rotation in so3 (the last 3 values).
+ .. note::
+ Computed only if PositionalTrackingParameters.enable_area_memory is disabled.
+ """
+ return np.array[float]()
+
+ def twist(self) -> np.array[float]:
+ """
+ Twist of the camera available in reference camera.
+ This expresses velocity in free space, broken into its linear and angular parts.
+ """
+ return np.array[float]()
+
+ def twist_covariance(self) -> np.array[float]:
+ """
+ Row-major representation of the 6x6 twist covariance matrix of the camera.
+ This expresses the uncertainty of the twist.
+ """
+ return np.array[float]()
+
+
+class CAMERA_MOTION_STATE(enum.Enum):
+ """
+ Lists different states of the camera motion.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | STATIC | The camera is static. |
+ | MOVING | The camera is moving. |
+ | FALLING | The camera is falling. |
+ """
+ STATIC = enum.auto()
+ MOVING = enum.auto()
+ FALLING = enum.auto()
+ LAST = enum.auto()
+
+class SENSOR_LOCATION(enum.Enum):
+ """
+ Lists possible locations of temperature sensors.
+
+ | Enumerator | |
+ |:---:|:---:|
+ | IMU | The temperature sensor is in the IMU. |
+ | BAROMETER | The temperature sensor is in the barometer. |
+ | ONBOARD_LEFT | The temperature sensor is next to the left image sensor. |
+ | ONBOARD_RIGHT | The temperature sensor is next to the right image sensor.
| + """ + IMU = enum.auto() + BAROMETER = enum.auto() + ONBOARD_LEFT = enum.auto() + ONBOARD_RIGHT = enum.auto() + LAST = enum.auto() + +class BarometerData: + """ + Class containing data from the barometer sensor. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def pressure(self) -> float: + """ + Ambient air pressure in hectopascal (hPa). + """ + return float() + + @pressure.setter + def pressure(self, pressure: Any) -> None: + pass + + @property + def effective_rate(self) -> float: + """ + Realtime data acquisition rate in hertz (Hz). + """ + return float() + + @effective_rate.setter + def effective_rate(self, effective_rate: Any) -> None: + pass + + @property + def relative_altitude(self) -> float: + """ + Relative altitude from first camera position (at sl.Camera.open() time). + """ + return float() + + @relative_altitude.setter + def relative_altitude(self, relative_altitude: Any) -> None: + pass + + @property + def is_available(self) -> bool: + """ + Whether the barometer sensor is available in your camera. + """ + return bool() + + @is_available.setter + def is_available(self, is_available: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + Data acquisition timestamp. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + +class TemperatureData: + """ + Class containing data from the temperature sensors. + """ + def __init__(self, *args, **kwargs) -> None: ... + + def get(self, location) -> float: + """ + Gets the temperature value at a temperature sensor location. + :param location: Location of the temperature sensor to request. + :return: Temperature at the requested location. + """ + return float() + + +class HEADING_STATE(enum.Enum): + """ + Lists the different states of the magnetic heading. + + | Enumerator | | + |:---:|:---:| + | GOOD | The heading is reliable and not affected by iron interferences. | + | OK | The heading is reliable, but affected by slight iron interferences. | + | NOT_GOOD | The heading is not reliable because affected by strong iron interferences. | + | NOT_CALIBRATED | The magnetometer has not been calibrated. | + | MAG_NOT_AVAILABLE | The magnetometer sensor is not available. | + """ + GOOD = enum.auto() + OK = enum.auto() + NOT_GOOD = enum.auto() + NOT_CALIBRATED = enum.auto() + MAG_NOT_AVAILABLE = enum.auto() + LAST = enum.auto() + +class MagnetometerData: + """ + Class containing data from the magnetometer sensor. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def magnetic_heading_state(self) -> HEADING_STATE: + """ + State of magnetic_heading. + """ + return HEADING_STATE() + + @magnetic_heading_state.setter + def magnetic_heading_state(self, magnetic_heading_state: Any) -> None: + pass + + @property + def magnetic_heading_accuracy(self) -> float: + """ + Accuracy of magnetic_heading measure in the range [0.0, 1.0]. + .. note:: + A negative value means that the magnetometer must be calibrated using **ZED **Sensor **Viewer tool. + """ + return float() + + @magnetic_heading_accuracy.setter + def magnetic_heading_accuracy(self, magnetic_heading_accuracy: Any) -> None: + pass + + @property + def effective_rate(self) -> float: + """ + Realtime data acquisition rate in hertz (Hz). + """ + return float() + + @effective_rate.setter + def effective_rate(self, effective_rate: Any) -> None: + pass + + @property + def magnetic_heading(self) -> float: + """ + Camera heading in degrees relative to the magnetic North Pole. + .. 
note:: + The magnetic North Pole has an offset with respect to the geographic North Pole, depending on the geographic position of the camera. + + .. note:: + To get a correct magnetic heading, the magnetometer sensor must be calibrated using **ZED **Sensor **Viewer tool. + """ + return float() + + @magnetic_heading.setter + def magnetic_heading(self, magnetic_heading: Any) -> None: + pass + + @property + def timestamp(self) -> int: + """ + Data acquisition timestamp. + """ + return int() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @property + def is_available(self) -> bool: + """ + Whether the magnetometer sensor is available in your camera. + """ + return bool() + + @is_available.setter + def is_available(self, is_available: Any) -> None: + pass + + def get_magnetic_field_uncalibrated(self) -> np.array[float]: + """ + Gets the uncalibrated magnetic field local vector in microtesla (μT). + .. note:: + The magnetometer raw values are affected by soft and hard iron interferences. + + .. note:: + The sensor must be calibrated by placing the camera in the working environment and using **ZED **Sensor **Viewer tool. + + .. note:: + Not available in SVO or STREAM mode. + """ + return np.array[float]() + + def get_magnetic_field_calibrated(self) -> np.array[float]: + """ + Gets the magnetic field local vector in microtesla (μT). + .. note:: + To calibrate the magnetometer sensor, please use **ZED **Sensor **Viewer tool after placing the camera in the final operating environment. + """ + return np.array[float]() + + +class SensorsData: + """ + Class containing all sensors data (except image sensors) to be used for positional tracking or environment study. + + .. note:: + Some data are not available in SVO and streaming input mode. + + .. note:: + They are specified by a note "Not available in SVO or STREAM mode." in the documentation of a specific data. + + .. note:: + If nothing is mentioned in the documentation, they are available in all input modes. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_moving_state(self) -> CAMERA_MOTION_STATE: + """ + Motion state of the camera. + """ + return CAMERA_MOTION_STATE() + + @camera_moving_state.setter + def camera_moving_state(self, camera_moving_state: Any) -> None: + pass + + @property + def image_sync_trigger(self) -> int: + """ + Indicates if the sensors data has been taken during a frame capture on sensor. + If the value is 1, the data has been retrieved during a left sensor frame acquisition (the time precision is linked to the IMU rate, therefore 800Hz == 1.3ms). + \n If the value is 0, the data has not been taken during a frame acquisition. + """ + return int() + + @image_sync_trigger.setter + def image_sync_trigger(self, image_sync_trigger: Any) -> None: + pass + + def init_sensorsData(self, sensorsData: SensorsData) -> None: + """ + Copy constructor. + :param sensorsData: sl.SensorsData object to copy. + """ + pass + + def get_imu_data(self) -> IMUData: + """ + Gets the IMU data. + :return: sl.IMUData containing the IMU data. + """ + return IMUData() + + def get_barometer_data(self) -> BarometerData: + """ + Gets the barometer data. + :return: sl.BarometerData containing the barometer data. + """ + return BarometerData() + + def get_magnetometer_data(self) -> MagnetometerData: + """ + Gets the magnetometer data. + :return: sl.MagnetometerData containing the magnetometer data. 
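+
+ Polling sketch (assumes an opened sl.Camera named zed whose model includes a magnetometer):
+
+ .. code-block:: text
+
+ sensors = sl.SensorsData()
+ if zed.get_sensors_data(sensors, sl.TIME_REFERENCE.CURRENT) == sl.ERROR_CODE.SUCCESS:
+ mag = sensors.get_magnetometer_data()
+ if mag.is_available:
+ print(mag.magnetic_heading, mag.magnetic_heading_state)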
+ """ + return MagnetometerData() + + def get_temperature_data(self) -> TemperatureData: + """ + Gets the temperature data. + :return: sl.TemperatureData containing the temperature data. + """ + return TemperatureData() + + +class IMUData: + """ + Class containing data from the IMU sensor. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def is_available(self) -> bool: + """ + Whether the IMU sensor is available in your camera. + """ + return bool() + + @is_available.setter + def is_available(self, is_available: Any) -> None: + pass + + @property + def timestamp(self) -> int: + """ + Data acquisition timestamp. + """ + return int() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + @property + def effective_rate(self) -> float: + """ + Realtime data acquisition rate in hertz (Hz). + """ + return float() + + @effective_rate.setter + def effective_rate(self, effective_rate: Any) -> None: + pass + + def get_angular_velocity_uncalibrated(self, angular_velocity_uncalibrated = [0, 0, 0]) -> list[float]: + """ + Gets the angular velocity vector (3x1) of the gyroscope in deg/s (uncorrected from the IMU calibration). + :param angular_velocity_uncalibrated: List to be returned. It creates one by default. + :return: List fill with the raw angular velocity vector. + .. note:: + The value is the exact raw values from the IMU. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_angular_velocity(self, angular_velocity = [0, 0, 0]) -> list[float]: + """ + Gets the angular velocity vector (3x1) of the gyroscope in deg/s. + The value is corrected from bias, scale and misalignment. + :param angular_velocity: List to be returned. It creates one by default. + :return: List fill with the angular velocity vector. + .. note:: + The value can be directly ingested in an IMU fusion algorithm to extract a quaternion. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_linear_acceleration(self, linear_acceleration = [0, 0, 0]) -> list[float]: + """ + Gets the linear acceleration vector (3x1) of the gyroscope in m/s². + The value is corrected from bias, scale and misalignment. + :param linear_acceleration: List to be returned. It creates one by default. + :return: List fill with the linear acceleration vector. + .. note:: + The value can be directly ingested in an IMU fusion algorithm to extract a quaternion. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_linear_acceleration_uncalibrated(self, linear_acceleration_uncalibrated = [0, 0, 0]) -> list[float]: + """ + Gets the linear acceleration vector (3x1) of the gyroscope in m/s² (uncorrected from the IMU calibration). + The value is corrected from bias, scale and misalignment. + :param linear_acceleration_uncalibrated: List to be returned. It creates one by default. + :return: List fill with the raw linear acceleration vector. + .. note:: + The value is the exact raw values from the IMU. + + .. note:: + Not available in SVO or STREAM mode. + """ + return list[float]() + + def get_angular_velocity_covariance(self, angular_velocity_covariance = Matrix3f()) -> Matrix3f: + """ + Gets the covariance matrix of the angular velocity of the gyroscope in deg/s (get_angular_velocity()). + :param angular_velocity_covariance: sl.Matrix3f to be returned. It creates one by default. + :return: sl.Matrix3f filled with the covariance matrix of the angular velocity. + .. 
note:: + Not available in SVO or STREAM mode. + """ + return Matrix3f() + + def get_linear_acceleration_covariance(self, linear_acceleration_covariance = Matrix3f()) -> Matrix3f: + """ + Gets the covariance matrix of the linear acceleration of the gyroscope in deg/s (get_angular_velocity()). + :param linear_acceleration_covariance: sl.Matrix3f to be returned. It creates one by default. + :return: sl.Matrix3f filled with the covariance matrix of the linear acceleration. + .. note:: + Not available in SVO or STREAM mode. + """ + return Matrix3f() + + def get_pose_covariance(self, pose_covariance = Matrix3f()) -> Matrix3f: + """ + Covariance matrix of the IMU pose (get_pose()). + :param pose_covariance: sl.Matrix3f to be returned. It creates one by default. + :return: sl.Matrix3f filled with the covariance matrix. + """ + return Matrix3f() + + def get_pose(self, pose = Transform()) -> Transform: + """ + IMU pose (IMU 6-DoF fusion). + :param pose: sl.Transform() to be returned. It creates one by default. + :return: sl.Transform filled with the IMU pose. + """ + return Transform() + + +class HealthStatus: + """ + Structure containing the self diagnostic results of the image/depth + That information can be retrieved by sl::Camera::get_health_status(), and enabled by sl::InitParameters::enable_image_validity_check + \n + The default value of sl::InitParameters::enable_image_validity_check is enabled using the fastest setting, + the integer given can be increased to include more advanced and heavier processing to detect issues (up to 3). + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def low_depth_reliability(self) -> bool: + """ + This status indicates low depth map reliability + If the image are unreliable or if the scene condition are very challenging this status report a warning. + This is using the depth confidence and general depth distribution. Typically due to obstructed eye (included very close object, + strong occlusions) or degraded condition like heavy fog/water on the optics + """ + return bool() + + @low_depth_reliability.setter + def low_depth_reliability(self, low_depth_reliability: Any) -> None: + pass + + @property + def enabled(self) -> bool: + """ + Indicates if the Health check is enabled + """ + return bool() + + @enabled.setter + def enabled(self, enabled: Any) -> None: + pass + + @property + def low_image_quality(self) -> bool: + """ + This status indicates poor image quality + It can indicates camera issue, like incorrect manual video settings, damaged hardware, corrupted video stream from the camera, + dirt or other partial or total occlusion, stuck ISP (black/white/green/purple images, incorrect exposure, etc), blurry images + It also includes widely different left and right images which leads to unavailable depth information + In case of very low light this will be reported by this status and the dedicated HealthStatus::low_lighting + + .. note:: + : Frame tearing is currently not detected. Advanced blur detection requires heavier processing and is enabled only when setting Initparameters::enable_image_validity_check to 3 and above + """ + return bool() + + @low_image_quality.setter + def low_image_quality(self, low_image_quality: Any) -> None: + pass + + @property + def low_motion_sensors_reliability(self) -> bool: + """ + This status indicates motion sensors data reliability issue. + This indicates the IMU is providing low quality data. 
+        Possible underlying causes relate to the data stream, such as corrupted data,
+        timestamp inconsistency, resonance frequencies, saturated sensors / very high acceleration or rotation, or shocks.
+        """
+        return bool()
+
+    @low_motion_sensors_reliability.setter
+    def low_motion_sensors_reliability(self, low_motion_sensors_reliability: Any) -> None:
+        pass
+
+    @property
+    def low_lighting(self) -> bool:
+        """
+        This status indicates a low-light scene.
+        As the cameras are passive sensors working in the visible range, they require some external light to operate.
+        This status warns if the lighting conditions become suboptimal or worse.
+        It is based on the scene illuminance in lux for the ZED X camera series (available with VIDEO_SETTINGS::SCENE_ILLUMINANCE).
+        For other camera models, or when using SVO files, it is based on computer vision processing of the image characteristics.
+        """
+        return bool()
+
+    @low_lighting.setter
+    def low_lighting(self, low_lighting: Any) -> None:
+        pass
+
+
+class RecordingStatus:
+    """
+    Class containing information about the status of the recording.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def average_compression_time(self) -> float:
+        """
+        Average compression time in milliseconds since the beginning of the recording.
+        """
+        return float()
+
+    @average_compression_time.setter
+    def average_compression_time(self, average_compression_time: Any) -> None:
+        pass
+
+    @property
+    def status(self) -> bool:
+        """
+        Status of the current frame.
+
+        True for success, or False if the frame could not be written in the SVO file.
+        """
+        return bool()
+
+    @status.setter
+    def status(self, status: Any) -> None:
+        pass
+
+    @property
+    def is_recording(self) -> bool:
+        """
+        Reports if the recording has been enabled.
+        """
+        return bool()
+
+    @is_recording.setter
+    def is_recording(self, is_recording: Any) -> None:
+        pass
+
+    @property
+    def is_paused(self) -> bool:
+        """
+        Reports if the recording has been paused.
+        """
+        return bool()
+
+    @is_paused.setter
+    def is_paused(self, is_paused: Any) -> None:
+        pass
+
+    @property
+    def number_frames_ingested(self) -> int:
+        """
+        Number of frames ingested in SVO encoding/writing.
+        """
+        return int()
+
+    @number_frames_ingested.setter
+    def number_frames_ingested(self, number_frames_ingested: Any) -> None:
+        pass
+
+    @property
+    def current_compression_time(self) -> float:
+        """
+        Compression time for the current frame in milliseconds.
+        """
+        return float()
+
+    @current_compression_time.setter
+    def current_compression_time(self, current_compression_time: Any) -> None:
+        pass
+
+    @property
+    def number_frames_encoded(self) -> int:
+        """
+        Number of frames effectively encoded and written. Might be different from the number of frames ingested; the difference reflects the encoder latency.
+        """
+        return int()
+
+    @number_frames_encoded.setter
+    def number_frames_encoded(self, number_frames_encoded: Any) -> None:
+        pass
+
+    @property
+    def average_compression_ratio(self) -> float:
+        """
+        Average compression ratio (% of raw size) since the beginning of the recording.
+        """
+        return float()
+
+    @average_compression_ratio.setter
+    def average_compression_ratio(self, average_compression_ratio: Any) -> None:
+        pass
+
+    @property
+    def current_compression_ratio(self) -> float:
+        """
+        Compression ratio (% of raw size) for the current frame.
+ """ + return float() + + @current_compression_ratio.setter + def current_compression_ratio(self, current_compression_ratio: Any) -> None: + pass + + +class Camera: + """ + This class serves as the primary interface between the camera and the various features provided by the SDK. + It enables seamless integration and access to a wide array of capabilities, including video streaming, depth sensing, object tracking, mapping, and much more. + + A standard program will use the Camera class like this: + .. code-block:: text + + + import pyzed.sl as sl + + def main(): + # --- Initialize a Camera object and open the ZED + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode for USB cameras + # init_params.camera_resolution = sl.RESOLUTION.HD1200 # Use HD1200 video mode for GMSL cameras + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + runtime_param = sl.RuntimeParameters() + + # --- Main loop grabbing images and depth values + # Capture 50 frames and stop + i = 0 + image = sl.Mat() + depth = sl.Mat() + while i < 50 : + # Grab an image + if zed.grab(runtime_param) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + # Display a pixel color + zed.retrieve_image(image, sl.VIEW.LEFT) # Get the left image + err, center_rgb = image.get_value(image.get_width() / 2, image.get_height() / 2) + if err == sl.ERROR_CODE.SUCCESS: + print("Image ", i, " center pixel R:", int(center_rgb[0]), " G:", int(center_rgb[1]), " B:", int(center_rgb[2])) + else: + print("Image ", i, " error:", err) + + # Display a pixel depth + zed.retrieve_measure(depth, sl.MEASURE.DEPTH) # Get the depth map + err, center_depth = depth.get_value(depth.get_width() / 2, depth.get_height() /2) + if err == sl.ERROR_CODE.SUCCESS: + print("Image ", i," center depth:", center_depth) + else: + print("Image ", i, " error:", err) + + i = i+1 + + # --- Close the Camera + zed.close() + return 0 + + if __name__ == "__main__": + main() + + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def close(self) -> None: + """ + Close an opened camera. + + If open() has been called, this method will close the connection to the camera (or the SVO file) and free the corresponding memory. + + If open() wasn't called or failed, this method won't have any effect. + + .. note:: + If an asynchronous task is running within the Camera object, like save_area_map(), this method will wait for its completion. + + .. note:: + To apply a new InitParameters, you will need to close the camera first and then open it again with the new InitParameters values. + + .. warning:: If the CUDA context was created by open(), this method will destroy it. + .. warning:: Therefore you need to make sure to delete your GPU sl.Mat objects before the context is destroyed. + """ + pass + + def open(self, py_init = None) -> ERROR_CODE: + """ + Opens the ZED camera from the provided InitParameters. + The method will also check the hardware requirements and run a self-calibration. + :param py_init: A structure containing all the initial parameters. Default: a preset of InitParameters. + :return: An error code giving information about the internal process. If ERROR_CODE.SUCCESS is returned, the camera is ready to use. 
Every other code indicates an error and the program should be stopped.
+
+        Here is the proper way to call this function:
+
+        .. code-block:: text
+
+            zed = sl.Camera() # Create a ZED camera object
+
+            init_params = sl.InitParameters() # Set configuration parameters
+            init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode
+            init_params.camera_fps = 60 # Set fps at 60
+
+            # Open the camera
+            err = zed.open(init_params)
+            if err != sl.ERROR_CODE.SUCCESS:
+                print(repr(err)) # Display the error
+                exit(-1)
+
+
+        .. note::
+            If you are having issues opening a camera, the diagnostic tool provided in the SDK can help you identify the problem.
+
+            - **Windows:** C:\\Program Files (x86)\\ZED SDK\\tools\\ZED Diagnostic.exe
+            - **Linux:** /usr/local/zed/tools/ZED Diagnostic
+        .. note::
+            If this method is called on an already opened camera, close() will be called.
+        """
+        return ERROR_CODE()
+
+    def is_opened(self) -> bool:
+        """
+        Reports if the camera has been successfully opened.
+        It has the same behavior as checking if open() returns ERROR_CODE.SUCCESS.
+        :return: True if the ZED camera is already set up, False otherwise.
+        """
+        return bool()
+
+    def read(self) -> ERROR_CODE:
+        """
+        Reads the latest images and IMU data from the camera and rectifies the images.
+
+        This method is meant to be called frequently in the main loop of your application.
+
+        .. note::
+            If no new frame is available before the timeout is reached, read() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected.
+
+        .. note::
+            Returned errors can be displayed using ``str()``.
+
+
+        :return: ERROR_CODE.SUCCESS means that no problem was encountered.
+        """
+        return ERROR_CODE()
+
+    def grab(self, py_runtime = None) -> ERROR_CODE:
+        """
+        This method will grab the latest images from the camera, rectify them, and compute the retrieve_measure() "measurements" based on the RuntimeParameters provided (depth, point cloud, tracking, etc.)
+
+        As measures are created in this method, its execution can last a few milliseconds, depending on your parameters and your hardware.
+        \n The exact duration will mostly depend on the following parameters:
+
+        - InitParameters.enable_right_side_measure : Activating this parameter increases computation time.
+        - InitParameters.camera_resolution : Lower resolutions are faster to compute.
+        - enable_positional_tracking() : Activating the tracking is an additional load.
+        - RuntimeParameters.enable_depth : Disabling the depth computation makes the call faster. However, depth is required by most SDK features (tracking, spatial mapping, plane estimation, etc.)
+        - InitParameters.depth_mode : DEPTH_MODE.PERFORMANCE will run faster than DEPTH_MODE.ULTRA.
+        - InitParameters.depth_stabilization : Stabilizing the depth requires an additional computation load as it enables tracking.
+
+        This method is meant to be called frequently in the main loop of your application.
+        .. note::
+            Since ZED SDK 3.0, this method is blocking. It means that grab() will wait until a new frame is detected and available.
+
+        .. note::
+            If no new frame is available before the timeout is reached, grab() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected.
+
+
+        :param py_runtime: A structure containing all the runtime parameters. Default: a preset of RuntimeParameters.
+        :return: ERROR_CODE.SUCCESS means that no problem was encountered.
+        .. note::
+            Returned errors can be displayed using ``str()``.
+
+
+        .. 
code-block:: text + + # Set runtime parameters after opening the camera + runtime_param = sl.RuntimeParameters() + + image = sl.Mat() + while True: + # Grab an image + if zed.grab(runtime_param) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + zed.retrieve_image(image, sl.VIEW.LEFT) # Get the left image + # Use the image for your application + """ + return ERROR_CODE() + + def retrieve_image(self, py_mat, view: VIEW = VIEW.LEFT, mem_type: MEM = MEM.CPU, resolution = None) -> ERROR_CODE: + """ + Retrieves images from the camera (or SVO file). + + Multiple images are available along with a view of various measures for display purposes. + \n Available images and views are listed here. + \n As an example, VIEW.DEPTH can be used to get a gray-scale version of the depth map, but the actual depth values can be retrieved using retrieve_measure() . + \n + \n **Pixels** + \n Most VIEW modes output image with 4 channels as BGRA (Blue, Green, Red, Alpha), for more information see enum VIEW + \n + \n **Memory** + \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called. + \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy. + \n If the provided sl.Mat object is already allocated and matches the requested image format, memory won't be re-allocated. + \n + \n **Image size** + \n By default, images are returned in the resolution provided by Resolution "get_camera_information().camera_configuration.resolution". + \n However, you can request custom resolutions. For example, requesting a smaller image can help you speed up your application. + .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested. + + :param py_mat: The sl.Mat to store the image. (Direction: out) + :param view: Defines the image you want (see VIEW). Default: VIEW.LEFT. (Direction: in) + :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU. (Direction: in) + :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution "Resolution(0,0)", the camera resolution will be taken. Default: (0,0). (Direction: in) + :return: ERROR_CODE.SUCCESS if the method succeeded. + :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example). + :return: ERROR_CODE.INVALID_RESOLUTION if the resolution is higher than one provided by Resolution "get_camera_information().camera_configuration.resolution". + :return: ERROR_CODE.FAILURE if another error occurred. + + .. note:: + As this method retrieves the images grabbed by the grab() method, it should be called afterward. + + + .. 
code-block:: text
+
+            # Create sl.Mat objects to store the images
+            left_image = sl.Mat()
+            while True:
+                # Grab an image
+                if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
+                    zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image
+
+                    # Display the center pixel colors
+                    err, left_center = left_image.get_value(left_image.get_width() / 2, left_image.get_height() / 2)
+                    if err == sl.ERROR_CODE.SUCCESS:
+                        print("left_image center pixel R:", int(left_center[0]), " G:", int(left_center[1]), " B:", int(left_center[2]))
+                    else:
+                        print("error:", err)
+        """
+        return ERROR_CODE()
+
+    def retrieve_measure(self, py_mat, measure: MEASURE = MEASURE.DEPTH, mem_type: MEM = MEM.CPU, resolution = None) -> ERROR_CODE:
+        """
+        Computed measures, like depth, point cloud, or normals, can be retrieved using this method.
+
+        Multiple measures are available after a grab() call. A full list is available here.
+
+        \n **Memory**
+        \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called.
+        \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy.
+        \n If the provided Mat object is already allocated and matches the requested image format, memory won't be re-allocated.
+
+        \n **Measure size**
+        \n By default, measures are returned in the resolution provided by Resolution "get_camera_information().camera_configuration.resolution".
+        \n However, custom resolutions can be requested. For example, requesting a smaller measure can help you speed up your application.
+        .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested.
+
+        :param py_mat: The sl.Mat to store the measures. (Direction: out)
+        :param measure: Defines the measure you want (see MEASURE). Default: MEASURE.DEPTH. (Direction: in)
+        :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU (you cannot change this default value). (Direction: in)
+        :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution "Resolution(0,0)", the camera resolution will be taken. Default: (0,0). (Direction: in)
+        :return: ERROR_CODE.SUCCESS if the method succeeded.
+        :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example).
+        :return: ERROR_CODE.INVALID_RESOLUTION if the resolution is higher than one provided by Resolution "get_camera_information().camera_configuration.resolution".
+        :return: ERROR_CODE.FAILURE if another error occurred.
+
+        .. note::
+            As this method retrieves the measures computed by the grab() method, it should be called afterward.
+
+
+        .. code-block:: text
+
+            depth_map = sl.Mat()
+            point_cloud = sl.Mat()
+            resolution = zed.get_camera_information().camera_configuration.resolution
+            x = int(resolution.width / 2) # Center coordinates
+            y = int(resolution.height / 2)
+
+            while True:
+                if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image
+
+                    zed.retrieve_measure(depth_map, sl.MEASURE.DEPTH) # Get the depth map
+
+                    # Read a depth value
+                    err, center_depth = depth_map.get_value(x, y) # each depth map pixel is a float value
+                    if err == sl.ERROR_CODE.SUCCESS: # +Inf is "too far", -Inf is "too close", NaN is "unknown/occlusion"
+                        print("Depth value at center:", center_depth, init_params.coordinate_units)
+
+                    zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA) # Get the point cloud
+
+                    # Read a point cloud value
+                    err, pc_value = point_cloud.get_value(x, y) # each point cloud pixel contains 4 floats, so we are using a numpy array
+
+                    # Get 3D coordinates
+                    if err == sl.ERROR_CODE.SUCCESS:
+                        print("Point cloud coordinates at center: X=", pc_value[0], ", Y=", pc_value[1], ", Z=", pc_value[2])
+
+                        # Get color information using Python struct package to unpack the unsigned char array containing RGBA values
+                        import struct
+                        packed = struct.pack('f', pc_value[3])
+                        char_array = struct.unpack('BBBB', packed)
+                        print("Color values at center: R=", char_array[0], ", G=", char_array[1], ", B=", char_array[2], ", A=", char_array[3])
+
+        """
+        return ERROR_CODE()
+
+    def set_region_of_interest(self, py_mat, modules = [MODULE.ALL]) -> ERROR_CODE:
+        """
+        Defines a region of interest to focus on for all the SDK, discarding other parts.
+        :param roi_mask: The Mat defining the requested region of interest. Pixels lower than 127 will be discarded from all modules: depth, positional tracking, etc.
+            If empty, all pixels are set as valid. The mask can be at a lower or higher resolution than the current images.
+        :return: An ERROR_CODE if something went wrong.
+        .. note::
+            The method supports MAT_TYPE "U8_C1/U8_C3/U8_C4" image types.
+        """
+        return ERROR_CODE()
+
+    def get_region_of_interest(self, py_mat, resolution = None, module: MODULE = MODULE.ALL) -> ERROR_CODE:
+        """
+        Gets the previously set or computed region of interest.
+        :param roi_mask: The Mat returned.
+        :param image_size: The optional size of the returned mask.
+        :return: An ERROR_CODE if something went wrong.
+        """
+        return ERROR_CODE()
+
+    def start_region_of_interest_auto_detection(self, roi_param = None) -> ERROR_CODE:
+        """
+        Starts the auto-detection of a region of interest to focus on for all the SDK, discarding other parts.
+        This detection is based on the general motion of the camera combined with the motion in the scene.
+        The camera must move for this process; an internal motion detector is used, based on the Positional Tracking module.
+        It requires a few hundred frames of motion to compute the mask. A minimal usage sketch is shown below.
+        :param roi_param: The RegionOfInterestParameters defining parameters for the detection.
+
+        .. note::
+            This module is expecting a static portion, typically a fairly close vehicle hood at the bottom of the image.
+
+            This module may not work correctly, or may detect an incorrect background area, especially with slow motion, if there is no static element.
+            This module works asynchronously; the status can be obtained using get_region_of_interest_auto_detection_status(). The result is either applied automatically,
+            or can be retrieved using the get_region_of_interest() function.
+        :return: An ERROR_CODE if something went wrong.
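+
+        The following is a minimal, hypothetical usage sketch; the exact REGION_OF_INTEREST_AUTO_DETECTION_STATE values to poll for depend on your SDK version, so check the enum before relying on them:
+
+        .. code-block:: text
+
+            err = zed.start_region_of_interest_auto_detection() # default RegionOfInterestParameters
+            while zed.grab() == sl.ERROR_CODE.SUCCESS:
+                state = zed.get_region_of_interest_auto_detection_status()
+                print("ROI detection state:", repr(state)) # poll until the detection reports completion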
+ """ + return ERROR_CODE() + + def get_region_of_interest_auto_detection_status(self) -> REGION_OF_INTEREST_AUTO_DETECTION_STATE: + """ + Return the status of the automatic Region of Interest Detection + The automatic Region of Interest Detection is enabled by using startRegionOfInterestAutoDetection + :return: REGION_OF_INTEREST_AUTO_DETECTION_STATE the status + """ + return REGION_OF_INTEREST_AUTO_DETECTION_STATE() + + def start_publishing(self, communication_parameters) -> ERROR_CODE: + """ + Set this camera as a data provider for the Fusion module. + + Metadata is exchanged with the Fusion. + :param communication_parameters: A structure containing all the initial parameters. Default: a preset of CommunicationParameters. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def stop_publishing(self) -> ERROR_CODE: + """ + Set this camera as normal camera (without data providing). + + Stop to send camera data to fusion. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + """ + return ERROR_CODE() + + def set_svo_position(self, frame_number) -> None: + """ + Sets the playback cursor to the desired frame number in the SVO file. + + This method allows you to move around within a played-back SVO file. After calling, the next call to grab() will read the provided frame number. + + :param frame_number: The number of the desired frame to be decoded. + + .. note:: + The method works only if the camera is open in SVO playback mode. + + + .. code-block:: text + + + import pyzed.sl as sl + + def main(): + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.set_from_svo_file("path/to/my/file.svo") + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + # Loop between frames 0 and 50 + left_image = sl.Mat() + while zed.get_svo_position() < zed.get_svo_number_of_frames() - 1: + + print("Current frame: ", zed.get_svo_position()) + + # Loop if we reached frame 50 + if zed.get_svo_position() == 50: + zed.set_svo_position(0) + + # Grab an image + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image + + # Use the image in your application + + # Close the Camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + """ + pass + + def pause_svo_reading(self, status) -> None: + """ + Pauses or resumes SVO reading when using SVO Real time mode + :param status: If true, the reading is paused. If false, the reading is resumed. + .. note:: + This is only relevant for SVO InitParameters::svo_real_time_mode + """ + pass + + def get_svo_position(self) -> int: + """ + Returns the current playback position in the SVO file. + + The position corresponds to the number of frames already read from the SVO file, starting from 0 to n. + + Each grab() call increases this value by one (except when using InitParameters.svo_real_time_mode). + :return: The current frame position in the SVO file. -1 if the SDK is not reading an SVO. + + .. note:: + The method works only if the camera is open in SVO playback mode. + + + See set_svo_position() for an example. + """ + return int() + + def get_svo_number_of_frames(self) -> int: + """ + Returns the number of frames in the SVO file. + + :return: The total number of frames in the SVO file. -1 if the SDK is not reading a SVO. 
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return int()
+
+    def ingest_data_into_svo(self, data) -> ERROR_CODE:
+        """
+        Ingests a SVOData object into the SVO file.
+
+        :return: An error code indicating success or failure.
+
+        The method works only if the camera is open in SVO recording mode.
+        """
+        return ERROR_CODE()
+
+    def get_svo_data_keys(self) -> list:
+        """
+        Gets the external channels that can be retrieved from the SVO file.
+
+        :return: A list of keys.
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return []
+
+    def retrieve_svo_data(self, key, data, ts_begin, ts_end) -> ERROR_CODE:
+        """
+        Retrieves SVO data from the SVO file at the given channel key and in the given timestamp range.
+
+        :return: An error code indicating success or failure.
+        :param key: The channel key.
+        :param data: The dict to be filled with SVOData objects, with timestamps as keys.
+        :param ts_begin: The beginning of the range.
+        :param ts_end: The end of the range.
+
+        The method works only if the camera is open in SVO playback mode.
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings(self, settings: VIDEO_SETTINGS, value = -1) -> ERROR_CODE:
+        """
+        Sets the value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+        Possible values (range) of each setting are available here.
+
+        :param settings: The setting to be set.
+        :param value: The value to set. Default: -1 (automatic mode, when supported by the setting).
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
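+
+        A minimal sketch (assuming VIDEO_SETTINGS.GAIN is supported by your camera model, as in the get_camera_settings() example below):
+
+        .. code-block:: text
+
+            zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, 50) # fixed manual gain
+            zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, -1) # back to default/automatic behavior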
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings_range(self, settings: VIDEO_SETTINGS, mini = -1, maxi = -1) -> ERROR_CODE:
+        """
+        Sets the value of the requested VIDEO_SETTINGS "camera setting" that supports two values (min/max).
+
+        This method only works with the following VIDEO_SETTINGS:
+        - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+        :param settings: The setting to be set.
+        :param mini: The minimum value that can be reached (-1 or 0 gives full range).
+        :param maxi: The maximum value that can be reached (-1 or 0 gives full range).
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. warning:: If the VIDEO_SETTINGS setting is not supported or min >= max, it will return ERROR_CODE.INVALID_FUNCTION_PARAMETERS.
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+
+        .. code-block:: text
+
+            # For ZED X based products, set the automatic exposure from 2ms to 5ms. The expected exposure time cannot go beyond those values
+            zed.set_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE, 2000, 5000)
+        """
+        return ERROR_CODE()
+
+    def set_camera_settings_roi(self, settings: VIDEO_SETTINGS, roi, eye: SIDE = SIDE.BOTH, reset = False) -> ERROR_CODE:
+        """
+        Overloaded method for VIDEO_SETTINGS.AEC_AGC_ROI which takes a Rect as parameter.
+
+        :param settings: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact.
+        :param roi: Rect that defines the target to be applied for AEC/AGC computation. Must be given according to camera resolution.
+        :param eye: SIDE on which to be applied for AEC/AGC computation. Default: SIDE.BOTH
+        :param reset: Cancel the manual ROI and reset it to the full image. Default: False
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+
+        .. code-block:: text
+
+            roi = sl.Rect(42, 56, 120, 15)
+            zed.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH)
+
+        """
+        return ERROR_CODE()
+
+    def get_camera_settings(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int]:
+        """
+        Returns the current value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+        Possible values (range) of each setting are available here.
+
+        :param setting: The requested setting.
+        :return: ERROR_CODE to indicate if the method was successful.
+        :return: The current value for the corresponding setting.
+
+        .. code-block:: text
+
+            err, gain = zed.get_camera_settings(sl.VIDEO_SETTINGS.GAIN)
+            if err == sl.ERROR_CODE.SUCCESS:
+                print("Current gain value:", gain)
+            else:
+                print("error:", err)
+
+
+        .. note::
+            The method works only if the camera is open in LIVE or STREAM mode.
+
+        .. note::
+            Settings are not exported in the SVO file format.
+        """
+        return tuple[ERROR_CODE, int]()
+
+    def get_camera_settings_range(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int, int]:
+        """
+        Returns the values of the requested settings for VIDEO_SETTINGS that supports two values (min/max).
+
+        This method only works with the following VIDEO_SETTINGS:
+        - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE
+        - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE
+
+        Possible values (range) of each setting are available here.
+        :param setting: The requested setting.
+        :return: ERROR_CODE to indicate if the method was successful.
+        :return: The current value of the minimum for the corresponding setting.
+        :return: The current value of the maximum for the corresponding setting.
+
+        .. code-block:: text
+
+            err, aec_range_min, aec_range_max = zed.get_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE)
+            if err == sl.ERROR_CODE.SUCCESS:
+                print("Current AUTO_EXPOSURE_TIME_RANGE range values ==> min:", aec_range_min, "max:", aec_range_max)
+            else:
+                print("error:", err)
+
+
+        .. note::
+            Works only with ZED X cameras, which support low-level controls.
+        """
+        return tuple[ERROR_CODE, int, int]()
+
+    def get_camera_settings_roi(self, setting: VIDEO_SETTINGS, roi, eye: SIDE = SIDE.BOTH) -> ERROR_CODE:
+        """
+        Returns the current value of the currently used ROI for the camera setting AEC_AGC_ROI.
+
+        :param setting: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. (Direction: in)
+        :param roi: Roi that will be filled. (Direction: out)
+        :param eye: The requested side. Default: SIDE.BOTH (Direction: in)
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. code-block:: text
+
+            roi = sl.Rect()
+            err = zed.get_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH)
+            print("Current ROI for AEC_AGC: " + str(roi.x) + " " + str(roi.y) + " " + str(roi.width) + " " + str(roi.height))
+
+
+        .. note::
+            Works only if the camera is open in LIVE or STREAM mode with VIDEO_SETTINGS.AEC_AGC_ROI.
+
+        .. note::
+            It will return ERROR_CODE.INVALID_FUNCTION_CALL or ERROR_CODE.INVALID_FUNCTION_PARAMETERS otherwise.
+ """ + return ERROR_CODE() + + def is_camera_setting_supported(self, setting: VIDEO_SETTINGS) -> bool: + """ + Returns if the video setting is supported by the camera or not + + :param setting: the video setting to test (Direction: in) + :return: True if the VIDEO_SETTINGS is supported by the camera, False otherwise + """ + return bool() + + def get_current_fps(self) -> float: + """ + Returns the current framerate at which the grab() method is successfully called. + + The returned value is based on the difference of camera get_timestamp() "timestamps" between two successful grab() calls. + + :return: The current SDK framerate + + .. warning:: The returned framerate (number of images grabbed per second) can be lower than InitParameters.camera_fps if the grab() function runs slower than the image stream or is called too often. + + .. code-block:: text + + current_fps = zed.get_current_fps() + print("Current framerate: ", current_fps) + """ + return float() + + def get_timestamp(self, time_reference: TIME_REFERENCE) -> Timestamp: + """ + Returns the timestamp in the requested TIME_REFERENCE. + + - When requesting the TIME_REFERENCE.IMAGE timestamp, the UNIX nanosecond timestamp of the latest grab() "grabbed" image will be returned. + \n This value corresponds to the time at which the entire image was available in the PC memory. As such, it ignores the communication time that corresponds to 2 or 3 frame-time based on the fps (ex: 33.3ms to 50ms at 60fps). + + - When requesting the TIME_REFERENCE.CURRENT timestamp, the current UNIX nanosecond timestamp is returned. + + This function can also be used when playing back an SVO file. + + :param time_reference: The selected TIME_REFERENCE. + :return: The Timestamp in nanosecond. 0 if not available (SVO file without compression). + + .. note:: + As this function returns UNIX timestamps, the reference it uses is common across several Camera instances. + + \n This can help to organized the grabbed images in a multi-camera application. + + .. code-block:: text + + last_image_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.IMAGE) + current_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.CURRENT) + print("Latest image timestamp: ", last_image_timestamp.get_nanoseconds(), "ns from Epoch.") + print("Current timestamp: ", current_timestamp.get_nanoseconds(), "ns from Epoch.") + """ + return Timestamp() + + def get_frame_dropped_count(self) -> int: + """ + Returns the number of frames dropped since grab() was called for the first time. + + A dropped frame corresponds to a frame that never made it to the grab method. + \n This can happen if two frames were extracted from the camera when grab() is called. The older frame will be dropped so as to always use the latest (which minimizes latency). + + :return: The number of frames dropped since the first grab() call. + """ + return int() + + def get_current_min_max_depth(self) -> tuple[ERROR_CODE, float, float]: + """ + Gets the current range of perceived depth. + :param min: Minimum depth detected (in selected sl.UNIT). (Direction: out) + :param max: Maximum depth detected (in selected sl.UNIT). (Direction: out) + :return: ERROR_CODE.SUCCESS if values can be extracted, ERROR_CODE.FAILURE otherwise. + """ + return tuple[ERROR_CODE, float, float]() + + def get_camera_information(self, resizer = None) -> CameraInformation: + """ + Returns the CameraInformation associated the camera being used. 
+ + To ensure accurate calibration, it is possible to specify a custom resolution as a parameter when obtaining scaled information, as calibration parameters are resolution-dependent. + \n When reading an SVO file, the parameters will correspond to the camera used for recording. + + :param resizer: You can specify a size different from the default image size to get the scaled camera information. + Default = (0,0) meaning original image size (given by CameraConfiguration.resolution "get_camera_information().camera_configuration.resolution"). + :return: CameraInformation containing the calibration parameters of the ZED, as well as serial number and firmware version. + + .. warning:: The returned parameters might vary between two execution due to the InitParameters.camera_disable_self_calib "self-calibration" being run in the open() method. + .. note:: + The calibration file SNXXXX.conf can be found in: + + - **Windows:** C:/ProgramData/Stereolabs/settings/ + - **Linux:** /usr/local/zed/settings/ + """ + return CameraInformation() + + def get_runtime_parameters(self) -> RuntimeParameters: + """ + Returns the RuntimeParameters used. + It corresponds to the structure given as argument to the grab() method. + + :return: RuntimeParameters containing the parameters that define the behavior of the grab method. + """ + return RuntimeParameters() + + def get_init_parameters(self) -> InitParameters: + """ + Returns the InitParameters associated with the Camera object. + It corresponds to the structure given as argument to open() method. + + :return: InitParameters containing the parameters used to initialize the Camera object. + """ + return InitParameters() + + def get_positional_tracking_parameters(self) -> PositionalTrackingParameters: + """ + Returns the PositionalTrackingParameters used. + + It corresponds to the structure given as argument to the enable_positional_tracking() method. + + :return: PositionalTrackingParameters containing the parameters used for positional tracking initialization. + """ + return PositionalTrackingParameters() + + def get_spatial_mapping_parameters(self) -> SpatialMappingParameters: + """ + Returns the SpatialMappingParameters used. + + It corresponds to the structure given as argument to the enable_spatial_mapping() method. + + :return: SpatialMappingParameters containing the parameters used for spatial mapping initialization. + """ + return SpatialMappingParameters() + + def get_object_detection_parameters(self, instance_module_id = 0) -> ObjectDetectionParameters: + """ + Returns the ObjectDetectionParameters used. + + It corresponds to the structure given as argument to the enable_object_detection() method. + :return: ObjectDetectionParameters containing the parameters used for object detection initialization. + """ + return ObjectDetectionParameters() + + def get_body_tracking_parameters(self, instance_id = 0) -> BodyTrackingParameters: + """ + Returns the BodyTrackingParameters used. + + It corresponds to the structure given as argument to the enable_body_tracking() method. + + :return: BodyTrackingParameters containing the parameters used for body tracking initialization. + """ + return BodyTrackingParameters() + + def get_streaming_parameters(self) -> StreamingParameters: + """ + Returns the StreamingParameters used. + + It corresponds to the structure given as argument to the enable_streaming() method. + + :return: StreamingParameters containing the parameters used for streaming initialization. 
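+
+        A minimal sketch (assumes streaming was enabled beforehand; the ``port`` field mirrors the one set in StreamingParameters):
+
+        .. code-block:: text
+
+            stream_params = zed.get_streaming_parameters()
+            print("Streaming port:", stream_params.port)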
+ """ + return StreamingParameters() + + def enable_positional_tracking(self, py_tracking = None) -> ERROR_CODE: + """ + Initializes and starts the positional tracking processes. + + This method allows you to enable the position estimation of the SDK. It only has to be called once in the camera's lifetime. + \n When enabled, the position will be update at each grab() call. + \n Tracking-specific parameters can be set by providing PositionalTrackingParameters to this method. + + :param py_tracking: A structure containing all the specific parameters for the positional tracking. Default: a preset of PositionalTrackingParameters. + :return: ERROR_CODE.FAILURE if the PositionalTrackingParameters.area_file_path file wasn't found, ERROR_CODE.SUCCESS otherwise. + + .. warning:: The positional tracking feature benefits from a high framerate. We found HD720@60fps to be the best compromise between image quality and framerate. + + .. code-block:: text + + + import pyzed.sl as sl + + def main() : + # --- Initialize a Camera object and open the ZED + # Create a ZED camera object + zed = sl.Camera() + + # Set configuration parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + print(repr(err)) + exit(-1) + + # Set tracking parameters + track_params = sl.PositionalTrackingParameters() + + # Enable positional tracking + err = zed.enable_positional_tracking(track_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Tracking error: ", repr(err)) + exit(-1) + + # --- Main loop + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and computes the tracking + camera_pose = sl.Pose() + zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD) + translation = camera_pose.get_translation().get() + print("Camera position: X=", translation[0], " Y=", translation[1], " Z=", translation[2]) + + # --- Close the Camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + """ + return ERROR_CODE() + + def update_self_calibration(self) -> None: + """ + Performs a new self-calibration process. + In some cases, due to temperature changes or strong vibrations, the stereo calibration becomes less accurate. + \n Use this method to update the self-calibration data and get more reliable depth values. + .. note:: + The self-calibration will occur at the next grab() call. + + .. note:: + This method is similar to the previous reset_self_calibration() used in 2.X SDK versions. + + .. warning:: New values will then be available in get_camera_information(), be sure to get them to still have consistent 2D <-> 3D conversion. + """ + pass + + def enable_body_tracking(self, body_tracking_parameters = None) -> ERROR_CODE: + """ + Initializes and starts the body tracking module. + + The body tracking module currently supports multiple classes of human skeleton detection with the BODY_TRACKING_MODEL.HUMAN_BODY_FAST, + BODY_TRACKING_MODEL "BODY_TRACKING_MODEL::HUMAN_BODY_MEDIUM" or BODY_TRACKING_MODEL "BODY_TRACKING_MODEL::HUMAN_BODY_ACCURATE". + \n This model only detects humans but provides a full skeleton map for each person. + + \n Detected objects can be retrieved using the retrieve_bodies() method. + + .. note:: + - **This Deep Learning detection module is not available for MODEL.ZED cameras (first generation ZED cameras).** + + .. 
note:: + - This feature uses AI to locate objects and requires a powerful GPU. A GPU with at least 3GB of memory is recommended. + + + :param body_tracking_parameters: A structure containing all the specific parameters for the object detection. Default: a preset of BodyTrackingParameters. + :return: ERROR_CODE.SUCCESS if everything went fine. + :return: ERROR_CODE.OBJECT_DETECTION_NOT_AVAILABLE if the AI model is missing or corrupted. In this case, the SDK needs to be reinstalled + :return: ERROR_CODE.OBJECT_DETECTION_MODULE_NOT_COMPATIBLE_WITH_CAMERA if the camera used does not have an IMU (MODEL.ZED). + :return: ERROR_CODE.SENSORS_NOT_DETECTED if the camera model is correct (not MODEL.ZED) but the IMU is missing. It probably happens because InitParameters.sensors_required was set to False and that IMU has not been found. + :return: ERROR_CODE.INVALID_FUNCTION_CALL if one of the **body_tracking_parameters** parameter is not compatible with other modules parameters (for example, **depth_mode** has been set to DEPTH_MODE.NONE). + :return: ERROR_CODE.FAILURE otherwise. + + .. code-block:: text + + import pyzed.sl as sl + + def main() : + # Create a ZED camera object + zed = sl.Camera() + + # Open the camera + err = zed.open() + if err != sl.ERROR_CODE.SUCCESS: + print("Opening camera error:", repr(err)) + exit(-1) + + # Enable position tracking (mandatory for object detection) + tracking_params = sl.PositionalTrackingParameters() + err = zed.enable_positional_tracking(tracking_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Positional Tracking error:", repr(err)) + exit(-1) + + # Set the body tracking parameters + body_tracking_params = sl.BodyTrackingParameters() + + # Enable the body tracking + err = zed.enable_body_tracking(body_tracking_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Body Tracking error:", repr(err)) + exit(-1) + + # Grab an image and detect bodies on it + bodies = sl.Bodies() + while True : + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_bodies(bodies) + print(len(bodies.body_list), "bodies detected") + # Use the bodies in your application + + # Close the camera + zed.disable_body_tracking() + zed.close() + + if __name__ == "__main__": + main() + """ + return ERROR_CODE() + + def disable_body_tracking(self, instance_id = 0, force_disable_all_instances = False) -> None: + """ + Disables the body tracking process. + + The body tracking module immediately stops and frees its memory allocations. + + :param instance_id: Id of the body tracking instance. Used when multiple instances of the body tracking module are enabled at the same time. + :param force_disable_all_instances: Should disable all instances of the body tracking module or just **instance_module_id**. + + .. note:: + If the body tracking has been enabled, this method will automatically be called by close(). + """ + pass + + def retrieve_bodies(self, bodies, body_tracking_runtime_parameters = None, instance_id = 0) -> ERROR_CODE: + """ + Retrieves body tracking data from the body tracking module. + + This method returns the result of the body tracking, whether the module is running synchronously or asynchronously. + + - **Asynchronous:** this method immediately returns the last bodies tracked. If the current tracking isn't done, the bodies from the last tracking will be returned, and Bodies.is_new will be set to False. + - **Synchronous:** this method executes tracking and waits for it to finish before returning the detected objects. 
+
+        It is recommended to keep the same Bodies object as the input of all calls to this method. This will enable the identification and the tracking of every detected object.
+
+        :param bodies: The detected bodies will be saved into this object. If the object already contains data from a previous tracking, it will be updated, keeping a unique ID for the same person.
+        :param body_tracking_runtime_parameters: Body tracking runtime settings, can be changed at each tracking. In async mode, the parameters update is applied on the next iteration. If None, the previously used parameters will be used.
+        :param instance_id: Id of the body tracking instance. Used when multiple instances of the body tracking module are enabled at the same time.
+        :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+        .. code-block:: text
+
+            bodies = sl.Bodies() # Unique Bodies to be updated after each grab
+            # Main loop
+            while True:
+                if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image from the camera
+                    zed.retrieve_bodies(bodies)
+                    print(len(bodies.body_list), "bodies detected")
+        """
+        return ERROR_CODE()
+
+    def set_body_tracking_runtime_parameters(self, body_tracking_runtime_parameters, instance_module_id = 0) -> ERROR_CODE:
+        """
+        Sets the body tracking runtime parameters.
+        """
+        return ERROR_CODE()
+
+    def is_body_tracking_enabled(self, instance_id = 0) -> bool:
+        """
+        Tells if the body tracking module is enabled.
+        """
+        return bool()
+
+    def get_sensors_data(self, py_sensor_data, time_reference = TIME_REFERENCE.CURRENT) -> ERROR_CODE:
+        """
+        Retrieves the SensorsData (IMU, magnetometer, barometer) at a specific time reference.
+
+        - Calling get_sensors_data with TIME_REFERENCE.CURRENT gives you the latest sensors data received. Getting all the data requires calling this method at 800Hz in a dedicated thread.
+        - Calling get_sensors_data with TIME_REFERENCE.IMAGE gives you the sensors data at the time of the latest image grab() "grabbed".
+
+        The SensorsData object contains the previous IMUData structure that was used in ZED SDK v2.X:
+        \n For IMU data, the values are provided in two ways:
  • **Time-fused** pose estimation that can be accessed using: + * IMUData.get_pose "data.get_imu_data().get_pose()" +
  • +
  • **Raw values** from the IMU sensor: + + * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope + * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer + both the gyroscope and accelerometer are synchronized. +
  • + + + The delta time between previous and current values can be calculated using data.imu.timestamp + + .. note:: + The IMU quaternion (fused data) is given in the specified COORDINATE_SYSTEM of InitParameters. + + + :param py_sensor_data: The SensorsData variable to store the data. (Direction: out) + :param time_reference: Defines the reference from which you want the data to be expressed. Default: REFERENCE_FRAME.WORLD. (Direction: in) + :return: ERROR_CODE.SUCCESS if sensors data have been extracted. + :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED. + :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened. + :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **reference_time** is not valid. See Warning. + + .. warning:: In SVO reading mode, the TIME_REFERENCE.CURRENT is currently not available (yielding ERROR_CODE.INVALID_FUNCTION_PARAMETERS. + .. warning:: Only the quaternion data and barometer data (if available) at TIME_REFERENCE.IMAGE are available. Other values will be set to 0. + """ + return ERROR_CODE() + + def get_sensors_data_batch(self, py_sensor_data) -> ERROR_CODE: + """ + Retrieves all SensorsData associated to most recent grabbed frame in the specified COORDINATE_SYSTEM of InitParameters. + + For IMU data, the values are provided in 2 ways: + +
  • **Time-fused** pose estimation that can be accessed using: + * IMUData.get_pose "data.get_imu_data().get_pose()" +
  • +
  • **Raw values** from the IMU sensor: + + * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope + * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer + both the gyroscope and accelerometer are synchronized. +
  • + + + The delta time between previous and current values can be calculated using data.imu.timestamp + + :param py_sensor_data: The SensorsData list to store the data. (Direction: out) + :return: ERROR_CODE.SUCCESS if sensors data have been extracted. + :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED. + :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened. + :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **reference_time** is not valid. See Warning. + + .. code-block:: text + + if zed.grab() == sl.ERROR_CODE.SUCCESS: + sensors_data = [] + if (zed.get_sensors_data_batch(sensors_data) == sl.ERROR_CODE.SUCCESS): + for data in sensors_data: + print("IMU data: ", data.imu.get_angular_velocity(), data.imu.get_linear_acceleration()) + print("IMU pose: ", data.imu.get_pose().get_translation()) + print("IMU orientation: ", data.imu.get_orientation().get()) + """ + return ERROR_CODE() + + def set_imu_prior(self, transfom) -> ERROR_CODE: + """ + Set an optional IMU orientation hint that will be used to assist the tracking during the next grab(). + + This method can be used to assist the positional tracking rotation. + + .. note:: + This method is only effective if the camera has a model other than a MODEL.ZED, which does not contains internal sensors. + + .. warning:: It needs to be called before the grab() method. + :param transform: Transform to be ingested into IMU fusion. Note that only the rotation is used. + :return: ERROR_CODE.SUCCESS if the transform has been passed, ERROR_CODE.INVALID_FUNCTION_CALL otherwise (e.g. when used with a ZED camera which doesn't have IMU data). + """ + return ERROR_CODE() + + def get_position(self, py_pose, reference_frame: REFERENCE_FRAME = REFERENCE_FRAME.WORLD) -> POSITIONAL_TRACKING_STATE: + """ + Retrieves the estimated position and orientation of the camera in the specified REFERENCE_FRAME "reference frame". + + - Using REFERENCE_FRAME.WORLD, the returned pose relates to the initial position of the camera (PositionalTrackingParameters.initial_world_transform ). + - Using REFERENCE_FRAME.CAMERA, the returned pose relates to the previous position of the camera. + + If the tracking has been initialized with PositionalTrackingParameters.enable_area_memory to True (default), this method can return POSITIONAL_TRACKING_STATE.SEARCHING. + This means that the tracking lost its link to the initial referential and is currently trying to relocate the camera. However, it will keep on providing position estimations. + + :param camera_pose: The pose containing the position of the camera and other information (timestamp, confidence). (Direction: out) + :param reference_frame: Defines the reference from which you want the pose to be expressed. Default: REFERENCE_FRAME.WORLD. (Direction: in) + :return: The current state of the tracking process. + + .. note:: + Extract Rotation Matrix: Pose.get_rotation_matrix() + + .. note:: + Extract Translation Vector: Pose.get_translation() + + .. note:: + Extract Orientation / Quaternion: Pose.get_orientation() + + + .. warning:: This method requires the tracking to be enabled. enablePositionalTracking() . + + .. note:: + The position is provided in the InitParameters.coordinate_system . See COORDINATE_SYSTEM for its physical origin. + + + .. 
code-block:: text + + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and computes the tracking + camera_pose = sl.Pose() + zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD) + + translation = camera_pose.get_translation().get() + print("Camera position: X=", translation[0], " Y=", translation[1], " Z=", translation[2]) + print("Camera Euler rotation: X=", camera_pose.get_euler_angles()[0], " Y=", camera_pose.get_euler_angles()[1], " Z=", camera_pose.get_euler_angles()[2]) + print("Camera Rodrigues rotation: X=", camera_pose.get_rotation_vector()[0], " Y=", camera_pose.get_rotation_vector()[1], " Z=", camera_pose.get_rotation_vector()[2]) + orientation = camera_pose.get_orientation().get() + print("Camera quaternion orientation: X=", orientation[0], " Y=", orientation[1], " Z=", orientation[2], " W=", orientation[3]) + """ + return POSITIONAL_TRACKING_STATE() + + def get_positional_tracking_landmarks(self, landmarks) -> ERROR_CODE: + """ + Get the current positional tracking landmarks. + :param landmarks: The dictionary of landmarks_id and landmark. + :return: ERROR_CODE that indicate if the function succeed or not. + """ + return ERROR_CODE() + + def get_positional_tracking_landmarks2d(self, landmark2d) -> ERROR_CODE: + """ + Get the current positional tracking landmark. + :param landmark: The landmark. + :return: ERROR_CODE that indicate if the function succeed or not. + """ + return ERROR_CODE() + + def get_positional_tracking_status(self) -> PositionalTrackingStatus: + """ + Return the current status of positional tracking module. + + :return: sl::PositionalTrackingStatus current status of positional tracking module. + """ + return PositionalTrackingStatus() + + def get_area_export_state(self) -> AREA_EXPORTING_STATE: + """ + Returns the state of the spatial memory export process. + + As Camera.save_area_map() only starts the exportation, this method allows you to know when the exportation finished or if it failed. + :return: The current state of the spatial memory export process. + """ + return AREA_EXPORTING_STATE() + + def save_area_map(self, area_file_path = "") -> ERROR_CODE: + """ + Saves the current area learning file. The file will contain spatial memory data generated by the tracking. + + If the tracking has been initialized with PositionalTrackingParameters.enable_area_memory to True (default), the method allows you to export the spatial memory. + \n Reloading the exported file in a future session with PositionalTrackingParameters.area_file_path initializes the tracking within the same referential. + \n This method is asynchronous, and only triggers the file generation. You can use get_area_export_state() to get the export state. + The positional tracking keeps running while exporting. + + :param area_file_path: Path of an '.area' file to save the spatial memory database in. + :return: ERROR_CODE.FAILURE if the **area_file_path** file wasn't found, ERROR_CODE.SUCCESS otherwise. + + See get_area_export_state() + + .. note:: + Please note that this method will also flush the area database that was built/loaded. + + + .. warning:: If the camera wasn't moved during the tracking session, or not enough, the spatial memory won't be usable and the file won't be exported. + .. warning:: The get_area_export_state() will return AREA_EXPORTING_STATE.FILE_EMPTY. + .. warning:: A few meters (~3m) of translation or a full rotation should be enough to get usable spatial memory. + .. 
warning:: However, as it should be used for relocation purposes, visiting a significant portion of the environment is recommended before exporting. + + .. code-block:: text + + while True : + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image and computes the tracking + camera_pose = Pose() + zed.get_position(camera_pose, REFERENCE_FRAME.WORLD) + + # Export the spatial memory for future sessions + zed.save_area_map("office.area") # The actual file will be created asynchronously. + print(repr(zed.get_area_export_state())) + + # Close the camera + zed.close() + """ + return ERROR_CODE() + + def disable_positional_tracking(self, area_file_path = "") -> None: + """ + Disables the positional tracking. + + The positional tracking is immediately stopped. If a file path is given, save_area_map() will be called asynchronously. See get_area_export_state() to get the exportation state. + If the tracking has been enabled, this function will automatically be called by close() . + + :param area_file_path: If set, saves the spatial memory into an '.area' file. Default: (empty) + \n **area_file_path** is the name and path of the database, e.g. path/to/file/myArea1.area". + """ + pass + + def is_positional_tracking_enabled(self) -> bool: + """ + Tells if the tracking module is enabled + """ + return bool() + + def reset_positional_tracking(self, path) -> ERROR_CODE: + """ + Resets the tracking, and re-initializes the position with the given transformation matrix. + :param path: Position of the camera in the world frame when the method is called. + :return: ERROR_CODE.SUCCESS if the tracking has been reset, ERROR_CODE.FAILURE otherwise. + + .. note:: + Please note that this method will also flush the accumulated or loaded spatial memory. + """ + return ERROR_CODE() + + def enable_spatial_mapping(self, py_spatial = None) -> ERROR_CODE: + """ + Initializes and starts the spatial mapping processes. + + The spatial mapping will create a geometric representation of the scene based on both tracking data and 3D point clouds. + The resulting output can be a Mesh or a FusedPointCloud. It can be be obtained by calling extract_whole_spatial_map() or retrieve_spatial_map_async(). + Note that retrieve_spatial_map_async should be called after request_spatial_map_async(). + + :param py_spatial: A structure containing all the specific parameters for the spatial mapping. + Default: a balanced parameter preset between geometric fidelity and output file size. For more information, see the SpatialMappingParameters documentation. + :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise. + + .. warning:: The tracking (enable_positional_tracking() ) and the depth (RuntimeParameters.enable_depth ) needs to be enabled to use the spatial mapping. + .. warning:: The performance greatly depends on the **py_spatial**. + .. warning:: Lower SpatialMappingParameters.range_meter and SpatialMappingParameters.resolution_meter for higher performance. + If the mapping framerate is too slow in live mode, consider using an SVO file, or choose a lower mesh resolution. + + .. note:: + This feature uses host memory (RAM) to store the 3D map. The maximum amount of available memory allowed can be tweaked using the SpatialMappingParameters. + + \n Exceeding the maximum memory allowed immediately stops the mapping. + + .. 
code-block:: text + + import pyzed.sl as sl + + def main() : + # Create a ZED camera object + zed = sl.Camera() + + # Set initial parameters + init_params = sl.InitParameters() + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60) + init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP # Use a right-handed Y-up coordinate system (The OpenGL one) + init_params.coordinate_units = sl.UNIT.METER # Set units in meters + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Positional tracking needs to be enabled before using spatial mapping + tracking_parameters = sl.PositionalTrackingParameters() + err = zed.enable_positional_tracking(tracking_parameters) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Enable spatial mapping + mapping_parameters = sl.SpatialMappingParameters() + err = zed.enable_spatial_mapping(mapping_parameters) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Grab data during 500 frames + i = 0 + mesh = sl.Mesh() # Create a mesh object + while i < 500 : + # For each new grab, mesh data is updated + if zed.grab() == sl.ERROR_CODE.SUCCESS : + # In the background, the spatial mapping will use newly retrieved images, depth and pose to update the mesh + mapping_state = zed.get_spatial_mapping_state() + + # Print spatial mapping state + print("Images captured: ", i, "/ 500 || Spatial mapping state: ", repr(mapping_state)) + i = i + 1 + + # Extract, filter and save the mesh in a .obj file + print("Extracting Mesh ...") + zed.extract_whole_spatial_map(mesh) # Extract the whole mesh + print("Filtering Mesh ...") + mesh.filter(sl.MESH_FILTER.LOW) # Filter the mesh (remove unnecessary vertices and faces) + print("Saving Mesh in mesh.obj ...") + mesh.save("mesh.obj") # Save the mesh in an obj file + + # Disable tracking and mapping and close the camera + zed.disable_spatial_mapping() + zed.disable_positional_tracking() + zed.close() + return 0 + + if __name__ == "__main__" : + main() + """ + return ERROR_CODE() + + def pause_spatial_mapping(self, status) -> None: + """ + Pauses or resumes the spatial mapping processes. + + As spatial mapping runs asynchronously, using this method can pause its computation to free some processing power, and resume it again later. + \n For example, it can be used to avoid mapping a specific area or to pause the mapping when the camera is static. + :param status: If True, the integration is paused. If False, the spatial mapping is resumed. + """ + pass + + def get_spatial_mapping_state(self) -> SPATIAL_MAPPING_STATE: + """ + Returns the current spatial mapping state. + + As the spatial mapping runs asynchronously, this method allows you to get reported errors or status info. + :return: The current state of the spatial mapping process. + + See also SPATIAL_MAPPING_STATE + """ + return SPATIAL_MAPPING_STATE() + + def request_spatial_map_async(self) -> None: + """ + Starts the spatial map generation process in a non-blocking thread from the spatial mapping process. + + The spatial map generation can take a long time depending on the mapping resolution and covered area. This function will trigger the generation of a mesh without blocking the program. + You can get info about the current generation using get_spatial_map_request_status_async(), and retrieve the mesh using retrieve_spatial_map_async(). + + .. note:: + Only one mesh can be generated at a time. If the previous mesh generation is not over, new calls of the function will be ignored. 
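+
+        A minimal usage sketch (not from the official documentation; it assumes spatial mapping was already enabled on zed):
+
+        .. code-block:: text
+
+            zed.request_spatial_map_async() # Trigger the asynchronous generation
+            while zed.get_spatial_map_request_status_async() != sl.ERROR_CODE.SUCCESS:
+                zed.grab() # Keep grabbing: the map is generated in the background
+            mesh = sl.Mesh()
+            zed.retrieve_spatial_map_async(mesh) # Only new/updated chunks are transferred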
+        """
+        pass
+
+    def get_spatial_map_request_status_async(self) -> ERROR_CODE:
+        """
+        Returns the spatial map generation status.
+
+        This status allows you to know if the mesh can be retrieved by calling retrieve_spatial_map_async().
+        :return: ERROR_CODE.SUCCESS if the mesh is ready and not yet retrieved, otherwise ERROR_CODE.FAILURE.
+        """
+        return ERROR_CODE()
+
+    def retrieve_spatial_map_async(self, py_mesh) -> ERROR_CODE:
+        """
+        Retrieves the current generated spatial map.
+
+        After calling request_spatial_map_async(), this method allows you to retrieve the generated mesh or fused point cloud.
+        \n The Mesh or FusedPointCloud will only be available when get_spatial_map_request_status_async() returns ERROR_CODE.SUCCESS.
+
+        :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out)
+        :return: ERROR_CODE.SUCCESS if the mesh is retrieved, otherwise ERROR_CODE.FAILURE.
+
+        .. note::
+            This method only updates the necessary chunks and adds the new ones in order to improve update speed.
+
+        .. warning:: You should not modify the mesh / fused point cloud between two calls of this method, otherwise it can lead to a corrupted mesh / fused point cloud.
+        See request_spatial_map_async() for an example.
+        """
+        return ERROR_CODE()
+
+    def extract_whole_spatial_map(self, py_mesh) -> ERROR_CODE:
+        """
+        Extracts the current spatial map from the spatial mapping process.
+
+        If the object to be filled already contains a previous version of the mesh / fused point cloud, only changes will be updated, optimizing performance.
+
+        :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out)
+
+        :return: ERROR_CODE.SUCCESS if the mesh is filled and available, otherwise ERROR_CODE.FAILURE.
+
+        .. warning:: This is a blocking function. You should either call it in a thread or at the end of the mapping process.
+        The extraction can be long; calling this function in the grab loop will block the depth and tracking computation, giving bad results.
+        """
+        return ERROR_CODE()
+
+    def find_plane_at_hit(self, coord, py_plane: Plane, parameters = PlaneDetectionParameters()) -> ERROR_CODE:
+        """
+        Checks the plane at the given left image coordinates.
+
+        This method gives the 3D plane corresponding to a given pixel in the latest left image grabbed by grab().
+        \n The pixel coordinates are expected to be contained in x=[0;width-1] and y=[0;height-1], where width/height are defined by the input resolution.
+
+        :param coord: The image coordinate. The coordinate must be taken from the full-size image. (Direction: in)
+        :param py_plane: The detected plane if the method succeeded. (Direction: out)
+        :param parameters: A structure containing all the specific parameters for the plane detection. Default: a preset of PlaneDetectionParameters. (Direction: in)
+        :return: ERROR_CODE.SUCCESS if a plane is found, otherwise ERROR_CODE.PLANE_NOT_FOUND.
+
+        .. note::
+            The reference frame is defined by the RuntimeParameters.measure3D_reference_frame given to the grab() method.
+        """
+        return ERROR_CODE()
+
+    def find_floor_plane(self, py_plane, reset_tracking_floor_frame, floor_height_prior = float('nan'), world_orientation_prior = Rotation(Matrix3f().zeros()), floor_height_prior_tolerance = float('nan')) -> ERROR_CODE:
+        """
+        Detects the floor plane of the scene.
+
+        This method analyses the latest image and depth to estimate the floor plane of the scene.
+        \n It expects the floor plane to be visible and bigger than other candidate planes, like a table.
+
+        :param py_plane: The detected floor plane if the method succeeded. (Direction: out)
+        :param reset_tracking_floor_frame: The transform to align the tracking with the floor plane. (Direction: out)
+        \n The initial position will then be at ground height, with the axes aligned with gravity.
+        \n The positional tracking needs to be reset/enabled with this transform as a parameter (PositionalTrackingParameters.initial_world_transform).
+        :param floor_height_prior: Prior set to locate the floor plane depending on the known camera distance to the ground, expressed in the same unit as the ZED. (Direction: in)
+        \n If the prior is too far from the detected floor plane, the method will return ERROR_CODE.PLANE_NOT_FOUND.
+        :param world_orientation_prior: Prior set to locate the floor plane depending on the known camera orientation to the ground. (Direction: in)
+        \n If the prior is too far from the detected floor plane, the method will return ERROR_CODE.PLANE_NOT_FOUND.
+        :param floor_height_prior_tolerance: Prior height tolerance, absolute value. (Direction: in)
+        :return: ERROR_CODE.SUCCESS if the floor plane is found and matches the priors (if defined), otherwise ERROR_CODE.PLANE_NOT_FOUND.
+
+        .. note::
+            The reference frame is defined by the sl.RuntimeParameters (measure3D_reference_frame) given to the grab() method.
+
+        .. note::
+            The length unit is defined by sl.InitParameters (coordinate_units).
+
+        .. note::
+            With the ZED, the assumption is made that the floor plane is the dominant plane in the scene. The ZED Mini uses gravity as prior.
+
+        """
+        return ERROR_CODE()
+
+    def disable_spatial_mapping(self) -> None:
+        """
+        Disables the spatial mapping process.
+
+        The spatial mapping is immediately stopped.
+        \n If the mapping has been enabled, this method will automatically be called by close().
+        .. note::
+            This method frees the memory allocated for the spatial mapping; consequently, meshes and fused point clouds cannot be retrieved after this call.
+        """
+        pass
+
+    def enable_streaming(self, streaming_parameters = None) -> ERROR_CODE:
+        """
+        Creates a streaming pipeline.
+
+        :param streaming_parameters: A structure containing all the specific parameters for the streaming. Default: a preset of StreamingParameters.
+        :return: ERROR_CODE.SUCCESS if the streaming was successfully started.
+        :return: ERROR_CODE.INVALID_FUNCTION_CALL if open() was not successfully called before.
+        :return: ERROR_CODE.FAILURE if the streaming RTSP protocol was not able to start.
+        :return: ERROR_CODE.NO_GPU_COMPATIBLE if the streaming codec is not supported (in this case, use the H264 codec, which is supported on all NVIDIA GPUs the ZED SDK supports).
+
+        .. code-block:: text
+
+            import pyzed.sl as sl
+
+            def main() :
+                # Create a ZED camera object
+                zed = sl.Camera()
+
+                # Set initial parameters
+                init_params = sl.InitParameters()
+                init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+
+                # Open the camera
+                err = zed.open(init_params)
+                if err != sl.ERROR_CODE.SUCCESS :
+                    print(repr(err))
+                    exit(-1)
+
+                # Enable streaming
+                stream_params = sl.StreamingParameters()
+                stream_params.port = 30000
+                stream_params.bitrate = 8000
+                err = zed.enable_streaming(stream_params)
+                if err != sl.ERROR_CODE.SUCCESS :
+                    print(repr(err))
+                    exit(-1)
+
+                # Grab data during 500 frames
+                i = 0
+                while i < 500 :
+                    if zed.grab() == sl.ERROR_CODE.SUCCESS :
+                        i = i+1
+
+                zed.disable_streaming()
+                zed.close()
+                return 0
+
+            if __name__ == "__main__" :
+                main()
+        """
+        return ERROR_CODE()
+
+    def disable_streaming(self) -> None:
+        """
+        Disables the streaming initiated by enable_streaming().
+        .. note::
+            This method will automatically be called by close() if enable_streaming() was called.
+
+
+        See enable_streaming() for an example.
+        """
+        pass
+
+    def is_streaming_enabled(self) -> bool:
+        """
+        Tells if the streaming is running.
+        :return: True if the stream is running, False otherwise.
+        """
+        return bool()
+
+    def enable_recording(self, record) -> ERROR_CODE:
+        """
+        Creates an SVO file to be filled by enable_recording() and disable_recording().
+
+        \n SVO files are custom video files containing the un-rectified images from the camera along with some meta-data like timestamps or IMU orientation (if applicable).
+        \n They can be used to simulate a live ZED and test a sequence with various SDK parameters.
+        \n Depending on the application, various compression modes are available. See SVO_COMPRESSION_MODE.
+
+        :param record: A structure containing all the specific parameters for the recording such as filename and compression mode. Default: a preset of RecordingParameters.
+        :return: An ERROR_CODE that defines if the SVO file was successfully created and can be filled with images.
+
+        .. warning:: This method can be called multiple times during a camera lifetime, but if **video_filename** already exists, the file will be erased.
+
+
+        .. code-block:: text
+
+            import pyzed.sl as sl
+
+            def main() :
+                # Create a ZED camera object
+                zed = sl.Camera()
+                # Set initial parameters
+                init_params = sl.InitParameters()
+                init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode (default fps: 60)
+                init_params.coordinate_units = sl.UNIT.METER # Set units in meters
+                # Open the camera
+                err = zed.open(init_params)
+                if (err != sl.ERROR_CODE.SUCCESS):
+                    print(repr(err))
+                    exit(-1)
+
+                # Enable video recording
+                record_params = sl.RecordingParameters("myVideoFile.svo")
+                err = zed.enable_recording(record_params)
+                if (err != sl.ERROR_CODE.SUCCESS):
+                    print(repr(err))
+                    exit(-1)
+
+                # Grab data during 500 frames
+                i = 0
+                while i < 500 :
+                    # Grab a new frame
+                    if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                        # Record the grabbed frame in the video file
+                        i = i + 1
+
+                zed.disable_recording()
+                print("Video has been saved ...")
+                zed.close()
+                return 0
+
+            if __name__ == "__main__" :
+                main()
+        """
+        return ERROR_CODE()
+
+    def disable_recording(self) -> None:
+        """
+        Disables the recording initiated by enable_recording() and closes the generated file.
+
+        .. note::
+            This method will automatically be called by close() if enable_recording() was called.
+
+
+        See enable_recording() for an example.
+        """
+        pass
+
+    def get_recording_status(self) -> RecordingStatus:
+        """
+        Get the recording information.
+        :return: The recording state structure. For more details, see RecordingStatus.
+        """
+        return RecordingStatus()
+
+    def pause_recording(self, value = True) -> None:
+        """
+        Pauses or resumes the recording.
+        :param value: If True, the recording is paused. If False, the recording is resumed.
+        """
+        pass
+
+    def get_recording_parameters(self) -> RecordingParameters:
+        """
+        Returns the RecordingParameters used.
+
+        It corresponds to the structure given as argument to the enable_recording() method.
+        :return: RecordingParameters containing the parameters used for recording initialization.
+        """
+        return RecordingParameters()
+
+    def get_health_status(self) -> HealthStatus:
+        """
+        Get the Health information.
+        :return: The health state structure. For more details, see HealthStatus.
+        """
+        return HealthStatus()
+
+    def get_retrieve_image_resolution(self, resolution = None) -> Resolution:
+        """
+        Get the resolution used when retrieving an image.
+        :param resolution: The requested retrieve resolution.
+        :return: The Resolution actually used by retrieve_image().
+        """
+        return Resolution()
+
+    def get_retrieve_measure_resolution(self, resolution = None) -> Resolution:
+        """
+        Get the resolution used when retrieving a measure.
+        :param resolution: The requested retrieve resolution.
+        :return: The Resolution actually used by retrieve_measure().
+        """
+        return Resolution()
+
+    def enable_object_detection(self, object_detection_parameters = None) -> ERROR_CODE:
+        """
+        Initializes and starts the object detection module.
+
+        The object detection module currently supports multiple Stereolabs models for different purposes: "MULTI_CLASS", "PERSON_HEAD".
+        \n The full list of models is available through OBJECT_DETECTION_MODEL and the full list of detectable objects is available through OBJECT_CLASS and OBJECT_SUBCLASS.
+
+        \n Detected objects can be retrieved using the retrieve_objects() method.
+
+        \n Alternatively, the object detection module supports custom classes of objects with OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS (see ingest_custom_box_objects() or ingest_custom_mask_objects())
+        or OBJECT_DETECTION_MODEL.CUSTOM_YOLOLIKE_BOX_OBJECTS (see ObjectDetectionParameters.custom_onnx_file).
+
+        \n Detected custom objects can be retrieved using the retrieve_custom_objects() method.
+
+        .. note::
+            - **This Deep Learning detection module is not available for MODEL.ZED cameras.**
+
+        .. note::
+            - This feature uses AI to locate objects and requires a powerful GPU. A GPU with at least 3GB of memory is recommended.
+
+
+        :param object_detection_parameters: A structure containing all the specific parameters for the object detection. Default: a preset of ObjectDetectionParameters.
+        :return: ERROR_CODE.SUCCESS if everything went fine.
+        :return: ERROR_CODE.OBJECT_DETECTION_NOT_AVAILABLE if the AI model is missing or corrupted. In this case, the SDK needs to be reinstalled.
+        :return: ERROR_CODE.OBJECT_DETECTION_MODULE_NOT_COMPATIBLE_WITH_CAMERA if the camera used does not have an IMU (MODEL.ZED).
+        :return: ERROR_CODE.SENSORS_NOT_DETECTED if the camera model is correct (not MODEL.ZED) but the IMU is missing. It probably happens because InitParameters.sensors_required was set to False and the IMU has not been found.
+        :return: ERROR_CODE.INVALID_FUNCTION_CALL if one of the **object_detection_parameters** parameters is not compatible with other modules' parameters (for example, **depth_mode** has been set to DEPTH_MODE.NONE).
+        :return: ERROR_CODE.FAILURE otherwise.
+
+        .. 
note:: + The IMU gives the gravity vector that helps in the 3D box localization. Therefore the object detection module is not available for the MODEL.ZED models. + + + .. code-block:: text + + import pyzed.sl as sl + + def main(): + # Create a ZED camera object + zed = sl.Camera() + + # Open the camera + err = zed.open() + if err != sl.ERROR_CODE.SUCCESS: + print("Opening camera error:", repr(err)) + exit(-1) + + # Enable position tracking (mandatory for object detection) + tracking_params = sl.PositionalTrackingParameters() + err = zed.enable_positional_tracking(tracking_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Positional Tracking error:", repr(err)) + exit(-1) + + # Set the object detection parameters + object_detection_params = sl.ObjectDetectionParameters() + + # Enable the object detection + err = zed.enable_object_detection(object_detection_params) + if err != sl.ERROR_CODE.SUCCESS: + print("Enabling Object Detection error:", repr(err)) + exit(-1) + + # Grab an image and detect objects on it + objects = sl.Objects() + while True: + if zed.grab() == sl.ERROR_CODE.SUCCESS: + zed.retrieve_objects(objects) + print(len(objects.object_list), "objects detected") + # Use the objects in your application + + # Close the camera + zed.disable_object_detection() + zed.close() + + if __name__ == "__main__": + main() + """ + return ERROR_CODE() + + def disable_object_detection(self, instance_module_id = 0, force_disable_all_instances = False) -> None: + """ + Disables the object detection process. + + The object detection module immediately stops and frees its memory allocations. + + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :param force_disable_all_instances: Should disable all instances of the object detection module or just **instance_module_id**. + + .. note:: + If the object detection has been enabled, this method will automatically be called by close(). + """ + pass + + def set_object_detection_runtime_parameters(self, object_detection_parameters, instance_module_id = 0) -> ERROR_CODE: + """ + Set the object detection runtime parameters + """ + return ERROR_CODE() + + def set_custom_object_detection_runtime_parameters(self, custom_object_detection_parameters, instance_module_id = 0) -> ERROR_CODE: + """ + Set the custom object detection runtime parameters + """ + return ERROR_CODE() + + def retrieve_objects(self, py_objects, py_object_detection_parameters = None, instance_module_id = 0) -> ERROR_CODE: + """ + Retrieve objects detected by the object detection module. + + This method returns the result of the object detection, whether the module is running synchronously or asynchronously. + + - **Asynchronous:** this method immediately returns the last objects detected. If the current detection isn't done, the objects from the last detection will be returned, and Objects.is_new will be set to False. + - **Synchronous:** this method executes detection and waits for it to finish before returning the detected objects. + + It is recommended to keep the same Objects object as the input of all calls to this method. This will enable the identification and tracking of every object detected. + + :param py_objects: The detected objects will be saved into this object. If the object already contains data from a previous detection, it will be updated, keeping a unique ID for the same person. 
(Direction: out)
+        :param py_object_detection_parameters: Object detection runtime settings, can be changed at each detection. In async mode, the parameters update is applied on the next iteration. If None, use the previously passed parameters. (Direction: in)
+        :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+        :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+        .. code-block:: text
+
+            objects = sl.Objects()
+            while True:
+                if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                    zed.retrieve_objects(objects)
+                    object_list = objects.object_list
+                    for i in range(len(object_list)):
+                        print(repr(object_list[i].label))
+        """
+        return ERROR_CODE()
+
+    def retrieve_custom_objects(self, py_objects, custom_object_detection_parameters = None, instance_module_id = 0) -> ERROR_CODE:
+        """
+        Retrieve custom objects detected by the object detection module.
+
+        If the object detection module is initialized with OBJECT_DETECTION_MODEL.CUSTOM_BOX_OBJECTS, the objects retrieved will be the ones from ingest_custom_box_objects() or ingest_custom_mask_objects().
+        If the object detection module is initialized with OBJECT_DETECTION_MODEL.CUSTOM_YOLOLIKE_BOX_OBJECTS, the objects retrieved will be the ones detected using the optimized ObjectDetectionParameters.custom_onnx_file model.
+
+        When running the detection internally, this method returns the result of the object detection, whether the module is running synchronously or asynchronously.
+
+        - **Asynchronous:** this method immediately returns the last objects detected. If the current detection isn't done, the objects from the last detection will be returned, and Objects.is_new will be set to False.
+        - **Synchronous:** this method executes detection and waits for it to finish before returning the detected objects.
+
+        It is recommended to keep the same Objects object as the input of all calls to this method. This will enable the identification and tracking of every object detected.
+
+        :param py_objects: The detected objects will be saved into this object. If the object already contains data from a previous detection, it will be updated, keeping a unique ID for the same person.
+        :param custom_object_detection_parameters: Custom object detection runtime settings, can be changed at each detection. In async mode, the parameters update is applied on the next iteration. If None, use the previously passed parameters.
+        :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time.
+        :return: ERROR_CODE.SUCCESS if everything went fine, ERROR_CODE.FAILURE otherwise.
+
+        Alternatively, the set_custom_object_detection_runtime_parameters() and retrieve_objects() methods can be used.
+
+        .. code-block:: text
+
+            objects = sl.Objects()
+            while True:
+                if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                    zed.retrieve_custom_objects(objects)
+                    object_list = objects.object_list
+                    for i in range(len(object_list)):
+                        print(repr(object_list[i].label))
+        """
+        return ERROR_CODE()
+
+    def get_objects_batch(self, trajectories, instance_module_id = 0) -> ERROR_CODE:
+        """
+        Get a batch of detected objects.
+        .. warning:: This method needs to be called after retrieve_objects(), otherwise trajectories will be empty.
+        \n It is the retrieve_objects() method that ingests the current/live objects into the batching queue.
+ + :param trajectories: list of sl.ObjectsBatch that will be filled by the batching queue process. An empty list should be passed to the function + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine + :return: ERROR_CODE.INVALID_FUNCTION_CALL if batching module is not available (TensorRT!=7.1) or if object tracking was not enabled. + + .. note:: + Most of the time, the vector will be empty and will be filled every BatchParameters::latency. + + + .. code-block:: text + + objects = sl.Objects() # Unique Objects to be updated after each grab + while True: # Main loop + if zed.grab() == sl.ERROR_CODE.SUCCESS: # Grab an image from the camera + zed.retrieve_objects(objects) # Call retrieve_objects so that objects are ingested in the batching system + trajectories = [] # Create an empty list of trajectories + zed.get_objects_batch(trajectories) # Get batch of objects + print("Size of batch: {}".format(len(trajectories))) + """ + return ERROR_CODE() + + def ingest_custom_box_objects(self, objects_in, instance_module_id = 0) -> ERROR_CODE: + """ + Feed the 3D Object tracking function with your own 2D bounding boxes from your own detection algorithm. + :param objects_in: List of CustomBoxObjectData to feed the object detection. + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine. + .. note:: + The detection should be done on the current grabbed left image as the internal process will use all currently available data to extract 3D information and perform object tracking. + """ + return ERROR_CODE() + + def ingest_custom_mask_objects(self, objects_in, instance_module_id = 0) -> ERROR_CODE: + """ + Feed the 3D Object tracking function with your own 2D bounding boxes with masks from your own detection algorithm. + :param objects_in: List of CustomMaskObjectData to feed the object detection. + :param instance_module_id: Id of the object detection instance. Used when multiple instances of the object detection module are enabled at the same time. + :return: ERROR_CODE.SUCCESS if everything went fine. + .. note:: + The detection should be done on the current grabbed left image as the internal process will use all currently available data to extract 3D information and perform object tracking. + """ + return ERROR_CODE() + + def is_object_detection_enabled(self, instance_id = 0) -> bool: + """ + Tells if the object detection module is enabled. + """ + return bool() + + def get_sdk_version() -> str: + """ + Returns the version of the currently installed ZED SDK. + :return: The ZED SDK version as a string with the following format: MAJOR.MINOR.PATCH + + .. code-block:: text + + print(sl.Camera.get_sdk_version()) + """ + return str() + + def get_device_list() -> list[DeviceProperties]: + """ + List all the connected devices with their associated information. + + This method lists all the cameras available and provides their serial number, models and other information. + :return: The device properties for each connected camera. + """ + return list[DeviceProperties]() + + def get_streaming_device_list() -> list[StreamingProperties]: + """ + Lists all the streaming devices with their associated information. + + :return: The streaming properties for each connected camera. + .. 
warning:: This method takes around 2 seconds to make sure all network information has been captured. Make sure to run this method in a separate thread.
+        """
+        return list[StreamingProperties]()
+
+    def reboot(sn : int, full_reboot: bool =True) -> ERROR_CODE:
+        """
+        Performs a hardware reset of the ZED 2 and the ZED 2i.
+
+        :param sn: Serial number of the camera to reset, or 0 to reset the first camera detected.
+        :param full_reboot: Perform a full reboot (sensors and video modules) if True, otherwise only the video module will be rebooted.
+        :return: ERROR_CODE.SUCCESS if everything went fine.
+        :return: ERROR_CODE.CAMERA_NOT_DETECTED if no camera was detected.
+        :return: ERROR_CODE.FAILURE otherwise.
+
+        .. note::
+            This method only works for ZED 2, ZED 2i, and newer camera models.
+
+
+        .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting.
+        """
+        return ERROR_CODE()
+
+    def reboot_from_input(input_type: INPUT_TYPE) -> ERROR_CODE:
+        """
+        Performs a hardware reset of all devices matching the InputType.
+
+        :param input_type: Input type of the devices to reset.
+        :return: ERROR_CODE.SUCCESS if everything went fine.
+        :return: ERROR_CODE.CAMERA_NOT_DETECTED if no camera was detected.
+        :return: ERROR_CODE.FAILURE otherwise.
+        :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS for SVOs and streams.
+
+        .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting.
+        """
+        return ERROR_CODE()
+
+
+class COMM_TYPE(enum.Enum):
+    """
+    Lists the different types of communications available for the Fusion module.
+
+    | Enumerator | |
+    |:---:|:---:|
+    | LOCAL_NETWORK | The sender and receiver are on the same local network and communicate by RTP. The communication can be affected by the local network load. |
+    | INTRA_PROCESS | Both sender and receiver are declared by the same process and can be in different threads. This type of communication is optimized. |
+    """
+    LOCAL_NETWORK = enum.auto()
+    INTRA_PROCESS = enum.auto()
+    LAST = enum.auto()
+
+class FUSION_ERROR_CODE(enum.Enum):
+    """
+    Lists the types of error that can be raised by the Fusion.
+
+
+    | Enumerator | |
+    |:---:|:---:|
+    | GNSS_DATA_NEED_FIX | GNSS data needs a fix status in order to run fusion. |
+    | GNSS_DATA_COVARIANCE_MUST_VARY | Ingested covariance data must vary between ingests. |
+    | BODY_FORMAT_MISMATCH | The senders are using different body formats. Consider changing them. |
+    | MODULE_NOT_ENABLED | The following module was not enabled. |
+    | SOURCE_MISMATCH | Some sources are provided by SVO and others by LIVE stream. |
+    | CONNECTION_TIMED_OUT | Connection timed out. Unable to reach the sender. Verify the sender's IP/port. |
+    | MEMORY_ALREADY_USED | Intra-process shared memory allocation issue. Multiple connections to the same data. |
+    | INVALID_IP_ADDRESS | The provided IP address format is incorrect. Please provide the IP in the format 'a.b.c.d', where (a, b, c, d) are numbers between 0 and 255. |
+    | CONNECTION_ERROR | Something went wrong in the connection between the sender and receiver. |
+    | FAILURE | Standard code for unsuccessful behavior. |
+    | SUCCESS | Standard code for successful behavior. |
+    | FUSION_INCONSISTENT_FPS | Significant differences observed between the senders' FPS. |
+    | FUSION_FPS_TOO_LOW | At least one sender has an FPS lower than 10 FPS. |
+    | INVALID_TIMESTAMP | Problem detected with the ingested timestamp. Sample data will be ignored. |
+    | INVALID_COVARIANCE | Problem detected with the ingested covariance. Sample data will be ignored. |
+    | NO_NEW_DATA_AVAILABLE | All data from all sources has been consumed. No new data is available for processing. |
+    """
+    GNSS_DATA_NEED_FIX = enum.auto()
+    GNSS_DATA_COVARIANCE_MUST_VARY = enum.auto()
+    BODY_FORMAT_MISMATCH = enum.auto()
+    MODULE_NOT_ENABLED = enum.auto()
+    SOURCE_MISMATCH = enum.auto()
+    CONNECTION_TIMED_OUT = enum.auto()
+    MEMORY_ALREADY_USED = enum.auto()
+    INVALID_IP_ADDRESS = enum.auto()
+    FAILURE = enum.auto()
+    SUCCESS = enum.auto()
+    FUSION_INCONSISTENT_FPS = enum.auto()
+    FUSION_FPS_TOO_LOW = enum.auto()
+    INVALID_TIMESTAMP = enum.auto()
+    INVALID_COVARIANCE = enum.auto()
+    NO_NEW_DATA_AVAILABLE = enum.auto()
+    def __str__(self) -> str:
+        pass
+
+    def __repr__(self) -> str:
+        pass
+
+
+def _initialize_fusion_error_codes() -> None:
+    """
+    Lists the types of error that can be raised by the Fusion.
+
+
+    | Enumerator | |
+    |:---:|:---:|
+    | GNSS_DATA_NEED_FIX | GNSS data needs a fix status in order to run fusion. |
+    | GNSS_DATA_COVARIANCE_MUST_VARY | Ingested covariance data must vary between ingests. |
+    | BODY_FORMAT_MISMATCH | The senders are using different body formats. Consider changing them. |
+    | MODULE_NOT_ENABLED | The following module was not enabled. |
+    | SOURCE_MISMATCH | Some sources are provided by SVO and others by LIVE stream. |
+    | CONNECTION_TIMED_OUT | Connection timed out. Unable to reach the sender. Verify the sender's IP/port. |
+    | MEMORY_ALREADY_USED | Intra-process shared memory allocation issue. Multiple connections to the same data. |
+    | INVALID_IP_ADDRESS | The provided IP address format is incorrect. Please provide the IP in the format 'a.b.c.d', where (a, b, c, d) are numbers between 0 and 255. |
+    | CONNECTION_ERROR | Something went wrong in the connection between the sender and receiver. |
+    | FAILURE | Standard code for unsuccessful behavior. |
+    | SUCCESS | Standard code for successful behavior. |
+    | FUSION_INCONSISTENT_FPS | Significant differences observed between the senders' FPS. |
+    | FUSION_FPS_TOO_LOW | At least one sender has an FPS lower than 10 FPS. |
+    | INVALID_TIMESTAMP | Problem detected with the ingested timestamp. Sample data will be ignored. |
+    | INVALID_COVARIANCE | Problem detected with the ingested covariance. Sample data will be ignored. |
+    | NO_NEW_DATA_AVAILABLE | All data from all sources has been consumed. No new data is available for processing. |
+    """
+    pass
+
+class SENDER_ERROR_CODE(enum.Enum):
+    """
+    Lists the types of error that can be raised during the Fusion by senders.
+
+
+    | Enumerator | |
+    |:---:|:---:|
+    | DISCONNECTED | The sender has been disconnected. |
+    | SUCCESS | Standard code for successful behavior. |
+    | GRAB_ERROR | The sender encountered a grab error. |
+    | INCONSISTENT_FPS | The sender does not run with a constant frame rate. |
+    | FPS_TOO_LOW | The frame rate of the sender is lower than 10 FPS. |
+    """
+    DISCONNECTED = enum.auto()
+    SUCCESS = enum.auto()
+    GRAB_ERROR = enum.auto()
+    INCONSISTENT_FPS = enum.auto()
+    FPS_TOO_LOW = enum.auto()
+    def __str__(self) -> str:
+        pass
+
+    def __repr__(self) -> str:
+        pass
+
+
+class POSITION_TYPE(enum.Enum):
+    """
+    Lists the types of possible position outputs.
+
+
+    | Enumerator | |
+    |:---:|:---:|
+    | RAW | The output position will be the raw position data. |
+    | FUSION | The output position will be the fused position projected into the requested camera repository. |
+    """
+    RAW = enum.auto()
+    FUSION = enum.auto()
+    LAST = enum.auto()
+
+class FUSION_REFERENCE_FRAME(enum.Enum):
+    """
+    Enum to define the reference frame of the fusion SDK.
+
+
+    | Enumerator | |
+    |:---:|:---:|
+    | WORLD | The world frame is the reference frame of the world according to the fused positional tracking. |
+    | BASELINK | The base link frame is the reference frame where the camera calibration is given. |
+    """
+    WORLD = enum.auto()
+    BASELINK = enum.auto()
+
+class CommunicationParameters:
+    """
+    Holds the communication parameters used to configure the connection between senders and the receiver.
+
+    The default constructor sets all the parameters to their default and optimized values.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def comm_type(self) -> COMM_TYPE:
+        """
+        The type of the used communication.
+        """
+        return COMM_TYPE()
+
+    @property
+    def port(self) -> int:
+        """
+        The port used for streaming the data.
+        """
+        return int()
+
+    @property
+    def ip_address(self) -> str:
+        """
+        The IP address of the sender.
+        """
+        return str()
+
+    def __dealloc__(self) -> None:
+        """
+        Destructor.
+        """
+        pass
+
+    def set_for_shared_memory(self) -> None:
+        """
+        Set up the communication to use shared memory for an intra-process workflow, with senders and receiver in different threads.
+        """
+        pass
+
+    def set_for_local_network(self, port : int, ip : str = "") -> None:
+        """
+        Set up the local network connection information.
+        """
+        pass
+
+
+class FusionConfiguration:
+    """
+    Useful struct to store the Fusion configuration, can be read from / written to a JSON file.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def pose(self) -> Transform:
+        """
+        The WORLD Pose of the camera for Fusion in the unit and coordinate system defined by the user in the InitFusionParameters.
+        """
+        return Transform()
+
+    @pose.setter
+    def pose(self, pose: Any) -> None:
+        pass
+
+    @property
+    def serial_number(self) -> int:
+        """
+        The serial number of the used ZED camera.
+        """
+        return int()
+
+    @serial_number.setter
+    def serial_number(self, serial_number: Any) -> None:
+        pass
+
+    @property
+    def communication_parameters(self) -> CommunicationParameters:
+        """
+        The communication parameters to connect this camera to the Fusion.
+        """
+        return CommunicationParameters()
+
+    @communication_parameters.setter
+    def communication_parameters(self, communication_parameters: Any) -> None:
+        pass
+
+    @property
+    def override_gravity(self) -> bool:
+        """
+        Indicates the behavior of the fusion with respect to the given calibration pose.
+        - If True: The calibration pose directly specifies the camera's absolute pose relative to a global reference frame.
+        - If False: The calibration pose (Pose_rel) is defined relative to the camera's IMU rotational pose. To determine the true absolute position, the Fusion process will compute Pose_abs = Pose_rel * Rot_IMU_camera.
+        """
+        return bool()
+
+    @override_gravity.setter
+    def override_gravity(self, override_gravity: Any) -> None:
+        pass
+
+    @property
+    def input_type(self) -> InputType:
+        """
+        The input type for the current camera.
+        """
+        return InputType()
+
+    @input_type.setter
+    def input_type(self, input_type: Any) -> None:
+        pass
+
+
+def read_fusion_configuration_file_from_serial(json_config_filename : str, serial_number : int, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> FusionConfiguration:
+    """
+    Read a configuration JSON file to configure a fusion process.
+    :param json_config_filename: The name of the JSON file containing the configuration.
+    :param serial_number: The serial number of the ZED Camera you want to retrieve.
+    :param coord_system: The COORDINATE_SYSTEM in which you want the World Pose to be.
+    :param unit: The UNIT in which you want the World Pose to be.
+
+    :return: A FusionConfiguration for the requested camera.
+    .. note::
+        Empty if no data was found for the requested camera.
+    """
+    return FusionConfiguration()
+
+def read_fusion_configuration_file(json_config_filename : str, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> list[FusionConfiguration]:
+    """
+    Read a configuration JSON file to configure a fusion process.
+    :param json_config_filename: The name of the JSON file containing the configuration.
+    :param coord_system: The COORDINATE_SYSTEM in which you want the World Pose to be.
+    :param unit: The UNIT in which you want the World Pose to be.
+
+    :return: A list of FusionConfiguration for all the cameras present in the file.
+    .. note::
+        Empty if no data was found in the file.
+    """
+    return list[FusionConfiguration]()
+
+def read_fusion_configuration_json(fusion_configuration : dict, coord_system : COORDINATE_SYSTEM, unit: UNIT) -> list[FusionConfiguration]:
+    """
+    Read a configuration JSON to configure a fusion process.
+    :param fusion_configuration: The JSON containing the configuration.
+    :param coord_system: The COORDINATE_SYSTEM in which you want the World Pose to be.
+    :param unit: The UNIT in which you want the World Pose to be.
+
+    :return: A list of FusionConfiguration for all the cameras present in the JSON.
+    .. note::
+        Empty if no data was found in the JSON.
+    """
+    return list[FusionConfiguration]()
+
+def write_configuration_file(json_config_filename : str, fusion_configurations : list, coord_sys : COORDINATE_SYSTEM, unit: UNIT) -> None:
+    """
+    Write a configuration JSON file to configure a fusion process.
+    :param json_config_filename: The name of the JSON that will contain the information.
+    :param fusion_configurations: A list of FusionConfiguration listing all the camera configurations.
+    :param coord_sys: The COORDINATE_SYSTEM in which the World Pose is.
+    :param unit: The UNIT in which the World Pose is.
+    """
+    pass
+
+class GNSSCalibrationParameters:
+    """
+    Holds the options used for calibrating GNSS / VIO.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def target_translation_uncertainty(self) -> float:
+        """
+        This parameter defines the target translation uncertainty at which the calibration process between GNSS and VIO concludes.
+
+        Default: 10e-2 (10 centimeters)
+        """
+        return float()
+
+    @target_translation_uncertainty.setter
+    def target_translation_uncertainty(self, target_translation_uncertainty: Any) -> None:
+        pass
+
+    @gnss_antenna_position.setter
+    def gnss_antenna_position(self, gnss_antenna_position: Any) -> None:
+        pass
+
+    @property
+    def enable_reinitialization(self) -> bool:
+        """
+        This parameter determines whether reinitialization should be performed between GNSS and VIO fusion when a significant disparity is detected between GNSS data and the current fusion data.
+        It becomes particularly crucial during prolonged GNSS signal loss scenarios.
+
+        Default: True
+        """
+        return bool()
+
+    @enable_reinitialization.setter
+    def enable_reinitialization(self, enable_reinitialization: Any) -> None:
+        pass
+
+    @property
+    def gnss_vio_reinit_threshold(self) -> float:
+        """
+        This parameter determines the threshold for GNSS/VIO reinitialization.
+        If the fused position deviates out of the region defined by the product of the GNSS covariance and the gnss_vio_reinit_threshold, a reinitialization will be triggered.
+
+        Default: 5
+        """
+        return float()
+
+    @gnss_vio_reinit_threshold.setter
+    def gnss_vio_reinit_threshold(self, gnss_vio_reinit_threshold: Any) -> None:
+        pass
+
+    @property
+    def target_yaw_uncertainty(self) -> float:
+        """
+        This parameter defines the target yaw uncertainty at which the calibration process between GNSS and VIO concludes.
+        This parameter is expressed in radians.
+
+        Default: 0.1 radians
+        """
+        return float()
+
+    @target_yaw_uncertainty.setter
+    def target_yaw_uncertainty(self, target_yaw_uncertainty: Any) -> None:
+        pass
+
+    @property
+    def enable_translation_uncertainty_target(self) -> bool:
+        """
+        When this parameter is enabled (set to True), the calibration process between GNSS and VIO accounts for the uncertainty in the determined translation, thereby facilitating the termination of the calibration.
+        The maximum allowable uncertainty is controlled by the 'target_translation_uncertainty' parameter.
+
+        Default: False
+        """
+        return bool()
+
+    @enable_translation_uncertainty_target.setter
+    def enable_translation_uncertainty_target(self, enable_translation_uncertainty_target: Any) -> None:
+        pass
+
+    @property
+    def enable_rolling_calibration(self) -> bool:
+        """
+        If this parameter is set to True, the fusion algorithm will use a rough VIO / GNSS calibration at first and then refine it. This allows you to quickly get a fused position.
+
+        Default: True
+        """
+        return bool()
+
+    @enable_rolling_calibration.setter
+    def enable_rolling_calibration(self, enable_rolling_calibration: Any) -> None:
+        pass
+
+    @property
+    def gnss_antenna_position(self) -> np.array[float]:
+        """
+        Define a transform between the GNSS antenna and the camera system for the VIO / GNSS calibration.
+
+        Default: [0, 0, 0]. This position can be refined by the calibration if enabled.
+        """
+        return np.array[float]()
+
+
+class PositionalTrackingFusionParameters:
+    """
+    Holds the options used for initializing the positional tracking fusion module.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def base_footprint_to_world_transform(self) -> Transform:
+        """
+        Position and orientation of the base footprint with respect to the user world.
+        This transform represents a basis change from the base footprint coordinate frame to the user world coordinate frame.
+        """
+        return Transform()
+
+    @base_footprint_to_world_transform.setter
+    def base_footprint_to_world_transform(self, base_footprint_to_world_transform: Any) -> None:
+        pass
+
+    @property
+    def set_gravity_as_origin(self) -> bool:
+        """
+        Whether to override 2 of the 3 rotations from base_footprint_to_world_transform using the IMU gravity.
+
+        Default: False
+        """
+        return bool()
+
+    @set_gravity_as_origin.setter
+    def set_gravity_as_origin(self, set_gravity_as_origin: Any) -> None:
+        pass
+
+    @property
+    def enable_GNSS_fusion(self) -> bool:
+        """
+        Whether to enable GNSS fusion for positional tracking.
+
+        Default: False
+        """
+        return bool()
+
+    @enable_GNSS_fusion.setter
+    def enable_GNSS_fusion(self, enable_GNSS_fusion: Any) -> None:
+        pass
+
+    @property
+    def tracking_camera_id(self) -> CameraIdentifier:
+        """
+        ID of the camera used for positional tracking. If not specified, will use the first camera called with the subscribe() method.
+        """
+        return CameraIdentifier()
+
+    @tracking_camera_id.setter
+    def tracking_camera_id(self, tracking_camera_id: Any) -> None:
+        pass
+
+    @property
+    def gnss_calibration_parameters(self) -> GNSSCalibrationParameters:
+        """
+        Controls the VIO / GNSS calibration process.
+        """
+        return GNSSCalibrationParameters()
+
+    @gnss_calibration_parameters.setter
+    def gnss_calibration_parameters(self, gnss_calibration_parameters: Any) -> None:
+        pass
+
+    @property
+    def base_footprint_to_baselink_transform(self) -> Transform:
+        """
+        Position and orientation of the base footprint with respect to the baselink.
+        This transform represents a basis change from the base footprint coordinate frame to the baselink coordinate frame.
+        """
+        return Transform()
+
+    @base_footprint_to_baselink_transform.setter
+    def base_footprint_to_baselink_transform(self, base_footprint_to_baselink_transform: Any) -> None:
+        pass
+
+
+class SpatialMappingFusionParameters:
+    """
+    Holds the options used for initializing the spatial mapping fusion module.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def range_meter(self) -> float:
+        """
+        Depth range in meters.
+
+        Can be different from the value set by sl.InitParameters.depth_maximum_distance.
+
+        Default: 0. In this case, the range is computed from resolution_meter
+        and from the current internal parameters to fit your application.
+        """
+        return float()
+
+    @range_meter.setter
+    def range_meter(self, range_meter: Any) -> None:
+        pass
+
+    @property
+    def decay(self) -> float:
+        """
+        Adjust the weighting factor for the current depth during the integration process.
+
+        Setting it to 0 discards all previous data and solely integrates the current depth.
+
+        Default: 1, which results in the complete integration and fusion of the current depth with the previously integrated depth.
+        """
+        return float()
+
+    @decay.setter
+    def decay(self, decay: Any) -> None:
+        pass
+
+    @property
+    def enable_forget_past(self) -> bool:
+        """
+        Default: False
+        """
+        return bool()
+
+    @enable_forget_past.setter
+    def enable_forget_past(self, enable_forget_past: Any) -> None:
+        pass
+
+    @property
+    def map_type(self) -> SPATIAL_MAP_TYPE:
+        """
+        The type of spatial map to be created. This dictates the format that will be used for the mapping (e.g. mesh, point cloud). See SPATIAL_MAP_TYPE.
+
+        Default: SPATIAL_MAP_TYPE.MESH.
+        """
+        return SPATIAL_MAP_TYPE()
+
+    @map_type.setter
+    def map_type(self, map_type: Any) -> None:
+        pass
+
+    @property
+    def use_chunk_only(self) -> bool:
+        """
+        Set to False if you want to ensure consistency between the mesh and its inner chunk data.
+
+        .. note::
+            Updating the mesh is time-consuming. Setting this to True results in better performance.
+
+
+        Default: False
+        """
+        return bool()
+
+    @use_chunk_only.setter
+    def use_chunk_only(self, use_chunk_only: Any) -> None:
+        pass
+
+    @property
+    def stability_counter(self) -> int:
+        """
+        Control the integration rate of the current depth into the mapping process.
+        This parameter controls how many times a stable 3D point should be seen before it is integrated into the spatial mapping.
+        Default: 0. In this case, the stability counter is defined from the mesh resolution: the higher the resolution, the higher the stability counter.
+        """
+        return int()
+
+    @stability_counter.setter
+    def stability_counter(self, stability_counter: Any) -> None:
+        pass
+
+    @property
+    def disparity_std(self) -> float:
+        """
+        Control the disparity noise (standard deviation) in pixels. Set a very small value (<0.1) if the depth map of the scene is accurate. Set a big value (>0.5) if the depth map is noisy.
+
+        Default: 0.3
+        """
+        return float()
+
+    @disparity_std.setter
+    def disparity_std(self, disparity_std: Any) -> None:
+        pass
+
+    @property
+    def resolution_meter(self) -> float:
+        """
+        Spatial mapping resolution in meters.
+
+        Default: 0.05 m
+        """
+        return float()
+
+    @resolution_meter.setter
+    def resolution_meter(self, resolution_meter: Any) -> None:
+        pass
+
+    @property
+    def max_memory_usage(self) -> int:
+        """
+        The maximum CPU memory (in MB) allocated for the meshing process.
+
+        Default: 2048 MB
+        """
+        return int()
+
+    @max_memory_usage.setter
+    def max_memory_usage(self, max_memory_usage: Any) -> None:
+        pass
+
+
+class BodyTrackingFusionParameters:
+    """
+    Holds the options used to initialize the body tracking module of the Fusion.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def enable_body_fitting(self) -> bool:
+        """
+        Defines if the body fitting will be applied.
+
+        Default: False
+        .. note::
+            If you enable it and the camera provides data as BODY_18, the fused body format will be BODY_34.
+        """
+        return bool()
+
+    @enable_body_fitting.setter
+    def enable_body_fitting(self, enable_body_fitting: Any) -> None:
+        pass
+
+    @property
+    def enable_tracking(self) -> bool:
+        """
+        Defines if the object detection will track objects across the image flow.
+
+        Default: True
+        """
+        return bool()
+
+    @enable_tracking.setter
+    def enable_tracking(self, enable_tracking: Any) -> None:
+        pass
+
+
+class BodyTrackingFusionRuntimeParameters:
+    """
+    Holds the options used to change the behavior of the body tracking module at runtime.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def skeleton_minimum_allowed_keypoints(self) -> int:
+        """
+        If the fused skeleton has fewer than skeleton_minimum_allowed_keypoints keypoints, it will be discarded.
+
+        Default: -1.
+        """
+        return int()
+
+    @skeleton_minimum_allowed_keypoints.setter
+    def skeleton_minimum_allowed_keypoints(self, skeleton_minimum_allowed_keypoints: Any) -> None:
+        pass
+
+    @property
+    def skeleton_smoothing(self) -> float:
+        """
+        This value controls the smoothing of the tracked or fitted fused skeleton.
+
+        It ranges from 0 (low smoothing) to 1 (high smoothing).
+        \n Default: 0.
+        """
+        return float()
+
+    @skeleton_smoothing.setter
+    def skeleton_smoothing(self, skeleton_smoothing: Any) -> None:
+        pass
+
+    @property
+    def skeleton_minimum_allowed_camera(self) -> int:
+        """
+        If a skeleton was detected in fewer than skeleton_minimum_allowed_camera cameras, it will be discarded.
+
+        Default: -1.
+        """
+        return int()
+
+    @skeleton_minimum_allowed_camera.setter
+    def skeleton_minimum_allowed_camera(self, skeleton_minimum_allowed_camera: Any) -> None:
+        pass
+
+
+class ObjectDetectionFusionParameters:
+    """
+    Holds the options used to initialize the object detection module of the Fusion.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def enable_tracking(self) -> bool:
+        """
+        Defines if the object detection will track objects across the image flow.
+
+        Default: True.
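+
+        A minimal configuration sketch (names taken from this stub; not from the official documentation):
+
+        .. code-block:: text
+
+            od_fusion_params = sl.ObjectDetectionFusionParameters()
+            od_fusion_params.enable_tracking = True # Track fused objects across the image flow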
+        """
+        return bool()
+
+    @enable_tracking.setter
+    def enable_tracking(self, enable_tracking: Any) -> None:
+        pass
+
+
+class CameraMetrics:
+    """
+    Holds the metrics of a sender in the fusion process.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def received_fps(self) -> float:
+        """
+        FPS of the received data.
+        """
+        return float()
+
+    @received_fps.setter
+    def received_fps(self, received_fps: Any) -> None:
+        pass
+
+    @property
+    def ratio_detection(self) -> float:
+        """
+        Skeleton detection percentage during the last second.
+        Number of frames with at least one detection / number of frames, over the last second.
+        A low value means few detections occurred lately for this sender.
+        """
+        return float()
+
+    @ratio_detection.setter
+    def ratio_detection(self, ratio_detection: Any) -> None:
+        pass
+
+    @property
+    def is_present(self) -> bool:
+        """
+        Set to False if there is no data in this batch of metrics.
+        """
+        return bool()
+
+    @is_present.setter
+    def is_present(self, is_present: Any) -> None:
+        pass
+
+    @property
+    def received_latency(self) -> float:
+        """
+        Latency (in seconds) of the received data.
+        Timestamp difference between the time when the data are sent and the time they are received (mostly introduced when using the local network workflow).
+        """
+        return float()
+
+    @received_latency.setter
+    def received_latency(self, received_latency: Any) -> None:
+        pass
+
+    @property
+    def delta_ts(self) -> float:
+        """
+        Average data acquisition timestamp difference.
+        Average standard deviation of the sender's period since the start.
+        """
+        return float()
+
+    @delta_ts.setter
+    def delta_ts(self, delta_ts: Any) -> None:
+        pass
+
+    @property
+    def synced_latency(self) -> float:
+        """
+        Latency (in seconds) after Fusion synchronization.
+        Difference between the timestamp of the data received and the timestamp at the end of the Fusion synchronization.
+        """
+        return float()
+
+    @synced_latency.setter
+    def synced_latency(self, synced_latency: Any) -> None:
+        pass
+
+
+class FusionMetrics:
+    """
+    Holds the metrics of the fusion process.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def mean_stdev_between_camera(self) -> float:
+        """
+        Standard deviation of the fused data timestamps; the lower the better.
+        """
+        return float()
+
+    @mean_stdev_between_camera.setter
+    def mean_stdev_between_camera(self, mean_stdev_between_camera: Any) -> None:
+        pass
+
+    @property
+    def camera_individual_stats(self) -> dict:
+        """
+        Sender metrics.
+        """
+        return {}
+
+    @camera_individual_stats.setter
+    def camera_individual_stats(self, camera_individual_stats: Any) -> None:
+        pass
+
+    @property
+    def mean_camera_fused(self) -> float:
+        """
+        Mean number of cameras that provided data during the past second.
+        """
+        return float()
+
+    @mean_camera_fused.setter
+    def mean_camera_fused(self, mean_camera_fused: Any) -> None:
+        pass
+
+    def reset(self) -> None:
+        """
+        Reset the current metrics.
+        """
+        pass
+
+
+class CameraIdentifier:
+    """
+    Used to identify a specific camera in the Fusion API.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def serial_number(self) -> int:
+        return int()
+
+    @serial_number.setter
+    def serial_number(self, serial_number: Any) -> None:
+        pass
+
+
+class ECEF:
+    """
+    Represents a world position in ECEF format.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def z(self) -> double:
+        """
+        z coordinate of ECEF.
+        """
+        return double()
+
+    @z.setter
+    def z(self, z: Any) -> None:
+        pass
+
+    @property
+    def y(self) -> double:
+        """
+        y coordinate of ECEF.
+        """
+        return double()
+
+    @y.setter
+    def y(self, y: Any) -> None:
+        pass
+
+    @property
+    def x(self) -> double:
+        """
+        x coordinate of ECEF.
+        """
+        return double()
+
+    @x.setter
+    def x(self, x: Any) -> None:
+        pass
+
+
+class LatLng:
+    """
+    Represents a world position in LatLng format.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    def get_latitude(self, in_radian : bool = True) -> None:
+        """
+        Get the latitude coordinate.
+
+        :param in_radian: Whether the output should be expressed in radians or degrees.
+        :return: Latitude in radians or degrees depending on the in_radian parameter.
+        """
+        pass
+
+    def get_longitude(self, in_radian = True) -> None:
+        """
+        Get the longitude coordinate.
+
+        :param in_radian: Whether the output should be expressed in radians or degrees.
+        :return: Longitude in radians or degrees depending on the in_radian parameter.
+        """
+        pass
+
+    def get_altitude(self) -> None:
+        """
+        Get the altitude coordinate.
+
+        :return: Altitude coordinate in meters.
+        """
+        pass
+
+    def get_coordinates(self, in_radian = True) -> None:
+        """
+        Get the coordinates in radians (default) or in degrees.
+
+        :param in_radian: Whether the output should be expressed in radians or degrees.
+        :return: The latitude, longitude, and altitude coordinates.
+        """
+        pass
+
+    def set_coordinates(self, latitude: double, longitude: double, altitude: double, in_radian = True) -> None:
+        """
+        Set the coordinates in radians (default) or in degrees.
+
+        :param latitude: Latitude coordinate.
+        :param longitude: Longitude coordinate.
+        :param altitude: Altitude coordinate.
+        :param in_radian: Whether the inputs are expressed in radians or degrees.
+        """
+        pass
+
+
+class UTM:
+    """
+    Represents a world position in UTM format.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def easting(self) -> double:
+        """
+        Easting coordinate.
+        """
+        return double()
+
+    @easting.setter
+    def easting(self, easting: Any) -> None:
+        pass
+
+    @property
+    def gamma(self) -> double:
+        """
+        Gamma coordinate.
+        """
+        return double()
+
+    @gamma.setter
+    def gamma(self, gamma: Any) -> None:
+        pass
+
+    @property
+    def UTM_zone(self) -> str:
+        """
+        UTM zone of the coordinate.
+        """
+        return str()
+
+    @UTM_zone.setter
+    def UTM_zone(self, UTM_zone: Any) -> None:
+        pass
+
+    @property
+    def northing(self) -> double:
+        """
+        Northing coordinate.
+        """
+        return double()
+
+    @northing.setter
+    def northing(self, northing: Any) -> None:
+        pass
+
+
+class ENU:
+    """
+    Represents a world position in ENU format.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    @property
+    def east(self) -> double:
+        """
+        East parameter.
+        """
+        return double()
+
+    @east.setter
+    def east(self, east: Any) -> None:
+        pass
+
+    @property
+    def up(self) -> double:
+        """
+        Up parameter.
+        """
+        return double()
+
+    @up.setter
+    def up(self, up: Any) -> None:
+        pass
+
+    @property
+    def north(self) -> double:
+        """
+        North parameter.
+        """
+        return double()
+
+    @north.setter
+    def north(self, north: Any) -> None:
+        pass
+
+
+class GeoConverter:
+    """
+    Purely static class for Geo functions.
+    """
+    def __init__(self, *args, **kwargs) -> None: ...
+
+    def ecef2latlng(input: ECEF) -> LatLng:
+        """
+        Convert ECEF coordinates to Lat/Long coordinates.
+        """
+        return LatLng()
+
+    def ecef2utm(input: ECEF) -> UTM:
+        """
+        Convert ECEF coordinates to UTM coordinates.
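+
+        A hypothetical conversion sketch (identifiers taken from this stub; the coordinate values are illustrative only):
+
+        .. code-block:: text
+
+            latlng = sl.LatLng()
+            latlng.set_coordinates(48.8566, 2.3522, 35.0, in_radian=False) # Degrees
+            ecef = sl.GeoConverter.latlng2ecef(latlng)
+            utm = sl.GeoConverter.ecef2utm(ecef)
+            print(utm.easting, utm.northing, utm.UTM_zone)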
+ """ + return UTM() + + def latlng2ecef(input: LatLng) -> ECEF: + """ + Convert Lat/Long coordinates to ECEF coordinates. + """ + return ECEF() + + def latlng2utm(input: LatLng) -> UTM: + """ + Convert Lat/Long coordinates to UTM coordinates. + """ + return UTM() + + def utm2ecef(input: UTM) -> ECEF: + """ + Convert UTM coordinates to ECEF coordinates. + """ + return ECEF() + + def utm2latlng(input: UTM) -> LatLng: + """ + Convert UTM coordinates to Lat/Long coordinates. + """ + return LatLng() + + +class GeoPose: + """ + Holds Geo reference position. + Holds geographic reference position information. + + This class represents a geographic pose, including position, orientation, and accuracy information. + It is used for storing and manipulating geographic data, such as latitude, longitude, altitude, + pose matrices, covariances, and timestamps. + + The pose data is defined in the East-North-Up (ENU) reference frame. The ENU frame is a local + Cartesian coordinate system commonly used in geodetic applications. In this frame, the X-axis + points towards the East, the Y-axis points towards the North, and the Z-axis points upwards. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def heading(self) -> double: + """ + The heading (orientation) of the pose in radians (rad). It indicates the direction in which the object or observer is facing, with 0 degrees corresponding to North and increasing in a counter-clockwise direction. + """ + return double() + + @heading.setter + def heading(self, heading: Any) -> None: + pass + + @property + def horizontal_accuracy(self) -> double: + """ + The horizontal accuracy of the pose in meters. + """ + return double() + + @horizontal_accuracy.setter + def horizontal_accuracy(self, horizontal_accuracy: Any) -> None: + pass + + @property + def pose_data(self) -> Transform: + """ + The 4x4 matrix defining the pose in the East-North-Up (ENU) coordinate system. + """ + return Transform() + + @pose_data.setter + def pose_data(self, pose_data: Any) -> None: + pass + + @property + def vertical_accuracy(self) -> double: + """ + The vertical accuracy of the pose in meters. + """ + return double() + + @vertical_accuracy.setter + def vertical_accuracy(self, vertical_accuracy: Any) -> None: + pass + + @pose_covariance.setter + def pose_covariance(self, pose_covariance: Any) -> None: + pass + + @property + def latlng_coordinates(self) -> LatLng: + """ + The latitude, longitude, and altitude coordinates of the pose. + """ + return LatLng() + + @latlng_coordinates.setter + def latlng_coordinates(self, latlng_coordinates: Any) -> None: + pass + + @property + def timestamp(self) -> Timestamp: + """ + The timestamp associated with the GeoPose. + """ + return Timestamp() + + @timestamp.setter + def timestamp(self, timestamp: Any) -> None: + pass + + def pose_covariance(self) -> np.array[float]: + """ + The pose covariance matrix in ENU. + """ + return np.array[float]() + + +class GNSSData: + """ + Class containing GNSS data to be used for positional tracking as prior. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def gnss_mode(self) -> GNSS_MODE: + """ + Represents the current mode of GNSS. + """ + return GNSS_MODE() + + @gnss_mode.setter + def gnss_mode(self, gnss_mode: Any) -> None: + pass + + @property + def ts(self) -> Timestamp: + """ + Timestamp of the GNSS position (must be aligned with the camera time reference). 
+ """ + return Timestamp() + + @ts.setter + def ts(self, ts: Any) -> None: + pass + + @property + def gnss_status(self) -> GNSS_STATUS: + """ + Represents the current status of GNSS. + """ + return GNSS_STATUS() + + @gnss_status.setter + def gnss_status(self, gnss_status: Any) -> None: + pass + + @property + def latitude_std(self) -> float: + """ + Latitude standard deviation. + """ + return float() + + @latitude_std.setter + def latitude_std(self, latitude_std: Any) -> None: + pass + + @property + def position_covariances(self) -> list[float]: + """ + Covariance of the position in meter (must be expressed in the ENU coordinate system). + For eph, epv GNSS sensors, set it as follow: ```{eph*eph, 0, 0, 0, eph*eph, 0, 0, 0, epv*epv}```. + """ + return list[float]() + + @position_covariances.setter + def position_covariances(self, position_covariances: Any) -> None: + pass + + @property + def longitude_std(self) -> float: + """ + Longitude standard deviation. + """ + return float() + + @longitude_std.setter + def longitude_std(self, longitude_std: Any) -> None: + pass + + @property + def altitude_std(self) -> float: + """ + Altitude standard deviation + """ + return float() + + @altitude_std.setter + def altitude_std(self, altitude_std: Any) -> None: + pass + + def get_coordinates(self, in_radian = True) -> tuple[float, float, float]: + """ + Get the coordinates of the sl.GNSSData. + The sl.LatLng coordinates could be expressed in degrees or radians. + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param is_radian: Should the output be expressed in radians or degrees. + """ + return tuple[float, float, float]() + + def set_coordinates(self, latitude: double, longitude: double, altitude: double, in_radian = True) -> None: + """ + Set the sl.LatLng coordinates of sl.GNSSData. + The sl.LatLng coordinates could be expressed in degrees or radians. + :param latitude: Latitude coordinate. + :param longitude: Longitude coordinate. + :param altitude: Altitude coordinate. + :param is_radian: Are the inputs expressed in radians or in degrees. + """ + pass + + +class SynchronizationParameter: + """ + Class containing GNSS data to be used for positional tracking as prior. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def windows_size(self) -> double: + return double() + + @windows_size.setter + def windows_size(self, windows_size: Any) -> None: + pass + + @property + def data_source_timeout(self) -> double: + return double() + + @data_source_timeout.setter + def data_source_timeout(self, data_source_timeout: Any) -> None: + pass + + @property + def maximum_lateness(self) -> double: + return double() + + @maximum_lateness.setter + def maximum_lateness(self, maximum_lateness: Any) -> None: + pass + + @property + def keep_last_data(self) -> bool: + return bool() + + @keep_last_data.setter + def keep_last_data(self, keep_last_data: Any) -> None: + pass + + +class InitFusionParameters: + """ + Holds the options used to initialize the Fusion object. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def verbose(self) -> bool: + """ + Enable the verbosity mode of the SDK. + """ + return bool() + + @verbose.setter + def verbose(self, verbose: Any) -> None: + pass + + @property + def maximum_working_resolution(self) -> Resolution: + """ + Sets the maximum resolution for all Fusion outputs, such as images and measures. 
+
+ The default value is (-1, -1), which allows the Fusion to automatically select the optimal resolution for the best quality/runtime ratio.
+
+ - For images, the output resolution can be up to the native resolution of the camera.
+ - For measures involving depth, the output resolution can be up to the maximum working resolution.
+
+ Setting this parameter to (-1, -1) will ensure the best balance between quality and performance for depth measures.
+ """
+ return Resolution()
+
+ @maximum_working_resolution.setter
+ def maximum_working_resolution(self, maximum_working_resolution: Any) -> None:
+ pass
+
+ @property
+ def coordinate_units(self) -> UNIT:
+ """
+ This parameter allows you to select the unit to be used for all metric values of the SDK (depth, point cloud, tracking, mesh, and others).
+ Default: UNIT.MILLIMETER
+ """
+ return UNIT()
+
+ @coordinate_units.setter
+ def coordinate_units(self, coordinate_units: Any) -> None:
+ pass
+
+ @property
+ def timeout_period_number(self) -> int:
+ """
+ If specified, changes the number of periods necessary for a source to go into timeout when it provides no data. For example, if you set this to 5, any source that does not provide data for 5 periods will go into timeout and will be ignored.
+ """
+ return int()
+
+ @timeout_period_number.setter
+ def timeout_period_number(self, timeout_period_number: Any) -> None:
+ pass
+
+ @property
+ def sdk_gpu_id(self) -> int:
+ """
+ NVIDIA graphics card id to use.
+
+ By default the SDK will use the most powerful NVIDIA graphics card found.
+ \n However, when running several applications, or using several cameras at the same time, splitting the load over available GPUs can be useful.
+ \n This parameter allows you to select the GPU used by the sl.Camera using an ID from 0 to n-1 GPUs in your PC.
+ \n Default: -1
+ .. note::
+ A non-positive value will search for all CUDA capable devices and select the most powerful.
+ """
+ return int()
+
+ @sdk_gpu_id.setter
+ def sdk_gpu_id(self, sdk_gpu_id: Any) -> None:
+ pass
+
+ @property
+ def coordinate_system(self) -> COORDINATE_SYSTEM:
+ """
+ Positional tracking, point clouds and many other features require a given COORDINATE_SYSTEM to be used as reference.
+ This parameter allows you to select the COORDINATE_SYSTEM used by the Camera to return its measures.
+ \n This defines the order and the direction of the axis of the coordinate system.
+ \n Default: COORDINATE_SYSTEM.IMAGE
+ """
+ return COORDINATE_SYSTEM()
+
+ @coordinate_system.setter
+ def coordinate_system(self, coordinate_system: Any) -> None:
+ pass
+
+ @property
+ def output_performance_metrics(self) -> bool:
+ """
+ Allows users to extract some statistics from the Fusion API, such as dropped frames for each camera, latency, etc.
+ """
+ return bool()
+
+ @output_performance_metrics.setter
+ def output_performance_metrics(self, output_performance_metrics: Any) -> None:
+ pass
+
+ @property
+ def synchronization_parameters(self) -> SynchronizationParameter:
+ """
+ Specifies the parameters used for data synchronization during fusion.
+
+ The sl.SynchronizationParameter class encapsulates the synchronization parameters that control the data fusion process.
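+
+ Illustrative sketch (the field values below are placeholders — tune them for your setup):
+
+ .. code-block:: text
+
+ init_fusion_parameters = sl.InitFusionParameters()
+ sync_params = sl.SynchronizationParameter()
+ sync_params.windows_size = 30.0 # placeholder value
+ sync_params.data_source_timeout = 50.0 # placeholder value
+ sync_params.keep_last_data = False
+ init_fusion_parameters.synchronization_parameters = sync_params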
+ """ + return SynchronizationParameter() + + @synchronization_parameters.setter + def synchronization_parameters(self, synchronization_parameters: Any) -> None: + pass + + def __dealloc__(self) -> None: + pass + + +class Fusion: + """ + Holds Fusion process data and functions + """ + def __init__(self, *args, **kwargs) -> None: ... + + def __dealloc__(self) -> None: + pass + + def init(self, init_fusion_parameters : InitFusionParameters) -> FUSION_ERROR_CODE: + """ + Initialize the fusion module with the requested parameters. + :param init_parameters: Initialization parameters. + :return: ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def close(self) -> None: + """ + Will deactivate all the fusion modules and free internal data. + """ + pass + + def subscribe(self, uuid : CameraIdentifier, communication_parameters: CommunicationParameters, pose: Transform) -> FUSION_ERROR_CODE: + """ + Set the specified camera as a data provider. + :param uuid: The requested camera identifier. + :param communication_parameters: The communication parameters to connect to the camera. + :param pose: The World position of the camera, regarding the other camera of the setup. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def unsubscribe(self, uuid : CameraIdentifier) -> FUSION_ERROR_CODE: + """ + Remove the specified camera from data provider. + :param uuid: The requested camera identifier. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def update_pose(self, uuid : CameraIdentifier, pose: Transform) -> FUSION_ERROR_CODE: + """ + Updates the specified camera position inside fusion WORLD. + :param uuid: The requested camera identifier. + :param pose: The World position of the camera, regarding the other camera of the setup. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def get_process_metrics(self) -> tuple[FUSION_ERROR_CODE, FusionMetrics]: + """ + Get the metrics of the Fusion process, for the fused data as well as individual camera provider data. + :param metrics: The process metrics. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + :return: The process metrics. + """ + return tuple[FUSION_ERROR_CODE, FusionMetrics]() + + def get_sender_state(self) -> dict: + """ + Returns the state of each connected data senders. + :return: The individual state of each connected senders. + """ + return {} + + def process(self) -> FUSION_ERROR_CODE: + """ + Runs the main function of the Fusion, this trigger the retrieve and synchronization of all connected senders and updates the enabled modules. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def enable_body_tracking(self, params : BodyTrackingFusionParameters) -> FUSION_ERROR_CODE: + """ + Enables the body tracking fusion module. + :param params: Structure containing all specific parameters for body tracking fusion. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. 
+ """ + return FUSION_ERROR_CODE() + + def retrieve_bodies(self, bodies : Bodies, parameters : BodyTrackingFusionRuntimeParameters, uuid : CameraIdentifier = CameraIdentifier(0), reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves the body data, can be the fused data (default), or the raw data provided by a specific sender. + :param bodies: The fused bodies will be saved into this objects. + :param parameters: Body detection runtime settings, can be changed at each detection. + :param uuid: The id of the sender. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def enable_object_detection(self, params = ObjectDetectionFusionParameters()) -> FUSION_ERROR_CODE: + """ + Enables the object detection fusion module. + :param params: Structure containing all specific parameters for object detection fusion. + \n For more information, see the ObjectDetectionFusionParameters documentation. + :return: SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_objects_all_od_groups(self, objs, reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves all the fused objects data. + :param objs: The fused objects will be saved into this dictionary of objects. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_objects_one_od_group(self, objs, fused_od_group_name, reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Retrieves the fused objects of a given fused OD group. + :param objs: The fused objects will be saved into this objects. + :param fused_od_group_name: The name of the fused objects group to retrieve. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_raw_objects_all_ids(self, objs, uuid) -> FUSION_ERROR_CODE: + """ + Retrieves all the raw objects data provided by a specific sender. + :param objs: The fused objects will be saved into this dictionary of objects. + :param uuid: Retrieve the raw data provided by this sender. + """ + return FUSION_ERROR_CODE() + + def retrieve_raw_objects_one_id(self, py_objects, uuid, instance_id) -> FUSION_ERROR_CODE: + """ + Retrieves the raw objects data provided by a specific sender and a specific instance id. + :param objs: The fused objects will be saved into this objects. + :param uuid: Retrieve the raw data provided by this sender. + :param instance_id: Retrieve the objects inferred by the model with this ID only. + :return: SUCCESS if it goes as it should, otherwise it returns a FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def disable_objects_detection(self) -> None: + """ + Disable the body fusion tracking module. 
+ """ + pass + + def retrieve_image(self, mat, uuid, resolution = Resolution(0, 0)) -> FUSION_ERROR_CODE: + """ + Returns the current sl.VIEW.LEFT of the specified camera, the data is synchronized. + :param mat: the CPU BGRA image of the requested camera. + :param resolution: the requested resolution of the output image, can be lower or equal (default) to the original image resolution. + :param uuid: If set to a sender serial number (different from 0), this will retrieve the raw data provided by this sender. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def retrieve_measure(self, mat, uuid, measure: MEASURE, resolution = Resolution(0, 0), reference_frame: FUSION_REFERENCE_FRAME = FUSION_REFERENCE_FRAME.BASELINK) -> FUSION_ERROR_CODE: + """ + Returns the current measure of the specified camera, the data is synchronized. + :param mat: the CPU data of the requested camera. + :param uuid: The id of the sender. + :param measure: measure: the requested measure type, by default DEPTH (F32_C1). + :param resolution: the requested resolution of the output image, can be lower or equal (default) to the original image resolution. + :param reference_frame: The reference frame in which the objects will be expressed. Default: FUSION_REFERENCE_FRAME "FUSION_REFERENCE_FRAME::BASELINK". + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def disable_body_tracking(self) -> None: + """ + Disable the body fusion tracking module. + """ + pass + + def enable_positionnal_tracking(self, parameters : PositionalTrackingFusionParameters) -> FUSION_ERROR_CODE: + """ + Enables positional tracking fusion module. + :param parameters: A structure containing all the PositionalTrackingFusionParameters that define positional tracking fusion module. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def ingest_gnss_data(self, gnss_data : GNSSData) -> FUSION_ERROR_CODE: + """ + Ingest GNSS data from an external sensor into the fusion module. + :param gnss_data: The current GNSS data to combine with the current positional tracking data. + :return: FUSION_ERROR_CODE.SUCCESS if it goes as it should, otherwise it returns an FUSION_ERROR_CODE. + """ + return FUSION_ERROR_CODE() + + def get_position(self, camera_pose : Pose, reference_frame : REFERENCE_FRAME = REFERENCE_FRAME.WORLD, uuid: CameraIdentifier = CameraIdentifier(), position_type : POSITION_TYPE = POSITION_TYPE.FUSION) -> POSITIONAL_TRACKING_STATE: + """ + Get the Fused Position referenced to the first camera subscribed. If uuid is specified then project position on the referenced camera. + :param camera_pose: Will contain the fused position referenced by default in world (world is given by the calibration of the cameras system). + :param reference_frame: Defines the reference from which you want the pose to be expressed. Default : REFERENCE_FRAME.WORLD. + :param uuid: If set to a sender serial number (different from 0), this will retrieve position projected on the requested camera if position_type is equal to POSITION_TYPE.FUSION or raw sender position if position_type is equal to POSITION_TYPE.RAW. + :param position_type: Select if the position should the fused position re-projected in the camera with uuid or if the position should be the raw position (without fusion) of camera with uui. 
+ :return: POSITIONAL_TRACKING_STATE is the current state of the tracking process.
+ """
+ return POSITIONAL_TRACKING_STATE()
+
+ def get_fused_positional_tracking_status(self) -> FusedPositionalTrackingStatus:
+ """
+ Get the current status of the fused position.
+ :return: The current status of the fused positional tracking.
+ """
+ return FusedPositionalTrackingStatus()
+
+ def get_current_gnss_data(self, gnss_data : GNSSData) -> POSITIONAL_TRACKING_STATE:
+ """
+ Returns the last synchronized GNSS data.
+ :param gnss_data: Last synchronized GNSS data. (Direction: out)
+ :return: POSITIONAL_TRACKING_STATE is the current state of the tracking process.
+ """
+ return POSITIONAL_TRACKING_STATE()
+
+ def get_geo_pose(self, pose : GeoPose) -> GNSS_FUSION_STATUS:
+ """
+ Returns the current GeoPose.
+ :param pose: The current GeoPose. (Direction: out)
+ :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+ """
+ return GNSS_FUSION_STATUS()
+
+ def geo_to_camera(self, input : LatLng, output : Pose) -> GNSS_FUSION_STATUS:
+ """
+ Convert latitude / longitude into a position in the sl.Fusion coordinate system.
+ :param input: The latitude / longitude to be converted into the sl.Fusion coordinate system. (Direction: in)
+ :param output: Converted position in the sl.Fusion coordinate system. (Direction: out)
+ :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+ """
+ return GNSS_FUSION_STATUS()
+
+ def camera_to_geo(self, input : Pose, output : GeoPose) -> GNSS_FUSION_STATUS:
+ """
+ Convert a position in the sl.Fusion coordinate system into a global world coordinate.
+ :param input: Position to convert into a global world coordinate. (Direction: in)
+ :param output: Converted position in global world coordinates. (Direction: out)
+ :return: GNSS_FUSION_STATUS is the current state of the tracking process.
+ """
+ return GNSS_FUSION_STATUS()
+
+ def get_current_timestamp(self) -> Timestamp:
+ """
+ Return the current fusion timestamp, aligned with the synchronized GNSS and camera data.
+ :return: Current fusion timestamp.
+ """
+ return Timestamp()
+
+ def disable_positionnal_tracking(self) -> None:
+ """
+ Disable the fusion positional tracking module.
+
+ The positional tracking is immediately stopped. If a file path is given, saveAreaMap(area_file_path) will be called asynchronously. See getAreaExportState() to get the exportation state.
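+
+ Minimal GNSS fusion loop sketch (illustrative; ``running`` and the acquisition of ``gnss_data`` from a receiver are assumptions, not part of the API):
+
+ .. code-block:: text
+
+ fusion.enable_positionnal_tracking(sl.PositionalTrackingFusionParameters())
+ pose = sl.Pose()
+ geo_pose = sl.GeoPose()
+ while running:
+ if fusion.process() == sl.FUSION_ERROR_CODE.SUCCESS:
+ fusion.ingest_gnss_data(gnss_data) # data read from your GNSS receiver
+ fusion.get_position(pose)
+ fusion.get_geo_pose(geo_pose)
+ fusion.disable_positionnal_tracking()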
+ """ + pass + + def ENU_to_geo(self, input: ENU, output: LatLng) -> FUSION_ERROR_CODE: + """ + Convert ENU to LatLng + + Concert an ENU position into LatLng + """ + return FUSION_ERROR_CODE() + + def geo_to_ENU(self, input : LatLng, out : ENU) -> FUSION_ERROR_CODE: + """ + Convert LatLng to ENU + + Convert am LatLng to ENU + """ + return FUSION_ERROR_CODE() + + def get_current_gnss_calibration_std(self) -> tuple[GNSS_FUSION_STATUS, float, np.array]: + """ + Get the current calibration uncertainty obtained during calibration process. + :return: sl.GNSS_FUSION_STATUS representing current initialisation status. + :return: Output yaw uncertainty. + :return: Output position uncertainty. + # + """ + return tuple[GNSS_FUSION_STATUS, float, np.array]() + + def get_geo_tracking_calibration(self) -> Transform: + """ + Get the calibration found between VIO and GNSS. + + :return: sl.Transform is the calibration found between VIO and GNSS during calibration process. + # + """ + return Transform() + + def enable_spatial_mapping(self, parameters) -> FUSION_ERROR_CODE: + """ + Initializes and starts the spatial mapping processes. + + The spatial mapping will create a geometric representation of the scene based on both tracking data and 3D point clouds. + + The resulting output can be a Mesh or a FusedPointCloud. It can be be obtained by calling retrieve_spatial_map_async(). + Note that retrieve_spatial_map_async() should be called after request_spatial_map_async(). + + \param parameters The structure containing all the specific parameters for the spatial mapping. Default: a balanced parameter preset between geometric fidelity and output file size. For more information, see the SpatialMappingParameters documentation. + :return: SUCCESS if everything went fine, FUSION_ERROR_CODE "FUSION_ERROR_CODE::FAILURE" otherwise. + + .. note:: + The tracking (enable_positional_tracking()) needs to be enabled to use the spatial mapping. + + .. note:: + Lower SpatialMappingParameters.range_meter and SpatialMappingParameters.resolution_meter for higher performance. + + .. warning:: This fuction is only available for INTRA_PROCESS communication type. + """ + return FUSION_ERROR_CODE() + + def request_spatial_map_async(self) -> None: + """ + Starts the spatial map generation process in a non blocking thread from the spatial mapping process. + + The spatial map generation can take a long time depending on the mapping resolution and covered area. This function will trigger the generation of a mesh without blocking the program. + You can get info about the current generation using get_spatial_map_request_status_async(), and retrieve the mesh using request_spatial_map_async(...) . + + .. note:: + Only one mesh can be generated at a time. If the previous mesh generation is not over, new calls of the function will be ignored. + + # + """ + pass + + def get_spatial_map_request_status_async(self) -> FUSION_ERROR_CODE: + """ + Returns the spatial map generation status. This status allows to know if the mesh can be retrieved by calling retrieve_spatial_map_async(). + :return: SUCCESS if the mesh is ready and not yet retrieved, otherwise FUSION_ERROR_CODE "FUSION_ERROR_CODE::FAILURE". + + \n See request_spatial_map_async() for an example. + # + """ + return FUSION_ERROR_CODE() + + def retrieve_spatial_map_async(self, py_mesh) -> FUSION_ERROR_CODE: + """ + Retrieves the current generated spatial map. + + After calling request_spatial_map_async(), this method allows you to retrieve the generated mesh or fused point cloud. 
+ \n The Mesh or FusedPointCloud will only be available when get_spatial_map_request_status_async() returns FUSION_ERROR_CODE.SUCCESS. + + :param py_mesh: The Mesh or FusedPointCloud to be filled with the generated spatial map. (Direction: out) + :return: FUSION_ERROR_CODE.SUCCESS if the mesh is retrieved, otherwise FUSION_ERROR_CODE.FAILURE. + + .. note:: + This method only updates the necessary chunks and adds the new ones in order to improve update speed. + + .. warning:: You should not modify the mesh / fused point cloud between two calls of this method, otherwise it can lead to a corrupted mesh / fused point cloud. + See request_spatial_map_async() for an example. + """ + return FUSION_ERROR_CODE() + + def disable_spatial_mapping(self) -> None: + """ + Disables the spatial mapping process. + + The spatial mapping is immediately stopped. + + If the mapping has been enabled, this function will automatically be called by close(). + + .. note:: + This function frees the memory allocated for the spatial mapping, consequently, the spatial map cannot be retrieved after this call. + """ + pass + + +class SVOData: + """ + Class containing SVO data to be ingested/retrieved to/from SVO. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def timestamp_ns(self) -> Timestamp: + """ + Timestamp of the data. + """ + return Timestamp() + + @timestamp_ns.setter + def timestamp_ns(self, timestamp_ns: Any) -> None: + pass + + @property + def key(self) -> str: + """ + Key of the data. + """ + return str() + + @key.setter + def key(self, key: Any) -> None: + pass + + def get_content_as_string(self) -> str: + """ + Get the content of the sl.SVOData as a string. + + :return: The content of the sl.SVOData as a string. + """ + return str() + + def set_string_content(self, data: str) -> str: + """ + Set the content of the sl.SVOData as a string. + + \param data The string data content to set. + """ + return str() + + +class CameraOneConfiguration: + """ + Structure containing information about the camera sensor. + + Information about the camera is available in the sl.CameraInformation struct returned by sl.Camera.get_camera_information(). + .. note:: + This object is meant to be used as a read-only container, editing any of its field won't impact the SDK. + + .. warning:: sl.CalibrationOneParameters are returned in sl.COORDINATE_SYSTEM.IMAGE, they are not impacted by the sl.InitParametersOne.coordinate_system. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def calibration_parameters_raw(self) -> CameraParameters: + """ + Intrinsics and extrinsic stereo parameters for unrectified/distorted images. + """ + return CameraParameters() + + @property + def fps(self) -> float: + """ + FPS of the camera. + """ + return float() + + @property + def firmware_version(self) -> int: + """ + Internal firmware version of the camera. + """ + return int() + + @property + def calibration_parameters(self) -> CameraParameters: + """ + Intrinsics and extrinsic stereo parameters for rectified/undistorted images. + """ + return CameraParameters() + + @property + def resolution(self) -> Resolution: + """ + Resolution of the camera. + """ + return Resolution() + + +class CameraOneInformation: + """ + Structure containing information of a single camera (serial number, model, calibration, etc.) + That information about the camera will be returned by CameraOne.get_camera_information() + .. 
note:: + This object is meant to be used as a read-only container, editing any of its fields won't impact the SDK. + + .. warning:: CalibrationParameters are returned in COORDINATE_SYSTEM.IMAGE , they are not impacted by the InitParametersOne.coordinate_system + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def camera_model(self) -> MODEL: + """ + Model of the camera (see sl.MODEL). + """ + return MODEL() + + @property + def serial_number(self) -> int: + """ + Serial number of the camera. + """ + return int() + + @property + def camera_configuration(self) -> CameraOneConfiguration: + """ + Camera configuration parameters stored in a sl.CameraOneConfiguration. + """ + return CameraOneConfiguration() + + @property + def sensors_configuration(self) -> SensorsConfiguration: + """ + Sensors configuration parameters stored in a sl.SensorsConfiguration. + """ + return SensorsConfiguration() + + @property + def input_type(self) -> INPUT_TYPE: + """ + Input type used in the ZED SDK. + """ + return INPUT_TYPE() + + +class InitParametersOne: + """ + Class containing the options used to initialize the sl.CameraOne object. + + This class allows you to select multiple parameters for the sl.Camera such as the selected camera, resolution, depth mode, coordinate system, and units of measurement. + \n Once filled with the desired options, it should be passed to the sl.Camera.open() method. + + .. code-block:: text + + + import pyzed.sl as sl + + def main() : + zed = sl.CameraOne() # Create a ZED camera object + + init_params = sl.InitParametersOne() # Set initial parameters + init_params.sdk_verbose = 0 # Disable verbose mode + + # Use the camera in LIVE mode + init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode + init_params.camera_fps = 30 # Set fps at 30 + + # Or use the camera in SVO (offline) mode + #init_params.set_from_svo_file("xxxx.svo") + + # Or use the camera in STREAM mode + #init_params.set_from_stream("192.168.1.12", 30000) + + # Other parameters are left to their default values + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Close the camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + + + With its default values, it opens the camera in live mode at sl.RESOLUTION.HD720 + \n You can customize it to fit your application. + .. note:: + The parameters can also be saved and reloaded using its save() and load() methods. + """ + def __init__(self, *args, **kwargs) -> None: ... + + @property + def coordinate_units(self) -> UNIT: + """ + Unit of spatial data (depth, point cloud, tracking, mesh, etc.) for retrieval. + + Default: sl.UNIT.MILLIMETER + """ + return UNIT() + + @coordinate_units.setter + def coordinate_units(self, coordinate_units: Any) -> None: + pass + + @property + def optional_settings_path(self) -> str: + """ + Optional path where the ZED SDK has to search for the settings file (SN.conf file). + + This file contains the calibration information of the camera. + \n Default: "" + + .. note:: + The settings file will be searched in the default directory: + + * **Linux**: /usr/local/zed/settings/ + * **Windows**: C:/ProgramData/stereolabs/settings + + .. note:: + If a path is specified and no file has been found, the ZED SDK will search the settings file in the default directory. + + .. note:: + An automatic download of the settings file (through **ZED Explorer** or the installer) will still download the files on the default path. + + + .. 
code-block:: text + + init_params = sl.InitParametersOne() # Set initial parameters + home = "/path/to/home" + path = home + "/Documents/settings/" # assuming /path/to/home/Documents/settings/SNXXXX.conf exists. Otherwise, it will be searched in /usr/local/zed/settings/ + init_params.optional_settings_path = path + """ + return str() + + @optional_settings_path.setter + def optional_settings_path(self, optional_settings_path: Any) -> None: + pass + + @property + def coordinate_system(self) -> COORDINATE_SYSTEM: + """ + sl.COORDINATE_SYSTEM to be used as reference for positional tracking, mesh, point clouds, etc. + + This parameter allows you to select the sl.COORDINATE_SYSTEM used by the sl.Camera object to return its measures. + \n This defines the order and the direction of the axis of the coordinate system. + \n Default: sl.COORDINATE_SYSTEM.IMAGE + """ + return COORDINATE_SYSTEM() + + @coordinate_system.setter + def coordinate_system(self, coordinate_system: Any) -> None: + pass + + @property + def async_grab_camera_recovery(self) -> bool: + """ + Define the behavior of the automatic camera recovery during sl.Camera.grab() method call. + + When async is enabled and there's an issue with the communication with the sl.Camera object, + sl.Camera.grab() will exit after a short period and return the sl.ERROR_CODE.CAMERA_REBOOTING warning. + \n The recovery will run in the background until the correct communication is restored. + \n When async_grab_camera_recovery is false, the sl.Camera.grab() method is blocking and will return + only once the camera communication is restored or the timeout is reached. + \n Default: False + """ + return bool() + + @async_grab_camera_recovery.setter + def async_grab_camera_recovery(self, async_grab_camera_recovery: Any) -> None: + pass + + @property + def enable_hdr(self) -> bool: + """ + Activates HDR support for the current resolution/mode. Only active if the camera supports HDR for this resolution + + \n Default: False + """ + return bool() + + @enable_hdr.setter + def enable_hdr(self, enable_hdr: Any) -> None: + pass + + @property + def camera_fps(self) -> int: + """ + Requested camera frame rate. + + If set to 0, the highest FPS of the specified camera_resolution will be used. + \n Default: 0 + \n\n See sl.RESOLUTION for a list of supported frame rates. + .. note:: + If the requested camera_fps is unsupported, the closest available FPS will be used. + """ + return int() + + @camera_fps.setter + def camera_fps(self, camera_fps: Any) -> None: + pass + + @property + def svo_real_time_mode(self) -> bool: + """ + Defines if sl.Camera object return the frame in real time mode. + + When playing back an SVO file, each call to sl.Camera.grab() will extract a new frame and use it. + \n However, it ignores the real capture rate of the images saved in the SVO file. + \n Enabling this parameter will bring the SDK closer to a real simulation when playing back a file by using the images' timestamps. + \n Default: False + .. note:: + sl.Camera.grab() will return an error when trying to play too fast, and frames will be dropped when playing too slowly. + """ + return bool() + + @svo_real_time_mode.setter + def svo_real_time_mode(self, svo_real_time_mode: Any) -> None: + pass + + @property + def camera_resolution(self) -> RESOLUTION: + """ + Desired camera resolution. + .. note:: + Small resolutions offer higher framerate and lower computation time. + + .. note:: + In most situations, sl.RESOLUTION.HD720 at 60 FPS is the best balance between image quality and framerate. 
+ + + Default: + * ZED X/X Mini: sl.RESOLUTION.HD1200 + * other cameras: sl.RESOLUTION.HD720 + .. note:: + Available resolutions are listed here: sl.RESOLUTION. + """ + return RESOLUTION() + + @camera_resolution.setter + def camera_resolution(self, camera_resolution: Any) -> None: + pass + + @property + def sdk_verbose_log_file(self) -> str: + """ + File path to store the ZED SDK logs (if sdk_verbose is enabled). + + The file will be created if it does not exist. + \n Default: "" + + .. note:: + Setting this parameter to any value will redirect all standard output print calls of the entire program. + + .. note:: + This means that your own standard output print calls will be redirected to the log file. + + .. warning:: The log file won't be cleared after successive executions of the application. + .. warning:: This means that it can grow indefinitely if not cleared. + """ + return str() + + @sdk_verbose_log_file.setter + def sdk_verbose_log_file(self, sdk_verbose_log_file: Any) -> None: + pass + + @property + def sdk_verbose(self) -> int: + """ + Enable the ZED SDK verbose mode. + + This parameter allows you to enable the verbosity of the ZED SDK to get a variety of runtime information in the console. + \n When developing an application, enabling verbose (``sdk_verbose >= 1``) mode can help you understand the current ZED SDK behavior. + \n However, this might not be desirable in a shipped version. + \n Default: 0 (no verbose message) + .. note:: + The verbose messages can also be exported into a log file. + + .. note:: + See sdk_verbose_log_file for more. + """ + return int() + + @sdk_verbose.setter + def sdk_verbose(self, sdk_verbose: Any) -> None: + pass + + def input(self, input_t) -> None: + """ + The SDK can handle different input types: + - Select a camera by its ID (/dev/videoX on Linux, and 0 to N cameras connected on Windows) + - Select a camera by its serial number + - Open a recorded sequence in the SVO file format + - Open a streaming camera from its IP address and port + + This parameter allows you to select to desired input. It should be used like this: + .. code-block:: text + + init_params = sl.InitParametersOne() # Set initial parameters + init_params.sdk_verbose = 1 # Enable verbose mode + input_t = sl.InputType() + input_t.set_from_camera_id(0) # Selects the camera with ID = 0 + init_params.input = input_t + init_params.set_from_camera_id(0) # You can also use this + + + .. code-block:: text + + init_params = sl.InitParametersOne() # Set initial parameters + init_params.sdk_verbose = 1 # Enable verbose mode + input_t = sl.InputType() + input_t.set_from_serial_number(1010) # Selects the camera with serial number = 101 + init_params.input = input_t + init_params.set_from_serial_number(1010) # You can also use this + + + .. code-block:: text + + init_params = sl.InitParametersOne() # Set initial parameters + init_params.sdk_verbose = 1 # Enable verbose mode + input_t = sl.InputType() + input_t.set_from_svo_file("/path/to/file.svo") # Selects the and SVO file to be read + init_params.input = input_t + init_params.set_from_svo_file("/path/to/file.svo") # You can also use this + + + .. 
code-block:: text + + init_params = sl.InitParametersOne() # Set initial parameters + init_params.sdk_verbose = 1 # Enable verbose mode + input_t = sl.InputType() + input_t.set_from_stream("192.168.1.42") + init_params.input = input_t + init_params.set_from_stream("192.168.1.42") # You can also use this + + + Available cameras and their ID/serial can be listed using get_device_list() and get_streaming_device_list() + Each Camera will create its own memory (CPU and GPU), therefore the number of ZED used at the same time can be limited by the configuration of your computer. (GPU/CPU memory and capabilities) + + default : empty + See InputType for complementary information. + + .. warning:: Using the ZED SDK Python API, using init_params.input.set_from_XXX won't work, use init_params.set_from_XXX instead + @property + def input(self) -> InputType: + input_t = InputType() + input_t.input = self.init.input + return input_t + """ + pass + + def set_from_camera_id(self, cam_id, bus_type : BUS_TYPE = BUS_TYPE.AUTO) -> None: + """ + Defines the input source with a camera id to initialize and open an sl.CameraOne object from. + :param id: Id of the desired camera to open. + :param bus_type: sl.BUS_TYPE of the desired camera to open. + """ + pass + + def set_from_serial_number(self, serial_number) -> None: + """ + Defines the input source with a serial number to initialize and open an sl.CameraOne object from. + :param serial_number: Serial number of the desired camera to open. + """ + pass + + def set_from_svo_file(self, svo_input_filename) -> None: + """ + Defines the input source with an SVO file to initialize and open an sl.CameraOne object from. + :param svo_input_filename: Path to the desired SVO file to open. + """ + pass + + def set_from_stream(self, sender_ip, port = 30000) -> None: + """ + Defines the input source from a stream to initialize and open an sl.CameraOne object from. + :param sender_ip: IP address of the streaming sender. + :param port: Port on which to listen. Default: 30000 + """ + pass + + +class CameraOne: + """ + Class containing the options used to initialize the sl.CameraOne object. + + This class allows you to select multiple parameters for the sl.Camera such as the selected camera, resolution, depth mode, coordinate system, and units of measurement. + \n Once filled with the desired options, it should be passed to the sl.Camera.open() method. + + .. code-block:: text + + + import pyzed.sl as sl + + def main() : + zed = sl.CameraOne() # Create a ZED camera object + + init_params = sl.InitParametersOne() # Set initial parameters + init_params.sdk_verbose = 0 # Disable verbose mode + + # Use the camera in LIVE mode + init_params.camera_resolution = sl.RESOLUTION.HD1080 # Use HD1080 video mode + init_params.camera_fps = 30 # Set fps at 30 + + # Or use the camera in SVO (offline) mode + #init_params.set_from_svo_file("xxxx.svo") + + # Or use the camera in STREAM mode + #init_params.set_from_stream("192.168.1.12", 30000) + + # Other parameters are left to their default values + + # Open the camera + err = zed.open(init_params) + if err != sl.ERROR_CODE.SUCCESS: + exit(-1) + + # Close the camera + zed.close() + return 0 + + if __name__ == "__main__" : + main() + + + + With its default values, it opens the camera in live mode at sl.RESOLUTION.HD720 + \n You can customize it to fit your application. + .. note:: + The parameters can also be saved and reloaded using its save() and load() methods. + """ + def __init__(self, *args, **kwargs) -> None: ... 
+ + def close(self) -> None: + """ + Close an opened camera. + + If open() has been called, this method will close the connection to the camera (or the SVO file) and free the corresponding memory. + + If open() wasn't called or failed, this method won't have any effect. + + .. note:: + If an asynchronous task is running within the Camera object, like save_area_map(), this method will wait for its completion. + + .. note:: + To apply a new InitParametersOne, you will need to close the camera first and then open it again with the new InitParameters values. + + .. warning:: Therefore you need to make sure to delete your GPU sl.Mat objects before the context is destroyed. + """ + pass + + def open(self, py_init : InitParametersOne = InitParametersOne()) -> ERROR_CODE: + """ + Opens the ZED camera from the provided InitParametersOne. + The method will also check the hardware requirements and run a self-calibration. + :param py_init: A structure containing all the initial parameters. Default: a preset of InitParametersOne. + :return: An error code giving information about the internal process. If ERROR_CODE.SUCCESS is returned, the camera is ready to use. Every other code indicates an error and the program should be stopped. + + Here is the proper way to call this function: + + .. code-block:: text + + zed = sl.CameraOne() # Create a ZED camera object + + init_params = sl.InitParametersOne() # Set configuration parameters + init_params.camera_resolution = sl.RESOLUTION.HD720 # Use HD720 video mode + init_params.camera_fps = 60 # Set fps at 60 + + # Open the camera + err = zed.open(init_params) + if (err != sl.ERROR_CODE.SUCCESS) : + print(repr(err)) # Display the error + exit(-1) + + + .. note:: + If you are having issues opening a camera, the diagnostic tool provided in the SDK can help you identify to problems. + + - **Windows:** C:\\Program Files (x86)\\ZED SDK\\tools\\ZED Diagnostic.exe + - **Linux:** /usr/local/zed/tools/ZED Diagnostic + .. note:: + If this method is called on an already opened camera, close() will be called. + """ + return ERROR_CODE() + + def is_opened(self) -> bool: + """ + Reports if the camera has been successfully opened. + It has the same behavior as checking if open() returns ERROR_CODE.SUCCESS. + :return: True if the ZED camera is already setup, otherwise false. + """ + return bool() + + def grab(self) -> ERROR_CODE: + """ + This method will grab the latest images from the camera, rectify them, and compute the retrieve_measure() "measurements" based on the RuntimeParameters provided (depth, point cloud, tracking, etc.) + + As measures are created in this method, its execution can last a few milliseconds, depending on your parameters and your hardware. + \n The exact duration will mostly depend on the following parameters: + + - InitParametersOne.camera_resolution : Lower resolutions are faster to compute. + + This method is meant to be called frequently in the main loop of your application. + .. note:: + Since ZED SDK 3.0, this method is blocking. It means that grab() will wait until a new frame is detected and available. + + .. note:: + If no new frames is available until timeout is reached, grab() will return ERROR_CODE.CAMERA_NOT_DETECTED since the camera has probably been disconnected. + + + :return: ERROR_CODE.SUCCESS means that no problem was encountered. + .. note:: + Returned errors can be displayed using ``str()``. + + + .. 
code-block:: text + + image = sl.Mat() + while True: + # Grab an image + if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + zed.retrieve_image(image) # Get the left image + # Use the image for your application + """ + return ERROR_CODE() + + def retrieve_image(self, py_mat, view = VIEW.LEFT, mem_type = MEM.CPU, resolution = Resolution(0, 0)) -> ERROR_CODE: + """ + Retrieves images from the camera (or SVO file). + + Multiple images are available along with a view of various measures for display purposes. + \n Available images and views are listed here. + \n As an example, VIEW.DEPTH can be used to get a gray-scale version of the depth map, but the actual depth values can be retrieved using retrieve_measure() . + \n + \n **Pixels** + \n Most VIEW modes output image with 4 channels as BGRA (Blue, Green, Red, Alpha), for more information see enum VIEW + \n + \n **Memory** + \n By default, images are copied from GPU memory to CPU memory (RAM) when this function is called. + \n If your application can use GPU images, using the **type** parameter can increase performance by avoiding this copy. + \n If the provided sl.Mat object is already allocated and matches the requested image format, memory won't be re-allocated. + \n + \n **Image size** + \n By default, images are returned in the resolution provided by Resolution "get_camera_information().camera_configuration.resolution". + \n However, you can request custom resolutions. For example, requesting a smaller image can help you speed up your application. + .. warning:: A sl.Mat resolution higher than the camera resolution **cannot** be requested. + + :param py_mat: The sl.Mat to store the image. (Direction: out) + :param view: Defines the image you want (see VIEW). Default: VIEW.LEFT. (Direction: in) + :param mem_type: Defines on which memory the image should be allocated. Default: MEM.CPU (you cannot change this default value). (Direction: in) + :param resolution: If specified, defines the Resolution of the output sl.Mat. If set to Resolution "Resolution(0,0)", the camera resolution will be taken. Default: (0,0). (Direction: in) + :return: ERROR_CODE.SUCCESS if the method succeeded. + :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the view mode requires a module not enabled (VIEW.DEPTH with DEPTH_MODE.NONE for example). + :return: ERROR_CODE.FAILURE if another error occurred. + + .. note:: + As this method retrieves the images grabbed by the grab() method, it should be called afterward. + + + .. code-block:: text + + # create sl.Mat objects to store the images + left_image = sl.Mat() + while True: + # Grab an image + if zed.grab() == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS + zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image + + # Display the center pixel colors + err, left_center = left_image.get_value(left_image.get_width() / 2, left_image.get_height() / 2) + if err == sl.ERROR_CODE.SUCCESS: + print("left_image center pixel R:", int(left_center[0]), " G:", int(left_center[1]), " B:", int(left_center[2])) + else: + print("error:", err) + """ + return ERROR_CODE() + + def set_svo_position(self, frame_number: int) -> None: + """ + Sets the playback cursor to the desired frame number in the SVO file. + + This method allows you to move around within a played-back SVO file. After calling, the next call to grab() will read the provided frame number. + + :param frame_number: The number of the desired frame to be decoded. + + .. 
note::
+ The method works only if the camera is open in SVO playback mode.
+
+
+ .. code-block:: text
+
+
+ import pyzed.sl as sl
+
+ def main():
+ # Create a ZED camera object
+ zed = sl.CameraOne()
+
+ # Set configuration parameters
+ init_params = sl.InitParametersOne()
+ init_params.set_from_svo_file("path/to/my/file.svo")
+
+ # Open the camera
+ err = zed.open(init_params)
+ if err != sl.ERROR_CODE.SUCCESS:
+ print(repr(err))
+ exit(-1)
+
+ # Loop between frames 0 and 50
+ left_image = sl.Mat()
+ while zed.get_svo_position() < zed.get_svo_number_of_frames() - 1:
+
+ print("Current frame: ", zed.get_svo_position())
+
+ # Loop if we reached frame 50
+ if zed.get_svo_position() == 50:
+ zed.set_svo_position(0)
+
+ # Grab an image
+ if zed.grab() == sl.ERROR_CODE.SUCCESS:
+ zed.retrieve_image(left_image, sl.VIEW.LEFT) # Get the rectified left image
+
+ # Use the image in your application
+
+ # Close the Camera
+ zed.close()
+ return 0
+
+ if __name__ == "__main__" :
+ main()
+
+ """
+ pass
+
+ def get_svo_position(self) -> int:
+ """
+ Returns the current playback position in the SVO file.
+
+ The position corresponds to the number of frames already read from the SVO file, starting from 0 to n.
+
+ Each grab() call increases this value by one (except when using InitParametersOne.svo_real_time_mode).
+ :return: The current frame position in the SVO file. -1 if the SDK is not reading an SVO.
+
+ .. note::
+ The method works only if the camera is open in SVO playback mode.
+
+
+ See set_svo_position() for an example.
+ """
+ return int()
+
+ def get_svo_number_of_frames(self) -> int:
+ """
+ Returns the number of frames in the SVO file.
+
+ :return: The total number of frames in the SVO file. -1 if the SDK is not reading an SVO.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return int()
+
+ def ingest_data_into_svo(self, data: SVOData) -> ERROR_CODE:
+ """
+ Ingests an SVOData object into the SVO file.
+
+ :return: An error code stating the success, or not.
+
+ The method works only if the camera is open in SVO recording mode.
+ """
+ return ERROR_CODE()
+
+ def get_svo_data_keys(self) -> list:
+ """
+ Get the external channels that can be retrieved from the SVO file.
+
+ :return: A list of keys.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return []
+
+ def retrieve_svo_data(self, key: str, data: dict, ts_begin: Timestamp, ts_end: Timestamp) -> ERROR_CODE:
+ """
+ Retrieves SVO data from the SVO file at the given channel key and in the given timestamp range.
+
+ :return: An error code stating the success, or not.
+ :param key: The channel key.
+ :param data: The dict to be filled with SVOData objects, with timestamps as keys.
+ :param ts_begin: The beginning of the range.
+ :param ts_end: The end of the range.
+
+ The method works only if the camera is open in SVO playback mode.
+ """
+ return ERROR_CODE()
+
+ def set_camera_settings(self, settings: VIDEO_SETTINGS, value = -1) -> ERROR_CODE:
+ """
+ Sets the value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.).
+
+ :param settings: The setting to be set.
+ :param value: The value to set. Default: -1 (auto mode).
+ :return: ERROR_CODE to indicate if the method was successful.
+
+ .. note::
+ The method works only if the camera is open in LIVE or STREAM mode.
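+
+ Illustrative sketch (the value 50 is a placeholder within the setting's allowed range):
+
+ .. code-block:: text
+
+ # Set the gain manually
+ err = zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, 50)
+ # Switch the setting back to automatic mode
+ err = zed.set_camera_settings(sl.VIDEO_SETTINGS.GAIN, -1)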
+ """ + return ERROR_CODE() + + def set_camera_settings_range(self, settings: VIDEO_SETTINGS, value_min = -1, value_max = -1) -> ERROR_CODE: + """ + Sets the value of the requested VIDEO_SETTINGS "camera setting" that supports two values (min/max). + + This method only works with the following VIDEO_SETTINGS: + - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE + - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE + - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE + + :param settings: The setting to be set. + :param min: The minimum value that can be reached (-1 or 0 gives full range). + :param max: The maximum value that can be reached (-1 or 0 gives full range). + :return: ERROR_CODE to indicate if the method was successful. + + .. warning:: If VIDEO_SETTINGS settings is not supported or min >= max, it will return ERROR_CODE.INVALID_FUNCTION_PARAMETERS. + .. note:: + The method works only if the camera is open in LIVE or STREAM mode. + + + .. code-block:: text + + # For ZED X based product, set the automatic exposure from 2ms to 5ms. Expected exposure time cannot go beyond those values + zed.set_camera_settings_range(sl.VIDEO_SETTINGS.AEC_RANGE, 2000, 5000); + """ + return ERROR_CODE() + + def set_camera_settings_roi(self, settings: VIDEO_SETTINGS, roi: Rect, reset = False) -> ERROR_CODE: + """ + Overloaded method for VIDEO_SETTINGS.AEC_AGC_ROI which takes a Rect as parameter. + + :param settings: Must be set at VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. + :param roi: Rect that defines the target to be applied for AEC/AGC computation. Must be given according to camera resolution. + :param eye: SIDE on which to be applied for AEC/AGC computation. Default: SIDE.BOTH + :param reset: Cancel the manual ROI and reset it to the full image. Default: False + + .. note:: + The method works only if the camera is open in LIVE or STREAM mode. + + + .. code-block:: text + + roi = sl.Rect(42, 56, 120, 15) + zed.set_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi, sl.SIDE.BOTH) + + """ + return ERROR_CODE() + + def get_camera_settings(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int]: + """ + Returns the current value of the requested VIDEO_SETTINGS "camera setting" (gain, brightness, hue, exposure, etc.). + + Possible values (range) of each setting are available here. + + :param setting: The requested setting. + :return: ERROR_CODE to indicate if the method was successful. + :return: The current value for the corresponding setting. + + .. code-block:: text + + err, gain = zed.get_camera_settings(sl.VIDEO_SETTINGS.GAIN) + if err == sl.ERROR_CODE.SUCCESS: + print("Current gain value:", gain) + else: + print("error:", err) + + + .. note:: + The method works only if the camera is open in LIVE or STREAM mode. + + .. note:: + Settings are not exported in the SVO file format. + """ + return tuple[ERROR_CODE, int]() + + def get_camera_settings_range(self, setting: VIDEO_SETTINGS) -> tuple[ERROR_CODE, int, int]: + """ + Returns the values of the requested settings for VIDEO_SETTINGS that supports two values (min/max). + + This method only works with the following VIDEO_SETTINGS: + - sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE + - sl.VIDEO_SETTINGS.AUTO_ANALOG_GAIN_RANGE + - sl.VIDEO_SETTINGS.AUTO_DIGITAL_GAIN_RANGE + + Possible values (range) of each setting are available here. + :param setting: The requested setting. + :return: ERROR_CODE to indicate if the method was successful. + :return: The current value of the minimum for the corresponding setting. 
+        :return: The current value of the maximum for the corresponding setting.
+
+        .. code-block:: text
+
+            err, aec_range_min, aec_range_max = zed.get_camera_settings_range(sl.VIDEO_SETTINGS.AUTO_EXPOSURE_TIME_RANGE)
+            if err == sl.ERROR_CODE.SUCCESS:
+                print("Current AUTO_EXPOSURE_TIME_RANGE range values ==> min:", aec_range_min, "max:", aec_range_max)
+            else:
+                print("error:", err)
+
+        .. note::
+            Works only with cameras that support low-level controls, such as the ZED X.
+        """
+        return tuple[ERROR_CODE, int, int]()
+
+    def get_camera_settings_roi(self, setting: VIDEO_SETTINGS, roi: Rect) -> ERROR_CODE:
+        """
+        Returns the current value of the currently used ROI for the camera setting AEC_AGC_ROI.
+
+        :param setting: Must be set to VIDEO_SETTINGS.AEC_AGC_ROI, otherwise the method will have no impact. (Direction: in)
+        :param roi: Rect that will be filled. (Direction: out)
+        :return: ERROR_CODE to indicate if the method was successful.
+
+        .. code-block:: text
+
+            roi = sl.Rect()
+            err = zed.get_camera_settings_roi(sl.VIDEO_SETTINGS.AEC_AGC_ROI, roi)
+            print("Current ROI for AEC_AGC: " + str(roi.x) + " " + str(roi.y) + " " + str(roi.width) + " " + str(roi.height))
+
+        .. note::
+            Works only if the camera is open in LIVE or STREAM mode with VIDEO_SETTINGS.AEC_AGC_ROI.
+
+        .. note::
+            It will return ERROR_CODE.INVALID_FUNCTION_CALL or ERROR_CODE.INVALID_FUNCTION_PARAMETERS otherwise.
+        """
+        return ERROR_CODE()
+
+    def is_camera_setting_supported(self, setting: VIDEO_SETTINGS) -> bool:
+        """
+        Returns whether the video setting is supported by the camera.
+
+        :param setting: The video setting to test. (Direction: in)
+        :return: True if the VIDEO_SETTINGS is supported by the camera, False otherwise.
+        """
+        return bool()
+
+    def get_current_fps(self) -> float:
+        """
+        Returns the current framerate at which the grab() method is successfully called.
+
+        The returned value is based on the difference of camera get_timestamp() "timestamps" between two successful grab() calls.
+
+        :return: The current SDK framerate.
+
+        .. warning:: The returned framerate (number of images grabbed per second) can be lower than InitParametersOne.camera_fps if the grab() function runs slower than the image stream or is called too often.
+
+        .. code-block:: text
+
+            current_fps = zed.get_current_fps()
+            print("Current framerate: ", current_fps)
+        """
+        return float()
+
+    def get_timestamp(self, time_reference: TIME_REFERENCE) -> Timestamp:
+        """
+        Returns the timestamp in the requested TIME_REFERENCE.
+
+        - When requesting the TIME_REFERENCE.IMAGE timestamp, the UNIX nanosecond timestamp of the latest grab() "grabbed" image will be returned.
+          \n This value corresponds to the time at which the entire image was available in the PC memory. As such, it ignores the communication time, which corresponds to 2-3 frame-times depending on the fps (e.g. 33.3 ms to 50 ms at 60fps).
+
+        - When requesting the TIME_REFERENCE.CURRENT timestamp, the current UNIX nanosecond timestamp is returned.
+
+        This function can also be used when playing back an SVO file.
+
+        :param time_reference: The selected TIME_REFERENCE.
+        :return: The Timestamp in nanoseconds. 0 if not available (SVO file without compression).
+
+        .. note::
+            As this function returns UNIX timestamps, the reference it uses is common across several Camera instances.
+            \n This can help to organize the grabbed images in a multi-camera application.
+
+        .. code-block:: text
+
+            last_image_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.IMAGE)
+            current_timestamp = zed.get_timestamp(sl.TIME_REFERENCE.CURRENT)
+            print("Latest image timestamp: ", last_image_timestamp.get_nanoseconds(), "ns from Epoch.")
+            print("Current timestamp: ", current_timestamp.get_nanoseconds(), "ns from Epoch.")
+        """
+        return Timestamp()
+
+    def get_frame_dropped_count(self) -> int:
+        """
+        Returns the number of frames dropped since grab() was called for the first time.
+
+        A dropped frame corresponds to a frame that never made it to the grab method.
+        \n This can happen if two frames were extracted from the camera when grab() is called. The older frame will be dropped so as to always use the latest (which minimizes latency).
+
+        :return: The number of frames dropped since the first grab() call.
+        """
+        return int()
+
+    def get_camera_information(self, resizer = Resolution(0, 0)) -> CameraOneInformation:
+        """
+        Returns the CameraOneInformation associated with the camera.
+
+        Since calibration parameters depend on the image resolution, a custom resolution can be passed to retrieve scaled information.
+
+        :param resizer: Resolution used to scale the returned information. Default: Resolution(0, 0) (the original image size).
+        :return: CameraOneInformation containing the requested information.
+        """
+        return CameraOneInformation()
+
+    def get_init_parameters(self) -> InitParametersOne:
+        """
+        Returns the InitParametersOne associated with the Camera object.
+
+        It corresponds to the structure given as argument to the open() method.
+
+        :return: InitParametersOne containing the parameters used to initialize the Camera object.
+        """
+        return InitParametersOne()
+
+    def get_streaming_parameters(self) -> StreamingParameters:
+        """
+        Returns the StreamingParameters used.
+
+        It corresponds to the structure given as argument to the enable_streaming() method.
+
+        :return: StreamingParameters containing the parameters used for streaming initialization.
+        """
+        return StreamingParameters()
+
+    def get_sensors_data(self, py_sensor_data, time_reference = TIME_REFERENCE.CURRENT) -> ERROR_CODE:
+        """
+        Retrieves the SensorsData (IMU, magnetometer, barometer) at a specific time reference.
+
+        - Calling get_sensors_data with TIME_REFERENCE.CURRENT gives you the latest sensors data received. Getting all the data requires calling this method at 800 Hz in a dedicated thread.
+        - Calling get_sensors_data with TIME_REFERENCE.IMAGE gives you the sensors data at the time of the latest image grab() "grabbed".
+
+        The SensorsData object contains the IMUData structure that was previously used in ZED SDK v2.X.
+        \n For IMU data, the values are provided in two ways:
+
  • **Time-fused** pose estimation that can be accessed using: + * IMUData.get_pose "data.get_imu_data().get_pose()" +
• **Raw values** from the IMU sensor:
+
+            * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+            * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+
+            Both the gyroscope and accelerometer are synchronized.
+
+        The delta time between previous and current values can be calculated using data.imu.timestamp
+
+        .. note::
+            The IMU quaternion (fused data) is given in the specified COORDINATE_SYSTEM of InitParametersOne.
+
+        :param py_sensor_data: The SensorsData variable to store the data. (Direction: out)
+        :param time_reference: Defines the reference from which you want the data to be expressed. Default: TIME_REFERENCE.CURRENT. (Direction: in)
+        :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+        :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+        :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+        :return: ERROR_CODE.INVALID_FUNCTION_PARAMETERS if the **time_reference** is not valid. See Warning.
+
+        .. warning:: In SVO reading mode, the TIME_REFERENCE.CURRENT is currently not available (yielding ERROR_CODE.INVALID_FUNCTION_PARAMETERS).
+        .. warning:: Only the quaternion data and barometer data (if available) at TIME_REFERENCE.IMAGE are available. Other values will be set to 0.
+        """
+        return ERROR_CODE()
+
+    def get_sensors_data_batch(self, py_sensor_data) -> ERROR_CODE:
+        """
+        Retrieves all SensorsData (IMU only) associated with the most recent grabbed frame, in the specified COORDINATE_SYSTEM of InitParametersOne.
+
+        For IMU data, the values are provided in two ways:
+
  • **Time-fused** pose estimation that can be accessed using: + * IMUData.get_pose "data.get_imu_data().get_pose()" +
• **Raw values** from the IMU sensor:
+
+            * IMUData.get_angular_velocity "data.get_imu_data().get_angular_velocity()", corresponding to the gyroscope
+            * IMUData.get_linear_acceleration "data.get_imu_data().get_linear_acceleration()", corresponding to the accelerometer
+
+            Both the gyroscope and accelerometer are synchronized.
+
+        The delta time between previous and current values can be calculated using data.imu.timestamp
+
+        :param py_sensor_data: The SensorsData list to store the data. (Direction: out)
+        :return: ERROR_CODE.SUCCESS if sensors data have been extracted.
+        :return: ERROR_CODE.SENSORS_NOT_AVAILABLE if the camera model is a MODEL.ZED.
+        :return: ERROR_CODE.MOTION_SENSORS_REQUIRED if the camera model is correct but the sensors module is not opened.
+
+        .. code-block:: text
+
+            if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                sensors_data = []
+                if zed.get_sensors_data_batch(sensors_data) == sl.ERROR_CODE.SUCCESS:
+                    for data in sensors_data:
+                        print("IMU data: ", data.imu.get_angular_velocity(), data.imu.get_linear_acceleration())
+                        print("IMU pose: ", data.imu.get_pose().get_translation())
+                        print("IMU orientation: ", data.imu.get_orientation().get())
+        """
+        return ERROR_CODE()
+
+    def enable_streaming(self, streaming_parameters = StreamingParameters()) -> ERROR_CODE:
+        """
+        Creates a streaming pipeline.
+
+        :param streaming_parameters: A structure containing all the specific parameters for the streaming. Default: a default-constructed StreamingParameters.
+        :return: ERROR_CODE.SUCCESS if the streaming was successfully started.
+        :return: ERROR_CODE.INVALID_FUNCTION_CALL if open() was not successfully called before.
+        :return: ERROR_CODE.FAILURE if the streaming RTSP protocol was not able to start.
+        :return: ERROR_CODE.NO_GPU_COMPATIBLE if the streaming codec is not supported (in this case, use the H264 codec, which is supported on all NVIDIA GPUs the ZED SDK supports).
+
+        .. code-block:: text
+
+            import pyzed.sl as sl
+
+            def main():
+                # Create a ZED camera object
+                zed = sl.CameraOne()
+
+                # Set initial parameters
+                init_params = sl.InitParametersOne()
+                init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
+
+                # Open the camera
+                err = zed.open(init_params)
+                if err != sl.ERROR_CODE.SUCCESS:
+                    print(repr(err))
+                    exit(-1)
+
+                # Enable streaming
+                stream_params = sl.StreamingParameters()
+                stream_params.port = 30000
+                stream_params.bitrate = 8000
+                err = zed.enable_streaming(stream_params)
+                if err != sl.ERROR_CODE.SUCCESS:
+                    print(repr(err))
+                    exit(-1)
+
+                # Grab data during 500 frames
+                i = 0
+                while i < 500:
+                    if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                        i = i + 1
+
+                zed.disable_streaming()
+                zed.close()
+                return 0
+
+            if __name__ == "__main__":
+                main()
+        """
+        return ERROR_CODE()
+
+    def disable_streaming(self) -> None:
+        """
+        Disables the streaming initiated by enable_streaming().
+
+        .. note::
+            This method will automatically be called by close() if enable_streaming() was called.
+
+        See enable_streaming() for an example.
+        """
+        pass
+
+    def is_streaming_enabled(self) -> bool:
+        """
+        Tells if the streaming is running.
+
+        :return: True if the stream is running, False otherwise.
+        """
+        return bool()
+
+    def enable_recording(self, record: RecordingParameters) -> ERROR_CODE:
+        """
+        Creates an SVO file to be filled by grab() calls made between enable_recording() and disable_recording().
+
+        \n SVO files are custom video files containing the un-rectified images from the camera along with some metadata like timestamps or IMU orientation (if applicable).
+        \n They can be used to simulate a live ZED and test a sequence with various SDK parameters.
+        \n Depending on the application, various compression modes are available. See SVO_COMPRESSION_MODE.
+
+        :param record: A structure containing all the specific parameters for the recording, such as the filename and the compression mode. Default: a default-constructed RecordingParameters.
+        :return: An ERROR_CODE that defines if the SVO file was successfully created and can be filled with images.
+
+        .. warning:: This method can be called multiple times during a camera lifetime, but if **video_filename** already exists, the file will be erased.
+
+        .. code-block:: text
+
+            import pyzed.sl as sl
+
+            def main():
+                # Create a ZED camera object
+                zed = sl.CameraOne()
+
+                # Set initial parameters
+                init_params = sl.InitParametersOne()
+                init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
+                init_params.coordinate_units = sl.UNIT.METER  # Set units in meters
+
+                # Open the camera
+                err = zed.open(init_params)
+                if err != sl.ERROR_CODE.SUCCESS:
+                    print(repr(err))
+                    exit(-1)
+
+                # Enable video recording
+                record_params = sl.RecordingParameters("myVideoFile.svo")
+                err = zed.enable_recording(record_params)
+                if err != sl.ERROR_CODE.SUCCESS:
+                    print(repr(err))
+                    exit(-1)
+
+                # Grab data during 500 frames
+                i = 0
+                while i < 500:
+                    # Each successfully grabbed frame is recorded in the video file
+                    if zed.grab() == sl.ERROR_CODE.SUCCESS:
+                        i = i + 1
+
+                zed.disable_recording()
+                print("Video has been saved ...")
+                zed.close()
+                return 0
+
+            if __name__ == "__main__":
+                main()
+        """
+        return ERROR_CODE()
+
+    def disable_recording(self) -> None:
+        """
+        Disables the recording initiated by enable_recording() and closes the generated file.
+
+        .. note::
+            This method will automatically be called by close() if enable_recording() was called.
+
+        See enable_recording() for an example.
+        """
+        pass
+
+    def get_recording_status(self) -> RecordingStatus:
+        """
+        Gets the recording information.
+
+        :return: The recording state structure. For more details, see RecordingStatus.
+        """
+        return RecordingStatus()
+
+    def pause_recording(self, value = True) -> None:
+        """
+        Pauses or resumes the recording.
+
+        :param value: If True, the recording is paused. If False, the recording is resumed.
+        """
+        pass
+
+    def get_device_list() -> list[DeviceProperties]:
+        """
+        Lists all the connected devices with their associated information.
+
+        This method lists all the cameras available and provides their serial numbers, models, and other information.
+
+        :return: The device properties for each connected camera.
+        """
+        return list[DeviceProperties]()
+
+    def reboot(sn : int, full_reboot: bool = True) -> ERROR_CODE:
+        """
+        Performs a hardware reset of the camera.
+
+        :param sn: Serial number of the camera to reset, or 0 to reset the first camera detected.
+        :param full_reboot: Perform a full reboot (sensors and video modules) if True, otherwise only the video module will be rebooted.
+        :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine.
+        :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected.
+        :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise.
+
+        .. note::
+            This method only works for ZED 2, ZED 2i, and newer camera models.
+
+        .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting.
+        """
+        return ERROR_CODE()
+
+    def reboot_from_input(input_type: INPUT_TYPE) -> ERROR_CODE:
+        """
+        Performs a hardware reset of all devices matching the InputType.
+
+        :param input_type: Input type of the devices to reset.
+        :return: ERROR_CODE "ERROR_CODE::SUCCESS" if everything went fine.
+ :return: ERROR_CODE "ERROR_CODE::CAMERA_NOT_DETECTED" if no camera was detected. + :return: ERROR_CODE "ERROR_CODE::FAILURE" otherwise. + :return: ERROR_CODE "ERROR_CODE::INVALID_FUNCTION_PARAMETERS" for SVOs and streams. + + .. warning:: This method will invalidate any sl.Camera object, since the device is rebooting. + """ + return ERROR_CODE() + + diff --git a/py_workspace/ogl_viewer/__pycache__/viewer.cpython-312.pyc b/py_workspace/ogl_viewer/__pycache__/viewer.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89d7722a7220b173dafa97ed4469a4ac8d4a4f40 GIT binary patch literal 43665 zcmd752~=E1nkJenWReM)>^mfj5FiQdi?t&_5)uMQfMnq%3y}!|vFT=_1tZ&4$8Q?B zTxBS`N2u+7Mm?S*RORVHU7j)SsrM|GYo=7wOXXFjCV98$dFpP>^!Ro6J1WV2>~Zy( zneUIgW^$2aSM{6;iO9HdV~;=n__v7rC&|f42DrNq2YT=K8Vr9;53;jLfro312E%28 z&)_rm8~&BaD29wm$RveK{pJocQknWK9TxU$?Xcq4+@COz*pbNcnfh%VcJ?l*BZ>Vw z2AmyEyt8y9`>Y))zJv~!FR{bzvvs7}sU~}W+CX|ohLk(ABg>c6k?nK5V(7^6IpOD; z4Eqee&=R^@$4)m9k=V|6opOVd!->7E1g zX@`DYV&6Ubw@GE5vwkknJ2dE7zkZFUYl!nF^>BSDunkrB4AQE=1 z$FKL8p9}iWZ_`P)R!+Bp($x+14{_U)Mh1JkhqwXHKxc4+XT&>jcKA?dkn2SOsD%pM z;5q5-64Ra?>h&RE?_gz>=Y=E>ob(EfP)nrvo__D)p+K+HKuxipYF1{Y($JNjwN=}a zE+&nh)$6R-FHIA4d#|(<0l8m`+N!)mW0f9VOzQjv;5Mo3s|+(6K5>|j?%PxQn~vc~ z?;s5{%^I5%w6D8=s57{yHyCIeYR6l3w5YzCa~LohWH#en{hXHy*ufqvE42kxc&O~k zHRsnbw|8$<)k@FODfaGNv!<%bvva3s%hJgxk(_MpCaEPV7!bfzN^Pp)JgYtHHg2q0 z)4kFo$MxvMoz%pwt?8BueR3FLqjLVjr~*qPIurU&4@y|TiQLFwuy?@U)&;0iTp~h* zUmpG#-ep5jdDkc5A|)jPV}>6{?|sHxJom{MR+GqQlGD5e%o(KenN#y3-lD0;CZ|w; z)wEAm&+l7X4OQ0WPpNUE){$WMsx4m{D@V;$TRl})A)#x;*U1G1H~sGO2OBtUh->Z~ z@Cyk(f7g)DFPH&}LYnq-o~Z`rHtAIxyVXJq=$7o*EilJp?#9+v`8)=T?h9HNzK#RKj_k zL9f_qFY4#qsDBUs0?)vE*>KO58Ff|gu8QeE*tKG&mUq=eT^o7V#@U{*YiB5B*W8(( zrr&J+B65w1kE|Id*VBoL? zUo;IC>83EBrW6S%hXEkcB~jRC$fG=#F_cWXRqz@!1{0`PTR@eZUZOi0J_6 zMj42IAVs0=k146oDd$#yPj5zRGlDKTCYUORG&M*|3N@Gj#+GJ|)(l6Z`zd)PwnlaJ#M($MS_lod5 zwRW;+(tMW!Q|l&9A()mmUWd%%b@yDkQ)O?TjTW!vi`Pb6>q532@7K?z|EMu++YxfD zn|GwVSrJVu=hMogX>0hjHMbpW7Sf6$#cO8M!^IoJX`4dMO?O>s<4GKibr=2=Cj;!m z?;+~4!EfjQuymMwE}!ugV~5#q@tIyR$P?aS#j{!SoPcMG<~b41R?V{w&k34myDt&c zU6RiR-{G^vclwgxCo|eF#pediX9TV2_NCz0eZg6kDrD7RE?b8EgZr8ebq){veS1c_ zyZu}jO^@s-On(nwh8MFW6`&n6cqO1RXfXPif|>DN9t%*Ycv8w|PZku++?b(HSrT%b zK^Y;V;ez41vAHUNBghw$yufBBS$l;9Z+CxZPe4fb_HdoUr+T{rUJkH0G{OOboeZ+% zq&Cjq(;Eo-IfBZ7zrS0stL0c(F|p8T^Ma`84SKz!#rmUFBPVzc%?hlA2Z${ynXYGtmW8~+DgRs52p*hYzGA#S;w+!{l+$nFywm;qp-YFp(;~u-GKMIE!fe#@5V(vn z>%(unYc+5sh^sPkrTDpS*dU?oM{!iMOBpIUoKCT?iSk$4k#Xvb)wM4%bCmo3G-mdS_ z>`d%p-CMz@uLwC;EbbmHHwHdM2rM;MKEsGno)e>P(>a3X(UK+Fs_8eiF&!S8f@nIm zYj#5@eRIgU`M1(IJwV}VsBwi$G){!DvvCb0c$ldVrLPG&*F2(Ow4B(eX_qA8$jJy> z89Bj>x)x2k-Rt!a0?hz#EVTgD6eu=8o8!v{OzY(>lUv@{8gdqj4YW|Dl2fLY;Oub? 
z1imz0H#P4CRcRd=L>sFt3_%Ds|M@O2+qmr%I@{SlLi;>NgeKL7jagcXz0hK^O5b(w@4n>8P}Kiev)Yb+|H09EZcX3PdisU&_-qjzod| z@BsF%^w(c{^`-gj{Al)SK6`b@y?SB!<^^|F)V-c}ub-{|pyfu(&9eqsUDEHy;|)0m;vz#x&PGtSBD-Dn6B>@xSwN_EQAX?*g@ zlcz`iC1D+=O2f^x0_uax1Vd!zh);6@Ia&>}_$+1;x1y13{y3URp-4N4?aHb->K6$l zHs^x9;d~c3NQluEP3CSCvg9V9uEGf<3(#&QVliw~n9d2ueUm+T4K1vs$)q=(3+cI` z^koYfbmuXC=dp0k@z2br%q&dl$~6;~h`aK>4U;>5@Qcq94H<Qu-tK10!xk`yq}1eF9T_)aiZ2*n{D_w+g-cD{BgAresh<0%@K#gR_gbG zYL)~8M^Kr5pF-Ob6WOOsJj#(ATh|J!C&wBvB|&17m{S=%y|wBC*!22XfKM7bRK_gRe1Mbkxdkj9R=z6?2}{`O_7VL9}e9K>tKxiQn28O*uVBcEBu zP+Bekt=1h$FsC2@I|m1cf{?WidpZX_D!BQ59+CR5Q5o2(WD$!IHOR;Vk-}8*S&nuo z9O@jF*FZpA1m)V-@;iudkP^(%;UUOv{G4Fw8Wt>+Qb<1J9~j~;$S;!H2ZzoL`h8-o zyxk@Qf}Fo|KrjynIj|hPqj9?|NA&b)UY#y0R)c=OgZBYKL(0Nh$SMt$?F(n^54raf z>U`nV7Z~cS;-8Hje zHY4obG-pJP5{8NkB~%eC=+B~2`Ht}-`|mP>HVD6vOEa+N(EhzX&Hs60;#pzTD|$xtdn zun6yak-o~rh&`?u;Q-BpMALD!IgA!80d1^xe?(cv;KjmQV>s(T$bEn)RPb0LZnxNy zt71|5Q+&=-b0u?a(H-slj`q;;=fXM9%XocR72)-gMR>jHZh0l)^%4cISA7{EamuyL zwI0LA9(U~q^T+Fr@WJX*U&P%mRRDcD2*Ik?3u%CNV8}PpPhqFmduF7wUwp$=BfVf7 z=7xr$SGd4fVs15+v5q`qa70W2(V2nUNnR~^73A$EkCrDJwd)2(&WfycF~yPxVWT1N zU3jk;9-6H7g8M0kq_kg|jP|mJW`yoj2y27lO!j3`96}fcF>a+2w^E8T+P5fqw@7)7 z_O(jf+6M_nd(ndglRfu=&1%nlkdk0u`M{lQ&v{ssYG3wQh088d-mHOLXq)0B*fu;Q zF!&v;Lq#fO88>trA(^r-mQ1lX=2vVT3FC(I#*RdiehZoLAXLk7ofo>4C4lm2+oTE~ zJ`eAAjSwE;S9F&W#|+;Ay@SocB5w&s$U(6!C`bcahelL1X2p}T5syJ8tI|ssB#1~5 ztbq~CvpDGqULWIf61;&^oe)#nyuif0K7Ro6q9M))5s*z}h^%5dD@!IqNEMnXB+BWn zEV%%luB*Q@5b$~#Q$TzY+{+?B5S%UMH`t&k5~80XO`rze7Z4FSQ!nk~odr`TW~>os z&3s-l6A?*21!Z_T#3H|Qa4{D}qs&J3-ypCgGG#Jwm8j7pWAwLP#1hvN=MekEj0n=E z#AV0hd&VC~G%i5M)LdodIuXl#gS?aE5v)QwoEA03pP+<5J-k>M(n5OX<*k!jAwj-;dh)dVlT+|c z@|9%yiC7lecRzC^WTf0TBqXOSo-XP%iMrXUV+#RvsOT<%I{FU4dd%2s7&EB_^0VpFga&=cNQ5pY{a*()4hRsU{ zr!qp$%IU1zP9nIqa=|JyL&MF2u@1A;icDM^dF|xULg0>(N3CP+6q2Qt)8+36TMjJF z+DC)1TUm0jzpPT+$Wxi`%jTVhYc*UG(Rh7bryu7zey0&JxP z2&i-kXnF~)UBrCrrd1Xkr=w8tkbHv24^)Qb8^za26cf^un(XHGw*%5BIa5>)sUrt12Gpu(f*mmt@55u1nJ_>*y>ppfDX&m(_aVvD(F8f zMhi*OdvC8#NbDLK819As(!H;#Arn|c>8baukb;7n8k-M#Tia^e z+FON0JhvTfZxGV82^$Byhnj|Z1V>MQV?ZtneW~~N`!#)`w^CzJkN8r2=7`CKGHf}1 z1he;S0A6=Mu#2Neop*F|ojn7PLeqn#7ZnCD@dlzZOyHJ_1lxoNT~QnGe-eK=$DxSjUD{>lDG*~ZyGIBVOu3YzBFTx z*w!s5PmYLf!-B0~%Jzs8#KI^hbD(2wu*a>+^O<+R$p2D z-p;UN>yLYGI~o?!E2e|jUb^~HIDPYYy(Fa#xmV1Vf3W(->YKH<*ERgjAk}`-n>`)S>xoC;QG?|Oj0Q0FBUCD{s6+G+%!w`?Zh#y~s zSHD8(-;!+*VGKc=?C7$FqeokgZuJBhS>Pcq!t=tKiyq&QKj0y06qFu;;Ao+^aA1r` zNJg|l?Cu1*@iE+#EMUosxJgVXSchTe6BH6elN-T8x)VwyzHSxu)r8PQtR>|Fz5#*} zAAny{flumM4jpc6YLHM2H-<(FX_{mbcHv&e3+|ibF>GO#_#ILEWAO&KF2NpPodk~H z{uvSm2qrZcf>^fE6tVIe37dXF={|%90Rbem_WPz(d)h)y#q>xxXVrM)yeo(7Fc#dI zp{yOzta?7H{$_hP>tMv)wBXKoGvOXBn`vLfxnjYYKh+pYiT6D4l*PlCg>j!S>wr@nr|YSxq4~^i2gJ zX8|-fW_#MB7QHj|%K%A!UfGkor^xV=BF~g|^3n0}Z_9Lao zAM~m_(P*YLgt5`dsEV}(BS|WdOD~0NMGQ}nor4}>P4mJ1E_uhvBMM5Spa&^L-haX$ zlb*a{cwox5Rz6slZcTewZL}7PsVPkt{uC$El@Ezt*U-wTqMeGCS|*E>js&tp=}08& zlMWlRI}puAR1!4NCAUKAcE*cJ4eg5Q*kit-lNf`khS%}z3E zFKv#@sRmSi3SN`nsS04^P-M z5uA)jXsm7C*VND|IDK5_IgmP%mKKR%jTP+e60BYQ{!UIv91e=FY-CHygdC5{ndDTj7!$C9va%+KArz62;WJGrCbSmUTD#7?s^cOImT>x6bxlm9!Zi(1(=X1;O zm@;p3zMga`={4tkb}_x4&+*_Vc_Aa~^4`h4Z|ui|eZI7UvJ@|5uf3gJKUY0FIBxll zQ`}J%2tXYKLkw>ABY>rAtT#Ld3i~4IS`ZWO`V@Fk->7y=M#F2x7Er39xX~ooh_>p5 zmRv}nfgSXL4`$?-#fu>lOz@~YnYuzMpAT=a_G19OMvN|?aVbN3j7wV91B21NYh**&OXoc*H1CdF7OfM6uJBS_4(&;ARJ(_-| z=TJY(VS5K7frFP0Mx8~xvuG-C$5}@GlniyWlcJDF@1iXX7o)g%r;I@gGx?`&m0ZTg^6sE>JY=XD+fNLcW7*M@Qf#y%950!y|k=h)}mC+0aU@iET;dR^x zh?NCa9=4+iHW~#meg4sS)FQvAqP~|RC+mCEmdD%jrq)jbQ7l8B)#p1lX$2_=A9<9H 
z4&&hw9o2|Umh9-`)5OTRB`D$k7&Su(%5aDYBVig|hrjh4)4;?MVgfC*y)e!NWmA*gJlu-aCc4qbV&yzaP{R8f*DG}2$^J~QzWDqsc`MnTQ_ zr1heP6tyREs88AcC5@9#ibsr7LEIqWQ7$hAo=Aes+!F9)U2>e1oH6tLhn$c&_AjdkAoqqWBc7oO`+G}j)IBG{(MeP1esL~>#&EMe`@qqQfzd)oKz zg;Wo9h_C923byqI{R510g%FONB;#gL2=}k4q-<0IQkcpAV#-L^*F=ZKgj9&)NPH%m znY4lcmXzv(ok(sk5$Rghy5JZd37qlg(0B zN3O4mIJdBv_sV8`KODF|5U$=n7yRhrhZm!}+Ijr1ZjU_ejhys{y8EKt0lqsBJ{jbn z4u;OX@VNmB-3HUI#Gm^H!_&sSCj9I-9Wi}oFeD!}iPgSv>4k_R|54SRzJ5Afy>+hb zqh~&RCc5JYkN?$2B5lt^p6?8u?2eus;7<;OpC9Dg2192?qG!hVGh?CFi>PY1sgYH6 zK&oo&(pbb%fT|+7YarjNzEM4Q_1JT|QzIP<-Kg&ORHgw@4`Z$VSp^x-~8lN@pqn}38QC4xQ>0U|sblbIO zuRa?tSvPLEBXQs{7G%nX_;>$nH$+p+hG-fy>RFN@R%aU$7rR&4TveHzszq4>rNwWN z*;p^k#pHC$vFb0%%Tb@?(9-Bx58Y8}@Y5!Bdqb%TDwWH`D*gs!r5^dYkI29{*H<1F zfk#-q5xK=&uu=~75eFORQ0o)-u$%ZxX*<>yBN7{9=JzaeZ-6ot@&I{=V2g9yb6;Vm z65}?XuQF3QbxSj~A+AYye6pDeZkBUGPOCe9GT|lh<)oL}BIi+m`?jfJIW%UKw`Y+r z*NoD3C5`?p*p?h6C1-G#rVO1H$#pN$UY#wK#xhtdm-?jTOEaL;0&LNmH9=&z+Nb_KM(K2Ct?SL3MXOs<#tL)9( zPf@J6se^C(1|CGS4O%^16}-{ydzsYR6CCo;S*hO6er%Q=?1G+79186Bb4)hA($n49 z4}Px4bF#DRG$pP(+`iSbS3@b3FiL^@cU1DfCy&Sj?q}rPf+twd_4c>YFr{4v#Y8K%^A$u;jD0x9R~)?M`^CV|2cpeB9{($RpeaKA zBN5wK*r`FLv;EcW_f3xE)y%wm3!k$kn)5uL^ZbN$p?cRsUco!nSE@naMDy43`Dxwio6kL8khlW(D{kD2ml^=`g;_stDI z+wt)ZzWQ*e^?0=PdA{}eXloCT|J6Mp2SP*VBhCx=QnRLvQyavZ!>Q{?4plZ@OLC~D zOHEN{G4Cv%Hr{cT;(W5bj15O5b0z4hzUzH+sn;@B7*J}YDVui$Tdu{=oKaEjaH>5k4By8!^!a&8~LF&ac|9W84ghVb9-VX{G7Gch#c^N=>c)6ESHcmn-3&<`|^W{K7m0e>*a ztSLy47})9=E!w*nJCm&io$5xyy;V*TIWvq(h!3V0&~d?74<_(T3BgTMtocyGA;Apf zFIe`pwCxw1sEJv~2PjT`0`gZ;rz{aQ4R>)1C?!_JY*&gH&51*U${RX^ANXTrg$bW4 z_4Uz9qpyw4JJar49QF!{%bK=CJ)3WPHqY&eZg1wdH;1<$j%%7B55sq2aBu!&dx_{4BHP)=|x>nU?dMj5?CMOd+Ol_ht5U5RZ8$m{one2?Q^w3ozf(#CIfON5&4ck3 z{pK5BP5g*_r?HkfD@a)LnAK;=#|0wvPc99@Y)hG8ts!7Mhs&K#K>kKC;^{%@*X&x;m8cd50MnnuDTPajI1RE`4!V+xta({`|bALr1TjCa| zE(UOyiyu}3Fs$yx3!HcD9n`GcL?*xH{tLabb&U*PAfsDm`w2Z|7oE(9jRR{#fMg?G zL&F!uB~IG~vow&ck?tX4G=HfDj;{POTI2s2sc>G?l4Vc3o1J_4g~=D*U3YEM)lJv7 zUfmkbt{LACN+c~Snp(-HR^B%xCvTWJa5p{w@{Y+J)3vwLm(3SdM2l+pqM8|QcFPAl zZ|uBT%CBz-7ww(gcekiwVjs5t(q+@fuRVA5x!JY+vW>H&(arn#&HHW{`OOEyB?l)` z=4&?24$Zz8s@Xe{G_`B08HPxx5>^QZqU9U-@(r^``0~xS(>KqrTR(AN>cy#-LK&++ z&qqT(r`G(6ykCD-WN_!=OdISb2yv{Rt({#TO4|%c&b%}A*Y~Y>@g>vhd~0K!+3-Q_ z<~^IuKdG@H{FBYrIz!@5wwn?D#K3&Bz0PC)#ObQbH-D0EMK}ga(E4Rq;{SUjmQZ$T zWgmM|Y{ATu+`v6VX2&_XRIOQ&dae<>rp`Vt-nF8xHk7Pj0=4+ZU^QhGsySk?nzFa6 z@6j5CB~oLVIKo6eLiBo>Vk zC$vdAm;@}G29sDvD`-b&ycYIa&wwA70Qo$yJ;E(P+P$CQicg8C!;#%b@Cz<3np^HSU z0U#7FU0()E8imV}6lM~_AO;RXBU?`2P94qG;Y4K5vc4_waeUZkF(Qb>jzN1@(5z%#m5gkB@!i{m^^s z>F|zY5$AD{=_$G6T=~U4A~GJ0D@g42t-7?jOv5Lc_PTZEPl{Z1tIVIQvLYOFYMoF4 zLkE9|fYz?P56|&VJqN2LSk>!}9-Bu&CXdOZNGebdwT?NGA*n}xpkT1z#6h=UY-A7% zyM3#8CWLYO#CD7PHUq1bcnn}d4Wcrc`+WpQGvf9ZW=-=)H0@jQ8s@G!h3`17IHv2u zIj~TH2})Kz9sHOs3unRnI5Q>SVKyHr~>Bc<#%ON`W#0(YdRl^y308y7>5 zK}G+G5hHXChcQqN7^sVR$#HaQQftOhG}gZ^na676buNa(h1{P0hQZF0{r+R@?ncpG zo}ICZ-vlyy`n72oUx@R1Y!k3SA-A`wrM69Aq1vX_22rB+=jaHT$<{;R)*$NS0&M9p z{a<@?YvaD=hI(&fbDNNdRJ{THro^EHSjwTiMRGNY{I|4@K1QurM-EriQ4)5PEI6T} zi#RLwWKkv4CDZlSnyxmzpY!9&n`>{jMVtqS0lfI?#RYfOOi9GOa=~5jwpSxga&KR7 zqre%<50kGazklk-=WiKrl}Fqy_w*EBc_g@6$>&2J6|Je~YwCYG1Z($*^VxgOis>^E zXB9EHtTJ$*cigLBb?z>_Uue*~#)$xfVWu$zwD_Mcwv!hCR|ODLa5oZ4jw@J7Fr;UR zt`dJ2O!QB>$i*bx_-Y=_tb;k@;K(rJaKK|Q1Ly$bJeqMoSK_J8Kqt;#h+@{=XhXc2 zEjz2b$Rb9%-Q~|{7D=p+D-0dD9xjx-D&nZ2(YE>I~?c`OHM>{Js z{bIIG-2X+7KOyfndH;#Lzaj4)c|?}5-HF3uHE>Df)l%GX@`&&i$(PF%BF|0TI*b4v zIJR3Gj1RI*)@2WDCTsZvo7uYhL6XT@L80;oNmqN9lfZg*0dk?50X(L1dljZ(0LX%5+qoN zL%eK}jN%xBIL(ToFv+F^Bj^RNZf+6itCWSz+&7LPkhG9h^hO_1VDp7A5hjzy!qVx6 zsb_)y%Tl)NuZK 
zN|wXw`i^vJmj+RGG~oLHhcwqWTw++Bos1h}*Kx-5uq6)?7NiG|skSz}z z?s$<<24WS4nFMI8&C&?topNF_sAx}BmRHlJ61n7HsT|VOpuR2FdAnSWQ+548!qLp> z)hm|ayl$>g(M5^kketmB_1Ts^(k6u^@WV$ ze9HWzM34MFsJ3B`5?3#WG;Ju8<6_#dSB_PGpSTUm_y-Tlx%-;rkowyLUGyI)3 ztF5|de{||G+gRdQLXQIJ7n2^B-eHK#;dqi9$fd2t`M5k|iE%lSADshpk)r8!3ZU?q zJi6*{-w8D=hsNwVTk9PAx~)xqqFH%H zo<;RH_^ce3R}o_p^`Jo&WXM^x=c){S-%z5JKwp;OJC_E*H)n84@({e0Q_RC@QSCeu`jz!90IjsI_QU>H$qzuYo^;eVfoE(dk=jE{a zt4aBS9E+61sKq%=%9rFz(lgE9_rY}+~4jJ^BeB(>Y1D%7uwZUwFj!J{*hoK z>|n$jp%dwGpC5+1`L%Kkh?-Ri*I9Q?eE$wuI&dwg_`wK#n7~gOh6}u^;cnDamMo5AP1pLkL z5WW$BqrD?R==)mO&5J@l%UD~-4(H)~-torf`j+F~x~7)a2BAp%vaY4Mt+Bbi!Q1e3 zLtQ(ir_m6FFhVk!PgCvQwuV+wnt~QK*VAPKdvKAvZ|}%p7dvv;dbG}4+jMw;t&oL0 zEzJ$yLnyV?tHw*W8Q`e#p%&8SC-n61C*$aJq{l4>a=dpxNnbDq-sZ+SRLV}G3dylo zF1X_FTX4!}j|Vz?{B+v_X0KiR3X6Q(0z2y}xU~trhmIWp!eV5L6LwD&v1k6Oq*cArx8vGX557@SPpp)H*)!)3Pp~d z*mWkUqG4zid3!q3#~6Gz6s#ewq}&(@N^rwQKAB`p-Y!WwLpJMxLtnq2y|nZqB!bOB z|Amu7Fg#=NX{Y@c4nsx~@cZiIoD6*G`iBBy)+CsGfT`=I*=4U78{?6n=QQ7hI4mxj(&x#T0{xMm3FQK9SYIm`W$#?5*T zV|GsbIVG4nyn^Wj{+{*<##+I+ha=Q080!UNgJA2&he6n}H{v%W9kBc(iaSV3G73PJ zcto;x`)kx6*bKf2f_j^KB6G?L!?gu>`kVVA1*^gtH8TM|V_n$2e%vZW)`T-w&!+Pk z8^Z35h{V~jjnm5_w#o%t(c6LPjWZcnx4!k#`{vn(x$+wae&oVE6!&mG?%;b>e8Kwn z`}l(05nJtoEr;YrwweW7+Qiz4&Z*34R9kV+mKn)fyO6cvGnk8QB6G3SHw$q-^`6cB zW)Aq%1zY-?O_91EvXH*8?A;yjuMQXNVJz%|XTxk;bmLxr z%~(UtG8rrtvj5YNG2}_;oGebw@(2 z-v}3WQsTUO`GxOnxw7SU`JQlo9piAJolZ#~H_a!z-YmadIaxWi{@si#TW2gJP~>2U z7fxHpC$D1(Cdw{7J#q5VGvlUv&Xm{pU)uk@6JclZxN$*znm9RCI(2g5*|4)zjpHV| zrq)ey6VHX6WwEh)a2U@t@qE}>uEw84+A<~eJ>0N``>t;nYze17HEz3$Q;W%$lB2dF z-c~eq24dBxF<25r@A7x zQg$9sjH}{pRqs{7*pfgfXUY=IspfO4BeoSVHk@d?d}8th9eTFqOl3rK*YLS(BDS^K zyHdpPz$DBQ%h`=9;$37BTh)Rsd&=@o%9WIeZTa2YLY$V?xtEOgTarUOB(`rURS=h? zN)CR2bmSPtCI(}C`BDTELl#0R!!`IyR@2bXaKl*$m*QnXf)nS=6&coOrn=xY(bZ@t zGj?R+cy=ttSr~B^lhpggOD~3O(pBly9obnBU|stw1eWTW-brD#i%ir_<-|EXJ4dbU zAPBLz9Q62T>j*OE{yO<&Gi~|QaXM{EiJe_%MtV6v$F?J)oydX*qV|A%f{7{5W0Z|a zq6VLo@1xk;CA6zXuBJI4ZXR=h<%sk8;_=#ut$;4vD1jku)LDT045Q8p9$E=5;;g<4 z<67JVFCOx~ zbj_Oj6f)psG;Oep+VyR`)3l4OjM<8s9%gFW1##$j{db!GNgBot>ou#DhD!w$mGan$ z#&KsT3+XPpwCjgvip}Ua0``^@2#p4) zcWK!yCe`+!&H#x5LA&4}U*OwYCb(C3lJ$AZr51b&X2I#2Fn@dhlb@aZpLR3bJ2X!a?uJgntWYUI=i3^cFR9ZZki#%4ACu7MxNwIlgrJ2BV zFG>wci~=2)LFp1XE*({h%ITj{h^a^{TsE4(Ws#RdUM{>U$!3w}og-w;oky%-Y9Hn< zP?}MAfkebhG^t3Ewj)-fwvg%OMw;45BnFnq&cEawPai)zVVo$zr=uo}uWllZM9I~C zb&jr%qP1iN?tv-)T`Q8pPnyS7A;xNm#m-dq$3*jcbr9(N#{+(S#)<< z#f))!FVT8XfGnFnGrfsyKRk<36pb2#nOOuF%%JfEGjsut-mWFuaE!8f(Vb@oKM?He z=Vn$FkH~EzXhPiW$PF19+D<1P~7O&@GQw(VV%_Nsc-XJ{LEKHqKbt*}{$EH%8 z$m2ATDybn)Br!n8X)>Qfa!jT>5J(3;j{3xsF4(supZK!{L)`Mdir%pF2TA2%(CE@DwKTc^bb8 z^nrLZnPEx*4N61=8kRcVhB|R;uX}eGHIC_a%C!vshFT5li%| z)GYQytXY2?i93jMvW}IZ1HG|-TnqKLQq~rjWoqmV^vuLe!vR?H$?q7)u9zl+hDwqMQQ8PqYuKI_GX8MiUyi=Ht4GqNeZ&0`5)&;Yg_r+)`+uBCodTQi(LteT}hct3A;V$3mU&-a)tbL;7UHNGMq*| z!6)f5%K41)=_7o`@|jFNqh`DTjiztCLiF8vrDA&Hm71Bdna=AKvm3A1%$3b`epqpH zN7I(^Y0JWC_~3C|C8+KHs^2ECuyS@2B7y{n0=}6m% zkY^lIGazm$YUxozq}Y#w>+Y2b;~--K*ceGcb??#GG0Qko&-I=s9d3I- z{saNH3&SZYu3=*!DYV6e8@kVOwe)rjjpY7}Jldv7U3++_|3c5uAhRr>JHGcGg^u4_ z+g$J6Q(JeCeMVq!V^fo;fo5jn^;A#;JW}klqZDMrz@+~2O=Y&T!(zo+VLxN8G%{sk zmfj%beG5$i@}@6*O>T>(Z{pK8h10i;+n6#cnpweTRzx#5@|hdMnVTcFEu>M!!Gtw@ z{+ij6_mBTH@lVTco%)ki{O;qSj+3EXP<8pBpo(NIe=i$)D@8LVEpsaL$ZM{9OwslH z|M>V%1Glz(a*^NB7CPZfkuOPeIg<8qBq?ID z4vzRsw(W_W>2XR-i&>N?x#%rz7HM1t$0@PHXjp0;{!v-mZIKb0A4xskQjwexuVWeaeeU??SLF~w@a_L?q3Tp2yPjB3#*z>K1Fl4e6^fkkPEVOeGOw8L&nWG9OkGO( zs7s;V)Ebwl%Y<_;)H68XzbCCr!Jeo~N2sJ*7i!Yh#m+Y>^eIzKkdTErToz!^+})gX!a zs+&U@`z3Z8W%ktE43-mg;mf0@l0%x-DN+K|RUyAufBUM`upE+eGk$o59IO8N64iYk 
zrRwm9x=?HL;h$~~9x+;wJn3Et#Kyz^l}jVpl>X?(tX}kLmD*o9#N_8vTbT@8=HIBq zzY$a2-PI!oPDmDsFCW?cDXnpdtNX`YjD3?>yAMCA%I2o=mYW3Z2=11njr$s#Yq4p! z96erpw4VD8MLU$}_QOJo@_@9kKD3SwlGxNT+6_PX)DYJ@Iy4yU?BCmR3>mpz7)vnL zP;lav{NNSr!$Sewutoy+H&F-o26+?YeVeMZ$z%pT1SIYJULkR)yPJ**2=fG%sR3|OL4zGcxf=4QZ#O!w>e&SU2@@Kv$f2y zXaN@=!5}8=!XYVzFq%r8s+~GRszE9}m$&7D2@T${Ntf+orW8l(f$j1C29tnNCCV6=ebrA#cKt}%8A)iB9Yrk@u{ttZYSkl4Gimlc3D>D* zs(c^5glE;2)#9`mjg34;Onr|tTT&eRmEzbqAszdrICZpesZzGKRxzyKBklP$``W3N3)U}Ftt-{gs!Rv!_ zzK>3Qcq;7L2mIi0O_aX+6bZ3vCTm0?RyeJK3F2OFzSJz)aRp}9UOhW&zB)ErH)p)D zcdqqD)6KG*ogY`+TL1CNh_i(-X~C6(w~EG7?q=uHwI|W+Dn1)uZ(1?#WcRLJ8k_P( zi&pda?^+#A*}$i42&ZfsH){~4bKaIAp~`}p&a1_<>u1m0*u*7LUYv-Qyp4g7|Nn`04M8;d1N zQaTKpTPP{`n)%$VsyU=~snB-M(BBrsE+tYDk7PpG<$&b`As-ROV|=LHQC3V!C_gl! zd4*9?vtdD580x{?k)!fax(;gdS-0c;W6Oi;?Fq|Epz=_c4w6Q!!ctJHL`RDr2hF}p z4wUkU9wo9K*_P<9oxM%cuaEk;L@V*;x7tbvYlm}*RwmP1AfS}yo@@Vgf6f0WvG3++ zAu<6H_b+Mvn>2@8Od?LAEf?`b?Ta4W;ev>tDiNh(1WUnU8G1{m*aYr9{BZlo+fN=t zHALid--jogt=W19yzB~I!P-4^)_Y$3iML@R3UD+_K_@XLA-^E30`3@Ch24f&%DYRa zkMZsmGd^6KFm6>NpXS}GW)pb#rnwB>y<^;Zx3v7-;I;Et&(B)tOgECk%eV2R+Z6Me zOx{*J|M(Q;mDd`tHqKf;NV<{4mv0S~Zo_r77e2gjYr`+L{(LK4abJpK;*v>@Smv_W zXBc5+{4n==?$u^oAK!7KgD>9|D&2jv{%4IJH--+kMGtrI_+Q__mp&7lv9w%$zHJjf zp{6ev`8~|oQb2gF`t>|l+7Q<@= z7VPP*hU>8V`A3umn4qmlFb=TOF5)Mi#81$OJCc?04T6~)Iwu&<#(xg*Edmmc0wha` zLkV13P#eG{C5_0{Xi5=J6f3T`B~{Q=9cfUk^Xd5RmC5eGrjtouhD!EK80V3~Iq94_ z^1Cj45-Qnq-;4w}!Qjpu*ZJO;g0kc#(0~T0>X!Pa@ z7sJvbW=wLGY>%z-E_d^FEM0WBaNQZ# z-D;s7!swD?RImEZ(-pD>M1Fg}9&kZNO+m{%$E zUGiRsCvU)JUoXe~bBd+K7-u{FuMvftkfaM+YP2Glzo&)uF{;7B0;?~srAL!?NI}ol-Pn#(Z3^QbwN>-B>X{AE6}$NryKk07 zYnA*-y<+cQ@}umK>vRu80bJf3UZC6c=!}gIHeR2km7pXf64#t9s&lfv}pbG znX8*->Sm4C_g>v4vu-z2KRkA$Ixlg{dxDt-aEGT#gju5#U_X36NFBI zSP&AOe745Ox+!ZKjl)_Mqu?O=QQ~7qGn8&=V*fjJjCMYBEH_V|BcnF3cKmY$o>b|? 
zwtFHB;+(2z2Rd0oY$S3M>J^kgOFEmgjLt}y%$1-TY{=92;rZ5H0f}Rn0-_)6)6CCS*z}LT4m>GQxxZwX(49l zk!^t2DWqUNrkPOA6ENd69muD}q2WAZwps^G`73BQutZKlG-nl`vuf57T~o{Be@-p- zt7Od*7_?e zr&S?W>s;!MV{=_Mo{iY{%xC2*lB=xMKu`|0lA5bhbZrWLhk8y-G)GJl_vhr%-p&zb z=jO?yCC)97M_rD;;V*;Y$h%6IaVz>vMg#@8+i!cab=!j?J3?8wr|*H=4lgUoTKFKx zY&~XtkYu#xKS;3P*=e#CKC~rR*RYnd!m3IHyLd}{4X%T8D8cqNAxVAUK1X$eIdCDs zqzSMYJjMAteYkm6x>s&+WB~WJtG6WBhA-gWx0*n|i1-?*?K{ZZN!}jv>fi~<^l6=% zlk^GP!JYs|Xj%LmkA<#^=MK>GyX2AdoFfvCD<l47u>gBvz3pD(GnZ7HJWFaMI=%<^xS T?{OG@;z-<+Y5qxu5&r)KKGl!` literal 0 HcmV?d00001 diff --git a/py_workspace/ogl_viewer/viewer.py b/py_workspace/ogl_viewer/viewer.py new file mode 100755 index 0000000..881de91 --- /dev/null +++ b/py_workspace/ogl_viewer/viewer.py @@ -0,0 +1,785 @@ +from OpenGL.GL import * +from OpenGL.GLUT import * +from OpenGL.GLU import * + +import ctypes +import sys +import math +from threading import Lock +import numpy as np +import array + +import pyzed.sl as sl + +M_PI = 3.1415926 + +VERTEX_SHADER = """ +# version 330 core +layout(location = 0) in vec3 in_Vertex; +layout(location = 1) in vec4 in_Color; +uniform mat4 u_mvpMatrix; +out vec4 b_color; +void main() { + b_color = in_Color; + gl_Position = u_mvpMatrix * vec4(in_Vertex, 1); +} +""" + +FRAGMENT_SHADER = """ +# version 330 core +in vec4 b_color; +layout(location = 0) out vec4 out_Color; +void main() { + out_Color = b_color; +} +""" + +POINTCLOUD_VERTEX_SHADER =""" +#version 330 core +layout(location = 0) in vec4 in_VertexRGBA; +uniform mat4 u_mvpMatrix; +out vec4 b_color; +void main() { + uint vertexColor = floatBitsToUint(in_VertexRGBA.w); + vec3 clr_int = vec3((vertexColor & uint(0x000000FF)), (vertexColor & uint(0x0000FF00)) >> 8, (vertexColor & uint(0x00FF0000)) >> 16); + b_color = vec4(clr_int.r / 255.0f, clr_int.g / 255.0f, clr_int.b / 255.0f, 1.f); + gl_Position = u_mvpMatrix * vec4(in_VertexRGBA.xyz, 1); +} +""" + +POINTCLOUD_FRAGMENT_SHADER = """ +#version 330 core +in vec4 b_color; +layout(location = 0) out vec4 out_Color; +void main() { + out_Color = b_color; +} +""" + + +try: + from cuda.bindings import runtime as cudart + import cupy as cp + + GPU_ACCELERATION_AVAILABLE = True + + def format_cudart_err(err): + return ( + f"{cudart.cudaGetErrorName(err)[1].decode('utf-8')}({int(err)}): " + f"{cudart.cudaGetErrorString(err)[1].decode('utf-8')}" + ) + + def check_cudart_err(args): + if isinstance(args, tuple): + assert len(args) >= 1 + err = args[0] + if len(args) == 1: + ret = None + elif len(args) == 2: + ret = args[1] + else: + ret = args[1:] + else: + err = args + ret = None + + assert isinstance(err, cudart.cudaError_t), type(err) + if err != cudart.cudaError_t.cudaSuccess: + raise RuntimeError(format_cudart_err(err)) + + return ret + + class CudaOpenGLMappedBuffer: + def __init__(self, gl_buffer, flags=0): + self._gl_buffer = int(gl_buffer) + self._flags = int(flags) + self._graphics_ressource = None + self._cuda_buffer = None + self.register() + + @property + def gl_buffer(self): + return self._gl_buffer + + @property + def cuda_buffer(self): + assert self.mapped + return self._cuda_buffer + + @property + def graphics_ressource(self): + assert self.registered + return self._graphics_ressource + + @property + def registered(self): + return self._graphics_ressource is not None + + @property + def mapped(self): + return self._cuda_buffer is not None + + def __enter__(self): + return self.map() + + def __exit__(self, exc_type, exc_value, trace): + self.unmap() + return False + + def __del__(self): + try: + self.unregister() + except: + # 
Ignore errors during cleanup (e.g., during Python shutdown) + pass + + def register(self): + if self.registered: + return self._graphics_ressource + self._graphics_ressource = check_cudart_err( + cudart.cudaGraphicsGLRegisterBuffer(self._gl_buffer, self._flags) + ) + return self._graphics_ressource + + def unregister(self): + if not self.registered: + return self + try: + self.unmap() + if cudart is not None: # Check if cudart is still available + check_cudart_err( + cudart.cudaGraphicsUnregisterResource(self._graphics_ressource) + ) + self._graphics_ressource = None + except Exception: + # Ignore errors during cleanup (e.g., during Python shutdown) + self._graphics_ressource = None + return self + + def map(self, stream=None): + if not self.registered: + raise RuntimeError("Cannot map an unregistered buffer.") + if self.mapped: + return self._cuda_buffer + + check_cudart_err( + cudart.cudaGraphicsMapResources(1, self._graphics_ressource, stream) + ) + + ptr, size = check_cudart_err( + cudart.cudaGraphicsResourceGetMappedPointer(self._graphics_ressource) + ) + + self._cuda_buffer = cp.cuda.MemoryPointer( + cp.cuda.UnownedMemory(ptr, size, self), 0 + ) + return self._cuda_buffer + + def unmap(self, stream=None): + if not self.registered: + raise RuntimeError("Cannot unmap an unregistered buffer.") + if not self.mapped: + return self + + try: + if cudart is not None: # Check if cudart is still available + check_cudart_err( + cudart.cudaGraphicsUnmapResources(1, self._graphics_ressource, stream) + ) + self._cuda_buffer = None + except Exception: + # Force cleanup even if unmap fails + self._cuda_buffer = None + return self + + class CudaOpenGLMappedArray(CudaOpenGLMappedBuffer): + def __init__(self, dtype, shape, gl_buffer, flags=0, strides=None, order='C'): + super().__init__(gl_buffer, flags) + self._dtype = dtype + self._shape = shape + self._strides = strides + self._order = order + + @property + def cuda_array(self): + assert self.mapped + return cp.ndarray( + shape=self._shape, + dtype=self._dtype, + strides=self._strides, + order=self._order, + memptr=self._cuda_buffer, + ) + + def map(self, *args, **kwargs): + super().map(*args, **kwargs) + return self.cuda_array + +except ImportError: + GPU_ACCELERATION_AVAILABLE = False + + +class Shader: + def __init__(self, _vs, _fs): + self.program_id = glCreateProgram() + vertex_id = self.compile(GL_VERTEX_SHADER, _vs) + fragment_id = self.compile(GL_FRAGMENT_SHADER, _fs) + + glAttachShader(self.program_id, vertex_id) + glAttachShader(self.program_id, fragment_id) + glBindAttribLocation( self.program_id, 0, "in_vertex") + glBindAttribLocation( self.program_id, 1, "in_texCoord") + glLinkProgram(self.program_id) + + if glGetProgramiv(self.program_id, GL_LINK_STATUS) != GL_TRUE: + info = glGetProgramInfoLog(self.program_id) + if (self.program_id is not None) and (self.program_id > 0) and glIsProgram(self.program_id): + glDeleteProgram(self.program_id) + if (vertex_id is not None) and (vertex_id > 0) and glIsShader(vertex_id): + glDeleteShader(vertex_id) + if (fragment_id is not None) and (fragment_id > 0) and glIsShader(fragment_id): + glDeleteShader(fragment_id) + raise RuntimeError('Error linking program: %s' % (info)) + if (vertex_id is not None) and (vertex_id > 0) and glIsShader(vertex_id): + glDeleteShader(vertex_id) + if (fragment_id is not None) and (fragment_id > 0) and glIsShader(fragment_id): + glDeleteShader(fragment_id) + + def compile(self, _type, _src): + shader_id = None + try: + shader_id = glCreateShader(_type) + if shader_id == 
0: + print("ERROR: shader type {0} does not exist".format(_type)) + exit() + + glShaderSource(shader_id, _src) + glCompileShader(shader_id) + if glGetShaderiv(shader_id, GL_COMPILE_STATUS) != GL_TRUE: + info = glGetShaderInfoLog(shader_id) + if (shader_id is not None) and (shader_id > 0) and glIsShader(shader_id): + glDeleteShader(shader_id) + raise RuntimeError('Shader compilation failed: %s' % (info)) + return shader_id + except: + if (shader_id is not None) and (shader_id > 0) and glIsShader(shader_id): + glDeleteShader(shader_id) + raise + + def get_program_id(self): + return self.program_id + +class Simple3DObject: + def __init__(self, _is_static, pts_size = 3, clr_size = 3): + self.is_init = False + self.drawing_type = GL_TRIANGLES + self.is_static = _is_static + self.clear() + self.pt_type = pts_size + self.clr_type = clr_size + self.data = sl.Mat() + self.cuda_mapped_buffer = None + self.use_gpu = GPU_ACCELERATION_AVAILABLE and not _is_static + + def add_pt(self, _pts): # _pts [x,y,z] + for pt in _pts: + self.vertices.append(pt) + + def add_clr(self, _clrs): # _clr [r,g,b] + for clr in _clrs: + self.colors.append(clr) + + def add_point_clr(self, _pt, _clr): + self.add_pt(_pt) + self.add_clr(_clr) + self.indices.append(len(self.indices)) + + def add_line(self, _p1, _p2, _clr): + self.add_point_clr(_p1, _clr) + self.add_point_clr(_p2, _clr) + + def addFace(self, p1, p2, p3, clr): + self.add_point_clr(p1, clr) + self.add_point_clr(p2, clr) + self.add_point_clr(p3, clr) + + def push_to_GPU(self): + if not self.is_init: + self.vboID = glGenBuffers(3) + self.is_init = True + + if self.is_static: + type_draw = GL_STATIC_DRAW + else: + type_draw = GL_DYNAMIC_DRAW + + if len(self.vertices): + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + glBufferData(GL_ARRAY_BUFFER, len(self.vertices) * self.vertices.itemsize, (GLfloat * len(self.vertices))(*self.vertices), type_draw) + + if len(self.colors): + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[1]) + glBufferData(GL_ARRAY_BUFFER, len(self.colors) * self.colors.itemsize, (GLfloat * len(self.colors))(*self.colors), type_draw) + + if len(self.indices): + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vboID[2]) + glBufferData(GL_ELEMENT_ARRAY_BUFFER,len(self.indices) * self.indices.itemsize,(GLuint * len(self.indices))(*self.indices), type_draw) + + self.elementbufferSize = len(self.indices) + + def init(self, res): + if not self.is_init: + self.vboID = glGenBuffers(3) + self.is_init = True + + if self.is_static: + type_draw = GL_STATIC_DRAW + else: + type_draw = GL_DYNAMIC_DRAW + + self.elementbufferSize = res.width * res.height + + # Initialize vertex buffer (for XYZRGBA data) + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + glBufferData(GL_ARRAY_BUFFER, self.elementbufferSize * self.pt_type * self.vertices.itemsize, None, type_draw) + + # Try to set up GPU acceleration if available + if self.use_gpu: + try: + flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard + self.cuda_mapped_buffer = CudaOpenGLMappedArray( + dtype=np.float32, + shape=(self.elementbufferSize, self.pt_type), + gl_buffer=self.vboID[0], + flags=flags + ) + except Exception as e: + print(f"Failed to initialize GPU acceleration, falling back to CPU: {e}") + self.use_gpu = False + self.cuda_mapped_buffer = None + + # Initialize color buffer (not used for point clouds with XYZRGBA) + if self.clr_type: + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[1]) + glBufferData(GL_ARRAY_BUFFER, self.elementbufferSize * self.clr_type * self.colors.itemsize, None, type_draw) + + 
for i in range (0, self.elementbufferSize): + self.indices.append(i) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vboID[2]) + glBufferData(GL_ELEMENT_ARRAY_BUFFER,len(self.indices) * self.indices.itemsize,(GLuint * len(self.indices))(*self.indices), type_draw) + + def setPoints(self, pc): + """Update point cloud data from sl.Mat""" + if not pc.is_init(): + return + + try: + if self.use_gpu and self.cuda_mapped_buffer and pc.get_memory_type() in (sl.MEM.GPU, sl.MEM.BOTH): + self.setPointsGPU(pc) + else: + self.setPointsCPU(pc) + except Exception as e: + print(f"Error setting points: {e}") + # Fallback to CPU if GPU fails + if self.use_gpu: + print("Falling back to CPU processing") + self.use_gpu = False + self.setPointsCPU(pc) + + def setPointsGPU(self, pc): + """Set points using GPU acceleration with CUDA-OpenGL interop""" + try: + # Get point cloud data from GPU memory + cupy_arr = pc.get_data(sl.MEM.GPU) + + # Map OpenGL buffer to CUDA memory + with self.cuda_mapped_buffer as cuda_array: + # Reshape point cloud data to match buffer format + if cupy_arr.ndim == 3: # (height, width, channels) + pc_flat = cupy_arr.reshape(-1, cupy_arr.shape[-1]) + else: + pc_flat = cupy_arr + + # Copy data to GPU buffer (optimized GPU-to-GPU copy with continuous memory) + points_to_copy = min(pc_flat.shape[0], cuda_array.shape[0]) + cuda_array[:points_to_copy] = pc_flat[:points_to_copy] + + # Zero out remaining buffer if needed + if points_to_copy < cuda_array.shape[0]: + cuda_array[points_to_copy:] = 0 + + except Exception as e: + print(f"GPU point cloud update failed: {e}") + raise + + def setPointsCPU(self, pc): + """Fallback CPU method for setting points""" + try: + # Ensure data is available on CPU + if pc.get_memory_type() == sl.MEM.GPU: + pc.update_cpu_from_gpu() + + # Get CPU pointer and upload to GPU buffer + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + data_ptr = pc.get_pointer(sl.MEM.CPU) + buffer_size = self.elementbufferSize * self.pt_type * 4 # 4 bytes per float32 + glBufferSubData(GL_ARRAY_BUFFER, 0, buffer_size, ctypes.c_void_p(data_ptr)) + glBindBuffer(GL_ARRAY_BUFFER, 0) + + except Exception as e: + print(f"CPU point cloud update failed: {e}") + raise + + def clear(self): + self.vertices = array.array('f') + self.colors = array.array('f') + self.indices = array.array('I') + self.elementbufferSize = 0 + + def set_drawing_type(self, _type): + self.drawing_type = _type + + def draw(self): + if self.elementbufferSize: + glEnableVertexAttribArray(0) + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[0]) + glVertexAttribPointer(0,self.pt_type,GL_FLOAT,GL_FALSE,0,None) + + if(self.clr_type): + glEnableVertexAttribArray(1) + glBindBuffer(GL_ARRAY_BUFFER, self.vboID[1]) + glVertexAttribPointer(1,self.clr_type,GL_FLOAT,GL_FALSE,0,None) + + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vboID[2]) + glDrawElements(self.drawing_type, self.elementbufferSize, GL_UNSIGNED_INT, None) + + glDisableVertexAttribArray(0) + if self.clr_type: + glDisableVertexAttribArray(1) + + def __del__(self): + """Cleanup GPU resources""" + if hasattr(self, 'cuda_mapped_buffer') and self.cuda_mapped_buffer: + try: + self.cuda_mapped_buffer.unregister() + except: + pass + +class GLViewer: + def __init__(self): + self.available = False + self.mutex = Lock() + self.camera = CameraGL() + self.wheelPosition = 0. + self.mouse_button = [False, False] + self.mouseCurrentPosition = [0., 0.] + self.previousMouseMotion = [0., 0.] + self.mouseMotion = [0., 0.] 
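+        # Note: the frustum wireframe below is static geometry, while the point
+        # cloud is dynamic with 4 floats per vertex (XYZ plus RGBA packed into the
+        # float's bit pattern, unpacked in POINTCLOUD_VERTEX_SHADER via floatBitsToUint).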
+ self.zedModel = Simple3DObject(True) + self.point_cloud = Simple3DObject(False, 4) + self.save_data = False + + def init(self, _argc, _argv, res): # _params = sl.CameraParameters + glutInit(_argc, _argv) + wnd_w = int(glutGet(GLUT_SCREEN_WIDTH)*0.9) + wnd_h = int(glutGet(GLUT_SCREEN_HEIGHT) *0.9) + glutInitWindowSize(wnd_w, wnd_h) + glutInitWindowPosition(int(wnd_w*0.05), int(wnd_h*0.05)) + + glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH) + glutCreateWindow(b"ZED Depth Sensing") + glViewport(0, 0, wnd_w, wnd_h) + + glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, + GLUT_ACTION_CONTINUE_EXECUTION) + + glEnable(GL_DEPTH_TEST) + + glEnable(GL_BLEND) + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) + + glEnable(GL_LINE_SMOOTH) + glHint(GL_LINE_SMOOTH_HINT, GL_NICEST) + + # Compile and create the shader for 3D objects + self.shader_image = Shader(VERTEX_SHADER, FRAGMENT_SHADER) + self.shader_image_MVP = glGetUniformLocation(self.shader_image.get_program_id(), "u_mvpMatrix") + + self.shader_pc = Shader(POINTCLOUD_VERTEX_SHADER, POINTCLOUD_FRAGMENT_SHADER) + self.shader_pc_MVP = glGetUniformLocation(self.shader_pc.get_program_id(), "u_mvpMatrix") + + self.bckgrnd_clr = np.array([223/255., 230/255., 233/255.]) + + # Create the camera model + Z_ = -0.15 + Y_ = Z_ * math.tan(95. * M_PI / 180. / 2.) + X_ = Y_ * 16./9. + + A = np.array([0, 0, 0]) + B = np.array([X_, Y_, Z_]) + C = np.array([-X_, Y_, Z_]) + D = np.array([-X_, -Y_, Z_]) + E = np.array([X_, -Y_, Z_]) + + lime_clr = np.array([217 / 255, 255/255, 66/255]) + + self.zedModel.add_line(A, B, lime_clr) + self.zedModel.add_line(A, C, lime_clr) + self.zedModel.add_line(A, D, lime_clr) + self.zedModel.add_line(A, E, lime_clr) + + self.zedModel.add_line(B, C, lime_clr) + self.zedModel.add_line(C, D, lime_clr) + self.zedModel.add_line(D, E, lime_clr) + self.zedModel.add_line(E, B, lime_clr) + + self.zedModel.set_drawing_type(GL_LINES) + self.zedModel.push_to_GPU() + + self.point_cloud.init(res) + self.point_cloud.set_drawing_type(GL_POINTS) + + # Register GLUT callback functions + glutDisplayFunc(self.draw_callback) + glutIdleFunc(self.idle) + glutKeyboardFunc(self.keyPressedCallback) + glutCloseFunc(self.close_func) + glutMouseFunc(self.on_mouse) + glutMotionFunc(self.on_mousemove) + glutReshapeFunc(self.on_resize) + + self.available = True + + def is_available(self): + if self.available: + glutMainLoopEvent() + return self.available + + def updateData(self, pc): + self.mutex.acquire() + try: + self.point_cloud.setPoints(pc) + finally: + self.mutex.release() + + def idle(self): + if self.available: + glutPostRedisplay() + + def exit(self): + if self.available: + self.available = False + + def close_func(self): + if self.available: + self.available = False + + def keyPressedCallback(self, key, x, y): + if ord(key) == 27: + self.close_func() + if (ord(key) == 83 or ord(key) == 115): + self.save_data = True + + + def on_mouse(self,*args,**kwargs): + (key,Up,x,y) = args + if key==0: + self.mouse_button[0] = (Up == 0) + elif key==2 : + self.mouse_button[1] = (Up == 0) + elif(key == 3): + self.wheelPosition = self.wheelPosition + 1 + elif(key == 4): + self.wheelPosition = self.wheelPosition - 1 + + self.mouseCurrentPosition = [x, y] + self.previousMouseMotion = [x, y] + + def on_mousemove(self,*args,**kwargs): + (x,y) = args + self.mouseMotion[0] = x - self.previousMouseMotion[0] + self.mouseMotion[1] = y - self.previousMouseMotion[1] + self.previousMouseMotion = [x, y] + glutPostRedisplay() + + def on_resize(self,Width,Height): + glViewport(0, 
0, Width, Height) + self.camera.setProjection(Height / Width) + + def draw_callback(self): + if self.available: + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + glClearColor(self.bckgrnd_clr[0], self.bckgrnd_clr[1], self.bckgrnd_clr[2], 1.) + + self.mutex.acquire() + self.update() + self.draw() + self.mutex.release() + + glutSwapBuffers() + glutPostRedisplay() + + def update(self): + if(self.mouse_button[0]): + r = sl.Rotation() + vert=self.camera.vertical_ + tmp = vert.get() + vert.init_vector(tmp[0] * 1.,tmp[1] * 1., tmp[2] * 1.) + r.init_angle_translation(self.mouseMotion[0] * 0.02, vert) + self.camera.rotate(r) + + r.init_angle_translation(self.mouseMotion[1] * 0.02, self.camera.right_) + self.camera.rotate(r) + + if(self.mouse_button[1]): + t = sl.Translation() + tmp = self.camera.right_.get() + scale = self.mouseMotion[0] *-0.05 + t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale) + self.camera.translate(t) + + tmp = self.camera.up_.get() + scale = self.mouseMotion[1] * 0.05 + t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale) + self.camera.translate(t) + + if (self.wheelPosition != 0): + t = sl.Translation() + tmp = self.camera.forward_.get() + scale = self.wheelPosition * -0.065 + t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale) + self.camera.translate(t) + + self.camera.update() + + self.mouseMotion = [0., 0.] + self.wheelPosition = 0 + + def draw(self): + vpMatrix = self.camera.getViewProjectionMatrix() + glUseProgram(self.shader_image.get_program_id()) + glUniformMatrix4fv(self.shader_image_MVP, 1, GL_TRUE, (GLfloat * len(vpMatrix))(*vpMatrix)) + glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) + self.zedModel.draw() + glUseProgram(0) + + glUseProgram(self.shader_pc.get_program_id()) + glUniformMatrix4fv(self.shader_pc_MVP, 1, GL_TRUE, (GLfloat * len(vpMatrix))(*vpMatrix)) + glPointSize(1.) + self.point_cloud.draw() + glUseProgram(0) + +class CameraGL: + def __init__(self): + self.ORIGINAL_FORWARD = sl.Translation() + self.ORIGINAL_FORWARD.init_vector(0,0,1) + self.ORIGINAL_UP = sl.Translation() + self.ORIGINAL_UP.init_vector(0,1,0) + self.ORIGINAL_RIGHT = sl.Translation() + self.ORIGINAL_RIGHT.init_vector(1,0,0) + self.znear = 0.5 + self.zfar = 100. + self.horizontalFOV = 70. + self.orientation_ = sl.Orientation() + self.position_ = sl.Translation() + self.forward_ = sl.Translation() + self.up_ = sl.Translation() + self.right_ = sl.Translation() + self.vertical_ = sl.Translation() + self.vpMatrix_ = sl.Matrix4f() + self.offset_ = sl.Translation() + self.offset_.init_vector(0,0,5) + self.projection_ = sl.Matrix4f() + self.projection_.set_identity() + self.setProjection(1.78) + + self.position_.init_vector(0., 0., 0.) + tmp = sl.Translation() + tmp.init_vector(0, 0, -.1) + tmp2 = sl.Translation() + tmp2.init_vector(0, 1, 0) + self.setDirection(tmp, tmp2) + + def update(self): + dot_ = sl.Translation.dot_translation(self.vertical_, self.up_) + if(dot_ < 0.): + tmp = self.vertical_.get() + self.vertical_.init_vector(tmp[0] * -1.,tmp[1] * -1., tmp[2] * -1.) 
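+        # The cached vertical axis is flipped when the orbit crosses the pole
+        # (dot < 0) so the view does not roll; the view-projection matrix is then
+        # rebuilt below by composing the rotated orbit offset with the current
+        # orientation and inverting the resulting pose.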
+        transformation = sl.Transform()
+
+        tmp_position = self.position_.get()
+        tmp = (self.offset_ * self.orientation_).get()
+        new_position = sl.Translation()
+        new_position.init_vector(tmp_position[0] + tmp[0], tmp_position[1] + tmp[1], tmp_position[2] + tmp[2])
+        transformation.init_orientation_translation(self.orientation_, new_position)
+        transformation.inverse()
+        self.vpMatrix_ = self.projection_ * transformation
+
+    def setProjection(self, im_ratio):
+        fov_x = self.horizontalFOV * 3.1416 / 180.
+        fov_y = self.horizontalFOV * im_ratio * 3.1416 / 180.
+
+        self.projection_[(0,0)] = 1. / math.tan(fov_x * .5)
+        self.projection_[(1,1)] = 1. / math.tan(fov_y * .5)
+        self.projection_[(2,2)] = -(self.zfar + self.znear) / (self.zfar - self.znear)
+        self.projection_[(3,2)] = -1.
+        self.projection_[(2,3)] = -(2. * self.zfar * self.znear) / (self.zfar - self.znear)
+        self.projection_[(3,3)] = 0.
+
+    def getViewProjectionMatrix(self):
+        tmp = self.vpMatrix_.m
+        vpMat = array.array('f')
+        for row in tmp:
+            for v in row:
+                vpMat.append(v)
+        return vpMat
+
+    def getViewProjectionMatrixRT(self, tr):
+        tmp = self.vpMatrix_
+        tmp.transpose()
+        tr.transpose()
+        tmp = (tr * tmp).m
+        vpMat = array.array('f')
+        for row in tmp:
+            for v in row:
+                vpMat.append(v)
+        return vpMat
+
+    def setDirection(self, dir, vert):
+        dir.normalize()
+        tmp = dir.get()
+        dir.init_vector(tmp[0] * -1., tmp[1] * -1., tmp[2] * -1.)
+        self.orientation_.init_translation(self.ORIGINAL_FORWARD, dir)
+        self.updateVectors()
+        self.vertical_ = vert
+        if sl.Translation.dot_translation(self.vertical_, self.up_) < 0.:
+            tmp = sl.Rotation()
+            tmp.init_angle_translation(3.14, self.ORIGINAL_FORWARD)
+            self.rotate(tmp)
+
+    def translate(self, t):
+        ref = self.position_.get()
+        tmp = t.get()
+        self.position_.init_vector(ref[0] + tmp[0], ref[1] + tmp[1], ref[2] + tmp[2])
+
+    def setPosition(self, p):
+        self.position_ = p
+
+    def rotate(self, r):
+        tmp = sl.Orientation()
+        tmp.init_rotation(r)
+        self.orientation_ = tmp * self.orientation_
+        self.updateVectors()
+
+    def setRotation(self, r):
+        self.orientation_.init_rotation(r)
+        self.updateVectors()
+
+    def updateVectors(self):
+        self.forward_ = self.ORIGINAL_FORWARD * self.orientation_
+        self.up_ = self.ORIGINAL_UP * self.orientation_
+        # Build the negated right axis in a fresh Translation: init_vector() mutates
+        # in place, so aliasing ORIGINAL_RIGHT here would flip its sign on every call.
+        right = sl.Translation()
+        tmp = self.ORIGINAL_RIGHT.get()
+        right.init_vector(tmp[0] * -1., tmp[1] * -1., tmp[2] * -1.)
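+        # The right axis is negated before applying the orientation, matching the
+        # forward-axis flip performed in setDirection().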
+ self.right_ = right * self.orientation_ diff --git a/py_workspace/pyproject.toml b/py_workspace/pyproject.toml new file mode 100644 index 0000000..5c054b6 --- /dev/null +++ b/py_workspace/pyproject.toml @@ -0,0 +1,19 @@ +[project] +name = "depth-sensing" +version = "0.1.0" +description = "depth sensing with ZED cameras" +readme = "README.md" +requires-python = ">=3.12" +dependencies = [ + "pyzed", + "cupy-cuda12x", + "cuda-python", + "numpy", + "pyopengl>=3.1.10", + "pyopengl-accelerate>=3.1.10", + "jupyterlab>=4.5.3", +] + +[tool.uv.sources] +pyzed = { path = "libs/pyzed_pkg" } + diff --git a/py_workspace/uv.lock b/py_workspace/uv.lock new file mode 100644 index 0000000..35b3ead --- /dev/null +++ b/py_workspace/uv.lock @@ -0,0 +1,1659 @@ +version = 1 +revision = 3 +requires-python = ">=3.12" +resolution-markers = [ + "python_full_version >= '3.14'", + "python_full_version < '3.14'", +] + +[[package]] +name = "anyio" +version = "4.12.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" }, +] + +[[package]] +name = "appnope" +version = "0.1.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170, upload-time = "2024-02-06T09:43:11.258Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321, upload-time = "2024-02-06T09:43:09.663Z" }, +] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "argon2-cffi-bindings" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0e/89/ce5af8a7d472a67cc819d5d998aa8c82c5d860608c4db9f46f1162d7dab9/argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1", size = 45706, upload-time = "2025-06-03T06:55:32.073Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4f/d3/a8b22fa575b297cd6e3e3b0155c7e25db170edf1c74783d6a31a2490b8d9/argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741", size = 14657, upload-time = "2025-06-03T06:55:30.804Z" }, +] + +[[package]] +name = "argon2-cffi-bindings" +version = "25.1.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "cffi" }, +] 
+sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5c/2d/db8af0df73c1cf454f71b2bbe5e356b8c1f8041c979f505b3d3186e520a9/argon2_cffi_bindings-25.1.0.tar.gz", hash = "sha256:b957f3e6ea4d55d820e40ff76f450952807013d361a65d7f28acc0acbf29229d", size = 1783441, upload-time = "2025-07-30T10:02:05.147Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/97/3c0a35f46e52108d4707c44b95cfe2afcafc50800b5450c197454569b776/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:3d3f05610594151994ca9ccb3c771115bdb4daef161976a266f0dd8aa9996b8f", size = 54393, upload-time = "2025-07-30T10:01:40.97Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/f4/98bbd6ee89febd4f212696f13c03ca302b8552e7dbf9c8efa11ea4a388c3/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8b8efee945193e667a396cbc7b4fb7d357297d6234d30a489905d96caabde56b", size = 29328, upload-time = "2025-07-30T10:01:41.916Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/43/24/90a01c0ef12ac91a6be05969f29944643bc1e5e461155ae6559befa8f00b/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3c6702abc36bf3ccba3f802b799505def420a1b7039862014a65db3205967f5a", size = 31269, upload-time = "2025-07-30T10:01:42.716Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d4/d3/942aa10782b2697eee7af5e12eeff5ebb325ccfb86dd8abda54174e377e4/argon2_cffi_bindings-25.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a1c70058c6ab1e352304ac7e3b52554daadacd8d453c1752e547c76e9c99ac44", size = 86558, upload-time = "2025-07-30T10:01:43.943Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0d/82/b484f702fec5536e71836fc2dbc8c5267b3f6e78d2d539b4eaa6f0db8bf8/argon2_cffi_bindings-25.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e2fd3bfbff3c5d74fef31a722f729bf93500910db650c925c2d6ef879a7e51cb", size = 92364, upload-time = "2025-07-30T10:01:44.887Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c9/c1/a606ff83b3f1735f3759ad0f2cd9e038a0ad11a3de3b6c673aa41c24bb7b/argon2_cffi_bindings-25.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4f9665de60b1b0e99bcd6be4f17d90339698ce954cfd8d9cf4f91c995165a92", size = 85637, upload-time = "2025-07-30T10:01:46.225Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/44/b4/678503f12aceb0262f84fa201f6027ed77d71c5019ae03b399b97caa2f19/argon2_cffi_bindings-25.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ba92837e4a9aa6a508c8d2d7883ed5a8f6c308c89a4790e1e447a220deb79a85", size = 91934, upload-time = "2025-07-30T10:01:47.203Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f0/c7/f36bd08ef9bd9f0a9cff9428406651f5937ce27b6c5b07b92d41f91ae541/argon2_cffi_bindings-25.1.0-cp314-cp314t-win32.whl", hash = "sha256:84a461d4d84ae1295871329b346a97f68eade8c53b6ed9a7ca2d7467f3c8ff6f", size = 28158, upload-time = "2025-07-30T10:01:48.341Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/80/0106a7448abb24a2c467bf7d527fe5413b7fdfa4ad6d6a96a43a62ef3988/argon2_cffi_bindings-25.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b55aec3565b65f56455eebc9b9f34130440404f27fe21c3b375bf1ea4d8fbae6", size = 32597, upload-time = "2025-07-30T10:01:49.112Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/b8/d663c9caea07e9180b2cb662772865230715cbd573ba3b5e81793d580316/argon2_cffi_bindings-25.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:87c33a52407e4c41f3b70a9c2d3f6056d88b10dad7695be708c5021673f55623", size = 28231, upload-time = "2025-07-30T10:01:49.92Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1d/57/96b8b9f93166147826da5f90376e784a10582dd39a393c99bb62cfcf52f0/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:aecba1723ae35330a008418a91ea6cfcedf6d31e5fbaa056a166462ff066d500", size = 54121, upload-time = "2025-07-30T10:01:50.815Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0a/08/a9bebdb2e0e602dde230bdde8021b29f71f7841bd54801bcfd514acb5dcf/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2630b6240b495dfab90aebe159ff784d08ea999aa4b0d17efa734055a07d2f44", size = 29177, upload-time = "2025-07-30T10:01:51.681Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b6/02/d297943bcacf05e4f2a94ab6f462831dc20158614e5d067c35d4e63b9acb/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:7aef0c91e2c0fbca6fc68e7555aa60ef7008a739cbe045541e438373bc54d2b0", size = 31090, upload-time = "2025-07-30T10:01:53.184Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/93/44365f3d75053e53893ec6d733e4a5e3147502663554b4d864587c7828a7/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e021e87faa76ae0d413b619fe2b65ab9a037f24c60a1e6cc43457ae20de6dc6", size = 81246, upload-time = "2025-07-30T10:01:54.145Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/09/52/94108adfdd6e2ddf58be64f959a0b9c7d4ef2fa71086c38356d22dc501ea/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e924cfc503018a714f94a49a149fdc0b644eaead5d1f089330399134fa028a", size = 87126, upload-time = "2025-07-30T10:01:55.074Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/70/7a2993a12b0ffa2a9271259b79cc616e2389ed1a4d93842fac5a1f923ffd/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c87b72589133f0346a1cb8d5ecca4b933e3c9b64656c9d175270a000e73b288d", size = 80343, upload-time = "2025-07-30T10:01:56.007Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/78/9a/4e5157d893ffc712b74dbd868c7f62365618266982b64accab26bab01edc/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1db89609c06afa1a214a69a462ea741cf735b29a57530478c06eb81dd403de99", size = 86777, upload-time = "2025-07-30T10:01:56.943Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/cd/15777dfde1c29d96de7f18edf4cc94c385646852e7c7b0320aa91ccca583/argon2_cffi_bindings-25.1.0-cp39-abi3-win32.whl", hash = "sha256:473bcb5f82924b1becbb637b63303ec8d10e84c8d241119419897a26116515d2", size = 27180, upload-time = "2025-07-30T10:01:57.759Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e2/c6/a759ece8f1829d1f162261226fbfd2c6832b3ff7657384045286d2afa384/argon2_cffi_bindings-25.1.0-cp39-abi3-win_amd64.whl", hash = "sha256:a98cd7d17e9f7ce244c0803cad3c23a7d379c301ba618a5fa76a67d116618b98", size = 31715, upload-time = "2025-07-30T10:01:58.56Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/42/b9/f8d6fa329ab25128b7e98fd83a3cb34d9db5b059a9847eddb840a0af45dd/argon2_cffi_bindings-25.1.0-cp39-abi3-win_arm64.whl", hash = "sha256:b0fdbcf513833809c882823f98dc2f931cf659d9a1429616ac3adebb49f5db94", size = 27149, upload-time = "2025-07-30T10:01:59.329Z" }, +] + +[[package]] +name = "arrow" +version = "1.4.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "python-dateutil" }, + { name = "tzdata" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/33/032cdc44182491aa708d06a68b62434140d8c50820a087fac7af37703357/arrow-1.4.0.tar.gz", hash = "sha256:ed0cc050e98001b8779e84d461b0098c4ac597e88704a655582b21d116e526d7", size = 152931, upload-time = "2025-10-18T17:46:46.761Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/c9/d7977eaacb9df673210491da99e6a247e93df98c715fc43fd136ce1d3d33/arrow-1.4.0-py3-none-any.whl", hash = "sha256:749f0769958ebdc79c173ff0b0670d59051a535fa26e8eba02953dc19eb43205", size = 68797, upload-time = "2025-10-18T17:46:45.663Z" }, +] + +[[package]] +name = "asttokens" +version = "3.0.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/be/a5/8e3f9b6771b0b408517c82d97aed8f2036509bc247d46114925e32fe33f0/asttokens-3.0.1.tar.gz", hash = "sha256:71a4ee5de0bde6a31d64f6b13f2293ac190344478f081c3d1bccfcf5eacb0cb7", size = 62308, upload-time = "2025-11-15T16:43:48.578Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a", size = 27047, upload-time = "2025-11-15T16:43:16.109Z" }, +] + +[[package]] +name = "async-lru" +version = "2.1.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ef/c3/bbf34f15ea88dfb649ab2c40f9d75081784a50573a9ea431563cab64adb8/async_lru-2.1.0.tar.gz", hash = "sha256:9eeb2fecd3fe42cc8a787fc32ead53a3a7158cc43d039c3c55ab3e4e5b2a80ed", size = 12041, upload-time = "2026-01-17T22:52:18.931Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2e/e9/eb6a5db5ac505d5d45715388e92bced7a5bb556facc4d0865d192823f2d2/async_lru-2.1.0-py3-none-any.whl", hash = "sha256:fa12dcf99a42ac1280bc16c634bbaf06883809790f6304d85cdab3f666f33a7e", size = 6933, upload-time = "2026-01-17T22:52:17.389Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "babel" +version = "2.18.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } 
+sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/b2/51899539b6ceeeb420d40ed3cd4b7a40519404f9baf3d4ac99dc413a834b/babel-2.18.0.tar.gz", hash = "sha256:b80b99a14bd085fcacfa15c9165f651fbb3406e66cc603abf11c5750937c992d", size = 9959554, upload-time = "2026-02-01T12:30:56.078Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/77/f5/21d2de20e8b8b0408f0681956ca2c69f1320a3848ac50e6e7f39c6159675/babel-2.18.0-py3-none-any.whl", hash = "sha256:e2b422b277c2b9a9630c1d7903c2a00d0830c409c59ac8cae9081c92f1aeba35", size = 10196845, upload-time = "2026-02-01T12:30:53.445Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.14.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c3/b0/1c6a16426d389813b48d95e26898aff79abbde42ad353958ad95cc8c9b21/beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86", size = 627737, upload-time = "2025-11-30T15:08:26.084Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, +] + +[[package]] +name = "bleach" +version = "6.3.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/18/3c8523962314be6bf4c8989c79ad9531c825210dd13a8669f6b84336e8bd/bleach-6.3.0.tar.gz", hash = "sha256:6f3b91b1c0a02bb9a78b5a454c92506aa0fdf197e1d5e114d2e00c6f64306d22", size = 203533, upload-time = "2025-10-27T17:57:39.211Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cd/3a/577b549de0cc09d95f11087ee63c739bba856cd3952697eec4c4bb91350a/bleach-6.3.0-py3-none-any.whl", hash = "sha256:fe10ec77c93ddf3d13a73b035abaac7a9f5e436513864ccdad516693213c65d6", size = 164437, upload-time = "2025-10-27T17:57:37.538Z" }, +] + +[package.optional-dependencies] +css = [ + { name = "tinycss2" }, +] + +[[package]] +name = "certifi" +version = "2026.1.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = 
"sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size 
= 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" 
}, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = 
"2025-10-14T04:41:36.116Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = 
"2025-10-14T04:41:48.81Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "comm" +version = "0.2.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4c/13/7d740c5849255756bc17888787313b61fd38a0a8304fc4f073dfc46122aa/comm-0.2.3.tar.gz", hash = "sha256:2dc8048c10962d55d7ad693be1e7045d891b7ce8d999c97963a5e3e99c055971", size = 6319, upload-time = "2025-07-25T14:02:04.452Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417", size = 7294, upload-time = "2025-07-25T14:02:02.896Z" }, +] + +[[package]] +name = "cuda-bindings" +version = "13.1.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "cuda-pathfinder" }, +] +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/53/3d/c8ed9d169843091f3f0d6b8218e826fd59520a37e0434c204feada597988/cuda_bindings-13.1.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e75ad0cb863330df784236d289612d71ca855c013d19ae00e5693574abd6915", size = 15530160, upload-time = "2025-12-09T22:05:55.386Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4a/8e/368295623ee43fba622909d780fbb6863efc1638dff55f67a0f04eac6470/cuda_bindings-13.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:25785d1a3cdcd98f151240fd5efd025609319a6720a217dee2a929241749d488", size = 16110386, upload-time = "2025-12-09T22:05:57.71Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/1f/ecc4701ade3e85f091c625a920574527b9daf7fb354189fbfbc5516af6cd/cuda_bindings-13.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:ccde9c95c0e953b31fe7731bb08da9d0a34b1770498df9a3c156fdfdbe3951ad", size = 15250028, upload-time = "2025-12-09T22:06:00.346Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fe/c1/0ee8fd94bab7e23116e0e3da8c0902e299f3d9edc95f1d7d8ef894c897ed/cuda_bindings-13.1.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c9822a57c8f952dc367aacd7c32fe4cb17371104383606f455ea74635bff4c7", size = 15421116, upload-time = "2025-12-09T22:06:02.994Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/c2/f272fad414b96299e010dcbe510cf17fc25deaf3443e0fdb55020a8298a3/cuda_bindings-13.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5837f5ea422c5653626dcfe22e9ab68142cd19af9e67a226100f224cc25a1b99", size = 15940152, upload-time = "2025-12-09T22:06:05.079Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2a/56/433093bec0121f031edb582ea3a72f71031e8fbebecaaf329809344da4c7/cuda_bindings-13.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:9e4f348cd7a779657d51e6f71aac3965fb1738f40ff3bbe75265a3242fd6f29f", size = 15216463, upload-time = "2025-12-09T22:06:07.296Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/38/40416d037ed25db68f1dbd50e0232775a62d90c9f25af22b196c0a13b88c/cuda_bindings-13.1.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:86258fe1b0d3998bea7f57dc891569e4996705b8dd00366e44c722d0a29b2090", size = 15498927, upload-time = "2025-12-09T22:06:09.476Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ac/3f/f1f88b6cdb7d41ba076f8ff10edf6d3bd17e740da9a163544b43d6349653/cuda_bindings-13.1.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:daf8468fd603b2724c2d16cbd499348c64916ed72b1d04643f1660ce13cd12ae", size = 15984539, upload-time = "2025-12-09T22:06:11.882Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f6/33/7739cc5e9a3373df8e7dea9060528bee5f70cf6e28b9c14f765502816c71/cuda_bindings-13.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:f2e079182014dbc162562b46467815272c14c7afe5b988978fa968728b0ac726", size = 15373212, upload-time = "2025-12-09T22:06:13.989Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/0a/5c6d514e566ff86c4054bbbb6554bf49b9c55fefbc934eb456faecab53c9/cuda_bindings-13.1.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d0cd96a6ec00a78235947bff9462b2139bc5b83ce8e297d865802f0b52d1e23d", size = 15403944, upload-time = "2025-12-09T22:06:16.315Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0b/5b/319cfa491a685d4d4757aa24223b6dbc0976954afac42f49fc47290ba6a3/cuda_bindings-13.1.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff465829c6c394c2b4047250324a19925cf8c44633345b2746a4741e07bf827", size = 15911462, upload-time = "2025-12-09T22:06:18.403Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e3/5c/38b92080c5b6c4ddb09f0be2536123f81c7e9e1a89e4573f20cb00347ee3/cuda_bindings-13.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:8205eee6b8b458a2110c0384923ace206855d0f1b436fc1b145fcbaa1653b501", size = 16044390, upload-time = "2025-12-09T22:06:20.945Z" }, +] + +[[package]] +name = "cuda-pathfinder" +version = "1.3.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0b/02/4dbe7568a42e46582248942f54dc64ad094769532adbe21e525e4edf7bc4/cuda_pathfinder-1.3.3-py3-none-any.whl", hash = "sha256:9984b664e404f7c134954a771be8775dfd6180ea1e1aef4a5a37d4be05d9bbb1", size = 27154, upload-time = "2025-12-04T22:35:08.996Z" }, +] + +[[package]] +name = "cuda-python" +version = "13.1.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "cuda-bindings" }, + { name = "cuda-pathfinder" }, +] +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cd/08/b5e3b9822662d72d540d830531e3ab6a7cabbda3dd56175696aabccfeb76/cuda_python-13.1.1-py3-none-any.whl", hash = "sha256:944cc4fe6482673d28dd545797a28840945a1668739328fa2ad1e9be4f7050d9", size = 8038, upload-time = "2025-12-09T22:13:10.719Z" }, +] + +[[package]] +name = "cupy-cuda12x" +version = "13.6.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "fastrlock" }, + { name = "numpy" }, +] +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/12/c5/7e7fc4816d0de0154e5d9053242c3a08a0ca8b43ee656a6f7b3b95055a7b/cupy_cuda12x-13.6.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a6970ceefe40f9acbede41d7fe17416bd277b1bd2093adcde457b23b578c5a59", size = 127334633, upload-time = "2025-08-18T08:24:43.065Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/95/d7e1295141e7d530674a3cc567e13ed0eb6b81524cb122d797ed996b5bea/cupy_cuda12x-13.6.0-cp312-cp312-manylinux2014_x86_64.whl", hash = "sha256:79b0cacb5e8b190ef409f9e03f06ac8de1b021b0c0dda47674d446f5557e0eb1", size = 112886268, upload-time = "2025-08-18T08:24:49.294Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ae/8c/14555b63fd78cfac7b88af0094cea0a3cb845d243661ec7da69f7b3ea0de/cupy_cuda12x-13.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:ca06fede7b8b83ca9ad80062544ef2e5bb8d4762d1c4fc3ac8349376de9c8a5e", size = 89785108, upload-time = "2025-08-18T08:24:54.527Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/19/ec/f62cb991f11fb41291c4c15b6936d7b67ffa71ddb344ad6e8894e06ce58d/cupy_cuda12x-13.6.0-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e5426ae3b1b9cf59927481e457a89e3f0b50a35b114a8034ec9110e7a833434c", size = 126904601, upload-time = "2025-08-18T08:24:59.951Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f8/b8/30127bcdac53a25f94ee201bf4802fcd8d012145567d77c54174d6d01c01/cupy_cuda12x-13.6.0-cp313-cp313-manylinux2014_x86_64.whl", hash = "sha256:52d9e7f83d920da7d81ec2e791c2c2c747fdaa1d7b811971b34865ce6371e98a", size = 112654824, upload-time = "2025-08-18T08:25:05.944Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/36/c9e24acb19f039f814faea880b3704a3661edaa6739456b73b27540663e3/cupy_cuda12x-13.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:297b4268f839de67ef7865c2202d3f5a0fb8d20bd43360bc51b6e60cb4406447", size = 89750580, upload-time = "2025-08-18T08:25:10.972Z" }, +] + 
+[[package]] +name = "debugpy" +version = "1.8.20" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/b7/cd8080344452e4874aae67c40d8940e2b4d47b01601a8fd9f44786c757c7/debugpy-1.8.20.tar.gz", hash = "sha256:55bc8701714969f1ab89a6d5f2f3d40c36f91b2cbe2f65d98bf8196f6a6a2c33", size = 1645207, upload-time = "2026-01-29T23:03:28.199Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/14/57/7f34f4736bfb6e00f2e4c96351b07805d83c9a7b33d28580ae01374430f7/debugpy-1.8.20-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:4ae3135e2089905a916909ef31922b2d733d756f66d87345b3e5e52b7a55f13d", size = 2550686, upload-time = "2026-01-29T23:03:42.023Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ab/78/b193a3975ca34458f6f0e24aaf5c3e3da72f5401f6054c0dfd004b41726f/debugpy-1.8.20-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:88f47850a4284b88bd2bfee1f26132147d5d504e4e86c22485dfa44b97e19b4b", size = 4310588, upload-time = "2026-01-29T23:03:43.314Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/55/f14deb95eaf4f30f07ef4b90a8590fc05d9e04df85ee379712f6fb6736d7/debugpy-1.8.20-cp312-cp312-win32.whl", hash = "sha256:4057ac68f892064e5f98209ab582abfee3b543fb55d2e87610ddc133a954d390", size = 5331372, upload-time = "2026-01-29T23:03:45.526Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a1/39/2bef246368bd42f9bd7cba99844542b74b84dacbdbea0833e610f384fee8/debugpy-1.8.20-cp312-cp312-win_amd64.whl", hash = "sha256:a1a8f851e7cf171330679ef6997e9c579ef6dd33c9098458bd9986a0f4ca52e3", size = 5372835, upload-time = "2026-01-29T23:03:47.245Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/15/e2/fc500524cc6f104a9d049abc85a0a8b3f0d14c0a39b9c140511c61e5b40b/debugpy-1.8.20-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:5dff4bb27027821fdfcc9e8f87309a28988231165147c31730128b1c983e282a", size = 2539560, upload-time = "2026-01-29T23:03:48.738Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/90/83/fb33dcea789ed6018f8da20c5a9bc9d82adc65c0c990faed43f7c955da46/debugpy-1.8.20-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:84562982dd7cf5ebebfdea667ca20a064e096099997b175fe204e86817f64eaf", size = 4293272, upload-time = "2026-01-29T23:03:50.169Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a6/25/b1e4a01bfb824d79a6af24b99ef291e24189080c93576dfd9b1a2815cd0f/debugpy-1.8.20-cp313-cp313-win32.whl", hash = "sha256:da11dea6447b2cadbf8ce2bec59ecea87cc18d2c574980f643f2d2dfe4862393", size = 5331208, upload-time = "2026-01-29T23:03:51.547Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/13/f7/a0b368ce54ffff9e9028c098bd2d28cfc5b54f9f6c186929083d4c60ba58/debugpy-1.8.20-cp313-cp313-win_amd64.whl", hash = "sha256:eb506e45943cab2efb7c6eafdd65b842f3ae779f020c82221f55aca9de135ed7", size = 5372930, upload-time = "2026-01-29T23:03:53.585Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/33/2e/f6cb9a8a13f5058f0a20fe09711a7b726232cd5a78c6a7c05b2ec726cff9/debugpy-1.8.20-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:9c74df62fc064cd5e5eaca1353a3ef5a5d50da5eb8058fcef63106f7bebe6173", size = 2538066, upload-time = "2026-01-29T23:03:54.999Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c5/56/6ddca50b53624e1ca3ce1d1e49ff22db46c47ea5fb4c0cc5c9b90a616364/debugpy-1.8.20-cp314-cp314-manylinux_2_34_x86_64.whl", 
hash = "sha256:077a7447589ee9bc1ff0cdf443566d0ecf540ac8aa7333b775ebcb8ce9f4ecad", size = 4269425, upload-time = "2026-01-29T23:03:56.518Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c5/d9/d64199c14a0d4c476df46c82470a3ce45c8d183a6796cfb5e66533b3663c/debugpy-1.8.20-cp314-cp314-win32.whl", hash = "sha256:352036a99dd35053b37b7803f748efc456076f929c6a895556932eaf2d23b07f", size = 5331407, upload-time = "2026-01-29T23:03:58.481Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/d9/1f07395b54413432624d61524dfd98c1a7c7827d2abfdb8829ac92638205/debugpy-1.8.20-cp314-cp314-win_amd64.whl", hash = "sha256:a98eec61135465b062846112e5ecf2eebb855305acc1dfbae43b72903b8ab5be", size = 5372521, upload-time = "2026-01-29T23:03:59.864Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/c3/7f67dea8ccf8fdcb9c99033bbe3e90b9e7395415843accb81428c441be2d/debugpy-1.8.20-py2.py3-none-any.whl", hash = "sha256:5be9bed9ae3be00665a06acaa48f8329d2b9632f15fd09f6a9a8c8d9907e54d7", size = 5337658, upload-time = "2026-01-29T23:04:17.404Z" }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61" }, +] + +[[package]] +name = "depth-sensing" +version = "0.1.0" +source = { virtual = "." 
} +dependencies = [ + { name = "cuda-python" }, + { name = "cupy-cuda12x" }, + { name = "jupyterlab" }, + { name = "numpy" }, + { name = "pyopengl" }, + { name = "pyopengl-accelerate" }, + { name = "pyzed" }, +] + +[package.metadata] +requires-dist = [ + { name = "cuda-python" }, + { name = "cupy-cuda12x" }, + { name = "jupyterlab", specifier = ">=4.5.3" }, + { name = "numpy" }, + { name = "pyopengl", specifier = ">=3.1.10" }, + { name = "pyopengl-accelerate", specifier = ">=3.1.10" }, + { name = "pyzed", directory = "libs/pyzed_pkg" }, +] + +[[package]] +name = "executing" +version = "2.2.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cc/28/c14e053b6762b1044f34a13aab6859bbf40456d37d23aa286ac24cfd9a5d/executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4", size = 1129488, upload-time = "2025-09-01T09:48:10.866Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" }, +] + +[[package]] +name = "fastjsonschema" +version = "2.21.2" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/20/b5/23b216d9d985a956623b6bd12d4086b60f0059b27799f23016af04a74ea1/fastjsonschema-2.21.2.tar.gz", hash = "sha256:b1eb43748041c880796cd077f1a07c3d94e93ae84bba5ed36800a33554ae05de", size = 374130, upload-time = "2025-08-14T18:49:36.666Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/a8/20d0723294217e47de6d9e2e40fd4a9d2f7c4b6ef974babd482a59743694/fastjsonschema-2.21.2-py3-none-any.whl", hash = "sha256:1c797122d0a86c5cace2e54bf4e819c36223b552017172f32c5c024a6b77e463", size = 24024, upload-time = "2025-08-14T18:49:34.776Z" }, +] + +[[package]] +name = "fastrlock" +version = "0.8.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/73/b1/1c3d635d955f2b4bf34d45abf8f35492e04dbd7804e94ce65d9f928ef3ec/fastrlock-0.8.3.tar.gz", hash = "sha256:4af6734d92eaa3ab4373e6c9a1dd0d5ad1304e172b1521733c6c3b3d73c8fa5d", size = 79327, upload-time = "2024-12-17T11:03:39.638Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/00/df/56270f2e10c1428855c990e7a7e5baafa9e1262b8e789200bd1d047eb501/fastrlock-0.8.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:8cb2cf04352ea8575d496f31b3b88c42c7976e8e58cdd7d1550dfba80ca039da", size = 55727, upload-time = "2024-12-17T11:02:17.26Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/21/ea1511b0ef0d5457efca3bf1823effb9c5cad4fc9dca86ce08e4d65330ce/fastrlock-0.8.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:85a49a1f1e020097d087e1963e42cea6f307897d5ebe2cb6daf4af47ffdd3eed", size = 52201, upload-time = "2024-12-17T11:02:19.512Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/80/07/cdecb7aa976f34328372f1c4efd6c9dc1b039b3cc8d3f38787d640009a25/fastrlock-0.8.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:5f13ec08f1adb1aa916c384b05ecb7dbebb8df9ea81abd045f60941c6283a670", size = 53924, upload-time = "2024-12-17T11:02:20.85Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/88/6d/59c497f8db9a125066dd3a7442fab6aecbe90d6fec344c54645eaf311666/fastrlock-0.8.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0ea4e53a04980d646def0f5e4b5e8bd8c7884288464acab0b37ca0c65c482bfe", size = 52140, upload-time = "2024-12-17T11:02:22.263Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/62/04/9138943c2ee803d62a48a3c17b69de2f6fa27677a6896c300369e839a550/fastrlock-0.8.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:38340f6635bd4ee2a4fb02a3a725759fe921f2ca846cb9ca44531ba739cc17b4", size = 53261, upload-time = "2024-12-17T11:02:24.418Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e2/4b/db35a52589764c7745a613b6943bbd018f128d42177ab92ee7dde88444f6/fastrlock-0.8.3-cp312-cp312-win_amd64.whl", hash = "sha256:da06d43e1625e2ffddd303edcd6d2cd068e1c486f5fd0102b3f079c44eb13e2c", size = 31235, upload-time = "2024-12-17T11:02:25.708Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/74/7b13d836c3f221cff69d6f418f46c2a30c4b1fe09a8ce7db02eecb593185/fastrlock-0.8.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:5264088185ca8e6bc83181dff521eee94d078c269c7d557cc8d9ed5952b7be45", size = 54157, upload-time = "2024-12-17T11:02:29.196Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/77/f06a907f9a07d26d0cca24a4385944cfe70d549a2c9f1c3e3217332f4f12/fastrlock-0.8.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a98ba46b3e14927550c4baa36b752d0d2f7387b8534864a8767f83cce75c160", size = 50954, upload-time = "2024-12-17T11:02:32.12Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/4e/94480fb3fd93991dd6f4e658b77698edc343f57caa2870d77b38c89c2e3b/fastrlock-0.8.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbdea6deeccea1917c6017d353987231c4e46c93d5338ca3e66d6cd88fbce259", size = 52535, upload-time = "2024-12-17T11:02:33.402Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/a7/ee82bb55b6c0ca30286dac1e19ee9417a17d2d1de3b13bb0f20cefb86086/fastrlock-0.8.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c6e5bfecbc0d72ff07e43fed81671747914d6794e0926700677ed26d894d4f4f", size = 50942, upload-time = "2024-12-17T11:02:34.688Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/63/1d/d4b7782ef59e57dd9dde69468cc245adafc3674281905e42fa98aac30a79/fastrlock-0.8.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:2a83d558470c520ed21462d304e77a12639859b205759221c8144dd2896b958a", size = 52044, upload-time = "2024-12-17T11:02:36.613Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/a3/2ad0a0a69662fd4cf556ab8074f0de978ee9b56bff6ddb4e656df4aa9e8e/fastrlock-0.8.3-cp313-cp313-win_amd64.whl", hash = "sha256:8d1d6a28291b4ace2a66bd7b49a9ed9c762467617febdd9ab356b867ed901af8", size = 30472, upload-time = "2024-12-17T11:02:37.983Z" }, +] + +[[package]] +name = "fqdn" +version = "1.5.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/30/3e/a80a8c077fd798951169626cde3e239adeba7dab75deb3555716415bd9b0/fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", size = 6015, upload-time = 
"2021-03-11T07:16:29.08Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121, upload-time = "2021-03-11T07:16:28.351Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = 
"ipykernel" +version = "7.1.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "appnope", marker = "sys_platform == 'darwin'" }, + { name = "comm" }, + { name = "debugpy" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "matplotlib-inline" }, + { name = "nest-asyncio" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b9/a4/4948be6eb88628505b83a1f2f40d90254cab66abf2043b3c40fa07dfce0f/ipykernel-7.1.0.tar.gz", hash = "sha256:58a3fc88533d5930c3546dc7eac66c6d288acde4f801e2001e65edc5dc9cf0db", size = 174579, upload-time = "2025-10-27T09:46:39.471Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a3/17/20c2552266728ceba271967b87919664ecc0e33efca29c3efc6baf88c5f9/ipykernel-7.1.0-py3-none-any.whl", hash = "sha256:763b5ec6c5b7776f6a8d7ce09b267693b4e5ce75cb50ae696aaefb3c85e1ea4c", size = 117968, upload-time = "2025-10-27T09:46:37.805Z" }, +] + +[[package]] +name = "ipython" +version = "9.10.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "ipython-pygments-lexers" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a6/60/2111715ea11f39b1535bed6024b7dec7918b71e5e5d30855a5b503056b50/ipython-9.10.0.tar.gz", hash = "sha256:cd9e656be97618a0676d058134cd44e6dc7012c0e5cb36a9ce96a8c904adaf77", size = 4426526, upload-time = "2026-02-02T10:00:33.594Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3d/aa/898dec789a05731cd5a9f50605b7b44a72bd198fd0d4528e11fc610177cc/ipython-9.10.0-py3-none-any.whl", hash = "sha256:c6ab68cc23bba8c7e18e9b932797014cc61ea7fd6f19de180ab9ba73e65ee58d", size = 622774, upload-time = "2026-02-02T10:00:31.503Z" }, +] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "pygments" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, +] + +[[package]] +name = "isoduration" +version = "20.11.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "arrow" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7c/1a/3c8edc664e06e6bd06cce40c6b22da5f1429aa4224d0c590f3be21c91ead/isoduration-20.11.0.tar.gz", hash = 
"sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9", size = 11649, upload-time = "2020-11-01T11:00:00.312Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/55/e5326141505c5d5e34c5e0935d2908a74e4561eca44108fbfb9c13d2911a/isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042", size = 11321, upload-time = "2020-11-01T10:59:58.02Z" }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "json5" +version = "0.13.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/77/e8/a3f261a66e4663f22700bc8a17c08cb83e91fbf086726e7a228398968981/json5-0.13.0.tar.gz", hash = "sha256:b1edf8d487721c0bf64d83c28e91280781f6e21f4a797d3261c7c828d4c165bf", size = 52441, upload-time = "2026-01-01T19:42:14.99Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d7/9e/038522f50ceb7e74f1f991bf1b699f24b0c2bbe7c390dd36ad69f4582258/json5-0.13.0-py3-none-any.whl", hash = "sha256:9a08e1dd65f6a4d4c6fa82d216cf2477349ec2346a38fd70cc11d2557499fbcc", size = 36163, upload-time = "2026-01-01T19:42:13.962Z" }, +] + +[[package]] +name = "jsonpointer" +version = "3.0.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = 
"2024-06-10T19:24:40.698Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.26.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/fc/e067678238fa451312d4c62bf6e6cf5ec56375422aee02f9cb5f909b3047/jsonschema-4.26.0.tar.gz", hash = "sha256:0c26707e2efad8aa1bfc5b7ce170f3fccc2e4918ff85989ba9ffa9facb2be326", size = 366583, upload-time = "2026-01-07T13:41:07.246Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/69/90/f63fb5873511e014207a475e2bb4e8b2e570d655b00ac19a9a0ca0a385ee/jsonschema-4.26.0-py3-none-any.whl", hash = "sha256:d489f15263b8d200f8387e64b4c3a75f06629559fb73deb8fdfb525f2dab50ce", size = 90630, upload-time = "2026-01-07T13:41:05.306Z" }, +] + +[package.optional-dependencies] +format-nongpl = [ + { name = "fqdn" }, + { name = "idna" }, + { name = "isoduration" }, + { name = "jsonpointer" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "rfc3987-syntax" }, + { name = "uri-template" }, + { name = "webcolors" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "jupyter-client" +version = "8.8.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "jupyter-core" }, + { name = "python-dateutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/e4/ba649102a3bc3fbca54e7239fb924fd434c766f855693d86de0b1f2bec81/jupyter_client-8.8.0.tar.gz", hash = "sha256:d556811419a4f2d96c869af34e854e3f059b7cc2d6d01a9cd9c85c267691be3e", size = 348020, upload-time = "2026-01-08T13:55:47.938Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2d/0b/ceb7694d864abc0a047649aec263878acb9f792e1fec3e676f22dc9015e3/jupyter_client-8.8.0-py3-none-any.whl", hash = "sha256:f93a5b99c5e23a507b773d3a1136bd6e16c67883ccdbd9a829b0bbdb98cd7d7a", size = 107371, upload-time = "2026-01-08T13:55:45.562Z" }, +] + +[[package]] +name = "jupyter-core" +version = "5.9.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "platformdirs" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/02/49/9d1284d0dc65e2c757b74c6687b6d319b02f822ad039e5c512df9194d9dd/jupyter_core-5.9.1.tar.gz", hash = "sha256:4d09aaff303b9566c3ce657f580bd089ff5c91f5f89cf7d8846c3cdf465b5508", size = 89814, upload-time = 
"2025-10-16T19:19:18.444Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e7/e7/80988e32bf6f73919a113473a604f5a8f09094de312b9d52b79c2df7612b/jupyter_core-5.9.1-py3-none-any.whl", hash = "sha256:ebf87fdc6073d142e114c72c9e29a9d7ca03fad818c5d300ce2adc1fb0743407", size = 29032, upload-time = "2025-10-16T19:19:16.783Z" }, +] + +[[package]] +name = "jupyter-events" +version = "0.12.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "jsonschema", extra = ["format-nongpl"] }, + { name = "packaging" }, + { name = "python-json-logger" }, + { name = "pyyaml" }, + { name = "referencing" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/c3/306d090461e4cf3cd91eceaff84bede12a8e52cd821c2d20c9a4fd728385/jupyter_events-0.12.0.tar.gz", hash = "sha256:fc3fce98865f6784c9cd0a56a20644fc6098f21c8c33834a8d9fe383c17e554b", size = 62196, upload-time = "2025-02-03T17:23:41.485Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e2/48/577993f1f99c552f18a0428731a755e06171f9902fa118c379eb7c04ea22/jupyter_events-0.12.0-py3-none-any.whl", hash = "sha256:6464b2fa5ad10451c3d35fabc75eab39556ae1e2853ad0c0cc31b656731a97fb", size = 19430, upload-time = "2025-02-03T17:23:38.643Z" }, +] + +[[package]] +name = "jupyter-lsp" +version = "2.3.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/eb/5a/9066c9f8e94ee517133cd98dba393459a16cd48bba71a82f16a65415206c/jupyter_lsp-2.3.0.tar.gz", hash = "sha256:458aa59339dc868fb784d73364f17dbce8836e906cd75fd471a325cba02e0245", size = 54823, upload-time = "2025-08-27T17:47:34.671Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1a/60/1f6cee0c46263de1173894f0fafcb3475ded276c472c14d25e0280c18d6d/jupyter_lsp-2.3.0-py3-none-any.whl", hash = "sha256:e914a3cb2addf48b1c7710914771aaf1819d46b2e5a79b0f917b5478ec93f34f", size = 76687, upload-time = "2025-08-27T17:47:33.15Z" }, +] + +[[package]] +name = "jupyter-server" +version = "2.17.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "anyio" }, + { name = "argon2-cffi" }, + { name = "jinja2" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "jupyter-events" }, + { name = "jupyter-server-terminals" }, + { name = "nbconvert" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "prometheus-client" }, + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pyzmq" }, + { name = "send2trash" }, + { name = "terminado" }, + { name = "tornado" }, + { name = "traitlets" }, + { name = "websocket-client" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5b/ac/e040ec363d7b6b1f11304cc9f209dac4517ece5d5e01821366b924a64a50/jupyter_server-2.17.0.tar.gz", hash = "sha256:c38ea898566964c888b4772ae1ed58eca84592e88251d2cfc4d171f81f7e99d5", size = 731949, upload-time = "2025-08-21T14:42:54.042Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/80/a24767e6ca280f5a49525d987bf3e4d7552bf67c8be07e8ccf20271f8568/jupyter_server-2.17.0-py3-none-any.whl", hash = "sha256:e8cb9c7db4251f51ed307e329b81b72ccf2056ff82d50524debde1ee1870e13f", size = 388221, upload-time = 
"2025-08-21T14:42:52.034Z" }, +] + +[[package]] +name = "jupyter-server-terminals" +version = "0.5.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "terminado" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f4/a7/bcd0a9b0cbba88986fe944aaaf91bfda603e5a50bda8ed15123f381a3b2f/jupyter_server_terminals-0.5.4.tar.gz", hash = "sha256:bbda128ed41d0be9020349f9f1f2a4ab9952a73ed5f5ac9f1419794761fb87f5", size = 31770, upload-time = "2026-01-14T16:53:20.213Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/2d/6674563f71c6320841fc300911a55143925112a72a883e2ca71fba4c618d/jupyter_server_terminals-0.5.4-py3-none-any.whl", hash = "sha256:55be353fc74a80bc7f3b20e6be50a55a61cd525626f578dcb66a5708e2007d14", size = 13704, upload-time = "2026-01-14T16:53:18.738Z" }, +] + +[[package]] +name = "jupyterlab" +version = "4.5.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "async-lru" }, + { name = "httpx" }, + { name = "ipykernel" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyter-lsp" }, + { name = "jupyter-server" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "packaging" }, + { name = "setuptools" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3e/76/393eae3349f9a39bf21f8f5406e5244d36e2bfc932049b6070c271f92764/jupyterlab-4.5.3.tar.gz", hash = "sha256:4a159f71067cb38e4a82e86a42de8e7e926f384d7f2291964f282282096d27e8", size = 23939231, upload-time = "2026-01-23T15:04:25.768Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/9a/0bf9a7a45f0006d7ff4fdc4fc313de4255acab02bf4db1887c65f0472c01/jupyterlab-4.5.3-py3-none-any.whl", hash = "sha256:63c9f3a48de72ba00df766ad6eed416394f5bb883829f11eeff0872302520ba7", size = 12391761, upload-time = "2026-01-23T15:04:21.214Z" }, +] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/90/51/9187be60d989df97f5f0aba133fa54e7300f17616e065d1ada7d7646b6d6/jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b1/dd/ead9d8ea85bf202d90cc513b533f9c363121c7792674f78e0d8a854b63b4/jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780" }, +] + +[[package]] +name = "jupyterlab-server" +version = "2.28.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "babel" }, + { name = "jinja2" }, + { name = "json5" }, + { name = "jsonschema" }, + { name = "jupyter-server" }, + { name = "packaging" }, + { name = "requests" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/2c/90153f189e421e93c4bb4f9e3f59802a1f01abd2ac5cf40b152d7f735232/jupyterlab_server-2.28.0.tar.gz", hash = "sha256:35baa81898b15f93573e2deca50d11ac0ae407ebb688299d3a5213265033712c", size = 76996, upload-time = "2025-10-22T13:59:18.37Z" } +wheels = [ + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/07/a000fe835f76b7e1143242ab1122e6362ef1c03f23f83a045c38859c2ae0/jupyterlab_server-2.28.0-py3-none-any.whl", hash = "sha256:e4355b148fdcf34d312bbbc80f22467d6d20460e8b8736bf235577dd18506968", size = 59830, upload-time = "2025-10-22T13:59:16.767Z" }, +] + +[[package]] +name = "lark" +version = "1.3.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/34/28fff3ab31ccff1fd4f6c7c7b0ceb2b6968d8ea4950663eadcb5720591a0/lark-1.3.1.tar.gz", hash = "sha256:b426a7a6d6d53189d318f2b6236ab5d6429eaf09259f1ca33eb716eed10d2905", size = 382732, upload-time = "2025-10-27T18:25:56.653Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/82/3d/14ce75ef66813643812f3093ab17e46d3a206942ce7376d31ec2d36229e7/lark-1.3.1-py3-none-any.whl", hash = "sha256:c629b661023a014c37da873b4ff58a817398d12635d3bbb2c5a03be7fe5d1e12", size = 113151, upload-time = "2025-10-27T18:25:54.882Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, 
upload-time = "2025-09-27T18:36:36.001Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash 
= "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/33/8a/8e42d4838cd89b7dde187011e97fe6c3af66d8c044997d2183fbd6d31352/markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe", size = 11619, upload-time = "2025-09-27T18:37:06.342Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b5/64/7660f8a4a8e53c924d0fa05dc3a55c9cee10bbd82b11c5afb27d44b096ce/markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026", size = 12029, upload-time = "2025-09-27T18:37:07.213Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/ef/e648bfd021127bef5fa12e1720ffed0c6cbb8310c8d9bea7266337ff06de/markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737", size = 24408, upload-time = "2025-09-27T18:37:09.572Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/41/3c/a36c2450754618e62008bf7435ccb0f88053e07592e6028a34776213d877/markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97", size = 23005, upload-time = "2025-09-27T18:37:10.58Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bc/20/b7fdf89a8456b099837cd1dc21974632a02a999ec9bf7ca3e490aacd98e7/markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d", size = 22048, upload-time = "2025-09-27T18:37:11.547Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/a7/591f592afdc734f47db08a75793a55d7fbcc6902a723ae4cfbab61010cc5/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda", size = 23821, upload-time = "2025-09-27T18:37:12.48Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/33/45b24e4f44195b26521bc6f1a82197118f74df348556594bd2262bda1038/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf", size = 21606, upload-time = "2025-09-27T18:37:13.485Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ff/0e/53dfaca23a69fbfbbf17a4b64072090e70717344c52eaaaa9c5ddff1e5f0/markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe", size = 23043, upload-time = "2025-09-27T18:37:14.408Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/46/11/f333a06fc16236d5238bfe74daccbca41459dcd8d1fa952e8fbd5dccfb70/markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9", size = 14747, upload-time = "2025-09-27T18:37:15.36Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/52/182836104b33b444e400b14f797212f720cbc9ed6ba34c800639d154e821/markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581", size = 15341, upload-time = "2025-09-27T18:37:16.496Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6f/18/acf23e91bd94fd7b3031558b1f013adfa21a8e407a3fdb32745538730382/markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4", size = 14073, upload-time = "2025-09-27T18:37:17.476Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3c/f0/57689aa4076e1b43b15fdfa646b04653969d50cf30c32a102762be2485da/markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab", size = 11661, upload-time = "2025-09-27T18:37:18.453Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/89/c3/2e67a7ca217c6912985ec766c6393b636fb0c2344443ff9d91404dc4c79f/markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175", size = 12069, upload-time = "2025-09-27T18:37:19.332Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f0/00/be561dce4e6ca66b15276e184ce4b8aec61fe83662cce2f7d72bd3249d28/markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634", size = 25670, upload-time = "2025-09-27T18:37:20.245Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/09/c419f6f5a92e5fadde27efd190eca90f05e1261b10dbd8cbcb39cd8ea1dc/markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50", size = 23598, upload-time = "2025-09-27T18:37:21.177Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/44/a0681611106e0b2921b3033fc19bc53323e0b50bc70cffdd19f7d679bb66/markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e", size = 23261, upload-time = "2025-09-27T18:37:22.167Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5f/57/1b0b3f100259dc9fffe780cfb60d4be71375510e435efec3d116b6436d43/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5", size = 24835, upload-time = "2025-09-27T18:37:23.296Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/26/6a/4bf6d0c97c4920f1597cc14dd720705eca0bf7c787aebc6bb4d1bead5388/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523", size = 22733, upload-time = "2025-09-27T18:37:24.237Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/14/c7/ca723101509b518797fedc2fdf79ba57f886b4aca8a7d31857ba3ee8281f/markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc", size = 23672, upload-time = "2025-09-27T18:37:25.271Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fb/df/5bd7a48c256faecd1d36edc13133e51397e41b73bb77e1a69deab746ebac/markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d", size = 14819, upload-time = "2025-09-27T18:37:26.285Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1a/8a/0402ba61a2f16038b48b39bccca271134be00c5c9f0f623208399333c448/markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9", size = 15426, upload-time = "2025-09-27T18:37:27.316Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.2.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/74/97e72a36efd4ae2bccb3463284300f8953f199b5ffbc04cbbb0ec78f74b1/matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe", size = 8110, upload-time = "2025-10-23T09:00:22.126Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" }, +] + +[[package]] +name = "mistune" +version = "3.2.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/55/d01f0c4b45ade6536c51170b9043db8b2ec6ddf4a35c7ea3f5f559ac935b/mistune-3.2.0.tar.gz", hash = "sha256:708487c8a8cdd99c9d90eb3ed4c3ed961246ff78ac82f03418f5183ab70e398a", size = 95467, upload-time = "2025-12-23T11:36:34.994Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9b/f7/4a5e785ec9fbd65146a27b6b70b6cdc161a66f2024e4b04ac06a67f5578b/mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1", size = 53598, upload-time = "2025-12-23T11:36:33.211Z" }, +] + +[[package]] +name = "nbclient" +version = "0.10.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "nbformat" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/56/91/1c1d5a4b9a9ebba2b4e32b8c852c2975c872aec1fe42ab5e516b2cecd193/nbclient-0.10.4.tar.gz", hash = "sha256:1e54091b16e6da39e297b0ece3e10f6f29f4ac4e8ee515d29f8a7099bd6553c9", size = 62554, upload-time = "2025-12-23T07:45:46.369Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/83/a0/5b0c2f11142ed1dddec842457d3f65eaf71a0080894eb6f018755b319c3a/nbclient-0.10.4-py3-none-any.whl", hash = "sha256:9162df5a7373d70d606527300a95a975a47c137776cd942e52d9c7e29ff83440", size = 25465, upload-time = "2025-12-23T07:45:44.51Z" }, +] + +[[package]] +name = "nbconvert" +version = "7.17.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "bleach", extra = ["css"] }, + { name = "defusedxml" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyterlab-pygments" }, + { name = "markupsafe" }, + { name = "mistune" }, + { name = "nbclient" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "pandocfilters" }, + { name = "pygments" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/38/47/81f886b699450d0569f7bc551df2b1673d18df7ff25cc0c21ca36ed8a5ff/nbconvert-7.17.0.tar.gz", hash = "sha256:1b2696f1b5be12309f6c7d707c24af604b87dfaf6d950794c7b07acab96dda78", size = 862855, upload-time = "2026-01-29T16:37:48.478Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0d/4b/8d5f796a792f8a25f6925a96032f098789f448571eb92011df1ae59e8ea8/nbconvert-7.17.0-py3-none-any.whl", hash = "sha256:4f99a63b337b9a23504347afdab24a11faa7d86b405e5c8f9881cd313336d518", size = 261510, upload-time = "2026-01-29T16:37:46.322Z" }, +] + +[[package]] +name = "nbformat" +version = "5.10.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "fastjsonschema" }, + { name = "jsonschema" }, + { name = "jupyter-core" }, + { name = "traitlets" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = 
"2024-04-04T11:20:34.895Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + +[[package]] +name = "notebook-shim" +version = "0.2.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/54/d2/92fa3243712b9a3e8bafaf60aac366da1cada3639ca767ff4b5b3654ec28/notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb", size = 13167, upload-time = "2024-02-14T23:35:18.353Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/33/bd5b9137445ea4b680023eb0469b2bb969d61303dedb2aac6560ff3d14a1/notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef", size = 13307, upload-time = "2024-02-14T23:35:16.286Z" }, +] + +[[package]] +name = "numpy" +version = "2.4.2" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/fd/0005efbd0af48e55eb3c7208af93f2862d4b1a56cd78e84309a2d959208d/numpy-2.4.2.tar.gz", hash = "sha256:659a6107e31a83c4e33f763942275fd278b21d095094044eb35569e86a21ddae", size = 20723651, upload-time = "2026-01-31T23:13:10.135Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/51/6e/6f394c9c77668153e14d4da83bcc247beb5952f6ead7699a1a2992613bea/numpy-2.4.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:21982668592194c609de53ba4933a7471880ccbaadcc52352694a59ecc860b3a", size = 16667963, upload-time = "2026-01-31T23:10:52.147Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1f/f8/55483431f2b2fd015ae6ed4fe62288823ce908437ed49db5a03d15151678/numpy-2.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40397bda92382fcec844066efb11f13e1c9a3e2a8e8f318fb72ed8b6db9f60f1", size = 14693571, upload-time = "2026-01-31T23:10:54.789Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/20/18026832b1845cdc82248208dd929ca14c9d8f2bac391f67440707fff27c/numpy-2.4.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:b3a24467af63c67829bfaa61eecf18d5432d4f11992688537be59ecd6ad32f5e", size = 5203469, upload-time = "2026-01-31T23:10:57.343Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/33/2eb97c8a77daaba34eaa3fa7241a14ac5f51c46a6bd5911361b644c4a1e2/numpy-2.4.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:805cc8de9fd6e7a22da5aed858e0ab16be5a4db6c873dde1d7451c541553aa27", size = 6550820, upload-time = "2026-01-31T23:10:59.429Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b1/91/b97fdfd12dc75b02c44e26c6638241cc004d4079a0321a69c62f51470c4c/numpy-2.4.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d82351358ffbcdcd7b686b90742a9b86632d6c1c051016484fa0b326a0a1548", size = 15663067, upload-time = "2026-01-31T23:11:01.291Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f5/c6/a18e59f3f0b8071cc85cbc8d80cd02d68aa9710170b2553a117203d46936/numpy-2.4.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e35d3e0144137d9fdae62912e869136164534d64a169f86438bc9561b6ad49f", size = 16619782, upload-time = "2026-01-31T23:11:03.669Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/83/9751502164601a79e18847309f5ceec0b1446d7b6aa12305759b72cf98b2/numpy-2.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adb6ed2ad29b9e15321d167d152ee909ec73395901b70936f029c3bc6d7f4460", size = 17013128, upload-time = "2026-01-31T23:11:05.913Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/61/c4/c4066322256ec740acc1c8923a10047818691d2f8aec254798f3dd90f5f2/numpy-2.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8906e71fd8afcb76580404e2a950caef2685df3d2a57fe82a86ac8d33cc007ba", size = 18345324, upload-time = "2026-01-31T23:11:08.248Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ab/af/6157aa6da728fa4525a755bfad486ae7e3f76d4c1864138003eb84328497/numpy-2.4.2-cp312-cp312-win32.whl", hash = "sha256:ec055f6dae239a6299cace477b479cca2fc125c5675482daf1dd886933a1076f", size = 5960282, upload-time = "2026-01-31T23:11:10.497Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/0f/7ceaaeaacb40567071e94dbf2c9480c0ae453d5bb4f52bea3892c39dc83c/numpy-2.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:209fae046e62d0ce6435fcfe3b1a10537e858249b3d9b05829e2a05218296a85", size = 12314210, upload-time = "2026-01-31T23:11:12.176Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/a3/56c5c604fae6dd40fa2ed3040d005fca97e91bd320d232ac9931d77ba13c/numpy-2.4.2-cp312-cp312-win_arm64.whl", hash = "sha256:fbde1b0c6e81d56f5dccd95dd4a711d9b95df1ae4009a60887e56b27e8d903fa", size = 10220171, upload-time = "2026-01-31T23:11:14.684Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a1/22/815b9fe25d1d7ae7d492152adbc7226d3eff731dffc38fe970589fcaaa38/numpy-2.4.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:25f2059807faea4b077a2b6837391b5d830864b3543627f381821c646f31a63c", size = 16663696, upload-time = "2026-01-31T23:11:17.516Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/09/f0/817d03a03f93ba9c6c8993de509277d84e69f9453601915e4a69554102a1/numpy-2.4.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bd3a7a9f5847d2fb8c2c6d1c862fa109c31a9abeca1a3c2bd5a64572955b2979", size = 14688322, upload-time = "2026-01-31T23:11:19.883Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/b4/f805ab79293c728b9a99438775ce51885fd4f31b76178767cfc718701a39/numpy-2.4.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:8e4549f8a3c6d13d55041925e912bfd834285ef1dd64d6bc7d542583355e2e98", size = 5198157, upload-time = "2026-01-31T23:11:22.375Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/09/826e4289844eccdcd64aac27d13b0fd3f32039915dd5b9ba01baae1f436c/numpy-2.4.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:aea4f66ff44dfddf8c2cffd66ba6538c5ec67d389285292fe428cb2c738c8aef", size = 6546330, upload-time 
= "2026-01-31T23:11:23.958Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/19/fb/cbfdbfa3057a10aea5422c558ac57538e6acc87ec1669e666d32ac198da7/numpy-2.4.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3cd545784805de05aafe1dde61752ea49a359ccba9760c1e5d1c88a93bbf2b7", size = 15660968, upload-time = "2026-01-31T23:11:25.713Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/dc/46066ce18d01645541f0186877377b9371b8fa8017fa8262002b4ef22612/numpy-2.4.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0d9b7c93578baafcbc5f0b83eaf17b79d345c6f36917ba0c67f45226911d499", size = 16607311, upload-time = "2026-01-31T23:11:28.117Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/14/d9/4b5adfc39a43fa6bf918c6d544bc60c05236cc2f6339847fc5b35e6cb5b0/numpy-2.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f74f0f7779cc7ae07d1810aab8ac6b1464c3eafb9e283a40da7309d5e6e48fbb", size = 17012850, upload-time = "2026-01-31T23:11:30.888Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/20/adb6e6adde6d0130046e6fdfb7675cc62bc2f6b7b02239a09eb58435753d/numpy-2.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c7ac672d699bf36275c035e16b65539931347d68b70667d28984c9fb34e07fa7", size = 18334210, upload-time = "2026-01-31T23:11:33.214Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/78/0e/0a73b3dff26803a8c02baa76398015ea2a5434d9b8265a7898a6028c1591/numpy-2.4.2-cp313-cp313-win32.whl", hash = "sha256:8e9afaeb0beff068b4d9cd20d322ba0ee1cecfb0b08db145e4ab4dd44a6b5110", size = 5958199, upload-time = "2026-01-31T23:11:35.385Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/43/bc/6352f343522fcb2c04dbaf94cb30cca6fd32c1a750c06ad6231b4293708c/numpy-2.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:7df2de1e4fba69a51c06c28f5a3de36731eb9639feb8e1cf7e4a7b0daf4cf622", size = 12310848, upload-time = "2026-01-31T23:11:38.001Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6e/8d/6da186483e308da5da1cc6918ce913dcfe14ffde98e710bfeff2a6158d4e/numpy-2.4.2-cp313-cp313-win_arm64.whl", hash = "sha256:0fece1d1f0a89c16b03442eae5c56dc0be0c7883b5d388e0c03f53019a4bfd71", size = 10221082, upload-time = "2026-01-31T23:11:40.392Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/25/a1/9510aa43555b44781968935c7548a8926274f815de42ad3997e9e83680dd/numpy-2.4.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5633c0da313330fd20c484c78cdd3f9b175b55e1a766c4a174230c6b70ad8262", size = 14815866, upload-time = "2026-01-31T23:11:42.495Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/36/30/6bbb5e76631a5ae46e7923dd16ca9d3f1c93cfa8d4ed79a129814a9d8db3/numpy-2.4.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d9f64d786b3b1dd742c946c42d15b07497ed14af1a1f3ce840cce27daa0ce913", size = 5325631, upload-time = "2026-01-31T23:11:44.7Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/46/00/3a490938800c1923b567b3a15cd17896e68052e2145d8662aaf3e1ffc58f/numpy-2.4.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:b21041e8cb6a1eb5312dd1d2f80a94d91efffb7a06b70597d44f1bd2dfc315ab", size = 6646254, upload-time = "2026-01-31T23:11:46.341Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d3/e9/fac0890149898a9b609caa5af7455a948b544746e4b8fe7c212c8edd71f8/numpy-2.4.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:00ab83c56211a1d7c07c25e3217ea6695e50a3e2f255053686b081dc0b091a82", size = 15720138, upload-time = "2026-01-31T23:11:48.082Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ea/5c/08887c54e68e1e28df53709f1893ce92932cc6f01f7c3d4dc952f61ffd4e/numpy-2.4.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2fb882da679409066b4603579619341c6d6898fc83a8995199d5249f986e8e8f", size = 16655398, upload-time = "2026-01-31T23:11:50.293Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4d/89/253db0fa0e66e9129c745e4ef25631dc37d5f1314dad2b53e907b8538e6d/numpy-2.4.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:66cb9422236317f9d44b67b4d18f44efe6e9c7f8794ac0462978513359461554", size = 17079064, upload-time = "2026-01-31T23:11:52.927Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2a/d5/cbade46ce97c59c6c3da525e8d95b7abe8a42974a1dc5c1d489c10433e88/numpy-2.4.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0f01dcf33e73d80bd8dc0f20a71303abbafa26a19e23f6b68d1aa9990af90257", size = 18379680, upload-time = "2026-01-31T23:11:55.22Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/40/62/48f99ae172a4b63d981babe683685030e8a3df4f246c893ea5c6ef99f018/numpy-2.4.2-cp313-cp313t-win32.whl", hash = "sha256:52b913ec40ff7ae845687b0b34d8d93b60cb66dcee06996dd5c99f2fc9328657", size = 6082433, upload-time = "2026-01-31T23:11:58.096Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/07/38/e054a61cfe48ad9f1ed0d188e78b7e26859d0b60ef21cd9de4897cdb5326/numpy-2.4.2-cp313-cp313t-win_amd64.whl", hash = "sha256:5eea80d908b2c1f91486eb95b3fb6fab187e569ec9752ab7d9333d2e66bf2d6b", size = 12451181, upload-time = "2026-01-31T23:11:59.782Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6e/a4/a05c3a6418575e185dd84d0b9680b6bb2e2dc3e4202f036b7b4e22d6e9dc/numpy-2.4.2-cp313-cp313t-win_arm64.whl", hash = "sha256:fd49860271d52127d61197bb50b64f58454e9f578cb4b2c001a6de8b1f50b0b1", size = 10290756, upload-time = "2026-01-31T23:12:02.438Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/88/b7df6050bf18fdcfb7046286c6535cabbdd2064a3440fca3f069d319c16e/numpy-2.4.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:444be170853f1f9d528428eceb55f12918e4fda5d8805480f36a002f1415e09b", size = 16663092, upload-time = "2026-01-31T23:12:04.521Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/25/7a/1fee4329abc705a469a4afe6e69b1ef7e915117747886327104a8493a955/numpy-2.4.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:d1240d50adff70c2a88217698ca844723068533f3f5c5fa6ee2e3220e3bdb000", size = 14698770, upload-time = "2026-01-31T23:12:06.96Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fb/0b/f9e49ba6c923678ad5bc38181c08ac5e53b7a5754dbca8e581aa1a56b1ff/numpy-2.4.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:7cdde6de52fb6664b00b056341265441192d1291c130e99183ec0d4b110ff8b1", size = 5208562, upload-time = "2026-01-31T23:12:09.632Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7d/12/d7de8f6f53f9bb76997e5e4c069eda2051e3fe134e9181671c4391677bb2/numpy-2.4.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:cda077c2e5b780200b6b3e09d0b42205a3d1c68f30c6dceb90401c13bff8fe74", size = 6543710, upload-time = "2026-01-31T23:12:11.969Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/09/63/c66418c2e0268a31a4cf8a8b512685748200f8e8e8ec6c507ce14e773529/numpy-2.4.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d30291931c915b2ab5717c2974bb95ee891a1cf22ebc16a8006bd59cd210d40a", size = 15677205, upload-time = "2026-01-31T23:12:14.33Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5d/6c/7f237821c9642fb2a04d2f1e88b4295677144ca93285fd76eff3bcba858d/numpy-2.4.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bba37bc29d4d85761deed3954a1bc62be7cf462b9510b51d367b769a8c8df325", size = 16611738, upload-time = "2026-01-31T23:12:16.525Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/a7/39c4cdda9f019b609b5c473899d87abff092fc908cfe4d1ecb2fcff453b0/numpy-2.4.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b2f0073ed0868db1dcd86e052d37279eef185b9c8db5bf61f30f46adac63c909", size = 17028888, upload-time = "2026-01-31T23:12:19.306Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/b3/e84bb64bdfea967cc10950d71090ec2d84b49bc691df0025dddb7c26e8e3/numpy-2.4.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7f54844851cdb630ceb623dcec4db3240d1ac13d4990532446761baede94996a", size = 18339556, upload-time = "2026-01-31T23:12:21.816Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/88/f5/954a291bc1192a27081706862ac62bb5920fbecfbaa302f64682aa90beed/numpy-2.4.2-cp314-cp314-win32.whl", hash = "sha256:12e26134a0331d8dbd9351620f037ec470b7c75929cb8a1537f6bfe411152a1a", size = 6006899, upload-time = "2026-01-31T23:12:24.14Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/cb/eff72a91b2efdd1bc98b3b8759f6a1654aa87612fc86e3d87d6fe4f948c4/numpy-2.4.2-cp314-cp314-win_amd64.whl", hash = "sha256:068cdb2d0d644cdb45670810894f6a0600797a69c05f1ac478e8d31670b8ee75", size = 12443072, upload-time = "2026-01-31T23:12:26.33Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/37/75/62726948db36a56428fce4ba80a115716dc4fad6a3a4352487f8bb950966/numpy-2.4.2-cp314-cp314-win_arm64.whl", hash = "sha256:6ed0be1ee58eef41231a5c943d7d1375f093142702d5723ca2eb07db9b934b05", size = 10494886, upload-time = "2026-01-31T23:12:28.488Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/36/2f/ee93744f1e0661dc267e4b21940870cabfae187c092e1433b77b09b50ac4/numpy-2.4.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:98f16a80e917003a12c0580f97b5f875853ebc33e2eaa4bccfc8201ac6869308", size = 14818567, upload-time = "2026-01-31T23:12:30.709Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a7/24/6535212add7d76ff938d8bdc654f53f88d35cddedf807a599e180dcb8e66/numpy-2.4.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:20abd069b9cda45874498b245c8015b18ace6de8546bf50dfa8cea1696ed06ef", size = 5328372, upload-time = "2026-01-31T23:12:32.962Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5e/9d/c48f0a035725f925634bf6b8994253b43f2047f6778a54147d7e213bc5a7/numpy-2.4.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e98c97502435b53741540a5717a6749ac2ada901056c7db951d33e11c885cc7d", size = 6649306, upload-time = "2026-01-31T23:12:34.797Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/81/05/7c73a9574cd4a53a25907bad38b59ac83919c0ddc8234ec157f344d57d9a/numpy-2.4.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:da6cad4e82cb893db4b69105c604d805e0c3ce11501a55b5e9f9083b47d2ffe8", 
size = 15722394, upload-time = "2026-01-31T23:12:36.565Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/35/fa/4de10089f21fc7d18442c4a767ab156b25c2a6eaf187c0db6d9ecdaeb43f/numpy-2.4.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e4424677ce4b47fe73c8b5556d876571f7c6945d264201180db2dc34f676ab5", size = 16653343, upload-time = "2026-01-31T23:12:39.188Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b8/f9/d33e4ffc857f3763a57aa85650f2e82486832d7492280ac21ba9efda80da/numpy-2.4.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:2b8f157c8a6f20eb657e240f8985cc135598b2b46985c5bccbde7616dc9c6b1e", size = 17078045, upload-time = "2026-01-31T23:12:42.041Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c8/b8/54bdb43b6225badbea6389fa038c4ef868c44f5890f95dd530a218706da3/numpy-2.4.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5daf6f3914a733336dab21a05cdec343144600e964d2fcdabaac0c0269874b2a", size = 18380024, upload-time = "2026-01-31T23:12:44.331Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a5/55/6e1a61ded7af8df04016d81b5b02daa59f2ea9252ee0397cb9f631efe9e5/numpy-2.4.2-cp314-cp314t-win32.whl", hash = "sha256:8c50dd1fc8826f5b26a5ee4d77ca55d88a895f4e4819c7ecc2a9f5905047a443", size = 6153937, upload-time = "2026-01-31T23:12:47.229Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/45/aa/fa6118d1ed6d776b0983f3ceac9b1a5558e80df9365b1c3aa6d42bf9eee4/numpy-2.4.2-cp314-cp314t-win_amd64.whl", hash = "sha256:fcf92bee92742edd401ba41135185866f7026c502617f422eb432cfeca4fe236", size = 12631844, upload-time = "2026-01-31T23:12:48.997Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/32/0a/2ec5deea6dcd158f254a7b372fb09cfba5719419c8d66343bab35237b3fb/numpy-2.4.2-cp314-cp314t-win_arm64.whl", hash = "sha256:1f92f53998a17265194018d1cc321b2e96e900ca52d54c7c77837b71b9465181", size = 10565379, upload-time = "2026-01-31T23:12:51.345Z" }, +] + +[[package]] +name = "packaging" +version = "26.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, +] + +[[package]] +name = "pandocfilters" +version = "1.5.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/6f/3dd4940bbe001c06a65f88e36bad298bc7a0de5036115639926b0c5c0458/pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e", size = 8454, upload-time = "2024-01-18T20:08:13.726Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ef/af/4fbc8cab944db5d21b7e2a5b8e9211a03a79852b1157e2c102fcc61ac440/pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc", size = 8663, upload-time = "2024-01-18T20:08:11.28Z" }, +] + 
+[[package]] +name = "parso" +version = "0.8.5" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d4/de/53e0bcf53d13e005bd8c92e7855142494f41171b34c2536b86187474184d/parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a", size = 401205, upload-time = "2025-08-23T15:15:28.028Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887", size = 106668, upload-time = "2025-08-23T15:15:25.663Z" }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "prometheus-client" +version = "0.24.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f0/58/a794d23feb6b00fc0c72787d7e87d872a6730dd9ed7c7b3e954637d8f280/prometheus_client-0.24.1.tar.gz", hash = "sha256:7e0ced7fbbd40f7b84962d5d2ab6f17ef88a72504dcf7c0b40737b43b2a461f9", size = 85616, upload-time = "2026-01-14T15:26:26.965Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/c3/24a2f845e3917201628ecaba4f18bab4d18a337834c1df2a159ee9d22a42/prometheus_client-0.24.1-py3-none-any.whl", hash = "sha256:150db128af71a5c2482b36e588fc8a6b95e498750da4b17065947c16070f4055", size = 64057, upload-time = "2026-01-14T15:26:24.42Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.52" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } +wheels = [ + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, +] + +[[package]] +name = "psutil" +version = "7.2.2" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/aa/c6/d1ddf4abb55e93cebc4f2ed8b5d6dbad109ecb8d63748dd2b20ab5e57ebe/psutil-7.2.2.tar.gz", hash = "sha256:0746f5f8d406af344fd547f1c8daa5f5c33dbc293bb8d6a16d80b4bb88f59372", size = 493740, upload-time = "2026-01-28T18:14:54.428Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/51/08/510cbdb69c25a96f4ae523f733cdc963ae654904e8db864c07585ef99875/psutil-7.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2edccc433cbfa046b980b0df0171cd25bcaeb3a68fe9022db0979e7aa74a826b", size = 130595, upload-time = "2026-01-28T18:14:57.293Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/f5/97baea3fe7a5a9af7436301f85490905379b1c6f2dd51fe3ecf24b4c5fbf/psutil-7.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78c8603dcd9a04c7364f1a3e670cea95d51ee865e4efb3556a3a63adef958ea", size = 131082, upload-time = "2026-01-28T18:14:59.732Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/37/d6/246513fbf9fa174af531f28412297dd05241d97a75911ac8febefa1a53c6/psutil-7.2.2-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a571f2330c966c62aeda00dd24620425d4b0cc86881c89861fbc04549e5dc63", size = 181476, upload-time = "2026-01-28T18:15:01.884Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b8/b5/9182c9af3836cca61696dabe4fd1304e17bc56cb62f17439e1154f225dd3/psutil-7.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:917e891983ca3c1887b4ef36447b1e0873e70c933afc831c6b6da078ba474312", size = 184062, upload-time = "2026-01-28T18:15:04.436Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/16/ba/0756dca669f5a9300d0cbcbfae9a4c30e446dfc7440ffe43ded5724bfd93/psutil-7.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:ab486563df44c17f5173621c7b198955bd6b613fb87c71c161f827d3fb149a9b", size = 139893, upload-time = "2026-01-28T18:15:06.378Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1c/61/8fa0e26f33623b49949346de05ec1ddaad02ed8ba64af45f40a147dbfa97/psutil-7.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:ae0aefdd8796a7737eccea863f80f81e468a1e4cf14d926bd9b6f5f2d5f90ca9", size = 135589, upload-time = "2026-01-28T18:15:08.03Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/81/69/ef179ab5ca24f32acc1dac0c247fd6a13b501fd5534dbae0e05a1c48b66d/psutil-7.2.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:eed63d3b4d62449571547b60578c5b2c4bcccc5387148db46e0c2313dad0ee00", size = 130664, upload-time = "2026-01-28T18:15:09.469Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/64/665248b557a236d3fa9efc378d60d95ef56dd0a490c2cd37dafc7660d4a9/psutil-7.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7b6d09433a10592ce39b13d7be5a54fbac1d1228ed29abc880fb23df7cb694c9", size = 131087, upload-time = "2026-01-28T18:15:11.724Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d5/2e/e6782744700d6759ebce3043dcfa661fb61e2fb752b91cdeae9af12c2178/psutil-7.2.2-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fa4ecf83bcdf6e6c8f4449aff98eefb5d0604bf88cb883d7da3d8d2d909546a", size = 182383, upload-time = "2026-01-28T18:15:13.445Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/57/49/0a41cefd10cb7505cdc04dab3eacf24c0c2cb158a998b8c7b1d27ee2c1f5/psutil-7.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e452c464a02e7dc7822a05d25db4cde564444a67e58539a00f929c51eddda0cf", size = 185210, upload-time = "2026-01-28T18:15:16.002Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/dd/2c/ff9bfb544f283ba5f83ba725a3c5fec6d6b10b8f27ac1dc641c473dc390d/psutil-7.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c7663d4e37f13e884d13994247449e9f8f574bc4655d509c3b95e9ec9e2b9dc1", size = 141228, upload-time = "2026-01-28T18:15:18.385Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f2/fc/f8d9c31db14fcec13748d373e668bc3bed94d9077dbc17fb0eebc073233c/psutil-7.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:11fe5a4f613759764e79c65cf11ebdf26e33d6dd34336f8a337aa2996d71c841", size = 136284, upload-time = "2026-01-28T18:15:19.912Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e7/36/5ee6e05c9bd427237b11b3937ad82bb8ad2752d72c6969314590dd0c2f6e/psutil-7.2.2-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ed0cace939114f62738d808fdcecd4c869222507e266e574799e9c0faa17d486", size = 129090, upload-time = "2026-01-28T18:15:22.168Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/80/c4/f5af4c1ca8c1eeb2e92ccca14ce8effdeec651d5ab6053c589b074eda6e1/psutil-7.2.2-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:1a7b04c10f32cc88ab39cbf606e117fd74721c831c98a27dc04578deb0c16979", size = 129859, upload-time = "2026-01-28T18:15:23.795Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b5/70/5d8df3b09e25bce090399cf48e452d25c935ab72dad19406c77f4e828045/psutil-7.2.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:076a2d2f923fd4821644f5ba89f059523da90dc9014e85f8e45a5774ca5bc6f9", size = 155560, upload-time = "2026-01-28T18:15:25.976Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/63/65/37648c0c158dc222aba51c089eb3bdfa238e621674dc42d48706e639204f/psutil-7.2.2-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0726cecd84f9474419d67252add4ac0cd9811b04d61123054b9fb6f57df6e9e", size = 156997, upload-time = "2026-01-28T18:15:27.794Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8e/13/125093eadae863ce03c6ffdbae9929430d116a246ef69866dad94da3bfbc/psutil-7.2.2-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fd04ef36b4a6d599bbdb225dd1d3f51e00105f6d48a28f006da7f9822f2606d8", size = 148972, upload-time = "2026-01-28T18:15:29.342Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/78/0acd37ca84ce3ddffaa92ef0f571e073faa6d8ff1f0559ab1272188ea2be/psutil-7.2.2-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b58fabe35e80b264a4e3bb23e6b96f9e45a3df7fb7eed419ac0e5947c61e47cc", size = 148266, upload-time = "2026-01-28T18:15:31.597Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b4/90/e2159492b5426be0c1fef7acba807a03511f97c5f86b3caeda6ad92351a7/psutil-7.2.2-cp37-abi3-win_amd64.whl", hash = "sha256:eb7e81434c8d223ec4a219b5fc1c47d0417b12be7ea866e24fb5ad6e84b3d988", size = 137737, upload-time = "2026-01-28T18:15:33.849Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8c/c7/7bb2e321574b10df20cbde462a94e2b71d05f9bbda251ef27d104668306a/psutil-7.2.2-cp37-abi3-win_arm64.whl", hash = "sha256:8c233660f575a5a89e6d4cb65d9f938126312bca76d8fe087b947b3a1aaac9ee", size = 134617, upload-time = "2026-01-28T18:15:36.514Z" }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, +] + +[[package]] +name = "pycparser" +version = "3.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1b/7d/92392ff7815c21062bea51aa7b87d45576f649f16458d78b7cf94b9ab2e6/pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", size = 103492, upload-time = "2026-01-21T14:26:51.89Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992", size = 48172, upload-time = "2026-01-21T14:26:50.693Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = 
"sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyopengl" +version = "3.1.10" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6f/16/912b7225d56284859cd9a672827f18be43f8012f8b7b932bc4bd959a298e/pyopengl-3.1.10.tar.gz", hash = "sha256:c4a02d6866b54eb119c8e9b3fb04fa835a95ab802dd96607ab4cdb0012df8335", size = 1915580, upload-time = "2025-08-18T02:33:01.76Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/e4/1ba6f44e491c4eece978685230dde56b14d51a0365bc1b774ddaa94d14cd/pyopengl-3.1.10-py3-none-any.whl", hash = "sha256:794a943daced39300879e4e47bd94525280685f42dbb5a998d336cfff151d74f", size = 3194996, upload-time = "2025-08-18T02:32:59.902Z" }, +] + +[[package]] +name = "pyopengl-accelerate" +version = "3.1.10" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6d/49/dffa946f07e84eb4666d7fb5553a1d569d54405c863ff7a19b71758712f6/pyopengl_accelerate-3.1.10.tar.gz", hash = "sha256:82751c83f0a6f732b8b5923990edc2441d38176a98756b1718e8d6c4379f5a71", size = 21930, upload-time = "2025-08-18T02:34:06.752Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4c/6b/ba5b6aeff08ef764f9625f092fef27aab07279fbffe0ec50c60359020b6e/pyopengl_accelerate-3.1.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a0adc05a0585f98128dcfea80bca5fd016336a6f571a372721dc8e0406b12c2e", size = 424760, upload-time = "2025-08-18T02:33:24.431Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a4/3d/8a6dde7092eeba0ddb0d75359e2cc1c7b61947b6f73da0bb73ac7ef9e997/pyopengl_accelerate-3.1.10-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3282e7edd22d7126a3874c08ebcb146122031d0cea774c52f872ae56aaf17f39", size = 3279849, upload-time = "2025-08-18T02:33:25.628Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/4f/b4de3294248fcf0739356cc3730fb651b2106ddbe05fc528307df4331955/pyopengl_accelerate-3.1.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7366b6be51aca799d6b4bfbe6a5e9e6e4b0968956d0920676498e6ac2b9ce028", size = 3140923, upload-time = "2025-08-18T02:33:27.106Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2a/03/913312996a6345fec9436dc57069ac8c8bc31e306d3fba9e4825fc04ece2/pyopengl_accelerate-3.1.10-cp312-cp312-win32.whl", hash = "sha256:f014f2f5a0d68c751ad31c89805c043a08c423a8d6d5e7be620f6d819c6fa971", size = 328551, upload-time = "2025-08-18T02:33:28.619Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fe/1f/6e839b7ba4823c34f762d3f3e9e8f8405bd3e210314b60df862e030ad8ad/pyopengl_accelerate-3.1.10-cp312-cp312-win_amd64.whl", hash = "sha256:6fa0963d686462dc3a03d10123af385f012001c8b020e908774df29741136bd5", size = 389822, upload-time = "2025-08-18T02:33:29.685Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/71/52/54b93275b76ea2beed7ffbf7e2e2392254c6f5927c438a2db9b972aec255/pyopengl_accelerate-3.1.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bec9014f702997578b5e797d81b3e041cc5b41f0457c0184473f867c38d52665", size = 413558, upload-time = "2025-08-18T02:33:30.846Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/32/08/a491f4c74e53d22189e1aa9049b51ec44ed993e596d41c85d1732cd33ea2/pyopengl_accelerate-3.1.10-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:171e22f7d5055479b0bd7f98192525f8890af4bf19765bea7e860885cbce622e", size = 3139831, upload-time = "2025-08-18T02:33:32.415Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/56/ffdf6adff65022d4df844100d6510c26def358b012c25766983f402b35ff/pyopengl_accelerate-3.1.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5eea15130bc248ca83dce31736afb190c21a0186aa02cefdf4019bc0f3f857f1", size = 3000578, upload-time = "2025-08-18T02:33:33.69Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6b/a1/6c7a6da865e07665f196ce3a443ed5d6ebd297f459bfa61288a577abaef9/pyopengl_accelerate-3.1.10-cp313-cp313-win32.whl", hash = "sha256:2354d2a2eb1266c984f061148460b2cb02146e90bd501b984d03bcc34f48b8d8", size = 325101, upload-time = "2025-08-18T02:33:34.963Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7c/d4/93cd104a967dca51cf8ee2edf18bea2b697e9d87ece0beaa6f3ca5f5515d/pyopengl_accelerate-3.1.10-cp313-cp313-win_amd64.whl", hash = "sha256:551a46e907ea24884b6795b290b3b3b0618d217d38cf7cae9c825641375a74fe", size = 385238, upload-time = "2025-08-18T02:33:36.19Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ae/8d/f86cb315b34fdc670c747b5041bec468258bbda55f9809f004dcbf3ff0fe/pyopengl_accelerate-3.1.10-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:415b6ddceaa6a3fed699f1e7a6aac0de6f37e5ae19349c4ed3a7c38e17a8e09e", size = 416781, upload-time = "2025-08-18T02:33:38.021Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5e/14/215c66da71a065734217ea53d884c5c008a35757d5d17f667799cdee8fbc/pyopengl_accelerate-3.1.10-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6b8aabfb227ad579f3dfabe8f0e9909026d1af540baf6405b38efa4f10fb9eb", size = 3090132, upload-time = "2025-08-18T02:33:39.517Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/20/7e/3a4fa502e6e1531af3a89b736a001886f2c2a88f83fc44b853ef177beb89/pyopengl_accelerate-3.1.10-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3d521cd41a4e62c38ec315770049dfc5c4b25e5c1debd4038cfbf75e05022f7c", size = 2963250, upload-time = "2025-08-18T02:33:41.211Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/29/64/2216644cd1d58771634ec7decbfe973f3eb6ac545224181efaaf30b15197/pyopengl_accelerate-3.1.10-cp314-cp314-win32.whl", hash = "sha256:fce5e6015ba2db99f0ebb07e4cd98ca4e70b419f186ea042cb70379228c1a339", size = 334199, upload-time = "2025-08-18T02:33:42.519Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/f0/da152261f49e248b1530460367ee35fcc78c0878685fc07b39a6f53223d4/pyopengl_accelerate-3.1.10-cp314-cp314-win_amd64.whl", hash = "sha256:081b6b0f39f35f750ddf60fc865556e6d01481eb818ce1482f79cfde2abe598b", size = 394806, upload-time = "2025-08-18T02:33:43.638Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b0/25/7516f1fdeba4d13da3941b198a368cf72d6d17f9bcc48d76476f75973866/pyopengl_accelerate-3.1.10-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a35503ee877b35264172903eb6c8110b021531090a4647a81252f52a68757d4d", size = 462905, upload-time = "2025-08-18T02:33:45.15Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9b/72/d8bcd7a2264098dfe36ac15dfded7a8a513d56e4e6adfc9e651fd17cccb1/pyopengl_accelerate-3.1.10-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:db2005912defa77a1e7390024d52b295c85a1c7ebd85ba073f1839d4bfffe855", size = 3379961, upload-time = "2025-08-18T02:33:46.602Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/17/a7/e74bf3a52d7e97f92e8a3cdd7edba7f50c6053536f54a7124772fcbbcf62/pyopengl_accelerate-3.1.10-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fcf6827b1fff244ca6953068c17285b9eae173a20fccc1d9c193779f71b88d0f", size = 3254293, upload-time = "2025-08-18T02:33:48.105Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fa/e4/c840324bab4efed955f258422a88a621e7462cafd627b3d2c9d314d4279d/pyopengl_accelerate-3.1.10-cp314-cp314t-win32.whl", hash = "sha256:e55ea6a85894ad74d6f09953e3d359fe27def9fed220588787993e49e42db1d0", size = 408410, upload-time = "2025-08-18T02:33:49.441Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/87/97/6312caecc721cf4e4fdac2a156dc57e403e3d9a17553d72e86653cbc92ef/pyopengl_accelerate-3.1.10-cp314-cp314t-win_amd64.whl", hash = "sha256:a2866cb65c45b013c2bf9995010824cc1b50ae91a4166746beb9ce241803e62a", size = 484776, upload-time = "2025-08-18T02:33:51.342Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-json-logger" +version = "4.0.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/29/bf/eca6a3d43db1dae7070f70e160ab20b807627ba953663ba07928cdd3dc58/python_json_logger-4.0.0.tar.gz", hash = "sha256:f58e68eb46e1faed27e0f574a55a0455eecd7b8a5b88b85a784519ba3cff047f", size = 17683, upload-time = "2025-10-06T04:15:18.984Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/51/e5/fecf13f06e5e5f67e8837d777d1bc43fac0ed2b77a676804df5c34744727/python_json_logger-4.0.0-py3-none-any.whl", hash = "sha256:af09c9daf6a813aa4cc7180395f50f2a9e5fa056034c9953aec92e381c5ba1e2", size = 15548, upload-time = "2025-10-06T04:15:17.553Z" }, +] + +[[package]] +name = "pywinpty" +version = "3.0.2" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/bb/a7cc2967c5c4eceb6cc49cfe39447d4bfc56e6c865e7c2249b6eb978935f/pywinpty-3.0.2.tar.gz", hash = "sha256:1505cc4cb248af42cb6285a65c9c2086ee9e7e574078ee60933d5d7fa86fb004", size = 30669, upload-time = "2025-10-03T21:16:29.205Z" } +wheels = [ + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/02/4e/1098484e042c9485f56f16eb2b69b43b874bd526044ee401512234cf9e04/pywinpty-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:99fdd9b455f0ad6419aba6731a7a0d2f88ced83c3c94a80ff9533d95fa8d8a9e", size = 2050391, upload-time = "2025-10-03T21:19:01.642Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fc/19/b757fe28008236a4a713e813283721b8a40aa60cd7d3f83549f2e25a3155/pywinpty-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:18f78b81e4cfee6aabe7ea8688441d30247b73e52cd9657138015c5f4ee13a51", size = 2050057, upload-time = "2025-10-03T21:19:26.732Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/44/cbae12ecf6f4fa4129c36871fd09c6bef4f98d5f625ecefb5e2449765508/pywinpty-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:663383ecfab7fc382cc97ea5c4f7f0bb32c2f889259855df6ea34e5df42d305b", size = 2049874, upload-time = "2025-10-03T21:18:53.923Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ca/15/f12c6055e2d7a617d4d5820e8ac4ceaff849da4cb124640ef5116a230771/pywinpty-3.0.2-cp314-cp314-win_amd64.whl", hash = "sha256:28297cecc37bee9f24d8889e47231972d6e9e84f7b668909de54f36ca785029a", size = 2050386, upload-time = "2025-10-03T21:18:50.477Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/24/c6907c5bb06043df98ad6a0a0ff5db2e0affcecbc3b15c42404393a3f72a/pywinpty-3.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:34b55ae9a1b671fe3eae071d86618110538e8eaad18fcb1531c0830b91a82767", size = 2049834, upload-time = "2025-10-03T21:19:25.688Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, +] + +[[package]] +name = "pyzed" +version = "0.1.0" +source = { directory = "libs/pyzed_pkg" } + +[[package]] +name = "pyzmq" +version = "27.1.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "cffi", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/92/e7/038aab64a946d535901103da16b953c8c9cc9c961dadcbf3609ed6428d23/pyzmq-27.1.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:452631b640340c928fa343801b0d07eb0c3789a5ffa843f6e1a9cee0ba4eb4fc", size = 1306279, upload-time = "2025-09-08T23:08:03.807Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e8/5e/c3c49fdd0f535ef45eefcc16934648e9e59dace4a37ee88fc53f6cd8e641/pyzmq-27.1.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1c179799b118e554b66da67d88ed66cd37a169f1f23b5d9f0a231b4e8d44a113", size = 895645, upload-time = "2025-09-08T23:08:05.301Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f8/e5/b0b2504cb4e903a74dcf1ebae157f9e20ebb6ea76095f6cfffea28c42ecd/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3837439b7f99e60312f0c926a6ad437b067356dc2bc2ec96eb395fd0fe804233", size = 652574, upload-time = "2025-09-08T23:08:06.828Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31", size = 840995, upload-time = "2025-09-08T23:08:08.396Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/bb/b79798ca177b9eb0825b4c9998c6af8cd2a7f15a6a1a4272c1d1a21d382f/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0de3028d69d4cdc475bfe47a6128eb38d8bc0e8f4d69646adfbcd840facbac28", size = 
1642070, upload-time = "2025-09-08T23:08:09.989Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9c/80/2df2e7977c4ede24c79ae39dcef3899bfc5f34d1ca7a5b24f182c9b7a9ca/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf44a7763aea9298c0aa7dbf859f87ed7012de8bda0f3977b6fb1d96745df856", size = 2021121, upload-time = "2025-09-08T23:08:11.907Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/46/bd/2d45ad24f5f5ae7e8d01525eb76786fa7557136555cac7d929880519e33a/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f30f395a9e6fbca195400ce833c731e7b64c3919aa481af4d88c3759e0cb7496", size = 1878550, upload-time = "2025-09-08T23:08:13.513Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e6/2f/104c0a3c778d7c2ab8190e9db4f62f0b6957b53c9d87db77c284b69f33ea/pyzmq-27.1.0-cp312-abi3-win32.whl", hash = "sha256:250e5436a4ba13885494412b3da5d518cd0d3a278a1ae640e113c073a5f88edd", size = 559184, upload-time = "2025-09-08T23:08:15.163Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fc/7f/a21b20d577e4100c6a41795842028235998a643b1ad406a6d4163ea8f53e/pyzmq-27.1.0-cp312-abi3-win_amd64.whl", hash = "sha256:9ce490cf1d2ca2ad84733aa1d69ce6855372cb5ce9223802450c9b2a7cba0ccf", size = 619480, upload-time = "2025-09-08T23:08:17.192Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/78/c2/c012beae5f76b72f007a9e91ee9401cb88c51d0f83c6257a03e785c81cc2/pyzmq-27.1.0-cp312-abi3-win_arm64.whl", hash = "sha256:75a2f36223f0d535a0c919e23615fc85a1e23b71f40c7eb43d7b1dedb4d8f15f", size = 552993, upload-time = "2025-09-08T23:08:18.926Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/cb/84a13459c51da6cec1b7b1dc1a47e6db6da50b77ad7fd9c145842750a011/pyzmq-27.1.0-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:93ad4b0855a664229559e45c8d23797ceac03183c7b6f5b4428152a6b06684a5", size = 1122436, upload-time = "2025-09-08T23:08:20.801Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/dc/b6/94414759a69a26c3dd674570a81813c46a078767d931a6c70ad29fc585cb/pyzmq-27.1.0-cp313-cp313-android_24_x86_64.whl", hash = "sha256:fbb4f2400bfda24f12f009cba62ad5734148569ff4949b1b6ec3b519444342e6", size = 1156301, upload-time = "2025-09-08T23:08:22.47Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a5/ad/15906493fd40c316377fd8a8f6b1f93104f97a752667763c9b9c1b71d42d/pyzmq-27.1.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:e343d067f7b151cfe4eb3bb796a7752c9d369eed007b91231e817071d2c2fec7", size = 1341197, upload-time = "2025-09-08T23:08:24.286Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/14/1d/d343f3ce13db53a54cb8946594e567410b2125394dafcc0268d8dda027e0/pyzmq-27.1.0-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:08363b2011dec81c354d694bdecaef4770e0ae96b9afea70b3f47b973655cc05", size = 897275, upload-time = "2025-09-08T23:08:26.063Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/69/2d/d83dd6d7ca929a2fc67d2c3005415cdf322af7751d773524809f9e585129/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d54530c8c8b5b8ddb3318f481297441af102517602b569146185fa10b63f4fa9", size = 660469, upload-time = "2025-09-08T23:08:27.623Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3e/cd/9822a7af117f4bc0f1952dbe9ef8358eb50a24928efd5edf54210b850259/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:6f3afa12c392f0a44a2414056d730eebc33ec0926aae92b5ad5cf26ebb6cc128", size = 847961, upload-time = "2025-09-08T23:08:29.672Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9a/12/f003e824a19ed73be15542f172fd0ec4ad0b60cf37436652c93b9df7c585/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c65047adafe573ff023b3187bb93faa583151627bc9c51fc4fb2c561ed689d39", size = 1650282, upload-time = "2025-09-08T23:08:31.349Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d5/4a/e82d788ed58e9a23995cee70dbc20c9aded3d13a92d30d57ec2291f1e8a3/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:90e6e9441c946a8b0a667356f7078d96411391a3b8f80980315455574177ec97", size = 2024468, upload-time = "2025-09-08T23:08:33.543Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d9/94/2da0a60841f757481e402b34bf4c8bf57fa54a5466b965de791b1e6f747d/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:add071b2d25f84e8189aaf0882d39a285b42fa3853016ebab234a5e78c7a43db", size = 1885394, upload-time = "2025-09-08T23:08:35.51Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4f/6f/55c10e2e49ad52d080dc24e37adb215e5b0d64990b57598abc2e3f01725b/pyzmq-27.1.0-cp313-cp313t-win32.whl", hash = "sha256:7ccc0700cfdf7bd487bea8d850ec38f204478681ea02a582a8da8171b7f90a1c", size = 574964, upload-time = "2025-09-08T23:08:37.178Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/87/4d/2534970ba63dd7c522d8ca80fb92777f362c0f321900667c615e2067cb29/pyzmq-27.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8085a9fba668216b9b4323be338ee5437a235fe275b9d1610e422ccc279733e2", size = 641029, upload-time = "2025-09-08T23:08:40.595Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f6/fa/f8aea7a28b0641f31d40dea42d7ef003fded31e184ef47db696bc74cd610/pyzmq-27.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:6bb54ca21bcfe361e445256c15eedf083f153811c37be87e0514934d6913061e", size = 561541, upload-time = "2025-09-08T23:08:42.668Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/87/45/19efbb3000956e82d0331bafca5d9ac19ea2857722fa2caacefb6042f39d/pyzmq-27.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ce980af330231615756acd5154f29813d553ea555485ae712c491cd483df6b7a", size = 1341197, upload-time = "2025-09-08T23:08:44.973Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/48/43/d72ccdbf0d73d1343936296665826350cb1e825f92f2db9db3e61c2162a2/pyzmq-27.1.0-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1779be8c549e54a1c38f805e56d2a2e5c009d26de10921d7d51cfd1c8d4632ea", size = 897175, upload-time = "2025-09-08T23:08:46.601Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2f/2e/a483f73a10b65a9ef0161e817321d39a770b2acf8bcf3004a28d90d14a94/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7200bb0f03345515df50d99d3db206a0a6bee1955fbb8c453c76f5bf0e08fb96", size = 660427, upload-time = "2025-09-08T23:08:48.187Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f5/d2/5f36552c2d3e5685abe60dfa56f91169f7a2d99bbaf67c5271022ab40863/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01c0e07d558b06a60773744ea6251f769cd79a41a97d11b8bf4ab8f034b0424d", size = 847929, upload-time = "2025-09-08T23:08:49.76Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c4/2a/404b331f2b7bf3198e9945f75c4c521f0c6a3a23b51f7a4a401b94a13833/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:80d834abee71f65253c91540445d37c4c561e293ba6e741b992f20a105d69146", size = 1650193, upload-time = "2025-09-08T23:08:51.7Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1c/0b/f4107e33f62a5acf60e3ded67ed33d79b4ce18de432625ce2fc5093d6388/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:544b4e3b7198dde4a62b8ff6685e9802a9a1ebf47e77478a5eb88eca2a82f2fd", size = 2024388, upload-time = "2025-09-08T23:08:53.393Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0d/01/add31fe76512642fd6e40e3a3bd21f4b47e242c8ba33efb6809e37076d9b/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cedc4c68178e59a4046f97eca31b148ddcf51e88677de1ef4e78cf06c5376c9a", size = 1885316, upload-time = "2025-09-08T23:08:55.702Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c4/59/a5f38970f9bf07cee96128de79590bb354917914a9be11272cfc7ff26af0/pyzmq-27.1.0-cp314-cp314t-win32.whl", hash = "sha256:1f0b2a577fd770aa6f053211a55d1c47901f4d537389a034c690291485e5fe92", size = 587472, upload-time = "2025-09-08T23:08:58.18Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/d8/78b1bad170f93fcf5e3536e70e8fadac55030002275c9a29e8f5719185de/pyzmq-27.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:19c9468ae0437f8074af379e986c5d3d7d7bfe033506af442e8c879732bedbe0", size = 661401, upload-time = "2025-09-08T23:08:59.802Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/81/d6/4bfbb40c9a0b42fc53c7cf442f6385db70b40f74a783130c5d0a5aa62228/pyzmq-27.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dc5dbf68a7857b59473f7df42650c621d7e8923fb03fa74a526890f4d33cc4d7", size = 575170, upload-time = "2025-09-08T23:09:01.418Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/ea/a9387748e2d111c3c2b275ba970b735e04e15cdb1eb30693b6b5708c4dbd/rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b", size = 5513, upload-time = "2021-05-12T16:37:54.178Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490, upload-time = "2021-05-12T16:37:52.536Z" }, +] + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/88/f270de456dd7d11dcc808abfa291ecdd3f45ff44e3b549ffa01b126464d0/rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055", size = 6760, upload-time = "2019-10-28T16:00:19.144Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/51/17023c0f8f1869d8806b979a2bffa3f861f26a3f1a66b094288323fba52f/rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9", size = 4242, upload-time = "2019-10-28T16:00:13.976Z" }, +] + +[[package]] +name = "rfc3987-syntax" +version = "1.1.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "lark" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/06/37c1a5557acf449e8e406a830a05bf885ac47d33270aec454ef78675008d/rfc3987_syntax-1.1.0.tar.gz", hash = "sha256:717a62cbf33cffdd16dfa3a497d81ce48a660ea691b1ddd7be710c22f00b4a0d", size = 14239, upload-time = "2025-07-18T01:05:05.015Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7e/71/44ce230e1b7fadd372515a97e32a83011f906ddded8d03e3c6aafbdedbb7/rfc3987_syntax-1.1.0-py3-none-any.whl", hash = "sha256:6c3d97604e4c5ce9f714898e05401a0445a641cfa276432b0a648c80856f6a3f", size = 8046, upload-time = "2025-07-18T01:05:03.843Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.30.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/63/8e/0cfa7ae158e15e143fe03993b5bcd743a59f541f5952e1546b1ac1b5fd45/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1", size = 414492, upload-time = "2025-11-30T20:22:26.505Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/1b/6f8f29f3f995c7ffdde46a626ddccd7c63aefc0efae881dc13b6e5d5bb16/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23", size = 394080, upload-time = "2025-11-30T20:22:27.934Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6d/d5/a266341051a7a3ca2f4b750a3aa4abc986378431fc2da508c5034d081b70/rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6", size = 408680, upload-time = "2025-11-30T20:22:29.341Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/10/3b/71b725851df9ab7a7a4e33cf36d241933da66040d195a84781f49c50490c/rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51", size = 423589, upload-time = "2025-11-30T20:22:31.469Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ed/dc/d61221eb88ff410de3c49143407f6f3147acf2538c86f2ab7ce65ae7d5f9/rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2", size = 374887, upload-time = "2025-11-30T20:22:41.812Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/fd/32/55fb50ae104061dbc564ef15cc43c013dc4a9f4527a1f4d99baddf56fe5f/rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8", size = 358904, upload-time = "2025-11-30T20:22:43.479Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/70/faed8186300e3b9bdd138d0273109784eea2396c68458ed580f885dfe7ad/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4", size = 389945, upload-time = "2025-11-30T20:22:44.819Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bd/a8/073cac3ed2c6387df38f71296d002ab43496a96b92c823e76f46b8af0543/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136", size = 407783, upload-time = "2025-11-30T20:22:46.103Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/77/57/5999eb8c58671f1c11eba084115e77a8899d6e694d2a18f69f0ba471ec8b/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7", size = 515021, upload-time = "2025-11-30T20:22:47.458Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e0/af/5ab4833eadc36c0a8ed2bc5c0de0493c04f6c06de223170bd0798ff98ced/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2", size = 414589, upload-time = "2025-11-30T20:22:48.872Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/de/f7192e12b21b9e9a68a6d0f249b4af3fdcdff8418be0767a627564afa1f1/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6", size = 394025, upload-time = "2025-11-30T20:22:50.196Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/91/c4/fc70cd0249496493500e7cc2de87504f5aa6509de1e88623431fec76d4b6/rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e", size = 408895, upload-time = "2025-11-30T20:22:51.87Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/58/95/d9275b05ab96556fefff73a385813eb66032e4c99f411d0795372d9abcea/rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d", size = 422799, upload-time = "2025-11-30T20:22:53.341Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/06/c1/3088fc04b6624eb12a57eb814f0d4997a44b0d208d6cace713033ff1a6ba/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7", size = 572731, upload-time = "2025-11-30T20:22:54.778Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d8/42/c612a833183b39774e8ac8fecae81263a68b9583ee343db33ab571a7ce55/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31", size = 599027, upload-time = "2025-11-30T20:22:56.212Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5f/60/525a50f45b01d70005403ae0e25f43c0384369ad24ffe46e8d9068b50086/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95", size = 563020, upload-time = "2025-11-30T20:22:58.2Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0b/5d/47c4655e9bcd5ca907148535c10e7d489044243cc9941c16ed7cd53be91d/rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d", size = 223139, upload-time = "2025-11-30T20:23:00.209Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f2/e1/485132437d20aa4d3e1d8b3fb5a5e65aa8139f1e097080c2a8443201742c/rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15", size = 240224, upload-time = "2025-11-30T20:23:02.008Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/24/95/ffd128ed1146a153d928617b0ef673960130be0009c77d8fbf0abe306713/rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1", size = 230645, upload-time = "2025-11-30T20:23:03.43Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ff/1b/b10de890a0def2a319a2626334a7f0ae388215eb60914dbac8a3bae54435/rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a", size = 364443, upload-time = "2025-11-30T20:23:04.878Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0d/bf/27e39f5971dc4f305a4fb9c672ca06f290f7c4e261c568f3dea16a410d47/rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e", size = 353375, upload-time = "2025-11-30T20:23:06.342Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/40/58/442ada3bba6e8e6615fc00483135c14a7538d2ffac30e2d933ccf6852232/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000", size = 383850, upload-time = "2025-11-30T20:23:07.825Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/14/14/f59b0127409a33c6ef6f5c1ebd5ad8e32d7861c9c7adfa9a624fc3889f6c/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db", size = 392812, upload-time = "2025-11-30T20:23:09.228Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b3/66/e0be3e162ac299b3a22527e8913767d869e6cc75c46bd844aa43fb81ab62/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2", size = 517841, upload-time = "2025-11-30T20:23:11.186Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3d/55/fa3b9cf31d0c963ecf1ba777f7cf4b2a2c976795ac430d24a1f43d25a6ba/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa", size = 408149, upload-time = "2025-11-30T20:23:12.864Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/60/ca/780cf3b1a32b18c0f05c441958d3758f02544f1d613abf9488cd78876378/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083", size = 383843, upload-time = "2025-11-30T20:23:14.638Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/82/86/d5f2e04f2aa6247c613da0c1dd87fcd08fa17107e858193566048a1e2f0a/rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9", size = 396507, upload-time = "2025-11-30T20:23:16.105Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4b/9a/453255d2f769fe44e07ea9785c8347edaf867f7026872e76c1ad9f7bed92/rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0", size = 414949, upload-time = "2025-11-30T20:23:17.539Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/a3/31/622a86cdc0c45d6df0e9ccb6becdba5074735e7033c20e401a6d9d0e2ca0/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94", size = 565790, upload-time = "2025-11-30T20:23:19.029Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1c/5d/15bbf0fb4a3f58a3b1c67855ec1efcc4ceaef4e86644665fff03e1b66d8d/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08", size = 590217, upload-time = "2025-11-30T20:23:20.885Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6d/61/21b8c41f68e60c8cc3b2e25644f0e3681926020f11d06ab0b78e3c6bbff1/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27", size = 555806, upload-time = "2025-11-30T20:23:22.488Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f9/39/7e067bb06c31de48de3eb200f9fc7c58982a4d3db44b07e73963e10d3be9/rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6", size = 211341, upload-time = "2025-11-30T20:23:24.449Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0a/4d/222ef0b46443cf4cf46764d9c630f3fe4abaa7245be9417e56e9f52b8f65/rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d", size = 225768, upload-time = "2025-11-30T20:23:25.908Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/86/81/dad16382ebbd3d0e0328776d8fd7ca94220e4fa0798d1dc5e7da48cb3201/rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0", size = 362099, upload-time = "2025-11-30T20:23:27.316Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2b/60/19f7884db5d5603edf3c6bce35408f45ad3e97e10007df0e17dd57af18f8/rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be", size = 353192, upload-time = "2025-11-30T20:23:29.151Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/bf/c4/76eb0e1e72d1a9c4703c69607cec123c29028bff28ce41588792417098ac/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f", size = 384080, upload-time = "2025-11-30T20:23:30.785Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/87/87ea665e92f3298d1b26d78814721dc39ed8d2c74b86e83348d6b48a6f31/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f", size = 394841, upload-time = "2025-11-30T20:23:32.209Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/77/ad/7783a89ca0587c15dcbf139b4a8364a872a25f861bdb88ed99f9b0dec985/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87", size = 516670, upload-time = "2025-11-30T20:23:33.742Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5b/3c/2882bdac942bd2172f3da574eab16f309ae10a3925644e969536553cb4ee/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18", size = 408005, upload-time = "2025-11-30T20:23:35.253Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ce/81/9a91c0111ce1758c92516a3e44776920b579d9a7c09b2b06b642d4de3f0f/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad", size = 382112, upload-time = "2025-11-30T20:23:36.842Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cf/8e/1da49d4a107027e5fbc64daeab96a0706361a2918da10cb41769244b805d/rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07", size = 399049, upload-time = "2025-11-30T20:23:38.343Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/df/5a/7ee239b1aa48a127570ec03becbb29c9d5a9eb092febbd1699d567cae859/rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f", size = 415661, upload-time = "2025-11-30T20:23:40.263Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/70/ea/caa143cf6b772f823bc7929a45da1fa83569ee49b11d18d0ada7f5ee6fd6/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65", size = 565606, upload-time = "2025-11-30T20:23:42.186Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/64/91/ac20ba2d69303f961ad8cf55bf7dbdb4763f627291ba3d0d7d67333cced9/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f", size = 591126, upload-time = "2025-11-30T20:23:44.086Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/21/20/7ff5f3c8b00c8a95f75985128c26ba44503fb35b8e0259d812766ea966c7/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53", size = 553371, upload-time = "2025-11-30T20:23:46.004Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/c7/81dadd7b27c8ee391c132a6b192111ca58d866577ce2d9b0ca157552cce0/rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed", size = 215298, upload-time = "2025-11-30T20:23:47.696Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3e/d2/1aaac33287e8cfb07aab2e6b8ac1deca62f6f65411344f1433c55e6f3eb8/rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950", size = 228604, upload-time = "2025-11-30T20:23:49.501Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e8/95/ab005315818cc519ad074cb7784dae60d939163108bd2b394e60dc7b5461/rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6", size = 222391, upload-time = "2025-11-30T20:23:50.96Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/9e/68/154fe0194d83b973cdedcdcc88947a2752411165930182ae41d983dcefa6/rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb", size = 364868, upload-time = "2025-11-30T20:23:52.494Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/83/69/8bbc8b07ec854d92a8b75668c24d2abcb1719ebf890f5604c61c9369a16f/rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8", size = 353747, upload-time = "2025-11-30T20:23:54.036Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ab/00/ba2e50183dbd9abcce9497fa5149c62b4ff3e22d338a30d690f9af970561/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7", size = 383795, upload-time = "2025-11-30T20:23:55.556Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/05/6f/86f0272b84926bcb0e4c972262f54223e8ecc556b3224d281e6598fc9268/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898", size = 393330, upload-time = "2025-11-30T20:23:57.033Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/cb/e9/0e02bb2e6dc63d212641da45df2b0bf29699d01715913e0d0f017ee29438/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e", size = 518194, upload-time = "2025-11-30T20:23:58.637Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ee/ca/be7bca14cf21513bdf9c0606aba17d1f389ea2b6987035eb4f62bd923f25/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419", size = 408340, upload-time = "2025-11-30T20:24:00.2Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/c7/736e00ebf39ed81d75544c0da6ef7b0998f8201b369acf842f9a90dc8fce/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551", size = 383765, upload-time = "2025-11-30T20:24:01.759Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4a/3f/da50dfde9956aaf365c4adc9533b100008ed31aea635f2b8d7b627e25b49/rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8", size = 396834, upload-time = "2025-11-30T20:24:03.687Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/4e/00/34bcc2565b6020eab2623349efbdec810676ad571995911f1abdae62a3a0/rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5", size = 415470, upload-time = "2025-11-30T20:24:05.232Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8c/28/882e72b5b3e6f718d5453bd4d0d9cf8df36fddeb4ddbbab17869d5868616/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404", size = 565630, upload-time = "2025-11-30T20:24:06.878Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3b/97/04a65539c17692de5b85c6e293520fd01317fd878ea1995f0367d4532fb1/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856", size = 591148, upload-time = "2025-11-30T20:24:08.445Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/85/70/92482ccffb96f5441aab93e26c4d66489eb599efdcf96fad90c14bbfb976/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40", size = 556030, upload-time = "2025-11-30T20:24:10.956Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/20/53/7c7e784abfa500a2b6b583b147ee4bb5a2b3747a9166bab52fec4b5b5e7d/rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0", size = 211570, upload-time = "2025-11-30T20:24:12.735Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, +] + +[[package]] +name = "send2trash" +version = "2.1.0" +source = { 
registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c5/f0/184b4b5f8d00f2a92cf96eec8967a3d550b52cf94362dad1100df9e48d57/send2trash-2.1.0.tar.gz", hash = "sha256:1c72b39f09457db3c05ce1d19158c2cbef4c32b8bedd02c155e49282b7ea7459", size = 17255, upload-time = "2026-01-14T06:27:36.056Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1c/78/504fdd027da3b84ff1aecd9f6957e65f35134534ccc6da8628eb71e76d3f/send2trash-2.1.0-py3-none-any.whl", hash = "sha256:0da2f112e6d6bb22de6aa6daa7e144831a4febf2a87261451c4ad849fe9a873c", size = 17610, upload-time = "2026-01-14T06:27:35.218Z" }, +] + +[[package]] +name = "setuptools" +version = "80.10.2" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/76/95/faf61eb8363f26aa7e1d762267a8d602a1b26d4f3a1e758e92cb3cb8b054/setuptools-80.10.2.tar.gz", hash = "sha256:8b0e9d10c784bf7d262c4e5ec5d4ec94127ce206e8738f29a437945fbc219b70", size = 1200343, upload-time = "2026-01-25T22:38:17.252Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/94/b8/f1f62a5e3c0ad2ff1d189590bfa4c46b4f3b6e49cef6f26c6ee4e575394d/setuptools-80.10.2-py3-none-any.whl", hash = "sha256:95b30ddfb717250edb492926c92b5221f7ef3fbcc2b07579bcd4a27da21d0173", size = 1064234, upload-time = "2026-01-25T22:38:15.216Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.8.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7b/ae/2d9c981590ed9999a0d91755b47fc74f74de286b0f5cee14c9269041e6c4/soupsieve-2.8.3.tar.gz", hash = "sha256:3267f1eeea4251fb42728b6dfb746edc9acaffc4a45b27e19450b676586e8349", size = 118627, upload-time = "2026-01-20T04:27:02.457Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/46/2c/1462b1d0a634697ae9e55b3cecdcb64788e8b7d63f54d923fcd0bb140aed/soupsieve-2.8.3-py3-none-any.whl", hash = "sha256:ed64f2ba4eebeab06cc4962affce381647455978ffc1e36bb79a545b91f45a95", size = 37016, upload-time = "2026-01-20T04:27:01.012Z" }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } +wheels = [ + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" }, +] + +[[package]] +name = "terminado" +version = "0.18.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "ptyprocess", marker = "os_name != 'nt'" }, + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "tornado" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8a/11/965c6fd8e5cc254f1fe142d547387da17a8ebfd75a3455f637c663fb38a0/terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e", size = 32701, upload-time = "2024-03-12T14:34:39.026Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/6a/9e/2064975477fdc887e47ad42157e214526dcad8f317a948dee17e1659a62f/terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0", size = 14154, upload-time = "2024-03-12T14:34:36.569Z" }, +] + +[[package]] +name = "tinycss2" +version = "1.4.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/7a/fd/7a5ee21fd08ff70d3d33a5781c255cbe779659bd03278feb98b19ee550f4/tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7", size = 87085, upload-time = "2024-10-24T14:58:29.895Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" }, +] + +[[package]] +name = "tornado" +version = "6.5.4" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/37/1d/0a336abf618272d53f62ebe274f712e213f5a03c0b2339575430b8362ef2/tornado-6.5.4.tar.gz", hash = "sha256:a22fa9047405d03260b483980635f0b041989d8bcc9a313f8fe18b411d84b1d7", size = 513632, upload-time = "2025-12-15T19:21:03.836Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ab/a9/e94a9d5224107d7ce3cc1fab8d5dc97f5ea351ccc6322ee4fb661da94e35/tornado-6.5.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d6241c1a16b1c9e4cc28148b1cda97dd1c6cb4fb7068ac1bedc610768dff0ba9", size = 443909, upload-time = "2025-12-15T19:20:48.382Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/db/7e/f7b8d8c4453f305a51f80dbb49014257bb7d28ccb4bbb8dd328ea995ecad/tornado-6.5.4-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2d50f63dda1d2cac3ae1fa23d254e16b5e38153758470e9956cbc3d813d40843", size = 442163, upload-time = "2025-12-15T19:20:49.791Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/ba/b5/206f82d51e1bfa940ba366a8d2f83904b15942c45a78dd978b599870ab44/tornado-6.5.4-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1cf66105dc6acb5af613c054955b8137e34a03698aa53272dbda4afe252be17", size = 445746, upload-time = "2025-12-15T19:20:51.491Z" }, + { url = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/8e/9d/1a3338e0bd30ada6ad4356c13a0a6c35fbc859063fa7eddb309183364ac1/tornado-6.5.4-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50ff0a58b0dc97939d29da29cd624da010e7f804746621c78d14b80238669335", size = 445083, upload-time = "2025-12-15T19:20:52.778Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/d4/e51d52047e7eb9a582da59f32125d17c0482d065afd5d3bc435ff2120dc5/tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5fb5e04efa54cf0baabdd10061eb4148e0be137166146fff835745f59ab9f7f", size = 445315, upload-time = "2025-12-15T19:20:53.996Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/27/07/2273972f69ca63dbc139694a3fc4684edec3ea3f9efabf77ed32483b875c/tornado-6.5.4-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9c86b1643b33a4cd415f8d0fe53045f913bf07b4a3ef646b735a6a86047dda84", size = 446003, upload-time = "2025-12-15T19:20:56.101Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d1/83/41c52e47502bf7260044413b6770d1a48dda2f0246f95ee1384a3cd9c44a/tornado-6.5.4-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:6eb82872335a53dd063a4f10917b3efd28270b56a33db69009606a0312660a6f", size = 445412, upload-time = "2025-12-15T19:20:57.398Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/10/c7/bc96917f06cbee182d44735d4ecde9c432e25b84f4c2086143013e7b9e52/tornado-6.5.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6076d5dda368c9328ff41ab5d9dd3608e695e8225d1cd0fd1e006f05da3635a8", size = 445392, upload-time = "2025-12-15T19:20:58.692Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0c/1a/d7592328d037d36f2d2462f4bc1fbb383eec9278bc786c1b111cbbd44cfa/tornado-6.5.4-cp39-abi3-win32.whl", hash = "sha256:1768110f2411d5cd281bac0a090f707223ce77fd110424361092859e089b38d1", size = 446481, upload-time = "2025-12-15T19:21:00.008Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/d6/6d/c69be695a0a64fd37a97db12355a035a6d90f79067a3cf936ec2b1dc38cd/tornado-6.5.4-cp39-abi3-win_amd64.whl", hash = "sha256:fa07d31e0cd85c60713f2b995da613588aa03e1303d75705dca6af8babc18ddc", size = 446886, upload-time = "2025-12-15T19:21:01.287Z" }, + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/50/49/8dc3fd90902f70084bd2cd059d576ddb4f8bb44c2c7c0e33a11422acb17e/tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1", size = 445910, upload-time = "2025-12-15T19:21:02.571Z" }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = 
"https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/5e/a7/c202b344c5ca7daf398f3b8a477eeb205cf3b6f32e7ec3a6bac0629ca975/tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7", size = 196772, upload-time = "2025-12-13T17:45:35.667Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" }, +] + +[[package]] +name = "uri-template" +version = "1.3.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/31/c7/0336f2bd0bcbada6ccef7aaa25e443c118a704f828a0620c6fa0207c1b64/uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e7/00/3fca040d7cf8a32776d3d81a00c8ee7457e00f80c649f1e4a863c8321ae9/uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363" }, +] + +[[package]] +name = "urllib3" +version = "2.6.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.5.3" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/c2/62/a7c072fbfefb2980a00f99ca994279cb9ecf310cb2e6b2a4d2a28fe192b3/wcwidth-0.5.3.tar.gz", hash = "sha256:53123b7af053c74e9fe2e92ac810301f6139e64379031f7124574212fb3b4091", size = 157587, upload-time = "2026-01-31T03:52:10.92Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/3c/c1/d73f12f8cdb1891334a2ccf7389eed244d3941e74d80dd220badb937f3fb/wcwidth-0.5.3-py3-none-any.whl", hash = 
"sha256:d584eff31cd4753e1e5ff6c12e1edfdb324c995713f75d26c29807bb84bf649e", size = 92981, upload-time = "2026-01-31T03:52:09.14Z" }, +] + +[[package]] +name = "webcolors" +version = "25.10.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/1d/7a/eb316761ec35664ea5174709a68bbd3389de60d4a1ebab8808bfc264ed67/webcolors-25.10.0.tar.gz", hash = "sha256:62abae86504f66d0f6364c2a8520de4a0c47b80c03fc3a5f1815fedbef7c19bf", size = 53491, upload-time = "2025-10-31T07:51:03.977Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/e2/cc/e097523dd85c9cf5d354f78310927f1656c422bd7b2613b2db3e3f9a0f2c/webcolors-25.10.0-py3-none-any.whl", hash = "sha256:032c727334856fc0b968f63daa252a1ac93d33db2f5267756623c210e57a4f1d", size = 14905, upload-time = "2025-10-31T07:51:01.778Z" }, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" }, +] + +[[package]] +name = "websocket-client" +version = "1.9.0" +source = { registry = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/" } +sdist = { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/2c/41/aa4bf9664e4cda14c3b39865b12251e8e7d239f4cd0e3cc1b6c2ccde25c1/websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98", size = 70576, upload-time = "2025-10-07T21:16:36.495Z" } +wheels = [ + { url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = "2025-10-07T21:16:34.951Z" }, +] diff --git a/zed_settings/SN33076638.conf b/zed_settings/SN33076638.conf new file mode 100644 index 0000000..18dfde5 --- /dev/null +++ b/zed_settings/SN33076638.conf @@ -0,0 +1,127 @@ +[LEFT_CAM_2K] +fx=1911.38 +fy=1912.18 +cx=1123.1 +cy=647.087 +k1=-0.0664062 +k2=-0.031351 +p1=0.00088795 +p2=-0.000350279 +k3=-0.0398029 + +[RIGHT_CAM_2K] +fx=1903.94 +fy=1904.16 +cx=1072.53 +cy=608.963 +k1=-0.0679322 +k2=-0.0262978 +p1=-0.000561163 +p2=-0.000343976 +k3=-0.0470684 + +[LEFT_CAM_FHD] +fx=1911.38 +fy=1912.18 +cx=979.1 +cy=566.087 +k1=-0.0664062 +k2=-0.031351 +p1=0.00088795 +p2=-0.000350279 +k3=-0.0398029 + +[RIGHT_CAM_FHD] +fx=1903.94 +fy=1904.16 +cx=928.53 +cy=527.963 +k1=-0.0679322 +k2=-0.0262978 +p1=-0.000561163 +p2=-0.000343976 +k3=-0.0470684 + +[LEFT_CAM_HD] +fx=955.69 +fy=956.09 +cx=648.05 +cy=371.5435 +k1=-0.0664062 +k2=-0.031351 +p1=0.00088795 +p2=-0.000350279 +k3=-0.0398029 + +[RIGHT_CAM_HD] +fx=951.97 +fy=952.08 +cx=622.765 +cy=352.4815 +k1=-0.0679322 +k2=-0.0262978 +p1=-0.000561163 +p2=-0.000343976 +k3=-0.0470684 + +[LEFT_CAM_VGA] +fx=477.845 +fy=478.045 +cx=339.525 
+cy=193.27175 +k1=-0.0664062 +k2=-0.031351 +p1=0.00088795 +p2=-0.000350279 +k3=-0.0398029 + +[RIGHT_CAM_VGA] +fx=475.985 +fy=476.04 +cx=326.8825 +cy=183.74075 +k1=-0.0679322 +k2=-0.0262978 +p1=-0.000561163 +p2=-0.000343976 +k3=-0.0470684 + +[LEFT_DISTO] +k1=-3.662 +k2=41.9691 +k3=-18.0516 +k4=-3.56782 +k5=41.5331 +k6=-14.6458 +p1=0.000900044 +p2=-0.00034462 + +[RIGHT_DISTO] +k1=2.14661 +k2=34.6124 +k3=-15.2868 +k4=2.26238 +k5=34.4656 +k6=-11.903 +p1=-0.000546709 +p2=-0.000466277 + +[STEREO] +Baseline=120.002 +TY=-0.208016 +TZ=0.798937 +CV_2K=0.00533287 +CV_FHD=0.00533287 +CV_HD=0.00533287 +CV_VGA=0.00533287 +RX_2K=0.00314443 +RX_FHD=0.00314443 +RX_HD=0.00314443 +RX_VGA=0.00314443 +RZ_2K=0.00108773 +RZ_FHD=0.00108773 +RZ_HD=0.00108773 +RZ_VGA=0.00108773 + +[MISC] +Sensor_ID=0 diff --git a/zed_settings/SN41831756.conf b/zed_settings/SN41831756.conf new file mode 100755 index 0000000..0157b54 --- /dev/null +++ b/zed_settings/SN41831756.conf @@ -0,0 +1,102 @@ +[LEFT_CAM_FHD1200] +fx=736.358 +fy=736.108 +cx=942.549 +cy=595.264 +k1=-0.0147954 +k2=-0.0287526 +p1=-0.000134749 +p2=-3.3522e-06 +k3=0.00738787 + +[RIGHT_CAM_FHD1200] +fx=735.745 +fy=735.518 +cx=995.742 +cy=599.638 +k1=-0.0178319 +k2=-0.0256327 +p1=-4.72358e-05 +p2=1.44634e-06 +k3=0.00639919 + +[LEFT_CAM_FHD] +fx=736.358 +fy=736.108 +cx=942.549 +cy=535.264 +k1=-0.0147954 +k2=-0.0287526 +p1=-0.000134749 +p2=-3.3522e-06 +k3=0.00738787 + +[RIGHT_CAM_FHD] +fx=735.745 +fy=735.518 +cx=995.742 +cy=539.638 +k1=-0.0178319 +k2=-0.0256327 +p1=-4.72358e-05 +p2=1.44634e-06 +k3=0.00639919 + +[LEFT_CAM_SVGA] +fx=368.179 +fy=368.054 +cx=471.2745 +cy=297.632 +k1=-0.0147954 +k2=-0.0287526 +p1=-0.000134749 +p2=-3.3522e-06 +k3=0.00738787 + +[RIGHT_CAM_SVGA] +fx=367.8725 +fy=367.759 +cx=497.871 +cy=299.819 +k1=-0.0178319 +k2=-0.0256327 +p1=-4.72358e-05 +p2=1.44634e-06 +k3=0.00639919 + +[LEFT_DISTO] +k1=1.09888 +k2=1.0616 +k3=0.184704 +k4=1.10158 +k5=1.09857 +k6=0.265843 +p1=-0.000138723 +p2=-7.42334e-05 + +[RIGHT_DISTO] +k1=1.01197 +k2=1.27451 +k3=0.176115 +k4=1.01663 +k5=1.30548 +k6=0.267962 +p1=-4.72137e-05 +p2=0.000163026 + +[STEREO] +Baseline=120.003 +TY=0.00722453 +TZ=0.0242388 +CV_FHD=-0.0049486 +CV_SVGA=-0.0049486 +CV_FHD1200=-0.0049486 +RX_FHD=0.000570613 +RX_SVGA=0.000570613 +RX_FHD1200=0.000570613 +RZ_FHD=-0.000160916 +RZ_SVGA=-0.000160916 +RZ_FHD1200=-0.000160916 + +[MISC] +Sensor_ID=1 diff --git a/zed_settings/SN44289123.conf b/zed_settings/SN44289123.conf new file mode 100755 index 0000000..b363304 --- /dev/null +++ b/zed_settings/SN44289123.conf @@ -0,0 +1,102 @@ +[LEFT_CAM_FHD1200] +fx=736.835 +fy=736.679 +cx=1014.18 +cy=600.356 +k1=-0.00956936 +k2=-0.0352758 +p1=-0.000125508 +p2=-4.595e-05 +k3=0.0096785 + +[RIGHT_CAM_FHD1200] +fx=737.532 +fy=737.236 +cx=1017.28 +cy=613.02 +k1=-0.0170475 +k2=-0.0251567 +p1=9.40852e-05 +p2=-0.000147634 +k3=0.00614057 + +[LEFT_CAM_FHD] +fx=736.835 +fy=736.679 +cx=1014.18 +cy=540.356 +k1=-0.00956936 +k2=-0.0352758 +p1=-0.000125508 +p2=-4.595e-05 +k3=0.0096785 + +[RIGHT_CAM_FHD] +fx=737.532 +fy=737.236 +cx=1017.28 +cy=553.02 +k1=-0.0170475 +k2=-0.0251567 +p1=9.40852e-05 +p2=-0.000147634 +k3=0.00614057 + +[LEFT_CAM_SVGA] +fx=368.4175 +fy=368.3395 +cx=507.09 +cy=300.178 +k1=-0.00956936 +k2=-0.0352758 +p1=-0.000125508 +p2=-4.595e-05 +k3=0.0096785 + +[RIGHT_CAM_SVGA] +fx=368.766 +fy=368.618 +cx=508.64 +cy=306.51 +k1=-0.0170475 +k2=-0.0251567 +p1=9.40852e-05 +p2=-0.000147634 +k3=0.00614057 + +[LEFT_DISTO] +k1=1.48279 +k2=2.68378 +k3=0.247721 +k4=1.48514 +k5=2.69178 +k6=0.428444 +p1=-0.000100594 +p2=4.28453e-07 + 
diff --git a/zed_settings/SN44289123.conf b/zed_settings/SN44289123.conf
new file mode 100755
index 0000000..b363304
--- /dev/null
+++ b/zed_settings/SN44289123.conf
@@ -0,0 +1,102 @@
+[LEFT_CAM_FHD1200]
+fx=736.835
+fy=736.679
+cx=1014.18
+cy=600.356
+k1=-0.00956936
+k2=-0.0352758
+p1=-0.000125508
+p2=-4.595e-05
+k3=0.0096785
+
+[RIGHT_CAM_FHD1200]
+fx=737.532
+fy=737.236
+cx=1017.28
+cy=613.02
+k1=-0.0170475
+k2=-0.0251567
+p1=9.40852e-05
+p2=-0.000147634
+k3=0.00614057
+
+[LEFT_CAM_FHD]
+fx=736.835
+fy=736.679
+cx=1014.18
+cy=540.356
+k1=-0.00956936
+k2=-0.0352758
+p1=-0.000125508
+p2=-4.595e-05
+k3=0.0096785
+
+[RIGHT_CAM_FHD]
+fx=737.532
+fy=737.236
+cx=1017.28
+cy=553.02
+k1=-0.0170475
+k2=-0.0251567
+p1=9.40852e-05
+p2=-0.000147634
+k3=0.00614057
+
+[LEFT_CAM_SVGA]
+fx=368.4175
+fy=368.3395
+cx=507.09
+cy=300.178
+k1=-0.00956936
+k2=-0.0352758
+p1=-0.000125508
+p2=-4.595e-05
+k3=0.0096785
+
+[RIGHT_CAM_SVGA]
+fx=368.766
+fy=368.618
+cx=508.64
+cy=306.51
+k1=-0.0170475
+k2=-0.0251567
+p1=9.40852e-05
+p2=-0.000147634
+k3=0.00614057
+
+[LEFT_DISTO]
+k1=1.48279
+k2=2.68378
+k3=0.247721
+k4=1.48514
+k5=2.69178
+k6=0.428444
+p1=-0.000100594
+p2=4.28453e-07
+
+[RIGHT_DISTO]
+k1=4.30704
+k2=4.30304
+k3=0.851655
+k4=4.31597
+k5=4.28536
+k6=1.23358
+p1=0.000137174
+p2=9.13179e-05
+
+[STEREO]
+Baseline=119.976
+TY=-0.0379865
+TZ=0.0842898
+CV_FHD=0.0038738
+CV_SVGA=0.0038738
+CV_FHD1200=0.0038738
+RX_FHD=0.00143141
+RX_SVGA=0.00143141
+RX_FHD1200=0.00143141
+RZ_FHD=0.00020213
+RZ_SVGA=0.00020213
+RZ_FHD1200=0.00020213
+
+[MISC]
+Sensor_ID=1
diff --git a/zed_settings/SN44435674.conf b/zed_settings/SN44435674.conf
new file mode 100755
index 0000000..3a92605
--- /dev/null
+++ b/zed_settings/SN44435674.conf
@@ -0,0 +1,102 @@
+[LEFT_CAM_FHD1200]
+fx=737.955
+fy=737.756
+cx=968.066
+cy=614.73
+k1=-0.00912579
+k2=-0.0356045
+p1=-0.000101145
+p2=0.00021162
+k3=0.0099766
+
+[RIGHT_CAM_FHD1200]
+fx=738.69
+fy=738.446
+cx=1005.57
+cy=618.784
+k1=-0.017043
+k2=-0.0250474
+p1=-0.00021727
+p2=0.000224863
+k3=0.00611689
+
+[LEFT_CAM_FHD]
+fx=737.955
+fy=737.756
+cx=968.066
+cy=554.73
+k1=-0.00912579
+k2=-0.0356045
+p1=-0.000101145
+p2=0.00021162
+k3=0.0099766
+
+[RIGHT_CAM_FHD]
+fx=738.69
+fy=738.446
+cx=1005.57
+cy=558.784
+k1=-0.017043
+k2=-0.0250474
+p1=-0.00021727
+p2=0.000224863
+k3=0.00611689
+
+[LEFT_CAM_SVGA]
+fx=368.9775
+fy=368.878
+cx=484.033
+cy=307.365
+k1=-0.00912579
+k2=-0.0356045
+p1=-0.000101145
+p2=0.00021162
+k3=0.0099766
+
+[RIGHT_CAM_SVGA]
+fx=369.345
+fy=369.223
+cx=502.785
+cy=309.392
+k1=-0.017043
+k2=-0.0250474
+p1=-0.00021727
+p2=0.000224863
+k3=0.00611689
+
+[LEFT_DISTO]
+k1=0.602706
+k2=0.894049
+k3=0.100431
+k4=0.605659
+k5=0.928546
+k6=0.154562
+p1=-0.000122683
+p2=0.000243073
+
+[RIGHT_DISTO]
+k1=0.577451
+k2=1.00113
+k3=0.0855869
+k4=0.580119
+k5=1.0349
+k6=0.143931
+p1=-0.00019953
+p2=0.000455092
+
+[STEREO]
+Baseline=119.849
+TY=0.0243877
+TZ=0.519422
+CV_FHD=0.00390967
+CV_SVGA=0.00390967
+CV_FHD1200=0.00390967
+RX_FHD=-0.00284886
+RX_SVGA=-0.00284886
+RX_FHD1200=-0.00284886
+RZ_FHD=-1.90413e-05
+RZ_SVGA=-1.90413e-05
+RZ_FHD1200=-1.90413e-05
+
+[MISC]
+Sensor_ID=1
diff --git a/zed_settings/SN46195029.conf b/zed_settings/SN46195029.conf
new file mode 100755
index 0000000..d9fa571
--- /dev/null
+++ b/zed_settings/SN46195029.conf
@@ -0,0 +1,102 @@
+[LEFT_CAM_FHD1200]
+fx=737.051
+fy=736.767
+cx=966.406
+cy=624.508
+k1=-0.0076488
+k2=-0.0379318
+p1=-0.000186282
+p2=8.06244e-05
+k3=0.0107286
+
+[RIGHT_CAM_FHD1200]
+fx=738.916
+fy=738.719
+cx=989.329
+cy=560.646
+k1=-0.0162967
+k2=-0.0262529
+p1=-0.000165635
+p2=-6.45414e-05
+k3=0.00656445
+
+[LEFT_CAM_FHD]
+fx=737.051
+fy=736.767
+cx=966.406
+cy=564.508
+k1=-0.0076488
+k2=-0.0379318
+p1=-0.000186282
+p2=8.06244e-05
+k3=0.0107286
+
+[RIGHT_CAM_FHD]
+fx=738.916
+fy=738.719
+cx=989.329
+cy=500.646
+k1=-0.0162967
+k2=-0.0262529
+p1=-0.000165635
+p2=-6.45414e-05
+k3=0.00656445
+
+[LEFT_CAM_SVGA]
+fx=368.5255
+fy=368.3835
+cx=483.203
+cy=312.254
+k1=-0.0076488
+k2=-0.0379318
+p1=-0.000186282
+p2=8.06244e-05
+k3=0.0107286
+
+[RIGHT_CAM_SVGA]
+fx=369.458
+fy=369.3595
+cx=494.6645
+cy=280.323
+k1=-0.0162967
+k2=-0.0262529
+p1=-0.000165635
+p2=-6.45414e-05
+k3=0.00656445
+
+[LEFT_DISTO]
+k1=0.152834
+k2=0.509834
+k3=-0.00790371
+k4=0.153366
+k5=0.553444
+k6=0.0064775
+p1=-0.000202853
+p2=0.000125442
+
+[RIGHT_DISTO]
+k1=0.601384
+k2=1.05306
+k3=0.11214
+k4=0.606883
+k5=1.08095
+k6=0.178194
+p1=-0.000151962
+p2=0.000136203
+
+[STEREO]
+Baseline=119.99
+TY=-0.033356
+TZ=0.0292273
+CV_FHD=-0.000834777
+CV_SVGA=-0.000834777
+CV_FHD1200=-0.000834777
+RX_FHD=-0.00556493
+RX_SVGA=-0.00556493
+RX_FHD1200=-0.00556493
+RZ_FHD=0.00031898
+RZ_SVGA=0.00031898
+RZ_FHD1200=0.00031898
+
+[MISC]
+Sensor_ID=1
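Each per-serial `.conf` holds the factory calibration the SDK loads for that camera: per-resolution pinhole intrinsics for both sensors, plus a `[STEREO]` section whose `Baseline` is in millimeters (about 120 mm on all five units). A minimal sketch, using Python's stdlib `configparser` with the SN41831756 FHD values above, of building the left camera matrix and turning a disparity into metric depth via Z = fx * B / d:

```python
# Sketch: read left-camera FHD intrinsics from a ZED calibration .conf and
# convert a disparity (pixels) to depth. Baseline is assumed to be millimeters.
import configparser
import numpy as np

conf = configparser.ConfigParser()
conf.read("zed_settings/SN41831756.conf")

cam = conf["LEFT_CAM_FHD"]
K = np.array([[float(cam["fx"]), 0.0, float(cam["cx"])],
              [0.0, float(cam["fy"]), float(cam["cy"])],
              [0.0, 0.0, 1.0]])

baseline_m = float(conf["STEREO"]["Baseline"]) / 1000.0  # ~0.120 m

def depth_from_disparity(disparity_px: float) -> float:
    """Z = fx * B / d for a rectified stereo pair."""
    return float(cam["fx"]) * baseline_m / disparity_px

print(K)
print(depth_from_disparity(10.0))  # ~8.8 m at 10 px of disparity
```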
diff --git a/zed_settings/inside_network.json b/zed_settings/inside_network.json
new file mode 100644
index 0000000..13e0f19
--- /dev/null
+++ b/zed_settings/inside_network.json
@@ -0,0 +1,90 @@
+{
+    "41831756": {
+        "FusionConfiguration": {
+            "communication_parameters": {
+                "CommunicationParameters": {
+                    "communication_type": "LOCAL_NETWORK",
+                    "ip_add": "192.168.128.2",
+                    "ip_port": 30004
+                }
+            },
+            "input_type": {
+                "InputType": {
+                    "input_type_conf": "41831756",
+                    "input_type_conf_right": "0",
+                    "input_type_input": "AUTO",
+                    "input_virtual_serial_number": 0
+                }
+            },
+            "override_gravity": false,
+            "pose": "1.000000 0.000000 0.000000 0.000000 0.000000 1.000000 0.000000 -1.175164 0.000000 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000000",
+            "serial_number": 41831756
+        }
+    },
+    "44289123": {
+        "FusionConfiguration": {
+            "communication_parameters": {
+                "CommunicationParameters": {
+                    "communication_type": "LOCAL_NETWORK",
+                    "ip_add": "192.168.128.2",
+                    "ip_port": 30000
+                }
+            },
+            "input_type": {
+                "InputType": {
+                    "input_type_conf": "44289123",
+                    "input_type_conf_right": "0",
+                    "input_type_input": "AUTO",
+                    "input_virtual_serial_number": 0
+                }
+            },
+            "override_gravity": false,
+            "pose": "0.878804 -0.039482 0.475548 -2.155006 0.070301 0.996409 -0.047188 -1.323051 -0.471977 0.074901 0.878423 0.196908 0.000000 0.000000 0.000000 1.000000",
+            "serial_number": 44289123
+        }
+    },
+    "44435674": {
+        "FusionConfiguration": {
+            "communication_parameters": {
+                "CommunicationParameters": {
+                    "communication_type": "LOCAL_NETWORK",
+                    "ip_add": "192.168.128.2",
+                    "ip_port": 30002
+                }
+            },
+            "input_type": {
+                "InputType": {
+                    "input_type_conf": "44435674",
+                    "input_type_conf_right": "0",
+                    "input_type_input": "AUTO",
+                    "input_virtual_serial_number": 0
+                }
+            },
+            "override_gravity": false,
+            "pose": "-0.900790 -0.002624 -0.434247 1.078225 0.006139 0.999805 -0.018775 -1.278689 0.434211 -0.019579 -0.900598 7.966109 0.000000 0.000000 0.000000 1.000000",
+            "serial_number": 44435674
+        }
+    },
+    "46195029": {
+        "FusionConfiguration": {
+            "communication_parameters": {
+                "CommunicationParameters": {
+                    "communication_type": "LOCAL_NETWORK",
+                    "ip_add": "192.168.128.2",
+                    "ip_port": 30006
+                }
+            },
+            "input_type": {
+                "InputType": {
+                    "input_type_conf": "46195029",
+                    "input_type_conf_right": "0",
+                    "input_type_input": "AUTO",
+                    "input_virtual_serial_number": 0
+                }
+            },
+            "override_gravity": false,
+            "pose": "-0.250826 0.011303 0.967966 -4.166518 -0.001156 0.999927 -0.011976 -1.289126 -0.968031 -0.004123 -0.250795 5.916860 0.000000 0.000000 0.000000 1.000000",
+            "serial_number": 46195029
+        }
+    }
+}
\ No newline at end of file
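Each camera's `pose` is 16 floats forming a 4x4 homogeneous transform into the fusion world frame. A small decoding sketch, assuming row-major order (so the translation sits in the last column); the string below is the SN44289123 pose from `inside_network.json`:

```python
# Sketch: decode a FusionConfiguration "pose" string into a 4x4 matrix.
# Row-major layout is an assumption; translation then sits in the last column.
import numpy as np

pose_str = ("0.878804 -0.039482 0.475548 -2.155006 "
            "0.070301 0.996409 -0.047188 -1.323051 "
            "-0.471977 0.074901 0.878423 0.196908 "
            "0.000000 0.000000 0.000000 1.000000")

T = np.array(pose_str.split(), dtype=float).reshape(4, 4)
R, t = T[:3, :3], T[:3, 3]
print(t)  # under the row-major assumption: roughly (-2.16, -1.32, 0.20)
```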
"communication_type": "INTRA PROCESS", + "ip_add": "", + "ip_port": 0 + } + }, + "input_type": { + "InputType": { + "input_type_conf": "44289123", + "input_type_conf_right": "0", + "input_type_input": "AUTO", + "input_virtual_serial_number": 0 + } + }, + "override_gravity": false, + "pose": "0.878804 -0.039482 0.475548 -2.155006 0.070301 0.996409 -0.047188 -1.323051 -0.471977 0.074901 0.878423 0.196908 0.000000 0.000000 0.000000 1.000000", + "serial_number": 44289123 + } + }, + "44435674": { + "FusionConfiguration": { + "communication_parameters": { + "CommunicationParameters": { + "communication_type": "INTRA PROCESS", + "ip_add": "", + "ip_port": 0 + } + }, + "input_type": { + "InputType": { + "input_type_conf": "44435674", + "input_type_conf_right": "0", + "input_type_input": "AUTO", + "input_virtual_serial_number": 0 + } + }, + "override_gravity": false, + "pose": "-0.900790 -0.002624 -0.434247 1.078225 0.006139 0.999805 -0.018775 -1.278689 0.434211 -0.019579 -0.900598 7.966109 0.000000 0.000000 0.000000 1.000000", + "serial_number": 44435674 + } + }, + "46195029": { + "FusionConfiguration": { + "communication_parameters": { + "CommunicationParameters": { + "communication_type": "INTRA PROCESS", + "ip_add": "", + "ip_port": 0 + } + }, + "input_type": { + "InputType": { + "input_type_conf": "46195029", + "input_type_conf_right": "0", + "input_type_input": "AUTO", + "input_virtual_serial_number": 0 + } + }, + "override_gravity": false, + "pose": "-0.250826 0.011303 0.967966 -4.166518 -0.001156 0.999927 -0.011976 -1.289126 -0.968031 -0.004123 -0.250795 5.916860 0.000000 0.000000 0.000000 1.000000", + "serial_number": 46195029 + } + } +} \ No newline at end of file