Is there a way to keep publishing the depth map while disabling the point cloud? I’m interested in further optimizing the performance of the zed2i sensor, and I would also like to limit the number of topics that the zed2i publishes.
Here are my configuration details:
zed2i_common.yaml
# config/common_yaml
# Common parameters to Stereolabs ZED and ZED mini cameras
#
# Note: the parameter svo_file is passed as exe argument
---
/**:
  ros__parameters:
    general:
      svo_file: "" # usually overwritten by launch file
      svo_loop: false # Enable loop mode when using an SVO as input source
      svo_realtime: false # if true SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting
      camera_timeout_sec: 10
      camera_max_reconnect: 10
      camera_flip: false
      zed_id: 0
      serial_number: 31681504
      resolution: 0 # '0': HD2K, '1': HD1080, '2': HD720, '3': VGA
      sdk_verbose: 1
      grab_frame_rate: 15 # ZED SDK internal grabbing rate
      pub_frame_rate: 10.0 # [DYNAMIC] - frequency of publishing of visual images and depth images
      gpu_id: -1
      region_of_interest: "" # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent.
    video:
      extrinsic_in_camera_frame: false # if `false` extrinsic parameter in `camera_info` will use ROS native frame (X FORWARD, Z UP) instead of the camera frame (Z FORWARD, Y DOWN) [`true` use old behavior as for version < v3.1]
      img_downsample_factor: 1.0 # Resample factor for image data matrices [0.01,1.0] The SDK works with native data sizes, but publishes rescaled matrices
      brightness: 4 # [DYNAMIC]
      contrast: 4 # [DYNAMIC]
      hue: 0 # [DYNAMIC]
      saturation: 4 # [DYNAMIC]
      sharpness: 4 # [DYNAMIC]
      gamma: 8 # [DYNAMIC] - Requires SDK >=v3.1
      auto_exposure_gain: true # [DYNAMIC]
      exposure: 80 # [DYNAMIC]
      gain: 80 # [DYNAMIC]
      auto_whitebalance: true # [DYNAMIC]
      whitebalance_temperature: 42 # [DYNAMIC] - [28,65] works only if `auto_whitebalance` is false
      qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
      qos_depth: 1 # Queue size if using KEEP_LAST
      qos_reliability: 2 # '1': RELIABLE - '2': BEST_EFFORT
      qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
    depth:
      quality: 3 # '0': NONE, '1': PERFORMANCE, '2': QUALITY, '3': ULTRA - '4': NEURAL - Note: if '0' all the modules that requires depth extraction are disabled by default (Pos. Tracking, Obj. Detection, Mapping, ...)
      sensing_mode: 1 # '0': STANDARD, '1': FILL
      depth_stabilization: true # Forces positional tracking to start if 'true'
      openni_depth_mode: false # 'false': 32bit float [meters], 'true': 16bit unsigned int [millimeters]
      depth_downsample_factor: 1.0 # Resample factor for depth data matrices [0.01,1.0] The SDK works with native data sizes, but publishes rescaled matrices (depth map, point cloud, ...)
      # I suspect we can lower the point cloud frequency because we are not using point clouds right now.
      # NOTE(review): this file exposes no dedicated enable/disable flag for the point cloud —
      # presumably the wrapper only does the point-cloud work when the topic has subscribers;
      # verify against the wrapper version in use before relying on that.
      point_cloud_freq: 1.0 # [DYNAMIC] - frequency of the pointcloud publishing (equal or less to `grab_frame_rate` value)
      depth_confidence: 100 # [DYNAMIC]
      depth_texture_conf: 100 # [DYNAMIC]
      remove_saturated_areas: true # [DYNAMIC]
      qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
      qos_depth: 1 # Queue size if using KEEP_LAST
      qos_reliability: 2 # '1': RELIABLE - '2': BEST_EFFORT
      qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
    pos_tracking:
      pos_tracking_enabled: true # True to enable positional tracking from start
      imu_fusion: true # enable/disable IMU fusion. When set to false, only the optical odometry will be used.
      publish_tf: false # publish `odom -> base_link` TF
      publish_map_tf: false # publish `map -> odom` TF
      publish_imu_tf: true # enable/disable the IMU TF broadcasting
      base_frame: "base_link" # use the same name as in the URDF file
      map_frame: "map"
      odometry_frame: "odom"
      area_memory_db_path: ""
      area_memory: true # Enable to detect loop closure
      depth_min_range: 0.0 # Set this value for removing fixed zones of the robot in the FoV of the camera from the visual odometry evaluation
      set_gravity_as_origin: false # If 'true' align the positional tracking world to imu gravity measurement. Keep the yaw from the user initial pose.
      floor_alignment: false # Enable to automatically calculate camera/floor offset
      initial_base_pose: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Initial position of the `base_frame` -> [X, Y, Z, R, P, Y]
      init_odom_with_first_valid_pose: true # Enable to initialize the odometry with the first valid pose
      path_pub_rate: 2.0 # [DYNAMIC] - Camera trajectory publishing frequency
      path_max_count: 10 # use '-1' for unlimited path size
      two_d_mode: false # Force navigation on a plane. If true the Z value will be fixed to "fixed_z_value", roll and pitch to zero
      fixed_z_value: 0.00 # Value to be used for Z coordinate if `two_d_mode` is true
      qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
      qos_depth: 1 # Queue size if using KEEP_LAST
      qos_reliability: 2 # '1': RELIABLE - '2': BEST_EFFORT
      qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
      transform_time_offset: 0.0 # The value added to the timestamp of `map->odom` and `odom->base_link` transform being generated
    mapping:
      mapping_enabled: false # True to enable mapping and fused point cloud publication
      resolution: 0.1 # maps resolution in meters [0.01f, 0.2f]
      max_mapping_range: 20.0 # maximum depth range while mapping in meters (-1 for automatic calculation) [2.0, 20.0]
      fused_pointcloud_freq: 0.5 # frequency of the publishing of the fused colored point cloud
      clicked_point_topic: "/clicked_point" # Topic published by Rviz when a point of the cloud is clicked. Used for plane detection
      qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
      qos_depth: 1 # Queue size if using KEEP_LAST
      qos_reliability: 2 # '1': RELIABLE - '2': BEST_EFFORT
      qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
    sensors:
      sensors_image_sync: false # Synchronize Sensors messages with latest published video/depth message
      sensors_pub_rate: 200.0 # frequency of publishing of sensors data. MAX: 400. - MIN: grab rate
      qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
      qos_depth: 1 # Queue size if using KEEP_LAST
      qos_reliability: 2 # '1': RELIABLE - '2': BEST_EFFORT
      qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
    object_detection:
      od_enabled: false # True to enable Object Detection [only ZED 2]
      confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,100]
      prediction_timeout: 0.5 # During this time [sec], the object will have OK state even if it is not detected. Set this parameter to 0 to disable SDK predictions
      model: 0 # '0': MULTI_CLASS_BOX - '1': MULTI_CLASS_BOX_ACCURATE - '2': HUMAN_BODY_FAST - '3': HUMAN_BODY_ACCURATE - '4': MULTI_CLASS_BOX_MEDIUM - '5': HUMAN_BODY_MEDIUM - '6': PERSON_HEAD_BOX
      filtering_mode: 1 # '0': NONE - '1': NMS3D - '2': NMS3D_PER_CLASS
      mc_people: true # [DYNAMIC] - Enable/disable the detection of persons for 'MULTI_CLASS_X' models
      mc_vehicle: true # [DYNAMIC] - Enable/disable the detection of vehicles for 'MULTI_CLASS_X' models
      mc_bag: true # [DYNAMIC] - Enable/disable the detection of bags for 'MULTI_CLASS_X' models
      mc_animal: true # [DYNAMIC] - Enable/disable the detection of animals for 'MULTI_CLASS_X' models
      mc_electronics: true # [DYNAMIC] - Enable/disable the detection of electronic devices for 'MULTI_CLASS_X' models
      mc_fruit_vegetable: true # [DYNAMIC] - Enable/disable the detection of fruits and vegetables for 'MULTI_CLASS_X' models
      mc_sport: true # [DYNAMIC] - Enable/disable the detection of sport-related objects for 'MULTI_CLASS_X' models
      body_fitting: true # Enable/disable body fitting for 'HUMAN_BODY_FAST' and 'HUMAN_BODY_ACCURATE' models
      body_format: 1 # '0': POSE_18 - '1': POSE_34 [Only if `HUMAN_BODY_*` model is selected]
      qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
      qos_depth: 1 # Queue size if using KEEP_LAST
      qos_reliability: 2 # '1': RELIABLE - '2': BEST_EFFORT
      qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
    debug:
      debug_mode: true
      debug_sensors: false
Any recommendations?