Hi,
I am trying to implement a multiple-camera positional tracking and spatial mapping example using the python api.
First I am trying to get the multi-camera positional tracking working; however, in the viewer only one camera ever shows up and the poses get mixed up on every frame. I am using two ZED Mini cameras, and the script I am running is the following:
import pyzed.sl as sl
import cv2
import numpy as np
import threading
import time
import signal
import ogl_viewer.tracking_viewer as gl
zed_list = []
left_list = []
depth_list = []
timestamp_list = []
def main():
global zed_list
global left_list
global depth_list
global timestamp_list
print("Running...")
init = sl.InitParameters()
init.optional_settings_path = "/opt/zed/settings/"
init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP # OpenGL's coordinate system is right_handed
init.coordinate_units = sl.UNIT.METER#MILLIMETER
init.depth_minimum_distance = 200
init.sdk_verbose = 1
init.camera_resolution = sl.RESOLUTION.HD1080
init.camera_fps = 30 # The framerate is lowered to avoid any USB3 bandwidth issues
# Setup and start positional tracking
pose0 = sl.Pose()
pose_data0 = sl.Transform()
pose1 = sl.Pose()
pose_data1 = sl.Transform()
py_translation = sl.Translation()
positional_tracking_parameters = sl.PositionalTrackingParameters()
positional_tracking_parameters.enable_area_memory = True
positional_tracking_parameters.enable_imu_fusion = True
# Set spatial mapping parameters
spatial_mapping_parameters = sl.SpatialMappingParameters(resolution = sl.MAPPING_RESOLUTION.MEDIUM, mapping_range = sl.MAPPING_RANGE.MEDIUM, max_memory_usage = 2048,
save_texture = False, use_chunk_only = True, reverse_vertex_order = False, map_type = sl.SPATIAL_MAP_TYPE.FUSED_POINT_CLOUD)
# Setup runtime parameters
runtime_parameters = sl.RuntimeParameters()
# Use low depth confidence avoid introducing noise in the constructed model
runtime_parameters.confidence_threshold = 50
# Initialize point cloud viewer
map = sl.FusedPointCloud()
# Point cloud viewer
viewer = gl.GLViewer()
# List and open cameras
name_list = []
last_ts_list = []
cameras = sl.Camera.get_device_list()
index = 0
for cam in cameras:
init.set_from_serial_number(cam.serial_number)
name_list.append("ZED {}".format(cam.serial_number))
print("Opening {}".format(name_list[index]))
zed_list.append(sl.Camera())
left_list.append(sl.Mat())
depth_list.append(sl.Mat())
timestamp_list.append(0)
last_ts_list.append(0)
# Open camera
status = zed_list[index].open(init)
if status != sl.ERROR_CODE.SUCCESS:
# print(repr(status))
print("Open Camera : "+repr(status)+". Exit program.")
zed_list[index].close()
exit()
# Enable positional tracking
status = zed_list[index].enable_positional_tracking(positional_tracking_parameters)
if status != sl.ERROR_CODE.SUCCESS:
print("Enable Positional Tracking : "+repr(status)+". Exit program.")
exit()
# Enable spatial mapping
status = zed_list[index].enable_spatial_mapping(spatial_mapping_parameters)
if status != sl.ERROR_CODE.SUCCESS:
print("Enable Spatial Mapping : "+repr(status)+". Exit program.")
exit()
index = index + 1
if all(cam.camera_model == cameras[0].camera_model for cam in cameras):
camera_infos = zed_list[0].get_camera_information()
# viewer.init(camera_infos.camera_configuration.calibration_parameters.left_cam, map, cameras[0].camera_model)
viewer.init(cameras[0].camera_model)
else:
print("Warning: Not all cameras are of the same type!!!")
exit()
# Start timer
last_call = time.time()
text_translation0 = ""
text_rotation0 = ""
text_translation1 = ""
text_rotation1 = ""
while viewer.is_available():
# for i, cam in enumerate(cameras):
# # Grab an image, a RuntimeParameters object must be given to grab()
# if zed_list[i].grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
# # Retrieve left image
# zed_list[i].retrieve_image(left_list[i], sl.VIEW.LEFT)
# # Update pose data (used for projection of the mesh over the current image)
# tracking_state = zed_list[i].get_position(pose, sl.REFERENCE_FRAME.WORLD) #Get the position of the camera in a fixed reference frame (the World Frame)
# #Get rotation and translation and displays it
# if tracking_state == sl.POSITIONAL_TRACKING_STATE.OK:
# rotation = pose.get_rotation_vector()
# translation = pose.get_translation(py_translation)
# text_rotation = str((round(rotation[0], 2), round(rotation[1], 2), round(rotation[2], 2)))
# text_translation = str((round(translation.get()[0], 2), round(translation.get()[1], 2), round(translation.get()[2], 2)))
# pose_data = pose.pose_data(sl.Transform())
# # Update rotation, translation and tracking state values in the OpenGL window
# viewer.updateData(pose_data, text_translation, text_rotation, tracking_state)
# Grab an image, a RuntimeParameters object must be given to grab()
if zed_list[0].grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
# Retrieve left image
zed_list[0].retrieve_image(left_list[0], sl.VIEW.LEFT)
# Update pose data (used for projection of the mesh over the current image)
tracking_state0 = zed_list[0].get_position(pose0, sl.REFERENCE_FRAME.WORLD) #Get the position of the camera in a fixed reference frame (the World Frame)
# tracking_status = zed_list[i].get_positional_tracking_status()
#Get rotation and translation and displays it
if tracking_state0 == sl.POSITIONAL_TRACKING_STATE.OK:
rotation = pose0.get_rotation_vector()
translation = pose0.get_translation(py_translation)
text_rotation0 = str((round(rotation[0], 2), round(rotation[1], 2), round(rotation[2], 2)))
text_translation0 = str((round(translation.get()[0], 2), round(translation.get()[1], 2), round(translation.get()[2], 2)))
pose_data0 = pose0.pose_data(sl.Transform())
# Update rotation, translation and tracking state values in the OpenGL window
viewer.updateData(pose_data0, text_translation0, text_rotation0, tracking_state0)
# Grab an image, a RuntimeParameters object must be given to grab()
if zed_list[1].grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
# Retrieve left image
zed_list[1].retrieve_image(left_list[1], sl.VIEW.LEFT)
# Update pose data (used for projection of the mesh over the current image)
tracking_state1 = zed_list[1].get_position(pose1, sl.REFERENCE_FRAME.WORLD) #Get the position of the camera in a fixed reference frame (the World Frame)
#Get rotation and translation and displays it
if tracking_state1 == sl.POSITIONAL_TRACKING_STATE.OK:
rotation = pose1.get_rotation_vector()
translation = pose1.get_translation(py_translation)
text_rotation1 = str((round(rotation[0], 2), round(rotation[1], 2), round(rotation[2], 2)))
text_translation1 = str((round(translation.get()[0], 2), round(translation.get()[1], 2), round(translation.get()[2], 2)))
pose_data1 = pose1.pose_data(sl.Transform())
# Update rotation, translation and tracking state values in the OpenGL window
viewer.updateData(pose_data1, text_translation1, text_rotation1, tracking_state1)
viewer.exit()
print("\nFINISH")
if __name__ == "__main__":
main()
Any idea why it does not work properly? With only one camera there doesn't seem to be any problem.
Thanks.