Real-time mapping using two cameras at the same time

I ran another test using the "spatial mapping" sample with the following modified code:

/**********************************************************************************
 ** This sample demonstrates how to capture a live 3D reconstruction of a scene  **
 ** as a fused point cloud and display the result in an OpenGL window.           **
 **********************************************************************************/

// ZED includes
#include <sl/Camera.hpp>

// Sample includes
#include "GLViewer.hpp"

#include <opencv2/opencv.hpp>

// Using std and sl namespaces
using namespace std;
using namespace sl;

int main(int argc, char **argv) {
    Camera zed1;
    Camera zed2;

    // Set configuration parameters for the ZED
    InitParameters init_parameters;
    init_parameters.depth_mode = DEPTH_MODE::ULTRA;    
    init_parameters.coordinate_system = COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP; // OpenGL's coordinate system is right_handed    

    // Open both cameras, checking the return codes
    init_parameters.input.setFromCameraID(0);
    auto returned_state = zed1.open(init_parameters);
    if (returned_state != ERROR_CODE::SUCCESS) {
        cout << "Error opening camera 1: " << returned_state << endl;
        return EXIT_FAILURE;
    }

    init_parameters.input.setFromCameraID(1);
    returned_state = zed2.open(init_parameters);
    if (returned_state != ERROR_CODE::SUCCESS) {
        cout << "Error opening camera 2: " << returned_state << endl;
        return EXIT_FAILURE;
    }

    auto camera_infos = zed1.getCameraInformation();

    // Point cloud viewer
    GLViewer viewer;

    // Initialize point cloud viewer
    FusedPointCloud map;
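    // (note: both cameras retrieve into this single point cloud object in the main loop below)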
    GLenum errgl = viewer.init(argc, argv, camera_infos.camera_configuration.calibration_parameters.left_cam, &map, camera_infos.camera_model);

    // Setup and start positional tracking
    Pose pose;
    POSITIONAL_TRACKING_STATE tracking_state = POSITIONAL_TRACKING_STATE::OFF;
    PositionalTrackingParameters positional_tracking_parameters;
    positional_tracking_parameters.enable_area_memory = false;
    zed1.enablePositionalTracking(positional_tracking_parameters);
    zed2.enablePositionalTracking(positional_tracking_parameters);
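    // (each camera's positional tracking runs independently, in its own world frame)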


    // Set spatial mapping parameters
    SpatialMappingParameters spatial_mapping_parameters;
    // Request a Point Cloud
    spatial_mapping_parameters.map_type = SpatialMappingParameters::SPATIAL_MAP_TYPE::FUSED_POINT_CLOUD;
    // Set the mapping range; the resolution will be set accordingly (the higher the range, the lower the resolution)
    spatial_mapping_parameters.set(SpatialMappingParameters::MAPPING_RANGE::SHORT);
    spatial_mapping_parameters.set(SpatialMappingParameters::MAPPING_RESOLUTION::HIGH);
    // Request partial updates only (only the most recently updated chunks need to be redrawn)
    spatial_mapping_parameters.use_chunk_only = true;
    // Start the spatial mapping
    zed1.enableSpatialMapping(spatial_mapping_parameters);
    zed2.enableSpatialMapping(spatial_mapping_parameters);
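    // (each Camera instance maintains its own internal spatial map and, presumably, its own CUDA resources)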
    
    // Timestamp of the last fused point cloud requested
    chrono::high_resolution_clock::time_point ts_last1;
    chrono::high_resolution_clock::time_point ts_last2;

    // Setup runtime parameters
    RuntimeParameters runtime_parameters;
    // Use a low depth confidence threshold to avoid introducing noise in the constructed model
    runtime_parameters.confidence_threshold = 50;

    auto resolution = camera_infos.camera_configuration.resolution;

    // Define the display resolution and make sure it does not exceed the camera resolution
    Resolution display_resolution(min((int)resolution.width, 720), min((int)resolution.height, 404));

    // Create a Mat per camera for the left image, plus an OpenCV Mat sharing the same buffer
    Mat image_zed1(display_resolution, MAT_TYPE::U8_C4);
    Mat image_zed2(display_resolution, MAT_TYPE::U8_C4);
    cv::Mat image_zed_ocv1(image_zed1.getHeight(), image_zed1.getWidth(), CV_8UC4, image_zed1.getPtr<sl::uchar1>(MEM::CPU));
    cv::Mat image_zed_ocv2(image_zed2.getHeight(), image_zed2.getWidth(), CV_8UC4, image_zed2.getPtr<sl::uchar1>(MEM::CPU));

    // Start the main loop
    while (viewer.isAvailable()) {
        // Grab a new image
        if (zed1.grab(runtime_parameters) == ERROR_CODE::SUCCESS) {
            // Retrieve the left image
            zed1.retrieveImage(image_zed1, VIEW::LEFT, MEM::CPU, display_resolution);
            // Retrieve the camera pose data
            tracking_state = zed1.getPosition(pose);
            viewer.updatePose(pose, tracking_state);

            if (tracking_state == POSITIONAL_TRACKING_STATE::OK) {                
                auto duration = chrono::duration_cast<chrono::milliseconds>(chrono::high_resolution_clock::now() - ts_last1).count();
                
                // Ask for a fused point cloud update if 500ms have elapsed since last request
                if((duration > 500) && viewer.chunksUpdated()) {
                    // Ask for a point cloud refresh
                    zed1.requestSpatialMapAsync();
                    ts_last1 = chrono::high_resolution_clock::now();
                }
                
                // If the point cloud is ready to be retrieved
                if(zed1.getSpatialMapRequestStatusAsync() == ERROR_CODE::SUCCESS) {                    
                    zed1.retrieveSpatialMapAsync(map);
                    viewer.updateChunks();
                }
            }
            cv::imshow("ZED View 1", image_zed_ocv1);
        }
        if (zed2.grab(runtime_parameters) == ERROR_CODE::SUCCESS) {
            // Retrieve the left image
            zed2.retrieveImage(image_zed2, VIEW::LEFT, MEM::CPU, display_resolution);
            // Retrieve the camera pose data
            tracking_state = zed2.getPosition(pose);
            //viewer.updatePose(pose, tracking_state);
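            // (pose updates from the second camera are skipped; the viewer follows camera 1 only)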

            if (tracking_state == POSITIONAL_TRACKING_STATE::OK) {
                auto duration = chrono::duration_cast<chrono::milliseconds>(chrono::high_resolution_clock::now() - ts_last2).count();

                // Ask for a fused point cloud update if 500ms have elapsed since last request
                if ((duration > 500) && viewer.chunksUpdated()) {
                    // Ask for a point cloud refresh
                    zed2.requestSpatialMapAsync();
                    ts_last2 = chrono::high_resolution_clock::now();
                }

                // If the point cloud is ready to be retrieved
                if (zed2.getSpatialMapRequestStatusAsync() == ERROR_CODE::SUCCESS) {
                    zed2.retrieveSpatialMapAsync(map);
                    viewer.updateChunks();
                }
            }
            cv::imshow("ZED View 2", image_zed_ocv2);
        }
        cv::waitKey(15);
    }

    // Save generated point cloud
    //map.save("MyFusedPointCloud");

    // Free allocated memory before closing the camera
    image_zed1.free();
    image_zed2.free();

    // Close the ZED
    zed1.close();
    zed2.close();

    return 0;
}

Only the second camera runs the mapping process, and I repeatedly get the same error in the console window:

CUDA error at C:\builds\sl\ZEDKit\lib\src\sl_zed\SpatialMappingHandler.cpp:539 code=400(cudaErrorInvalidResourceHandle) "cudaEventRecord(ev_cpu_data, strm)"

Is there a way to use two (or more) cameras, rigidly fixed together, to do real-time mapping?
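
For reference, here is a minimal sketch of a restructuring I could try (untested assumption on my side): give each camera its own thread and its own FusedPointCloud, so that no spatial-mapping object is shared between the two cameras. The camera IDs, the 500 ms refresh period, and the fixed 30-second run time are placeholders:

// Hypothetical sketch: one thread and one fused point cloud per camera,
// so that no spatial-mapping object is shared between the two cameras.
#include <sl/Camera.hpp>
#include <atomic>
#include <chrono>
#include <functional>
#include <thread>

using namespace sl;

std::atomic<bool> run{true};

void mapping_loop(int camera_id, FusedPointCloud &map) {
    Camera zed;
    InitParameters init_parameters;
    init_parameters.depth_mode = DEPTH_MODE::ULTRA;
    init_parameters.coordinate_system = COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP;
    init_parameters.input.setFromCameraID(camera_id);
    if (zed.open(init_parameters) != ERROR_CODE::SUCCESS) return;

    zed.enablePositionalTracking();

    SpatialMappingParameters mapping_parameters;
    mapping_parameters.map_type = SpatialMappingParameters::SPATIAL_MAP_TYPE::FUSED_POINT_CLOUD;
    zed.enableSpatialMapping(mapping_parameters);

    auto ts_last = std::chrono::high_resolution_clock::now();
    while (run) {
        if (zed.grab() != ERROR_CODE::SUCCESS) continue;
        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::high_resolution_clock::now() - ts_last).count();
        // Request a map update every 500 ms, then retrieve it when ready
        if (elapsed > 500) {
            zed.requestSpatialMapAsync();
            ts_last = std::chrono::high_resolution_clock::now();
        }
        if (zed.getSpatialMapRequestStatusAsync() == ERROR_CODE::SUCCESS)
            zed.retrieveSpatialMapAsync(map); // each thread writes only its own map
    }
    zed.close();
}

int main() {
    FusedPointCloud map1, map2;
    std::thread t1(mapping_loop, 0, std::ref(map1));
    std::thread t2(mapping_loop, 1, std::ref(map2));
    std::this_thread::sleep_for(std::chrono::seconds(30)); // placeholder run time
    run = false;
    t1.join();
    t2.join();
    map1.save("MapCamera0");
    map2.save("MapCamera1");
    return 0;
}

Even if this avoids the CUDA error, the two point clouds would still live in each camera's own tracking frame, so merging them into a single model would require the known rigid transform between the two cameras.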