/***********************************************************************************************
 ** This sample demonstrates how to relocalize ZED cameras using ArUco markers.              **
 ** Images are captured with the ZED SDK and the camera poses are then computed from the     **
 ** ArUco pattern to reset the ZED positional tracking with this known position.             **
 ***********************************************************************************************/

#ifdef _DEBUG
#error Please build the project in Release mode
#endif

// Standard includes
#include <iostream>
#include <map>
#include <string>
#include <thread>
#include <vector>

// ZED includes
#include <sl/Camera.hpp>
#include <sl/Fusion.hpp>

// OCV includes
#include <opencv2/opencv.hpp>

// Sample includes
#include "aruco.hpp"
#include "GLViewer.hpp"

// Using std and sl namespaces
using namespace std;
using namespace sl;

// global map: marker ID -> vector of 4 corner points in world coordinates
std::map<int, std::vector<cv::Point3f>> g_markerWorldCorners;

/** Global struct to store all needed info */
struct CameraData {
    Camera zed;
    Mat point_cloud;
    Mat im_left;
    cv::Mat im_left_ocv;
    cv::Mat im_left_ocv_rgb;
    cv::Mat camera_matrix = cv::Mat::eye(3, 3, CV_64F);
    cv::Mat dist_coeffs = cv::Mat::zeros(1, 5, CV_64F);
    sl::Transform aruco_transf;
    int id = -1;
    bool is_reloc = false;
    string info;
};

/** ArUco related data */
struct ArucoData {
    aruco::Dictionary dictionary;
    float marker_length;
};

// Global objects
bool quit;
GLViewer viewer;

// Utility: Convert sl::Mat to cv::Mat (for ArUco detection)
cv::Mat slMatToCvMat(sl::Mat& image) {
    int width = image.getWidth();
    int height = image.getHeight();
    cv::Mat cvImage(height, width, CV_8UC4, image.getPtr<sl::uchar1>(sl::MEM::CPU));
    cv::Mat cvImageRGB;
    cv::cvtColor(cvImage, cvImageRGB, cv::COLOR_BGRA2BGR); // Convert to BGR for OpenCV
    return cvImageRGB;
}

// Initialize global marker corners in the world coordinate frame (meters)
void initMarkerWorldCorners(double markerLengthMeters) {
    std::cout << "[initMarkerWorldCorners] Marker length = " << markerLengthMeters << std::endl;

    // Define corners in the marker local frame, ordered clockwise starting from the top-left corner:
    std::vector<cv::Point3f> markerLocalCorners = {
        {0.0f, 0.0f, 0.0f},
        {static_cast<float>(markerLengthMeters), 0.0f, 0.0f},
        {static_cast<float>(markerLengthMeters), static_cast<float>(markerLengthMeters), 0.0f},
        {0.0f, static_cast<float>(markerLengthMeters), 0.0f}
    };

    // Marker ID 0 at the world origin
    aruco::g_markerWorldCorners[0] = markerLocalCorners;
    std::cout << "[initMarkerWorldCorners] g_markerWorldCorners[0] = " << std::endl;
    for (const auto& pt : aruco::g_markerWorldCorners[0]) {
        std::cout << "(" << pt.x << ", " << pt.y << ", " << pt.z << ") ";
    }
    cout << endl;

    // Marker ID 1 offset by (-2 * markerLength, 0, 0) in world coordinates
    std::vector<cv::Point3f> marker1Corners;
    for (const auto& p : markerLocalCorners) {
        marker1Corners.push_back(cv::Point3f(p.x + 2 * static_cast<float>(-markerLengthMeters), p.y, p.z));
    }
    aruco::g_markerWorldCorners[1] = marker1Corners;
    std::cout << "[initMarkerWorldCorners] g_markerWorldCorners[1] = " << std::endl;
    for (const auto& pt : aruco::g_markerWorldCorners[1]) {
        std::cout << "(" << pt.x << ", " << pt.y << ", " << pt.z << ") ";
    }
    cout << endl;

    // Add more markers similarly as needed (see the illustrative helper below)
}
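
// Illustrative sketch (not part of the original sample): the world-corner map can be extended
// with any marker whose placement in the world frame is known. The hypothetical helper below
// registers a marker ID 2 shifted by one marker length along +Y, following the same pattern used
// for markers 0 and 1 above (call it after initMarkerWorldCorners()).
static void addExampleMarker(double markerLengthMeters) {
    std::vector<cv::Point3f> marker2Corners;
    for (const auto& p : aruco::g_markerWorldCorners[0]) {
        marker2Corners.push_back(cv::Point3f(p.x, p.y + static_cast<float>(markerLengthMeters), p.z));
    }
    aruco::g_markerWorldCorners[2] = marker2Corners;
}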

// Sample functions
void run(vector<CameraData> &zeds, ArucoData &arucoData);
void tryReloc(CameraData &it, ArucoData &arucoData);
void displayMarker(CameraData &it, ArucoData &arucoData);
void propagateTransforms(vector<CameraData> &zeds);
void close();

int main(int argc, char **argv) {
    // Set configuration parameters
    InitParameters init_params;
    init_params.camera_resolution = sl::RESOLUTION::HD1080;
    init_params.coordinate_units = UNIT::METER;
    init_params.depth_mode = DEPTH_MODE::NEURAL_PLUS;
    init_params.camera_fps = 30;
    init_params.depth_stabilization = 30;
    init_params.depth_minimum_distance = 1.5f;
    init_params.depth_maximum_distance = 20.0f;
    init_params.camera_image_flip = sl::FLIP_MODE::AUTO;
    init_params.enable_image_enhancement = true;
    init_params.sdk_verbose = true;

    // Detect the number of connected ZED cameras
    auto zed_infos = Camera::getDeviceList();
    const int nb_zeds = zed_infos.size();
    vector<CameraData> zeds(nb_zeds);

    // specify the size of the point cloud
    //Resolution res(512, 288);
    Resolution res(1920, 1080);
    //Resolution res(2208, 1242);
    //Resolution res(1280, 720);

    // define Positional Tracking parameters
    PositionalTrackingParameters tracking_params;
    tracking_params.enable_area_memory = false;
    tracking_params.set_as_static = true;
    tracking_params.enable_pose_smoothing = true;

    ArucoData arucoData;
    arucoData.dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_100);
    //arucoData.marker_length = 0.13f; // real size of the marker's side, in meters
    arucoData.marker_length = 0.20f;   // real size of the marker's side, in meters
    initMarkerWorldCorners(arucoData.marker_length);
    cout << "Make sure the ArUco marker is a 6x6 (100), measuring " << arucoData.marker_length * 1000 << " mm" << endl;

    int nb_zed_open = 0;
    // try to open all the connected cameras
    for (int i = 0; i < nb_zeds; i++) {
        init_params.input.setFromCameraID(zed_infos[i].id);
        ERROR_CODE err = zeds[i].zed.open(init_params);
        if (err == ERROR_CODE::SUCCESS) {
            nb_zed_open++;
            zeds[i].id = i;
            zeds[i].point_cloud.alloc(res, MAT_TYPE::F32_C4, MEM::GPU);

            // retrieve camera calibration information for ArUco detection
            auto camCalib = zeds[i].zed.getCameraInformation().camera_configuration.calibration_parameters.left_cam;
            zeds[i].camera_matrix.at<double>(0, 0) = camCalib.fx;
            zeds[i].camera_matrix.at<double>(1, 1) = camCalib.fy;
            zeds[i].camera_matrix.at<double>(0, 2) = camCalib.cx;
            zeds[i].camera_matrix.at<double>(1, 2) = camCalib.cy;
            zeds[i].camera_matrix.at<double>(2, 2) = 1.0;
            for (int di = 0; di < 5; di++) {
                zeds[i].dist_coeffs.at<double>(0, di) = camCalib.disto[di];
            }

            zeds[i].im_left.alloc(camCalib.image_size, MAT_TYPE::U8_C4);
            zeds[i].im_left_ocv = cv::Mat(zeds[i].im_left.getHeight(), zeds[i].im_left.getWidth(), CV_8UC4, zeds[i].im_left.getPtr<sl::uchar1>(MEM::CPU));
            zeds[i].im_left_ocv_rgb = cv::Mat(zeds[i].im_left.getHeight(), zeds[i].im_left.getWidth(), CV_8UC3);

            // start Positional Tracking (will be reset with the marker position)
            zeds[i].zed.enablePositionalTracking(tracking_params);

            zeds[i].info = "[" + std::to_string(i) + "] " + std::string(toString(zeds[i].zed.getCameraInformation().camera_model).c_str()) + " SN" + std::to_string(zeds[i].zed.getCameraInformation().serial_number);
            cout << "Opening " << zeds[i].info << endl;
        }
    }

    if (nb_zed_open != nb_zeds) {
        cout << "Error: Number of ZED open: " << nb_zed_open << " vs " << nb_zeds << " detected.\n";
        return 1;
    }

    if (nb_zed_open == 0) {
        cout << "Error: No ZED detected\n";
        return 1;
    }

    // initialise the 3D viewer
    viewer.init(argc, argv, res.width, res.height, nb_zeds);

    // Start the camera thread
    quit = false;
    thread zed_callback(run, ref(zeds), ref(arucoData));

    // Set the display callback
    glutCloseFunc(close);
    glutMainLoop();

    // Stop the callback
    if (zed_callback.joinable())
        zed_callback.join();

    // Free the buffers and close the ZEDs
    for (auto &it : zeds)
        it.point_cloud.free();
    for (auto &it : zeds)
        it.zed.close();

    return 0;
}

/**
 This function calculates the positions of all the cameras with respect to the first one
 **/
void propagateTransforms(vector<CameraData> &zeds) {
    sl::Transform ref_aruco;
    sl::Transform ref_cam_pose_gravity;
    bool first_cam = true;

    std::vector<sl::FusionConfiguration> cam_config;
    for (auto &it : zeds) {
        FusionConfiguration config;
        if (first_cam) {
            ref_aruco = it.aruco_transf;
            sl::Pose cam_pose;
            it.zed.getPosition(cam_pose);
            ref_cam_pose_gravity = cam_pose.pose_data;
            first_cam = false;
        } else {
            sl::Transform this_to_ref = sl::Transform::inverse(ref_aruco) * it.aruco_transf;
            sl::Transform cam_pose_gravity = ref_cam_pose_gravity * this_to_ref;
            it.zed.resetPositionalTracking(cam_pose_gravity); // the IMU is directly applied by the SDK
            config.pose = this_to_ref;
        }

        // set the camera configuration
        config.communication_parameters.setForSharedMemory();
        config.serial_number = it.zed.getCameraInformation().serial_number;
        config.input_type.setFromSerialNumber(config.serial_number);
        cam_config.push_back(config);
    }

    // export the camera configuration for use with the Fusion module
    sl::writeConfigurationFile("MultiCamConfig.json", cam_config, COORDINATE_SYSTEM::IMAGE, UNIT::METER);
}
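
// Illustrative sketch (not part of the original sample): the MultiCamConfig.json file written
// above is meant to be read back by the ZED Fusion module, typically in a separate application.
// Assuming a ZED SDK 4.x Fusion API, consuming it could look roughly like the hypothetical
// helper below; exact parameter fields and signatures may differ between SDK versions.
static void fusionConsumerSketch() {
    // read back the configuration exported by propagateTransforms()
    auto configurations = sl::readFusionConfigurationFile("MultiCamConfig.json", COORDINATE_SYSTEM::IMAGE, UNIT::METER);

    sl::InitFusionParameters init_fusion_params;
    init_fusion_params.coordinate_system = COORDINATE_SYSTEM::IMAGE;
    init_fusion_params.coordinate_units = UNIT::METER;

    sl::Fusion fusion;
    fusion.init(init_fusion_params);

    // subscribe to every camera listed in the file, at the relative pose computed from the markers
    for (auto &conf : configurations) {
        sl::CameraIdentifier uuid(conf.serial_number);
        fusion.subscribe(uuid, conf.communication_parameters, conf.pose);
    }
}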

/**
 This function loops to get image and motion data from the ZED. It is similar to a callback.
 Add your own code here.
 **/
void run(vector<CameraData> &zeds, ArucoData &arucoData) {
    RuntimeParameters rt_p;
    // ask for the point clouds in the World frame, so they are all displayed in the same reference frame
    rt_p.measure3D_reference_frame = REFERENCE_FRAME::WORLD;
    rt_p.confidence_threshold = 40;
    bool not_aligned = true;

    while (!quit) {
        bool all_cam_located = true;
        for (auto &it : zeds) { // for each camera
            if (it.zed.grab(rt_p) == ERROR_CODE::SUCCESS) { // grab a new image
                it.zed.retrieveImage(it.im_left);
                displayMarker(it, arucoData);
                if (!it.is_reloc) // if not yet relocalized, look for the ArUco pattern in the left image
                    tryReloc(it, arucoData);
                all_cam_located = all_cam_located && it.is_reloc;
            }
        }

        if (all_cam_located) {
            if (not_aligned) {
                propagateTransforms(zeds);
                not_aligned = false;
            }
            for (auto &it : zeds) { // for each camera
                it.zed.retrieveMeasure(it.point_cloud, MEASURE::XYZRGBA, MEM::GPU, it.point_cloud.getResolution());
                viewer.updatePointCloud(it.point_cloud, it.id);
            }
        }

        if (viewer.askReset()) { // you can force a new relocalization by pressing 'space' in the 3D view
            cout << "Reset asked\n";
            all_cam_located = false;
            not_aligned = true;
            for (auto &it : zeds) {
                it.is_reloc = false;
                sl::Transform path_identity;
                it.zed.resetPositionalTracking(path_identity);
            }
        }

        sleep_ms(1);
    }
}

/**
 * Attempt multi-marker based relocalization; estimate the pose using all detected markers at once
 **/
void tryReloc(CameraData &it, ArucoData &arucoData) {
    // Detect all markers
    vector<int> ids;
    vector<vector<cv::Point2f>> corners;
    cv::cvtColor(it.im_left_ocv, it.im_left_ocv_rgb, cv::COLOR_RGBA2RGB);
    aruco::detectMarkers(it.im_left_ocv_rgb, arucoData.dictionary, corners, ids);

    if (!ids.empty()) {
        cv::Vec3d rvec, tvec;
        // Multi-marker pose estimation using the aggregated correspondences
        bool success = aruco::estimatePoseMultiMarkers(corners, ids, it.camera_matrix, it.dist_coeffs, rvec, tvec);
        if (success) {
            // Convert the pose (rvec, tvec) to an sl::Transform
            sl::Transform pose;
            {
                // Rotation vector to rotation matrix
                cv::Mat R_cv;
                cv::Rodrigues(rvec, R_cv);
                // Pose packed into a 4x4 matrix layout (kept for reference; the transform below is built from rvec/tvec directly)
                float pose_mat[16] = {
                    static_cast<float>(R_cv.at<double>(0, 0)), static_cast<float>(R_cv.at<double>(0, 1)), static_cast<float>(R_cv.at<double>(0, 2)), 0.0f,
                    static_cast<float>(R_cv.at<double>(1, 0)), static_cast<float>(R_cv.at<double>(1, 1)), static_cast<float>(R_cv.at<double>(1, 2)), 0.0f,
                    static_cast<float>(R_cv.at<double>(2, 0)), static_cast<float>(R_cv.at<double>(2, 1)), static_cast<float>(R_cv.at<double>(2, 2)), 0.0f,
                    static_cast<float>(tvec(0)), static_cast<float>(tvec(1)), static_cast<float>(tvec(2)), 1.0f
                };
                pose.setIdentity();
                pose.setTranslation(sl::float3(tvec(0), tvec(1), tvec(2)));
                // Rotate using the Rodrigues rotation vector
                pose.setRotationVector(sl::float3(rvec(0), rvec(1), rvec(2)));
            }
            pose.inverse(); // Invert to get the camera pose expressed in the marker/world frame
            it.aruco_transf = pose;
            it.is_reloc = true;
        }
    }
}

/**
 * Draw the detected markers and the estimated pose for visualization
 **/
void displayMarker(CameraData &it, ArucoData &arucoData) {
    vector<int> ids;
    vector<vector<cv::Point2f>> corners;
    cv::cvtColor(it.im_left_ocv, it.im_left_ocv_rgb, cv::COLOR_RGBA2RGB);
    aruco::detectMarkers(it.im_left_ocv_rgb, arucoData.dictionary, corners, ids);

    if (!ids.empty()) {
        // Use the multi-marker pose estimation to draw the axes, only if it succeeds
        cv::Vec3d rvec, tvec;
        bool success = aruco::estimatePoseMultiMarkers(corners, ids, it.camera_matrix, it.dist_coeffs, rvec, tvec);
        if (success) {
            aruco::drawDetectedMarkers(it.im_left_ocv_rgb, corners, ids, cv::Scalar(236, 188, 26));
            aruco::drawAxis(it.im_left_ocv_rgb, it.camera_matrix, it.dist_coeffs, rvec, tvec, arucoData.marker_length * 0.5f);
        }
    }

    cv::imshow(it.info, it.im_left_ocv_rgb);
    cv::waitKey(10);
}

/**
 * Thread-safe graceful exit
 **/
void close() {
    quit = true;
}
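
// Illustrative sketch (not part of the original sample): aruco::estimatePoseMultiMarkers() is
// provided by the sample's "aruco.hpp" and is not shown in this file. Assuming it aggregates the
// 2D corners of every detected marker with the matching 3D world corners stored in
// g_markerWorldCorners, a minimal implementation could look like the hypothetical function below,
// which solves a single PnP problem over all correspondences with cv::solvePnP.
static bool estimatePoseMultiMarkersSketch(const std::vector<std::vector<cv::Point2f>> &corners,
                                           const std::vector<int> &ids,
                                           const cv::Mat &camera_matrix,
                                           const cv::Mat &dist_coeffs,
                                           cv::Vec3d &rvec, cv::Vec3d &tvec) {
    std::vector<cv::Point3f> object_points; // 3D corners in the world frame
    std::vector<cv::Point2f> image_points;  // matching 2D detections

    for (size_t m = 0; m < ids.size(); m++) {
        auto it = aruco::g_markerWorldCorners.find(ids[m]);
        if (it == aruco::g_markerWorldCorners.end())
            continue; // skip markers whose world placement is unknown
        // detectMarkers() returns 4 corners per marker, in the same order as the stored world corners
        for (size_t c = 0; c < 4; c++) {
            object_points.push_back(it->second[c]);
            image_points.push_back(corners[m][c]);
        }
    }

    if (object_points.size() < 4)
        return false; // not enough correspondences for a pose

    // Single PnP over all aggregated marker corners: rvec/tvec map world points into the camera frame
    return cv::solvePnP(object_points, image_points, camera_matrix, dist_coeffs, rvec, tvec);
}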