Error in fusion.camera_to_geo()

I am attempting to convert the coordinates of a detected object from the camera's local frame to geographic coordinates. This approach relies on the fusion module's fusion.camera_to_geo method.

Am I missing something critical to use camera_to_geo?

The geopose produced by this method is way off from what I expect. To test it, I fed the pose from fusion.get_position into fusion.camera_to_geo and validated the resulting geopose against fusion.get_geo_pose:

Data:

https://drive.google.com/file/d/1XxqANF3J-y4Q6GYiCN7eC22UHCCcZ8Kr/view?usp=sharing

Subset of code:

current_state = self.fusion.get_position(self.current_pose)

if current_state == sl.POSITIONAL_TRACKING_STATE.OK:
    print(f"Pose Confidence {self.current_pose.pose_confidence}")
    image_cam = {
        "rgb": cv2.cvtColor(left_img.numpy(), cv2.COLOR_RGBA2RGB),
        "depth": depth_image.numpy(),
        "model": camera_model,
        "timestamp": ts_camera.get_nanoseconds() / 1e9,
        "pose": self.current_pose
    }
    image_geopose = sl.GeoPose()
    current_geopose_status = self.fusion.get_geo_pose(image_geopose)

    if current_geopose_status == sl.GNSS_FUSION_STATUS.OK:
        test_gp = sl.GeoPose()
        self.fusion.camera_to_geo(self.current_pose, test_gp)
        lat_test, lon_test, elev_test = test_gp.latlng_coordinates.get_coordinates(False)
        lat, lon, elev = image_geopose.latlng_coordinates.get_coordinates(False)
        print(f"Offset Lat: {lat_test-lat:.8f}, Offset Lon: {lon_test-lon:.8f}, Offset Elev: {elev_test-elev:.5f}")

Full code:

import cv2
import numpy as np
import pyzed.sl as sl
from typing import List
import json
import logging
import pandas as pd
from datetime import datetime
from shapely.geometry import Point

logger = logging.getLogger(__name__)

# Perceptree, Config, and StemMap are project-specific classes (their imports
# are not shown here).
class Observer:
    def __init__(self, zed_config_path: str) -> None:
        print(f"ZED Config Path: {zed_config_path}")
        self.model = Perceptree()
        self.zed_config = Config(zed_config_path)
        
        self.fusion = None
        
    def gnss_str_to_pyzed(self, data: str) -> sl.GNSSData:
        # Parse a JSON GNSS record into an sl.GNSSData object.
        data = json.loads(data)
        gps_data = sl.GNSSData()

        sl_fix_mode = sl.GNSS_MODE.FIX_3D
        sl_status = sl.GNSS_STATUS.SINGLE
        gps_data.gnss_mode = sl_fix_mode.value
        gps_data.gnss_status = sl_status.value

        # Convert the ISO timestamp to an sl.Timestamp in nanoseconds.
        date_time = datetime.fromisoformat(data["timestamp"])
        zed_ts = sl.Timestamp()
        zed_ts.set_nanoseconds(int(date_time.timestamp() * 1e9))
        gps_data.ts = zed_ts
        gps_data.set_coordinates(data["lat"], data["lon"], data["elevation"], False)
        covariances = json.loads(data["covariance"])
        # Square root of each covariance entry (i.e. standard deviations).
        gps_data.position_covariances = [float(x) ** .5 for x in covariances]
        return gps_data

    def xyz_to_georectified(self, xyz) -> List[List[float]]:
        # Geo-rectify local XYZ points by substituting each point as the
        # translation of a pose and passing it through camera_to_geo.
        georectified_coords = []
        pose = sl.Pose()
        pose.init_pose(self.current_pose)
        geopose = sl.GeoPose()
        for point in xyz:
            translation = sl.Translation()
            transform = sl.Transform()

            translation.init_vector(point[0], point[1], point[2])
            transform.set_translation(translation)
            pose.init_transform(transform)

            status = self.fusion.camera_to_geo(pose, geopose)

            lat, lon, elevation = geopose.latlng_coordinates.get_coordinates(False)
            georectified_coords.append([lon, lat, elevation])

        return georectified_coords
    
    def read_video(self, svo_path: str, stop=np.inf) -> List[dict]:

        zed_pose = sl.Pose()

        init_params = sl.InitParameters(depth_mode=sl.DEPTH_MODE.ULTRA,
                                        coordinate_units=sl.UNIT.METER,
                                        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

        init_params.set_from_svo_file(svo_path)

        # create the camera that will input the position from its odometry
        zed = sl.Camera()
        status = zed.open(init_params)
        if status != sl.ERROR_CODE.SUCCESS:
            print("[ZED][ERROR] Camera Open : " + repr(status) + ". Exit program.")
            exit()
        # Enable positional tracking:

        positional_init = zed.enable_positional_tracking()
        if positional_init != sl.ERROR_CODE.SUCCESS:
            print("[ZED][ERROR] Can't start tracking of camera : " + repr(status) + ". Exit program.")
            exit()

        camera_info: sl.CameraInformation = zed.get_camera_information()
        params: sl.CameraParameters = camera_info.camera_configuration.calibration_parameters.left_cam

        camera_model = {
            "f_row": params.fy,
            "f_col": params.fx,
            "c_row": params.cy,
            "c_col": params.cx
        }
        # Display
        display_resolution = sl.Resolution(1280, 720)
        left_img = sl.Mat()
        depth_image = sl.Mat()
        self.current_pose = sl.Pose()

        self.fusion = sl.Fusion()
        init_fusion_param = sl.InitFusionParameters()
        init_fusion_param.coordinate_units = sl.UNIT.METER
        init_fusion_param.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Z_UP
        init_fusion_param.verbose = True

        fusion_init_code = self.fusion.init(init_fusion_param)
        if fusion_init_code != sl.FUSION_ERROR_CODE.SUCCESS:
            print("[ZED][ERROR] Failed to initialize fusion :" + repr(fusion_init_code) + ". Exit program")
            exit()
        
        # Enable odometry publishing:
        configuration = sl.CommunicationParameters()
        zed.start_publishing(configuration)

        uuid = sl.CameraIdentifier(zed.get_camera_information().serial_number)
        self.fusion.subscribe(uuid, configuration, sl.Transform(0, 0, 0))

        # Enable positional tracking for Fusion object
        positional_tracking_fusion_parameters = sl.PositionalTrackingFusionParameters()
        positional_tracking_fusion_parameters.enable_GNSS_fusion = True
        gnss_calibration_parameters = sl.GNSSCalibrationParameters()
        gnss_calibration_parameters.target_yaw_uncertainty = 7e-3
        gnss_calibration_parameters.enable_translation_uncertainty_target = True
        gnss_calibration_parameters.target_translation_uncertainty = 15e-2
        gnss_calibration_parameters.enable_reinitialization = False
        gnss_calibration_parameters.gnss_vio_reinit_threshold = 5
        gnss_calibration_parameters.gnss_antenna_position = np.asarray([0.0, 0.0, 0.0])
        positional_tracking_fusion_parameters.gnss_calibration_parameters = gnss_calibration_parameters

        self.fusion.enable_positionnal_tracking(positional_tracking_fusion_parameters)

        start = 0
        # depth_distance: ~20 m expressed in degrees (1° latitude ≈ 111111 m)
        stem_map = StemMap({"depth_distance": 20 / 111111, "fov_h": 45})

        last_timestamp = sl.Timestamp()
        new_trees = pd.DataFrame()
        gps_measurements = []
        positions = []
        to_cv = np.zeros((720,1280))
        is_running = True
        while is_running:
            # Grab once per iteration; calling grab() in both branches would
            # skip every other frame.
            grab_status = zed.grab()
            if grab_status == sl.ERROR_CODE.SUCCESS:
                zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
                zed.retrieve_image(left_img, sl.VIEW.LEFT, sl.MEM.CPU, display_resolution)
                zed.retrieve_measure(depth_image, sl.MEASURE.DEPTH, sl.MEM.CPU, display_resolution)
                ts_camera = zed.get_timestamp(sl.TIME_REFERENCE.IMAGE)
                #ts_camera.set_nanoseconds(int(ts_camera.get_nanoseconds() + 5e6))
                to_cv = cv2.cvtColor(left_img.numpy(), cv2.COLOR_RGBA2RGB)

            elif grab_status == sl.ERROR_CODE.END_OF_SVOFILE_REACHED:
                print("End of SVO file.")
                is_running = False
                break

            # Ingest every GNSS measurement recorded between the previous
            # frame and the current one.
            data_map = {}
            zed.retrieve_svo_data("GNSS", data_map, last_timestamp, ts_camera)
            for _, data in data_map.items():
                data_string = data.get_content_as_string()
                gps_measurement = self.gnss_str_to_pyzed(data_string)
                ingest_status = self.fusion.ingest_gnss_data(gps_measurement)
            last_timestamp = ts_camera

            if self.fusion.process() == sl.FUSION_ERROR_CODE.SUCCESS:

                # Get the fused pose of the image from the camera.
                current_state = self.fusion.get_position(self.current_pose)

                if current_state == sl.POSITIONAL_TRACKING_STATE.OK:
                    print(f"Pose Confidence {self.current_pose.pose_confidence}")
                    image_cam = {
                        "rgb": cv2.cvtColor(left_img.numpy(), cv2.COLOR_RGBA2RGB),
                        "depth": depth_image.numpy(),
                        "model": camera_model,
                        "timestamp": ts_camera.get_nanoseconds() / 1e9,
                        "pose": self.current_pose
                    }
                    image_geopose = sl.GeoPose()
                    current_geopose_status = self.fusion.get_geo_pose(image_geopose)

                    if current_geopose_status == sl.GNSS_FUSION_STATUS.OK:
                        # Compare camera_to_geo against the geopose reported by
                        # get_geo_pose; the offsets should be ~zero.
                        test_gp = sl.GeoPose()
                        self.fusion.camera_to_geo(self.current_pose, test_gp)
                        lat_test, lon_test, elev_test = test_gp.latlng_coordinates.get_coordinates(False)
                        lat, lon, elev = image_geopose.latlng_coordinates.get_coordinates(False)
                        print(f"Offset Lat: {lat_test-lat:.8f}, Offset Lon: {lon_test-lon:.8f}, Offset Elev: {elev_test-elev:.5f}")
                        x, y, z = self.current_pose.get_translation().get().tolist()
                        # Convert heading from radians to compass degrees.
                        heading = np.rad2deg(image_geopose.heading)
                        heading = (360 - heading) % 360
                        _, yaw_std, position_std = self.fusion.get_current_gnss_calibration_std()
                        print(f"Latitude: {lat:.8f}, Longitude: {lon:.8f}, Altitude: {elev:.5f}, Heading: {heading:.5f}, Yaw std: {yaw_std:.5f}, Position std: {position_std[0]:.5f}, {position_std[1]:.5f}, {position_std[2]:.5f}")
                        # Only record positions once the yaw calibration has converged.
                        if abs(yaw_std) < 1:
                            positions.append(
                                {
                                    "geometry": Point(lon, lat, elev),
                                    "x": x,
                                    "y": y,
                                    "z": z,
                                    "heading": heading,
                                    "time": datetime.fromtimestamp(ts_camera.get_seconds()).isoformat()
                                }
                            )

        return positions

Hello @fdunbar

Could you reproduce your issue with the official SDK sample?

Regards,
Tanguy

This is about as close as I can get to the official SDK sample. From what I have seen, there are no equivalent examples of the fusion.camera_to_geo method being used to geo-rectify a translated position in the local coordinate reference frame. Going by the Python SDK documentation, lat_test and lat should be equal in value.

Hello @fdunbar
Indeed, there is an issue with this function; it will be fixed in the next release. In the meantime, here is how you can do the work without it. The function simply applies the transformation found by the GNSS/VIO calibration, so you can retrieve that calibration from the API with get_geo_tracking_calibration and apply it yourself.
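
For illustration, a minimal sketch of that idea (this is not the attached sample; the return signature of get_geo_tracking_calibration and the composition order are assumptions to check against your SDK version):

import numpy as np
import pyzed.sl as sl

def apply_geo_calibration(fusion: sl.Fusion, pose: sl.Pose) -> np.ndarray:
    # Assumption: get_geo_tracking_calibration returns the GNSS/VIO
    # calibration as an sl.Transform.
    calibration = fusion.get_geo_tracking_calibration()

    # sl.Transform derives from sl.Matrix4f, so .m exposes the raw 4x4
    # matrix as a numpy array.
    calib_mat = np.array(calibration.m)
    pose_mat = np.array(pose.pose_data(sl.Transform()).m)

    # Apply the calibration to the camera pose; depending on the SDK
    # convention the composition order may need to be reversed
    # (pose_mat @ calib_mat).
    geo_mat = calib_mat @ pose_mat

    # The last column holds the position in the local ENU frame anchored
    # at the calibration reference point. Converting it to latitude /
    # longitude still requires that ENU origin, which is what the
    # attached sample takes care of.
    return geo_mat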

I attach the modified sample adapted for your data at the end of this message.

Important note: this code will only work for positions in COORDINATE_SYSTEM::IMAGE. You can transform your position into the coordinate system of your choice by using the appropriate change-of-basis matrix, as sketched below.
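
For instance, a hypothetical helper mapping a point from RIGHT_HANDED_Z_UP (X right, Y forward, Z up) into IMAGE coordinates (X right, Y down, Z forward); verify the axis conventions against the documentation for your SDK version:

import numpy as np

# Change of basis from RIGHT_HANDED_Z_UP to IMAGE.
Z_UP_TO_IMAGE = np.array([
    [1.0, 0.0,  0.0],   # right stays right
    [0.0, 0.0, -1.0],   # down is the negated up axis
    [0.0, 1.0,  0.0],   # forward is the old Y (forward) axis
])

def z_up_to_image(point_z_up):
    return Z_UP_TO_IMAGE @ np.asarray(point_z_up)

# Example: 1 m forward and 2 m up in Z_UP becomes (0, -2, 1) in IMAGE.
print(z_up_to_image([0.0, 1.0, 2.0]))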

Regards,
Tanguy
playback_sample.zip (55.1 KB)
