Realtime extracting body silhouette from point cloud and body mask

The camera subscription is mandatory for the fusion module. However, you can always fuse the sl.Bodies yourself, without the fusion module.

Hi @alassagne ,

I'm attempting to use the 'network mode on USB' setup, but cannot get it to work and don't see much documentation around it. I wrote a simple prototype without multi-processing, but things aren't working. Would you please help me check whether I've missed anything?

Python:

import pyzed.sl as sl
import os
import time
import cv2

# Path to the fusion configuration JSON: one entry per camera serial number,
# each with its input (SVO file here) and LOCAL_NETWORK fusion endpoint.
config_path = './svo/fusion-single-network.json'

def main():
    """Replay one SVO per camera from the fusion config, publish body
    tracking over the local network, and print the fused body list.

    The config file at ``config_path`` lists, for each camera serial
    number, its SVO input and its LOCAL_NETWORK fusion endpoint.  Each
    camera is opened and starts publishing BEFORE the fusion module
    subscribes to it — the subscription fails otherwise.  Press 'q' in
    any debug window to quit.
    """
    fusion_configurations = sl.read_fusion_configuration_file(
        config_path, sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP, sl.UNIT.METER)

    init_fusion_parameters = sl.InitFusionParameters()
    body_tracking_fusion_params = sl.BodyTrackingFusionParameters()
    body_tracking_runtime_parameters = sl.BodyTrackingRuntimeParameters()
    rt = sl.BodyTrackingFusionRuntimeParameters()

    fusion = sl.Fusion()
    fusion.init(init_fusion_parameters)
    fusion.enable_body_tracking(body_tracking_fusion_params)

    bodies = {}   # per-camera sl.Bodies output buffers, keyed by serial number
    mats = {}     # per-camera sl.Mat image buffers, keyed by serial number
    cameras = {}  # successfully opened sl.Camera instances
    fused = sl.Bodies()

    try:
        for conf in fusion_configurations:
            sn = conf.serial_number
            # SVO recordings live next to the config file, named by serial.
            svopath = os.path.join(os.path.dirname(config_path),
                                   f'SVO_SN{sn}.svo')

            init_params = sl.InitParameters(
                camera_resolution=sl.RESOLUTION.VGA,
                camera_fps=15,
                svo_real_time_mode=True,
                depth_mode=sl.DEPTH_MODE.QUALITY,
                coordinate_units=sl.UNIT.METER,
                coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP,
                open_timeout_sec=5,  # default = 5
            )
            init_params.set_from_svo_file(svopath)  # replay from the recording

            camera = sl.Camera()
            status = camera.open(init_params)
            print('camera.open() result:', status)
            if status != sl.ERROR_CODE.SUCCESS:
                # Don't keep calling SDK functions on a camera that never
                # opened — skip it and move on to the next one.
                continue

            body_tracking_parameters = sl.BodyTrackingParameters()
            body_tracking_parameters.detection_model = sl.BODY_TRACKING_MODEL.HUMAN_BODY_FAST
            body_tracking_parameters.body_format = sl.BODY_FORMAT.BODY_38
            status = camera.enable_body_tracking(body_tracking_parameters)
            print("camera.enable_body_tracking:", status)

            # The camera must be up and publishing BEFORE fusion.subscribe()
            # is called, otherwise the subscription fails.
            communication_parameters = sl.CommunicationParameters()
            # communication_parameters.set_for_shared_memory()
            communication_parameters.set_for_local_network(
                conf.communication_parameters.port,
                conf.communication_parameters.ip_address)
            status = camera.start_publishing(communication_parameters)
            print('camera.start_publishing() result: ', status)

            uuid = sl.CameraIdentifier()
            uuid.serial_number = conf.serial_number
            status = fusion.subscribe(uuid, conf.communication_parameters, conf.pose)
            if status != sl.FUSION_ERROR_CODE.SUCCESS:
                print("-- [error] Unable to subscribe to", uuid.serial_number, status)

            bodies[sn] = sl.Bodies()
            mats[sn] = sl.Mat()
            cameras[sn] = camera
            cv2.namedWindow(f'debug_{sn}')
            time.sleep(1)

        time.sleep(1)

        while True:
            for sn, camera in cameras.items():
                if camera.grab() == sl.ERROR_CODE.SUCCESS:
                    camera.retrieve_bodies(bodies[sn], body_tracking_runtime_parameters)
                    camera.retrieve_image(mats[sn], sl.VIEW.LEFT)
                    resizedImg = cv2.resize(mats[sn].get_data(), (640, 360))
                    cv2.imshow(f'debug_{sn}', resizedImg)

            fuse_status = fusion.process()
            if fuse_status == sl.FUSION_ERROR_CODE.SUCCESS:
                fusion.retrieve_bodies(fused, rt)
                out = [{'id': body_data.id,
                        'status': str(body_data.tracking_state)}
                       for body_data in fused.body_list]
                print('fused bodies ', out)
            else:
                print('fuse failed...', fuse_status)

            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
    finally:
        # Release SDK and GUI resources even if the loop raised.
        for camera in cameras.values():
            camera.close()
        cv2.destroyAllWindows()

# Guard the entry point so importing this module does not open cameras.
if __name__ == '__main__':
    main()

fusion config file:

{
    "20832444": {
        "input": {
            "fusion": {
                "type": "LOCAL_NETWORK",
                "configuration": {
                    "ip": "192.168.1.100",
                    "port": 30000
                }
            },
            "zed": {
                "configuration": "20832444",
                "type": "USB_SERIAL"
            }
        },
        "world": {
            "rotation": [
                0.0,
                0.0,
                0.0
            ],
            "translation": [
                0.0,
                0.0,
                0.0
            ]
        }
    }
}

Thanks a lot!

I still see USB_SERIAL in your fusion configuration. What if you change it?

Hi @alassagne, would you mind telling me what exactly I should put in there? Are there any working examples that I can follow? I did try using SVO_FILE, but the error is the same.

{
    "20832444": {
        "input": {
            "fusion": {
                "type": "LOCAL_NETWORK",
                "configuration": {
                    "ip": "192.168.1.100",
                    "port": 30000
                }
            },
            "zed": {
                "configuration": "C:\\Users\\user\\source\\repos\\itunnel\\itunnel-depth\\svo\\SVO_SN20832444.svo",
                "type": "SVO_FILE"
            }
        },
        "world": {
            "rotation": [
                0.0,
                0.0,
                0.0
            ],
            "translation": [
                0.0,
                0.0,
                0.0
            ]
        }
    }
}

And the other parameters seem even more irrelevant?

Finally sorted it out.

  1. The fusion file should be something like this:
"26601708": {
        "input": {
            "zed": {
                "configuration": "C:\\Users\\user\\source\\repos\\itunnel\\itunnel-depth\\svo\\SVO_SN26601708.svo",
                "type": "SVO_FILE"
            },
            "fusion": {
                "type": "LOCAL_NETWORK",
                "configuration": {
                    "ip": "127.0.0.1",
                    "port": 30002
                }
            }
        },
        "world": {
            "rotation": [
                0,
                0,
                0
            ],
            "translation": [
                0,
                0,
                0
            ]
        }
    },
   ...
  2. The cameras must be up and running and publishing BEFORE running fusion.subscribe().

Can you tell me how you work with point cloud cropping?

Thanks a lot.

Each 3D point in the point cloud corresponds to a 2D pixel. You can run the conversion using these formulas:

With that info, you can either construct the 3D point cloud yourself using only the relevant 2D pixels, or pass a 2D mask as a texture to a GLSL shader and, from there, decide whether to render a point based on the mask pixel.

1 Like