@alassagne
I have made a sender script inspired by your Python example. It works, I get the following confirmation when starting the fusion receiver script:
[RTPSession] Adding Receiver with IP: 10.50.232.191 ID 1386474649
Each Zed box has ZED SDK 4.1 installed and I have also installed ZED SDK 4.1 on Windows computer.
When I run the fusion receiver script (below), the GLViewer opens and I can see the FPS of each camera (image below); however, I can’t see any skeletons. The problem occurs specifically when running the fusion receiver against Windows ZED SDK 4.1.
Running ZED SDK 4.0.8 on the computer and the Edge Body Tracking app on the cameras shows skeleton data (standard setup).
Running ZED SDK 4.0.8 on the computer and the sender script on the cameras does not show skeletons.
Running ZED SDK 4.1 on the computer and the Edge Body Tracking app on the cameras does not show skeletons.
Running ZED SDK 4.1 on the computer and the sender script on the cameras does not show skeletons.
Also, for some reason, the first time I run the script the cameras optimize the BODY_18 model instead of BODY_34, even though BODY_34 is what I have set in the script.
Sender script:
// Standard library
#include <cstdlib>   // EXIT_FAILURE / EXIT_SUCCESS
#include <iostream>  // std::cout (was previously pulled in only transitively)

// ZED SDK
#include <sl/Camera.hpp>
/// Sender: opens the local ZED camera, runs body detection, and publishes the
/// detected bodies on the local network so a Fusion receiver can subscribe.
///
/// Returns EXIT_SUCCESS on clean shutdown (unreachable while the grab loop is
/// infinite) and EXIT_FAILURE when any SDK initialization step fails.
int main() {
    // Create a Camera object and configure it.
    // NOTE(review): coordinate_units / coordinate_system MUST match the
    // receiver's InitFusionParameters, otherwise fused data is inconsistent.
    sl::Camera zed;
    sl::InitParameters init_parameters;
    init_parameters.depth_mode = sl::DEPTH_MODE::ULTRA;
    init_parameters.camera_fps = 30;
    init_parameters.camera_resolution = sl::RESOLUTION::HD1080;
    init_parameters.coordinate_units = sl::UNIT::METER;
    init_parameters.coordinate_system = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP;

    auto state = zed.open(init_parameters);
    if (state != sl::ERROR_CODE::SUCCESS)
    {
        std::cout << "Error with init parameters: " << state << std::endl;
        // BUG FIX: the original `return false;` converts to 0, i.e. it
        // reported SUCCESS to the shell even though opening the camera failed.
        return EXIT_FAILURE;
    }

    // In most body-tracking setups the cameras are static, so positional
    // tracking is enabled with set_as_static to skip motion estimation.
    sl::PositionalTrackingParameters positional_tracking_parameters;
    positional_tracking_parameters.set_as_static = true;
    state = zed.enablePositionalTracking(positional_tracking_parameters);
    if (state != sl::ERROR_CODE::SUCCESS)
    {
        std::cout << "Error with positional tracking: " << state << std::endl;
        return EXIT_FAILURE;  // BUG FIX: was `return false;` (== exit code 0)
    }

    // Define the body tracking parameters. As the fusion module does the
    // tracking and fitting, you don't need to enable them here, unless your
    // app needs the per-camera tracked/fitted output as well.
    sl::BodyTrackingParameters body_tracking_parameters;
    body_tracking_parameters.detection_model = sl::BODY_TRACKING_MODEL::HUMAN_BODY_ACCURATE;
    body_tracking_parameters.body_format = sl::BODY_FORMAT::BODY_34;
    body_tracking_parameters.enable_body_fitting = false;
    body_tracking_parameters.enable_tracking = false;
    body_tracking_parameters.allow_reduced_precision_inference = true;
    state = zed.enableBodyTracking(body_tracking_parameters);
    if (state != sl::ERROR_CODE::SUCCESS)
    {
        std::cout << "Error with body tracking parameters: " << state << std::endl;
        return EXIT_FAILURE;  // BUG FIX: was `return false;` (== exit code 0)
    }

    // Publish the detection results on the local network so the Fusion
    // receiver can subscribe (see the receiver's configuration file).
    // std::string ip = "10.180.69.199";
    int port = 30020;
    sl::CommunicationParameters communication_parameters;
    communication_parameters.setForLocalNetwork(port);
    zed.startPublishing(communication_parameters);

    sl::Bodies bodies;
    sl::BodyTrackingRuntimeParameters body_runtime_parameters;
    body_runtime_parameters.detection_confidence_threshold = 40;

    // As long as grab() and retrieveBodies() (which runs the detection) are
    // called, the camera seamlessly transmits the data to the fusion module.
    while (true) {
        if (zed.grab() == sl::ERROR_CODE::SUCCESS) {
            // Just be sure to run the body detection each frame.
            zed.retrieveBodies(bodies, body_runtime_parameters);
        }
    }

    // Close the camera (unreachable with the infinite loop above; kept for
    // symmetry should the loop gain an exit condition).
    zed.close();
    return EXIT_SUCCESS;
}
Fusion receiver script:
// ZED include
#include "ClientPublisher.hpp"
#include "GLViewer.hpp"
#include "utils.hpp"
#include "SK_Serializer.hpp"
/// Fusion receiver: subscribes to every camera listed in a ZED360-generated
/// configuration file, fuses their body detections, displays them in a 3D
/// viewer, and dumps every fused frame to a timestamped JSON file on exit.
///
/// @param argv[1] path to the fusion configuration file (produced by ZED360).
/// @return EXIT_SUCCESS on clean shutdown, EXIT_FAILURE / 1 on setup errors.
int main(int argc, char **argv) {
    if (argc != 2) {
        // This file should be generated by using the tool ZED360.
        std::cout << "Need a Configuration file in input" << std::endl;
        return 1;
    }

    // Coordinate system and unit used in this sample; every sender must be
    // opened with the same values for the fused poses to be consistent.
    constexpr sl::COORDINATE_SYSTEM COORDINATE_SYSTEM = sl::COORDINATE_SYSTEM::RIGHT_HANDED_Y_UP;
    constexpr sl::UNIT UNIT = sl::UNIT::METER;

    // Read the json file containing the configuration of the multi-camera setup.
    auto configurations = sl::readFusionConfigurationFile(argv[1], COORDINATE_SYSTEM, UNIT);
    if (configurations.empty()) {
        std::cout << "Empty configuration File." << std::endl;
        return EXIT_FAILURE;
    }

    // Initialize the fusion module.
    sl::InitFusionParameters init_params;
    init_params.coordinate_units = UNIT;
    init_params.coordinate_system = COORDINATE_SYSTEM;
    init_params.verbose = true;

    sl::Fusion fusion;
    // BUG FIX: the init return code was ignored — a failed init previously
    // went unnoticed and every later call failed with no diagnostic.
    auto fusion_state = fusion.init(init_params);
    if (fusion_state != sl::FUSION_ERROR_CODE::SUCCESS) {
        std::cout << "Fusion init failed: " << fusion_state << std::endl;
        return EXIT_FAILURE;
    }

    // Subscribe to every camera of the setup to internally gather their data.
    // A subscription needs the serial number, the communication channel
    // (shared memory or local network), and the camera's world pose.
    std::vector<sl::CameraIdentifier> cameras;
    for (auto& it : configurations) {
        sl::CameraIdentifier uuid(it.serial_number);
        auto state = fusion.subscribe(uuid, it.communication_parameters, it.pose);
        if (state != sl::FUSION_ERROR_CODE::SUCCESS)
            std::cout << "Unable to subscribe to " << std::to_string(uuid.sn) << " . " << state << std::endl;
        else
            cameras.push_back(uuid);
    }

    // Check that at least one camera is connected.
    if (cameras.empty()) {
        std::cout << "no connections " << std::endl;
        return EXIT_FAILURE;
    }

    // This sample fuses body detections from the multi-camera setup, so
    // enable the Body Tracking fusion module with its options.
    sl::BodyTrackingFusionParameters body_fusion_init_params;
    body_fusion_init_params.enable_tracking = true;
    body_fusion_init_params.enable_body_fitting = true; // skeletons look more natural but require more computation
    // BUG FIX: the return code was ignored — if body tracking fusion fails to
    // enable, no skeleton will ever be produced, so report it explicitly.
    fusion_state = fusion.enableBodyTracking(body_fusion_init_params);
    if (fusion_state != sl::FUSION_ERROR_CODE::SUCCESS) {
        std::cout << "Enabling body tracking fusion failed: " << fusion_state << std::endl;
        return EXIT_FAILURE;
    }

    // Define the fusion runtime behavior.
    sl::BodyTrackingFusionRuntimeParameters body_tracking_runtime_parameters;
    // Be sure that the detected skeleton is complete enough.
    body_tracking_runtime_parameters.skeleton_minimum_allowed_keypoints = 7;
    // Only retrieve skeletons seen by multiple cameras, here at least 2.
    // NOTE(review): the original comment says "at least half of them" — use
    // cameras.size() / 2 instead of the hard-coded 2 if that is the intent.
    body_tracking_runtime_parameters.skeleton_minimum_allowed_camera = 2; // cameras.size() / 2.;

    // Creation of a 3D viewer.
    GLViewer viewer;
    viewer.init(argc, argv);
    std::cout << "Viewer Shortcuts\n" <<
        "\t- 'r': switch on/off for raw skeleton display\n" <<        // typo fix: was "swicth"
        "\t- 'p': switch on/off for live point cloud display\n" <<
        "\t- 'c': switch on/off point cloud display with flat color\n" << std::endl;

    // Fusion outputs.
    sl::Bodies fused_bodies;
    std::map<sl::CameraIdentifier, sl::Bodies> camera_raw_data;
    sl::FusionMetrics metrics;
    std::map<sl::CameraIdentifier, sl::Mat> views;
    std::map<sl::CameraIdentifier, sl::Mat> pointClouds;
    sl::Resolution low_res(512, 360);

    // JSON output container, keyed by fused-frame timestamp in milliseconds.
    nlohmann::json bodies_json_file;
    bool new_data = false;
    sl::Timestamp ts_new_data = sl::Timestamp(0);

    // Run the fusion as long as the viewer is available.
    while (viewer.isAvailable()) {
        try {
            // Run the fusion process (gathers data from all cameras, syncs and processes them).
            if (fusion.process() == sl::FUSION_ERROR_CODE::SUCCESS) {
                // Retrieve the fused bodies.
                fusion.retrieveBodies(fused_bodies, body_tracking_runtime_parameters);

                // For debug, you can retrieve the data sent by each camera.
                for (auto& id : cameras) {
                    fusion.retrieveBodies(camera_raw_data[id], body_tracking_runtime_parameters, id);
                    sl::Pose pose;
                    if (fusion.getPosition(pose, sl::REFERENCE_FRAME::WORLD, id, sl::POSITION_TYPE::RAW) == sl::POSITIONAL_TRACKING_STATE::OK)
                        viewer.setCameraPose(id.sn, pose.pose_data);
                    auto state_view = fusion.retrieveImage(views[id], id, low_res);
                    auto state_pc = fusion.retrieveMeasure(pointClouds[id], id, sl::MEASURE::XYZBGRA, low_res);
                    if (state_view == sl::FUSION_ERROR_CODE::SUCCESS && state_pc == sl::FUSION_ERROR_CODE::SUCCESS)
                        viewer.updateCamera(id.sn, views[id], pointClouds[id]);
                }

                // Get metrics about the fusion process for monitoring purposes.
                fusion.getProcessMetrics(metrics);
            }

            // Update the 3D view.
            viewer.updateBodies(fused_bodies, camera_raw_data, metrics);
            //std::this_thread::sleep_for(std::chrono::microseconds(10));

            // Serialize detected bodies into the json container.
            bodies_json_file[std::to_string(fused_bodies.timestamp.getMilliseconds())] = sk::serialize(fused_bodies);
        } catch (std::exception &e) {
            std::cerr << "Error: " << e.what() << std::endl;
            break;
        }
    }

    // Save the collected data as detected_bodies_<timestamp>.json.
    std::string outfileName("detected_bodies_" + std::to_string(fused_bodies.timestamp.getMilliseconds()) + ".json");
    if (bodies_json_file.size())
    {
        std::ofstream file_out(outfileName);
        file_out << std::setw(4) << bodies_json_file << std::endl;
        file_out.close();
        std::cout << "Successfully saved the body data to " + outfileName << std::endl;
    }

    viewer.exit();
    fusion.close();
    return EXIT_SUCCESS;
}
GLViewer:
EDIT: Printing body format and number of bodies shows no skeletons were detected by the sender:
std::cout << "Body format: " << bodies.body_format << "" << std::endl;
std::cout << "Number of bodies detected: " << bodies.body_list.size() << " bodies" << std::endl;
Body format: BODY_34
Number of bodies detected: 0 bodies
Body format: BODY_34
Number of bodies detected: 0 bodies