Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add the ZED Wrapper and Object Detector #3

Merged
merged 96 commits into from
Sep 19, 2024
Merged
Show file tree
Hide file tree
Changes from 79 commits
Commits
Show all changes
96 commits
Select commit Hold shift + click to select a range
7782fff
starter files
jbrhm Sep 2, 2024
c244165
starter code
jbrhm Sep 2, 2024
ad2d273
finding zed sdk
jbrhm Sep 2, 2024
3d175e8
building zed sdk
jbrhm Sep 2, 2024
b2b97f7
y no build
jbrhm Sep 2, 2024
f197dcd
gets parameters
jbrhm Sep 3, 2024
418b537
gets more parameters
jbrhm Sep 3, 2024
267e919
why no callback
jbrhm Sep 3, 2024
7df671c
variants :)))))
jbrhm Sep 3, 2024
d861946
logic error
jbrhm Sep 3, 2024
288f67e
more params
jbrhm Sep 3, 2024
162ccbc
most init done
jbrhm Sep 3, 2024
36cd821
no build :(
jbrhm Sep 3, 2024
12e0638
almost builds
jbrhm Sep 3, 2024
560ec18
builds :)))))
jbrhm Sep 3, 2024
0a54992
wat svo?
jbrhm Sep 3, 2024
89147ac
builds
jbrhm Sep 5, 2024
0dbf0bb
AHHHHHHHH POINTERS AHHHHHHHHH
jbrhm Sep 5, 2024
e884847
STD MOVE FTW
jbrhm Sep 5, 2024
0d6a84e
undefined symbols :(
jbrhm Sep 5, 2024
ec4765e
undefined symbols :(
jbrhm Sep 5, 2024
a907977
Builds
jbrhm Sep 5, 2024
7209434
Runs
jbrhm Sep 5, 2024
5507127
Better includes
jbrhm Sep 5, 2024
a2bd36c
Works
jbrhm Sep 5, 2024
2499c37
62 Hz
jbrhm Sep 8, 2024
ffc00d7
begin porting over obj detect
jbrhm Sep 8, 2024
1a7eba6
more errors
jbrhm Sep 8, 2024
e848112
learning lib builds
jbrhm Sep 8, 2024
c553f19
brah
jbrhm Sep 8, 2024
904ae74
bruhhhh
jbrhm Sep 12, 2024
295ae21
works
jbrhm Sep 12, 2024
051f860
better type safety
jbrhm Sep 13, 2024
4efccaa
got rid of compiler warnings
jbrhm Sep 14, 2024
2e6e95a
merged in master
jbrhm Sep 14, 2024
d23eb77
starter files
jbrhm Sep 2, 2024
b5cb2b5
starter code
jbrhm Sep 2, 2024
9ad086e
finding zed sdk
jbrhm Sep 2, 2024
73a12ce
building zed sdk
jbrhm Sep 2, 2024
b7eac39
y no build
jbrhm Sep 2, 2024
385bf85
gets paramaters
jbrhm Sep 3, 2024
9ca6322
gets more aprameters
jbrhm Sep 3, 2024
1bec7f5
why no callback
jbrhm Sep 3, 2024
f16bfa6
variants :)))))
jbrhm Sep 3, 2024
bf9a8b6
logic error
jbrhm Sep 3, 2024
fa31363
more params
jbrhm Sep 3, 2024
c8ed85d
most init done
jbrhm Sep 3, 2024
523c6e3
no build :(
jbrhm Sep 3, 2024
fab8db8
almost builds
jbrhm Sep 3, 2024
a2a9713
builds :)))))
jbrhm Sep 3, 2024
0e373f2
wat svo?
jbrhm Sep 3, 2024
35342c1
builds
jbrhm Sep 5, 2024
fbd4f32
AHHHHHHHH POINTERS AHHHHHHHHH
jbrhm Sep 5, 2024
4767e6d
STD MOVE FTW
jbrhm Sep 5, 2024
a43cec3
undefined symbols :(
jbrhm Sep 5, 2024
30003d6
undefined symbols :(
jbrhm Sep 5, 2024
ce4ef89
Builds
jbrhm Sep 5, 2024
c76de69
Runs
jbrhm Sep 5, 2024
86e74c8
Better includes
jbrhm Sep 5, 2024
9770d38
Works
jbrhm Sep 5, 2024
b2a97d7
62 Hz
jbrhm Sep 8, 2024
09704e9
begin porting voer obj detect
jbrhm Sep 8, 2024
f64d949
more errors
jbrhm Sep 8, 2024
8b19c94
learning lib builds
jbrhm Sep 8, 2024
3d1824d
brah
jbrhm Sep 8, 2024
44c679e
bruhhhh
jbrhm Sep 12, 2024
fc35e3a
works
jbrhm Sep 12, 2024
a7f55e2
better type safety
jbrhm Sep 13, 2024
e14237b
got rid of compiler warnings
jbrhm Sep 14, 2024
5728d67
Update zed_wrapper.bridge.cpp
jbrhm Sep 14, 2024
e438c2a
Updated CUDA
jbrhm Sep 15, 2024
14745ae
Updated CUDA
jbrhm Sep 15, 2024
eb7471b
Updated CUDA
jbrhm Sep 15, 2024
4e78274
Update zed_wrapper.bridge.cpp
jbrhm Sep 15, 2024
ba98358
CLANG FORMAT IS CRACCCCCCKKKKKKED bro :))))))))))))
jbrhm Sep 15, 2024
7cb4218
CLANG FORMAT IS CRACCCCCCKKKKKKED bro :))))))))))))
jbrhm Sep 15, 2024
f5d09a3
CLANG FORMAT IS CRACCCCCCKKKKKKED bro :))))))))))))
jbrhm Sep 15, 2024
6241883
CLANG FORMAT IS CRACCCCCCKKKKKKED bro :))))))))))))
jbrhm Sep 15, 2024
4a37c44
BLACK
jbrhm Sep 15, 2024
5fa616d
builds
jbrhm Sep 15, 2024
06c768f
no engine
jbrhm Sep 15, 2024
c3c555d
redundant
jbrhm Sep 15, 2024
4026fc9
Merge branch 'main' into JRA/ZED-Wrapper
jbrhm Sep 15, 2024
8ef1e63
Ashwin Suggestions
jbrhm Sep 16, 2024
beb2e9d
No more engine
jbrhm Sep 17, 2024
bfc4c79
Merge branch 'main' into JRA/ZED-Wrapper
jbrhm Sep 17, 2024
60be043
Updating compiler flags
jbrhm Sep 18, 2024
755f093
Final Refactoring
jbrhm Sep 18, 2024
f74ed16
od still works
jbrhm Sep 19, 2024
85eeecc
zed default params
jbrhm Sep 19, 2024
578f3bd
od default params
jbrhm Sep 19, 2024
6feb1d4
Works with new parameters
jbrhm Sep 19, 2024
2f3bb3d
Better pathing on zed launch
jbrhm Sep 19, 2024
fe6c546
Better launch file configuration
jbrhm Sep 19, 2024
a9ef7c3
Merge branch 'main' into JRA/ZED-Wrapper
jbrhm Sep 19, 2024
a990c0a
works
jbrhm Sep 19, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 42 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ project(mrover VERSION 2025.0.0 LANGUAGES C CXX)
# Supress ROS CMake warning about Python.
cmake_policy(SET CMP0148 OLD)

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
jbrhm marked this conversation as resolved.
Show resolved Hide resolved

set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Generate compile_commands.json for clangd language server.
Expand Down Expand Up @@ -210,6 +212,7 @@ ament_target_dependencies(lie rclcpp geometry_msgs tf2 tf2_ros)

mrover_add_header_only_library(units units)
mrover_add_header_only_library(loop_profiler loop_profiler)
mrover_add_header_only_library(parameter_utils parameter_utils)

# Simulator

Expand All @@ -226,6 +229,45 @@ target_include_directories(simulator SYSTEM PRIVATE ${BULLET_INCLUDE_DIRS} ${OPE

# Perception

find_package(ZED QUIET)
find_package(CUDA QUIET)
find_package(OpenCV REQUIRED)
if(ZED_FOUND AND CUDA_FOUND)
enable_language(CUDA)
# CUDA Compile Options
add_library(cuda_compiler_flags INTERFACE)
target_compile_options(cuda_compiler_flags INTERFACE
-Wno-pedantic
-Wno-deprecated
-Wno-unused-parameter
-diag-suppress=815
-diag-suppress=780
-Wno-deprecated-copy
-Wno-unused-command-line-argument
-Wno-ignored-qualifiers
-Wno-sometimes-uninitialized
)

# ZED Wrapper
mrover_add_node(zed perception/zed_wrapper/*.c* perception/zed_wrapper/pch.hpp)
target_link_libraries(zed parameter_utils lie MANIF::manif ${CUDA_LIBRARIES} loop_profiler cuda_compiler_flags)
ament_target_dependencies(zed rclcpp sensor_msgs ZED CUDA tf2 tf2_ros)

# Learning Library
# TODO(john): Update to use the new API
mrover_add_library(_learning learning/*.c* learning)
target_compile_options(_learning PRIVATE -Wno-deprecated-declarations -std=c++17)
target_include_directories(_learning PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
target_link_libraries(_learning PRIVATE opencv_core opencv_dnn opencv_imgproc lie nvinfer nvonnxparser tbb cuda_compiler_flags)

# Object Detector
mrover_add_node(object_detector perception/object_detector/*.c* src/perception/object_detector/pch.hpp)
target_link_libraries(object_detector opencv_core opencv_dnn opencv_imgproc lie tbb _learning opencv_imgcodecs opencv_highgui loop_profiler parameter_utils cuda_compiler_flags)
ament_target_dependencies(object_detector rclcpp sensor_msgs CUDA tf2 tf2_ros)
else()
message("ZED not found...")
endif()

mrover_add_node(tag_detector perception/tag_detector/*.cpp perception/tag_detector/pch.hpp)
ament_target_dependencies(tag_detector rclcpp tf2 tf2_ros)
target_link_libraries(tag_detector lie opencv_core opencv_aruco opencv_imgproc loop_profiler)
Expand Down
6 changes: 5 additions & 1 deletion build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,14 @@ set -euxo pipefail
# Build in the colcon workspace, not the package
pushd ../..

# set C/CXX compilers
# Set C/C++ compilers
export CC=clang
export CXX=clang++

# Set CUDA compilers
export CUDAHOSTCXX=g++-9
export CUDACXX=/usr/local/cuda-12.3/bin/nvcc

# TODO (ali): add build configs for debug vs release
colcon build \
--cmake-args -G Ninja -W no-dev -DCMAKE_BUILD_TYPE=RelWithDebInfo \
Expand Down
18 changes: 18 additions & 0 deletions config/object_detector.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@

# All units are in SI
# ===================
# Time: second, hz
# Angle: radian
# Distance: meter

/object_detector:
ros__parameters:
camera_frame: "zed_left_camera_frame"
world_frame: "map"
increment_weight: 2
decrement_weight: 1
hitcount_threshold: 5
hitcount_max: 10
model_name: "Large-Dataset"
model_score_threshold: 0.75
model_nms_threshold: 0.5
22 changes: 22 additions & 0 deletions config/zed.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# All units are in SI
# ===================
# Time: second, hz
# Angle: radian
# Distance: meter

/zed_wrapper:
ros__parameters:
depth_confidence: 70
serial_number: -1
grab_target_fps: 60
texture_confidence: 100
image_width: 1280
image_height: 720
use_depth_stabilization: false
grab_resolution: "HD720"
depth_mode: "PERFORMANCE"
depth_maximum_distance: 12.0
use_builtin_visual_odom: false
use_pose_smoothing: true
use_area_memory: true
sv0_file: ""
3 changes: 3 additions & 0 deletions data/Large-Dataset.onnx
Git LFS file not shown
3 changes: 3 additions & 0 deletions data/tensorrt-engine-Large-Dataset.onnx.engine
Git LFS file not shown
28 changes: 28 additions & 0 deletions launch/object_detector.launch.py
jbrhm marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import os

from ament_index_python import get_package_share_directory

from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node, ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
from launch.conditions import LaunchConfigurationEquals


def generate_launch_description():
zed_node = Node(
package="mrover",
executable="zed",
name="zed_wrapper",
parameters=[os.path.join(get_package_share_directory("mrover"), "config", "zed.yaml")],
jbrhm marked this conversation as resolved.
Show resolved Hide resolved
)

object_detector_node = Node(
package="mrover",
executable="object_detector",
name="object_detector",
parameters=[os.path.join(get_package_share_directory("mrover"), "config", "object_detector.yaml")],
)

return LaunchDescription([zed_node, object_detector_node])
21 changes: 21 additions & 0 deletions launch/zed.launch.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import os

from ament_index_python import get_package_share_directory

from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node, ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
from launch.conditions import LaunchConfigurationEquals


def generate_launch_description():
zed_node = Node(
package="mrover",
executable="zed",
name="zed_wrapper",
parameters=[os.path.join(get_package_share_directory("mrover"), "config", "zed.yaml")],
jbrhm marked this conversation as resolved.
Show resolved Hide resolved
)

return LaunchDescription([zed_node])
172 changes: 172 additions & 0 deletions learning/inference.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,172 @@
#include "inference.cuh"

using namespace nvinfer1;

#include <NvOnnxParser.h>

/**
* cudaMemcpys CPU memory in inputTensor to GPU based on bindings
* Queues that tensor to be passed through model
* cudaMemcpys the result back to CPU memory
* Requires bindings, inputTensor, stream
* Modifies stream, outputTensor
*/
constexpr static auto INPUT_BINDING_NAME = "images";
constexpr static auto OUTPUT_BINDING_NAME = "output0";

Inference::Inference(std::filesystem::path const& onnxModelPath, std::string const& modelName, std::string packagePathString) : mPackagePath{std::move(packagePathString)} {
mModelPath = onnxModelPath.string();

// Create the engine object from either the file or from onnx file
mEngine = std::unique_ptr<ICudaEngine>{createCudaEngine(onnxModelPath, modelName)};
if (!mEngine) throw std::runtime_error("Failed to create CUDA engine");

mLogger.log(ILogger::Severity::kINFO, "Created CUDA Engine");

// Check some assumptions about the model
if (mEngine->getNbIOTensors() != 2) throw std::runtime_error("Invalid Binding Count");
if (mEngine->getTensorIOMode(INPUT_BINDING_NAME) != TensorIOMode::kINPUT) throw std::runtime_error("Expected Input Binding 0 Is An Input");
if (mEngine->getTensorIOMode(OUTPUT_BINDING_NAME) != TensorIOMode::kOUTPUT) throw std::runtime_error("Expected Input Binding Input To Be 1");

createExecutionContext();

prepTensors();
}

auto Inference::createCudaEngine(std::filesystem::path const& onnxModelPath, std::string const& modelName) -> ICudaEngine* {
constexpr auto explicitBatch = 1U << static_cast<std::uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);

std::unique_ptr<IBuilder> builder{createInferBuilder(mLogger)};
if (!builder) throw std::runtime_error("Failed to create Infer Builder");
mLogger.log(ILogger::Severity::kINFO, "Created Infer Builder");

std::unique_ptr<INetworkDefinition> network{builder->createNetworkV2(explicitBatch)};
if (!network) throw std::runtime_error("Failed to create Network Definition");
mLogger.log(ILogger::Severity::kINFO, "Created Network Definition");

std::unique_ptr<nvonnxparser::IParser> parser{nvonnxparser::createParser(*network, mLogger)};
if (!parser) throw std::runtime_error("Failed to create ONNX Parser");
mLogger.log(ILogger::Severity::kINFO, "Created ONNX Parser");

std::unique_ptr<IBuilderConfig> config{builder->createBuilderConfig()};
if (!config) throw std::runtime_error("Failed to create Builder Config");
mLogger.log(ILogger::Severity::kINFO, "Created Builder Config");

if (!parser->parseFromFile(onnxModelPath.c_str(), static_cast<int>(ILogger::Severity::kINFO))) {
throw std::runtime_error("Failed to parse ONNX file");
}

IRuntime* runtime = createInferRuntime(mLogger);

// Define the engine file location relative to the mrover package
std::filesystem::path packagePath{mPackagePath};
std::filesystem::path enginePath = packagePath / "data" / std::string("tensorrt-engine-").append(modelName).append(".engine");
// Check if engine file exists
if (!exists(enginePath)) {
std::cout << "Optimizing ONXX model for TensorRT. This make take a long time..." << std::endl;

// Create the Engine from onnx file
IHostMemory* serializedEngine = builder->buildSerializedNetwork(*network, *config);
if (!serializedEngine) throw std::runtime_error("Failed to serialize engine");

// Create temporary engine for serializing
ICudaEngine* tempEng = runtime->deserializeCudaEngine(serializedEngine->data(), serializedEngine->size());
if (!tempEng) throw std::runtime_error("Failed to create temporary engine");

// Save Engine to File
auto trtModelStream = tempEng->serialize();
std::ofstream outputFileStream{enginePath, std::ios::binary};
outputFileStream.write(static_cast<char const*>(trtModelStream->data()), trtModelStream->size());
outputFileStream.close();

return tempEng;
}

// Load engine from file
std::ifstream inputFileStream{enginePath, std::ios::binary};
std::stringstream engineBuffer;

// Stream in the engine file to the buffer
engineBuffer << inputFileStream.rdbuf();
std::string enginePlan = engineBuffer.str();
// Deserialize the Cuda Engine file from the buffer
return runtime->deserializeCudaEngine(enginePlan.data(), enginePlan.size());
}

auto Inference::createExecutionContext() -> void {
// Create Execution Context
mContext.reset(mEngine->createExecutionContext());
if (!mContext) throw std::runtime_error("Failed to create execution context");

// Set up the input tensor sizing
mContext->setInputShape(INPUT_BINDING_NAME, mEngine->getTensorShape(INPUT_BINDING_NAME));
}

auto Inference::doDetections(cv::Mat const& img) const -> void {
// Do the forward pass on the network
launchInference(img, mOutputTensor);
}

auto Inference::getOutputTensor() -> cv::Mat {
return mOutputTensor;
}

auto Inference::launchInference(cv::Mat const& input, cv::Mat const& output) const -> void {
//Assert these items have been initialized
assert(!input.empty());
assert(!output.empty());
assert(input.isContinuous());
assert(output.isContinuous());
assert(mContext);

// Get the binding id for the input tensor
int inputId = getBindingInputIndex(mContext.get());

// Memcpy the input tensor from the host to the gpu
cudaMemcpy(mBindings[inputId], input.data, input.total() * input.elemSize(), cudaMemcpyHostToDevice);

// Execute the model on the gpu
mContext->executeV2(mBindings.data());

// Memcpy the output tensor from the gpu to the host
cudaMemcpy(output.data, mBindings[1 - inputId], output.total() * output.elemSize(), cudaMemcpyDeviceToHost);
}

auto Inference::prepTensors() -> void {
// Assign the properties to the input and output tensors
for (int i = 0; i < mEngine->getNbIOTensors(); i++) {
char const* tensorName = mEngine->getIOTensorName(i);
auto [rank, extents] = mEngine->getTensorShape(tensorName);

// Multiply sizeof(float) by the product of the extents
// This is essentially: element count * size of each element
std::size_t size = 1;
for(int32_t i = 0; i < rank; ++i){
size *= extents[i];
}
std::cout << tensorName << " is getting allocated to size " << size << std::endl;

// Create GPU memory for TensorRT to operate on
if (cudaError_t result = cudaMalloc(mBindings.data() + i, size * sizeof(float)); result != cudaSuccess)
throw std::runtime_error{"Failed to allocate GPU memory: " + std::string{cudaGetErrorString(result)}};
}

assert(mContext);
// Create an appropriately sized output tensor
auto const [nbDims, d] = mEngine->getTensorShape(OUTPUT_BINDING_NAME);
for (int i = 0; i < nbDims; i++) {
std::array<char, 512> message;
std::snprintf(message.data(), message.size(), "Size %d %d", i, d[i]);
mLogger.log(ILogger::Severity::kINFO, message.data());
}

// Create the mat wrapper around the output matrix for ease of use
assert(nbDims == 3);
assert(d[0] == 1);
mOutputTensor = cv::Mat::zeros(d[1], d[2], CV_32FC1);
}

auto Inference::getBindingInputIndex(IExecutionContext const* context) -> int {
// Returns the id for the input tensor
return context->getEngine().getTensorIOMode(context->getEngine().getIOTensorName(0)) != TensorIOMode::kINPUT; // 0 (false) if bindingIsInput(0), 1 (true) otherwise
}
Loading