diff --git a/docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile b/docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile
deleted file mode 100644
index d91b79e5..00000000
--- a/docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile
+++ /dev/null
@@ -1,55 +0,0 @@
-ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04
-
-################################ Source ################################
-FROM ${BASE_IMAGE} as source
-
-WORKDIR ${AMENT_WS}/src
-
-# Copy in source code
-COPY src/perception/traffic_light_detection traffic_light_detection
-COPY src/wato_msgs/sample_msgs sample_msgs
-
-# Scan for rosdeps
-RUN apt-get -qq update && rosdep update && \
-    rosdep install --from-paths . --ignore-src -r -s \
-        | grep 'apt-get install' \
-        | awk '{print $3}' \
-        | sort > /tmp/colcon_install_list
-
-################################# Dependencies ################################
-FROM ${BASE_IMAGE} as dependencies
-
-# Install Rosdep requirements
-COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list
-RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list)
-
-# Copy in source code from source stage
-WORKDIR ${AMENT_WS}
-COPY --from=source ${AMENT_WS}/src src
-
-# Dependency Cleanup
-WORKDIR /
-RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \
-    rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/*
-
-################################ Build ################################
-FROM dependencies as build
-
-# Build ROS2 packages
-WORKDIR ${AMENT_WS}
-RUN . /opt/ros/$ROS_DISTRO/setup.sh && \
-    colcon build \
-        --cmake-args -DCMAKE_BUILD_TYPE=Release
-
-# Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash
-COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh
-ENTRYPOINT ["./wato_ros_entrypoint.sh"]
-
-################################ Prod ################################
-FROM build as deploy
-
-# Source Cleanup and Security Setup
-RUN chown -R $USER:$USER ${AMENT_WS}
-RUN rm -rf src/*
-
-USER ${USER}
diff --git a/modules/dev_overrides/docker-compose.perception.yaml b/modules/dev_overrides/docker-compose.perception.yaml
index 31b41896..07e28ec2 100644
--- a/modules/dev_overrides/docker-compose.perception.yaml
+++ b/modules/dev_overrides/docker-compose.perception.yaml
@@ -32,15 +32,6 @@ services:
       volumes:
         - ${MONO_DIR}/src/perception/lidar_object_detection:/home/bolty/ament_ws/src/lidar_object_detection
 
-  traffic_light_detection:
-    <<: *fixuid
-    extends:
-      file: ../docker-compose.perception.yaml
-      service: traffic_light_detection
-    command: tail -F anything
-    volumes:
-      - ${MONO_DIR}/src/perception/traffic_light_detection:/home/bolty/ament_ws/src/traffic_light_detection
-
   traffic_sign_detection:
     <<: *fixuid
     extends:
diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml
index 2923c8f3..8c806df9 100644
--- a/modules/docker-compose.perception.yaml
+++ b/modules/docker-compose.perception.yaml
@@ -31,6 +31,7 @@ services:
     command: /bin/bash -c "ros2 launch camera_object_detection eve_launch.py"
     volumes:
       - /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt
+      - /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt
 
   lidar_object_detection:
     build:
@@ -42,16 +43,6 @@ services:
      target: deploy
    image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}"
    command: /bin/bash -c "ros2 launch lidar_object_detection lidar_object_detection.launch.py"
-  traffic_light_detection:
-    build:
-      context: ..
-      dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile
-      cache_from:
-        - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:build_${TAG}"
-        - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:build_main"
-      target: deploy
-    image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}"
-    command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py"
 
   traffic_sign_detection:
     build:
diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
index f5822173..05ceb431 100755
--- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
+++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
@@ -1,5 +1,6 @@
 import rclpy
 from rclpy.node import Node
+import os
 
 from sensor_msgs.msg import Image, CompressedImage
 from vision_msgs.msg import (
@@ -9,7 +10,7 @@
 )
 
 from ultralytics.nn.autobackend import AutoBackend
-from ultralytics.data.augment import LetterBox
+from ultralytics.data.augment import LetterBox, CenterCrop
 from ultralytics.utils.ops import non_max_suppression
 from ultralytics.utils.plotting import Annotator, colors
 
@@ -37,18 +38,24 @@ def __init__(self):
         self.declare_parameter("model_path", "/perception_models/yolov8m.pt")
         self.declare_parameter("image_size", 1024)
         self.declare_parameter("compressed", False)
+        self.declare_parameter("crop_mode", "LetterBox")
+        self.declare_parameter("save_detections", False)
 
         self.camera_topic = self.get_parameter("camera_topic").value
         self.publish_vis_topic = self.get_parameter("publish_vis_topic").value
-        self.publish_detection_topic = self.get_parameter(
-            "publish_detection_topic").value
+        self.publish_detection_topic = self.get_parameter("publish_detection_topic").value
         self.model_path = self.get_parameter("model_path").value
         self.image_size = self.get_parameter("image_size").value
         self.compressed = self.get_parameter("compressed").value
+        self.crop_mode = self.get_parameter("crop_mode").value
+        self.save_detections = bool(self.get_parameter("save_detections").value)
+        self.counter = 0  # For saving detections
+        if self.save_detections:
+            if not os.path.exists("detections"):
+                os.makedirs("detections")
 
         self.line_thickness = 1
         self.half = False
-        self.augment = False
 
         self.subscription = self.create_subscription(
             Image if not self.compressed else CompressedImage,
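The new crop_mode parameter selects between two ultralytics transforms, wired up in crop_image in the hunks that follow: LetterBox scales the long side down to image_size and pads the short side, while CenterCrop resizes the central square, keeping the middle of the frame (where distant traffic lights sit) at higher effective resolution. A minimal shape check, not part of the PR, assuming ultralytics is installed and using an illustrative 1920x1080 frame:

```python
import numpy as np
from ultralytics.data.augment import LetterBox, CenterCrop

frame = np.zeros((1080, 1920, 3), dtype=np.uint8)  # H x W x C stand-in camera frame

# LetterBox: scale 1920 -> 1024 on the long side, pad 576 -> 1024 vertically
print(LetterBox(1024, stride=32)(image=frame).shape)  # (1024, 1024, 3)

# CenterCrop: take the central 1080x1080 square, then resize it to 1024x1024
print(CenterCrop(1024)(frame).shape)  # (1024, 1024, 3)
```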
@@ -61,9 +68,11 @@
             ),
         )
 
+        self.orig_image_width = None
+        self.orig_image_height = None
+
         # set device
-        self.device = torch.device(
-            "cuda" if torch.cuda.is_available() else "cpu")
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         if torch.cuda.is_available():
             self.get_logger().info("Using GPU for inference")
         else:
@@ -73,24 +82,69 @@
         self.cv_bridge = CvBridge()
 
         # load yolov8 model
-        self.model = AutoBackend(
-            self.model_path, device=self.device, dnn=False, fp16=False)
+        self.model = AutoBackend(self.model_path, device=self.device, dnn=False, fp16=False)
 
-        self.names = self.model.module.names if hasattr(
-            self.model, "module") else self.model.names
+        self.names = self.model.module.names if hasattr(self.model, "module") else self.model.names
 
         self.stride = int(self.model.stride)
 
         # setup vis publishers
-        self.vis_publisher = self.create_publisher(
-            Image, self.publish_vis_topic, 10)
+        self.vis_publisher = self.create_publisher(Image, self.publish_vis_topic, 10)
         self.detection_publisher = self.create_publisher(
-            Detection2DArray, self.publish_detection_topic, 10)
+            Detection2DArray, self.publish_detection_topic, 10
+        )
 
         self.get_logger().info(
-            f"Successfully created node listening on camera topic: {self.camera_topic}...")
+            f"Successfully created node listening on camera topic: {self.camera_topic}..."
+        )
+
+    def crop_image(self, cv_image):
+        if self.crop_mode == "LetterBox":
+            img = LetterBox(self.image_size, stride=self.stride)(image=cv_image)
+        elif self.crop_mode == "CenterCrop":
+            img = CenterCrop(self.image_size)(cv_image)
+        else:
+            raise Exception("Invalid crop mode, please choose either 'LetterBox' or 'CenterCrop'!")
+
+        return img
+
+    def convert_bboxes_to_orig_frame(self, bbox):
+        """
+        Converts bounding box coordinates from the scaled image frame back to the original image frame.
+
+        This function takes into account the original image dimensions and the scaling method used
+        (either "LetterBox" or "CenterCrop") to accurately map the bounding box coordinates back to
+        their original positions in the original image.
+
+        Parameters:
+        bbox (list): A list containing the bounding box coordinates in the format [x1, y1, w1, h1]
+            in the scaled image frame.
+
+        Returns:
+        list: A list containing the bounding box coordinates in the format [x1, y1, w1, h1]
+            in the original image frame.
 
-    def preprocess_image(self, cv_image):
+        """
+        width_scale = self.orig_image_width / self.image_size
+        height_scale = self.orig_image_height / self.image_size
+        if self.crop_mode == "LetterBox":
+            translation = (self.image_size - self.orig_image_height / width_scale) / 2
+            return [
+                bbox[0] * width_scale,
+                (bbox[1] - translation) * width_scale,
+                bbox[2] * width_scale,
+                bbox[3] * width_scale,
+            ]
+        elif self.crop_mode == "CenterCrop":
+            translation = (self.orig_image_width / height_scale - self.image_size) / 2
+            return [
+                (bbox[0] + translation) * height_scale,
+                bbox[1] * height_scale,
+                bbox[2] * height_scale,
+                bbox[3] * height_scale,
+            ]
+
+    def crop_and_convert_to_tensor(self, cv_image):
         """
         Preprocess the image by resizing, padding and rearranging the dimensions.
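convert_bboxes_to_orig_frame above inverts the crop for each detected box: divide out the uniform scale, then undo the letterbox padding (vertical) or re-apply the centre-crop offset (horizontal). Both branches assume a landscape input (width >= height), which holds for the car's cameras. A standalone sketch of the same arithmetic, using illustrative values (a hypothetical 1920x1080 source and image_size 1024) rather than anything from this PR:

```python
def to_orig_frame(bbox, crop_mode, orig_w=1920, orig_h=1080, image_size=1024):
    # Mirrors convert_bboxes_to_orig_frame; bbox is [x, y, w, h] in the
    # image_size x image_size model frame.
    width_scale = orig_w / image_size
    height_scale = orig_h / image_size
    x, y, w, h = bbox
    if crop_mode == "LetterBox":
        # Undo the vertical padding, then the width-based uniform scale.
        pad = (image_size - orig_h / width_scale) / 2
        return [x * width_scale, (y - pad) * width_scale, w * width_scale, h * width_scale]
    # CenterCrop: re-apply the horizontal crop offset, then the height-based scale.
    crop = (orig_w / height_scale - image_size) / 2
    return [(x + crop) * height_scale, y * height_scale, w * height_scale, h * height_scale]

# A box centred in the model frame maps back to the image centre in both modes:
print(to_orig_frame([512, 512, 100, 100], "LetterBox"))   # [960.0, 540.0, 187.5, 187.5]
print(to_orig_frame([512, 512, 100, 100], "CenterCrop"))  # [960.0, 540.0, ~105.5, ~105.5]
```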
@@ -100,9 +154,7 @@ def preprocess_image(self, cv_image):
 
         Returns: torch.Tensor image for model input of shape (1,3,w,h)
         """
-        # Padded resize
-        img = cv_image
-        img = LetterBox(self.image_size, stride=self.stride)(image=cv_image)
+        img = self.crop_image(cv_image)
 
         # Convert
         img = img.transpose(2, 0, 1)
@@ -143,10 +195,11 @@
         annotator_img = annotator.result()
         return (processed_detections, annotator_img)
 
-    def publish_vis(self, annotated_img, feed):
+    def publish_vis(self, annotated_img, msg, feed):
         # Publish visualizations
         imgmsg = self.cv_bridge.cv2_to_imgmsg(annotated_img, "bgr8")
-        imgmsg.header.frame_id = "camera_{}_link".format(feed)
+        imgmsg.header.stamp = msg.header.stamp
+        imgmsg.header.frame_id = msg.header.frame_id
         self.vis_publisher.publish(imgmsg)
 
     def publish_detections(self, detections, msg, feed):
@@ -175,10 +228,13 @@
             detection2darray.detections.append(detection2d)
         self.detection_publisher.publish(detection2darray)
 
-        return
 
     def image_callback(self, msg):
         self.get_logger().debug("Received image")
+        if self.orig_image_width is None:
+            self.orig_image_width = msg.width
+            self.orig_image_height = msg.height
+
         images = [msg]  # msg is a single sensor image
         startTime = time.time()
         for image in images:
@@ -189,16 +245,13 @@
                 cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
             else:
                 try:
-                    cv_image = self.cv_bridge.imgmsg_to_cv2(
-                        image, desired_encoding="passthrough")
+                    cv_image = self.cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough")
                 except CvBridgeError as e:
                     self.get_logger().error(str(e))
                     return
 
             # preprocess image and run through prediction
-            img = self.preprocess_image(cv_image)
-            processed_cv_image = LetterBox(
-                self.image_size, stride=self.stride)(image=cv_image)
+            img = self.crop_and_convert_to_tensor(cv_image)
             pred = self.model(img)
 
             # nms function used same as yolov8 detect.py
@@ -217,6 +270,7 @@
                         xyxy[3] - xyxy[1],
                     ]
                     bbox = [b.item() for b in bbox]
+                    bbox = self.convert_bboxes_to_orig_frame(bbox)
 
                     detections.append(
                         {
@@ -228,20 +282,24 @@
                     self.get_logger().debug(f"{label}: {bbox}")
 
             annotator = Annotator(
-                processed_cv_image,
+                cv_image,
                 line_width=self.line_thickness,
                 example=str(self.names),
            )
 
-            (detections, annotated_img) = self.postprocess_detections(
-                detections, annotator)
+            (detections, annotated_img) = self.postprocess_detections(detections, annotator)
 
             # Currently we support a single camera so we pass an empty string
             feed = ""
-            self.publish_vis(annotated_img, feed)
+            self.publish_vis(annotated_img, msg, feed)
             self.publish_detections(detections, msg, feed)
 
+            if self.save_detections:
+                cv2.imwrite(f"detections/{self.counter}.jpg", annotated_img)
+                self.counter += 1
+
         self.get_logger().info(
-            f"Finished in: {time.time() - startTime}, {1/(time.time() - startTime)} Hz")
+            f"Finished in: {time.time() - startTime}, {1/(time.time() - startTime)} Hz"
+        )
 
 def main(args=None):
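One caveat with the new save_detections path: the annotated frames are written to a detections/ directory created relative to the node's working directory, so inside the container they land on the container filesystem (wherever the launch process was started) unless that path happens to be volume-mounted.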
diff --git a/src/perception/camera_object_detection/config/eve_config.yaml b/src/perception/camera_object_detection/config/eve_config.yaml
index efac49fa..208ccbe6 100755
--- a/src/perception/camera_object_detection/config/eve_config.yaml
+++ b/src/perception/camera_object_detection/config/eve_config.yaml
@@ -1,23 +1,23 @@
 left_camera_object_detection_node:
   ros__parameters:
     camera_topic: /camera/left/image_color
-    publish_vis_topic: /camera/left/annotated_img
-    publish_detection_topic: /camera/left/detections
+    publish_vis_topic: /camera/left/camera_detections_viz
+    publish_detection_topic: /camera/left/camera_detections
     model_path: /perception_models/yolov8m.pt
     image_size: 1024
 
 center_camera_object_detection_node:
   ros__parameters:
     camera_topic: /camera/center/image_color
-    publish_vis_topic: /camera/center/annotated_img
-    publish_detection_topic: /camera/center/detections
+    publish_vis_topic: /camera/center/camera_detections_viz
+    publish_detection_topic: /camera/center/camera_detections
     model_path: /perception_models/yolov8m.pt
     image_size: 1024
 
 right_camera_object_detection_node:
   ros__parameters:
     camera_topic: /camera/right/image_color
-    publish_vis_topic: /camera/right/annotated_img
-    publish_detection_topic: /camera/right/detections
+    publish_vis_topic: /camera/right/camera_detections_viz
+    publish_detection_topic: /camera/right/camera_detections
     model_path: /perception_models/yolov8m.pt
     image_size: 1024
diff --git a/src/perception/camera_object_detection/config/traffic_light_config.yaml b/src/perception/camera_object_detection/config/traffic_light_config.yaml
new file mode 100755
index 00000000..9a3e00c8
--- /dev/null
+++ b/src/perception/camera_object_detection/config/traffic_light_config.yaml
@@ -0,0 +1,9 @@
+traffic_light_node:
+  ros__parameters:
+    camera_topic: /camera/left/image_color
+    publish_vis_topic: /traffic_lights_viz
+    publish_detection_topic: /traffic_lights
+    model_path: /perception_models/traffic_light.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
diff --git a/src/perception/camera_object_detection/launch/deepracer_launch.py b/src/perception/camera_object_detection/launch/deepracer.launch.py
similarity index 82%
rename from src/perception/camera_object_detection/launch/deepracer_launch.py
rename to src/perception/camera_object_detection/launch/deepracer.launch.py
index c853a556..138c32b4 100755
--- a/src/perception/camera_object_detection/launch/deepracer_launch.py
+++ b/src/perception/camera_object_detection/launch/deepracer.launch.py
@@ -5,14 +5,12 @@
 
 
 def generate_launch_description():
-    ld = LaunchDescription()
     config = os.path.join(
         get_package_share_directory('camera_object_detection'),
         'config',
         'deeepracer.yaml'
     )
 
-    # nodes
     camera_object_detection_node = Node(
         package='camera_object_detection',
         executable='camera_object_detection_node',
@@ -20,7 +18,4 @@ def generate_launch_description():
         parameters=[config]
     )
 
-    # finalize
-    ld.add_action(camera_object_detection_node)
-
-    return ld
+    return LaunchDescription([camera_object_detection_node])
diff --git a/src/perception/camera_object_detection/launch/eve.launch.py b/src/perception/camera_object_detection/launch/eve.launch.py
new file mode 100755
index 00000000..53c3eb08
--- /dev/null
+++ b/src/perception/camera_object_detection/launch/eve.launch.py
@@ -0,0 +1,43 @@
+from launch import LaunchDescription
+from launch.substitutions import LaunchConfiguration
+from launch.conditions import LaunchConfigurationEquals
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+from ament_index_python.packages import get_package_share_directory
+import os
+
+
+def generate_launch_description():
+    launch_traffic_light = LaunchConfiguration("launch_traffic_light", default=True)
+    launch_traffic_light_arg = DeclareLaunchArgument(
+        "launch_traffic_light",
+        default_value=launch_traffic_light,
+        description="Launch traffic light detection",
+    )
+
+    launch_args = [launch_traffic_light_arg]
+
+    camera_object_detection_launch_include_dir = os.path.join(
+        get_package_share_directory("camera_object_detection"), "launch", "include"
+    )
+
+    pretrained_yolov8_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            [camera_object_detection_launch_include_dir, "/pretrained_yolov8.launch.py"]
+        ),
+    )
+
+    traffic_light_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            [camera_object_detection_launch_include_dir, "/traffic_light.launch.py"]
+        ),
+        condition=LaunchConfigurationEquals("launch_traffic_light", "True"),
+    )
+
+    return LaunchDescription(
+        launch_args
+        + [
+            pretrained_yolov8_launch,
+            traffic_light_launch,
+        ]
+    )
diff --git a/src/perception/camera_object_detection/launch/eve_launch.py b/src/perception/camera_object_detection/launch/eve_launch.py
deleted file mode 100755
index 94ac35df..00000000
--- a/src/perception/camera_object_detection/launch/eve_launch.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-from ament_index_python.packages import get_package_share_directory
-import os
-
-
-def generate_launch_description():
-    ld = LaunchDescription()
-    config = os.path.join(
-        get_package_share_directory('camera_object_detection'),
-        'config',
-        'eve_config.yaml'
-    )
-
-    # nodes
-    left_camera_object_detection_node = Node(
-        package='camera_object_detection',
-        executable='camera_object_detection_node',
-        name='left_camera_object_detection_node',
-        parameters=[config]
-    )
-
-    center_camera_object_detection_node = Node(
-        package='camera_object_detection',
-        executable='camera_object_detection_node',
-        name='center_camera_object_detection_node',
-        parameters=[config]
-    )
-
-    right_camera_object_detection_node = Node(
-        package='camera_object_detection',
-        executable='camera_object_detection_node',
-        name='right_camera_object_detection_node',
-        parameters=[config]
-    )
-
-    # finalize
-    ld.add_action(left_camera_object_detection_node)
-    ld.add_action(center_camera_object_detection_node)
-    ld.add_action(right_camera_object_detection_node)
-
-    return ld
diff --git a/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py b/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py
new file mode 100644
index 00000000..13096462
--- /dev/null
+++ b/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py
@@ -0,0 +1,39 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+from ament_index_python.packages import get_package_share_directory
+import os
+
+
+def generate_launch_description():
+    config = os.path.join(
+        get_package_share_directory("camera_object_detection"), "config", "eve_config.yaml"
+    )
+
+    left_camera_object_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="left_camera_object_detection_node",
+        parameters=[config],
+    )
+
+    center_camera_object_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="center_camera_object_detection_node",
+        parameters=[config],
+    )
+
+    right_camera_object_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="right_camera_object_detection_node",
+        parameters=[config],
+    )
+
+    return LaunchDescription(
+        [
+            left_camera_object_detection_node,
+            center_camera_object_detection_node,
+            right_camera_object_detection_node,
+        ]
+    )
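With these includes in place, eve.launch.py brings up the three pretrained YOLOv8 camera nodes and, by default, the traffic light node; running ros2 launch camera_object_detection eve.launch.py launch_traffic_light:=False skips it. One gotcha worth noting: LaunchConfigurationEquals does a plain string comparison against "True", so the override is case-sensitive — any other spelling (for example launch_traffic_light:=true) fails the comparison and silently disables the traffic light include.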
diff --git a/src/perception/camera_object_detection/launch/include/traffic_light.launch.py b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py
new file mode 100755
index 00000000..234844b8
--- /dev/null
+++ b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py
@@ -0,0 +1,21 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+from ament_index_python.packages import get_package_share_directory
+import os
+
+
+def generate_launch_description():
+    config = os.path.join(
+        get_package_share_directory('camera_object_detection'),
+        'config',
+        'traffic_light_config.yaml'
+    )
+
+    camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_object_detection_node',
+        name='traffic_light_node',
+        parameters=[config]
+    )
+
+    return LaunchDescription([camera_object_detection_node])
diff --git a/src/perception/camera_object_detection/launch/nuscenes_launch.py b/src/perception/camera_object_detection/launch/nuscenes.launch.py
similarity index 87%
rename from src/perception/camera_object_detection/launch/nuscenes_launch.py
rename to src/perception/camera_object_detection/launch/nuscenes.launch.py
index 7a366260..faff24b6 100755
--- a/src/perception/camera_object_detection/launch/nuscenes_launch.py
+++ b/src/perception/camera_object_detection/launch/nuscenes.launch.py
@@ -12,7 +12,6 @@ def generate_launch_description():
         'nuscenes_config.yaml'
     )
 
-    # nodes
     camera_object_detection_node = Node(
         package='camera_object_detection',
         executable='camera_object_detection_node',
@@ -21,7 +20,4 @@ def generate_launch_description():
         arguments=['--ros-args', '--log-level', 'info']
     )
 
-    # finalize
-    ld.add_action(camera_object_detection_node)
-
-    return ld
+    return LaunchDescription([camera_object_detection_node])
diff --git a/src/perception/camera_object_detection/setup.py b/src/perception/camera_object_detection/setup.py
index 7ba35c15..c9665f79 100755
--- a/src/perception/camera_object_detection/setup.py
+++ b/src/perception/camera_object_detection/setup.py
@@ -12,7 +12,8 @@
         ('share/ament_index/resource_index/packages',
             ['resource/' + package_name]),
         ('share/' + package_name, ['package.xml']),
-        (os.path.join('share', package_name, 'launch'), glob('launch/*.py')),
+        (os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')),
+        (os.path.join('share', package_name, 'launch', 'include'), glob('launch/include/*.launch.py')),
         (os.path.join('share', package_name, 'config'), glob('config/*.yaml')),
     ],
     install_requires=['setuptools'],
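The tightened data_files globs are what force the *_launch.py to *.launch.py renames elsewhere in this diff: a launch file keeping the old suffix would silently stop being installed. As a quick check (not part of the PR), run from the package root the new patterns pick up exactly the launch files this diff leaves in place:

```python
from glob import glob

print(sorted(glob("launch/*.launch.py")))
# ['launch/deepracer.launch.py', 'launch/eve.launch.py', 'launch/nuscenes.launch.py']
print(sorted(glob("launch/include/*.launch.py")))
# ['launch/include/pretrained_yolov8.launch.py', 'launch/include/traffic_light.launch.py']
```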
diff --git a/watod_scripts/watod-setup-env.sh b/watod_scripts/watod-setup-env.sh
index 3da136fe..e8b7227b 100755
--- a/watod_scripts/watod-setup-env.sh
+++ b/watod_scripts/watod-setup-env.sh
@@ -69,7 +69,6 @@ INFRASTRUCTURE_FOXGLOVE_IMAGE=${DATA_STREAM_IMAGE:-"$REGISTRY_URL/infrastructure
 PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE=${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/radar_object_detection"}
 PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE=${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/lidar_object_detection"}
 PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE=${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/camera_object_detection"}
-PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE=${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/traffic_light_detection"}
 PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE=${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE:-"$REGISTRY_URL/perception/traffic_sign_detection"}
 PERCEPTION_LANE_DETECTION_IMAGE=${PERCEPTION_LANE_DETECTION_IMAGE:-"$REGISTRY_URL/perception/lane_detection"}
 PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE:-"$REGISTRY_URL/perception/semantic_segmentation"}
@@ -165,7 +164,6 @@ echo "INFRASTRUCTURE_FOXGLOVE_IMAGE=$INFRASTRUCTURE_FOXGLOVE_IMAGE" >> "$MODULES_DIR/.env"
 echo "PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE=$PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env"
 echo "PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE=$PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env"
 echo "PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE=$PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env"
-echo "PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE=$PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE" >> "$MODULES_DIR/.env"
 echo "PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE=$PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE" >> "$MODULES_DIR/.env"
 echo "PERCEPTION_LANE_DETECTION_IMAGE=$PERCEPTION_LANE_DETECTION_IMAGE" >> "$MODULES_DIR/.env"
 echo "PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=$PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE" >> "$MODULES_DIR/.env"