diff --git a/launch/tier4_perception_launch/launch/perception.launch.xml b/launch/tier4_perception_launch/launch/perception.launch.xml
index 50acaae90beac..d72e6c0462fec 100644
--- a/launch/tier4_perception_launch/launch/perception.launch.xml
+++ b/launch/tier4_perception_launch/launch/perception.launch.xml
@@ -100,7 +100,10 @@
-
+
+
+
+
-
+
+
+
+
diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml
index 58d7d7f3023b2..5318b488c3e00 100644
--- a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml
+++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml
@@ -1,7 +1,8 @@
-
+
+
@@ -13,6 +14,8 @@
+
+
@@ -32,14 +35,17 @@
-
+
-
+
+
+
+
@@ -47,7 +53,7 @@
-
+
diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_map_based_detector.launch.py b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_map_based_detector.launch.py
index defc4a534072b..cc760e409fddc 100644
--- a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_map_based_detector.launch.py
+++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_map_based_detector.launch.py
@@ -32,7 +32,7 @@ def create_traffic_light_map_based_detector(namespace, context):
output_rois = (
"rough/rois"
- if IfCondition(LaunchConfiguration("enable_fine_detection")).evaluate(context)
+ if IfCondition(LaunchConfiguration("use_ml_detector")).evaluate(context)
else f"/perception/traffic_light_recognition/{namespace}/detection/rois"
)
@@ -87,9 +87,9 @@ def add_launch_arg(name: str, default_value=None, description=None):
add_launch_arg("all_camera_namespaces", "[camera6, camera7]")
add_launch_arg(
- "enable_fine_detection",
+ "use_ml_detector",
"True",
- "If True, output_topic will be for fine detector, otherwise for classifier",
+ "If True, output_topic will be for ml detector, otherwise for classifier",
)
return launch.LaunchDescription(
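
The map-based detector launch file resolves the ML-detector flag at configuration time rather than through a runtime condition, so the chosen topic name can be embedded directly in a plain string. Below is a minimal, self-contained sketch of that pattern; the argument name `use_ml_detector` comes from the diff, while the file layout, topic strings, and the `pick_output_topic` helper are illustrative only.

```python
# Sketch: resolving a boolean launch argument inside an OpaqueFunction,
# mirroring how output_rois is chosen above. Topic names are illustrative.
import launch
from launch.actions import DeclareLaunchArgument, LogInfo, OpaqueFunction
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration


def pick_output_topic(context, *args, **kwargs):
    # evaluate() resolves the substitution against the current launch context,
    # yielding a plain Python bool we can branch on while building actions.
    use_ml = IfCondition(LaunchConfiguration("use_ml_detector")).evaluate(context)
    output_rois = "rough/rois" if use_ml else "detection/rois"
    # The resolved string could be passed straight into a Node's remappings.
    return [LogInfo(msg=f"map_based_detector output topic: {output_rois}")]


def generate_launch_description():
    return launch.LaunchDescription(
        [
            DeclareLaunchArgument("use_ml_detector", default_value="True"),
            OpaqueFunction(function=pick_output_topic),
        ]
    )
```
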
diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py
index 1efd6f8cb3cfd..43818dad6841a 100644
--- a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py
+++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py
@@ -24,6 +24,8 @@
from launch.conditions import IfCondition
from launch.conditions import UnlessCondition
from launch.substitutions import LaunchConfiguration
+from launch.substitutions import PathJoinSubstitution
+from launch.substitutions import PythonExpression
from launch_ros.actions import ComposableNodeContainer
from launch_ros.actions import LoadComposableNodes
from launch_ros.actions import PushRosNamespace
@@ -60,11 +62,13 @@ def launch_setup(context, *args, **kwargs):
def create_traffic_light_node_container(namespace, context, *args, **kwargs):
camera_arguments = {
+ "input/camera_info": f"/sensing/camera/{namespace}/camera_info",
"input/image": f"/sensing/camera/{namespace}/image_raw",
"output/rois": f"/perception/traffic_light_recognition/{namespace}/detection/rois",
"output/traffic_signals": f"/perception/traffic_light_recognition/{namespace}/classification/traffic_signals",
"output/car/traffic_signals": f"/perception/traffic_light_recognition/{namespace}/classification/car/traffic_signals",
"output/pedestrian/traffic_signals": f"/perception/traffic_light_recognition/{namespace}/classification/pedestrian/traffic_signals",
+ "output/debug": f"/perception/traffic_light_recognition/{namespace}/detection/rois/debug",
}
def create_parameter_dict(*args):
@@ -126,7 +130,7 @@ def create_parameter_dict(*args):
package="autoware_traffic_light_visualization",
plugin="autoware::traffic_light::TrafficLightRoiVisualizerNode",
name="traffic_light_roi_visualizer",
- parameters=[create_parameter_dict("enable_fine_detection", "use_image_transport")],
+ parameters=[create_parameter_dict("use_ml_detector", "use_image_transport")],
remappings=[
("~/input/image", camera_arguments["input/image"]),
("~/input/rois", camera_arguments["output/rois"]),
@@ -192,13 +196,110 @@ def create_parameter_dict(*args):
),
],
target_container=container,
- condition=IfCondition(LaunchConfiguration("enable_fine_detection")),
+ condition=IfCondition(
+ PythonExpression(
+ [
+ "'",
+ LaunchConfiguration("ml_detection_model_type"),
+ "' == 'fine_detection_model' ",
+ ]
+ )
+ ),
+ )
+
+ # cspell: ignore semseg
+ whole_img_detector_loader = LoadComposableNodes(
+ composable_node_descriptions=[
+ ComposableNode(
+ package="autoware_tensorrt_yolox",
+ plugin="autoware::tensorrt_yolox::TrtYoloXNode",
+ name="traffic_light_detector",
+ namespace=f"{namespace}/detection",
+ parameters=[
+ LaunchConfiguration("whole_image_detector_param_path"),
+ {
+ "model_path": PathJoinSubstitution(
+ [
+ LaunchConfiguration("whole_image_detector_model_path"),
+ LaunchConfiguration("whole_image_detector_model_name"),
+ ]
+ ),
+ "label_path": PathJoinSubstitution(
+ [
+ LaunchConfiguration("whole_image_detector_model_path"),
+ "car_ped_tl_detector_labels.txt",
+ ]
+ ),
+ "build_only": False,
+ "clip_value": 0.0,
+ },
+ ],
+ remappings=[
+ ("~/in/image", camera_arguments["input/image"]),
+ ("~/out/objects", "ml_detected/rois"),
+ ("~/out/image", camera_arguments["output/debug"] + "/image"),
+ (
+ "~/out/image/compressed",
+ camera_arguments["output/debug"] + "/image/compressed",
+ ),
+ (
+ "~/out/image/compressedDepth",
+ camera_arguments["output/debug"] + "/image/compressedDepth",
+ ),
+ ("~/out/image/theora", camera_arguments["output/debug"] + "/image/theora"),
+ ],
+ extra_arguments=[
+ {"use_intra_process_comms": LaunchConfiguration("use_intra_process")}
+ ],
+ ),
+ ComposableNode(
+ package="autoware_traffic_light_selector",
+ plugin="autoware::traffic_light::TrafficLightSelectorNode",
+ name="traffic_light_selector",
+ namespace=f"{namespace}/detection",
+ parameters=[
+ {
+ "max_iou_threshold": -0.5,
+ }
+ ],
+ remappings=[
+ ("input/detected_rois", "ml_detected/rois"),
+ ("input/rough_rois", "rough/rois"),
+ ("input/expect_rois", "expect/rois"),
+ ("input/camera_info", camera_arguments["input/camera_info"]),
+ ("output/traffic_rois", camera_arguments["output/rois"]),
+ ],
+ ),
+ ComposableNode(
+ package="autoware_traffic_light_category_merger",
+ plugin="autoware::traffic_light::TrafficLightCategoryMergerNode",
+ name="traffic_light_category_merger",
+ namespace=f"{namespace}/classification",
+ parameters=[],
+ remappings=[
+ ("input/car_signals", "classified/car/traffic_signals"),
+ ("input/pedestrian_signals", "classified/pedestrian/traffic_signals"),
+ ("output/traffic_signals", camera_arguments["output/traffic_signals"]),
+ ],
+ ),
+ ],
+ target_container=container,
+ condition=IfCondition(
+ PythonExpression(
+ [
+ "'",
+ LaunchConfiguration("ml_detection_model_type"),
+ "' == 'whole_image_detection_model' ",
+ ]
+ )
+ ),
)
return [
GroupAction([PushRosNamespace(namespace), container]),
decompressor_loader,
fine_detector_loader,
+ whole_img_detector_loader,
]
@@ -211,11 +312,13 @@ def add_launch_arg(name: str, default_value=None, description=None):
DeclareLaunchArgument(name, default_value=default_value, description=description)
)
+ tensorrt_yolox_share_dir = get_package_share_directory("autoware_tensorrt_yolox")
fine_detector_share_dir = get_package_share_directory("autoware_traffic_light_fine_detector")
classifier_share_dir = get_package_share_directory("autoware_traffic_light_classifier")
add_launch_arg("all_camera_namespaces", "[camera6, camera7]")
add_launch_arg("enable_image_decompressor", "True")
- add_launch_arg("enable_fine_detection", "True")
+ add_launch_arg("use_ml_detector", "True")
+ add_launch_arg("ml_detection_model_type", "fine_detection_model")
add_launch_arg("use_image_transport", "True")
# traffic_light_fine_detector
@@ -224,6 +327,16 @@ def add_launch_arg(name: str, default_value=None, description=None):
os.path.join(fine_detector_share_dir, "config", "traffic_light_fine_detector.param.yaml"),
)
+ # whole image (traffic light) detector by yolox
+ add_launch_arg(
+ "whole_image_detector_model_path", os.path.expandvars("$HOME/autoware_data/tensorrt_yolox")
+ )
+ add_launch_arg("whole_image_detector_model_name", "tlr_car_ped_yolox_s_960_960_batch_1")
+ add_launch_arg(
+ "whole_image_detector_param_path",
+ os.path.join(tensorrt_yolox_share_dir, "config", "yolox_traffic_light_detector.param.yaml"),
+ )
+
# traffic_light_classifier
add_launch_arg(
"car_classifier_param_path",
diff --git a/perception/autoware_traffic_light_visualization/config/traffic_light_visualization.param.yaml b/perception/autoware_traffic_light_visualization/config/traffic_light_visualization.param.yaml
index 1354f7e619b86..4ad0faacc325f 100644
--- a/perception/autoware_traffic_light_visualization/config/traffic_light_visualization.param.yaml
+++ b/perception/autoware_traffic_light_visualization/config/traffic_light_visualization.param.yaml
@@ -1,4 +1,4 @@
/**:
ros__parameters:
- enable_fine_detection: false
+ use_ml_detector: false
use_image_transport: true
diff --git a/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml b/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml
index d4af7a27636df..a6a09576532c4 100644
--- a/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml
+++ b/perception/autoware_traffic_light_visualization/launch/traffic_light_roi_visualizer.launch.xml
@@ -4,7 +4,7 @@
-
+
@@ -13,7 +13,7 @@
-
+
diff --git a/perception/autoware_traffic_light_visualization/schema/traffic_light_visualization.schema.json b/perception/autoware_traffic_light_visualization/schema/traffic_light_visualization.schema.json
index 7058e89c3e5ea..ee1ec952242ef 100644
--- a/perception/autoware_traffic_light_visualization/schema/traffic_light_visualization.schema.json
+++ b/perception/autoware_traffic_light_visualization/schema/traffic_light_visualization.schema.json
@@ -6,7 +6,7 @@
"traffic_light_visualization": {
"type": "object",
"properties": {
- "enable_fine_detection": {
+ "use_ml_detector": {
"type": "boolean",
"description": "whether to visualize result of the traffic light fine detection",
"default": "false"
@@ -17,7 +17,7 @@
"default": "true"
}
},
- "required": ["enable_fine_detection", "use_image_transport"]
+ "required": ["use_ml_detector", "use_image_transport"]
}
},
"properties": {
diff --git a/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp b/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp
index 6ab9f09064f58..cdde10675bd44 100644
--- a/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp
+++ b/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.cpp
@@ -30,10 +30,10 @@ TrafficLightRoiVisualizerNode::TrafficLightRoiVisualizerNode(const rclcpp::NodeO
using std::placeholders::_2;
using std::placeholders::_3;
using std::placeholders::_4;
-  enable_fine_detection_ = this->declare_parameter<bool>("enable_fine_detection");
+  use_ml_detector_ = this->declare_parameter<bool>("use_ml_detector");
  use_image_transport_ = this->declare_parameter<bool>("use_image_transport");
- if (enable_fine_detection_) {
+ if (use_ml_detector_) {
sync_with_rough_roi_.reset(new SyncWithRoughRoi(
SyncPolicyWithRoughRoi(10), image_sub_, roi_sub_, rough_roi_sub_, traffic_signals_sub_));
sync_with_rough_roi_->registerCallback(
@@ -69,7 +69,7 @@ void TrafficLightRoiVisualizerNode::connectCb()
image_sub_.unsubscribe();
traffic_signals_sub_.unsubscribe();
roi_sub_.unsubscribe();
- if (enable_fine_detection_) {
+ if (use_ml_detector_) {
rough_roi_sub_.unsubscribe();
}
} else if (!image_sub_.getSubscriber()) {
@@ -77,7 +77,7 @@ void TrafficLightRoiVisualizerNode::connectCb()
roi_sub_.subscribe(this, "~/input/rois", rclcpp::QoS{1}.get_rmw_qos_profile());
traffic_signals_sub_.subscribe(
this, "~/input/traffic_signals", rclcpp::QoS{1}.get_rmw_qos_profile());
- if (enable_fine_detection_) {
+ if (use_ml_detector_) {
rough_roi_sub_.subscribe(this, "~/input/rough/rois", rclcpp::QoS{1}.get_rmw_qos_profile());
}
}
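
The C++ changes above only switch which parameter name drives the optional `rough_roi_sub_` synchronizer input. For reference, the same conditional-wiring idea is shown as a minimal rclpy sketch, with `sensor_msgs/Image` standing in for all message types and topic names taken from the node's remapping keys; this illustrates the pattern, it is not the node's implementation.

```python
# Sketch (rclpy): subscribe to an extra topic and widen the synchronizer
# only when use_ml_detector is true, as the C++ node does.
import message_filters
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image


class RoiVisualizerSketch(Node):
    def __init__(self):
        super().__init__("traffic_light_roi_visualizer_sketch")
        use_ml_detector = self.declare_parameter("use_ml_detector", False).value

        image_sub = message_filters.Subscriber(self, Image, "~/input/image")
        roi_sub = message_filters.Subscriber(self, Image, "~/input/rois")
        signals_sub = message_filters.Subscriber(self, Image, "~/input/traffic_signals")
        subs = [image_sub, roi_sub, signals_sub]

        if use_ml_detector:
            # Rough ROIs only exist when an ML detector runs upstream.
            subs.append(message_filters.Subscriber(self, Image, "~/input/rough/rois"))

        self.sync = message_filters.TimeSynchronizer(subs, 10)
        self.sync.registerCallback(self.on_synced)

    def on_synced(self, *msgs):
        # Receives 3 or 4 messages depending on the parameter.
        self.get_logger().info(f"received {len(msgs)} synchronized messages")


def main():
    rclpy.init()
    rclpy.spin(RoiVisualizerSketch())


if __name__ == "__main__":
    main()
```
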
diff --git a/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.hpp b/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.hpp
index 245c6caa6946c..b8d8023502555 100644
--- a/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.hpp
+++ b/perception/autoware_traffic_light_visualization/src/traffic_light_roi_visualizer/node.hpp
@@ -180,7 +180,7 @@ class TrafficLightRoiVisualizerNode : public rclcpp::Node
  typedef message_filters::Synchronizer<SyncPolicyWithRoughRoi> SyncWithRoughRoi;
  std::shared_ptr<SyncWithRoughRoi> sync_with_rough_roi_;
- bool enable_fine_detection_;
+ bool use_ml_detector_;
bool use_image_transport_;
};