diff --git a/localization/autoware_localization_evaluation_scripts/CMakeLists.txt b/localization/autoware_localization_evaluation_scripts/CMakeLists.txt
new file mode 100644
index 00000000..1dfb9180
--- /dev/null
+++ b/localization/autoware_localization_evaluation_scripts/CMakeLists.txt
@@ -0,0 +1,12 @@
+cmake_minimum_required(VERSION 3.5)
+project(autoware_localization_evaluation_scripts)
+
+find_package(autoware_cmake REQUIRED)
+autoware_package()
+
+install(PROGRAMS
+ scripts/plot_diagnostics.py
+ DESTINATION lib/${PROJECT_NAME}
+)
+
+ament_auto_package()
diff --git a/localization/autoware_localization_evaluation_scripts/README.md b/localization/autoware_localization_evaluation_scripts/README.md
new file mode 100644
index 00000000..035c892a
--- /dev/null
+++ b/localization/autoware_localization_evaluation_scripts/README.md
@@ -0,0 +1,61 @@
+# autoware_localization_evaluation_scripts
+
+This package contains scripts for evaluating the localization of Autoware.
+
+These scripts process the localization result rosbags, in particular the result_bag generated by `driving_log_replayer_v2`.
+
+As a test, execute `driving_log_replayer_v2` with the following command:
+
+```bash
+ros2 launch driving_log_replayer_v2 driving_log_replayer_v2.launch.py \
+ scenario_path:=$HOME/driving_log_replayer_v2/localization.yaml
+```
+
+Then, apply the scripts to the generated result_bag, which is located at:
+
+```bash
+$HOME/driving_log_replayer_v2/out/latest/result_bag
+```
+
+## plot_diagnostics.py
+
+```bash
+ros2 run autoware_localization_evaluation_scripts plot_diagnostics.py \
+    /path/to/result_bag \
+    --save_dir=/your/path (default: <rosbag_path>/../diagnostics_result) \
+    --storage=mcap (default: sqlite3)
+```
+
+[Example]
+
+```bash
+$ ros2 run autoware_localization_evaluation_scripts plot_diagnostics.py \
+ $HOME/driving_log_replayer_v2/out/latest/result_bag
+[INFO] [1740561002.740748735] [rosbag2_storage]: Opened database '$HOME/driving_log_replayer_v2/out/latest/result_bag/result_bag_0.db3' for READ_ONLY.
+save_dir=PosixPath('$HOME/driving_log_replayer_v2/out/latest/diagnostics_result')
+```
+
+This command saves each diagnostic as a TSV file and the corresponding plot as a PNG file.
+
+```bash
+$ tree $HOME/driving_log_replayer_v2/out/latest/diagnostics_result
+$HOME/driving_log_replayer_v2/out/latest/diagnostics_result
+├── gyro_bias_validator__gyro_bias_validator.png
+├── gyro_bias_validator__gyro_bias_validator.tsv
+├── localization__ekf_localizer.png
+├── localization__ekf_localizer.tsv
+├── localization__pose_instability_detector.png
+├── localization__pose_instability_detector.tsv
+├── localization_error_monitor__ellipse_error_status.png
+├── localization_error_monitor__ellipse_error_status.tsv
+├── ndt_scan_matcher__scan_matching_status.png
+└── ndt_scan_matcher__scan_matching_status.tsv
+
+0 directories, 10 files
+```
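+
+For further analysis beyond the generated plots, the TSV files can be loaded with pandas. The following is a minimal sketch (not part of this package) that counts the WARN/ERROR rows of one of the generated files; the `timestamp_header` and `level` columns are always written by `plot_diagnostics.py`, while the remaining columns depend on the diagnostic:
+
+```python
+import pandas as pd
+
+# Load one of the generated TSV files (one row per /diagnostics message)
+df = pd.read_csv("diagnostics_result/ndt_scan_matcher__scan_matching_status.tsv", sep="\t")
+
+# Count messages whose diagnostic level is not OK (0 = OK, 1 = WARN, 2 = ERROR)
+not_ok = df[df["level"] > 0]
+print(f"{len(not_ok)} of {len(df)} messages reported WARN or ERROR")
+```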
+
+[Example: ndt_scan_matcher__scan_matching_status.png]
+
+![ndt_scan_matcher__scan_matching_status](./media/ndt_scan_matcher__scan_matching_status.png)
diff --git a/localization/autoware_localization_evaluation_scripts/media/ndt_scan_matcher__scan_matching_status.png b/localization/autoware_localization_evaluation_scripts/media/ndt_scan_matcher__scan_matching_status.png
new file mode 100644
index 00000000..9d8819cd
Binary files /dev/null and b/localization/autoware_localization_evaluation_scripts/media/ndt_scan_matcher__scan_matching_status.png differ
diff --git a/localization/autoware_localization_evaluation_scripts/package.xml b/localization/autoware_localization_evaluation_scripts/package.xml
new file mode 100644
index 00000000..e40d8cbd
--- /dev/null
+++ b/localization/autoware_localization_evaluation_scripts/package.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>autoware_localization_evaluation_scripts</name>
+  <version>0.1.0</version>
+  <description>A package for evaluating localization</description>
+  <maintainer>Yamato Ando</maintainer>
+  <maintainer>Taiki Yamada</maintainer>
+  <maintainer>Shintaro Sakoda</maintainer>
+  <maintainer>Anh Nguyen</maintainer>
+  <maintainer>Masahiro Sakamoto</maintainer>
+  <maintainer>Hayato Mizushima</maintainer>
+  <license>Apache License 2.0</license>
+
+  <author>Shintaro Sakoda</author>
+
+  <buildtool_depend>ament_cmake_auto</buildtool_depend>
+  <buildtool_depend>autoware_cmake</buildtool_depend>
+
+  <export>
+    <build_type>ament_cmake</build_type>
+  </export>
+</package>
diff --git a/localization/autoware_localization_evaluation_scripts/scripts/plot_diagnostics.py b/localization/autoware_localization_evaluation_scripts/scripts/plot_diagnostics.py
new file mode 100755
index 00000000..82eac299
--- /dev/null
+++ b/localization/autoware_localization_evaluation_scripts/scripts/plot_diagnostics.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python3
+"""A script to parse diagnostics messages from a rosbag file."""
+
+import argparse
+from pathlib import Path
+
+from builtin_interfaces.msg import Time
+import matplotlib.pyplot as plt
+import pandas as pd
+from rclpy.serialization import deserialize_message
+import rosbag2_py
+from rosidl_runtime_py.utilities import get_message
+
+
+def parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("rosbag_path", type=Path)
+ parser.add_argument("--save_dir", type=Path, default=None)
+ parser.add_argument("--storage", type=str, default="sqlite3", choices=["sqlite3", "mcap"])
+ return parser.parse_args()
+
+
+def parse_stamp(stamp: Time) -> int:
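+    """Convert a builtin_interfaces/Time stamp into a single integer in nanoseconds."""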
+ return stamp.sec * int(1e9) + stamp.nanosec
+
+
+def diag_name_to_filename(diag_name: str) -> str:
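+    """Make a diagnostic status name usable as a file name by replacing ':' and spaces with '_'."""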
+ return diag_name.replace(":", "_").replace(" ", "_")
+
+
+def parse_diagnostics_msgs(rosbag_path: str, target_list: list, storage: str) -> dict:
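+    """Read /diagnostics from the rosbag and collect the statuses listed in target_list, keyed by status name."""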
+ serialization_format = "cdr"
+ storage_options = rosbag2_py.StorageOptions(
+ uri=str(rosbag_path),
+ storage_id=storage,
+ )
+ converter_options = rosbag2_py.ConverterOptions(
+ input_serialization_format=serialization_format,
+ output_serialization_format=serialization_format,
+ )
+
+ reader = rosbag2_py.SequentialReader()
+ reader.open(storage_options, converter_options)
+
+ topic_types = reader.get_all_topics_and_types()
+    type_map = {topic_type.name: topic_type.type for topic_type in topic_types}
+
+ storage_filter = rosbag2_py.StorageFilter(topics=["/diagnostics"])
+ reader.set_filter(storage_filter)
+
+ data_dict: dict = {key: [] for key in target_list}
+
+ while reader.has_next():
+ (topic, data, timestamp_rosbag) = reader.read_next()
+ msg_type = get_message(type_map[topic])
+ msg = deserialize_message(data, msg_type)
+ timestamp_header = parse_stamp(msg.header.stamp)
+ for status in msg.status:
+ if status.name not in target_list:
+ continue
+ curr_data = {kv.key: kv.value for kv in status.values}
+ curr_data["timestamp_rosbag"] = timestamp_rosbag
+ curr_data["timestamp_header"] = timestamp_header
+ curr_data["level"] = int.from_bytes(status.level, "big")
+ curr_data["message"] = status.message
+ data_dict[status.name].append(curr_data)
+ return data_dict
+
+
+if __name__ == "__main__":
+ args = parse_args()
+ rosbag_path = args.rosbag_path
+ save_dir = args.save_dir
+ storage = args.storage
+
+ if save_dir is None:
+ if rosbag_path.is_dir(): # if specified directory containing db3 files
+ save_dir = rosbag_path.parent / "diagnostics_result"
+ else: # if specified db3 file directly
+ save_dir = rosbag_path.parent.parent / "diagnostics_result"
+
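+    # Diagnostic status names to extract; each entry is saved as a TSV and plotted as a PNG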
+ target_list = [
+ "ndt_scan_matcher: scan_matching_status",
+ "localization: ekf_localizer",
+ "localization_error_monitor: ellipse_error_status",
+ "localization: pose_instability_detector",
+ "gyro_bias_validator: gyro_bias_validator",
+ ]
+
+ data_dict = parse_diagnostics_msgs(rosbag_path, target_list, storage)
+
+    save_dir.mkdir(parents=True, exist_ok=True)
+ print(f"{save_dir=}")
+
+ ################################
+ # save diagnostics data as tsv #
+ ################################
+ for key, data in data_dict.items():
+ df = pd.DataFrame(data)
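+        # Cast numeric columns to float, then convert integer-valued entries back to int so they are written without trailing decimals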
+ for col in df.columns:
+ if pd.api.types.is_numeric_dtype(df[col]):
+ df[col] = df[col].astype(float)
+ df[col] = df[col].apply(lambda x: int(x) if x.is_integer() else x)
+ filename = diag_name_to_filename(key)
+ df.to_csv(
+ save_dir / f"{filename}.tsv",
+ index=False,
+ sep="\t",
+ float_format="%.9f",
+ )
+
+ # Fix timestamp to relative time from the first message and convert to seconds
+ # (for better visualization)
+    for one_data_dict in data_dict.values():
+ if len(one_data_dict) == 0:
+ continue
+ first_h = int(one_data_dict[0]["timestamp_header"])
+ first_r = int(one_data_dict[0]["timestamp_rosbag"])
+ for row in one_data_dict:
+ row["timestamp_header"] = int(row["timestamp_header"]) - first_h
+ row["timestamp_rosbag"] = int(row["timestamp_rosbag"]) - first_r
+ row["timestamp_header"] /= 1e9
+ row["timestamp_rosbag"] /= 1e9
+
+ ####################
+ # ndt_scan_matcher #
+ ####################
+ diag_name = "ndt_scan_matcher: scan_matching_status"
+ df = pd.DataFrame(data_dict[diag_name])
+ key_list = [
+ "execution_time",
+ "iteration_num",
+ "sensor_points_size",
+ "sensor_points_delay_time_sec",
+ "skipping_publish_num",
+ "transform_probability",
+ "transform_probability_diff",
+ "nearest_voxel_transformation_likelihood",
+ "nearest_voxel_transformation_likelihood_diff",
+ "local_optimal_solution_oscillation_num",
+ "distance_initial_to_result",
+ ]
+ plt.figure(figsize=(6.4 * 2, 4.8 * 2))
+ for i, key in enumerate(key_list):
+ if key not in df.columns:
+ print(f"Skip {key}")
+ continue
+ df[key] = df[key].astype(float)
+ df = df.dropna(subset=[key])
+ plt.subplot(4, 3, i + 1)
+ plt.plot(df["timestamp_header"], df[key], label=key)
+ if key == "nearest_voxel_transformation_likelihood":
+ plt.plot(
+ df["timestamp_header"],
+ [2.3 for _ in range(len(df))],
+ label="threshold",
+ linestyle="dashed",
+ )
+ plt.xlabel("time [s]")
+ plt.title(f"{key}")
+ plt.ylim(bottom=min(0, df[key].min()))
+ plt.grid()
+
+ plt.tight_layout()
+ save_path = save_dir / f"{diag_name_to_filename(diag_name)}.png"
+ plt.savefig(save_path, bbox_inches="tight", pad_inches=0.05)
+
+ #################
+ # ekf_localizer #
+ #################
+ diag_name = "localization: ekf_localizer"
+    df = pd.DataFrame(data_dict[diag_name])
+    if "is_activated" in df.columns:
+        df = df[df["is_activated"] == "True"]
+ key_list = [
+ "pose_mahalanobis_distance",
+ "twist_mahalanobis_distance",
+ "cov_ellipse_long_axis_size",
+ "cov_ellipse_lateral_direction_size",
+ ]
+ plt.figure(figsize=(6.4 * 2, 4.8 * 2))
+ for i, key in enumerate(key_list):
+ if key not in df.columns:
+ print(f"Skip {key}")
+ continue
+ df[key] = df[key].astype(float)
+ key_threshold = (
+ key + "_threshold" if "mahalanobis" in key else key.replace("_size", "_warn_threshold")
+ )
+ df[key_threshold] = df[key_threshold].astype(float)
+        plt.subplot(4, 1, i + 1)
+ plt.plot(df["timestamp_header"], df[key], label=key)
+ plt.plot(df["timestamp_header"], df[key_threshold], label=key_threshold)
+ plt.xlabel("time [s]")
+ plt.title(f"{key}")
+ plt.grid()
+
+ plt.tight_layout()
+ save_path = save_dir / f"{diag_name_to_filename(diag_name)}.png"
+ plt.savefig(save_path, bbox_inches="tight", pad_inches=0.05)
+
+ #############################
+ # pose_instability_detector #
+ #############################
+ diag_name = "localization: pose_instability_detector"
+ df = pd.DataFrame(data_dict[diag_name])
+
+ plt.figure(figsize=(6.4 * 2, 4.8 * 2))
+ target_names = ["position", "angle"]
+ for i, target_name in enumerate(target_names):
+ plt.subplot(2, 1, i + 1)
+ key_list = [
+ f"diff_{target_name}_x",
+ f"diff_{target_name}_y",
+ f"diff_{target_name}_z",
+ ]
+ for key in key_list:
+ key_value = key + ":value"
+ key_threshold = key + ":threshold"
+ if key_value not in df.columns or key_threshold not in df.columns:
+ print(f"Skip {key}")
+ continue
+ df[key_value] = df[key_value].astype(float)
+ df[key_threshold] = df[key_threshold].astype(float)
+ plt.plot(df["timestamp_header"], df[key_value], label=key_value)
+ plt.plot(
+ df["timestamp_header"],
+ df[key_threshold],
+ linestyle="dashed",
+ )
+ plt.plot(
+ df["timestamp_header"],
+ -df[key_threshold],
+ linestyle="dashed",
+ )
+ plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0)
+ plt.xlabel("time [s]")
+ unit = "[m]" if target_name == "position" else "[rad]"
+ plt.ylabel(f"diff_{target_name} {unit}")
+ plt.grid()
+
+ plt.tight_layout()
+ save_path = save_dir / f"{diag_name_to_filename(diag_name)}.png"
+ plt.savefig(save_path, bbox_inches="tight", pad_inches=0.05)
+
+ ##############################
+ # localization_error_monitor #
+ ##############################
+ diag_name = "localization_error_monitor: ellipse_error_status"
+ df = pd.DataFrame(data_dict[diag_name])
+ key_list = [
+ "localization_error_ellipse",
+ "localization_error_ellipse_lateral_direction",
+ "level",
+ ]
+ plt.figure(figsize=(6.4 * 2, 4.8 * 2))
+    for key in key_list:
+ if key not in df.columns:
+ print(f"Skip {key}")
+ continue
+ df[key] = df[key].astype(float)
+ plt.plot(df["timestamp_header"], df[key], label=key)
+ plt.xlabel("time [s]")
+ plt.ylabel("error_ellipse [m]")
+ plt.grid()
+ plt.legend()
+ plt.tight_layout()
+ save_path = save_dir / f"{diag_name_to_filename(diag_name)}.png"
+ plt.savefig(save_path, bbox_inches="tight", pad_inches=0.05)
+
+ #######################
+ # gyro_bias_validator #
+ #######################
+ diag_name = "gyro_bias_validator: gyro_bias_validator"
+ df = pd.DataFrame(data_dict[diag_name])
+ key_list = [
+ "estimated_gyro_bias_x",
+ "estimated_gyro_bias_y",
+ "estimated_gyro_bias_z",
+ ]
+ plt.figure(figsize=(6.4 * 2, 4.8 * 2))
+    for key in key_list:
+ if key not in df.columns:
+ print(f"Skip {key}")
+ continue
+ df[key] = df[key].astype(float)
+ plt.plot(df["timestamp_header"], df[key], label=key)
+ if "gyro_bias_threshold" in df.columns:
+ plt.plot(
+ df["timestamp_header"],
+ df["gyro_bias_threshold"].astype(float),
+ linestyle="dashed",
+ )
+ plt.plot(
+ df["timestamp_header"],
+ -df["gyro_bias_threshold"].astype(float),
+ linestyle="dashed",
+ )
+ plt.xlabel("time [s]")
+ plt.ylabel("gyro_bias [rad/s]")
+ plt.grid()
+ plt.legend()
+ plt.tight_layout()
+ save_path = save_dir / f"{diag_name_to_filename(diag_name)}.png"
+ plt.savefig(save_path, bbox_inches="tight", pad_inches=0.05)