Add scripts for measuring performance #64

Open · wants to merge 18 commits into main
4 changes: 2 additions & 2 deletions nocturne/cpp/src/scenario.cc
@@ -892,8 +892,8 @@ void Scenario::LoadObjects(const json& objects_json) {
    // TODO(ev) current_time_ should be passed in rather than defined here.
    const geometry::Vector2D position(obj["position"][current_time_]["x"],
                                      obj["position"][current_time_]["y"]);
-   const float width = static_cast<float>(obj["width"]);
-   const float length = static_cast<float>(obj["length"]);
    const float width = std::abs(static_cast<float>(obj["width"]));
    const float length = std::abs(static_cast<float>(obj["length"]));
    geometry::Vector2D target_position;
    if (obj.contains("goalPosition")) {
      target_position = geometry::Vector2D(obj["goalPosition"]["x"],
124 changes: 124 additions & 0 deletions scripts/data_analysis/draw_perf_figure.py
@@ -0,0 +1,124 @@
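"""Aggregate the perf_stats pickles produced by speed_test.py and plot the
single-agent (figure_7.png) and multi-agent (figure_8.png) timing figures."""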
import pickle
import numpy as np

import matplotlib.pyplot as plt

FILE_NAMES = [
"./perf_data/perf_stats_2000-1.pkl",
"./perf_data/perf_stats_2000-2.pkl",
"./perf_data/perf_stats_2000-3.pkl",
"./perf_data/perf_stats_2000-4.pkl",
"./perf_data/perf_stats_2000-5.pkl",
]

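# speed_test.py buckets its timings by vehicle/agent count (up to
# NUM_MAX_VEHICLES buckets); the figures only plot the first X_RANGE buckets.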
NUM_MAX_VEHICLES = 400
X_RANGE = 51

n = len(FILE_NAMES)

single_avg_fps = np.zeros(n)
single_avg_fps_by_veh = np.zeros((NUM_MAX_VEHICLES, n))

multi_avg_fps = np.zeros(n)
multi_avg_sec = np.zeros(n)
multi_avg_fps_by_agt = np.zeros((NUM_MAX_VEHICLES, n))
multi_avg_sec_by_agt = np.zeros((NUM_MAX_VEHICLES, n))
multi_all_fps = np.zeros(n)

multi_avg_agt_num = np.zeros(n)

for i, file in enumerate(FILE_NAMES):
    with open(file, "rb") as f:
        stats = pickle.load(f)

    # Overall FPS for this run: total steps over total wall-clock seconds.
    cur_single_avg_fps = np.sum(stats["single_cnt_by_veh"]) / np.sum(
        stats["single_sec_by_veh"])
    single_avg_fps[i] = cur_single_avg_fps
    single_avg_fps_by_veh[:, i] = stats["single_avg_fps"]

    cur_multi_avg_fps = np.sum(stats["multi_cnt_by_agt"]) / np.sum(
        stats["multi_sec_by_agt"])
    cur_multi_avg_sec = np.sum(stats["multi_sec_by_agt"]) / np.sum(
        stats["multi_cnt_by_agt"])
    multi_avg_fps[i] = cur_multi_avg_fps
    multi_avg_sec[i] = cur_multi_avg_sec
    multi_avg_fps_by_agt[:, i] = stats["multi_avg_fps_by_agt"]
    multi_avg_sec_by_agt[:, i] = stats["multi_avg_sec_by_agt"]
    # Aggregate throughput: per-agent FPS times the average number of agents.
    multi_all_fps[i] = cur_multi_avg_fps * stats["multi_avg_agt_num"]
    multi_avg_agt_num[i] = stats["multi_avg_agt_num"]

# single_avg_fps_by_veh_med = np.median(single_avg_fps_by_veh, axis=1)
# single_avg_fps_by_veh_min = np.min(single_avg_fps_by_veh, axis=1)
# single_avg_fps_by_veh_max = np.max(single_avg_fps_by_veh, axis=1)

single_avg_fps_by_veh_avg = np.mean(single_avg_fps_by_veh, axis=1)
single_avg_fps_by_veh_std = np.std(single_avg_fps_by_veh, axis=1, ddof=1)

print(f"[single] avg_fps = {single_avg_fps}")
print(f"[single] avg_fps = {np.mean(single_avg_fps)} " +
f"[{np.std(single_avg_fps, ddof=1)}]")

vehicles = [10, 20, 30]
for v in vehicles:
    print(f"[single] vehicle = {v}, fps = {single_avg_fps_by_veh_avg[v]}, " +
          f"std = {single_avg_fps_by_veh_std[v]}")

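# Bucket i of the single-agent stats holds runs with i + 1 total vehicles,
# i.e. i vehicles besides the single controlled agent.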
x = np.arange(X_RANGE)
m = X_RANGE

fig, ax = plt.subplots()
ax.plot(x, single_avg_fps_by_veh_avg[:m])
ax.fill_between(
    x,
    single_avg_fps_by_veh_avg[:m] - single_avg_fps_by_veh_std[:m],
    single_avg_fps_by_veh_avg[:m] + single_avg_fps_by_veh_std[:m],
    alpha=0.25,
)
plt.xlabel("Number of non-agent vehicles")
plt.ylabel("Average steps-per-second")
# plt.show()
plt.savefig("figure_7.png", dpi=300)
plt.clf()

print(f"[multi] avg_agent_num = {multi_avg_agt_num}")
print(f"[multi] avg_agent_num = {np.mean(multi_avg_agt_num)} " +
f"[{np.std(multi_avg_agt_num, ddof=1)}]")
print(f"[multi] avg_fps = {multi_avg_fps}")
print(f"[multi] avg_fps = {np.mean(multi_avg_fps)} " +
f"[{np.std(multi_avg_fps, ddof=1)}]")
print(f"[multi] all_fps = {multi_all_fps}")
print(f"[multi] all_fps = {np.mean(multi_all_fps)} " +
f"[{np.std(multi_all_fps, ddof=1)}]")

# multi_avg_fps_by_agt_med = np.median(multi_avg_fps_by_agt, axis=1)
# multi_avg_fps_by_agt_min = np.min(multi_avg_fps_by_agt, axis=1)
# multi_avg_fps_by_agt_max = np.max(multi_avg_fps_by_agt, axis=1)

# multi_avg_sec_by_agt_med = np.median(multi_avg_sec_by_agt, axis=1)
# multi_avg_sec_by_agt_min = np.min(multi_avg_sec_by_agt, axis=1)
# multi_avg_sec_by_agt_max = np.max(multi_avg_sec_by_agt, axis=1)

multi_avg_fps_by_agt_avg = np.mean(multi_avg_fps_by_agt, axis=1)
multi_avg_fps_by_agt_std = np.std(multi_avg_fps_by_agt, axis=1, ddof=1)

multi_avg_sec_by_agt_avg = np.mean(multi_avg_sec_by_agt, axis=1)
multi_avg_sec_by_agt_std = np.std(multi_avg_sec_by_agt, axis=1, ddof=1)

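# Bucket i of the multi-agent stats holds runs with i + 1 controlled agents.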
x = np.arange(1, X_RANGE)
m = X_RANGE - 1
# print(multi_avg_sec_by_agt[:m, :])

fig, ax = plt.subplots()
ax.plot(x, multi_avg_sec_by_agt_avg[:m] * 1000)
ax.fill_between(
    x,
    (multi_avg_sec_by_agt_avg[:m] - multi_avg_sec_by_agt_std[:m]) * 1000,
    (multi_avg_sec_by_agt_avg[:m] + multi_avg_sec_by_agt_std[:m]) * 1000,
    alpha=0.25,
)

plt.xlabel("Number of controlled agents")
plt.ylabel("Average time per step (ms)")
# plt.show()
plt.savefig("figure_8.png", dpi=300)
plt.clf()
Binary file added scripts/data_analysis/perf_stats_1000.pkl
Several other binary files added (contents not shown)
231 changes: 216 additions & 15 deletions scripts/data_analysis/speed_test.py
@@ -5,14 +5,207 @@
"""Utils that we use to understand the datasets we are working with."""
import json
import os
import pickle
import time

import hydra
import numpy as np
import matplotlib.pyplot as plt

from typing import Any, Dict, Sequence, Union

from cfgs.config import PROCESSED_TRAIN_NO_TL, get_scenario_dict, set_display_window
from nocturne import Simulation, Action

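# Timing histograms are bucketed by vehicle/agent count, capped at this size.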
MAX_NUM_VEHICLES = 400


def single_agent_test(cfg: Dict[str, Any], files: Sequence[str],
                      num_steps: int) -> Dict[str, Union[float, np.ndarray]]:
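    """Time a single controlled agent per step; other movers replay experts.

    Timings are bucketed by each scenario's total vehicle count. Returns a
    dict of per-bucket seconds/counts plus overall averages.
    """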
    sec_by_veh = np.zeros(MAX_NUM_VEHICLES, dtype=np.int64)  # nanoseconds
    cnt_by_veh = np.zeros(MAX_NUM_VEHICLES, dtype=np.int64)
    avg_agt_num = []
    avg_veh_num = []

    cnt = 0
    for file in files:
        cnt += 1

        for t in range(num_steps):
            local_cfg = get_scenario_dict(cfg)
            local_cfg['start_time'] = t
            sim = Simulation(os.path.join(PROCESSED_TRAIN_NO_TL, file),
                             local_cfg)
            scenario = sim.scenario()
            vehs = scenario.vehicles()
            agts = scenario.moving_objects()

            num_vehs = len(vehs)
            num_agts = len(agts)
            if num_agts == 0:
                break

            num_objs = len(scenario.objects())
            if num_vehs != num_objs:
                print(f"[Single] num_vehs = {num_vehs}, num_objs = {num_objs}")

            avg_agt_num.append(num_agts)
            avg_veh_num.append(num_vehs)

            # Control one random agent; all other movers replay expert data.
            agt_idx = np.random.randint(num_agts)
            for i, agt in enumerate(agts):
                if i != agt_idx:
                    agt.expert_control = True
            agt = agts[agt_idx]

            try:
                start_ns = time.perf_counter_ns()
                _ = scenario.flattened_visible_state(agt, 80,
                                                     (120 / 180) * np.pi)
                agt.apply_action(Action(1.0, 1.0, 1.0))
                sim.step(0.1)
                total_time = time.perf_counter_ns() - start_ns
            except Exception as e:
                print(e)
                continue

            if num_vehs <= MAX_NUM_VEHICLES:
                sec_by_veh[num_vehs - 1] += total_time
                cnt_by_veh[num_vehs - 1] += 1

    sec_by_veh = sec_by_veh * 1e-9  # nanoseconds -> seconds
    avg_sec = sec_by_veh / cnt_by_veh
    avg_fps = cnt_by_veh / sec_by_veh
    avg_sec = np.nan_to_num(avg_sec)  # empty buckets give 0 / 0 = nan; map to 0
    avg_fps = np.nan_to_num(avg_fps)

    overall_avg_sec = np.sum(sec_by_veh) / np.sum(cnt_by_veh)
    overall_avg_fps = np.sum(cnt_by_veh) / np.sum(sec_by_veh)
    avg_agt_num = np.mean(avg_agt_num)
    avg_veh_num = np.mean(avg_veh_num)

    print(f"[single] num_files = {cnt}")
    print(f"[single] avg_sec = {avg_sec}")
    print(f"[single] avg_fps = {avg_fps}")
    print(f"[single] overall_avg_sec = {overall_avg_sec}")
    print(f"[single] overall_avg_fps = {overall_avg_fps}")
    print(f"[single] overall_avg_veh = {avg_veh_num}")
    print(f"[single] overall_avg_agt = {avg_agt_num}")

    return {
        "single_sec_by_veh": sec_by_veh,
        "single_cnt_by_veh": cnt_by_veh,
        "single_avg_sec": avg_sec,
        "single_avg_fps": avg_fps,
        "single_avg_agt_num": avg_agt_num,
        "single_avg_veh_num": avg_veh_num,
        "single_overall_avg_sec": overall_avg_sec,
        "single_overall_avg_fps": overall_avg_fps,
    }


def multi_agent_test(cfg: Dict[str, Any], files: Sequence[str],
                     num_steps: int) -> Dict[str, Union[float, np.ndarray]]:
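    """Time a step in which every moving object is a controlled agent.

    Timings are bucketed by both total vehicle count and agent count. Returns
    a dict of per-bucket seconds/counts plus overall averages.
    """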
    sec_by_veh = np.zeros(MAX_NUM_VEHICLES, dtype=np.int64)  # nanoseconds
    cnt_by_veh = np.zeros(MAX_NUM_VEHICLES, dtype=np.int64)
    sec_by_agt = np.zeros(MAX_NUM_VEHICLES, dtype=np.int64)
    cnt_by_agt = np.zeros(MAX_NUM_VEHICLES, dtype=np.int64)
    veh_by_agt = np.zeros(MAX_NUM_VEHICLES, dtype=np.int64)

    avg_agt_num = []
    avg_veh_num = []

    cnt = 0
    for file in files:
        cnt += 1

        for t in range(num_steps):
            local_cfg = get_scenario_dict(cfg)
            local_cfg['start_time'] = t
            sim = Simulation(os.path.join(PROCESSED_TRAIN_NO_TL, file),
                             local_cfg)
            scenario = sim.scenario()
            vehs = scenario.vehicles()
            agts = scenario.moving_objects()

            num_vehs = len(vehs)
            num_agts = len(agts)
            if num_agts == 0:
                break

            num_objs = len(scenario.objects())
            if num_vehs != num_objs:
                print(f"[Multi] num_vehs = {num_vehs}, num_objs = {num_objs}")

            avg_agt_num.append(num_agts)
            avg_veh_num.append(num_vehs)

            try:
                # Time one observation + action per agent, then one sim step.
                start_ns = time.perf_counter_ns()
                for agt in agts:
                    _ = scenario.flattened_visible_state(
                        agt, 80, (120 / 180) * np.pi)
                    agt.apply_action(Action(1.0, 1.0, 1.0))
                sim.step(0.1)
                total_time = time.perf_counter_ns() - start_ns
            except Exception as e:
                print(e)
                continue

            if num_vehs <= MAX_NUM_VEHICLES:
                sec_by_veh[num_vehs - 1] += total_time
                cnt_by_veh[num_vehs - 1] += 1
                sec_by_agt[num_agts - 1] += total_time
                cnt_by_agt[num_agts - 1] += 1
                veh_by_agt[num_agts - 1] += num_vehs

    sec_by_veh = sec_by_veh * 1e-9  # nanoseconds -> seconds
    sec_by_agt = sec_by_agt * 1e-9
    avg_sec_by_veh = sec_by_veh / cnt_by_veh
    avg_sec_by_agt = sec_by_agt / cnt_by_agt
    avg_fps_by_veh = cnt_by_veh / sec_by_veh
    avg_fps_by_agt = cnt_by_agt / sec_by_agt
    avg_veh_by_agt = veh_by_agt / cnt_by_agt

    # Empty buckets produce 0 / 0 = nan; map them to zero.
    avg_sec_by_veh = np.nan_to_num(avg_sec_by_veh)
    avg_fps_by_veh = np.nan_to_num(avg_fps_by_veh)
    avg_sec_by_agt = np.nan_to_num(avg_sec_by_agt)
    avg_fps_by_agt = np.nan_to_num(avg_fps_by_agt)
    avg_veh_by_agt = np.nan_to_num(avg_veh_by_agt)

    overall_avg_sec = np.sum(sec_by_agt) / np.sum(cnt_by_agt)
    overall_avg_fps = np.sum(cnt_by_agt) / np.sum(sec_by_agt)
    avg_agt_num = np.mean(avg_agt_num)
    avg_veh_num = np.mean(avg_veh_num)

    print(f"[multi] num_files = {cnt}")
    print(f"[multi] avg_sec_by_veh = {avg_sec_by_veh}")
    print(f"[multi] avg_fps_by_veh = {avg_fps_by_veh}")
    print(f"[multi] avg_sec_by_agt = {avg_sec_by_agt}")
    print(f"[multi] avg_fps_by_agt = {avg_fps_by_agt}")
    print(f"[multi] avg_veh_by_agt = {avg_veh_by_agt}")
    print(f"[multi] overall_avg_sec = {overall_avg_sec}")
    print(f"[multi] overall_avg_fps = {overall_avg_fps}")
    print(f"[multi] overall_avg_veh = {avg_veh_num}")
    print(f"[multi] overall_avg_agt = {avg_agt_num}")

    return {
        "multi_sec_by_veh": sec_by_veh,
        "multi_cnt_by_veh": cnt_by_veh,
        "multi_sec_by_agt": sec_by_agt,
        "multi_cnt_by_agt": cnt_by_agt,
        "multi_veh_by_agt": veh_by_agt,
        "multi_avg_sec_by_veh": avg_sec_by_veh,
        "multi_avg_fps_by_veh": avg_fps_by_veh,
        "multi_avg_sec_by_agt": avg_sec_by_agt,
        "multi_avg_fps_by_agt": avg_fps_by_agt,
        "multi_avg_veh_by_agt": avg_veh_by_agt,
        "multi_avg_agt_num": avg_agt_num,
        "multi_avg_veh_num": avg_veh_num,
        "multi_overall_avg_sec": overall_avg_sec,
        "multi_overall_avg_fps": overall_avg_fps,
    }


def run_speed_test(files, cfg):
"""Compute the expert accelerations and number of vehicles across the dataset.
@@ -25,20 +25,218 @@ def run_speed_test(files, cfg):
        None. Aggregate statistics are printed and pickled to ./perf_stats.pkl.
    """
-    times_list = []
-    for file in files:
-        sim = Simulation(os.path.join(PROCESSED_TRAIN_NO_TL, file),
-                         get_scenario_dict(cfg))
-        vehs = sim.scenario().getObjectsThatMoved()
-        scenario = sim.getScenario()
-        veh = vehs[np.random.randint(len(vehs))]
-        t = time.perf_counter()
-        _ = scenario.flattened_visible_state(veh, 80, (180 / 180) * np.pi)
-        veh.apply_action(Action(1.0, 1.0, 1.0))
-        sim.step(0.1)
-        times_list.append(time.perf_counter() - t)
-    print('avg, std. time to get obs is {}, {}'.format(np.mean(times_list),
-                                                       np.std(times_list)))

    num_files = 2000
    num_steps = 90

    # Sample scenario files (with replacement) from the full file list.
    indices = np.random.choice(len(files), num_files)
    indices = indices.tolist()
    sample_files = [files[i] for i in indices]

    stats = {"num_files": num_files}

    stats1 = single_agent_test(cfg, sample_files, num_steps)
    stats.update(stats1)

    stats2 = multi_agent_test(cfg, sample_files, num_steps)
    stats.update(stats2)

    print(stats)

    # draw_perf_figure.py reads ./perf_data/perf_stats_2000-<i>.pkl, so this
    # output presumably gets renamed/moved by hand between runs.
    with open("./perf_stats.pkl", 'wb') as f:
        pickle.dump(stats, f)


@hydra.main(config_path="../../cfgs/", config_name="config")
@@ -48,7 +248,8 @@ def analyze_accels(cfg):
    with open(os.path.join(f_path, 'valid_files.json')) as file:
        valid_veh_dict = json.load(file)
    files = list(valid_veh_dict.keys())
-    run_speed_test(files[0:10], cfg)
    print(f"tot_files = {len(files)}")
    run_speed_test(files, cfg)


if __name__ == '__main__':