diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
new file mode 100644
index 0000000..761a345
--- /dev/null
+++ b/.idea/workspace.xml
@@ -0,0 +1,37 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+1668358801742
+
+
+
+
\ No newline at end of file
diff --git a/Agent_1.py b/Agent_1.py
index c199944..e85beba 100644
--- a/Agent_1.py
+++ b/Agent_1.py
@@ -1,17 +1,9 @@
 import random
 from pprint import pprint
-
 import config
 import utils
 from prey import Prey
 from predator import Predator
-"""
-# Test Imports
-from pprint import pprint
-import environment as env
-
-"""
-
 
 class Agent_1:
@@ -206,62 +198,3 @@ def begin(arena):
                 forced_termination * 100 / number_of_games, 100.0, 100.0]
     # data.append(data_row)
     return data_row
-
-
-"""
-# Class Test code
-#arena=env.generate_environement()
-arena = {0: [1, 49, 48],
-         1: [2, 0, 46],
-         2: [3, 1, 5],
-         3: [4, 2, 7],
-         4: [5, 3, 6],
-         5: [6, 4, 2],
-         6: [7, 5, 4],
-         7: [8, 6, 3],
-         8: [9, 7, 10],
-         9: [10, 8, 11],
-         10: [11, 9, 8],
-         11: [12, 10, 9],
-         12: [13, 11, 14],
-         13: [14, 12, 15],
-         14: [15, 13, 12],
-         15: [16, 14, 13],
-         16: [17, 15, 19],
-         17: [18, 16, 20],
-         18: [19, 17, 21],
-         19: [20, 18, 16],
-         20: [21, 19, 17],
-         21: [22, 20, 18],
-         22: [23, 21, 26],
-         23: [24, 22, 25],
-         24: [25, 23, 28],
-         25: [26, 24, 23],
-         26: [27, 25, 22],
-         27: [28, 26, 30],
-         28: [29, 27, 24],
-         29: [30, 28, 31],
-         30: [31, 29, 27],
-         31: [32, 30, 29],
-         32: [33, 31, 35],
-         33: [34, 32],
-         34: [35, 33, 39],
-         35: [36, 34, 32],
-         36: [37, 35, 38],
-         37: [38, 36, 41],
-         38: [39, 37, 36],
-         39: [40, 38, 34],
-         40: [41, 39, 44],
-         41: [42, 40, 37],
-         42: [43, 41],
-         43: [44, 42, 47],
-         44: [45, 43, 40],
-         45: [46, 44, 49],
-         46: [47, 45, 1],
-         47: [48, 46, 43],
-         48: [49, 47, 0],
-         49: [0, 48, 45]}
-# print(a1.curr_pos)
-a1.move(arena, 5, 6)
-# pprint(arena)
-"""
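The test fixture removed above doubles as documentation of the arena format: an adjacency dict over nodes 0-49, where each node keeps its two ring neighbours plus at most one chord to a node within five ring steps. A quick sanity check of those invariants could look like the following minimal sketch; `check_arena` is a hypothetical helper, not part of the repo.

```python
# Minimal sketch: validate the arena invariants implied by the fixture above.
# `check_arena` is hypothetical; `arena` is the node -> neighbour-list dict
# used throughout this repo.
def check_arena(arena):
    for node, neighbours in arena.items():
        assert 2 <= len(neighbours) <= 3, "ring edges plus at most one chord"
        assert node not in neighbours, "no self-loops"
        for n in neighbours:
            assert node in arena[n], "edges must be symmetric"
    return True
```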
diff --git a/Agent_2.py b/Agent_2.py
index 635f679..893ab85 100644
--- a/Agent_2.py
+++ b/Agent_2.py
@@ -1,17 +1,9 @@
 import random
-from pprint import pprint
-import matplotlib.pyplot as plt
 import config
 import utils
 from prey import Prey
 from predator import Predator
-import networkx as nx
-"""
-# Test Imports
-from pprint import pprint
-import environment as env
-"""
 
 
 class Agent_2:
@@ -131,82 +123,6 @@ def begin(arena):
     data_row = ["Agent_2", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, 100.0, 100.0]
-    # data.append(data_row)
-
-
-    # chocolate pan
-    # if loss_count * 100 / number_of_games > 30:
-    #     pprint(arena)
-    #     print("Agent:",test_agent_pos," Prey :",test_prey_pos, " Predator :",test_predator_pos)
-    #
-    #     edges = []
-    #     for key in arena:
-    #         for i in arena[key]:
-    #             edges.append([key,i])
-    #     #print(edges)
-    #     graph=nx.Graph()
-    #     graph.add_edges_from(edges)
-    #     nx.draw_networkx(graph)
-    #     plt.show()
-    #
-    #     exit(0)
-    return data_row
+    return data_row
-
-
-"""
-# Class Test code
-#arena=env.generate_environement()
-arena = {0: [1, 49, 48],
-         1: [2, 0, 46],
-         2: [3, 1, 5],
-         3: [4, 2, 7],
-         4: [5, 3, 6],
-         5: [6, 4, 2],
-         6: [7, 5, 4],
-         7: [8, 6, 3],
-         8: [9, 7, 10],
-         9: [10, 8, 11],
-         10: [11, 9, 8],
-         11: [12, 10, 9],
-         12: [13, 11, 14],
-         13: [14, 12, 15],
-         14: [15, 13, 12],
-         15: [16, 14, 13],
-         16: [17, 15, 19],
-         17: [18, 16, 20],
-         18: [19, 17, 21],
-         19: [20, 18, 16],
-         20: [21, 19, 17],
-         21: [22, 20, 18],
-         22: [23, 21, 26],
-         23: [24, 22, 25],
-         24: [25, 23, 28],
-         25: [26, 24, 23],
-         26: [27, 25, 22],
-         27: [28, 26, 30],
-         28: [29, 27, 24],
-         29: [30, 28, 31],
-         30: [31, 29, 27],
-         31: [32, 30, 29],
-         32: [33, 31, 35],
-         33: [34, 32],
-         34: [35, 33, 39],
-         35: [36, 34, 32],
-         36: [37, 35, 38],
-         37: [38, 36, 41],
-         38: [39, 37, 36],
-         39: [40, 38, 34],
-         40: [41, 39, 44],
-         41: [42, 40, 37],
-         42: [43, 41],
-         43: [44, 42, 47],
-         44: [45, 43, 40],
-         45: [46, 44, 49],
-         46: [47, 45, 1],
-         47: [48, 46, 43],
-         48: [49, 47, 0],
-         49: [0, 48, 45]}
-# print(a1.curr_pos)
-a1.move(arena, 5, 6)
-# pprint(arena)
-"""
diff --git a/Agent_3.py b/Agent_3.py
index 4ef9053..6e2341e 100644
--- a/Agent_3.py
+++ b/Agent_3.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -33,7 +31,7 @@ def __init__(self, prey_loc, predator_loc):
         self.prey_belief_state = dict.fromkeys([i for i in range(50)], 1/49)
         self.prey_belief_state[self.curr_pos] = 0
-        # print(f'Initial belief state: {self.prey_belief_state}')
+
     def move(self, arena, prey_loc, predator_loc):
         """
@@ -108,28 +106,12 @@ def begin(arena):
                 'after_survey')
             if max(agent3.prey_belief_state.values()) == 1:
                 prey_certainty_counter += 1
-            # print('belief state after survey:')
-            # pprint(agent3.prey_belief_state)
-            # print('sum of prob: ', sum(agent3.prey_belief_state.values()))
-            """
-            # print(found_prey)
-            if found_prey:
-                # found the prey and now have to use a variable assignment tree to track the prey
-                pass
-            else:
-                # Choose a node at random and assume it is where the prey is
-                agent3.prey_belief_state[node_surveyed] = 0
-                for i in range(50):
-                    degree = utils.get_degree(arena, i)
-                    if i != node_surveyed:
-                        agent3.prey_belief_state[i] += 1/48  # Has to be phrased in the form of previous probability and next probability in terms of the degree of neighbours of this node
-            """
             believed_prey_curr_pos = utils.return_max_prey_belief(agent3.prey_belief_state, arena)
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
+            #using the max belief node for prey
             agent3.move(arena, believed_prey_curr_pos, predator.curr_pos)
-            # print(f'agent after movement: {agent3.curr_pos}')
+
             # Checking termination states
             if agent3.curr_pos == prey.curr_pos:
                 win_count += 1
@@ -147,9 +129,6 @@ def begin(arena):
                 node_surveyed, \
                 'after_agent_moves')
-            # print('belief state after_agent_moves:')
-            # pprint(agent3.prey_belief_state)
-            # print('sum of prob: ', sum(agent3.prey_belief_state.values()))
 
             prey.move(arena)
@@ -166,9 +145,7 @@ def begin(arena):
                 node_surveyed, \
                 'after_prey_moves')
-            # print('belief state after_prey_moves:')
-            # pprint(agent3.prey_belief_state)
-            # print('sum of prob: ', sum(agent3.prey_belief_state.values()))
+
             predator.move(agent3.curr_pos, arena)
 
             # Checking termination states
@@ -191,64 +168,6 @@ def begin(arena):
     data_row = ["Agent_3", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, 100.0]
-    # data.append(data_row)
-    return data_row
+    return data_row
-
-
-"""
-# Class Test code
-#arena=env.generate_environement()
-arena = {0: [1, 49, 48],
-         1: [2, 0, 46],
-         2: [3, 1, 5],
-         3: [4, 2, 7],
-         4: [5, 3, 6],
-         5: [6, 4, 2],
-         6: [7, 5, 4],
-         7: [8, 6, 3],
-         8: [9, 7, 10],
-         9: [10, 8, 11],
-         10: [11, 9, 8],
-         11: [12, 10, 9],
-         12: [13, 11, 14],
-         13: [14, 12, 15],
-         14: [15, 13, 12],
-         15: [16, 14, 13],
-         16: [17, 15, 19],
-         17: [18, 16, 20],
-         18: [19, 17, 21],
-         19: [20, 18, 16],
-         20: [21, 19, 17],
-         21: [22, 20, 18],
-         22: [23, 21, 26],
-         23: [24, 22, 25],
-         24: [25, 23, 28],
-         25: [26, 24, 23],
-         26: [27, 25, 22],
-         27: [28, 26, 30],
-         28: [29, 27, 24],
-         29: [30, 28, 31],
-         30: [31, 29, 27],
-         31: [32, 30, 29],
-         32: [33, 31, 35],
-         33: [34, 32],
-         34: [35, 33, 39],
-         35: [36, 34, 32],
-         36: [37, 35, 38],
-         37: [38, 36, 41],
-         38: [39, 37, 36],
-         39: [40, 38, 34],
-         40: [41, 39, 44],
-         41: [42, 40, 37],
-         42: [43, 41],
-         43: [44, 42, 47],
-         44: [45, 43, 40],
-         45: [46, 44, 49],
-         46: [47, 45, 1],
-         47: [48, 46, 43],
-         48: [49, 47, 0],
-         49: [0, 48, 45]}
-# print(a1.curr_pos)
-a1.move(arena, 5, 6)
-# pprint(arena)
-"""
diff --git a/Agent_4.py b/Agent_4.py
index 8037cdb..5f47389 100644
--- a/Agent_4.py
+++ b/Agent_4.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -96,7 +94,7 @@ def begin(arena):
             # Survey a node initially without ever knowing where the prey is for a fact
             found_prey, node_surveyed = utils.survey_prey(agent4, prey)
-            # print(f'node surveyed: {node_surveyed}')
+
             # prey belief state will be updated here
             agent4.prey_belief_state = utils.update_prey_belief_state(agent4.prey_belief_state, \
                 agent4.curr_pos, \
@@ -107,15 +105,12 @@ def begin(arena):
                 'after_survey')
             if max(agent4.prey_belief_state.values()) == 1:
                 prey_certainty_counter += 1
-            # print('belief state after survey:')
-            # pprint(agent4.prey_belief_state)
-            # print('sum of prob: ', sum(agent4.prey_belief_state.values()))
 
             believed_prey_curr_pos = utils.return_max_prey_belief(agent4.prey_belief_state, arena)
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
+            # using the max belief node for prey
             agent4.move(arena, believed_prey_curr_pos, predator.curr_pos)
-            # print(f'agent after movement: {agent4.curr_pos}')
+
             # Checking termination states
             if agent4.curr_pos == prey.curr_pos:
                 win_count += 1
@@ -133,9 +128,6 @@ def begin(arena):
                 node_surveyed, \
                 'after_agent_moves')
-            # print('belief state after_agent_moves:')
-            # pprint(agent4.prey_belief_state)
-            # print('sum of prob: ', sum(agent4.prey_belief_state.values()))
 
             prey.move(arena)
@@ -152,9 +144,7 @@ def begin(arena):
                 node_surveyed, \
                 'after_prey_moves')
-            # print('belief state after_prey_moves:')
-            # pprint(agent4.prey_belief_state)
-            # print('sum of prob: ', sum(agent4.prey_belief_state.values()))
+
             predator.move(agent4.curr_pos, arena)
 
             # Checking termination states
@@ -177,5 +167,5 @@ def begin(arena):
     data_row = ["Agent_4", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, 100.0]
-    # data.append(data_row)
+
     return data_row
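Agents 3 and 4 track the prey with a 50-entry belief vector. After the prey moves, belief mass diffuses: the prey picks uniformly among its neighbours plus its own node, so each node inherits probability from itself and its neighbours, after which the agent's own node is zeroed and the vector renormalized. A minimal sketch of that transition, mirroring the `'after_prey_moves'` branch of `utils.update_prey_belief_state` (names here are illustrative):

```python
# Sketch of the 'after_prey_moves' belief diffusion used by Agents 3/4.
# Mirrors utils.update_prey_belief_state; not a drop-in replacement.
def diffuse_prey_belief(belief, arena, agent_pos):
    # The prey moves uniformly at random to a neighbour or stays put,
    # so node i receives belief[j] / (degree(j) + 1) from each source j.
    diffused = {}
    for i in arena:
        mass = belief[i] / (len(arena[i]) + 1)
        for j in arena[i]:
            mass += belief[j] / (len(arena[j]) + 1)
        diffused[i] = mass
    # The agent can see its own node is prey-free: zero it and renormalize.
    diffused[agent_pos] = 0.0
    total = sum(diffused.values())
    return {i: p / total for i, p in diffused.items()}
```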
diff --git a/Agent_5.py b/Agent_5.py
index b77f8df..0096f53 100644
--- a/Agent_5.py
+++ b/Agent_5.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -93,8 +91,6 @@ def begin(arena):
         predator_certainty_counter = 0
         while 1:
             print("In game Agent_5 at game_count: ", game_count, " step_count: ", step_count)
-            # print("Agent  Prey  Predator")
-            # print(agent5.curr_pos, prey.curr_pos, predator.curr_pos)
 
             # Survey a node initially without ever knowing where the prey is for a fact
             found_predator, node_surveyed = utils.survey_predator(agent5, predator)
@@ -113,12 +109,7 @@ def begin(arena):
                 predator_certainty_counter += 1
 
             believed_predator_curr_pos = utils.return_max_predator_belief(agent5.predator_belief_state, arena)
-            # print("after_survey Predator Belief")
-            # print("after_survey Predator Actual:", predator.curr_pos, "Predator believed :", believed_predator_curr_pos)
-            # pprint(agent5.predator_belief_state)
-
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
             # using the max belief node for prey
             agent5.move(arena, prey.curr_pos, believed_predator_curr_pos)
@@ -139,10 +130,6 @@ def begin(arena):
                 node_surveyed, \
                 'after_agent_moves')
-            # print("after_agent_moves Predator Belief")
-            # print("after_agent_moves Predator Actual:", predator.curr_pos, "Predator believed :",believed_predator_curr_pos)
-            # pprint(agent5.predator_belief_state)
-
 
             prey.move(arena)
@@ -162,12 +149,6 @@ def begin(arena):
                 node_surveyed, \
                 'after_predator_moves')
-            # print("after_predator_moves Predator Belief")
-            # print("after_predator_moves Predator Actual:", predator.curr_pos, "Predator believed :",believed_predator_curr_pos)
-            # pprint(agent5.predator_belief_state)
-
-
-
             # Checking termination states
             if agent5.curr_pos == predator.curr_pos:
                 loss_count += 1
@@ -187,5 +168,4 @@ def begin(arena):
     data_row = ["Agent_5", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, 100.0, predator_certainty * 100 / number_of_games]
-    # data.append(data_row)
     return data_row
diff --git a/Agent_6.py b/Agent_6.py
index 4d3272f..e30929e 100644
--- a/Agent_6.py
+++ b/Agent_6.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -112,7 +110,7 @@ def begin(arena):
 
             believed_predator_curr_pos = utils.return_max_predator_belief(agent6.predator_belief_state, arena)
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
+
             # using the max belief node for prey
             agent6.move(arena, prey.curr_pos, believed_predator_curr_pos)
@@ -170,5 +168,5 @@ def begin(arena):
     data_row = ["Agent_6", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, 100.0, predator_certainty * 100 / number_of_games]
-    # data.append(data_row)
+
     return data_row
\ No newline at end of file
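Agents 5 and 6 keep an analogous belief vector over the predator. The transition model matches the "easily distracted" predator: with probability 0.6 it steps to a neighbour on a shortest path to the agent, and with probability 0.4 it moves to a uniformly random neighbour. The sketch below restates the `'after_predator_moves'` branch of `utils.update_predator_belief_state` under that reading; it reorganizes the repo's loop, assumes `utils.get_shortest_path` returns `(path, length)` as in utils.py, and is not a drop-in replacement.

```python
# Sketch of the distracted-predator belief transition used by Agents 5/6.
import utils

def diffuse_predator_belief(belief, arena, agent_pos):
    diffused = {}
    for i in arena:
        mass = 0.0
        for j in arena[i]:  # a predator at j may step to i
            # Neighbours of j that minimize the path length to the agent
            dists = {k: utils.get_shortest_path(k, agent_pos, arena)[1] for k in arena[j]}
            best = [k for k, d in dists.items() if d == min(dists.values())]
            mass += belief[j] * (0.4 / len(arena[j]))    # distracted: uniform neighbour
            if i in best:
                mass += belief[j] * (0.6 / len(best))    # focused: shortest-path step
        diffused[i] = mass
    diffused[agent_pos] = 0.0
    total = sum(diffused.values())
    return {i: p / total for i, p in diffused.items()}
```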
diff --git a/Agent_7.py b/Agent_7.py
index 010f7e0..e7b986a 100644
--- a/Agent_7.py
+++ b/Agent_7.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -129,8 +127,6 @@ def begin(arena):
                 if predator_node_surveyed == prey.curr_pos:
                     found_prey = True
                     zero_values = 0
-
-
             # updating both belief states
             agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
                 agent7.curr_pos, \
@@ -156,12 +152,6 @@ def begin(arena):
             believed_prey_curr_pos = utils.return_max_prey_belief(agent7.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent7.predator_belief_state, arena)
-            # print( " After survey, Predator at : ", predator.curr_pos," believed at :", believed_predator_curr_pos)
-            # pprint(agent7.predator_belief_state)
-            # print(sum(agent7.predator_belief_state.values()))
-
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
             #using the max belief node for prey
             agent7.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
@@ -191,10 +181,6 @@ def begin(arena):
                 node_surveyed, \
                 'after_agent_moves')
-            # print(" After survey, Predator at : ", predator.curr_pos, " believed at :", believed_predator_curr_pos)
-            # pprint(agent7.predator_belief_state)
-            # print(sum(agent7.predator_belief_state.values()))
-
             prey.move(arena)
 
             agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
@@ -220,9 +206,6 @@ def begin(arena):
                 node_surveyed, \
                 'after_predator_moves')
-            # print(" After survey, Predator at : ", predator.curr_pos, " believed at :", believed_predator_curr_pos)
-            # pprint(agent7.predator_belief_state)
-            # print(sum(agent7.predator_belief_state.values()))
 
             found_prey = False
             found_predator = False
@@ -255,5 +238,5 @@ def begin(arena):
     data_row = ["Agent_7", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games,
                 predator_certainty * 100 / number_of_games]
-    # data.append(data_row)
+
     return data_row
diff --git a/Agent_7_wdd_handled.py b/Agent_7_wdd_handled.py
index f856efe..7007ee3 100644
--- a/Agent_7_wdd_handled.py
+++ b/Agent_7_wdd_handled.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -35,12 +33,12 @@ def __init__(self, prey_loc, predator_loc):
         # Initialize prey belief state
         self.prey_belief_state = dict.fromkeys([i for i in range(50)], 1 / 49)
         self.prey_belief_state[self.curr_pos] = 0
-        # print(f'Initial prey belief state: {self.prey_belief_state}')
+
         # Initialize predator belief state
         self.predator_belief_state = dict.fromkeys([i for i in range(50)], 0)
         self.predator_belief_state[predator_loc] = 1
-        # print(f'Initial predator belief state: {self.predator_belief_state}')
+
     def move(self, arena, prey_loc, predator_loc):
         """
@@ -134,25 +132,10 @@ def begin(arena):
                 'after_survey')
             if max(agent7_wdd_handled.predator_belief_state.values()) == 1:
                 predator_certainty_counter += 1
-            """
-            # print(found_prey)
-            if found_prey:
-                # found the prey and now have to use a variable assignment tree to track the prey
-                pass
-            else:
-                # Choose a node at random and assume it is where the prey is
-                agent7.prey_belief_state[node_surveyed] = 0
-                for i in range(50):
-                    degree = utils.get_degree(arena, i)
-                    if i != node_surveyed:
-                        agent7.prey_belief_state[i] += 1/48  # Has to be phrased in the form of previous probability and next probability in terms of the degree of neighbours of this node
-            """
             believed_prey_curr_pos = utils.return_max_prey_belief(agent7_wdd_handled.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent7_wdd_handled.predator_belief_state, arena)
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
             # using the max belief node for prey
             agent7_wdd_handled.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
@@ -229,6 +212,6 @@ def begin(arena):
     data_row = ["Agent_7_wdd_handled", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games,
                 predator_certainty * 100 / number_of_games]
-    # data.append(data_row)
+
     return data_row
diff --git a/Agent_7_with_defective_drone.py b/Agent_7_with_defective_drone.py
index 42bae60..01286e9 100644
--- a/Agent_7_with_defective_drone.py
+++ b/Agent_7_with_defective_drone.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -128,25 +126,10 @@ def begin(arena):
                 node_surveyed, \
                 'after_survey')
-            """
-            # print(found_prey)
-            if found_prey:
-                # found the prey and now have to use a variable assignment tree to track the prey
-                pass
-            else:
-                # Choose a node at random and assume it is where the prey is
-                agent7.prey_belief_state[node_surveyed] = 0
-                for i in range(50):
-                    degree = utils.get_degree(arena, i)
-                    if i != node_surveyed:
-                        agent7.prey_belief_state[i] += 1/48  # Has to be phrased in the form of previous probability and next probability in terms of the degree of neighbours of this node
-            """
             believed_prey_curr_pos = utils.return_max_prey_belief(agent7_wdd.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent7_wdd.predator_belief_state, arena)
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
             # using the max belief node for prey
             agent7_wdd.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
@@ -215,6 +198,5 @@ def begin(arena):
     data_row = ["Agent_7_wdd", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games]
-    # data.append(data_row)
 
     return data_row
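The `*_wdd` variants model a defective drone whose survey returns a false negative 10% of the time even when the target is at the surveyed node. A negative survey therefore only discounts that node by 0.9 instead of zeroing it, which is a straight Bayes update with likelihood P(not found | target at s) = 0.1. A minimal sketch of that update, matching the denominators in `update_*_belief_state_defective_drone` (names illustrative):

```python
# Sketch of the defective-drone negative-survey update used by the *_wdd agents.
def negative_survey_update(belief, surveyed_node):
    posterior = dict(belief)
    posterior[surveyed_node] *= 0.1     # false-negative likelihood at the surveyed node
    total = sum(posterior.values())     # == sum(belief) - 0.9 * belief[surveyed_node]
    return {i: p / total for i, p in posterior.items()}
```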
diff --git a/Agent_8.py b/Agent_8.py
index 5c7abd1..d17ab3f 100644
--- a/Agent_8.py
+++ b/Agent_8.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -132,9 +130,6 @@ def begin(arena):
             believed_prey_curr_pos = utils.return_max_prey_belief(agent8.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent8.predator_belief_state, arena)
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
-
             # using the max belief node for prey
             agent8.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
             # Checking termination states
@@ -205,5 +200,4 @@ def begin(arena):
     data_row = ["Agent_8", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games,
                 predator_certainty * 100 / number_of_games]
-    # data.append(data_row)
     return data_row
diff --git a/Agent_8_wdd_handled.py b/Agent_8_wdd_handled.py
index 8bd43b2..405fa60 100644
--- a/Agent_8_wdd_handled.py
+++ b/Agent_8_wdd_handled.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -138,9 +136,6 @@ def begin(arena):
             believed_prey_curr_pos = utils.return_max_prey_belief(agent8_wdd_handled.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent8_wdd_handled.predator_belief_state, arena)
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
-
             # using the max belief node for prey
             agent8_wdd_handled.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
             # Checking termination states
@@ -216,5 +211,4 @@ def begin(arena):
     data_row = ["Agent_8_wdd_handled", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games,
                 predator_certainty * 100 / number_of_games]
-    # data.append(data_row)
    return data_row
diff --git a/Agent_8_with_defective_drone.py b/Agent_8_with_defective_drone.py
index a79853f..b5bbd7f 100644
--- a/Agent_8_with_defective_drone.py
+++ b/Agent_8_with_defective_drone.py
@@ -1,6 +1,4 @@
 import random
-from pprint import pprint
-
 import config
 import utils
 from prey import Prey
@@ -132,9 +130,6 @@ def begin(arena):
             believed_prey_curr_pos = utils.return_max_prey_belief(agent8_wdd.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent8_wdd.predator_belief_state, arena)
-            # print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')
-            # print(f'believed_predator_curr_pos: {believed_predator_curr_pos}')
-
             # using the max belief node for prey
             agent8_wdd.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
             # Checking termination states
@@ -202,5 +197,4 @@ def begin(arena):
     data_row = ["Agent_8_wdd", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games]
-    # data.append(data_row)
     return data_row
diff --git a/config.py b/config.py
index cca128e..dfa4124 100644
--- a/config.py
+++ b/config.py
@@ -1,18 +1,8 @@
-# FILE_PATH = 'D:/Desktop/Fall_22_Academics/520_Intro to AI/Project_2/Data/'
-FILE_PATH = '/Users/dhanur/Everything/Subjects/520_Intro_to_Artificial_Intelligence/Projects/Project_2/circle_of_life/The-Circle-of-Life/data/'
-FILE_NAME = 'Agent_1_data.csv'
+FILE_PATH = 'D:/Desktop/Fall_22_Academics/520_Intro to AI/Project_2/Data/'
+# FILE_PATH = '/Users/dhanur/Everything/Subjects/520_Intro_to_Artificial_Intelligence/Projects/Project_2/circle_of_life/The-Circle-of-Life/data/'
+FILE_NAME = 'Agent_data.csv'
 NUMBER_OF_GAMES = 30
 NUMBER_OF_ARENAS = 100
 FORCED_TERMINATION_THRESHOLD = 5000
-SCARED_THRESHOLD = 3
-
-# NUMBER_OF_GAMES = 1
-# NUMBER_OF_ARENAS = 1
-# FORCED_TERMINATION_THRESHOLD = 1000
-# SCARED_THRESHOLD = 1
-
-# NUMBER_OF_GAMES = 1
-# NUMBER_OF_ARENAS = 1
-# FORCED_TERMINATION_THRESHOLD = 10000
-# SCARED_THRESHOLD = 3
\ No newline at end of file
+SCARED_THRESHOLD = 3
\ No newline at end of file
diff --git a/environment.py b/environment.py
index 0bd9cce..a46a40a 100644
--- a/environment.py
+++ b/environment.py
@@ -16,11 +16,7 @@ def generate_environement():
         degree[keys]=len(arena[keys])
         available_nodes.append(keys)
 
-    #print(degree)
-    #print(available_nodes)
-
     while len(available_nodes) > 0:
-        #print(available_nodes)
         node1 = random.choice(available_nodes)
 
         # Checking if random node has degree 3
@@ -47,8 +43,6 @@ def generate_environement():
             else:
                 legal_node2_neighbours.append(node2)
 
-        #print("legal nodes for ",node1,":", legal_node2_neighbours)
-
         # legal_node2_neighbours contains all the legal neighbours of node2
         if len(legal_node2_neighbours)==0:
             available_nodes.remove(node1)
@@ -70,27 +64,6 @@ def generate_environement():
             degree[node1] += 1
             degree[node2] += 1
 
-        """if len(available_nodes) == 2:
-            node1=available_nodes[0]
-            node2=available_nodes[1]
-            if degree[node2] <3 and degree[node1] < 3 :
-                # Adding edge to the dictionary
-                arena[node1].append(node2)
-                arena[node2].append(node1)
-
-                # Removing nodes from available nodes
-                print(node1, node2)
-                available_nodes.remove(node1)
-                available_nodes.remove(node2)
-                degree[node1] += 1
-                degree[node2] += 1"""
-
-        #print(available_nodes)
-        #print(sum(degree.values()))
-
-    # print(sum(degree.values()))
-    #print(degree)
-    #print(arena)
 
     # Maze visualization code
@@ -105,6 +78,3 @@ def generate_environement():
         plt.show()"""
 
     return arena
-
-# arena=generate_environement()
-# print(arena)
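`generate_environement` builds the game board as a 50-node ring and then adds random chords while keeping every degree at three or less. The following compact sketch captures that construction; the "chord within five ring steps" rule is inferred from the arena fixtures earlier in this patch, and the bookkeeping differs from the repo's `available_nodes`/`degree` loop.

```python
# Sketch of the arena construction: a ring plus bounded-span random chords.
# Illustrative only; generate_environement in environment.py is the real version.
import random

def generate_arena(n=50, max_chord_span=5):
    arena = {i: [(i + 1) % n, (i - 1) % n] for i in range(n)}  # base ring
    nodes = list(range(n))
    random.shuffle(nodes)
    for node in nodes:
        if len(arena[node]) >= 3:
            continue
        spans = range(2, max_chord_span + 1)
        candidates = [(node + s) % n for s in spans] + [(node - s) % n for s in spans]
        candidates = [c for c in candidates if len(arena[c]) < 3 and c not in arena[node]]
        if candidates:
            other = random.choice(candidates)
            arena[node].append(other)
            arena[other].append(node)
    return arena
```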
diff --git a/predator.py b/predator.py
index 49172a7..8f2a4c1 100644
--- a/predator.py
+++ b/predator.py
@@ -15,7 +15,7 @@ def __init__(self, start = random.randint(0,49)):
             start (int): A random integer denoting a node in the arena
         """
         self.curr_pos = start
-        #print(f'predator initialized with {self.curr_pos}')
+
     def move(self, agent_pos, arena):
         """
@@ -27,13 +27,6 @@ def move(self, agent_pos, arena):
             agent_pos (int): Position of the agent
         """
-        """
-        #print('moving predator')
-        path, path_length = utils.get_shortest_path(self.curr_pos, agent_pos, arena)
-        # print(path)
-        # path.popleft() # removes the first element of the path so that predator doesn't endup in the the same place
-        self.curr_pos = path.popleft()
-        """
         predator_neighbour_path_length = {}
 
         # Finds the length for the shortest path for each of predators neighbours
@@ -49,17 +42,6 @@ def move(self, agent_pos, arena):
         # Chooses randomly between the neighbours
         self.curr_pos = random.choice(neighbours_with_min_path_length)
 
-        """ Testing this function
-        print("predator_neighbour_path_length")
-        print(predator_neighbour_path_length)
-        print("neighbours_with_min_path_length")
-        print(neighbours_with_min_path_length)
-        print("Predator curr_pos")
-        print(self.curr_pos)
-
-        exit(0)
-        """
-
    def distracted_move(self, agent_pos, arena):
        """
        Randomly chooses between the neighbours having the shortest path to the agent
@@ -70,13 +52,6 @@ def distracted_move(self, agent_pos, arena):
             agent_pos (int): Position of the agent
         """
-        """
-        #print('moving predator')
-        path, path_length = utils.get_shortest_path(self.curr_pos, agent_pos, arena)
-        # print(path)
-        # path.popleft() # removes the first element of the path so that predator doesn't endup in the the same place
-        self.curr_pos = path.popleft()
-        """
         if random.random() <= 0.6:
             predator_distracted = False
         else:
@@ -100,17 +75,6 @@ def distracted_move(self, agent_pos, arena):
         # Chooses randomly between the neighbours
         self.curr_pos = random.choice(neighbours_with_min_path_length)
 
-        """ Testing this function
-        print("predator_neighbour_path_length")
-        print(predator_neighbour_path_length)
-        print("neighbours_with_min_path_length")
-        print(neighbours_with_min_path_length)
-        print("Predator curr_pos")
-        print(self.curr_pos)
-
-        exit(0)
-        """
-
    def move_with_rand_selection(self, agent_pos, arena):
        """
        Finds the shortest path to the agent and then takes a step towards it
@@ -134,13 +98,3 @@ def move_with_rand_selection(self, agent_pos, arena):
         # Chooses randomly between the neighbours
         self.curr_pos = random.choice(neighbours_with_min_path_length)
-
-        """ Testing this function
-        print("predator_neighbour_path_length")
-        print(predator_neighbour_path_length)
-        print("neighbours_with_min_path_length")
-        print(neighbours_with_min_path_length)
-        print("Predator curr_pos")
-        print(self.curr_pos)
-
-        exit(0)
-        """
diff --git a/prey.py b/prey.py
index 641f827..904cf0b 100644
--- a/prey.py
+++ b/prey.py
@@ -22,7 +22,7 @@ def move(self, arena):
             arena: The arena used currently
         """
-        #list_to_choose_from = arena[self.curr_pos] <- this line edits the original arena variable
+
         list_to_choose_from = deepcopy(arena[self.curr_pos])
         list_to_choose_from.append(self.curr_pos)
         self.curr_pos = random.choice(list_to_choose_from)
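The comment deleted in prey.py records why `Prey.move` copies the neighbour list: `arena[self.curr_pos]` is an alias, so appending the current position to it would mutate the shared arena. A tiny demonstration of the bug and the copy that avoids it:

```python
# Why the deepcopy in Prey.move matters: appending to the raw alias corrupts
# the shared arena. deepcopy is what the repo uses; list(...) would also work
# here since the neighbour lists hold plain ints.
from copy import deepcopy

arena = {0: [1, 2], 1: [0, 2], 2: [0, 1]}

options = arena[0]            # alias, not a copy
options.append(0)             # bug: arena[0] is now [1, 2, 0]

arena = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
options = deepcopy(arena[0])  # independent list
options.append(0)             # arena[0] is still [1, 2]
```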
diff --git a/run.py b/run.py
index af8640e..e604c06 100644
--- a/run.py
+++ b/run.py
@@ -14,7 +14,6 @@
 from Agent_7_with_defective_drone import Agent_7_wdd
 from Agent_7_wdd_handled import Agent_7_wdd_handled
 # from Agent_9 import Agent_9
-# from test_agent import Agent_7
 import utils
 import config
@@ -33,43 +32,21 @@ def run():
     while no_of_arenas < config.NUMBER_OF_ARENAS:
         arena = env.generate_environement()
         results.append(Agent_1.begin(arena))
-        # results.append(Agent_2.begin(arena))
-        # results.append(Agent_3.begin(arena))
-        # results.append(Agent_4.begin(arena))
-        # results.append(Agent_5.begin(arena))
-        # results.append(Agent_6.begin(arena))
-        # results.append(Agent_7.begin(arena))
-        # results.append(Agent_8.begin(arena))
-        # results.append(Agent_7_wdd.begin(arena))
-        # results.append(Agent_7_wdd_handled.begin(arena))
+        results.append(Agent_2.begin(arena))
+        results.append(Agent_3.begin(arena))
+        results.append(Agent_4.begin(arena))
+        results.append(Agent_5.begin(arena))
+        results.append(Agent_6.begin(arena))
+        results.append(Agent_7.begin(arena))
+        results.append(Agent_8.begin(arena))
+        results.append(Agent_7_wdd.begin(arena))
+        results.append(Agent_7_wdd_handled.begin(arena))
         # results.append(Agent_9.begin(arena))
-        # results.append(Agent_test.begin(arena))
+
         print('-'*100)
         print(f'arena number: {no_of_arenas}')
         no_of_arenas += 1
-
-    #wrote something for summarizing metrics across all arenas, should convert to a function later
-    # a1_survival = 0
-    # a1_dead = 0
-    # a1_terminated = 0
-
-    # print('r')
-    # print(results)
-    # print('r')
-    # print(results[0][0])
-    # print('r')
-    # print(results[1][0])
-
-    # for row in results:
-    #     if row[0] == 'Agent_1':
-    #         a1_survival += row[1]
-    #         a1_dead += row[2]
-    #         a1_terminated += row[3]
-    # results.append(['Agent_1_Summary', a1_survival / config.NUMBER_OF_ARENAS, a1_dead / config.NUMBER_OF_ARENAS\
-    #     , a1_terminated / config.NUMBER_OF_ARENAS])
-
-    # print(results)
 
     utils.store_data(results)
     print("Final Data Collected !")
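run.py now appends one row per agent per arena. The commented-out block this hunk deletes sketched per-agent averaging across arenas; a hypothetical helper along those lines (not in the repo) could replace it:

```python
# Hypothetical summarizer for the rows collected in run(): averages each
# agent's metric columns across all arenas before utils.store_data is called.
from collections import defaultdict

def summarize(results):
    groups = defaultdict(list)
    for name, *metrics in results:
        groups[name].append(metrics)
    return [[name + "_Summary"] + [sum(col) / len(col) for col in zip(*rows)]
            for name, rows in groups.items()]
```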
diff --git a/test_utils.py b/test_utils.py
index 3381977..e69de29 100644
--- a/test_utils.py
+++ b/test_utils.py
@@ -1,838 +0,0 @@
-from collections import deque
-import csv
-import random
-import config
-from pprint import pprint
-
-from matplotlib.artist import get
-
-import config
-
-
-def update_prey_belief_state(prey_belief_state, agent_curr_pos, agent_prev_pos, arena, found_prey, surveyed_node,
-                             checkpoint):
-    """
-    Updates prey belief state
-
-    Parameters:
-        prey_belief_state (dict): Stores prey's belief state
-        agent_curr_pos (int): Stores Agent's current position
-        agent_prev_pos (int): Stores Agent's previous position
-        arena (dict): Contains the graph
-        found_prey (bool): Contains prey is found status
-        surveyed_node (int): Contains the node that was surveyed by the agent
-        checkpoint (string): Describes which part of the function to run
-
-
-    Returns:
-        new_prey_belief_state (dict): The updated belief state
-    """
-
-    # Initializing the new prey belief states
-    new_prey_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-    new_prey_belief_state[agent_curr_pos] = 0.0
-
-    # After surveying the node
-    if checkpoint == 'after_survey':
-        if found_prey:
-            for i in range(50):
-                new_prey_belief_state[i] = 0.0
-            new_prey_belief_state[surveyed_node] = 1.0
-            return new_prey_belief_state
-        else:
-            new_prey_belief_state[surveyed_node] = 0.0
-            for i in range(50):
-                if i not in (agent_curr_pos, surveyed_node):
-                    new_prey_belief_state[i] = prey_belief_state[i] / (
-                            sum(prey_belief_state.values()) - prey_belief_state[surveyed_node] - prey_belief_state[
-                        agent_curr_pos])
-            # print('in update func')
-            # pprint(new_prey_belief_state)
-            # print('in update prey belief func after_survey')
-            # print('sum of prob: ', sum(new_prey_belief_state.values()))
-            # exit(0)
-            return new_prey_belief_state
-
-    elif checkpoint == 'after_agent_moves':
-        if found_prey:
-            return prey_belief_state
-        else:
-            # print(f'agent_curr_pos in func: {agent_curr_pos}')
-            new_prey_belief_state[agent_prev_pos] = 0.0
-            new_prey_belief_state[agent_curr_pos] = 0.0
-            new_prey_belief_state[surveyed_node] = 0.0
-
-            for i in range(50):
-                if i not in (agent_curr_pos, agent_prev_pos, surveyed_node):
-                    new_prey_belief_state[i] = prey_belief_state[i] / (
-                            sum(prey_belief_state.values()) - prey_belief_state[agent_curr_pos] - prey_belief_state[
-                        surveyed_node])
-            # print('in update func')
-            # pprint(new_prey_belief_state)
-            # print('in update prey belief func after_agent_moves')
-            # print('sum of prob: ', sum(new_prey_belief_state.values()))
-            # exit(0)
-            return new_prey_belief_state
-
-    elif checkpoint == 'after_prey_moves':
-        new_prey_belief_state[agent_curr_pos] = 0.0
-
-        temp_prey_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-        temp_prey_belief_state[agent_curr_pos] = 0.0
-
-        # for i in range(50):
-        #     if i != agent_curr_pos:
-        #         temp_prey_belief_state[i] = prey_belief_state[i] / ( sum(prey_belief_state.values()) - prey_belief_state[surveyed_node] - prey_belief_state[agent_curr_pos])
-        # print('in update func temp')
-        # pprint(temp_prey_belief_state)
-        # print('sum of prob: ', sum(temp_prey_belief_state.values()))
-        # print('arena')
-
-        # Test for degree
-        # for i in range(50):
-        #     temp_sum = 0.0
-        #     if i != agent_curr_pos:
-        #         if i not in arena[agent_curr_pos]:
-        #             for j in arena[i]:
-        #                 temp_sum += prey_belief_state[j] / ( get_degree(arena, j) + 1 )
-        #             temp_sum += prey_belief_state[i] / ( get_degree(arena, i) + 1 )
-        #         elif i in arena[agent_curr_pos]:
-        #             for j in arena[i]:
-        #                 temp_sum += prey_belief_state[j] / ( get_degree(arena, j))
-        #             temp_sum += prey_belief_state[i] / ( get_degree(arena, i))
-        #     new_prey_belief_state[i] = temp_sum
-
-        # prey has moved
-        for i in range(50):
-            temp_sum = 0.0
-            for j in arena[i]:
-                temp_sum += prey_belief_state[j] / (get_degree(arena, j) + 1)
-            temp_sum += prey_belief_state[i] / (get_degree(arena, i) + 1)
-            temp_prey_belief_state[i] = temp_sum
-
-        # pretend to survey node for agent curr pos
-        new_prey_belief_state[agent_curr_pos] = 0.0
-        for i in range(50):
-            if i != agent_curr_pos:
-                new_prey_belief_state[i] = temp_prey_belief_state[i] / (
-                        sum(temp_prey_belief_state.values()) - temp_prey_belief_state[agent_curr_pos])
-
-        # print('in update func')
-        # pprint(new_prey_belief_state)
-        # print('in update prey belief func after_prey_moves')
-        # print('sum of prob: ', sum(new_prey_belief_state.values()))
-        # print('arena')
-        # pprint(arena)
-        # exit(0)
-        return new_prey_belief_state
-
-
-def update_predator_belief_state(predator_belief_state, agent_curr_pos, agent_prev_pos, arena, found_predator,
-                                 surveyed_node, checkpoint):
-    """
-    Updates predator belief state
-
-    Parameters:
-        predator_belief_state (dict): Stores predator's belief state
-        agent_curr_pos (int): Stores Agent's current position
-        agent_prev_pos (int): Stores Agent's previous position
-        arena (dict): Contains the graph
-        found_predator (bool): Contains predator is found status
-        surveyed_node (int): Contains the node that was surveyed by the agent
-        checkpoint (string): Describes which part of the function to run
-
-
-    Returns:
-        new_prey_belief_state (dict): The updated belief state
-    """
-
-    # Initializing the new predator belief states
-    new_predator_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-    new_predator_belief_state[agent_curr_pos] = 0.0
-    # new_predator_belief_state = predator_belief_state
-
-    if checkpoint == 'after_survey':
-        # if found_predator:
-        #     for i in range(50):
-        #         new_predator_belief_state[i] = 0.0
-        #     new_predator_belief_state[surveyed_node] = 1.0
-        # else:
-        new_predator_belief_state[surveyed_node] = 0.0
-        for i in range(50):
-            if i not in (agent_curr_pos, surveyed_node):
-                new_predator_belief_state[i] = predator_belief_state[i] / (
-                        sum(predator_belief_state.values()) - predator_belief_state[surveyed_node])
-        # print('in update func')
-        # pprint(new_predator_belief_state)
-        # print('in update predator belief func after_survey')
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
-        return new_predator_belief_state
-
-
-
-    elif checkpoint == 'after_agent_moves':
-        if found_predator:
-            # print('in update func')
-            # pprint(predator_belief_state)
-            # print('sum of prob: ', sum(predator_belief_state.values()))
-            # exit(0)
-            return predator_belief_state
-
-        else:
-            # print(f'agent_curr_pos in func: {agent_curr_pos}')
-            new_predator_belief_state[agent_prev_pos] = 0.0
-            new_predator_belief_state[agent_curr_pos] = 0.0
-            new_predator_belief_state[surveyed_node] = 0.0
-
-            for i in range(50):
-                if i not in (agent_curr_pos, agent_prev_pos, surveyed_node):
-                    new_predator_belief_state[i] = predator_belief_state[i] / (
-                            sum(predator_belief_state.values()) - predator_belief_state[agent_curr_pos] -
-                            predator_belief_state[surveyed_node])
-            # print('in update func')
-            # pprint(new_predator_belief_state)
-            # print('in update predator belief func after_agent_moves')
-            # print('sum of prob: ', sum(new_predator_belief_state.values()))
-            # exit(0)
-            return new_predator_belief_state
-
-    elif checkpoint == 'after_predator_moves':
-        new_predator_belief_state[agent_curr_pos] = 0.0
-
-        temp_predator_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-        temp_predator_belief_state[agent_curr_pos] = 0.0
-
-        # predator has moved
-        for i in range(50):
-            temp_sum = 0.0
-            for j in arena[i]:
-                neighbour_path_length = {}
-
-                # Finds the length for the shortest path for each of neighbours
-                for k in arena[j]:
-                    path, path_length = get_shortest_path(k, agent_curr_pos, arena)
-                    neighbour_path_length[k] = path_length
-
-                # Finds all the neighbours that have minimum path length
-                min_length = min(neighbour_path_length.values())
-                neighbours_with_min_path_length = [key for key, value in neighbour_path_length.items() if
-                                                   value == min_length]
-                shortest_length_nodes = len(neighbours_with_min_path_length)
-
-                if j in neighbours_with_min_path_length:
-                    temp_sum += predator_belief_state[j] * (0.4 / get_degree(arena, j)) + (0.6 / shortest_length_nodes)
-                else:
-                    temp_sum += predator_belief_state[j] * (0.4 / get_degree(arena, j))
-
-            temp_predator_belief_state[i] = temp_sum
-
-            # temp_sum += prey_belief_state[j] / ( get_degree(arena, j) + 1 )
-            # temp_sum += prey_belief_state[i] / ( get_degree(arena, i) + 1 )
-            # temp_prey_belief_state[i] = temp_sum
-
-        # print('in update func')
-        # pprint(temp_predator_belief_state)
-        # print('sum of prob: ', sum(temp_predator_belief_state.values()))
-        # exit(0)
-
-        # pretend to survey node for agent curr pos
-        new_predator_belief_state[agent_curr_pos] = 0.0
-        for i in range(50):
-            if i != agent_curr_pos:
-                new_predator_belief_state[i] = temp_predator_belief_state[i] / (
-                        sum(temp_predator_belief_state.values()) - temp_predator_belief_state[agent_curr_pos])
-
-        # print('in update predator belief func after predator moves')
-        # pprint(new_predator_belief_state)
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
-        return new_predator_belief_state
-
-
-def get_degree(arena, node):
-    """
-    Gets the degree of the node
-
-    Parameters:
-        arena (dict): Arena for the game
-        node (int): Node to get the degree for
-
-    Returns:
-        len(arena[node]) (int): Gets the degree of the node
-    """
-    return len(arena[node])
-
-
-def survey_prey(agent, prey):
-    """
-    Surveys the node with the highest probability of the prey being there and updates the belief state accordingly
-
-    Parameters:
-        agent (object): Agent object
-        prey (object): Prey object
-
-    Returns:
-        found_prey (Bool): Returns True if found prey else False
-        node_to_survey (int): Returns the node surveyed
-    """
-
-    belief_state = agent.prey_belief_state
-
-    # Selects all positions where the probability is max
-    max_prob_of_prey = [pos for pos, prob in belief_state.items() if prob == max(belief_state.values())]
-
-    # print(max_prob_of_prey)
-
-    node_to_survey = random.choice(max_prob_of_prey)
-
-    # print(node_to_survey)
-
-    if node_to_survey == prey.curr_pos:
-        return True, node_to_survey
-    else:
-        return False, node_to_survey
-
-
-def survey_predator(agent, predator):
-    """
-    Surveys the node with the highest probability of the predator being there and updates the belief state accordingly
-
-    Parameters:
-        agent (object): Agent object
-        prey (object): Predator object
-
-    Returns:
-        found_predator (Bool): Returns True if found predator else False
-        node_to_survey (int): Returns the node surveyed
-    """
-
-    belief_state = agent.predator_belief_state
-
-    # Selects all positions where the probability is max
-    max_prob_of_predator = [pos for pos, prob in belief_state.items() if prob == max(belief_state.values())]
-
-    # print(max_prob_of_predator)
-
-    node_to_survey = random.choice(max_prob_of_predator)
-
-    # print(node_to_survey)
-
-    if node_to_survey == predator.curr_pos:
-        return True, node_to_survey
-    else:
-        return False, node_to_survey
-
-
-def store_data(data):
-    """
-    Stores the collected data toa a CSV file
-
-    data: Data collected from all the agents
-    """
-    file_path_to_write = config.FILE_PATH + config.FILE_NAME
-    # print(file_path_to_write)
-    f = open(file_path_to_write, 'w')
-    writer = csv.writer(f)
-    writer.writerows(data)
-    print("Data Collection Complete")
-    f.close()
-
-
-def FindPath(parent, start_pos, end_pos):
-    """
-    Backtracks and finds the path in the arena to the end position from the start position
-
-    Parameters:
-        parent (dict): Parent dictionary of each node
-        start_pos (int): Start position of the path
-        end_pos (int): End position of the path
-
-    Returns:
-        path (deque): Path from start to end position
-    """
-    path = deque()
-    path.append(end_pos)
-    while path[-1] != start_pos:
-        path.append(parent[path[-1]])
-    path.reverse()
-    path.popleft()
-    return path
-
-
-def get_shortest_path(start_pos, end_pos, arena):
-    """
-    Uses Breath First Search to find the shortest path between the start and end position
-    'neighbours' is used as the fringe (queue) to add surrounding nodes in the arena
-
-    Parameters:
-        start_pos (int): Start position of the path
-        end_pos (int): End position of the path
-        arena (dict): The arena used currently
-
-    Returns:
-        path (deque): Shortest path evaluated
-        (len(path) - 1) (int): Length of the path
-    """
-
-    parent = {}
-    visited = [False] * 50
-
-    # print(f'start_pos = {start_pos}')
-    visited[start_pos] = True
-    # print(visited)
-    neighbours = deque()
-
-    curr_pos = start_pos
-    while curr_pos != end_pos:
-        for surrounding_node in arena[curr_pos]:
-            # print(f'curr_pos: {curr_pos}')
-            # print(f'surrounding_node: {surrounding_node}')
-            if not visited[surrounding_node] and surrounding_node not in neighbours:
-                neighbours.append(surrounding_node)
-                parent.update({surrounding_node: curr_pos})
-                visited[surrounding_node] = True
-                # print(f'added {surrounding_node}')
-        curr_pos = neighbours.popleft()
-
-    path = FindPath(parent, start_pos, end_pos)
-    # print(f'a {type(path)}')
-    return path, (len(path) - 1)
-
-
-def return_max_prey_belief(belief_state, arena):
-    """
-    Returns a randomly chosen node for max belief of the prey
-
-    Parameters:
-        belief_state (dict): The belief state of the prey
-        arena (dict): Arena for the game
-
-    Returns:
-        random.choice(max_belief_nodes) (int): Random value from max beliefs
-    """
-    # return max(belief_state, key = belief_state.get)
-    max_belief = max(belief_state.values())
-    max_belief_nodes = [key for key, value in belief_state.items() if value == max_belief]
-
-    return random.choice(max_belief_nodes)
-
-
-def return_max_predator_belief(belief_state, arena):
-    """
-    Returns a randomly chosen node for max belief of the predator
-
-    Parameters:
-        belief_state (dict): The belief state of the predator
-        arena (dict): Arena for the game
-
-    Returns:
-        random.choice(max_belief_nodes) (int): Random value from max beliefs
-    """
-    # return max(belief_state, key = belief_state.get)
-    max_belief = max(belief_state.values())
-    max_belief_nodes = [key for key, value in belief_state.items() if value == max_belief]
-
-    return random.choice(max_belief_nodes)
-
-
-def best_node_v2(arena, curr_pos, prey_loc, predator_loc):
-    """
-    Returns a node closer to the prey while the agent is 'not scared'
-    Always moves away from predator if the agent is 'scared'
-    Agent is scared if it is within a specific distance from the prey
-
-    Parameters:
-        arena (dictionary): Adjacency list representing the graph
-        prey_loc (int): Location of prey
-        predator_loc (int): Location of Predator
-
-    Returns:
-        curr_pos (int): Position to move to
-
-    """
-    path_to_predator, distance_to_predator = get_shortest_path(curr_pos, predator_loc, arena)
-
-    path_to_prey, distance_to_prey = get_shortest_path(curr_pos, prey_loc, arena)
-
-    if distance_to_predator <= config.SCARED_THRESHOLD:
-        neighbour_predator_path_length = {}
-
-        for i in arena[curr_pos]:
-            neghbour_path, neighbour_predator_path_length[i] = get_shortest_path(i, predator_loc, arena)
-
-        curr_pos = max(neighbour_predator_path_length, key=neighbour_predator_path_length.get)
-
-        return curr_pos
-
-    else:
-        neighbour_prey_path_length = {}
-
-        for i in arena[curr_pos]:
-            neghbour_path, neighbour_prey_path_length[i] = get_shortest_path(i, prey_loc, arena)
-
-        curr_pos = min(neighbour_prey_path_length, key=neighbour_prey_path_length.get)
-
-        return curr_pos
-
-
-def best_node(arena, curr_pos, prey_loc, predator_loc):
-    """
-    Returns the node that the agent should move to according to the following rules:
-        1. Neighbors that are closer to the Prey and farther from the Predator.
-        2. Neighbors that are closer to the Prey and not closer to the Predator.
-        3. Neighbors that are not farther from the Prey and farther from the Predator.
-        4. Neighbors that are not farther from the Prey and not closer to the Predator.
-        5. Neighbors that are farther from the Predator.
-        6. Neighbors that are not closer to the Predator.
-        7. Sit still and pray.
-
-    Parameters:
-        arena (dictionary): Adjacency list representing the graph
-        prey_loc (int): Location of prey
-        predator_loc (int): Location of Predator
-
-    Returns:
-        curr_pos (int): Position to move to
-    """
-
-    # Do not remove the following test cases
-    # curr_pos = 17
-    # curr_pos = 42
-
-    # print("Initial pos", curr_pos)
-    # Neighbours of the current node are extracted here
-    neighbours = arena[curr_pos].copy()
-
-    # Distances from prey and predator will be stored in the following dicts
-    predator_dist = {}
-    prey_dist = {}
-
-    # Storing the distances of the agent location to the prey and predator
-    path, curr_pos_prey_dist = get_shortest_path(curr_pos, prey_loc, arena)
-    path, curr_pos_predator_dist = get_shortest_path(curr_pos, predator_loc, arena)
-
-    # Find distance from all neighbours to the prey and the predator
-    for i in neighbours:
-        path, prey_dist[i] = get_shortest_path(i, prey_loc, arena)
-        path, predator_dist[i] = get_shortest_path(i, predator_loc, arena)
-
-    # Defining subsets of nodes
-    closer_to_prey = {}
-    not_farther_from_prey = {}
-    farther_from_predator = {}
-    not_closer_to_predator = {}
-
-    # Adding nodes to the subsets
-    for k in prey_dist.keys():
-        if prey_dist[k] < curr_pos_prey_dist:
-            closer_to_prey[k] = prey_dist[k]
-
-    for k in prey_dist.keys():
-        if prey_dist[k] == curr_pos_prey_dist:
-            not_farther_from_prey[k] = prey_dist[k]
-
-    for k in predator_dist.keys():
-        if predator_dist[k] >= curr_pos_predator_dist:
-            farther_from_predator[k] = predator_dist[k]
-
-    for k in predator_dist.keys():
-        if predator_dist[k] == curr_pos_predator_dist:
-            farther_from_predator[k] = predator_dist[k]
-
-    # Flag helps to avoid going through multiple ifs if one if condition is satisfied
-    flag = 0
-
-    min_length = min(closer_to_prey.values())
-    focused_neighbours = [key for key, value in closer_to_prey.items() if value == min_length]
-    curr_pos = random.choice(focused_neighbours)
-
-    # Assigning the position accorinding to the given priorrity
-    if len(set(closer_to_prey).intersection(set(farther_from_predator))) != 0 and flag == 0:
-        # curr_pos = min(closer_to_prey, key=closer_to_prey.get)
-        min_length = min(closer_to_prey.values())
-        focused_neighbours = [key for key, value in closer_to_prey.items() if value == min_length]
-        curr_pos = random.choice(focused_neighbours)
-        # print("priority 1")
-        flag = 1
-
-    elif len(set(closer_to_prey).intersection(set(not_closer_to_predator))) != 0 and flag == 0:
-        # curr_pos = min(closer_to_prey, key=closer_to_prey.get)
-        min_length = min(closer_to_prey.values())
-        focused_neighbours = [key for key, value in closer_to_prey.items() if value == min_length]
-        curr_pos = random.choice(focused_neighbours)
-        # print("priority 2")
-        flag = 1
-
-    elif len(set(not_farther_from_prey).intersection(set(farther_from_predator))) != 0 and flag == 0:
-        # curr_pos = min(not_farther_from_prey, key=not_farther_from_prey.get)
-        min_length = min(not_farther_from_prey.values())
-        focused_neighbours = [key for key, value in not_farther_from_prey.items() if value == min_length]
-        curr_pos = random.choice(focused_neighbours)
-        # print("priority 3")
-        flag = 1
-
-    elif len(set(closer_to_prey).intersection(set(not_closer_to_predator))) != 0 and flag == 0:
-        # curr_pos = min(closer_to_prey, key=closer_to_prey.get)
-        min_length = min(closer_to_prey.values())
-        focused_neighbours = [key for key, value in closer_to_prey.items() if value == min_length]
-        curr_pos = random.choice(focused_neighbours)
-        # print("priority 4")
-        flag = 1
-
-    elif len(farther_from_predator) != 0 and flag == 0:
-        # curr_pos = max(farther_from_predator, key=farther_from_predator.get)
-        min_length = min(farther_from_predator.values())
-        focused_neighbours = [key for key, value in farther_from_predator.items() if value == min_length]
-        curr_pos = random.choice(focused_neighbours)
-        # print("priority 5")
-        flag = 1
-
-    elif len(not_closer_to_predator) != 0 and flag == 0:
-        # curr_pos = min(not_closer_to_predator, key=not_closer_to_predator.get)
-        min_length = min(not_closer_to_predator.values())
-        focused_neighbours = [key for key, value in not_closer_to_predator.items() if value == min_length]
-        curr_pos = random.choice(focused_neighbours)
-        # print("priority 6")
-
-    else:
-        # print("Sitting and Praying")
-        return 999
-
-    """print(curr_pos_prey_dist,curr_pos_predator_dist)
-    print(prey_dist,predator_dist)
-    print("pos after movement", curr_pos)"""
-
-    return curr_pos
-
-
-def update_prey_belief_state_defective_drone(prey_belief_state, agent_curr_pos, agent_prev_pos, arena, found_prey,
-                                             surveyed_node,
-                                             checkpoint):
-    """
-    Updates prey belief state
-
-    Parameters:
-        prey_belief_state (dict): Stores prey's belief state
-        agent_curr_pos (int): Stores Agent's current position
-        agent_prev_pos (int): Stores Agent's previous position
-        arena (dict): Contains the graph
-        found_prey (bool): Contains prey is found status
-        surveyed_node (int): Contains the node that was surveyed by the agent
-        checkpoint (string): Describes which part of th function to run
-
-
-    Returns:
-        new_prey_belief_state (dict): The updated belief state
-    """
-
-    # Initializing the new prey belief states
-    new_prey_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-    new_prey_belief_state[agent_curr_pos] = 0.0
-
-    # After surveying the node
-    if checkpoint == 'after_survey':
-        if found_prey:
-            for i in range(50):
-                new_prey_belief_state[i] = 0.0
-            new_prey_belief_state[surveyed_node] = 1.0
-            return new_prey_belief_state
-        else:
-            new_prey_belief_state[surveyed_node] = 0.0
-            for i in range(50):
-                if i not in (agent_curr_pos, surveyed_node):
-                    new_prey_belief_state[i] = prey_belief_state[i] / (
-                            sum(prey_belief_state.values()) - 0.9 * prey_belief_state[surveyed_node] -
-                            prey_belief_state[agent_curr_pos])
-                elif i == surveyed_node:
-                    new_prey_belief_state[i] = prey_belief_state[i] * 0.1 / (
-                            sum(prey_belief_state.values()) - (0.9 * prey_belief_state[surveyed_node]))
-            # print('in update func')
-            # pprint(new_prey_belief_state)
-            # print('in update prey belief func after_survey')
-            # print('sum of prob: ', sum(new_prey_belief_state.values()))
-            # exit(0)
-            return new_prey_belief_state
-
-    elif checkpoint == 'after_agent_moves':
-        if found_prey:
-            return prey_belief_state
-        else:
-            # print(f'agent_curr_pos in func: {agent_curr_pos}')
-            new_prey_belief_state[agent_prev_pos] = 0.0
-            new_prey_belief_state[agent_curr_pos] = 0.0
-            # new_prey_belief_state[surveyed_node] = 0.0
-
-            for i in range(50):
-                # if i not in (agent_curr_pos, agent_prev_pos, surveyed_node):
-                if i not in (agent_curr_pos, agent_prev_pos):
-                    # new_prey_belief_state[i] = prey_belief_state[i] / (
-                    #         sum(prey_belief_state.values()) - prey_belief_state[agent_curr_pos] - prey_belief_state[
-                    #     surveyed_node])
-                    new_prey_belief_state[i] = prey_belief_state[i] / (
-                            sum(prey_belief_state.values()) - prey_belief_state[agent_curr_pos])
-            # print('in update func')
-            # pprint(new_prey_belief_state)
-            # print('in update prey belief func after_agent_moves')
-            # print('sum of prob: ', sum(new_prey_belief_state.values()))
-            # exit(0)
-            return new_prey_belief_state
-
-    elif checkpoint == 'after_prey_moves':
-        new_prey_belief_state[agent_curr_pos] = 0.0
-
-        temp_prey_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-        temp_prey_belief_state[agent_curr_pos] = 0.0
-
-        # prey has moved
-        for i in range(50):
-            temp_sum = 0.0
-            for j in arena[i]:
-                temp_sum += prey_belief_state[j] / (get_degree(arena, j) + 1)
-            temp_sum += prey_belief_state[i] / (get_degree(arena, i) + 1)
-            temp_prey_belief_state[i] = temp_sum
-
-        # pretend to survey node for agent curr pos
-        new_prey_belief_state[agent_curr_pos] = 0.0
-        for i in range(50):
-            if i != agent_curr_pos:
-                new_prey_belief_state[i] = temp_prey_belief_state[i] / (
-                        sum(temp_prey_belief_state.values()) - temp_prey_belief_state[agent_curr_pos])
-
-        # print('in update func')
-        # pprint(new_prey_belief_state)
-        # print('in update prey belief func after_prey_moves')
-        # print('sum of prob: ', sum(new_prey_belief_state.values()))
-        # print('arena')
-        # pprint(arena)
-        # exit(0)
-        return new_prey_belief_state
-
-
-def update_predator_belief_state_defective_drone(predator_belief_state, agent_curr_pos, agent_prev_pos, arena,
-                                                 found_predator,
-                                                 surveyed_node, checkpoint):
-    """
-    Updates predator belief state
-
-    Parameters:
-        predator_belief_state (dict): Stores predator's belief state
-        agent_curr_pos (int): Stores Agent's current position
-        agent_prev_pos (int): Stores Agent's previous position
-        arena (dict): Contains the graph
-        found_predator (bool): Contains predator is found status
-        surveyed_node (int): Contains the node that was surveyed by the agent
-        checkpoint (string): Describes which part of the function to run
-
-
-    Returns:
-        new_prey_belief_state (dict): The updated belief state
-    """
-
-    # Initializing the new predator belief states
-    new_predator_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-    new_predator_belief_state[agent_curr_pos] = 0.0
-    # new_predator_belief_state = predator_belief_state
-
-    if checkpoint == 'after_survey':
-        if found_predator:
-            for i in range(50):
-                new_predator_belief_state[i] = 0.0
-            new_predator_belief_state[surveyed_node] = 1.0
-        else:
-            new_predator_belief_state[surveyed_node] = 0.0
-            for i in range(50):
-                if i not in (agent_curr_pos, surveyed_node):
-                    new_predator_belief_state[i] = predator_belief_state[i] / (
-                            sum(predator_belief_state.values()) - (0.9 * predator_belief_state[surveyed_node]))
-                elif i == surveyed_node:
-                    new_predator_belief_state[i] = predator_belief_state[i] * 0.1 / (
-                            sum(predator_belief_state.values()) - (0.9 * predator_belief_state[surveyed_node]))
-
-        # print('in update func')
-        # pprint(new_predator_belief_state)
-        # print('in update predator belief func after_survey')
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
-        return new_predator_belief_state
-
-
-
-    elif checkpoint == 'after_agent_moves':
-        if found_predator:
-            # print('in update func')
-            # pprint(predator_belief_state)
-            # print('sum of prob: ', sum(predator_belief_state.values()))
-            # exit(0)
-            return predator_belief_state
-
-        else:
-            # print(f'agent_curr_pos in func: {agent_curr_pos}')
-            new_predator_belief_state[agent_prev_pos] = 0.0
-            new_predator_belief_state[agent_curr_pos] = 0.0
-            # new_predator_belief_state[surveyed_node] = 0.0
-
-            for i in range(50):
-                # if i not in (agent_curr_pos, agent_prev_pos, surveyed_node):
-                if i not in (agent_curr_pos, agent_prev_pos):
-                    # new_predator_belief_state[i] = predator_belief_state[i] / (sum(predator_belief_state.values()) - predator_belief_state[agent_curr_pos] -predator_belief_state[surveyed_node])
-                    new_predator_belief_state[i] = predator_belief_state[i] / (
-                            sum(predator_belief_state.values()) - predator_belief_state[agent_curr_pos])
-            # print('in update func')
-            # pprint(new_predator_belief_state)
-            # print('in update predator belief func after_agent_moves')
-            # print('sum of prob: ', sum(new_predator_belief_state.values()))
-            # exit(0)
-            return new_predator_belief_state
-
-    elif checkpoint == 'after_predator_moves':
-        new_predator_belief_state[agent_curr_pos] = 0.0
-
-        temp_predator_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
-        temp_predator_belief_state[agent_curr_pos] = 0.0
-
-        # predator has moved
-        for i in range(50):
-            temp_sum = 0.0
-            for j in arena[i]:
-                neighbour_path_length = {}
-
-                # Finds the length for the shortest path for each of neighbours
-                for k in arena[j]:
-                    path, path_length = get_shortest_path(k, agent_curr_pos, arena)
-                    neighbour_path_length[k] = path_length
-
-                # Finds all the neighbours that have minimum path length
-                min_length = min(neighbour_path_length.values())
-                neighbours_with_min_path_length = [key for key, value in neighbour_path_length.items() if
-                                                   value == min_length]
-                shortest_length_nodes = len(neighbours_with_min_path_length)
-
-                if j in neighbours_with_min_path_length:
-                    temp_sum += predator_belief_state[j] * (0.4 / get_degree(arena, j)) + (0.6 / shortest_length_nodes)
-                else:
-                    temp_sum += predator_belief_state[j] * (0.4 / get_degree(arena, j))
-
-            temp_predator_belief_state[i] = temp_sum
-
-            # temp_sum += prey_belief_state[j] / ( get_degree(arena, j) + 1 )
-            # temp_sum += prey_belief_state[i] / ( get_degree(arena, i) + 1 )
-            # temp_prey_belief_state[i] = temp_sum
-
-        # print('in update func')
-        # pprint(temp_predator_belief_state)
-        # print('sum of prob: ', sum(temp_predator_belief_state.values()))
-        # exit(0)
-
-        # pretend to survey node for agent curr pos
-        new_predator_belief_state[agent_curr_pos] = 0.0
-        for i in range(50):
-            if i != agent_curr_pos:
-                new_predator_belief_state[i] = temp_predator_belief_state[i] / (
-                        sum(temp_predator_belief_state.values()) - temp_predator_belief_state[agent_curr_pos])
-
-        # print('in update predator belief func after predator moves')
-        # pprint(new_predator_belief_state)
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
-        return new_predator_belief_state
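test_utils.py was a stale copy of utils.py that carried the debug prints; deleting it wholesale leaves utils.py (below) as the single canonical version. If test coverage is still wanted, a small hypothetical regression test against the canonical helpers could replace it, for example:

```python
# Hypothetical regression test (not in the repo) exercising utils.get_shortest_path.
# Note: the helper pops the start node off the returned path and reports
# len(path) - 1, so absolute lengths are offset; the test relies only on order.
import utils

def test_shortest_path_monotone():
    ring = {i: [(i + 1) % 6, (i - 1) % 6] for i in range(6)}
    _, d_near = utils.get_shortest_path(0, 1, ring)
    _, d_far = utils.get_shortest_path(0, 3, ring)
    assert d_near < d_far   # farther around the ring means a longer path
```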
@@ -122,7 +104,7 @@ def update_predator_belief_state(predator_belief_state, agent_curr_pos, agent_pr
     # Initializing the new predator belief states
     new_predator_belief_state = dict.fromkeys([i for i in range(50)], 999.0)
     new_predator_belief_state[agent_curr_pos] = 0.0
-    # new_predator_belief_state = predator_belief_state
+

     if checkpoint == 'after_survey':
         if found_predator:
@@ -141,21 +123,14 @@ def update_predator_belief_state(predator_belief_state, agent_curr_pos, agent_pr
                 print(f'predator_belief_state[surveyed_node]: {predator_belief_state[surveyed_node]}')
                 print(f'predator_belief_state[i]: {predator_belief_state[i]}')
                 exit(0)
-        # print('in update func')
-        # pprint(new_predator_belief_state)
-        # print('in update predator belief func after_survey')
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
+
         return new_predator_belief_state


     elif checkpoint == 'after_agent_moves':
         if found_predator:
-            # print('in update func')
-            # pprint(predator_belief_state)
-            # print('sum of prob: ', sum(predator_belief_state.values()))
-            # exit(0)
+
             return predator_belief_state

         else:
@@ -176,11 +151,7 @@ def update_predator_belief_state(predator_belief_state, agent_curr_pos, agent_pr
                 print(f'predator_belief_state[i]: {predator_belief_state[i]}')
                 exit(0)

-        # print('in update func')
-        # pprint(new_predator_belief_state)
-        # print('in update predator belief func after_agent_moves')
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
+
             return new_predator_belief_state

     elif checkpoint == 'after_predator_moves':
@@ -216,26 +187,12 @@ def update_predator_belief_state(predator_belief_state, agent_curr_pos, agent_pr

             temp_predator_belief_state[i] = temp_sum

-            # temp_sum += prey_belief_state[j] / ( get_degree(arena, j) + 1 )
-            # temp_sum += prey_belief_state[i] / ( get_degree(arena, i) + 1 )
-            # temp_prey_belief_state[i] = temp_sum
-
-        # print('in update func')
-        # pprint(temp_predator_belief_state)
-        # print('sum of prob: ', sum(temp_predator_belief_state.values()))
-        # exit(0)
-
-        # pretend to survey node for agent curr pos
         new_predator_belief_state[agent_curr_pos] = 0.0
         for i in range(50):
             if i != agent_curr_pos:
                 new_predator_belief_state[i] = temp_predator_belief_state[i] / (
                         sum(temp_predator_belief_state.values()) - temp_predator_belief_state[agent_curr_pos])
-
-        # print('in update predator belief func after predator moves')
-        # pprint(new_predator_belief_state)
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
+
         return new_predator_belief_state

 def get_degree(arena, node):
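
Both the prey and predator updaters share the same failed-survey rule, visible in the `after_survey` hunks: conditioning the belief on "the target is not at the surveyed node" zeroes that entry and renormalizes the remaining mass. A minimal sketch of just that rule, assuming the belief dict already carries 0 at the agent's own node; failed_survey_update is an illustrative helper, not part of utils.py:

def failed_survey_update(belief, surveyed_node):
    new_belief = dict(belief)
    new_belief[surveyed_node] = 0.0  # condition on "not found here"
    remaining = sum(new_belief.values())
    return {i: p / remaining for i, p in new_belief.items()}

For a normalized prior this is division by 1 - B(surveyed), which matches the sum(...) - belief[surveyed_node] - belief[agent_curr_pos] denominators in the hunks above.
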
@@ -269,11 +226,8 @@ def survey_prey(agent, prey):

     # Selects all positions where the probability is max
     max_prob_of_prey = [pos for pos, prob in belief_state.items() if prob == max(belief_state.values())]
-    # print(max_prob_of_prey)
-
     node_to_survey = random.choice(max_prob_of_prey)
-    # print(node_to_survey)

     if node_to_survey == prey.curr_pos:
         return True, node_to_survey
@@ -298,12 +252,8 @@ def survey_predator(agent, predator):

     # Selects all positions where the probability is max
     max_prob_of_predator = [pos for pos, prob in belief_state.items() if prob == max(belief_state.values())]
-    # print(max_prob_of_predator)
-
     node_to_survey = random.choice(max_prob_of_predator)
-    # print(node_to_survey)
-
     if node_to_survey == predator.curr_pos:
         return True, node_to_survey
     else:
@@ -362,26 +312,22 @@ def get_shortest_path(start_pos, end_pos, arena):

     parent = {}
     visited = [False] * 50
-
-    #print(f'start_pos = {start_pos}')
+
     visited[start_pos] = True
-    # print(visited)
     neighbours = deque()
     curr_pos = start_pos

     while curr_pos != end_pos:
         for surrounding_node in arena[curr_pos]:
-            # print(f'curr_pos: {curr_pos}')
-            # print(f'surrounding_node: {surrounding_node}')
             if not visited[surrounding_node] and surrounding_node not in neighbours:
                 neighbours.append(surrounding_node)
                 parent.update({surrounding_node: curr_pos})
                 visited[surrounding_node] = True
-                # print(f'added {surrounding_node}')
+
         curr_pos = neighbours.popleft()

     path = FindPath(parent, start_pos, end_pos)
-    # print(f'a {type(path)}')
+
     return path, (len(path) - 1)

 def return_max_prey_belief(belief_state, arena):
@@ -395,7 +341,7 @@ def return_max_prey_belief(belief_state, arena):
     Returns:
         random.choice(max_belief_nodes) (int): Random value from max beliefs
     """
-    # return max(belief_state, key = belief_state.get)
+
     max_belief = max(belief_state.values())
     max_belief_nodes = [key for key, value in belief_state.items() if value == max_belief ]

@@ -412,9 +358,9 @@ def return_max_predator_belief(belief_state, arena):
    Returns:
        random.choice(max_belief_nodes) (int): Random value from max beliefs
    """
-    # return max(belief_state, key = belief_state.get)
+
    max_belief = max(belief_state.values())
-    # print("MAX PREDATOR BELIEF IS :" , max_belief)
+
    max_belief_nodes = [key for key, value in belief_state.items() if value == max_belief ]

    return random.choice(max_belief_nodes)
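
get_shortest_path is a plain breadth-first search: it grows a parent map outward from start_pos and then walks it back with FindPath. An equivalent compact sketch under the same arena layout; bfs_shortest_path is an illustrative rewrite, not the function in this diff:

from collections import deque

def bfs_shortest_path(start_pos, end_pos, arena):
    parent = {start_pos: None}
    queue = deque([start_pos])
    while queue:
        node = queue.popleft()
        if node == end_pos:
            break
        for nxt in arena[node]:
            if nxt not in parent:  # doubles as the visited check
                parent[nxt] = node
                queue.append(nxt)
    path = []
    node = end_pos
    while node is not None:  # walk the parent map back to start_pos
        path.append(node)
        node = parent[node]
    path.reverse()
    return path, len(path) - 1

Using membership in parent as the visited test removes both the separate visited list and the linear "surrounding_node not in neighbours" scan of the original.
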
@@ -480,11 +426,6 @@ def best_node(arena, curr_pos, prey_loc, predator_loc):
         curr_pos (int): Position to move to
     """

-    # Do not remove the following test cases
-    # curr_pos = 17
-    # curr_pos = 42
-
-    #print("Initial pos", curr_pos)
     # Neighbours of the current node are extracted here
     neighbours = arena[curr_pos].copy()

@@ -583,9 +524,6 @@ def best_node(arena, curr_pos, prey_loc, predator_loc):
         #print("Sitting and Praying")
         return 999

-    """print(curr_pos_prey_dist,curr_pos_predator_dist)
-    print(prey_dist,predator_dist)
-    print("pos after movement", curr_pos)"""
     return curr_pos

@@ -629,34 +567,19 @@ def update_prey_belief_state_defective_drone(prey_belief_state, agent_curr_pos,
             elif i == surveyed_node:
                 new_prey_belief_state[i] = prey_belief_state[i]*0.1 / (
                         sum(prey_belief_state.values()) - (0.9 * prey_belief_state[surveyed_node]))
-        # print('in update func')
-        # pprint(new_prey_belief_state)
-        # print('in update prey belief func after_survey')
-        # print('sum of prob: ', sum(new_prey_belief_state.values()))
-        # exit(0)
         return new_prey_belief_state

     elif checkpoint == 'after_agent_moves':
         if found_prey:
             return prey_belief_state
         else:
-            # print(f'agent_curr_pos in func: {agent_curr_pos}')
+
             new_prey_belief_state[agent_prev_pos] = 0.0
             new_prey_belief_state[agent_curr_pos] = 0.0
-            # new_prey_belief_state[surveyed_node] = 0.0
-
             for i in range(50):
-                # if i not in (agent_curr_pos, agent_prev_pos, surveyed_node):
                 if i not in (agent_curr_pos, agent_prev_pos):
-                    # new_prey_belief_state[i] = prey_belief_state[i] / (
-                    #     sum(prey_belief_state.values()) - prey_belief_state[agent_curr_pos] - prey_belief_state[
-                    #     surveyed_node])
+
                     new_prey_belief_state[i] = prey_belief_state[i] / (sum(prey_belief_state.values()) - prey_belief_state[agent_curr_pos])
-        # print('in update func')
-        # pprint(new_prey_belief_state)
-        # print('in update prey belief func after_agent_moves')
-        # print('sum of prob: ', sum(new_prey_belief_state.values()))
-        # exit(0)
         return new_prey_belief_state

     elif checkpoint == 'after_prey_moves':
@@ -679,14 +602,6 @@ def update_prey_belief_state_defective_drone(prey_belief_state, agent_curr_pos,
             if i != agent_curr_pos:
                 new_prey_belief_state[i] = temp_prey_belief_state[i] / (
                         sum(temp_prey_belief_state.values()) - temp_prey_belief_state[agent_curr_pos])
-
-        # print('in update func')
-        # pprint(new_prey_belief_state)
-        # print('in update prey belief func after_prey_moves')
-        # print('sum of prob: ', sum(new_prey_belief_state.values()))
-        # print('arena')
-        # pprint(arena)
-        # exit(0)
         return new_prey_belief_state

 def update_predator_belief_state_defective_drone(predator_belief_state, agent_curr_pos, agent_prev_pos, arena, found_predator,
@@ -727,40 +642,27 @@ def update_predator_belief_state_defective_drone(predator_belief_state, agent_cu
             elif i == surveyed_node:
                 new_predator_belief_state[i] = predator_belief_state[i]*0.1 / (
                         sum(predator_belief_state.values()) - (0.9 * predator_belief_state[surveyed_node]))
-
-        # print('in update func')
-        # pprint(new_predator_belief_state)
-        # print('in update predator belief func after_survey')
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
         return new_predator_belief_state

     elif checkpoint == 'after_agent_moves':
         if found_predator:
-            # print('in update func')
-            # pprint(predator_belief_state)
-            # print('sum of prob: ', sum(predator_belief_state.values()))
-            # exit(0)
+
             return predator_belief_state

         else:
-            # print(f'agent_curr_pos in func: {agent_curr_pos}')
+
             new_predator_belief_state[agent_prev_pos] = 0.0
             new_predator_belief_state[agent_curr_pos] = 0.0
-            # new_predator_belief_state[surveyed_node] = 0.0
+
             for i in range(50):
-                # if i not in (agent_curr_pos, agent_prev_pos, surveyed_node):
+
                 if i not in (agent_curr_pos, agent_prev_pos):
-                    # new_predator_belief_state[i] = predator_belief_state[i] / (sum(predator_belief_state.values()) - predator_belief_state[agent_curr_pos] -predator_belief_state[surveyed_node])
+
                     new_predator_belief_state[i] = predator_belief_state[i] / (sum(predator_belief_state.values()) - predator_belief_state[agent_curr_pos])
-            # print('in update func')
-            # pprint(new_predator_belief_state)
-            # print('in update predator belief func after_agent_moves')
-            # print('sum of prob: ', sum(new_predator_belief_state.values()))
-            # exit(0)
+
             return new_predator_belief_state

     elif checkpoint == 'after_predator_moves':
@@ -793,14 +695,6 @@ def update_predator_belief_state_defective_drone(predator_belief_state, agent_cu

             temp_predator_belief_state[i] = temp_sum

-            # temp_sum += prey_belief_state[j] / ( get_degree(arena, j) + 1 )
-            # temp_sum += prey_belief_state[i] / ( get_degree(arena, i) + 1 )
-            # temp_prey_belief_state[i] = temp_sum
-
-        # print('in update func')
-        # pprint(temp_predator_belief_state)
-        # print('sum of prob: ', sum(temp_predator_belief_state.values()))
-        # exit(0)

         # pretend to survey node for agent curr pos
         new_predator_belief_state[agent_curr_pos] = 0.0
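
The _defective_drone variants above model a drone with a 10% false-negative rate: a "not found" survey result multiplies the surveyed node's mass by the miss likelihood 0.1 instead of zeroing it, then renormalizes, which is where the sum(...) - 0.9 * belief[surveyed_node] denominators come from. A minimal sketch of that Bayes update, assuming a normalized input belief; defective_drone_not_found_update is an illustrative helper, not part of utils.py:

def defective_drone_not_found_update(belief, surveyed_node):
    # P(report "not found" | target present) = 0.1, so the surveyed node is
    # discounted rather than eliminated; every other node keeps likelihood 1.
    likelihood = {i: (0.1 if i == surveyed_node else 1.0) for i in belief}
    unnormalized = {i: belief[i] * likelihood[i] for i in belief}
    total = sum(unnormalized.values())  # equals 1 - 0.9 * belief[surveyed_node]
    return {i: p / total for i, p in unnormalized.items()}
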
@@ -809,8 +703,4 @@ def update_predator_belief_state_defective_drone(predator_belief_state, agent_cu
                 new_predator_belief_state[i] = temp_predator_belief_state[i] / (
                         sum(temp_predator_belief_state.values()) - temp_predator_belief_state[agent_curr_pos])

-        # print('in update predator belief func after predator moves')
-        # pprint(new_predator_belief_state)
-        # print('sum of prob: ', sum(new_predator_belief_state.values()))
-        # exit(0)
         return new_predator_belief_state
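
Taken together, the checkpoints form one belief cycle per game turn: condition on the survey, condition on the agent's new position, then push the belief through the target's transition model. A sketch of the prey-belief cycle; the driver function, the prev_pos attribute, and the agent.move signature are assumptions for illustration, not code from this diff:

import utils

def prey_belief_turn(agent, prey, predator, arena, belief):
    # 1. Survey the most-probable node and condition the belief on the result.
    found, surveyed = utils.survey_prey(agent, prey)
    belief = utils.update_prey_belief_state(belief, agent.curr_pos, agent.prev_pos,
                                            arena, found, surveyed, 'after_survey')
    # 2. Chase the argmax node; the agent's new position cannot hold the prey
    #    (the game would already be over), so that mass is redistributed.
    target = utils.return_max_prey_belief(belief, arena)
    prev_pos = agent.curr_pos
    agent.move(arena, target, predator.curr_pos)  # signature assumed
    belief = utils.update_prey_belief_state(belief, agent.curr_pos, prev_pos,
                                            arena, found, surveyed, 'after_agent_moves')
    # 3. The prey steps; push the belief through its transition model.
    prey.move(arena)
    belief = utils.update_prey_belief_state(belief, agent.curr_pos, prev_pos,
                                            arena, found, surveyed, 'after_prey_moves')
    return belief
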