diff --git a/Agent_7.py b/Agent_7.py
index e7b986a..9253a3b 100644
--- a/Agent_7.py
+++ b/Agent_7.py
@@ -232,7 +232,7 @@ def begin(arena):
         if step_count != 0:
             predator_certainty += predator_certainty_counter / step_count
         else:
-            predator_certainty = 1.0
+            predator_certainty = 0.0
 
         game_count += 1
 
diff --git a/Agent_7_survey_or_move.py b/Agent_7_survey_or_move.py
new file mode 100644
index 0000000..ac6b568
--- /dev/null
+++ b/Agent_7_survey_or_move.py
@@ -0,0 +1,247 @@
+import random
+import config
+import utils
+from prey import Prey
+from predator import Predator
+
+class Agent_7_survey_or_move:
+
+    def __init__(self, prey_loc, predator_loc):
+        """
+        Initializes the position of the Agent at a location where the prey and predator are not present
+        Also initializes the belief states of the agent
+
+        Parameters:
+        self
+        prey_loc (int): Location of the prey
+        predator_loc (int): Location of the predator
+        """
+
+        # Handling condition where prey and predator are spawned on the same location
+        list_to_choose_from = list(range(50))
+        if prey_loc == predator_loc:
+            list_to_choose_from.remove(prey_loc)
+        else:
+            list_to_choose_from.remove(prey_loc)
+            list_to_choose_from.remove(predator_loc)
+
+        self.curr_pos = random.choice(list_to_choose_from)
+
+        self.prev_pos = 999
+
+        # Initialize prey belief state
+        self.prey_belief_state = dict.fromkeys([i for i in range(50)], 1/49)
+        self.prey_belief_state[self.curr_pos] = 0
+        # print(f'Initial prey belief state: {self.prey_belief_state}')
+
+        # Initialize predator belief state
+        self.predator_belief_state = dict.fromkeys([i for i in range(50)], 0)
+        self.predator_belief_state[predator_loc] = 1
+        # print(f'Initial predator belief state: {self.predator_belief_state}')
+
+    def move(self, arena, prey_loc, predator_loc):
+        """
+        Moves the agent according to the given priority
+
+        Parameters:
+        self
+        arena (dictionary): Adjacency list representing the graph
+        prey_loc (int): Location of prey
+        predator_loc (int): Location of Predator
+        """
+
+        pos = utils.best_node(arena, self.curr_pos, prey_loc, predator_loc)
+
+        # Handling the sitting and praying case
+        if pos == 999:
+            pass
+        else:
+            self.prev_pos = self.curr_pos
+            self.curr_pos = pos
+
+
+    def begin(arena):
+        """
+        Creates all the maze objects, plays the configured number of games and collects data
+
+        Parameters:
+        arena (dict): Arena to use
+
+        Returns:
+        data_row (list): Results evaluated for the agent
+        """
+
+        # Initializing game variables
+        game_count = 0
+        step_count = 0
+
+        # Initializing variables for analysis
+        win_count = 0
+        loss_count = 0
+        forced_termination = 0
+        data_row = []
+
+        number_of_games = config.NUMBER_OF_GAMES
+        forced_termination_threshold = config.FORCED_TERMINATION_THRESHOLD
+
+        prey_certainty = 0.0
+        predator_certainty = 0.0
+        while game_count < number_of_games:
+            # Creating objects
+            prey = Prey()
+            predator = Predator()
+            agent7 = Agent_7_survey_or_move(prey.curr_pos, predator.curr_pos)
+            zero_values = 0
+            step_count = 0
+            found_prey = False
+            found_predator = True
+            prey_certainty_counter = 0
+            predator_certainty_counter = 0
+            believed_predator_curr_pos = predator.curr_pos
+            survey = False
+            node_surveyed = 0
+            while 1:
+                print("In game Agent_7 at game_count: ", game_count, " step_count: ", step_count)
+                print(agent7.curr_pos, prey.curr_pos, predator.curr_pos)
+
+                if survey:
+                    # Check if it knows where the predator is
+                    for i in agent7.predator_belief_state.keys():
+                        if agent7.predator_belief_state[i] == 0 or agent7.predator_belief_state[i] == 0.0:
+                            zero_values += 1
+
+                    print(zero_values)
+
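+                    # If 49 of the 50 nodes hold zero belief, the predator's
+                    # position is already known with certainty, so this turn's
+                    # survey is spent on the prey instead of the predator.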
+                    if zero_values == 49:
+                        found_prey, prey_node_surveyed = utils.survey_prey(agent7, prey)
+                    else:
+                        found_predator, predator_node_surveyed = utils.survey_predator(agent7, predator)
+
+                    if prey_node_surveyed != None:
+                        node_surveyed = prey_node_surveyed
+                        if prey_node_surveyed == predator.curr_pos:
+                            found_predator = True
+
+                    else:
+                        node_surveyed = predator_node_surveyed
+                        if predator_node_surveyed == prey.curr_pos:
+                            found_prey = True
+                    zero_values = 0
+
+                agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
+                                                                          agent7.curr_pos, \
+                                                                          agent7.prev_pos, \
+                                                                          arena, \
+                                                                          found_prey, \
+                                                                          node_surveyed, \
+                                                                          'after_survey')
+
+                agent7.predator_belief_state = utils.update_predator_belief_state(agent7.predator_belief_state, \
+                                                                                  agent7.curr_pos, \
+                                                                                  agent7.prev_pos, \
+                                                                                  arena, \
+                                                                                  found_predator, \
+                                                                                  node_surveyed, \
+                                                                                  'after_survey')
+
+                if max(agent7.prey_belief_state.values()) == 1:
+                    prey_certainty_counter += 1
+
+                if max(agent7.predator_belief_state.values()) == 1:
+                    predator_certainty_counter += 1
+
+                believed_prey_curr_pos = utils.return_max_prey_belief(agent7.prey_belief_state, arena)
+                believed_predator_curr_pos = utils.return_max_predator_belief(agent7.predator_belief_state, arena)
+
+                # using the max belief node for prey
+                agent7.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
+
+                # Checking termination states
+                if agent7.curr_pos == prey.curr_pos:
+                    win_count += 1
+                    break
+                elif agent7.curr_pos == predator.curr_pos:
+                    loss_count += 1
+                    break
+
+                # update belief state
+                agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
+                                                                          agent7.curr_pos, \
+                                                                          agent7.prev_pos, \
+                                                                          arena, \
+                                                                          found_prey, \
+                                                                          node_surveyed, \
+                                                                          'after_agent_moves')
+
+                agent7.predator_belief_state = utils.update_predator_belief_state(agent7.predator_belief_state, \
+                                                                                  agent7.curr_pos, \
+                                                                                  agent7.prev_pos, \
+                                                                                  arena, \
+                                                                                  found_predator, \
+                                                                                  node_surveyed, \
+                                                                                  'after_agent_moves')
+
+                prey.move(arena)
+
+                agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
+                                                                          agent7.curr_pos, \
+                                                                          agent7.prev_pos, \
+                                                                          arena, \
+                                                                          found_prey, \
+                                                                          node_surveyed, \
+                                                                          'after_prey_moves')
+
+                # Checking termination states
+                if agent7.curr_pos == prey.curr_pos:
+                    win_count += 1
+                    break
+
+                predator.distracted_move(agent7.curr_pos, arena)
+
+                agent7.predator_belief_state = utils.update_predator_belief_state(agent7.predator_belief_state, \
+                                                                                  agent7.curr_pos, \
+                                                                                  agent7.prev_pos, \
+                                                                                  arena, \
+                                                                                  found_predator, \
+                                                                                  node_surveyed, \
+                                                                                  'after_predator_moves')
+
+                found_prey = False
+                found_predator = False
+
+                predator_node_surveyed = None
+                prey_node_surveyed = None
+
+                # Checking termination states
+                if agent7.curr_pos == predator.curr_pos:
+                    loss_count += 1
+                    break
+
+                step_count += 1
+
+                # Forcing termination
+                if step_count >= forced_termination_threshold:
+                    forced_termination += 1
+                    break
+
+            if step_count != 0:
+                prey_certainty += prey_certainty_counter / step_count
+            else:
+                prey_certainty = 0.0
+
+            if step_count != 0:
+                predator_certainty += predator_certainty_counter / step_count
+            else:
+                predator_certainty = 0.0
+
+            game_count += 1
+
+        data_row = ["Agent_7_survey_or_move", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
+                    forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, predator_certainty * 100 / number_of_games]
+
+        return data_row
diff --git a/Agent_7_wdd_handled.py b/Agent_7_wdd_handled.py
index 7007ee3..74bf781 100644
--- a/Agent_7_wdd_handled.py
+++ b/Agent_7_wdd_handled.py
@@ -207,7 +207,7 @@ def begin(arena):
         if step_count != 0:
             predator_certainty += predator_certainty_counter / step_count
         else:
-            predator_certainty = 1.0
+            predator_certainty = 0.0
         game_count += 1
     data_row = ["Agent_7_wdd_handled", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games,
diff --git a/Agent_7_with_defective_drone.py b/Agent_7_with_defective_drone.py
index 01286e9..66ed562 100644
--- a/Agent_7_with_defective_drone.py
+++ b/Agent_7_with_defective_drone.py
@@ -84,6 +84,8 @@ def begin(arena):
     number_of_games = config.NUMBER_OF_GAMES
     forced_termination_threshold = config.FORCED_TERMINATION_THRESHOLD
 
+    prey_certainty = 0.0
+    predator_certainty = 0.0
     while game_count < number_of_games:
         # Creating objects
         prey = Prey()
@@ -93,6 +95,8 @@ def begin(arena):
         step_count = 0
         found_prey = False
         found_predator = True
+        prey_certainty_counter = 0
+        predator_certainty_counter = 0
         while 1:
             print("In game Agent_7_wdd at game_count: ", game_count, " step_count: ", step_count)
             print(agent7_wdd.curr_pos, prey.curr_pos, predator.curr_pos)
@@ -118,6 +122,9 @@ def begin(arena):
                                                                    node_surveyed, \
                                                                    'after_survey')
 
+            if max(agent7_wdd.prey_belief_state.values()) == 1:
+                prey_certainty_counter += 1
+
             agent7_wdd.predator_belief_state = utils.update_predator_belief_state(agent7_wdd.predator_belief_state, \
                                                                                   agent7_wdd.curr_pos, \
                                                                                   agent7_wdd.prev_pos, \
@@ -125,7 +132,8 @@ def begin(arena):
                                                                                   found_predator, \
                                                                                   node_surveyed, \
                                                                                   'after_survey')
-
+            if max(agent7_wdd.predator_belief_state.values()) == 1:
+                predator_certainty_counter += 1
             believed_prey_curr_pos = utils.return_max_prey_belief(agent7_wdd.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent7_wdd.predator_belief_state, arena)
 
@@ -193,10 +201,18 @@ def begin(arena):
            if step_count >= forced_termination_threshold:
                forced_termination += 1
                break
-
+        if step_count != 0:
+            prey_certainty += prey_certainty_counter / step_count
+        else:
+            prey_certainty = 0.0
+
+        if step_count != 0:
+            predator_certainty += predator_certainty_counter / step_count
+        else:
+            predator_certainty = 0.0
         game_count += 1
 
     data_row = ["Agent_7_wdd", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
-                forced_termination * 100 / number_of_games]
+                forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, predator_certainty * 100 / number_of_games]
 
     return data_row
diff --git a/Agent_8.py b/Agent_8.py
index d17ab3f..e9f39ce 100644
--- a/Agent_8.py
+++ b/Agent_8.py
@@ -193,8 +193,12 @@ def begin(arena):
                forced_termination += 1
                break
 
-        prey_certainty += prey_certainty_counter / step_count
-        predator_certainty += predator_certainty_counter / step_count
+        if step_count != 0:
+            prey_certainty += prey_certainty_counter / step_count
+            predator_certainty += predator_certainty_counter / step_count
+        else:
+            prey_certainty = 0.0
+            predator_certainty = 0.0
 
         game_count += 1
 
diff --git a/Agent_8_survey_or_move.py b/Agent_8_survey_or_move.py
new file mode 100644
index 0000000..c2133a1
--- /dev/null
+++ b/Agent_8_survey_or_move.py
@@ -0,0 +1,212 @@
+import random
+import config
+import utils
+from prey import Prey
+from predator import Predator
+
+
+class Agent_8_survey_or_move:
+
+    def __init__(self, prey_loc, predator_loc):
+        """
+        Initializes the position of the Agent at a location where the prey and predator are not present
+        Also initializes the belief states of the agent
+
+        Parameters:
+        self
+        prey_loc (int): Location of the prey
+        predator_loc (int): Location of the predator
+        """
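+
+        # Same setup as Agent_7_survey_or_move: spawn away from the prey and
+        # predator, uniform prey belief over the other 49 nodes, and certainty
+        # about the predator's spawn location.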
+
+        # Handling condition where prey and predator are spawned on the same location
+        list_to_choose_from = list(range(50))
+        if prey_loc == predator_loc:
+            list_to_choose_from.remove(prey_loc)
+        else:
+            list_to_choose_from.remove(prey_loc)
+            list_to_choose_from.remove(predator_loc)
+
+        self.curr_pos = random.choice(list_to_choose_from)
+
+        self.prev_pos = 999
+
+        # Initialize prey belief state
+        self.prey_belief_state = dict.fromkeys([i for i in range(50)], 1 / 49)
+        self.prey_belief_state[self.curr_pos] = 0
+        # print(f'Initial prey belief state: {self.prey_belief_state}')
+
+        # Initialize predator belief state
+        self.predator_belief_state = dict.fromkeys([i for i in range(50)], 0)
+        self.predator_belief_state[predator_loc] = 1
+        # print(f'Initial predator belief state: {self.predator_belief_state}')
+
+    def move(self, arena, prey_loc, predator_loc):
+        """
+        Moves the agent according to the modified priority
+
+        Parameters:
+        self
+        arena (dictionary): Adjacency list representing the graph
+        prey_loc (int): Location of prey
+        predator_loc (int): Location of Predator
+        """
+
+        pos = utils.best_node_v2(arena, self.curr_pos, prey_loc, predator_loc)
+
+        # Handling the sitting and praying case
+        if pos == 999:
+            pass
+        else:
+            self.prev_pos = self.curr_pos
+            self.curr_pos = pos
+
+    def begin(arena):
+        """
+        Creates all the maze objects, plays the configured number of games and collects data
+
+        Parameters:
+        arena (dict): Arena to use
+
+        Returns:
+        data_row (list): Results evaluated for the agent
+        """
+
+        # Initializing game variables
+        game_count = 0
+        step_count = 0
+
+        # Initializing variables for analysis
+        win_count = 0
+        loss_count = 0
+        forced_termination = 0
+        data_row = []
+
+        number_of_games = config.NUMBER_OF_GAMES
+        forced_termination_threshold = config.FORCED_TERMINATION_THRESHOLD
+
+        prey_certainty = 0.0
+        predator_certainty = 0.0
+        while game_count < number_of_games:
+            # Creating objects
+            prey = Prey()
+            predator = Predator()
+            agent8 = Agent_8_survey_or_move(prey.curr_pos, predator.curr_pos)
+
+            step_count = 0
+            found_prey = False
+            found_predator = True
+            prey_certainty_counter = 0
+            predator_certainty_counter = 0
+            survey = False
+            node_surveyed = 0
+            while 1:
+                print("In game Agent_8 at game_count: ", game_count, " step_count: ", step_count)
+                print(agent8.curr_pos, prey.curr_pos, predator.curr_pos)
+
+                if survey:
+                    # Check if it knows where the predator is
+                    if max(agent8.predator_belief_state.values()) == 1.0:
+                        found_prey, node_surveyed = utils.survey_prey(agent8, prey)
+                    else:
+                        found_predator, node_surveyed = utils.survey_predator(agent8, predator)
+
+                # updating both belief states
+                agent8.prey_belief_state = utils.update_prey_belief_state(agent8.prey_belief_state, \
+                                                                          agent8.curr_pos, \
+                                                                          agent8.prev_pos, \
+                                                                          arena, \
+                                                                          found_prey, \
+                                                                          node_surveyed, \
+                                                                          'after_survey')
+
+                agent8.predator_belief_state = utils.update_predator_belief_state(agent8.predator_belief_state, \
+                                                                                  agent8.curr_pos, \
+                                                                                  agent8.prev_pos, \
+                                                                                  arena, \
+                                                                                  found_predator, \
+                                                                                  node_surveyed, \
+                                                                                  'after_survey')
+
+                if max(agent8.prey_belief_state.values()) == 1:
+                    prey_certainty_counter += 1
+
+                if max(agent8.predator_belief_state.values()) == 1:
+                    predator_certainty_counter += 1
+
+                believed_prey_curr_pos = utils.return_max_prey_belief(agent8.prey_belief_state, arena)
+                believed_predator_curr_pos = utils.return_max_predator_belief(agent8.predator_belief_state, arena)
+
+                agent8.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)
+
+                # Checking termination states
+                if agent8.curr_pos == prey.curr_pos:
+                    win_count += 1
+                    break
+                elif agent8.curr_pos == predator.curr_pos:
+                    loss_count += 1
+                    break
+
+                # update belief state
+                agent8.prey_belief_state = utils.update_prey_belief_state(agent8.prey_belief_state, \
+                                                                          agent8.curr_pos, \
+                                                                          agent8.prev_pos, \
+                                                                          arena, \
+                                                                          found_prey, \
+                                                                          node_surveyed, \
+                                                                          'after_agent_moves')
+
+                agent8.predator_belief_state = utils.update_predator_belief_state(agent8.predator_belief_state, \
+                                                                                  agent8.curr_pos, \
+                                                                                  agent8.prev_pos, \
+                                                                                  arena, \
+                                                                                  found_predator, \
+                                                                                  node_surveyed, \
+                                                                                  'after_agent_moves')
+
+                prey.move(arena)
+
+                agent8.prey_belief_state = utils.update_prey_belief_state(agent8.prey_belief_state, \
+                                                                          agent8.curr_pos, \
+                                                                          agent8.prev_pos, \
+                                                                          arena, \
+                                                                          found_prey, \
+                                                                          node_surveyed, \
+                                                                          'after_prey_moves')
+
+                # Checking termination states
+                if agent8.curr_pos == prey.curr_pos:
+                    win_count += 1
+                    break
+
+                predator.distracted_move(agent8.curr_pos, arena)
+
+                agent8.predator_belief_state = utils.update_predator_belief_state(agent8.predator_belief_state, \
+                                                                                  agent8.curr_pos, \
+                                                                                  agent8.prev_pos, \
+                                                                                  arena, \
+                                                                                  found_predator, \
+                                                                                  node_surveyed, \
+                                                                                  'after_predator_moves')
+                # Checking termination states
+                if agent8.curr_pos == predator.curr_pos:
+                    loss_count += 1
+                    break
+
+                step_count += 1
+
+                # Forcing termination
+                if step_count >= forced_termination_threshold:
+                    forced_termination += 1
+                    break
+            if step_count != 0:
+                prey_certainty += prey_certainty_counter / step_count
+                predator_certainty += predator_certainty_counter / step_count
+            else:
+                prey_certainty = 0.0
+                predator_certainty = 0.0
+
+            game_count += 1
+
+        data_row = ["Agent_8_survey_or_move", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
+                    forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, predator_certainty * 100 / number_of_games]
+        return data_row
diff --git a/Agent_8_wdd_handled.py b/Agent_8_wdd_handled.py
index 405fa60..53050c7 100644
--- a/Agent_8_wdd_handled.py
+++ b/Agent_8_wdd_handled.py
@@ -206,7 +206,7 @@ def begin(arena):
         if step_count != 0:
             predator_certainty += predator_certainty_counter / step_count
         else:
-            predator_certainty = 1.0
+            predator_certainty = 0.0
         game_count += 1
     data_row = ["Agent_8_wdd_handled", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                 forced_termination * 100 / number_of_games,
diff --git a/Agent_8_with_defective_drone.py b/Agent_8_with_defective_drone.py
index b5bbd7f..0d1b962 100644
--- a/Agent_8_with_defective_drone.py
+++ b/Agent_8_with_defective_drone.py
@@ -84,6 +84,8 @@ def begin(arena):
     number_of_games = config.NUMBER_OF_GAMES
     forced_termination_threshold = config.FORCED_TERMINATION_THRESHOLD
 
+    prey_certainty = 0.0
+    predator_certainty = 0.0
     while game_count < number_of_games:
         # Creating objects
         prey = Prey()
@@ -93,6 +95,8 @@ def begin(arena):
         step_count = 0
         found_prey = False
         found_predator = True
+        prey_certainty_counter = 0
+        predator_certainty_counter = 0
         while 1:
             print("In game Agent_8_wdd at game_count: ", game_count, " step_count: ", step_count)
             print(agent8_wdd.curr_pos, prey.curr_pos, predator.curr_pos)
@@ -117,7 +121,8 @@ def begin(arena):
                                                                    found_prey, \
                                                                    node_surveyed, \
                                                                    'after_survey')
-
+            if max(agent8_wdd.prey_belief_state.values()) == 1:
+                prey_certainty_counter += 1
             agent8_wdd.predator_belief_state = utils.update_predator_belief_state(agent8_wdd.predator_belief_state, \
                                                                                   agent8_wdd.curr_pos, \
                                                                                   agent8_wdd.prev_pos, \
@@ -126,6 +131,8 @@ def begin(arena):
                                                                                   node_surveyed, \
                                                                                   'after_survey')
 
+            if max(agent8_wdd.predator_belief_state.values()) == 1:
+                predator_certainty_counter += 1
             believed_prey_curr_pos = utils.return_max_prey_belief(agent8_wdd.prey_belief_state, arena)
             believed_predator_curr_pos = utils.return_max_predator_belief(agent8_wdd.predator_belief_state, arena)
 
@@ -192,9 +199,17 @@ def begin(arena):
            if step_count >= forced_termination_threshold:
                forced_termination += 1
                break
-
+        if step_count != 0:
+            prey_certainty += prey_certainty_counter / step_count
+        else:
+            prey_certainty = 0.0
+
+        if step_count != 0:
+            predator_certainty += predator_certainty_counter / step_count
+        else:
+            predator_certainty = 0.0
         game_count += 1
     data_row = ["Agent_8_wdd", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
-                forced_termination * 100 / number_of_games]
+                forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, predator_certainty * 100 / number_of_games]
 
     return data_row
diff --git a/config.py b/config.py
index 3632679..fcb9ce6 100644
--- a/config.py
+++ b/config.py
@@ -1,6 +1,6 @@
-FILE_PATH = 'D:/Desktop/Fall_22_Academics/520_Intro to AI/Project_2/Data/'
-# FILE_PATH = '/Users/dhanur/Everything/Subjects/520_Intro_to_Artificial_Intelligence/Projects/Project_2/circle_of_life/The-Circle-of-Life/data/'
-FILE_NAME = 'Agent_data.csv'
+# FILE_PATH = 'D:/Desktop/Fall_22_Academics/520_Intro to AI/Project_2/Data/'
+FILE_PATH = '/Users/dhanur/Everything/Subjects/520_Intro_to_Artificial_Intelligence/Projects/Project_2/circle_of_life/The-Circle-of-Life/data/'
+FILE_NAME = 'Agent_move_or_survey_data.csv'
 
 NUMBER_OF_GAMES = 30
 NUMBER_OF_ARENAS = 100
diff --git a/run.py b/run.py
index 937219b..909f80f 100644
--- a/run.py
+++ b/run.py
@@ -15,6 +15,8 @@ from Agent_7_wdd_handled import Agent_7_wdd_handled
 from Agent_8_with_defective_drone import Agent_8_wdd
 from Agent_8_wdd_handled import Agent_8_wdd_handled
 
+from Agent_7_survey_or_move import Agent_7_survey_or_move
+from Agent_8_survey_or_move import Agent_8_survey_or_move
 # from Agent_9 import Agent_9
 import utils
 
@@ -47,6 +49,9 @@ def run():
         results.append(Agent_8_wdd.begin(arena))
         results.append(Agent_8_wdd_handled.begin(arena))
         # results.append(Agent_9.begin(arena))
+        results.append(Agent_7_survey_or_move.begin(arena))
+        results.append(Agent_8_survey_or_move.begin(arena))
+
         print('-'*100)