Skip to content

Commit

Permalink
Code freeze
Browse files Browse the repository at this point in the history
  • Loading branch information
dhanur-sharma committed Nov 15, 2022
1 parent 2d507f2 commit 9211bf8
Show file tree
Hide file tree
Showing 10 changed files with 513 additions and 14 deletions.
2 changes: 1 addition & 1 deletion Agent_7.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,7 @@ def begin(arena):
if step_count != 0:
predator_certainty += predator_certainty_counter / step_count
else:
predator_certainty = 1.0
predator_certainty = 0.0

game_count += 1

Expand Down
247 changes: 247 additions & 0 deletions Agent_7_survey_or_move.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,247 @@
import random
import config
import utils
from prey import Prey
from predator import Predator

class Agent_7_survey_or_move:

    def __init__(self, prey_loc, predator_loc):
        """
        Spawn the agent on a node occupied by neither the prey nor the
        predator, and set up both belief distributions over the 50 nodes.

        Parameters:
            self
            prey_loc (int): Location of the prey
            predator_loc (int): Location of the predator
        """
        # Candidate spawn nodes: every node except the occupied ones.
        # A set handles the prey-and-predator-on-the-same-node case for free.
        occupied = {prey_loc, predator_loc}
        spawn_candidates = [node for node in range(50) if node not in occupied]

        self.curr_pos = random.choice(spawn_candidates)

        # Sentinel meaning "no previous position yet".
        self.prev_pos = 999

        # Prey belief: uniform over the 49 nodes the agent does not occupy.
        self.prey_belief_state = {node: 1/49 for node in range(50)}
        self.prey_belief_state[self.curr_pos] = 0

        # Predator belief: its spawn node is known with certainty.
        self.predator_belief_state = {node: 0 for node in range(50)}
        self.predator_belief_state[predator_loc] = 1

    def move(self, arena, prey_loc, predator_loc):
        """
        Step the agent to the node chosen by utils.best_node for the given
        (believed) prey and predator locations.

        Parameters:
            self
            arena (dictionary): Adjacency list representing the graph
            prey_loc (int): Location of prey
            predator_loc (int): Location of Predator
        """
        destination = utils.best_node(arena, self.curr_pos, prey_loc, predator_loc)

        # 999 is best_node's "sit and pray" sentinel: stay put, keep prev_pos.
        if destination != 999:
            self.prev_pos = self.curr_pos
            self.curr_pos = destination

def begin(arena):
    """
    Creates all the maze objects and plays number of games and collects data
    Parameters:
        arena (dict): Arena to use
    Returns:
        data_row (list): Results evaluated for the agent
    """

    # Initiating game variables
    game_count = 0
    step_count = 0

    # Initiating variables for analysis
    win_count = 0
    loss_count = 0
    forced_termination = 0
    data_row = []

    number_of_games = config.NUMBER_OF_GAMES
    forced_termination_threshold = config.FORCED_TERMINATION_THRESHOLD

    # Running sums (over games) of the per-game fraction of steps on which
    # the agent's belief was fully certain (max belief == 1).
    prey_certainty = 0.0
    predator_certainty = 0.0
    while game_count < number_of_games:
        # Creating objects
        prey = Prey()
        predator = Predator()
        agent7 = Agent_7_survey_or_move(prey.curr_pos, predator.curr_pos)
        zero_values=0
        step_count = 0
        found_prey = False
        # NOTE(review): starts True even though nothing has been surveyed yet
        # — confirm this initial value is intended.
        found_predator = True
        prey_certainty_counter = 0
        predator_certainty_counter = 0
        believed_predator_curr_pos = predator.curr_pos
        # NOTE(review): `survey` is never set to True anywhere in this loop,
        # so the entire `if survey:` branch below is dead code — confirm
        # whether a survey/move decision was meant to toggle it.
        survey = False
        node_surveyed = 0
        while 1:
            print("In game Agent_7 at game_count: ", game_count, " step_count: ", step_count)
            print(agent7.curr_pos, prey.curr_pos, predator.curr_pos)

            if survey:
                # Check if it knows where the predator is
                for i in agent7.predator_belief_state.keys():
                    if agent7.predator_belief_state[i] == 0 or agent7.predator_belief_state[i] == 0.0:
                        zero_values+=1


                print(zero_values)


                # 49 zero entries out of 50 means the predator's node is known
                # exactly, so spend the survey on the prey instead.
                if zero_values == 49 :
                    found_prey, prey_node_surveyed = utils.survey_prey(agent7, prey)
                else:
                    found_predator, predator_node_surveyed = utils.survey_predator(agent7, predator)

                # NOTE(review): when the predator branch above ran,
                # `prey_node_surveyed` may be unbound on the first iteration —
                # it is only reset to None at the bottom of the loop; confirm.
                if prey_node_surveyed != None:
                    node_surveyed = prey_node_surveyed
                    # Surveying the prey's node can also reveal the predator
                    # if it happens to be there.
                    if prey_node_surveyed == predator.curr_pos:
                        found_predator = True

                else:
                    node_surveyed = predator_node_surveyed
                    if predator_node_surveyed == prey.curr_pos:
                        found_prey = True
                zero_values = 0

                # Fold the survey outcome into both belief states.
                agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
                                                                          agent7.curr_pos, \
                                                                          agent7.prev_pos, \
                                                                          arena, \
                                                                          found_prey, \
                                                                          node_surveyed, \
                                                                          'after_survey')



                agent7.predator_belief_state = utils.update_predator_belief_state(agent7.predator_belief_state, \
                                                                                  agent7.curr_pos, \
                                                                                  agent7.prev_pos, \
                                                                                  arena, \
                                                                                  found_predator, \
                                                                                  node_surveyed, \
                                                                                  'after_survey')

            # Count steps on which either belief is fully certain.
            if max(agent7.prey_belief_state.values()) == 1:
                prey_certainty_counter += 1

            if max(agent7.predator_belief_state.values()) == 1:
                predator_certainty_counter += 1

            believed_prey_curr_pos = utils.return_max_prey_belief(agent7.prey_belief_state, arena)
            believed_predator_curr_pos = utils.return_max_predator_belief(agent7.predator_belief_state, arena)


            #using the max belief node for prey
            agent7.move(arena, believed_prey_curr_pos, believed_predator_curr_pos)

            # Checking termination states
            if agent7.curr_pos == prey.curr_pos:
                win_count += 1
                break
            elif agent7.curr_pos == predator.curr_pos:
                loss_count += 1
                break

            # update belief state
            agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
                                                                      agent7.curr_pos, \
                                                                      agent7.prev_pos, \
                                                                      arena, \
                                                                      found_prey, \
                                                                      node_surveyed, \
                                                                      'after_agent_moves')

            agent7.predator_belief_state = utils.update_predator_belief_state(agent7.predator_belief_state, \
                                                                              agent7.curr_pos, \
                                                                              agent7.prev_pos, \
                                                                              arena, \
                                                                              found_predator, \
                                                                              node_surveyed, \
                                                                              'after_agent_moves')

            prey.move(arena)

            # Diffuse the prey belief to account for its random move.
            agent7.prey_belief_state = utils.update_prey_belief_state(agent7.prey_belief_state, \
                                                                      agent7.curr_pos, \
                                                                      agent7.prev_pos, \
                                                                      arena, \
                                                                      found_prey, \
                                                                      node_surveyed, \
                                                                      'after_prey_moves')

            # Checking termination states
            if agent7.curr_pos == prey.curr_pos:
                win_count += 1
                break

            predator.distracted_move(agent7.curr_pos, arena)

            # Diffuse the predator belief to account for its (possibly
            # distracted) move.
            agent7.predator_belief_state = utils.update_predator_belief_state(agent7.predator_belief_state, \
                                                                              agent7.curr_pos, \
                                                                              agent7.prev_pos, \
                                                                              arena, \
                                                                              found_predator, \
                                                                              node_surveyed, \
                                                                              'after_predator_moves')


            # Reset the per-step survey outcome before the next iteration.
            found_prey = False
            found_predator = False

            predator_node_surveyed = None
            prey_node_surveyed = None

            # Checking termination states
            if agent7.curr_pos == predator.curr_pos:
                loss_count += 1
                break

            step_count += 1

            # Forcing termination
            if step_count >= forced_termination_threshold:
                forced_termination += 1
                break
        # Average certainty over the game's steps; guard the zero-step case
        # (game decided before the first full step).
        if step_count != 0:
            prey_certainty += prey_certainty_counter / step_count
        else:
            prey_certainty = 0.0

        if step_count != 0:
            predator_certainty += predator_certainty_counter / step_count
        else:
            predator_certainty = 0.0

        game_count += 1

    # Win/loss/forced-termination rates and average certainties, as percents.
    data_row = ["Agent_7_survey_or_move", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
                forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, predator_certainty * 100 / number_of_games]

    return data_row
2 changes: 1 addition & 1 deletion Agent_7_wdd_handled.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,7 @@ def begin(arena):
if step_count != 0:
predator_certainty += predator_certainty_counter / step_count
else:
predator_certainty = 1.0
predator_certainty = 0.0
game_count += 1

data_row = ["Agent_7_wdd_handled", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
Expand Down
22 changes: 19 additions & 3 deletions Agent_7_with_defective_drone.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,8 @@ def begin(arena):
number_of_games = config.NUMBER_OF_GAMES
forced_termination_threshold = config.FORCED_TERMINATION_THRESHOLD

prey_certainty = 0.0
predator_certainty = 0.0
while game_count < number_of_games:
# Creating objects
prey = Prey()
Expand All @@ -93,6 +95,8 @@ def begin(arena):
step_count = 0
found_prey = False
found_predator = True
prey_certainty_counter = 0
predator_certainty_counter = 0
while 1:
print("In game Agent_7_wdd at game_count: ", game_count, " step_count: ", step_count)
print(agent7_wdd.curr_pos, prey.curr_pos, predator.curr_pos)
Expand All @@ -118,14 +122,18 @@ def begin(arena):
node_surveyed, \
'after_survey')

if max(agent7_wdd.prey_belief_state.values()) == 1:
prey_certainty_counter += 1

agent7_wdd.predator_belief_state = utils.update_predator_belief_state(agent7_wdd.predator_belief_state, \
agent7_wdd.curr_pos, \
agent7_wdd.prev_pos, \
arena, \
found_predator, \
node_surveyed, \
'after_survey')

if max(agent7_wdd.predator_belief_state.values()) == 1:
predator_certainty_counter += 1

believed_prey_curr_pos = utils.return_max_prey_belief(agent7_wdd.prey_belief_state, arena)
believed_predator_curr_pos = utils.return_max_predator_belief(agent7_wdd.predator_belief_state, arena)
Expand Down Expand Up @@ -193,10 +201,18 @@ def begin(arena):
if step_count >= forced_termination_threshold:
forced_termination += 1
break

if step_count != 0:
prey_certainty += prey_certainty_counter / step_count
else:
prey_certainty = 0.0

if step_count != 0:
predator_certainty += predator_certainty_counter / step_count
else:
predator_certainty = 0.0
game_count += 1

data_row = ["Agent_7_wdd", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
forced_termination * 100 / number_of_games]
forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, predator_certainty * 100 / number_of_games]
return data_row

8 changes: 6 additions & 2 deletions Agent_8.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,8 +193,12 @@ def begin(arena):
forced_termination += 1
break

prey_certainty += prey_certainty_counter / step_count
predator_certainty += predator_certainty_counter / step_count
if step_count != 0:
prey_certainty += prey_certainty_counter / step_count
predator_certainty += predator_certainty_counter / step_count
else:
prey_certainty = 0.0
predator_certainty = 0.0

game_count += 1

Expand Down
Loading

0 comments on commit 9211bf8

Please sign in to comment.