Commit
Code sanitation done
athar-va committed Nov 14, 2022
1 parent df9ee91 commit 4e1888f
Showing 21 changed files with 96 additions and 1,444 deletions.
6 changes: 6 additions & 0 deletions .idea/vcs.xml


37 changes: 37 additions & 0 deletions .idea/workspace.xml


67 changes: 0 additions & 67 deletions Agent_1.py
@@ -1,17 +1,9 @@
import random
from pprint import pprint

import config
import utils
from prey import Prey
from predator import Predator
"""
# Test Imports
from pprint import pprint
import environment as env
"""



class Agent_1:
@@ -206,62 +198,3 @@ def begin(arena):
forced_termination * 100 / number_of_games, 100.0, 100.0]
# data.append(data_row)
return data_row


"""
# Class Test code
#arena=env.generate_environement()
arena = {0: [1, 49, 48],
1: [2, 0, 46],
2: [3, 1, 5],
3: [4, 2, 7],
4: [5, 3, 6],
5: [6, 4, 2],
6: [7, 5, 4],
7: [8, 6, 3],
8: [9, 7, 10],
9: [10, 8, 11],
10: [11, 9, 8],
11: [12, 10, 9],
12: [13, 11, 14],
13: [14, 12, 15],
14: [15, 13, 12],
15: [16, 14, 13],
16: [17, 15, 19],
17: [18, 16, 20],
18: [19, 17, 21],
19: [20, 18, 16],
20: [21, 19, 17],
21: [22, 20, 18],
22: [23, 21, 26],
23: [24, 22, 25],
24: [25, 23, 28],
25: [26, 24, 23],
26: [27, 25, 22],
27: [28, 26, 30],
28: [29, 27, 24],
29: [30, 28, 31],
30: [31, 29, 27],
31: [32, 30, 29],
32: [33, 31, 35],
33: [34, 32],
34: [35, 33, 39],
35: [36, 34, 32],
36: [37, 35, 38],
37: [38, 36, 41],
38: [39, 37, 36],
39: [40, 38, 34],
40: [41, 39, 44],
41: [42, 40, 37],
42: [43, 41],
43: [44, 42, 47],
44: [45, 43, 40],
45: [46, 44, 49],
46: [47, 45, 1],
47: [48, 46, 43],
48: [49, 47, 0],
49: [0, 48, 45]}
# print(a1.curr_pos)
a1.move(arena, 5, 6)
# pprint(arena)
"""
86 changes: 1 addition & 85 deletions Agent_2.py
@@ -1,17 +1,9 @@
import random
from pprint import pprint
import matplotlib.pyplot as plt
import config
import utils
from prey import Prey
from predator import Predator
import networkx as nx
"""
# Test Imports
from pprint import pprint
import environment as env

"""


class Agent_2:
@@ -131,82 +123,6 @@ def begin(arena):

data_row = ["Agent_2", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
forced_termination * 100 / number_of_games, 100.0, 100.0]
# data.append(data_row)


# chocolate pan
# if loss_count * 100 / number_of_games > 30:
# pprint(arena)
# print("Agent:",test_agent_pos," Prey :",test_prey_pos, " Predator :",test_predator_pos)
#
# edges = []
# for key in arena:
# for i in arena[key]:
# edges.append([key,i])
# #print(edges)
# graph=nx.Graph()
# graph.add_edges_from(edges)
# nx.draw_networkx(graph)
# plt.show()
#
# exit(0)
return data_row

return data_row

"""
# Class Test code
#arena=env.generate_environement()
arena = {0: [1, 49, 48],
1: [2, 0, 46],
2: [3, 1, 5],
3: [4, 2, 7],
4: [5, 3, 6],
5: [6, 4, 2],
6: [7, 5, 4],
7: [8, 6, 3],
8: [9, 7, 10],
9: [10, 8, 11],
10: [11, 9, 8],
11: [12, 10, 9],
12: [13, 11, 14],
13: [14, 12, 15],
14: [15, 13, 12],
15: [16, 14, 13],
16: [17, 15, 19],
17: [18, 16, 20],
18: [19, 17, 21],
19: [20, 18, 16],
20: [21, 19, 17],
21: [22, 20, 18],
22: [23, 21, 26],
23: [24, 22, 25],
24: [25, 23, 28],
25: [26, 24, 23],
26: [27, 25, 22],
27: [28, 26, 30],
28: [29, 27, 24],
29: [30, 28, 31],
30: [31, 29, 27],
31: [32, 30, 29],
32: [33, 31, 35],
33: [34, 32],
34: [35, 33, 39],
35: [36, 34, 32],
36: [37, 35, 38],
37: [38, 36, 41],
38: [39, 37, 36],
39: [40, 38, 34],
40: [41, 39, 44],
41: [42, 40, 37],
42: [43, 41],
43: [44, 42, 47],
44: [45, 43, 40],
45: [46, 44, 49],
46: [47, 45, 1],
47: [48, 46, 43],
48: [49, 47, 0],
49: [0, 48, 45]}
# print(a1.curr_pos)
a1.move(arena, 5, 6)
# pprint(arena)
"""
91 changes: 5 additions & 86 deletions Agent_3.py
@@ -1,6 +1,4 @@
import random
from pprint import pprint

import config
import utils
from prey import Prey
@@ -33,7 +31,7 @@ def __init__(self, prey_loc, predator_loc):

self.prey_belief_state = dict.fromkeys([i for i in range(50)], 1/49)
self.prey_belief_state[self.curr_pos] = 0
# print(f'Initial belief state: {self.prey_belief_state}')


def move(self, arena, prey_loc, predator_loc):
"""
@@ -108,28 +106,12 @@ def begin(arena):
'after_survey')
if max(agent3.prey_belief_state.values()) == 1:
prey_certainty_counter += 1
# print('belief state after survey:')
# pprint(agent3.prey_belief_state)
# print('sum of prob: ', sum(agent3.prey_belief_state.values()))
"""
# print(found_prey)
if found_prey:
# found the prey and now have to use a variable assignment tree to track the prey
pass
else:
# Choose a node at random and assume it is where the prey is
agent3.prey_belief_state[node_surveyed] = 0
for i in range(50):
degree = utils.get_degree(arena, i)
if i != node_surveyed:
agent3.prey_belief_state[i] += 1/48 # Has to be phrased in the form of previous probability and next probability in terms of the degree of neighbours of this node
"""

believed_prey_curr_pos = utils.return_max_prey_belief(agent3.prey_belief_state, arena)
# print(f'believed_prey_curr_pos: {believed_prey_curr_pos}')

#using the max belief node for prey
agent3.move(arena, believed_prey_curr_pos, predator.curr_pos)
# print(f'agent after movement: {agent3.curr_pos}')

# Checking termination states
if agent3.curr_pos == prey.curr_pos:
win_count += 1
@@ -147,9 +129,6 @@ def begin(arena):
node_surveyed, \
'after_agent_moves')

# print('belief state after_agent_moves:')
# pprint(agent3.prey_belief_state)
# print('sum of prob: ', sum(agent3.prey_belief_state.values()))

prey.move(arena)

@@ -166,9 +145,7 @@ def begin(arena):
node_surveyed, \
'after_prey_moves')

# print('belief state after_prey_moves:')
# pprint(agent3.prey_belief_state)
# print('sum of prob: ', sum(agent3.prey_belief_state.values()))

predator.move(agent3.curr_pos, arena)

# Checking termination states
@@ -191,64 +168,6 @@ def begin(arena):

data_row = ["Agent_3", win_count * 100 / number_of_games, loss_count * 100 / number_of_games,
forced_termination * 100 / number_of_games, prey_certainty * 100 / number_of_games, 100.0]
# data.append(data_row)
return data_row

return data_row

"""
# Class Test code
#arena=env.generate_environement()
arena = {0: [1, 49, 48],
1: [2, 0, 46],
2: [3, 1, 5],
3: [4, 2, 7],
4: [5, 3, 6],
5: [6, 4, 2],
6: [7, 5, 4],
7: [8, 6, 3],
8: [9, 7, 10],
9: [10, 8, 11],
10: [11, 9, 8],
11: [12, 10, 9],
12: [13, 11, 14],
13: [14, 12, 15],
14: [15, 13, 12],
15: [16, 14, 13],
16: [17, 15, 19],
17: [18, 16, 20],
18: [19, 17, 21],
19: [20, 18, 16],
20: [21, 19, 17],
21: [22, 20, 18],
22: [23, 21, 26],
23: [24, 22, 25],
24: [25, 23, 28],
25: [26, 24, 23],
26: [27, 25, 22],
27: [28, 26, 30],
28: [29, 27, 24],
29: [30, 28, 31],
30: [31, 29, 27],
31: [32, 30, 29],
32: [33, 31, 35],
33: [34, 32],
34: [35, 33, 39],
35: [36, 34, 32],
36: [37, 35, 38],
37: [38, 36, 41],
38: [39, 37, 36],
39: [40, 38, 34],
40: [41, 39, 44],
41: [42, 40, 37],
42: [43, 41],
43: [44, 42, 47],
44: [45, 43, 40],
45: [46, 44, 49],
46: [47, 45, 1],
47: [48, 46, 43],
48: [49, 47, 0],
49: [0, 48, 45]}
# print(a1.curr_pos)
a1.move(arena, 5, 6)
# pprint(arena)
"""