class GameState:
    """
    A GameState specifies the full game state, including the food, capsules,
    agent configurations and score changes.

    GameStates are used by the Game object to capture the actual state of the
    game and can be used by agents to reason about the game.

    Much of the information in a GameState is stored in a GameStateData
    object.  We strongly suggest that you access that data via the accessor
    methods below rather than referring to the GameStateData object directly.

    Note that in classic Pacman, Pacman is always agent 0.
    """

    ####################################################
    # Accessor methods: use these to access state data #
    ####################################################

    # Static variable keeps track of which states have had
    # get_legal_actions called (used by autograders/trackers).
    explored = set()

    @staticmethod
    def get_and_reset_explored():
        """Return the set of explored states seen so far and reset it."""
        tmp = GameState.explored.copy()
        GameState.explored = set()
        return tmp

    def get_legal_actions(self, agent_index=0):
        """
        Returns the legal actions for the agent specified.
        """
        # GameState.explored.add(self)
        # Terminal states have no legal actions.
        if self.is_win() or self.is_lose():
            return []

        if agent_index == 0:  # Pacman is moving
            return PacmanRules.get_legal_actions(self)
        else:
            return GhostRules.get_legal_actions(self, agent_index)

    def generate_successor(self, agent_index, action):
        """
        Returns the successor state after the specified agent takes the action.
        """
        # Check that successors exist
        if self.is_win() or self.is_lose():
            raise Exception('Can\'t generate a successor of a terminal state.')

        # Copy current state
        state = GameState(self)

        # Let agent's logic deal with its action's effects on the board
        if agent_index == 0:  # Pacman is moving
            # Reset the per-move "eaten" flags before Pacman acts.
            state.data._eaten = [False] * state.get_num_agents()
            PacmanRules.apply_action(state, action)
        else:  # A ghost is moving
            GhostRules.apply_action(state, action, agent_index)

        # Time passes
        if agent_index == 0:
            state.data.score_change += -TIME_PENALTY  # Penalty for waiting around
        else:
            GhostRules.decrement_timer(state.data.agent_states[agent_index])

        # Resolve multi-agent effects
        GhostRules.check_death(state, agent_index)

        # Book keeping
        state.data._agent_moved = agent_index
        state.data.score += state.data.score_change
        GameState.explored.add(self)
        GameState.explored.add(state)
        return state

    def get_legal_pacman_actions(self):
        """Convenience wrapper: legal actions for agent 0 (Pacman)."""
        return self.get_legal_actions(0)

    def generate_pacman_successor(self, action):
        """
        Generates the successor state after the specified pacman move.
        """
        return self.generate_successor(0, action)

    def get_pacman_state(self):
        """
        Returns an AgentState object for pacman (in game.py)

        state.pos gives the current position
        state.direction gives the travel vector
        """
        return self.data.agent_states[0].copy()

    def get_pacman_position(self):
        """Returns Pacman's current (x, y) position."""
        return self.data.agent_states[0].get_position()

    def get_ghost_states(self):
        """Returns the AgentState objects for all ghosts (agents 1..n-1)."""
        return self.data.agent_states[1:]

    def get_ghost_state(self, agent_index):
        """Returns the AgentState for the ghost with the given index (>= 1)."""
        if agent_index == 0 or agent_index >= self.get_num_agents():
            raise Exception("Invalid index passed to get_ghost_state")
        return self.data.agent_states[agent_index]

    def get_ghost_position(self, agent_index):
        """Returns the (x, y) position of the ghost with the given index."""
        if agent_index == 0:
            raise Exception("Pacman's index passed to get_ghost_position")
        return self.data.agent_states[agent_index].get_position()

    def get_ghost_positions(self):
        """Returns the positions of all ghosts, in agent-index order."""
        return [s.get_position() for s in self.get_ghost_states()]

    def get_num_agents(self):
        """Returns the total number of agents (Pacman plus ghosts)."""
        return len(self.data.agent_states)

    def get_score(self):
        """Returns the current score as a float."""
        return float(self.data.score)

    def get_capsules(self):
        """
        Returns a list of positions (x,y) of the remaining capsules.
        """
        return self.data.capsules

    def get_num_food(self):
        """Returns the number of food pellets remaining."""
        return self.data.food.count()

    def get_food(self):
        """
        Returns a Grid of boolean food indicator variables.

        Grids can be accessed via list notation, so to check
        if there is food at (x,y), just call

        current_food = state.get_food()
        if current_food[x][y] == True: ...
        """
        return self.data.food

    def get_walls(self):
        """
        Returns a Grid of boolean wall indicator variables.

        Grids can be accessed via list notation, so to check
        if there is a wall at (x,y), just call

        walls = state.get_walls()
        if walls[x][y] == True: ...
        """
        return self.data.layout.walls

    def has_food(self, x, y):
        """Returns True if there is food at (x, y)."""
        return self.data.food[x][y]

    def has_wall(self, x, y):
        """Returns True if there is a wall at (x, y)."""
        return self.data.layout.walls[x][y]

    def is_lose(self):
        """Returns True if this is a losing terminal state."""
        return self.data._lose

    def is_win(self):
        """Returns True if this is a winning terminal state."""
        return self.data._win

    #############################################
    #             Helper methods:               #
    # You shouldn't need to call these directly #
    #############################################

    def __init__(self, prev_state=None):
        """
        Generates a new state by copying information from its predecessor.
        """
        if prev_state is not None:
            # Successor state: copy the predecessor's data.
            self.data = GameStateData(prev_state.data)
        else:
            # Initial state: start from empty data.
            self.data = GameStateData()

    def deep_copy(self):
        """Returns a deep copy of this state (independent GameStateData)."""
        state = GameState(self)
        state.data = self.data.deep_copy()
        return state

    def __eq__(self, other):
        """
        Allows two states to be compared.
        """
        # hasattr guard makes comparison against arbitrary objects safe.
        return hasattr(other, 'data') and self.data == other.data

    def __hash__(self):
        """
        Allows states to be keys of dictionaries.
        """
        return hash(self.data)

    def __str__(self):
        return str(self.data)

    def initialize(self, layout, num_ghost_agents=1000):
        """
        Creates an initial game state from a layout array (see layout.py).
        """
        self.data.initialize(layout, num_ghost_agents)
class GameState:
    """
    A GameState specifies the full game state, including the food, capsules,
    agent configurations and score changes.

    GameStates are used by the Game object to capture the actual state of the
    game and can be used by agents to reason about the game.

    Much of the information in a GameState is stored in a GameStateData
    object.  We strongly suggest that you access that data via the accessor
    methods below rather than referring to the GameStateData object directly.
    """

    ####################################################
    # Accessor methods: use these to access state data #
    ####################################################

    def get_legal_actions(self, agent_index=0):
        """
        Returns the legal actions for the agent specified.
        """
        return AgentRules.get_legal_actions(self, agent_index)

    def generate_successor(self, agent_index, action):
        """
        Returns the successor state (a GameState object) after the specified
        agent takes the action.
        """
        # Copy current state
        state = GameState(self)

        # Find appropriate rules for the agent
        AgentRules.apply_action(state, action, agent_index)
        AgentRules.check_death(state, agent_index)
        AgentRules.decrement_timer(state.data.agent_states[agent_index])

        # Book keeping
        state.data._agent_moved = agent_index
        state.data.score += state.data.score_change
        state.data.timeleft = self.data.timeleft - 1
        return state

    def get_agent_state(self, index):
        """Returns the AgentState for the agent with the given index."""
        return self.data.agent_states[index]

    def get_agent_position(self, index):
        """
        Returns a location tuple if the agent with the given index is
        observable; if the agent is unobservable, returns None.
        """
        agent_state = self.data.agent_states[index]
        ret = agent_state.get_position()
        if ret:
            # Positions may be floats mid-move; expose integer grid coords.
            return tuple(int(x) for x in ret)
        return ret

    def get_num_agents(self):
        """Returns the total number of agents in the game."""
        return len(self.data.agent_states)

    def get_score(self):
        """
        Returns a number corresponding to the current score.
        """
        return self.data.score

    def get_red_food(self):
        """
        Returns a matrix of food that corresponds to the food on the red
        team's side.  For the matrix m, m[x][y]=true if there is food in
        (x,y) that belongs to red (meaning red is protecting it, blue is
        trying to eat it).
        """
        return half_grid(self.data.food, red=True)

    def get_blue_food(self):
        """
        Returns a matrix of food that corresponds to the food on the blue
        team's side.  For the matrix m, m[x][y]=true if there is food in
        (x,y) that belongs to blue (meaning blue is protecting it, red is
        trying to eat it).
        """
        return half_grid(self.data.food, red=False)

    def get_red_capsules(self):
        """Returns the capsules on the red team's side of the board."""
        return half_list(self.data.capsules, self.data.food, red=True)

    def get_blue_capsules(self):
        """Returns the capsules on the blue team's side of the board."""
        return half_list(self.data.capsules, self.data.food, red=False)

    def get_walls(self):
        """
        Just like get_food but for walls
        """
        return self.data.layout.walls

    def has_food(self, x, y):
        """
        Returns true if the location (x,y) has food, regardless of
        whether it's blue team food or red team food.
        """
        return self.data.food[x][y]

    def has_wall(self, x, y):
        """
        Returns true if (x,y) has a wall, false otherwise.
        """
        return self.data.layout.walls[x][y]

    def is_over(self):
        """Returns True if the game has ended."""
        return self.data._win

    def get_red_team_indices(self):
        """
        Returns a list of agent index numbers for the agents on the red team.
        """
        return self.red_team[:]

    def get_blue_team_indices(self):
        """
        Returns a list of the agent index numbers for the agents on the blue
        team.
        """
        return self.blue_team[:]

    def is_on_red_team(self, agent_index):
        """
        Returns true if the agent with the given agent_index is on the red
        team.
        """
        return self.teams[agent_index]

    def get_agent_distances(self):
        """
        Returns a noisy distance to each agent.
        """
        # hasattr replaces the original (and slower) `in dir(self)` check.
        if hasattr(self, 'agent_distances'):
            return self.agent_distances
        return None

    def get_distance_prob(self, true_distance, noisy_distance):
        "Returns the probability of a noisy distance given the true distance"
        if noisy_distance - true_distance in SONAR_NOISE_VALUES:
            return 1.0 / SONAR_NOISE_RANGE
        else:
            return 0

    def get_initial_agent_position(self, agent_index):
        "Returns the initial position of an agent."
        return self.data.layout.agent_positions[agent_index][1]

    def get_capsules(self):
        """
        Returns a list of positions (x,y) of the remaining capsules.
        """
        return self.data.capsules

    #############################################
    #             Helper methods:               #
    # You shouldn't need to call these directly #
    #############################################

    def __init__(self, prev_state=None):
        """
        Generates a new state by copying information from its predecessor.
        """
        if prev_state is not None:
            # Successor state: copy/share the predecessor's bookkeeping.
            self.data = GameStateData(prev_state.data)
            self.blue_team = prev_state.blue_team
            self.red_team = prev_state.red_team
            self.data.timeleft = prev_state.data.timeleft
            self.teams = prev_state.teams
            self.agent_distances = prev_state.agent_distances
        else:
            # Initial state: team data is filled in later by initialize().
            self.data = GameStateData()
            self.agent_distances = []

    def deep_copy(self):
        """Returns a deep copy of this state, with independent team lists."""
        state = GameState(self)
        state.data = self.data.deep_copy()
        state.data.timeleft = self.data.timeleft
        state.blue_team = self.blue_team[:]
        state.red_team = self.red_team[:]
        state.teams = self.teams[:]
        state.agent_distances = self.agent_distances[:]
        return state

    def make_observation(self, index):
        """
        Returns the state as observed by the agent with the given index:
        noisy distances to all agents, with distant opponents hidden.
        """
        state = self.deep_copy()

        # Adds the sonar signal
        pos = state.get_agent_position(index)
        n = state.get_num_agents()
        distances = [noisy_distance(pos, state.get_agent_position(i))
                     for i in range(n)]
        state.agent_distances = distances

        # Remove states of distant opponents
        if index in self.blue_team:
            team = self.blue_team
            other_team = self.red_team
        else:
            other_team = self.blue_team
            team = self.red_team
        for enemy in other_team:
            seen = False
            enemy_pos = state.get_agent_position(enemy)
            for teammate in team:
                if util.manhattan_distance(
                        enemy_pos,
                        state.get_agent_position(teammate)) <= SIGHT_RANGE:
                    seen = True
                    break  # one sighting is enough
            if not seen:
                state.data.agent_states[enemy].configuration = None
        return state

    def __eq__(self, other):
        """
        Allows two states to be compared.
        """
        # hasattr guard makes comparison against arbitrary objects safe
        # (the original raised AttributeError for objects without .data).
        return hasattr(other, 'data') and self.data == other.data

    def __hash__(self):
        """
        Allows states to be keys of dictionaries.
        """
        return hash(self.data)

    def __str__(self):
        return str(self.data)

    def initialize(self, layout, num_agents):
        """
        Creates an initial game state from a layout array (see layout.py).
        """
        self.data.initialize(layout, num_agents)
        positions = [a.configuration for a in self.data.agent_states]
        self.blue_team = [i for i, p in enumerate(positions)
                          if not self.is_red(p)]
        self.red_team = [i for i, p in enumerate(positions)
                         if self.is_red(p)]
        self.teams = [self.is_red(p) for p in positions]
        # This is usually 60 (always 60 with random maps)
        # However, if layout map is specified otherwise, it could be less
        global TOTAL_FOOD
        TOTAL_FOOD = layout.total_food

    def is_red(self, config_or_pos):
        """
        Returns True if the given position tuple or agent configuration is
        on the red (left) half of the board.
        """
        width = self.data.layout.width
        if isinstance(config_or_pos, tuple):
            return config_or_pos[0] < width / 2
        else:
            return config_or_pos.pos[0] < width / 2