def get_distance_to_enemy_ghosts_factor(self, agent_positions, agent_index):
    """Penalty factor for standing close to enemy ghosts.

    Returns 0 when this agent is itself a ghost, when neither opponent is
    currently a ghost, or when the relevant ghost distance exceeds 3 maze
    steps; otherwise returns a negative value that grows stronger the
    closer the ghost(s) are.
    """
    first_enemy = (agent_index + 1) % 4
    second_enemy = (agent_index + 3) % 4

    # Ghosts have nothing to fear from other ghosts.
    if self.is_ghost(agent_index):
        return 0

    enemy_1_is_ghost = self.is_ghost(first_enemy)
    enemy_2_is_ghost = self.is_ghost(second_enemy)
    if not (enemy_1_is_ghost or enemy_2_is_ghost):
        return 0

    my_pos = agent_positions[agent_index]
    dist_1 = CaptureAgent.getMazeDistance(self.root_agent_object, my_pos,
                                          agent_positions[first_enemy])
    dist_2 = CaptureAgent.getMazeDistance(self.root_agent_object, my_pos,
                                          agent_positions[second_enemy])

    # Use the mean distance when both enemies are ghosts, otherwise the
    # distance to whichever one is.
    if enemy_1_is_ghost and enemy_2_is_ghost:
        distance = (dist_1 + dist_2) / 2
    elif enemy_1_is_ghost:
        distance = dist_1
    else:
        distance = dist_2

    # Only nearby ghosts matter.
    if distance > 3:
        return 0
    # Avoid dividing by zero when sharing a square with a ghost.
    if distance == 0:
        distance = 1
    return (-1 / distance) * 5
def chooseAction(self, gameState):
    """Choose an action for this turn.

    If any enemy appears to be within ``threshold`` maze steps, flee by
    picking a random legal action; otherwise greedily pick the action whose
    successor state is closest to the nearest food pellet.

    :param gameState: current GameState for this agent's turn
    :return: a legal action string (defaults to "Stop")
    """
    actions = gameState.getLegalActions(self.index)
    optimalAction = "Stop"
    threshold = 8

    my_pos = gameState.getAgentPosition(self.index)
    # BUG FIX: the original called the undefined name getAgentDistances(enemy)
    # and passed a *distance* where getMazeDistance expects a position.  Use
    # the exact enemy position when it is observable, otherwise fall back to
    # the game's noisy sonar reading for that enemy.
    noisy_distances = gameState.getAgentDistances()
    danger = False
    for enemy in self.enemyTeam:
        enemy_pos = gameState.getAgentPosition(enemy)
        if enemy_pos is not None:
            enemy_dist = self.getMazeDistance(my_pos, enemy_pos)
        else:
            enemy_dist = noisy_distances[enemy]
        if enemy_dist < threshold:
            danger = True
            break

    if danger:
        optimalAction = random.choice(actions)
    else:
        optimal = 999
        for action in actions:
            successor = self.getSuccessor(gameState, action)
            foodList = self.getFood(successor).asList()
            if not foodList:
                # No food left to chase from this successor.
                continue
            myPos = successor.getAgentState(self.index).getPosition()
            # min over (distance, food) pairs picks the closest pellet; its
            # first element is already the distance (the original recomputed
            # the same maze distance a second time).
            distance = min(
                (self.getMazeDistance(myPos, food), food) for food in foodList)[0]
            if distance < optimal:
                optimal = distance
                optimalAction = action
    return optimalAction
def _get_successors(game_state: GameState, agent_position, targets: list,
                    ghosts: dict, neighbors, agent: CaptureAgent,
                    ghost_influence_range, step_cost):
    """Expand a search node of the form (position, remaining targets).

    A neighboring square is a legal successor only when every ghost is either
    scared or farther away than ghost_influence_range.  Stepping onto a target
    removes it from the remaining-targets tuple.

    Returns a list of ((position, targets), direction, step_cost) triples.
    """
    successors = []
    for neighbor_pos in neighbors[agent_position]:
        # Reject squares inside a non-scared ghost's influence radius.
        is_safe = all([
            utility.is_agent_scared(game_state, ghost_index)
            or agent.getMazeDistance(neighbor_pos, ghost_pos) > ghost_influence_range
            for ghost_index, ghost_pos in ghosts.items()
        ])
        if not is_safe:
            continue
        direction = utility.position_to_direction(agent_position, neighbor_pos)
        if neighbor_pos in targets:
            # Consume the target we just stepped onto.
            remaining = list(targets)
            remaining.remove(neighbor_pos)
            successors.append(((neighbor_pos, tuple(remaining)), direction, step_cost))
        else:
            successors.append(((neighbor_pos, targets), direction, step_cost))
    return successors
def get_distance_to_agent(self, agent_positions, from_agent, to_agent):
    """Distance between two agents.

    Returns ``[distance, noisy]``: the exact maze distance with
    ``noisy=False`` when both positions are known, otherwise the game's
    noisy sonar reading for ``to_agent`` with ``noisy=True``.
    ``(-1, -1)`` marks an unknown position.
    """
    from_pos = agent_positions[from_agent]
    to_pos = agent_positions[to_agent]
    if from_pos == (-1, -1) or to_pos == (-1, -1):
        # At least one position is unknown: fall back to the noisy estimate.
        noisy_distances = self.node_state.getAgentDistances()
        return [noisy_distances[to_agent], True]
    exact = CaptureAgent.getMazeDistance(self.root_agent_object, from_pos, to_pos)
    return [exact, False]
def closest_food(agent_position, game_state: GameState, is_red, agent: CaptureAgent):
    """Pick a nearest enemy food pellet, uniformly at random among ties.

    :param agent_position: (x, y) position to measure from
    :param game_state: current game state
    :param is_red: True if the querying agent is on the red team (so the
        blue side's food is the target)
    :param agent: agent providing maze distances
    :return: the chosen food position
    :raises ValueError: if the opposing side has no food left (min of empty)
    """
    if is_red:
        food = game_state.getBlueFood().asList()
    else:
        food = game_state.getRedFood().asList()
    # Compute each maze distance exactly once (the original computed every
    # distance twice: once for min(), once again inside filter()).
    distances = [agent.getMazeDistance(agent_position, pos) for pos in food]
    min_dist = min(distances)
    candidates = [pos for pos, dist in zip(food, distances) if dist == min_dist]
    return random.choice(candidates)
def kill_enemy_agent_factor(self, agent_positions, agent_index):
    """Reward factor for hunting the enemy agent carrying the most food.

    Returns 0 when neither enemy carries food or when this agent is scared
    (cannot kill).  Otherwise the factor grows with the target's food load
    and with proximity to it — or, when only a noisy distance is available,
    with proximity to a fixed interception point near the border.
    """
    [enemy_agent_index_1, enemy_agent_index_2] = self.get_enemy_indices(agent_index)
    food_enemy_1 = self.node_state.data.agentStates[
        enemy_agent_index_1].numCarrying
    food_enemy_2 = self.node_state.data.agentStates[
        enemy_agent_index_2].numCarrying
    # Nothing to gain: no enemy is carrying food.
    if (food_enemy_1 == 0 and food_enemy_2 == 0):
        return 0
    # A scared ghost cannot kill, so chasing is pointless.
    if self.node_state.data.agentStates[agent_index].scaredTimer > 0:
        return 0
    # Target whichever enemy carries more food (ties go to enemy 2).
    if (food_enemy_1 > food_enemy_2):
        target_enemy = enemy_agent_index_1
        food_enemy = food_enemy_1
    else:
        target_enemy = enemy_agent_index_2
        food_enemy = food_enemy_2
    # noisy is True when only the sonar estimate was available.
    [distance_enemy, noisy] = self.get_distance_to_agent(agent_positions,
                                                         agent_index,
                                                         target_enemy)
    # NOTE(review): odd agent indices appear to be the blue team here
    # (consistent with the other factor methods) — confirm against the
    # contest's team-index convention.
    we_are_blue_team = agent_index % 2
    if we_are_blue_team:
        #center_of_our_half_pos = (3 * self.node_state.data.layout.width/4,self.node_state.data.layout.height/2)
        # Interception points just on our side of the border: agents 0/1
        # guard the lower quarter, agents 2/3 the upper quarter.
        if (agent_index < 2):
            goal_agent = (self.node_state.data.layout.width / 2 + 1,
                          self.node_state.data.layout.height / 4)
        else:
            goal_agent = (self.node_state.data.layout.width / 2 + 1,
                          3 * self.node_state.data.layout.height / 4)
    else:
        #center_of_our_half_pos = (self.node_state.data.layout.width / 4, self.node_state.data.layout.height / 2)
        if (agent_index < 2):
            goal_agent = (self.node_state.data.layout.width / 2 - 1,
                          self.node_state.data.layout.height / 4)
        else:
            goal_agent = (self.node_state.data.layout.width / 2 - 1,
                          3 * self.node_state.data.layout.height / 4)
    # Snap the (possibly fractional / walled) goal to a reachable square.
    goal_agent = self.get_closest_non_wall_position(goal_agent)
    if (noisy):
        # Can't trust the sonar reading: head for the interception point instead.
        distance_enemy = CaptureAgent.getMazeDistance(
            self.root_agent_object, agent_positions[agent_index], goal_agent)
    # +0.1 avoids division by zero when standing on the target.
    factor = 1 / (distance_enemy + 0.1) * food_enemy
    factor = factor * 50
    return factor
def evalute_state_one_agent_defensive(self, agent_position, agent_index,
                                      agent_is_on_enemy_team,
                                      team_mate_position):
    """Score a state from a defender's point of view (higher is better).

    Heavily penalizes the presence and proximity of invading Pacmen,
    penalizes straying from the defensive anchor points (middle, upper and
    lower half), and rewards spreading out from the team mate.
    """
    root = self.root_agent_object

    def distance_to_anchor(anchor):
        # Anchors may be invalid/unset; treat those as contributing nothing.
        if self.check_if_valid_coordinates(anchor):
            return CaptureAgent.getMazeDistance(root, agent_position, anchor)
        return 0

    dist_middle = distance_to_anchor(root.middle)
    dist_upper = distance_to_anchor(root.upperHalf)
    dist_lower = distance_to_anchor(root.lowerHalf)

    opponent_states = [
        self.node_state.getAgentState(i)
        for i in CaptureAgent.getOpponents(root, self.node_state)
    ]
    # Invaders are visible opponents currently in Pacman form on our side.
    invaders = [
        s for s in opponent_states
        if s.isPacman and s.getPosition() is not None
    ]

    closest_invader = 0
    if invaders:
        closest_invader = min(
            CaptureAgent.getMazeDistance(root, agent_position, s.getPosition())
            for s in invaders)

    spread = CaptureAgent.getMazeDistance(root, agent_position,
                                          team_mate_position)

    return (len(invaders) * (-9999)
            + closest_invader * (-1000)
            + dist_middle * (-200)
            + dist_lower * (-50)
            + dist_upper * (-50)
            + spread * 200)
def chooseAction(self, gameState):
    """Walk to the first capsule we are defending, then hold position.

    While not yet on guard, any action that strictly reduces the maze
    distance to the capsule is taken; upon arrival the guard flag is
    latched (returning one arbitrary legal action that turn) and "Stop"
    is returned from then on.
    """
    actions = gameState.getLegalActions(self.index)
    capsules = CaptureAgent.getCapsulesYouAreDefending(self, gameState)
    if capsules:
        target = capsules[0]
        my_pos = gameState.getAgentPosition(self.index)
        if self.onGuard or my_pos == target:
            # Arrived at the capsule: latch guard mode exactly once.
            if not self.onGuard:
                self.onGuard = True
                return actions[0]
        else:
            # Still traveling: take the first action that gets us closer.
            current_dist = CaptureAgent.getMazeDistance(self, my_pos, target)
            for action in actions:
                successor = self.getSuccessor(gameState, action)
                next_pos = successor.getAgentState(self.index).getPosition()
                next_dist = CaptureAgent.getMazeDistance(self, next_pos, target)
                if next_dist < current_dist:
                    return action
    return "Stop"
def get_opponents_ghosts_min_dist(game_state: GameState, agent_index,
                                  agent: CaptureAgent, agent_position):
    """Maze distance to the nearest non-scared opposing ghost.

    Returns POSITIVE_INFINITY when no opposing ghost is both visible and
    un-scared.
    """
    ghosts = get_opponents_ghosts_positions(game_state, agent_index)
    threatening_distances = (
        agent.getMazeDistance(agent_position, ghost_pos)
        for ghost_index, ghost_pos in ghosts.items()
        # Scared ghosts pose no threat, so they are ignored.
        if not is_agent_scared(game_state, ghost_index))
    return min(threatening_distances, default=POSITIVE_INFINITY)
def get_distances_to_food_factor(self, agent_position, agent_index, enemy_team):
    """Inverse distance to the nearest relevant food pellet (or our target capsule).

    ``enemy_team`` selects whether to look at the food we defend or the food
    we attack.  Returns 1 when there is nothing to measure against, otherwise
    1 / min-distance (with distance 0 treated as 1).
    """
    root = self.root_agent_object
    state = self.node_state
    if enemy_team:
        food = CaptureAgent.getFoodYouAreDefending(root, state)
    else:
        food = CaptureAgent.getFood(root, state)

    # Classify the capsules by which half of the board they sit on.
    blue_capsule = (-1, -1)
    red_capsule = (-1, -1)
    half_width = state.data.layout.width / 2
    for capsule in state.data.capsules:
        if capsule[0] > half_width:
            blue_capsule = capsule
        else:
            red_capsule = capsule
    # Odd agent indices are the blue team, which targets the red-side capsule.
    our_capsule = red_capsule if agent_index % 2 else blue_capsule

    # Every food square plus (if it lies on the grid) our target capsule.
    targets = [
        (x, y)
        for x in range(food.width)
        for y in range(food.height)
        if food.data[x][y] or (x, y) == our_capsule
    ]
    if not targets:
        return 1
    nearest = min(
        CaptureAgent.getMazeDistance(root, agent_position, target)
        for target in targets)
    if nearest == 0:
        nearest = 1
    return 1 / nearest
def get_closest_distance_to_home(self, agent_position, agent_index):
    """Shortest maze distance from agent_position to our home boundary column.

    Blue agents (odd index) return via the column just right of center,
    red agents via the column just left of it.  Considers every non-wall
    square in that column.
    """
    layout = self.node_state.data.layout
    if agent_index % 2:
        boundary_col = int(layout.width / 2)
    else:
        boundary_col = int(layout.width / 2 - 1)
    return min(
        CaptureAgent.getMazeDistance(self.root_agent_object, agent_position,
                                     (boundary_col, row))
        for row in range(layout.height)
        if not layout.isWall((boundary_col, row)))
def are_foods_in_same_cluster(food1, food2, game_state: GameState,
                              agent: CaptureAgent, cluster_radius=0):
    """Decide whether two food pellets belong to the same food cluster.

    Performs a depth-first search from food1 over adjacent food squares,
    only expanding squares within cluster_radius maze steps of food1.

    :param food1: starting food position (x, y)
    :param food2: food position to look for
    :param game_state: current game state (food lookups)
    :param agent: agent providing maze distances
    :param cluster_radius: inclusive maze-distance bound around food1
    :return: True if food2 is reachable from food1 through adjacent food
        squares, else False
    """
    visited = set()
    stack = Stack()
    stack.push(food1)
    while not stack.isEmpty():
        current_food = stack.pop()
        if current_food in visited:
            # A square can be pushed more than once before it is popped.
            continue
        visited.add(current_food)
        if current_food == food2:
            return True
        # This check was loop-invariant inside the neighbor loop in the
        # original: hoisted so the maze distance is computed once per
        # square, not once per neighbor.  Squares connected to food1 but
        # beyond the radius are not expanded further.
        if agent.getMazeDistance(current_food, food1) > cluster_radius:
            continue
        for neighbor in get_neighbor(current_food):
            if neighbor in visited:
                continue
            x, y = neighbor
            if game_state.hasFood(x, y):
                stack.push(neighbor)
    return False
def initial_offensive_position_calculation(red_boundary_positions: list,
                                           blue_boundary_positions: list,
                                           agent: CaptureAgent,
                                           red_agents_position,
                                           blue_agents_position,
                                           game_state: GameState):
    """Assign an initial boundary target position to every agent.

    The first agent of each team (red agent 0, blue agent 1) gets the
    closest boundary position (random among ties), which is then removed
    from that team's pool.  Every other agent samples a target from the
    remaining pool.

    NOTE(review): the sampling distribution weights each position by its
    maze distance, so *farther* positions are more likely to be drawn —
    confirm this is intentional (it may be meant to spread agents out).

    Returns the list of targets, ordered by agent index.
    """
    # Local import to avoid a module-level dependency cycle (presumably).
    from teams.pacman_ai.inference.inference import DiscreteDistribution
    # Keep copies so an exhausted pool can be refilled below.
    red_boundary_positions_original = red_boundary_positions.copy()
    blue_boundary_positions_original = blue_boundary_positions.copy()
    targets = []
    for i in range(0, game_state.getNumAgents()):
        if game_state.isOnRedTeam(i):
            if i == 0:
                # assign closest boundary position to red agent 0
                min_dist = min([
                    agent.getMazeDistance(red_agents_position[i], pos)
                    for pos in red_boundary_positions
                ])
                # Break distance ties uniformly at random.
                agent0_target = random.choice(
                    list(
                        filter(
                            lambda x: agent.getMazeDistance(
                                red_agents_position[i], x) == min_dist,
                            red_boundary_positions)))
                # Reserve this position so teammates do not pick it.
                red_boundary_positions.remove(agent0_target)
                targets.append(agent0_target)
            else:
                # no possible positions left in the pool, start again
                if not red_boundary_positions:
                    red_boundary_positions = red_boundary_positions_original.copy(
                    )
                # Sample a target with probability proportional to distance.
                distribution = DiscreteDistribution()
                for pos in red_boundary_positions:
                    distribution[pos] = agent.getMazeDistance(
                        red_agents_position[i], pos)
                distribution.normalize()
                agent_i_target = distribution.sample()
                targets.append(agent_i_target)
        else:
            if i == 1:
                # assign closest boundary position to blue agent 1
                # (mirrors the red agent-0 branch above)
                min_dist = min([
                    agent.getMazeDistance(blue_agents_position[i], pos)
                    for pos in blue_boundary_positions
                ])
                agent0_target = random.choice(
                    list(
                        filter(
                            lambda x: agent.getMazeDistance(
                                blue_agents_position[i], x) == min_dist,
                            blue_boundary_positions)))
                blue_boundary_positions.remove(agent0_target)
                targets.append(agent0_target)
            else:
                # no possible positions left in the pool, start again
                if not blue_boundary_positions:
                    blue_boundary_positions = blue_boundary_positions_original.copy(
                    )
                distribution = DiscreteDistribution()
                for pos in blue_boundary_positions:
                    distribution[pos] = agent.getMazeDistance(
                        blue_agents_position[i], pos)
                distribution.normalize()
                agent_i_target = distribution.sample()
                targets.append(agent_i_target)
    return targets
def get_maintain_distance_factor(self, agent_position_1, agent_position_2):
    """Small reward that grows with the separation between two agents.

    Used to encourage teammates to spread out; scaled down by 300 so it
    only breaks ties between otherwise similar states.
    """
    separation = CaptureAgent.getMazeDistance(self.root_agent_object,
                                              agent_position_1,
                                              agent_position_2)
    return separation / 300