Example no. 1
0
def opponent_ghost_distance_evaluation(game_state: GameState, agent_index,
                                       search_agent: CaptureAgent):
    """
    Evaluate how far the given agent is from the opposing team's ghosts.

    :param game_state: current game state
    :param agent_index: playing player's index
    :param search_agent: CaptureAgent whose distancer supplies maze distances
    :return: maze distance to the farthest visible opponent ghost, or
        POSITIVE_INFINITY when no opponent ghost position is known
    """
    ghost_positions = utility.get_opponents_ghosts_positions(
        game_state, agent_index).values()

    # No observable opponent ghosts: treat them as infinitely far away.
    if not ghost_positions:
        return POSITIVE_INFINITY

    my_position = game_state.getAgentPosition(agent_index)
    return max(
        search_agent.distancer.getDistance(ghost_position, my_position)
        for ghost_position in ghost_positions)
Example no. 2
0
def are_foods_in_same_cluster(food1,
                              food2,
                              game_state: GameState,
                              agent: CaptureAgent,
                              cluster_radius=0):
    """
    Check whether two food positions belong to the same food cluster.

    A cluster is the set of food cells reachable from ``food1`` through
    adjacent food cells, where every member lies within ``cluster_radius``
    maze distance (inclusive) of ``food1``.  The search is a DFS over
    neighboring food cells.

    :param food1: (x, y) seed food position of the cluster
    :param food2: (x, y) food position to test for membership
    :param game_state: current game state, used to test food presence
    :param agent: CaptureAgent providing getMazeDistance
    :param cluster_radius: inclusive maximum maze distance from food1
    :return: True if food2 is in food1's cluster, False otherwise
    """

    visited = set()

    stack = Stack()
    stack.push(food1)

    while not stack.isEmpty():
        current_food = stack.pop()
        visited.add(current_food)

        if current_food == food2:
            return True

        for neighbor in get_neighbor(current_food):
            if neighbor in visited:
                continue

            # BUGFIX: the radius test previously measured current_food
            # (loop-invariant inside this neighbor loop) instead of the
            # candidate neighbor, so a food one step beyond cluster_radius
            # could still be pushed by its parent and matched as food2.
            # Testing the neighbor keeps every cluster member within the
            # inclusive radius, as the docstring promises.
            if agent.getMazeDistance(neighbor, food1) > cluster_radius:
                continue

            x, y = neighbor
            if game_state.hasFood(x, y):
                stack.push(neighbor)

    return False
Example no. 3
0
    def registerInitialState(self, gameState: GameState):
        """
        Run the base CaptureAgent setup, then precompute board geometry
        (movable cells, neighbor map, dead-end paths, team boundaries)
        that later decision making relies on.

        :param gameState: the initial game state for this match
        """
        super().registerInitialState(gameState)

        # Movable cells partitioned by board side (red half / blue half).
        self.red_movable, self.blue_movable = utility.partition_location(
            gameState)
        # All non-wall cells (asList(False) lists cells where the wall
        # grid is False).
        self.all_movable = gameState.getWalls().asList(False)
        # Adjacency map over the movable cells.
        self.neighbors = utility.calculate_neighbors(gameState,
                                                     self.all_movable)
        # Dead-end corridors and their per-cell path lengths.
        self.dead_end_path = utility.calculate_dead_end(
            self.all_movable, self.neighbors)
        self.dead_end_path_length = dead_end_path_length_calculation(
            self.dead_end_path)
        # Boundary cells for each team (flag presumably selects red vs
        # blue side — TODO confirm against agent_boundary_calculation).
        self.red_boundary = utility.agent_boundary_calculation(
            self.red_movable, True)
        self.blue_boundary = utility.agent_boundary_calculation(
            self.blue_movable, False)

        # Compute the shared initial offensive targets only once; the
        # class-level cache keeps every BasicAgent instance consistent.
        if not BasicAgent.INITIAL_TARGET:
            BasicAgent.INITIAL_TARGET = utility.initial_offensive_position_calculation(
                self.red_boundary, self.blue_boundary, self,
                utility.get_agents_positions(gameState, 0),
                utility.get_agents_positions(gameState, 1), gameState)
Example no. 4
0
    def __init__(self, startingGameState: GameState,
                 captureAgent: CaptureAgent):
        """
        Cache the static facts of this search problem: walls, grid size,
        our side's boundary column, our food, boundary entry positions,
        and the distance from the start to the goal position.

        :param startingGameState: game state the search starts from
        :param captureAgent: the CaptureAgent this problem is built for
        """
        # Number of search nodes expanded (diagnostic counter).
        self.expanded = 0
        self.startingGameState = startingGameState
        self.captureAgent: CaptureAgent = captureAgent
        self.enemies = self.captureAgent.getOpponents(startingGameState)
        self.walls = startingGameState.getWalls()
        # NOTE(review): attribute name keeps the existing (misspelled)
        # spelling "intialPosition" for compatibility with callers.
        self.intialPosition = self.startingGameState.getAgentPosition(
            self.captureAgent.index)
        self.gridWidth = self.captureAgent.getFood(startingGameState).width
        self.gridHeight = self.captureAgent.getFood(startingGameState).height
        # Our side's last column before the midline: red owns the left
        # half, blue the right half.
        if self.captureAgent.red:
            self.boundary = int(self.gridWidth / 2) - 1
            self.myPreciousFood = self.startingGameState.getRedFood()
        else:
            self.boundary = int(self.gridWidth / 2)
            self.myPreciousFood = self.startingGameState.getBlueFood()

        # Boundary cells we can stand on, and the mirror cells enemies
        # could enter from.
        (self.viableBoundaryPositions,
         self.possibleEnemyEntryPositions) = self.getViableBoundaryPositions()

        self.GOAL_POSITION = self.getGoalPosition()
        self.goalDistance = self.captureAgent.getMazeDistance(
            self.GOAL_POSITION, self.intialPosition)
Example no. 5
0
    def UCT(self, rootState, index, enemyIndices ):
        """
        Run UCT (Monte Carlo tree search) from rootState within a time
        budget and return the move of the most-visited root child.

        :param rootState: game state to search from
        :param index: index of the agent to move at the root
        :param enemyIndices: opponent agent indices, used by fixState
        :return: the root action with the highest visit count
        """
        # Build a search-friendly copy of the state and the root node.
        fixedState = fixState(rootState, index, enemyIndices)
        rootNode = Node(state = fixedState, index=index)

        # Time budget: a 0.1s floor plus an even share of the remaining
        # clock spread over the remaining opponent moves.
        # NOTE(review): assumes a 300-move game and a 120s clock on top
        # of self.initialTime — confirm against the contest rules.
        timeout = time.time() + 0.1
        movesLeft = 300 - self.movesMade
        timeLeft = self.initialTime + 120 - time.time()
        if timeLeft > 0 and movesLeft > 0:
            timeout += timeLeft / ((rootState.getNumAgents() -1 )*movesLeft)

        #print "time for this move", timeout - time.time(), "moves left", movesLeft, "time left", timeLeft

        # Iteration count, kept only for the debug print below.
        counter = 0

        while time.time() < timeout:
            counter += 1

            node = rootNode
            # Work on a fresh copy so the tree's stored state is untouched.
            state = GameState(node.state)
            index = node.index

            # Selection: descend while the node is fully expanded and has
            # children, following UCT's child-selection policy.
            while not node.untriedMoves and node.childNodes:
                node = node.UCTSelectChild()
                state = state.generateSuccessor(index % state.getNumAgents(), node.move)
                index += 1

            # Expansion: add one randomly chosen untried child, if any.
            if node.untriedMoves:
                move = random.choice(node.untriedMoves)
                state = state.generateSuccessor(index % state.getNumAgents(), move)
                node = node.addChild(move, state, (index+1) % state.getNumAgents() )

            count = 0
            # Rollout: play up to 8 random plies from the expanded node.
            while count < 8 and state.getLegalActions(index % state.getNumAgents()):

                legalActions = state.getLegalActions(index % state.getNumAgents())
                state = state.generateSuccessor(index % state.getNumAgents(), random.choice(legalActions))

                index += 1
                count += 1

            # Backpropagation: push the rollout evaluation up to the root.
            evaluation = self.evaluate(state)
            while node:
                node.update(evaluation)
                node = node.parentNode

        #print "iterated ", counter, " times"

        return max(rootNode.childNodes, key = lambda c: c.visits).move # return the move that was most visited
Example no. 6
0
def initial_offensive_position_calculation(red_boundary_positions: list,
                                           blue_boundary_positions: list,
                                           agent: CaptureAgent,
                                           red_agents_position,
                                           blue_agents_position,
                                           game_state: GameState):
    """
    Assign an initial offensive boundary target to every agent.

    The first agent of each team (index 0 for red, 1 for blue) takes the
    boundary position closest to it (ties broken uniformly at random) and
    that position is removed from the team's pool.  Every later agent
    samples a target from the remaining pool with probability proportional
    to its maze distance; an exhausted pool is refilled from the original
    boundary list.

    :param red_boundary_positions: red team's candidate boundary cells
        (mutated: the lead agent's pick is removed, as before)
    :param blue_boundary_positions: blue team's candidate boundary cells
    :param agent: CaptureAgent providing getMazeDistance
    :param red_agents_position: mapping of agent index -> red position
    :param blue_agents_position: mapping of agent index -> blue position
    :param game_state: current game state
    :return: list of target positions, one per agent in index order
    """
    from teams.pacman_ai.inference.inference import DiscreteDistribution

    def _pick_target(working, original, my_position, is_lead_agent):
        """Pick one target; return (target, possibly-refilled working list)."""
        if is_lead_agent:
            # Closest boundary position, ties broken at random; remove it
            # so teammates cannot sample the same cell.
            distances = [
                agent.getMazeDistance(my_position, pos) for pos in working
            ]
            min_dist = min(distances)
            target = random.choice([
                pos for pos, dist in zip(working, distances)
                if dist == min_dist
            ])
            working.remove(target)
            return target, working

        # No positions left for this team: start again from the originals.
        if not working:
            working = original.copy()

        # Sample proportionally to distance (farther cells more likely),
        # spreading non-lead agents away from their start.
        distribution = DiscreteDistribution()
        for pos in working:
            distribution[pos] = agent.getMazeDistance(my_position, pos)
        distribution.normalize()
        return distribution.sample(), working

    red_original = red_boundary_positions.copy()
    blue_original = blue_boundary_positions.copy()
    red_working = red_boundary_positions
    blue_working = blue_boundary_positions
    targets = []

    for i in range(0, game_state.getNumAgents()):
        if game_state.isOnRedTeam(i):
            target, red_working = _pick_target(red_working, red_original,
                                               red_agents_position[i],
                                               i == 0)
        else:
            target, blue_working = _pick_target(blue_working, blue_original,
                                                blue_agents_position[i],
                                                i == 1)
        targets.append(target)

    return targets
Example no. 7
0
def get_agent_num_food_packed(game_state: GameState, agent_index):
    """Return how many food pellets the given agent is currently carrying."""
    agent_state = game_state.getAgentState(agent_index)
    return agent_state.numCarrying
Example no. 8
0
def is_agent_scared(game_state: GameState, agent_index):
    """Return True while the given agent's scared timer is still running."""
    scared_time_remaining = game_state.getAgentState(agent_index).scaredTimer
    return scared_time_remaining > 0
Example no. 9
0
def is_agent_ghost(game_state: GameState, agent_index):
    """Return True if the given agent is in ghost form (i.e. not a Pacman)."""
    is_pacman = game_state.getAgentState(agent_index).isPacman
    return not is_pacman