Example #1
File: ucs.py  Project: bendelv/Inge-B3_AI
    def find_goal(self, state):
        """
        arg:
        - State: representation of the game at the time t.

        Define a way to solve the problem (eat all the food).

        return:
        - A stack containing the movements.
        """
        initState = self.useful_state(state)
        fringe = PriorityQueue()
        ways = {}

        priority = 0

        while not state.isWin():

            successors = state.generatePacmanSuccessors()

            for succ in successors:
                """
                Get successors after legal actions of current state.
                Push new state, move and current state on the fringe
                plus the cost of the edge to this successor.
                """
                succState = succ[0]
                succMove = succ[1]

                x, y = succState.getPacmanPosition()
                cost = self.cost_way(state.hasFood(x, y), priority)

                fringe.push([succState, succMove, state], cost)

            # Keep popping elements from the fringe until we reach one
            # whose state has not been visited yet.
            while True:
                priority, popped = fringe.pop()
                represCurrState = self.useful_state(popped[0])
                represPrevState = self.useful_state(popped[2])

                if represCurrState not in ways:
                    break

            ways[represCurrState] = [represPrevState, popped[1]]
            state = popped[0]

        moves = Stack()
        key = self.useful_state(popped[0])

        # Trace the way back to the initial state and store the moves.
        while key != initState:
            moves.push((ways.get(key))[1])
            key = (ways.get(key))[0]

        return moves
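
The UCS example above relies on two helpers that are not shown, `useful_state` and `cost_way`, as well as the `PriorityQueue` and `Stack` containers from the course's `util` module. A minimal sketch of what such helpers could look like, assuming `useful_state` only needs a hashable summary of a state and that plain uniform-cost search charges one unit per move (the `hasFood` flag would only matter in a variant with food-dependent edge costs):

    def useful_state(self, state):
        # Hypothetical helper: hashable summary of a state, built from
        # Pacman's position and the remaining food grid.
        return (state.getPacmanPosition(), state.getFood())

    def cost_way(self, has_food, priority):
        # Hypothetical helper: accumulated path cost for uniform-cost
        # search, charging a unit cost per move regardless of food.
        return priority + 1
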
Example #2
    def get_action(self, state):
        """
        Given a pacman game state, returns a legal move.

        Arguments:
        ----------
        - `state`: the current game state. See FAQ and class
                   `pacman.GameState`.

        Return:
        -------
        - A legal move as defined in `game.Directions`.
        """

        # If no solution has been computed yet.
        if self.iterator == -1:

            # Initialization of the fringe and the visited nodes.
            fringe = Stack()
            visited = set()

            # Pushing the initial state in the fringe.
            fringe.push((state, []))

            while not fringe.isEmpty():
                # Pop the next pacman state.
                current, self.solution = fringe.pop()

                position = current.getPacmanPosition()
                food = current.getFood()

                # If the state was already visited, we go directly to the next.
                if (position, food) in visited:
                    continue

                # Else, we add it to the visited nodes.
                visited.add((position, food))

                # If all the dots are eaten, we stop: Pacman wins!
                if current.isWin():
                    break

                # Otherwise, we add the unvisited states to the fringe.
                for (child, action) in current.generatePacmanSuccessors():
                    position = child.getPacmanPosition()
                    food = child.getFood()

                    if (position, food) not in visited:
                        fringe.push((child, self.solution + [action]))

        # Return the next move of the computed solution.
        self.iterator += 1

        return self.solution[self.iterator]
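
`get_action` above assumes that the agent was constructed with `self.iterator` set to -1 and `self.solution` empty, and that `Stack` comes from the usual Berkeley-style `util` module. A minimal sketch of the surrounding agent class under those assumptions, with `get_action` from the example defined as one of its methods (the class name and imports are illustrative, not taken from the original project):

from util import Stack  # used by the get_action method shown above
from game import Agent


class DFSAgent(Agent):
    # Hypothetical wrapper class for the get_action method shown above.

    def __init__(self):
        super().__init__()
        self.iterator = -1  # -1 means no solution has been computed yet
        self.solution = []  # filled with moves on the first call
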
Example #3
File: dfs.py  Project: tcrasset/Pacman
    def compute_tree(self, state):
        """
        Given a pacman state, computes a path from that state to a state
        where pacman has eaten all the food dots.
        Arguments:
        ----------
        - `state`: the current game state.
        Return:
        -------
        - A list of legal moves as defined in `game.Directions`
        """
        fringe = Stack()  # LIFO fringe for the depth-first search
        visited = set()  # hashes of already visited (position, food) pairs

        # A dictionary to maintain path information:
        # child state -> (parent state, action to reach child)
        meta = dict()
        meta[state] = (None, None)

        # Push the root state on the fringe
        fringe.push(state)

        # While not empty
        while not fringe.isEmpty():
            # Pick one available state
            current_node = fringe.pop()

            # If all food dots found, stop and compute a path
            if current_node.isWin():
                return self.construct_path(current_node, meta)

            # Get info on current node
            curr_pos = current_node.getPacmanPosition()
            curr_food = current_node.getFood()

            if (hash(curr_pos), hash(curr_food)) not in visited:
                # Add the current node to the visited set
                visited.add((hash(curr_pos), hash(curr_food)))

                # For each successor of the current node
                successors = current_node.generatePacmanSuccessors()
                for next_node, next_action in successors:
                    # Get info on successor
                    next_pos = next_node.getPacmanPosition()
                    next_food = next_node.getFood()

                    # Check if it was already visited
                    if (hash(next_pos), hash(next_food)) not in visited:
                        # If not, update meta and put the successor on the fringe
                        meta[next_node] = (current_node, next_action)
                        fringe.push(next_node)
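
`compute_tree` delegates the path reconstruction to `self.construct_path`, which is not shown here. A minimal sketch of what it could look like, assuming it simply follows the parent links stored in `meta` from the goal state back to the root (whose parent is None) and returns the actions in order:

    def construct_path(self, state, meta):
        # Hypothetical helper: walk the meta dictionary from the goal
        # state back to the root, collecting the action taken at each step.
        actions = []
        while meta[state][0] is not None:
            parent, action = meta[state]
            actions.append(action)
            state = parent
        # The actions were collected from goal to root, so reverse them.
        actions.reverse()
        return actions
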
Example #4
File: astar.py  Project: bendelv/Inge-B3_AI
    def find_goal(self, state):
        """
        arg:
        - State: representation of the game at the time t.

        Define a way to solve the problem (eat all the food).

        return:
        - A stack containing the movements.
        """
        initState = self.useful_state(state)
        fringe = PriorityQueue()
        foodSet = set()
        ways = {}
        priority = 0
        """
        Loop trough the food grid and initialize the set of food positions.
        """
        foodPos = state.getFood()
        for i in np.arange(foodPos.width):
            for j in np.arange(foodPos.height):
                if foodPos[i][j]:
                    foodSet.add((i, j))

        popped = [None, None, None, foodSet]

        while not state.isWin():
            successors = state.generatePacmanSuccessors()

            for succ in successors:
                """
                Get successors after legal actions of current state.
                Push new state, move, current state on the fringe
                and the current set of food positions
                plus the cost of the edge to this successor.
                """
                succState = succ[0]
                succMove = succ[1]

                x, y = succState.getPacmanPosition()
                pacmanPos = [x, y]
                food = state.hasFood(x, y)

                foodSetCopy = popped[3].copy()

                if food:
                    foodSetCopy.remove((x, y))

                cost = self.cost_way(food, pacmanPos, foodSetCopy, priority)
                fringe.push([succState, succMove, state, foodSetCopy], cost)

            # Keep popping elements from the fringe until we reach one
            # whose state has not been visited yet.
            while True:
                priority, popped = fringe.pop()
                represCurrState = self.useful_state(popped[0])
                represPrevState = self.useful_state(popped[2])

                if represCurrState not in ways:
                    break

            ways[represCurrState] = [represPrevState, popped[1]]
            state = popped[0]

        moves = Stack()
        key = self.useful_state(popped[0])

        # Trace the way back to the initial state and store the moves.
        while key != initState:
            moves.push((ways.get(key))[1])
            key = (ways.get(key))[0]

        return moves
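
The A* variant above differs from the UCS example mainly in the food-set bookkeeping and in `cost_way`, which now also receives Pacman's position and the remaining food, presumably so it can add a heuristic term to the accumulated path cost. A minimal sketch of one admissible heuristic such a cost function could use, assuming the Manhattan distance to the closest remaining food dot (the name `food_heuristic` and the exact formula are illustrative, not taken from the repository):

    def food_heuristic(self, pacmanPos, foodSet):
        # Hypothetical heuristic: Manhattan distance to the nearest
        # remaining food dot. Pacman must travel at least that far to eat
        # anything, so the estimate never exceeds the true remaining cost.
        if not foodSet:
            return 0
        x, y = pacmanPos
        return min(abs(x - fx) + abs(y - fy) for (fx, fy) in foodSet)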