Example #1
    def find_goal(self, state):
        """
        arg:
        - State: representation of the game at the time t.

        Define a way to solve the problem (eat all the food).

        return:
        - A stack containing the movements.
        """
        initState = self.useful_state(state)
        fringe = PriorityQueue()
        ways = {}

        priority = 0

        while not state.isWin():

            successors = state.generatePacmanSuccessors()

            for succ in successors:
                """
                Get successors after legal actions of current state.
                Push new state, move and current state on the fringe
                plus the cost of the edge to this successor.
                """
                succState = succ[0]
                succMove = succ[1]

                x, y = succState.getPacmanPosition()
                cost = self.cost_way(state.hasFood(x, y), priority)

                fringe.push([succState, succMove, state], cost)

            while True:
                """
                Pop a new element from the fringe
                as long as it has been visited.
                """
                priority, popped = fringe.pop()
                represCurrState = self.useful_state(popped[0])
                represPrevState = self.useful_state(popped[2])

                if represCurrState not in ways:
                    break

            ways[represCurrState] = [represPrevState, popped[1]]
            state = popped[0]

        moves = Stack()
        key = self.useful_state(popped[0])

        while key != initState:
            # Trace the path back to the initial state and store the moves.
            moves.push((ways.get(key))[1])
            key = (ways.get(key))[0]

        return moves
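
The final loop above rebuilds the solution by walking the `ways` dictionary backwards from the goal state and pushing each move onto a stack, so that popping the stack later yields the moves in forward order. A minimal sketch of that traceback idea, using a plain Python list as the stack and a small hand-written `ways` dictionary (the course's `Stack` class and the real state keys are not shown here):

# Hypothetical parent map: state key -> [previous state key, move taken].
ways = {
    "B": ["A", "East"],
    "C": ["B", "East"],
    "D": ["C", "North"],
}
init_key, goal_key = "A", "D"

# Walk back from the goal, pushing each move onto a list used as a stack.
moves = []
key = goal_key
while key != init_key:
    prev_key, move = ways[key]
    moves.append(move)
    key = prev_key

# Popping the stack yields the moves in forward order: East, East, North.
while moves:
    print(moves.pop())
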
Example #2
    def ucsAlgo(self, state):
        """
        Implements the uniform-cost search algorithm.
        """
        visitedNodes = set()
        cost = 0
        queueOfStates = PriorityQueue()
        queueOfStates.push(state, cost)
        dictionnary = dict()
        dictionnary[state] = None

        while not queueOfStates.isEmpty():
            cost, actualState = queueOfStates.pop()
            position = actualState.getPacmanPosition()
            food = actualState.getFood()
            # If the goal is fulfilled, we win
            if actualState.isWin():
                return self.getWinPath(dictionnary, actualState)

            if (position, food) in visitedNodes:
                continue
            else:
                visitedNodes.add((position, food))
                for successors in actualState.generatePacmanSuccessors():

                    dictionnary[successors[0]] = actualState
                    # Step cost: 0 if a dot is eaten, 1 otherwise.
                    if food == successors[0].getFood():
                        incrementedCost = 1
                    else:
                        incrementedCost = 0
                    totalCost = cost + incrementedCost
                    queueOfStates.push(successors[0], totalCost)
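
The example above is the usual uniform-cost search pattern: pop the cheapest state, skip it if its (position, food) pair was already visited, otherwise expand it and record its parent for path reconstruction. A minimal, self-contained sketch of the same pattern on a toy weighted graph, using Python's heapq rather than the course's PriorityQueue (the graph and its costs are made up for illustration):

import heapq

def uniform_cost_search(graph, start, goal):
    """Uniform-cost search on a weighted graph given as
    {node: [(neighbour, edge_cost), ...]}. Returns (cost, path) or None."""
    # Each entry is (cost so far, node, path to the node).
    frontier = [(0, start, [start])]
    visited = set()

    while frontier:
        cost, node, path = heapq.heappop(frontier)
        if node == goal:
            return cost, path
        if node in visited:
            continue
        visited.add(node)
        for neighbour, edge_cost in graph.get(node, []):
            if neighbour not in visited:
                heapq.heappush(frontier,
                               (cost + edge_cost, neighbour, path + [neighbour]))
    return None

graph = {"A": [("B", 1), ("C", 4)],
         "B": [("C", 1), ("D", 5)],
         "C": [("D", 1)],
         "D": []}
print(uniform_cost_search(graph, "A", "D"))  # (3, ['A', 'B', 'C', 'D'])
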
Example #3
    def dfs(self, currentPos):

        visited = []
        fringe = PriorityQueue()

        fringe.push((currentPos, []), 0)

        while not fringe.isEmpty():
            node = fringe.pop()
            depth, successorPos, path = node[0], node[1][0], node[1][1]

            successorPacmanState = (successorPos.getPacmanPosition(), successorPos.getFood())
            if successorPacmanState not in visited:
                visited.append(successorPacmanState)

                for successor, direction in successorPos.generatePacmanSuccessors():
                    # The goal is reached when the successor's food grid
                    # contains no remaining dot.
                    goal = True
                    for row in successor.getFood():
                        for hasDot in row:
                            if hasDot:
                                goal = False
                    if goal:
                        return path + [direction]
                    else:
                        depth = len(path)
                        fringe.push((successor, path + [direction]),
                                    -depth)  # -depth for DFS; depth for BFS; path cost for UCS
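
The last line is the interesting part: pushing every successor with priority -depth makes the min-priority queue hand back the deepest node first, so the search expands nodes in LIFO order and behaves like DFS; pushing +depth instead would give BFS, and pushing the accumulated path cost would give UCS. A tiny demonstration of that ordering with Python's heapq (assuming the course's PriorityQueue is an ordinary min-priority queue):

import heapq

heap = []
# Push nodes in discovery order, with priority = -depth.
for depth, label in [(0, "root"), (1, "child"), (2, "grandchild")]:
    heapq.heappush(heap, (-depth, label))

# The deepest node comes out first, i.e. LIFO / depth-first order.
while heap:
    print(heapq.heappop(heap))
# (-2, 'grandchild')
# (-1, 'child')
# (0, 'root')
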
Example #4
    def compute_tree(self, state):
        """
        Given a pacman state, computes a path from that state to a state
        where pacman has eaten all the food dots.
        Arguments:
        ----------
        - `state`: the current game state.
        Return:
        -------
        - A list of legal moves as defined in `game.Directions`
        """
        fringe = PriorityQueue()  # a priority queue
        visited = set()  # an empty set to maintain visited nodes

        # a dictionary to maintain path information:
        # key (child state) -> (parent state, action to reach the child)
        meta = dict()
        meta[state] = (None, None)

        # Append root
        fringe.update(state, 1)

        # While not empty
        while not fringe.isEmpty():
            # Pick one available state
            current_cost, current_node = fringe.pop()

            # If all food dots found, stop and compute a path
            if current_node.isWin():
                return self.construct_path(current_node, meta)

            # Get info on current node
            curr_pos = current_node.getPacmanPosition()
            curr_food = current_node.getFood()

            if (hash(curr_pos), hash(curr_food)) not in visited:
                # Add the current node to the visited set
                visited.add((hash(curr_pos), hash(curr_food)))

                # For each successor of the current node
                successors = current_node.generatePacmanSuccessors()
                for next_node, next_action in successors:

                    # Get info on successor
                    next_pos = next_node.getPacmanPosition()
                    next_food = next_node.getFood()

                    # Check if it was already visited
                    if (hash(next_pos), hash(next_food)) not in visited:

                        # If not, update meta
                        meta[next_node] = (current_node, next_action)

                        # Assign priority based on the presence of food
                        x, y = next_node.getPacmanPosition()
                        cost = 0 if current_node.hasFood(x, y) else 1

                        # Put the successor on the fringe
                        fringe.update(next_node, current_cost + cost)
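
The construct_path helper called when a winning state is popped is not shown in this example. A plausible sketch of such a helper, written here as a plain function rather than a method, assuming (as in the initialisation above) that meta maps every state to (parent state, action used to reach it) and the root to (None, None); the actual implementation in the original code may differ:

def construct_path(node, meta):
    """Walk the meta dictionary from the goal back to the root and
    return the list of actions in forward order."""
    actions = []
    while True:
        parent, action = meta[node]
        if parent is None:  # the root was stored as (None, None)
            break
        actions.append(action)
        node = parent
    actions.reverse()
    return actions
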
Example #5
    def get_action(self, state):
        """
        Given a pacman game state, returns a legal move.

        Arguments:
        ----------
        - `state`: the current game state. See FAQ and class
                   `pacman.GameState`.

        Return:
        -------
        - A legal move as defined in `game.Directions`.
        """

        # If no solution has been computed yet.
        if self.iterator == -1:

            # Initialization of the fringe and the visited nodes.
            fringe = PriorityQueue()
            visited = set()

            # Pushing the initial state in the fringe.
            cost = len(self.solution) + self.heuristic(state)
            fringe.push((state, []), cost)

            while not fringe.isEmpty():
                # Pop the next Pacman state and the path leading to it.
                _, (current, self.solution) = fringe.pop()

                position = current.getPacmanPosition()
                food = current.getFood()

                # If the state was already visited, we go directly to the next.
                if (position, food) in visited:
                    continue

                # Else, we add it to the visited nodes.
                visited.add((position, food))

                # If all the dots are eaten, we stop: Pacman wins!
                if current.isWin():
                    break

                # Otherwise, we add the unvisited states to the fringe.
                for (child, action) in current.generatePacmanSuccessors():
                    position = child.getPacmanPosition()
                    food = child.getFood()

                    if (position, food) not in visited:
                        cost = len(self.solution) + self.heuristic(child)
                        fringe.push((child, self.solution + [action]), cost)

        # A solution has been found (or was already cached): return the next move.
        self.iterator += 1

        return self.solution[self.iterator]
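
This agent computes the whole solution on the first call to get_action and then replays it one move per call through self.iterator. The caching pattern in isolation, with a hypothetical plan function standing in for the A* search above:

class ReplayAgent:
    """Caches a plan on the first call and replays it move by move."""

    def __init__(self):
        self.solution = []
        self.iterator = -1

    def plan(self, state):
        # Hypothetical stand-in for the search; it would normally return
        # the list of moves found by the algorithm.
        return ["East", "East", "North"]

    def get_action(self, state):
        # Compute the plan only once, then serve one stored move per call.
        if self.iterator == -1:
            self.solution = self.plan(state)
        self.iterator += 1
        return self.solution[self.iterator]

agent = ReplayAgent()
print([agent.get_action(None) for _ in range(3)])  # ['East', 'East', 'North']
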
Example #6
    def ucs(self, start):
        explored = []
        fringe = PriorityQueue()
        fringe.push((start, []), 0)

        while True:
            node = fringe.pop()
            cost, state, path = node[0], node[1][0], node[1][1]

            pacmanState = (state.getPacmanPosition(), state.getFood())
            if pacmanState not in explored:
                explored.append(pacmanState)
                for successor, move in state.generatePacmanSuccessors():
                    # Testing if goal state:
                    goalState = True
                    for i in successor.getFood():
                        for j in i:
                            if j:  # not goal state, keep expanding
                                goalState = False

                    if goalState:
                        return path + [move]
                    else:
                        cost = len(path)
                        fringe.push((successor, path + [move]), cost + 1)
Example #7
    def astar(self, state):
        """
        Given a pacman game state,
        returns a list of legal moves to solve the search layout, using the A*
        algorithm.
        Arguments:
        ----------
        - `state`: the current game state. See FAQ and class
                   `pacman.GameState`.
        Return:
        -------
        - A list of legal moves as defined in `game.Directions`.
        """
        self.init_food_list = state.getFood().asList()
        fringe = PriorityQueue()
        fringe.push((state, [], 0), 0)
        closed = set()

        while True:
            # Failure case
            if fringe.isEmpty():
                return []

            # Take the node with the lowest priority of the fringe
            priority, (current, path, backward_cost) = fringe.pop()

            # Win case
            if current.isWin():
                return path

            # Take the key of the current state
            current_key = self.key(current)

            if current_key not in closed:
                closed.add(current_key)

                for next_state, action in current.generatePacmanSuccessors():
                    next_key = self.key(next_state)
                    if next_key not in closed:
                        next_path = path + [action]
                        next_backward_cost = g(backward_cost,
                                               next_state.getNumFood())
                        next_priority = h(next_state) + next_backward_cost
                        fringe.push(
                            (next_state, next_path, next_backward_cost),
                            next_priority)
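
Example #7 is the textbook A* loop: the priority of a successor is the backward cost g accumulated so far plus a heuristic estimate h of the remaining cost. A self-contained sketch of the same loop on a small hand-made grid, using Python's heapq and the Manhattan distance as heuristic (the grid, the move set and the unit step cost are assumptions for illustration, not the g and h helpers used above):

import heapq

def manhattan(a, b):
    """Admissible heuristic: Manhattan distance between two cells."""
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

def astar_grid(width, height, walls, start, goal):
    """A* on a width x height grid; walls is a set of blocked (x, y) cells.
    Returns a list of moves, or None if the goal is unreachable."""
    moves = {"North": (0, 1), "South": (0, -1), "East": (1, 0), "West": (-1, 0)}
    # Each entry is (f = g + h, g, position, path so far).
    frontier = [(manhattan(start, goal), 0, start, [])]
    closed = set()

    while frontier:
        _, g, pos, path = heapq.heappop(frontier)
        if pos == goal:
            return path
        if pos in closed:
            continue
        closed.add(pos)
        for action, (dx, dy) in moves.items():
            nxt = (pos[0] + dx, pos[1] + dy)
            if (0 <= nxt[0] < width and 0 <= nxt[1] < height
                    and nxt not in walls and nxt not in closed):
                heapq.heappush(frontier,
                               (g + 1 + manhattan(nxt, goal), g + 1,
                                nxt, path + [action]))
    return None

# A 3x3 grid with one blocked cell on the diagonal.
print(astar_grid(3, 3, {(1, 1)}, (0, 0), (2, 2)))
# ['East', 'East', 'North', 'North']
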
Example #8
    def astar(self, state):
        """
        Given a pacman game state,
        returns a list of legal moves to solve the search layout.
        Arguments:
        ----------
        - `state`: the current game state. See FAQ and class
                   `pacman.GameState`.
        Return:
        -------
        - A list of legal moves as defined in `game.Directions`.
        """
        path = []
        cost = 0
        fringe = PriorityQueue()
        fringe.push((state, path, cost), 0)
        closed = set()

        while True:
            if fringe.isEmpty():
                return []  # failure

            f, (current, path, cost) = fringe.pop()

            if current.isWin():
                return path

            current_key = key(current)

            if current_key not in closed:
                closed.add(current_key)

                for next_state, action in current.generatePacmanSuccessors():
                    
                    # Step cost: 1 if the move eats a food dot, 2 otherwise.
                    if current.getFood() != next_state.getFood():
                        stepCost = 1
                    else:
                        stepCost = 2
                    fringe.push((next_state, path + [action], cost + stepCost),
                                cost + stepCost + h(next_state))

Example #9
    def astarAlgo(self, state):
        """
        Implements the A* algorithm.
        """
        cost = 0
        visitedNodes = set()
        queueOfStates = PriorityQueue()
        queueOfStates.push((state, cost), 0)
        dictionnary = dict()
        dictionnary[state] = None
        foods = state.getFood()

        while not queueOfStates.isEmpty():
            _, (actualState, cost) = queueOfStates.pop()
            position = actualState.getPacmanPosition()
            food = actualState.getFood()

            # Stop as soon as the food grid differs from the initial one,
            # i.e. a food dot has been eaten along the way.
            if foods != actualState.getFood():
                return self.getWinDistance(dictionnary, actualState)

            if (position, food) in visitedNodes:
                continue
            else:
                visitedNodes.add((position, food))
                for successors in actualState.generatePacmanSuccessors():

                    dictionnary[successors[0]] = actualState
                    if food == successors[0].getFood():
                        incrementedCost = 10
                    else:
                        incrementedCost = 1

                    heuristic = self.heuristicFunction(successors[0])
                    totalCost = cost + incrementedCost + heuristic * 10
                    queueOfStates.push((successors[0], cost + incrementedCost),
                                       totalCost)

        return 0
Example #10
    def astar(self, currentPos):

        visited = []
        fringe = PriorityQueue()

        fringe.push((currentPos, []), 0)
        while not fringe.isEmpty():
            node = fringe.pop()
            # print("node -> ", node)
            depth, successorPos, path = node[0], node[1][0], node[1][1]
            # print()
            # print(path)
            # print(actualPos.generatePacmanSuccessors())
            successorPacmanState = (successorPos.getPacmanPosition(),
                                    successorPos.getFood())
            if successorPacmanState not in visited:
                visited.append(successorPacmanState)

                for successor, direction in successorPos.generatePacmanSuccessors():
                    minDist = self.get_distance(successor)
                    cost = 1
                    if minDist < depth:
                        cost = -1
                    # The goal is reached when the successor's food grid
                    # contains no remaining dot.
                    goal = True
                    for row in successor.getFood():
                        for hasDot in row:
                            if hasDot:
                                goal = False
                    if goal:
                        return path + [direction]
                    else:
                        depth = cost + len(path) + self.get_distance(successor)
                        fringe.push((successor, path + [direction]), depth)
Example #11
    def find_goal(self, state):
        """
        arg:
        - State: representation of the game at the time t.

        Define a way to solve the problem (eat all the food).

        return:
        - A stack containing the movements.
        """
        initState = self.useful_state(state)
        fringe = PriorityQueue()
        foodSet = set()
        ways = {}
        priority = 0
        """
        Loop trough the food grid and initialize the set of food positions.
        """
        foodPos = state.getFood()
        for i in np.arange(foodPos.width):
            for j in np.arange(foodPos.height):
                if foodPos[i][j]:
                    foodSet.add((i, j))

        popped = [None, None, None, foodSet]

        while not state.isWin():
            successors = state.generatePacmanSuccessors()

            for succ in successors:
                """
                Get successors after legal actions of current state.
                Push new state, move, current state on the fringe
                and the current set of food positions
                plus the cost of the edge to this successor.
                """
                succState = succ[0]
                succMove = succ[1]

                x, y = succState.getPacmanPosition()
                pacmanPos = [x, y]
                food = state.hasFood(x, y)

                foodSetCopy = popped[3].copy()

                if food:
                    foodSetCopy.remove((x, y))

                cost = self.cost_way(food, pacmanPos, foodSetCopy, priority)
                fringe.push([succState, succMove, state, foodSetCopy], cost)

            while True:
                """
                Pop a new element from the fringe
                as long as it has been visited.
                """
                priority, popped = fringe.pop()
                represCurrState = self.useful_state(popped[0])
                represPrevState = self.useful_state(popped[2])

                if represCurrState not in ways:
                    break

            ways[represCurrState] = [represPrevState, popped[1]]
            state = popped[0]

        moves = Stack()
        key = self.useful_state(popped[0])

        while key != initState:
            # Trace the path back to the initial state and store the moves.
            moves.push((ways.get(key))[1])
            key = (ways.get(key))[0]

        return moves
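
Like Example #1, this version keys the ways dictionary on a compact representation returned by useful_state, but it also carries the set of remaining food positions along with every fringe entry. The essential requirement is that the representation be hashable so that equal states collapse to the same dictionary key; a minimal sketch of one such representation (useful_state is not shown in the source, so the pair below is only an illustration):

def useful_state(pacman_position, food_positions):
    """Reduce a state to a hashable key: Pacman's position plus the
    remaining food dots, frozen so the pair can be used as a dict key."""
    return (pacman_position, frozenset(food_positions))

ways = {}
key_a = useful_state((1, 1), {(3, 1), (5, 2)})
key_b = useful_state((1, 1), {(5, 2), (3, 1)})

ways[key_a] = ["parent key placeholder", "East"]
print(key_b in ways)  # True: equal states map to the same key
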