Example #1
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """

        return search.ucs(AnyFoodSearchProblem(gameState))
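This example only builds the path; a caller still has to execute it. As a hypothetical illustration (the name eatAllDots and the driver structure below are assumptions, not code from this listing), such a path could be replayed on the game state until no food remains:

def eatAllDots(agent, gameState):
    # Hypothetical driver: repeatedly ask the agent for a path to the nearest
    # dot and replay that path on the game state until all food is eaten.
    actions = []
    state = gameState
    while state.getFood().count() > 0:
        segment = agent.findPathToClosestDot(state)      # path to the nearest dot
        actions += segment
        for action in segment:
            state = state.generateSuccessor(0, action)   # agent index 0 is Pacman
    return actions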
Example #2
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     """
     actionlist = []
     nodes = set()
     fringe = util.PriorityQueue()
     start = search.Node(startPosition, None, 0, None, 0, 0)
     fringe.push(start, 0)
     while not fringe.isEmpty():
         current = fringe.pop()
         if problem.isGoalState(current.state):
             while current.parent != None:
                 actionlist.append(current.action)
                 current = current.parent
             actionlist.reverse()
             return actionlist
         if current.state not in nodes:
             nodes.add(current.state)
             for child in problem.getSuccessors(current.state):
                 children = search.Node(child[0], child[1], 0, current, 0, greedy((child[0], food), problem))
                 fringe.push(children, children.heuristic)
     return []
 """
     return search.ucs(problem)
Example #3
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

      state:   The current search state
               (a data structure you chose in your search problem)

      problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound
    on the shortest path from the state to a goal of the problem; i.e.
    it should be admissible (as well as consistent).
    """
    corners = problem.corners  # These are the corner coordinates
    # These are the walls of the maze, as a Grid (game.py)
    walls = problem.walls

    "*** YOUR CODE HERE ***"

    totalDist = 0
    node = state[0]
    cornersVisited = state[1]

    distances = [
        len(
            search.ucs(
                PositionSearchProblem(problem.gameState,
                                      start=node,
                                      goal=corner,
                                      warn=False))) for corner in corners
        if corner not in cornersVisited
    ]
    if not distances:
        return 0
    return max(distances)
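The docstring asks for an admissible (and consistent) lower bound, and this example obtains one by running UCS to every unvisited corner, which is exact but expensive. A cheaper sketch, assuming the same (position, visitedCorners) state encoding used above; the function name is illustrative, not part of the project code:

def cornersManhattanHeuristic(state, problem):
    # Largest Manhattan distance to any unvisited corner: Pacman must
    # eventually reach that corner, and the Manhattan distance never
    # overestimates the maze distance, so this stays a lower bound.
    position, visitedCorners = state
    remaining = [c for c in problem.corners if c not in visitedCorners]
    if not remaining:
        return 0  # goal state: nothing left to visit
    return max(abs(position[0] - cx) + abs(position[1] - cy)
               for cx, cy in remaining)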
Example #4
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        uneaten = food.asList()
        #print uneaten
        xy1 = startPosition

        def manhdistance(xy1, xy2):
            return (abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1]))

        priorityqueue = util.PriorityQueue()
        if uneaten:
            for xy in uneaten:
                #print len(xy)
                d = manhdistance(xy, xy1)
                priorityqueue.push(xy, d)
            problem.goal = priorityqueue.pop()
            #distance = priorityqueue.pop()[1]

        path = search.ucs(problem)
        return path
Example #5
 def solve(start, nodes, weight):
     source  = max((v, k) for k,v in weight[start].items())[1]
     x,y     = source
     nodes[x][y] = False
     problem = PathRouteProblem(source, nodes, weight)
     actions = search.ucs(problem)
     total   = problem.getCostOfActions(actions)
     return (actions, total)
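The max((v, k) for k, v in weight[start].items())[1] expression above is a compact way to pick the key with the largest value. A standalone illustration of the idiom (the weights dictionary here is made up for the example):

weights = {'a': 3, 'b': 7, 'c': 5}
heaviest = max((v, k) for k, v in weights.items())[1]   # 'b': compares (value, key) pairs
heaviest = max(weights, key=weights.get)                # equivalent for distinct values, arguably clearer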
Example #6
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     x,y = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
         
     return search.ucs(problem)
Example #7
def on_click():
    """
    This function defines the action of the 'Next' button.
    """
    global algo, counter, next_button, IT_Del_problem, start, goal
    IT_Del_problem = GraphProblem(start.get(), goal.get(), map_IT_Del)
    if "Uniform Cost Search" == algo.get():
        node = uniform_cost_search(IT_Del_problem)
        if node is not None:
            final_path = ucs(IT_Del_problem).solution()
            final_path.append(start.get())

            show_cost_and_path(final_path, ucs(IT_Del_problem).path_cost)

            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
Example #8
def maze_distance(point1, point2, game_state):
    return len(
        search.ucs(
            PositionSearchProblem(gameState=game_state,
                                  start=point1,
                                  goal=point2,
                                  warn=False,
                                  visualize=False)))
Example #9
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        return search.ucs(problem)
        util.raiseNotDefined()
Example #10
  def findPathToClosestDot(self, gameState):
    "Returns a path (a list of actions) to the closest dot, starting from gameState"
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)

    #return search.astar(problem,simplefoodHeuristic) # Same as UCS
    return search.ucs(problem)
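The commented-out line above refers to a simplefoodHeuristic that is not shown in this listing. A plausible stand-in, assuming the standard AnyFoodSearchProblem that stores the food grid on the problem (here as problem.food); both the body and the name closestFoodHeuristic are assumptions: the Manhattan distance to the nearest remaining dot, which never overestimates the maze distance and so keeps A* admissible.

def closestFoodHeuristic(position, problem):
    # Manhattan distance from the current position to the nearest remaining dot.
    foodList = problem.food.asList()
    if not foodList:
        return 0
    return min(abs(position[0] - fx) + abs(position[1] - fy)
               for fx, fy in foodList)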
Example #11
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        return search.ucs(problem)
Example #12
def distanceToFood(point1, point2, gameState):

    x1, y1 = point1
    x2, y2 = point2

    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    problem = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
    return len(search.ucs(problem))
Example #13
def on_click():
    '''
    This function defines the action of the 'Next' button.
    '''
    global algo, counter, next_button, romania_problem, start, goal
    romania_problem = GraphProblem(start.get(), goal.get(), romania_map)
    if "Breadth-First Tree Search" == algo.get():
        node = breadth_first_tree_search(romania_problem)
        if node is not None:
            final_path = bfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Depth-First Tree Search" == algo.get():
        node = depth_first_tree_search(romania_problem)
        if node is not None:
            final_path = dfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Breadth-First Search" == algo.get():
        node = breadth_first_search(romania_problem)
        if node is not None:
            final_path = bfs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Depth-First Graph Search" == algo.get():
        node = depth_first_graph_search(romania_problem)
        if node is not None:
            final_path = dfgs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Uniform Cost Search" == algo.get():
        node = uniform_cost_search(romania_problem)
        if node is not None:
            final_path = ucs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "A* - Search" == algo.get():
        node = astar_search(romania_problem)
        if node is not None:
            final_path = asts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
Example #14
def on_click():
    """
    This function defines the action of the 'Next' button.
    """
    global algo, counter, next_button, romania_problem, start, goal
    romania_problem = GraphProblem(start.get(), goal.get(), romania_map)
    if "Breadth-First Tree Search" == algo.get():
        node = breadth_first_tree_search(romania_problem)
        if node is not None:
            final_path = bfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Depth-First Tree Search" == algo.get():
        node = depth_first_tree_search(romania_problem)
        if node is not None:
            final_path = dfts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Breadth-First Graph Search" == algo.get():
        node = breadth_first_graph_search(romania_problem)
        if node is not None:
            final_path = bfs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Depth-First Graph Search" == algo.get():
        node = depth_first_graph_search(romania_problem)
        if node is not None:
            final_path = dfgs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "Uniform Cost Search" == algo.get():
        node = uniform_cost_search(romania_problem)
        if node is not None:
            final_path = ucs(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
    elif "A* - Search" == algo.get():
        node = astar_search(romania_problem)
        if node is not None:
            final_path = asts(romania_problem).solution()
            final_path.append(start.get())
            display_final(final_path)
            next_button.config(state="disabled")
        counter += 1
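Each branch above runs the chosen search twice: once via the long-form function to get node, and again via a short alias (bfts, ucs, etc.) to extract the solution. A hypothetical refactoring sketch, assuming those aliases wrap the same search functions, and reusing the names GraphProblem, display_final, next_button, and romania_map from the example:

SEARCHERS = {
    "Breadth-First Tree Search": breadth_first_tree_search,
    "Depth-First Tree Search": depth_first_tree_search,
    "Breadth-First Graph Search": breadth_first_graph_search,
    "Depth-First Graph Search": depth_first_graph_search,
    "Uniform Cost Search": uniform_cost_search,
    "A* - Search": astar_search,
}

def on_click():
    global counter, romania_problem
    romania_problem = GraphProblem(start.get(), goal.get(), romania_map)
    search_fn = SEARCHERS.get(algo.get())
    if search_fn is None:
        return
    node = search_fn(romania_problem)       # run the chosen search exactly once
    if node is not None:
        final_path = node.solution()        # reuse the node instead of searching again
        final_path.append(start.get())
        display_final(final_path)
        next_button.config(state="disabled")
    counter += 1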
Example #15
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     "*** YOUR CODE HERE ***"
     return search.ucs(problem)
Example #16
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        # easy peasy, though I did try A*; it requires modifying the class more than I wanted to
        return search.ucs(problem)
Example #17
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        
#        print 'Finding Dot', gameState
#        print search.ucs(problem);
        return search.ucs(problem)
Example #18
 def registerInitialState(self, state):
     "This method is called before any moves are made."
     "*** YOUR CODE HERE ***"
     walls = state.getWalls()
     top, right = walls.height - 2, walls.width - 2
     self.top, self.right = top, right
     self.corners = ((1, 1), (1, top), (right, 1), (right, top))
     cornersDistance = [((mazeDistance(state.getPacmanPosition(), cp,
                                       state)), cp) for cp in self.corners]
     scD = min(cornersDistance)[1]
     problem = PositionSearchProblem(state,
                                     start=state.getPacmanPosition(),
                                     goal=scD,
                                     warn=False)
     self.path = search.ucs(problem)
Example #19
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        path = search.ucs(problem)
        return path  # return the result of the UCS algorithm, i.e. the most efficient path
        util.raiseNotDefined()
Example #20
def mazeDistanceU(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built.  The gameState can be any game state -- Pacman's position
    in that state is ignored.
    Example usage: mazeDistance( (2,4), (5,6), gameState)
    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    return len(search.ucs(prob))
Example #21
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        #We calculate the optimal path to the closest dot by using uniform cost search on the problem and returning the resulting list of actions.
        return search.ucs(problem)

        util.raiseNotDefined()
Example #22
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        temp_path = search.ucs(problem)
        #temp_path = search.bfs(problem);
        #temp_path = search.dfs(problem); ##MUST SEE -- Nice Backtrack can be seen
        return temp_path
Example #23
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.  First, try to come
    up with an admissible heuristic; almost all admissible heuristics will be
    consistent as well.

    If using A* ever finds a solution that is worse than the one uniform cost
    search finds, your heuristic is *not* consistent, and probably not
    admissible!  On the other hand, inadmissible or inconsistent heuristics
    may find optimal solutions, so be careful.

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid
    (see game.py) of either True or False. You can call foodGrid.asList() to get
    a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the
    problem.  For example, problem.walls gives you a Grid of where the walls
    are.

    If you want to *store* information to be reused in other calls to the
    heuristic, there is a dictionary called problem.heuristicInfo that you can
    use. For example, if you only want to count the walls once and store that
    value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access
    problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"
    x1, y1 = position
    distances = [0]
    game_state = problem.startingGameState
    walls = game_state.getWalls()
    position_search_prob = PositionSearchProblem(game_state,
                                                 start=position,
                                                 goal=(1, 1),
                                                 warn=False,
                                                 visualize=False)
    for food in foodGrid.asList():
        x2, y2 = food
        position_search_prob.goal = food
        # distances.append(util.manhattanDistance(position, food)) #fails a test by expanding more nodes 9000+
        if not walls[x1][y1] and not walls[x2][y2]:
            # BFS/ucs search expands around 4137 nodes
            distances.append(len(search.ucs(position_search_prob)))
    return max(distances)
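The docstring above suggests problem.heuristicInfo for memoisation, but this example re-runs UCS for every food dot on every call. A minimal caching sketch along the lines the docstring describes; the dictionary key scheme and the name cachedFoodHeuristic are assumptions:

def cachedFoodHeuristic(state, problem):
    # Maze distances between fixed points never change, so memoise them in
    # problem.heuristicInfo and only run UCS for pairs not seen before.
    position, foodGrid = state
    cache = problem.heuristicInfo.setdefault('mazeDistances', {})
    best = 0
    for food in foodGrid.asList():
        key = (position, food)
        if key not in cache:
            prob = PositionSearchProblem(problem.startingGameState,
                                         start=position, goal=food,
                                         warn=False, visualize=False)
            cache[key] = len(search.ucs(prob))
        best = max(best, cache[key])
    return best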
Example #24
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built.  The gameState can be any game state -- Pacman's position
    in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    return len(search.ucs(prob))
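Since every step in these mazes costs 1, breadth-first search finds a path of the same length, so the UCS call above could be swapped for BFS without changing the returned distance. A sketch assuming the project's usual search.bfs alias; the function name mazeDistanceBFS is illustrative:

def mazeDistanceBFS(point1, point2, gameState):
    # Same contract as mazeDistance above, but breadth-first; valid because every step costs 1.
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    return len(search.bfs(prob))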
Example #25
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        foodMax = []
        for i, row in enumerate(food):
            for j, col in enumerate(row):
                if food[i][j]:
                    foodMax.append((mazeDistance(startPosition, (i,j), gameState), (i, j)))
        if not foodMax:
            return []
        else:
            p = PositionSearchProblem(gameState, start = startPosition, goal = min(foodMax)[1], warn = False)
            return search.ucs(p) 
Example #26
def cornersHeuristic1(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

      state:   The current search state
               (a data structure you chose in your search problem)

      problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound on the
    shortest path from the state to a goal of the problem; i.e.  it should be
    admissible (as well as consistent).
    """
    corners = problem.corners  # These are the corner coordinates
    walls = problem.walls  # These are the walls of the maze, as a Grid (game.py)

    "*** YOUR CODE HERE ***"
    current = state[0]
    exploredCorners = state[1]
    x, y = current
    distances = []
    h = 0
    unexploredCorners = []

    for corner in corners:
        if corner not in exploredCorners:
            unexploredCorners.append(corner)

    if unexploredCorners:
        for corner in unexploredCorners:
            x_c, y_c = corner
            # Use search algorithm to compute the distance
            searchProblem = PositionSearchProblem(gameState=problem.gameState,
                                                  start=current,
                                                  goal=corner,
                                                  warn=False,
                                                  visualize=False)
            dist = len(search.ucs(searchProblem))  # UCS and BFS are both OK, but DFS is not acceptable.
            distances.append(dist)

        # Picks the largest distance as the heuristic value
        h = max(distances)

    return h
Example #27
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.  First, try to come up
    with an admissible heuristic; almost all admissible heuristics will be consistent
    as well.

    If using A* ever finds a solution that is worse than the one uniform cost search
    finds, your heuristic is *not* consistent, and probably not admissible!  On the
    other hand, inadmissible or inconsistent heuristics may find optimal solutions,
    so be careful.

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a
    Grid (see game.py) of either True or False. You can call foodGrid.asList()
    to get a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the problem.
    For example, problem.walls gives you a Grid of where the walls are.

    If you want to *store* information to be reused in other calls to the heuristic,
    there is a dictionary called problem.heuristicInfo that you can use. For example,
    if you only want to count the walls once and store that value, try:
      problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    walls = problem.walls
    "*** YOUR CODE HERE ***"

    # def localDist(start, end):
    #     return abs(start[0] - end[0]) + abs(start[1] - end[1])

    distances = [
        len(
            search.ucs(
                PositionSearchProblem(problem.gameState,
                                      start=position,
                                      goal=food,
                                      warn=False)))
        for food in foodGrid.asList()
    ]
    if not distances:
        return 0
    return max(distances)
Example #28
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.  First, try to come
    up with an admissible heuristic; almost all admissible heuristics will be
    consistent as well.

    If using A* ever finds a solution that is worse than the one uniform cost
    search finds, your heuristic is *not* consistent, and probably not
    admissible!  On the other hand, inadmissible or inconsistent heuristics
    may find optimal solutions, so be careful.

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid
    (see game.py) of either True or False. You can call foodGrid.asList() to get
    a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the
    problem.  For example, problem.walls gives you a Grid of where the walls
    are.

    If you want to *store* information to be reused in other calls to the
    heuristic, there is a dictionary called problem.heuristicInfo that you can
    use. For example, if you only want to count the walls once and store that
    value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access
    problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    if problem.isGoalState(state):
        return 0
    distances = []
    for foodPosition in foodGrid.asList():
        searchProblem = PositionSearchProblem(problem.startingGameState, start=position, goal=foodPosition, warn=False, visualize=False)
        distances.append(len(search.ucs(searchProblem)))
    # distances = list(map(lambda x: abs(state[0][0] - x[0]) + abs(state[0][1] - x[1]), foodGrid.asList())) # about 9551 nodes
    return max(distances)
Example #29
    def solve(self):
        """
        This method should return a sequence of actions that covers all target locations on the board.
        This time we trade optimality for speed.
        Therefore, your agent should try to cover one target location at a time, each time aiming for the closest uncovered location.
        You may define helpful functions as you wish.

        Probably a good way to start, would be something like this --

        current_state = self.board.__copy__()
        backtrace = []

        while ....

            actions = set of actions that covers the closest uncovered target location
            add actions to backtrace

        return backtrace
        """
        current_state = self.board.__copy__()
        backtrace = []

        sortedTargets = self.sortFunction()

        problem = BlokusCoverProblem(current_state.board_w, current_state.board_h,
                                     current_state.piece_list, self.startingPoint)
        for target in sortedTargets:
            problem.targets = [target]
            actions = ucs(problem)
            problem.actions.extend(actions)

            for action in actions:
                problem.board.add_move(0, action)
            backtrace.extend(actions)
            self.expanded += problem.expanded
        return backtrace
Example #30
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     problem = AnyFoodSearchProblem(gameState)
     return search.ucs(problem)
Example #31
            [S, N, N, N],
            [N, M, M, M],
            [N, R, R, R],
            [N, N, N, G]
        ]
        goal = (3, 3)
    elif choice == 'big':
        grid = [
            [S, R, R, R, M, G],
            [N, R, R, R, M, N],
            [N, M, R, R, R, N],
            [N, N, N, N, R, N],
            [N, R, N, N, N, N],
            [N, R, N, N, N, N]
        ]
        goal = (0, 5)
    elif choice == 'random':
        rows, cols = 10, 10
        goal = (random.randint(0, rows-1), random.randint(0, cols-1))
        types = (NORMAL, MOUNTAIN, RIVER)
        grid = [[random.choice(types) for _ in range(rows)] for _ in range(cols)]
        grid[start[0]][start[1]] = START
        grid[goal[0]][goal[1]] = GOAL
    
    # Run each algorithm and visualize the result
    print(visualize('BFS', grid, goal, bfs(grid, start, goal)))
    print(visualize('DFS', grid, goal, dfs(grid, start, goal)))
    print(visualize('Dijkstra’s algorithm', grid, goal, ucs(grid, start, goal)))
    print(visualize('A* search', grid, goal, a_star(grid, start, goal)))

    When running from the command line, compare every algorithm against
    one of the tests.
    """
    choice = sys.argv[1]
    if choice not in {'little', 'big', 'random'}:
        print('Usage: python3 test.py (little|big|random)')

    start = (0, 0)
    if choice == 'little':
        grid = [[S, N, N, N], [N, M, M, M], [N, R, R, R], [N, N, N, G]]
        goal = (3, 3)
    elif choice == 'big':
        grid = [[S, R, R, R, M, G], [N, R, R, R, M, N], [N, M, R, R, R, N],
                [N, N, N, N, R, N], [N, R, N, N, N, N], [N, R, N, N, N, N]]
        goal = (0, 5)
    elif choice == 'random':
        rows, cols = 10, 10
        goal = (random.randint(0, rows - 1), random.randint(0, cols - 1))
        types = (NORMAL, MOUNTAIN, RIVER)
        grid = [[random.choice(types) for _ in range(rows)]
                for _ in range(cols)]
        grid[start[0]][start[1]] = START
        grid[goal[0]][goal[1]] = GOAL

    # Run each algorithm and visualize the result
    print(visualize('BFS', grid, goal, bfs(grid, start, goal)))
    print(visualize('DFS', grid, goal, dfs(grid, start, goal)))
    print(visualize('Dijkstra’s algorithm', grid, goal, ucs(grid, start,
                                                            goal)))
    print(visualize('A* search', grid, goal, a_star(grid, start, goal)))
Example #33
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     problem = AnyFoodSearchProblem(gameState)
     return search.ucs(problem)
Example #34
def cornersHeuristic(state, problem):
    """

    A heuristic for the CornersProblem that you defined.



      state:   The current search state

               (a data structure you chose in your search problem)



      problem: The CornersProblem instance for this layout.



    This function should always return a number that is a lower bound on the

    shortest path from the state to a goal of the problem; i.e.  it should be

    admissible (as well as consistent).

    """
    corners = problem.corners  # These are the corner coordinates
    walls = problem.walls  # These are the walls of the maze, as a Grid (game.py)

    "*** YOUR CODE HERE ***"
    # Idea: 1) get the Manhattan distance from the current successor states to each corner
    # 2) return the min distance (state to closest corner) for each successor state; the state
    #    with the min F = g + H value is popped first as the next action
    # 3) As a result, the algorithm always heads toward one of the closest corners
    #     xy1 = state
    #     #"The min Manhattan distance heuristic for a PositionSearchProblem"
    #     min_distance = min((abs(xy1[0] - corner[0]) + abs(xy1[1] - corner[1])) for corner in corners)
    #     return min_distance

    # New code starts here: use UCS to reach the goal list.
    # Idea: 1) check the real distance (not the Manhattan distance) from the start point to
    #    each corner (as goal), and return the corner with the longest action list

    # 2) check the Manhattan distance toward that farthest corner until only the last corner
    #    remains; return that last corner as the first corner to visit
    state2corner_action_list = {}
    temp_corners = []
    #start_state = problem.getStartState()
    # call the general search agent; default is "DFS", "Position search problem"
    #search_method = SearchAgent(fn="aStarSearch",heuristic="manhattanHeuristic")
    # define the goal of the position search problem
    for corner in corners:
        #problem = PositionSearchProblem(  corner_game_state.deepCopy(),  goal=corner)
        # use A* in a Position search to find the action list
        #state2corner_action_list[corner]= search.aStarSearch(problem,heuristic=manhattanHeuristic)
        actions = search.ucs(problem)  # note: this runs UCS on the CornersProblem itself, not on a per-corner position problem
        state2corner_action_list[corner] = problem.getCostOfActions(actions)
    # assign one of the values as the initial maximum
    max_action_list = state2corner_action_list[corner]
    # find the first corner to visit here -> step 1, return the longest path
    for corner in corners:
        if state2corner_action_list[corner] >= max_action_list:
            max_action_list = state2corner_action_list[corner]
            longest_path_corner = corner
    # remove the longest-path corner; find the corner closest to the longest-path corner
    temp_corners = [c for c in corners if c != longest_path_corner]
    xy1 = longest_path_corner

    while len(temp_corners) > 1:
        # assign the first corner
        corner = temp_corners[0]
        # assign the min distance to the first corner
        min_distance = abs(xy1[0] - corner[0]) + abs(xy1[1] - corner[1])
        # find the corner with the minimum distance to the longest-path corner
        for corner in temp_corners:
            if (abs(xy1[0] - corner[0]) +
                    abs(xy1[1] - corner[1])) <= min_distance:
                min_distance = abs(xy1[0] - corner[0]) + abs(xy1[1] -
                                                             corner[1])
                closest_corner = corner
        # update the corner list: remove the closest corner and use it as the new reference point
        temp_corners = [c for c in temp_corners if c != closest_corner]
        xy1 = closest_corner

    # temp_corners should now hold exactly one corner, and that is the target corner
    target_corner = temp_corners[0]
    print("target corner", target_corner)
    # return the Manhattan distance to the target corner
    xy1 = state
    distance = abs(xy1[0] - target_corner[0]) + abs(xy1[1] - target_corner[1])
    return distance
Example #35
    def getAction(self, state):
        """
        From game.py:
        The Agent will receive a GameState and must return an action from
        Directions.{North, South, East, West, Stop}
        """
        "*** YOUR CODE HERE ***"
#     print(state)
        pacpoint=state.getPacmanPosition()
        foodgrid = state.getFood()
        curstate=(pacpoint,foodgrid)
#     if (currentState.getFood().count() > 0):
#     startPosition = state.getPacmanPosition()
#     food = state.getFood()
#     walls = state.getWalls()
#     problem = AnyFoodSearchProblem(state)
#     import search
# #     return 0
#     return search.ucs(problem)
        len(foodgrid[0])
        prob= FoodSearchProblem(state)
        d=[]
        mindist =99999999
        minpoint = pacpoint
        finalans=0
#     print(pacpoint)
#     print(foodgrid.width)
#     print(type(foodgrid))
#     print(state.getFood()[5][5])
        problem = FoodSearchProblem(state)
        flag=False
        for y in range(pacpoint[1],pacpoint[1]+foodgrid.height-1):
            y = y%(foodgrid.height-1)
            if (flag==True):
                break
#         print(x)
            for x in range(pacpoint[0],pacpoint[0]+foodgrid.width-1):
                x=x% (foodgrid.width-1)
#             print(y)
            
                if (state.getFood()[x][y]==True):
#                 print('true')#
#                 ans= mazeDistance(pacpoint,(x,y),state)
#                 ans=((foodHeuristic(((x,y),foodgrid),problem)),0)
                    dist = util.manhattanDistance(pacpoint, (x,y))
#                 if (ans[0]<mindist | x+1==pacpoint[0]|x-1==pacpoint[0]| x+1==pacpoint[1]|x-1==pacpoint[1]):
                    if (dist==1 or (dist<mindist and mazeDistance(pacpoint,(x,y),state)<mindist) ):
                        minpoint=(x,y)
                        if (dist==1):
                            mindist=1
                            flag=True
                            break
                        mindist = dist
                        final = mindist[1]
                    
#                     finalans=ans
                    
                        
#     print(minpoint)
#     print(pacpoint)
#     print(mindist)
#     problem = PositionSearchProblem(state, goal=minpoint, start=pacpoint, warn=False)
            prob = PositionSearchProblem(state, start=pacpoint, goal=minpoint, warn=False)
#     prob = AnyFoodSearchProblem(state)
    
            self.cost+=1
            
#     return finalans[1][0]
            if( mindist!=1):
                return final[0]
            return search.ucs(prob)[0]
Example #36
    def getAction(self, state):
        """
        From game.py:
        The Agent will receive a GameState and must return an action from
        Directions.{North, South, East, West, Stop}
        """
        "*** YOUR CODE HERE ***"
        #     print(state)
        pacpoint = state.getPacmanPosition()
        foodgrid = state.getFood()
        curstate = (pacpoint, foodgrid)
        #     if (currentState.getFood().count() > 0):
        #     startPosition = state.getPacmanPosition()
        #     food = state.getFood()
        #     walls = state.getWalls()
        #     problem = AnyFoodSearchProblem(state)
        #     import search
        # #     return 0
        #     return search.ucs(problem)
        len(foodgrid[0])
        prob = FoodSearchProblem(state)
        d = []
        mindist = 99999999
        minpoint = pacpoint
        finalans = 0
        #     print(pacpoint)
        #     print(foodgrid.width)
        #     print(type(foodgrid))
        #     print(state.getFood()[5][5])
        problem = FoodSearchProblem(state)
        flag = False
        for y in range(pacpoint[1], pacpoint[1] + foodgrid.height - 1):
            y = y % (foodgrid.height - 1)
            if (flag == True):
                break
#         print(x)
            for x in range(pacpoint[0], pacpoint[0] + foodgrid.width - 1):
                x = x % (foodgrid.width - 1)
                #             print(y)

                if (state.getFood()[x][y] == True):
                    #                 print('true')#
                    #                 ans= mazeDistance(pacpoint,(x,y),state)
                    #                 ans=((foodHeuristic(((x,y),foodgrid),problem)),0)
                    dist = util.manhattanDistance(pacpoint, (x, y))
                    #                 if (ans[0]<mindist | x+1==pacpoint[0]|x-1==pacpoint[0]| x+1==pacpoint[1]|x-1==pacpoint[1]):
                    if (dist == 1
                            or (dist < mindist
                                and mazeDistance(pacpoint,
                                                 (x, y), state) < mindist)):
                        minpoint = (x, y)
                        if (dist == 1):
                            mindist = 1
                            flag = True
                            break
                        mindist = dist
                        final = mindist[1]

#                     finalans=ans

#     print(minpoint)
#     print(pacpoint)
#     print(mindist)
#     problem = PositionSearchProblem(state, goal=minpoint, start=pacpoint, warn=False)
            prob = PositionSearchProblem(state,
                                         start=pacpoint,
                                         goal=minpoint,
                                         warn=False)
            #     prob = AnyFoodSearchProblem(state)

            self.cost += 1

            #     return finalans[1][0]
            if (mindist != 1):
                return final[0]
            return search.ucs(prob)[0]