Example No. 1
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

         As noted in search.py:
            For a given state, this should return a list of triples, (successor,
            action, stepCost), where 'successor' is a successor to the current
            state, 'action' is the action required to get there, and 'stepCost'
            is the incremental cost of expanding to that successor
        """

        successors = []
        for corner in state[1]:
            directions = []
            start = state[0][0]
            problem = PositionSearchProblem(gameState=self.gameState,
                                            goal=corner,
                                            start=start)
            for direction in search.uniformCostSearch(problem=problem):
                directions.append(direction)
            remainingCorners = []
            for remain_corner in state[1]:
                if remain_corner == corner:
                    continue
                remainingCorners.append(remain_corner)
            newState = [[corner], remainingCorners]
            successors.append([newState, directions, len(directions)])

        self._expanded += 1  # DO NOT CHANGE
        successors.sort(cmp=compSuccessors)
        return successors
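The sort at the end relies on a compSuccessors comparator that is not shown anywhere in the snippet. A minimal sketch, assuming it simply orders the [newState, directions, stepCost] entries by ascending step cost (Python 2 cmp-style, matching the sort(cmp=...) call above):

def compSuccessors(a, b):
    # a[2] and b[2] are the step costs (the lengths of the UCS paths built
    # above); cmp() returns -1, 0, or 1, as list.sort(cmp=...) expects.
    return cmp(a[2], b[2])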
Example No. 2
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        return search.uniformCostSearch(problem)    # find path to closest food
        """
        For the above:
        Path found with cost 350.
        Pacman emerges victorious! Score: 2360
        Average Score: 2360.0
        Scores:        2360.0
        Win Rate:      1/1 (1.00)
        Record:        Win
        """
        #return search.bfs(problem)
        """
        For the above:
        Path found with cost 350.
        Pacman emerges victorious! Score: 2360
        Average Score: 2360.0
        Scores:        2360.0
        Win Rate:      1/1 (1.00)
        Record:        Win
        """

        util.raiseNotDefined()
Example No. 3
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood().asList()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     # dist, prev  = bfs_finding(startPosition, problem)
     # if len(food) == 0:
     #     return 0
     # min_dist = 10000000000
     # for x in food:
     #     if x not in dist:
     #         continue
     #     if dist[x] < min_dist:
     #         min_dist = dist[x]
     #         min_food = x
     # res = []
     # while True:
     #     if min_food == startPosition:
     #         break
     #     prev_pos = prev[min_food][0]
     #     prev_dir = prev[min_food][1]
     #     min_food = prev_pos
     #     res.append(prev_dir)
     return search.uniformCostSearch(problem)
     "*** YOUR CODE HERE ***"
     util.raiseNotDefined()
Example No. 4
    def __init__(self, costFn=lambda x: 1):
        self.env = game()
        self.env.reset()
        self.action = -1
        self.costFn = costFn

        done = False  # ensure 'done' exists even if the first UCS path is empty
        while True:
            suc = self.getSuccessors(self.getStartState())

            if len(suc) < 1:
                self.env.setlose()
                done = True
                # step = random.randrange(0, 3)
                # self.env.step(step)
            else:
                step = random.choice(suc)
                self.env.step(step[1])

            path = search.uniformCostSearch(self)
            for action in path:
                # do it! render the previous view
                self.env.render()
                done = self.env.step(action)
                # print(env.getFood(), env.getLose(), env.getReward())

            if done:
                break
Example No. 5
 def findPathToClosestDot(self, gameState):
   "Returns a path (a list of actions) to the closest dot, starting from gameState"
   # Here are some useful elements of the startState
   startPosition = gameState.getPacmanPosition()
   food = gameState.getFood()
   walls = gameState.getWalls()
   problem = AnyFoodSearchProblem(gameState)
   return search.uniformCostSearch(problem) 
Example No. 6
 def runTest(self):
     print "Path result for DFS:", search.depthFirstSearch(self)
     print "Path result for BFS:", search.breadthFirstSearch(self)
     print "Path result for UCS:", search.uniformCostSearch(self)
     print "Path result for A*:", search.aStarSearch(
         self, search.nullHeuristic)
     print "Path result for A* with letter heuristic:", search.aStarSearch(
         self, letterHeuristic)
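letterHeuristic is defined elsewhere and is not shown. Whatever it computes, search.aStarSearch expects a heuristic with the same (state, problem) signature as search.nullHeuristic; a trivial hypothetical stand-in with that shape looks like this:

def letterHeuristic(state, problem=None):
    # Placeholder only: a real heuristic would estimate the remaining cost
    # from 'state'; returning 0 makes A* behave exactly like UCS.
    return 0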
Example No. 7
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState

        #Really simple. Just called one of the functions in search.py
        problem = AnyFoodSearchProblem(gameState)
        path = search.uniformCostSearch(problem)
        return path
Example No. 8
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     problem = AnyFoodSearchProblem(gameState)
     from search import uniformCostSearch
     return uniformCostSearch(problem)
Example No. 9
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        problem = AnyFoodSearchProblem(gameState)

        return search.uniformCostSearch(problem)
Example No. 10
  def findPathToClosestDot(self, gameState):
    "Returns a path (a list of actions) to the closest dot, starting from gameState"
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)

    return search.uniformCostSearch(problem)
Example No. 11
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     #print gameState
     "*** YOUR CODE HERE ***"
     # return search.breadthFirstSearch(problem) # bfs score 2360
     return search.uniformCostSearch(problem)  # UCS score 2387
Example No. 12
    def getAction(self, state):
        """
        From game.py:
        The Agent will receive a GameState and must return an action from
        Directions.{North, South, East, West, Stop}
        """
        "*** YOUR CODE HERE ***"
        actions = search.uniformCostSearch(problem)

        return actions
        util.raiseNotDefined()
Example No. 13
    def solve(self):
        """Solve the current 8-puzzle using the selected algorithm.

        """
        self.clearStats()

        self.problem = eightPuzzle.EightPuzzleSearchProblem(self.puzzle)
        searchAlgorithm = self.algorithm.get()
        if searchAlgorithm == "Depth-first Search":
            self.solution = search.depthFirstSearch(self.problem)
            self.stateHistory = self.solution[3]
        elif searchAlgorithm == "Breadth-first Search":
            self.solution = search.breadthFirstSearch(self.problem)
            self.stateHistory = self.solution[3]
            pass
        elif searchAlgorithm == "Uniform-cost Search":
            self.solution = search.uniformCostSearch(self.problem)
            self.stateHistory = self.solution[3]
            pass
        elif searchAlgorithm == "Greedy Best-first Search":
            #self.solution = search.greedyBestFirstSearch(self.problem)
            #self.stateHistory = self.solution[3]
            pass
        elif searchAlgorithm == "A* Search [Manhattan Dist.]":
            #self.solution = search.aStarSearch(self.problem)
            #self.stateHistory = self.solution[3]
            pass
        else:
            #self.solution = search.recursiveBestFirstSearch(self.problem)
            #self.stateHistory = self.solution[3]
            pass
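        # Note: the three unimplemented branches above leave self.solution
        # unset, so the statistics labels built below assume one of the first
        # three algorithms was selected.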

        self.stats = tk.Label(self, text= (
                "A solution was found in %.3f seconds and requires %s moves."
                %(
                    self.solution[2],
                    (len(self.solution[0]) - 1)
                    )
                )
            )
        self.stats.grid(row=3, column=1, columnspan=5)

        self.stats2 = tk.Label(self, text= (
                "%s nodes were expanded during the search."
                %(len(self.solution[1]))
                )
            )
        self.stats2.grid(row=4, column=1, columnspan=5)

        self.stepInfo = tk.Label(self, text= (
            "Use Previous and Next to show the solution moves.")
            )
        self.stepInfo.grid(row=5, column=1, columnspan=5, padx=5, pady=5)
Example No. 14
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        from search import uniformCostSearch
        return uniformCostSearch(problem)
        util.raiseNotDefined()
Example No. 15
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        from search import uniformCostSearch
        return uniformCostSearch(problem)
        util.raiseNotDefined()
Example No. 16
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        # We can use either any positionalSearch Algorithms to calculate the nearest food node path
        return search.uniformCostSearch(problem)
Example No. 17
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     #running the uniform cost search with the goal of any food
     #will find the closest one
     return search.uniformCostSearch(problem)
Example No. 18
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        # We can use either any positionalSearch Algorithms to calculate the nearest food node path
        return search.uniformCostSearch(problem)
Example No. 19
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     #print "food: ", food.asList()
     "*** YOUR CODE HERE ***"
     return search.uniformCostSearch(problem)
Example No. 20
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        #return search.breadthFirstSearch(problem) # COST = 350
        #return search.depthFirstSearch(problem) # COST = 4808
        return search.uniformCostSearch(problem)  # COST = 350
Example No. 21
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        #print("fffffffooooooooddddddd: ", food)
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        #while problem.isGoalState()
        return search.uniformCostSearch(problem)

        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
Example No. 22
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     
     "*** YOUR CODE HERE ***"
     copyFoodGrid = list(food.copy())[:]
     height = len(copyFoodGrid)
     width = len(copyFoodGrid[0])
     x,y = closestDot(startPosition,copyFoodGrid,width,height)
     searchFood = PositionSearchProblem(gameState, lambda x: 1, (x,y), startPosition)
     action = search.uniformCostSearch(searchFood)
     return action
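The closestDot helper called above is not shown. A minimal sketch, assuming it scans the copied food grid (game.Grid is indexed [x][y], so the outer list runs over columns) and returns the coordinates of the nearest dot by Manhattan distance; it takes its bounds from the grid itself rather than from the width/height arguments:

def closestDot(position, foodGrid, width, height):
    # Hypothetical helper matching the call above: return the (x, y) of the
    # food cell nearest to 'position' by Manhattan distance.
    best, bestDist = None, None
    for x in range(len(foodGrid)):
        for y in range(len(foodGrid[x])):
            if foodGrid[x][y]:
                d = abs(position[0] - x) + abs(position[1] - y)
                if bestDist is None or d < bestDist:
                    best, bestDist = (x, y), d
    return best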
Example No. 23
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        from search import uniformCostSearch, breadthFirstSearch, aStarSearch, depthFirstSearch
        result = uniformCostSearch(problem)
        # result2 = breadthFirstSearch(problem)
        # result3 = depthFirstSearch(problem)
        # result4 = aStarSearch(problem, foodHeuristic)

        return result
Example No. 24
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"

        # Radiate outward in the cardinal directions; the first food pellet
        # reached along the way is the closest dot.

        return search.uniformCostSearch(problem)
Example No. 25
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     # minDis = 99999
     
     # for x, y in food.asList():
     #     minDis > mazeDistance(startPosition, (x,y))
     #     closestDot = (x, y)
     problem = AnyFoodSearchProblem(gameState)
     from search import uniformCostSearch
     return uniformCostSearch(problem)
Example No. 26
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        
        "*** YOUR CODE HERE ***"
        
        
        # Radiate outward in the cardinal directions; the first food pellet
        # reached along the way is the closest dot.

        return search.uniformCostSearch(problem)
Example No. 27
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        pos = (-1, -1)
        min1 = 99999
        # for i in food:
        #     if min1 > util.manhattanDistance(startPosition,i):
        #         min1 = util.manhattanDistance(startPosition,i)
        #         pos = i
        return search.uniformCostSearch(problem)
Example No. 28
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        """
        For this search problem, we used uniform cost search as it is the fastest, greedy way of
        solving the problem. It does so by following the action or list of actions with the least
        cost.

        It runs almost the same as BFS since it does not stop until a level has been traversed.
        DFS runs a lot slower since it does some backtracking when traversing the maze.
        """
        return search.uniformCostSearch(problem)
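A quick way to check the claim that UCS and BFS agree on this problem is to run both on fresh AnyFoodSearchProblem instances and compare path lengths; a minimal sketch, assuming a GameState named gameState and the usual search/searchAgents imports:

# Sanity check: with unit step costs, UCS and BFS should find equally short
# paths to the closest dot (the paths themselves may differ when there are ties).
ucsPath = search.uniformCostSearch(AnyFoodSearchProblem(gameState))
bfsPath = search.breadthFirstSearch(AnyFoodSearchProblem(gameState))
assert len(ucsPath) == len(bfsPath)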
Example No. 29
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        #        heuristic = []
        #        cur_position = gameState[0]
        #        foodspots = food.asList()
        #        for foodspot in foodspots:
        #            distance = (mazeDistance(cur_position, foodspot, gameState))
        #            heuristic.append(foodspot,distance)
        #
        #        frontier = util.PriorityQueue()
        #        startstate = problem.getStartState()
        #        path = []
        #        explored = []
        #        cost = 0
        #        frontier.push((startstate,path,cost),cost)
        #
        #        for i in range(0,100000):
        #            if frontier.isEmpty() == 0:
        #                node,path,cost = frontier.pop()
        #                if node in explored:
        #                    continue
        #                else:
        #                    explored.append(node)
        #                if problem.isGoalState(node):
        #                    return path
        #                else:
        #                    successors = problem.getSuccessors(node)
        #                for s_nodes,actions,s_cost in successors:
        #                    t_cost = cost + s_cost
        #                    frontier.push((s_nodes, path + [actions],t_cost),t_cost)

        return search.uniformCostSearch(problem)
        util.raiseNotDefined()
Example No. 30
	def findPathToClosestDot(self, gameState):
		"""
		Returns a path (a list of actions) to the closest dot, starting from
		gameState.
		"""
		# Here are some useful elements of the startState
		startPosition = gameState.getPacmanPosition()
		food = gameState.getFood()
		walls = gameState.getWalls()
		problem = AnyFoodSearchProblem(gameState)

		"*** YOUR CODE HERE ***"
		foodWhere = food.asList()
		dis = dict()
		for f in foodWhere:
			dis[f] = mazeDistance(startPosition, f, gameState)
		nearest = min(dis, key=dis.get)
		print nearest
		return search.uniformCostSearch(problem)
		util.raiseNotDefined()
Example No. 31
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.  First, try to come up
    with an admissible heuristic; almost all admissible heuristics will be consistent
    as well.

    If using A* ever finds a solution that is worse than what uniform cost search finds,
    your heuristic is *not* consistent, and probably not admissible!  On the other hand,
    inadmissible or inconsistent heuristics may find optimal solutions, so be careful.

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a
    Grid (see game.py) of either True or False. You can call foodGrid.asList()
    to get a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the problem.
    For example, problem.walls gives you a Grid of where the walls are.

    If you want to *store* information to be reused in other calls to the heuristic,
    there is a dictionary called problem.heuristicInfo that you can use. For example,
    if you only want to count the walls once and store that value, try:
      problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    height = foodGrid.height
    width = foodGrid.width
    num = 0
    # count the number of food dots remaining
    for x in range(width):
      for y in range(height):
        if foodGrid[x][y]:
          num += 1
    # to earn the third point, run an actual (no-walls) search once only a
    # few dots remain
    if num < 5:
      problem = FoodSearchNoWallsProblem(position, foodGrid, height, width)
      actions = search.uniformCostSearch(problem)
      return problem.getCostOfActions(actions)
    return num
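The counting loop above has a one-line equivalent: game.Grid provides a count() method that returns the number of True cells, so the nested loops can be collapsed without changing behaviour:

    # Equivalent to the nested counting loops above.
    num = foodGrid.count()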
Example No. 32
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        #
        # *** HERE'S OUR CODE ***
        # We tried each of the search algorithms available in search.py by
        # trial and error. BFS is a natural fit for finding the closest food
        # since it expands the shallowest nodes first: DFS returned a path
        # cost of 5324, while BFS returned a path cost of only 350, as did
        # UCS and A* (A* and UCS match because this problem supplies no
        # heuristic, so A* falls back to nullHeuristic).
        return search.uniformCostSearch(problem)
Example No. 33
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        # Pick the pellet nearest to Pacman by straight-line (Euclidean)
        # distance, then run UCS to that specific position.
        def euclideanDistance(a, b):
            return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5

        closestPellet = None
        for pellet in food.asList():
            if closestPellet is None or euclideanDistance(
                    startPosition, pellet) < euclideanDistance(
                        startPosition, closestPellet):
                closestPellet = pellet

        if closestPellet is not None:
            problem = PositionSearchProblem(gameState,
                                            start=startPosition,
                                            goal=closestPellet,
                                            warn=False)
            return search.uniformCostSearch(problem)
Example No. 34
def findSolution(method, filterActionsValue, player, end_rect, walls):
    '''
    Used to call the correct problem and search method determined by the GUI.
    Returns the path from a node
    '''
    pos = player.position
    problem = None
    if (filterActionsValue == 1):
        problem = ProblemFilter((pos[0], pos[1], walls),
                                (end_rect.x, end_rect.y))
    else:
        problem = Problem((pos[0], pos[1], walls), (end_rect.x, end_rect.y))
    solution = None
    if (method == 0):
        solution = search.breadthFirstTreeSearch(problem)
    elif (method == 1):
        solution = search.breadthFirstGraphSearch(problem)
    elif (method == 2):
        solution = search.depthFirstTreeSearch(problem)
    elif (method == 3):
        solution = search.depthFirstGraphSearch(problem)
    elif (method == 4):
        solution = search.uniformCostSearch(problem)
    elif (method == 5):
        solution = search.iterativeDeepeningSearch(problem)
    elif (method == 6):
        solution = search.greedyTreeSearch(problem)
    elif (method == 7):
        solution = search.greedyGraphSearch(problem)
    elif (method == 8):
        solution = search.astarTreeSearch(problem)
    elif (method == 9):
        solution = search.astarGraphSearch(problem)
    if solution is None:
        raise Exception('No solution was found for the selected search method')
    print(solution.path())
    return solution.path()
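The if/elif ladder can also be written as a lookup table over the same search functions; a sketch of that purely stylistic alternative:

# Hypothetical table-based dispatch equivalent to the if/elif chain above.
SEARCHERS = {
    0: search.breadthFirstTreeSearch,
    1: search.breadthFirstGraphSearch,
    2: search.depthFirstTreeSearch,
    3: search.depthFirstGraphSearch,
    4: search.uniformCostSearch,
    5: search.iterativeDeepeningSearch,
    6: search.greedyTreeSearch,
    7: search.greedyGraphSearch,
    8: search.astarTreeSearch,
    9: search.astarGraphSearch,
}
solution = SEARCHERS[method](problem)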
Example No. 35
        # Execute a random legal move
        puzzle = puzzle.result(random.sample(puzzle.legalMoves(), 1)[0])
    return puzzle


if __name__ == '__main__':
    random.seed(1)
    puzzle = createRandomEightPuzzle(25)
    print('A random puzzle:')
    print(puzzle)

    problem = EightPuzzleSearchProblem(puzzle)

    print "**************************** Uniform Cost Search ************************"
    #Uniform cost algorithm
    uresult = search.uniformCostSearch(problem)

    #store the path in upath
    upath = uresult[0]

    #store the number of nodes generated in ugenNodes
    ugenNodes = uresult[2]

    #store the cost in ucost
    ucost = uresult[1]
    print('Uniform cost Search found a path of %d moves: %s' %
          (len(upath), str(upath)))
    curr = puzzle
    i = 1
    for a in upath:
        curr = curr.result(a)
Example No. 36
 def __init__(self):
     self.searchFunction = lambda prob: search.uniformCostSearch(prob)
     self.searchType = FoodSearchProblem
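For context, these two attributes are consumed by the agent's registerInitialState, which in the stock Berkeley SearchAgent does roughly the following (a sketch, not the verbatim framework code):

 def registerInitialState(self, state):
     # Build a FoodSearchProblem from the game state and solve it with the
     # configured search function (UCS here).
     problem = self.searchType(state)
     self.actions = self.searchFunction(problem)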
Example No. 37
 def runTest(self):
     print "Path result for DFS:",search.depthFirstSearch(self)
     print "Path result for BFS:",search.breadthFirstSearch(self)
     print "Path result for UCS:",search.uniformCostSearch(self)
     #print "Path result for A*:",search.aStarSearch(self,search.nullHeuristic)
     print "Path result for A* with letter heuristic:",search.aStarSearch(self,letterHeuristic)
Example No. 38
 def __init__(self):
     self.searchFunction = lambda prob: search.uniformCostSearch(prob)
     self.searchType = FoodSearchProblem
Example No. 39
		solution.display()
	else:
		print 'No solution found!!'
	'''



	n = 5
	strategy = search.astar

	puzzle = game.NPuzzle(n)
	puzzle.randomStartState()
	puzzle.randomGoalState(47)
	puzzle.startState.display()
	puzzle.goalState.display()

	if strategy == search.bfs:
		solution = search.breadthFirstSearch(puzzle)
	elif strategy == search.dfs:
		solution  = search.depthFirstSearch(puzzle)
	elif strategy == search.ucs:
		solution  = search.uniformCostSearch(puzzle)
	elif strategy == search.dls:
		solution = search.depthLimitedSearch(puzzle,6)
	elif strategy == search.astar:
		solution = search.astar(puzzle, game.manhattanDistance)

	if solution != None:
		solution.display()
	else:
		print 'No solution found!!'
Example No. 40
def foodHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness.  First, try to come up
    with an admissible heuristic; almost all admissible heuristics will be consistent
    as well.

    If using A* ever finds a solution that is worse than what uniform cost search finds,
    your heuristic is *not* consistent, and probably not admissible!  On the other hand,
    inadmissible or inconsistent heuristics may find optimal solutions, so be careful.

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a
    Grid (see game.py) of either True or False. You can call foodGrid.asList()
    to get a list of food coordinates instead.

    If you want access to info like walls, capsules, etc., you can query the problem.
    For example, problem.walls gives you a Grid of where the walls are.

    If you want to *store* information to be reused in other calls to the heuristic,
    there is a dictionary called problem.heuristicInfo that you can use. For example,
    if you only want to count the walls once and store that value, try:
      problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount']
    """
    position, foodGrid = state
    "*** YOUR CODE HERE ***"

    class tempPositionSearchProblem(search.SearchProblem):
        def __init__(self,
                     walls,
                     costFn=lambda x: 1,
                     goal=(1, 1),
                     start=None,
                     warn=True):
            self.walls = walls
            if start is not None: self.startState = start
            self.goal = goal
            self.costFn = costFn
            # The single-food sanity warning from PositionSearchProblem is
            # omitted: it needs a gameState, which is not in scope here, and
            # every call below passes warn=False anyway.

            # For display purposes
            self._visited, self._visitedlist, self._expanded = {}, [], 0

        def getStartState(self):
            return self.startState

        def isGoalState(self, state):
            isGoal = state == self.goal

            # For display purposes only
            if isGoal:
                self._visitedlist.append(state)
                import __main__
                if '_display' in dir(__main__):
                    if 'drawExpandedCells' in dir(
                            __main__._display):  # @UndefinedVariable
                        __main__._display.drawExpandedCells(
                            self._visitedlist)  # @UndefinedVariable

            return isGoal

        def getSuccessors(self, state):
            successors = []
            for action in [
                    Directions.NORTH, Directions.SOUTH, Directions.EAST,
                    Directions.WEST
            ]:
                x, y = state
                dx, dy = Actions.directionToVector(action)
                nextx, nexty = int(x + dx), int(y + dy)
                if not self.walls[nextx][nexty]:
                    nextState = (nextx, nexty)
                    cost = self.costFn(nextState)
                    successors.append((nextState, action, cost))

            # Bookkeeping for display purposes
            self._expanded += 1
            if state not in self._visited:
                self._visited[state] = True
                self._visitedlist.append(state)
            return successors

        def getCostOfActions(self, actions):
            """
            Returns the cost of a particular sequence of actions.  If those actions
            include an illegal move, return 999999
            """
            if actions is None: return 999999
            x, y = self.getStartState()
            cost = 0
            for action in actions:
                # Figure out the next state and check whether it's legal
                dx, dy = Actions.directionToVector(action)
                x, y = int(x + dx), int(y + dy)
                if self.walls[x][y]: return 999999
                cost += self.costFn((x, y))
            return cost

    from util import manhattanDistance

    food = foodGrid.asList()
    toNearestFood = 999999

    if len(food) == 0:
        return 0

    if len(food) == 1:
        return manhattanDistance(position, food[0])
    else:
        longest = 0
        pos1 = -1
        pos2 = -1
        for i in range(0, len(food)):
            for j in range(i + 1, len(food)):
                if (food[i], food[j]) in problem.heuristicInfo:
                    length = problem.heuristicInfo[(food[i], food[j])]
                else:
                    prob = tempPositionSearchProblem(problem.walls,
                                                     start=food[i],
                                                     goal=food[j],
                                                     warn=False)
                    length = len(search.uniformCostSearch(prob))
                    problem.heuristicInfo[(food[i], food[j])] = length
                    problem.heuristicInfo[(food[j], food[i])] = length
                if length > longest:
                    longest = length
                    pos1 = i
                    pos2 = j
        dist = longest + min(manhattanDistance(position, food[pos1]),
                             manhattanDistance(position, food[pos2]))
        return dist
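For reference, the framework helper searchAgents.mazeDistance wraps this same point-to-point search pattern; it cannot be used here only because the heuristic has no GameState in scope, which is why the nested problem class exists:

    # Hypothetical alternative if a GameState `gameState` were available:
    #     length = mazeDistance(food[i], food[j], gameState)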