Example #1
    def goHomeAction(self, gamestate):
        print "Going home"
        #find shortest path to home
        #generate exclusion zones around the enemies, find shortest path that doesn't go through an exclusion zone

        #options for exclusion zones:

            #if distance from pos to self>distance from pos to enemy
            #distance from pos to self==distance from pos to enemy

        ez = self.genExclusionZones(gamestate)

        goHomeProb = PacmanPosSearch(self.getMyPos(gamestate), self.data.borderPositions, gamestate, ez)


        def heuristic(state, problem):
            if state in self.data.borderDistances:
                return self.data.borderDistances[state]
            else:
                return 0


        path, _ = search.astar(goHomeProb, heuristic)
        if not path:
            #relax the exclusion zones to only be where the enemy is
            ghp2 = PacmanPosSearch(self.getMyPos(gamestate), self.data.borderPositions, gamestate, list(self.knownEnemies.values()))
            path, _ = search.astar(ghp2, heuristic)
            if not path:
                # still no path home; just stop and wait, since we can't go anywhere
                path = [game.Directions.STOP]
        return path[0]  # return the first action in the path
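The genExclusionZones helper used above is not part of the excerpt. Below is a minimal, hypothetical sketch of what such a helper might look like, assuming it receives the agent's position, the known enemy positions, and the set of walkable cells (all names here are illustrative, not from the original repo), and excludes any cell that an enemy is at least as close to as we are:

# Hypothetical sketch, not the original repo's implementation: mark every
# walkable cell that an enemy could reach no later than we can, using
# Manhattan distance as a cheap stand-in for true maze distance.
def genExclusionZonesSketch(myPos, enemyPositions, walkablePositions):
    def manhattan(a, b):
        return abs(a[0] - b[0]) + abs(a[1] - b[1])

    excluded = []
    for pos in walkablePositions:
        for enemy in enemyPositions:
            if manhattan(pos, enemy) <= manhattan(pos, myPos):
                excluded.append(pos)
                break
    return excluded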
Example #2
def mhelper(start, goal, problem):
    problem2 = PositionSearchProblem(problem.startingGameState,
                                     start=start,
                                     goal=goal,
                                     warn=False,
                                     visualize=False)

    # Returning manhattanHeuristic(start, problem2) directly only scored 3/4
    # on the autograder and expanded 9000+ nodes; running A* with
    # euclideanHeuristic scores 5/4 and expands only 4000+ nodes.
    distance = len(search.astar(problem2, heuristic=euclideanHeuristic))
    print(distance)
    return distance
Example #3
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        return search.astar(problem)
Example #4
    def __init__(
        self,
        agent,
        ref_object=None,  # the memory of the object to be encircled
        relative_direction="CLOCKWISE",
    ):
        self.agent = agent
        self.tick = 0
        blocks = [(bpos, bid) for bpos, bid in ref_object.blocks.items()]
        bounds = shapes.get_bounds(blocks)
        center = np.mean([b[0] for b in blocks], axis=0)

        d = max(bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])
        if relative_direction == "CLOCKWISE":
            offsets = shapes.arrange(
                "circle", schematic=None, shapeparams={"encircled_object_radius": d}
            )
        elif relative_direction == "ANTICLOCKWISE":
            offsets = shapes.arrange(
                "circle", schematic=None, shapeparams={"encircled_object_radius": d}
            )
            offsets = offsets[::-1]
        else:
            raise NotImplementedError("TODO other kinds of paths")
        self.path = [np.round(center + o) for o in offsets]
        self.path.append(self.path[0])

        # check each offset to find a nearby reachable point, see if a path
        # is possible now, and error otherwise

        for i in range(len(self.path) - 1):
            path = search.astar(agent, self.path[i + 1], approx=2, pos=self.path[i])
            if path is None:
                raise ErrorWithResponse("I cannot find an appropriate path.")
Example #5
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        # print(food.asList())
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        # def greedyHeuristic(state, problem=None):
        #     """
        #     A heuristic function estimates the cost from the current state to the nearest
        #     goal in the provided SearchProblem.  This heuristic is trivial.
        #     """
        #     foodDistance = [(fP, mazeDistance(state, foodPosition, gameState)) for fP in food.asList()]
        #     return min(foodDistance)

        "*** YOUR CODE HERE ***"
        foodDistance = [(foodPosition,
                         mazeDistance(startPosition, foodPosition, gameState))
                        for foodPosition in food.asList()]
        foodDistance = sorted(foodDistance, key=lambda x: x[1], reverse=False)
        closestFood = foodDistance[0][0]
        prob = PositionSearchProblem(gameState,
                                     start=startPosition,
                                     goal=closestFood,
                                     warn=False,
                                     visualize=False)
        return search.astar(prob)
Example #6
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built. The gameState can be any game state -- Pacman's
    position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    x1 = int(x1)
    x2 = int(x2)
    y1 = int(y1)
    y2 = int(y2)

    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState,
                                 start=(x1, y1),
                                 goal=(x2, y2),
                                 warn=False,
                                 visualize=False)
    return len(search.astar(prob, manhattanHeuristic))
Example #7
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        """
        from search import SearchNode
        from search import expand
        rootNode = SearchNode(startPosition, None, None, 0, 0)
        visited = {}

        n = rootNode
        while True:
            successors = expand(problem, n)
            if not successors:
                return n.backtrack()
            m = None
            value = -1
            for succ in successors:
                if succ.cost > value:
                    m = succ
            if n.cost <= m.cost:
                return n.backtrack()
            n = m
            """
        return search.astar(problem)
Example #8
def shortest_path(start, goal, problem):
    # print "searching shortest path"
    new_problem = PositionSearchProblem(problem.startingGameState,
                                        start=start,
                                        goal=goal,
                                        warn=False)
    return len(search.astar(new_problem, heuristic=manhattanHeuristic))
Example #9
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        food_list = food.asList()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        min_d = float('inf')
        n = None
        for i in range(len(food_list)):
            d = mazeDistance(startPosition, food_list[i], gameState)
            if d < min_d:
                min_d = d
                n = i
        print("starting position : ", startPosition)
        print(food_list)
        problem.goal = food_list[n]
        print(food_list[n])

        "*** YOUR CODE HERE ***"
        actions = search.astar(problem)
        print(actions)
        food[food_list[n][0]][food_list[n][1]] = False
        return actions
Example #10
	def getPath(self, gameState, source, target):
		
		# basic caching of paths
		if (source, target) in self.pathCache:
			#print "Found path from %s to %s in pathCache" % (source, target)
			return self.pathCache[(source, target)]
		elif (target, source) in self.pathCache:
			#print "Found path from %s to %s in pathCache" % (source, target)
			return reversed(self.pathCache[(target, source)])

		print "getPath(%s, %s) called, computing using A*" % (source, target)
		# compute path using A* search with known optimal maze distance as heuristic
		problem = MSBPathfindingSearchProblem(source, target, self.legalPositions)
		def heuristic(state, prob):
			return self.getDistancer(gameState).getDistance(state, target)
		path = search.astar(problem, heuristic)
		assert len(path) == self.getDistancer(gameState).getDistance(source, target), "A* found non-optimal path from %s to %s" % (source, target)
		
		# update cache
		self.pathCache[(source, target)] = path
		for i in range(0,len(path)-1):
			self.pathCache[(path[i], target)] = path[i+1:]

		print "getPath(%s, %s) returning; len(pathCache)=%d" % (source, target, len(self.pathCache))
		return path
Example #11
    def __init__(self):
        # Break circular dependency.
        import searchAgents_student

        self.searchFunction = lambda prob: search.astar(
            prob, searchAgents_student.foodHeuristic)
        self.searchType = FoodSearchProblem
Example #12
    def solve(self):
        """
        This method should return a sequence of actions that covers all target locations on the board.
        This time we trade optimality for speed.
        Therefore, your agent should try to cover one target location at a time, each time aiming for the closest uncovered location.
        You may define helpful functions as you wish.

        Probably a good way to start, would be something like this --

        current_state = self.board.__copy__()
        backtrace = []

        while ....

            actions = set of actions that covers the closest uncovered target location
            add actions to backtrace

        return backtrace
        """
        target = self.closestPoint([self.startingPoint])
        path = []
        while target is not None:
            coverProblem = BlokusCoverProblem(self.boardW, self.boardH, self.pieceList, self.startingPoint, [target], self.board)
            actions = astar(coverProblem, blokus_cover_heuristic)
            for action in actions:
                path.append(action)
                self.board.add_move(0, action)
            self.pieceList = self.board.piece_list
            legals = list()
            for x in range(self.boardH):
                for y in range(self.boardW):
                    if self.board.connected[0, y, x] and self.board.state[y, x] == -1:
                        legals.append([x, y])
            target = self.closestPoint(legals)
        return path
Example #13
    def findPathToClosestDot(self, gameState: pacman.GameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        # we don't know where the closest dot is, so let's estimate the closest dot
        import time
        start_time = time.time()

        # print("food (%s): %s" %(type(food), food))

        # getting the closest dot to set it as the goal
        problem.goal = __getClosestGoal__(
            startPosition,
            food.asList())  # so that the heuristic knows the goal

        import search
        path = search.astar(problem, heuristic=euclideanHeuristic)
        print("findPathToClosestDot() took %2.5f seconds" %
              (time.time() - start_time))
        return path
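The __getClosestGoal__ helper called above is not included in the excerpt. A minimal sketch of one plausible implementation, assuming it simply picks the food position with the smallest straight-line distance (consistent with the euclideanHeuristic passed to astar):

# Hypothetical sketch of __getClosestGoal__ (not shown in the original source):
# choose the food position nearest to Pacman by straight-line distance.
def __getClosestGoal__(position, foodPositions):
    def euclidean(a, b):
        return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5

    return min(foodPositions, key=lambda food: euclidean(position, food))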
Example #14
    def findPathToClosestDot(self, gameState):
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        return search.astar(problem)
Example #15
 def testAstar(self):
     random.seed(12345)
     randomMoves = 32
     testPuzzle2 = puzzle8.randomState(randomMoves)
     puzzle8.display(testPuzzle2)
     solnPath = search.astar(testPuzzle2, search.manhattanDistance)
     self.assertLessEqual(len(solnPath),randomMoves)
Example #16
 def solve(self):
     if self.algorithm == BFS:
         return search.bfs(self, progress=self.progress)
     else:
         return search.astar(self,
                             heuristic=sokobanHeuristic,
                             progress=self.progress)
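The sokobanHeuristic referenced here is not shown, and the solver's state interface is unknown. A minimal, self-contained sketch of a classic admissible Sokoban heuristic of the same flavor, written over plain coordinate lists rather than the real state object (an assumption):

# Hypothetical sketch in the spirit of sokobanHeuristic (the real state
# interface is not shown): sum, over all boxes, the Manhattan distance to the
# nearest goal square. Each box needs at least that many pushes, so this
# estimate never overestimates the true cost.
def sokobanHeuristicSketch(boxes, goals):
    total = 0
    for bx, by in boxes:
        total += min(abs(bx - gx) + abs(by - gy) for gx, gy in goals)
    return total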
Example #17
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        foodGrid = food.asList()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)
        # ret=[]
        # maxi=99999999999
        # t=len(foodGrid)
        # for  i in (0,t-1):
        #     point=foodGrid[i]
        #     temp=mazeDistance(point,startPosition,gameState)
        #     if(len(temp)<maxi):
        #         maxi=len(temp)
        #         ret=temp

        "*** YOUR CODE HERE ***"
        astarList = search.astar(problem)
        bfsList = search.bfs(problem)

        if (len(astarList) < len(bfsList)):
            return astarList
        else:
            return bfsList
Example #18
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        foods = food.asList()
        # print(foods)
        point1 = startPosition
        dists = [(mazeDistance1(point1, g, gameState), g) for g in foods]
        dists = sorted(dists, key=lambda x: x[0])
        _, point2 = dists[0]
        prob = PositionSearchProblem(gameState,
                                     start=point1,
                                     goal=point2,
                                     warn=False,
                                     visualize=False)

        # actions = search.ucs(prob)
        actions = search.astar(prob, heuristic=manhattanHeuristic)
        return actions
Example #19
    def getAction(self, state):
        """
        Returns the next action the agent will take
        """

        # problem = MyFoodSearchProblem(state, self.index)

        if self.svePojedeno:
            return Directions.STOP
        else:

            if not self.actions:
                # actions = search.aStarSearch(problem)
                # print(actions)
                actions = search.astar(MyAgentFoodSearchProblem(state, self.index))  # the A* implemented in search.py, which takes our food problem as a parameter
                self.actions = actions

            if self.actions:
                nextAction = self.actions[0]
                self.actions.remove(nextAction)
                return nextAction

            else:
                self.svePojedeno = True
                return Directions.STOP
Example #20
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        from search import astar

        def mhDist(p1, p2):
            return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])

        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()
        food = gameState.getFood()
        walls = gameState.getWalls()

        dist = 999999999999999999
        p = None

        for foodPos in food.asList():
            tmp_problem = AnyFoodSearchProblem(gameState, foodPos)
            tmp_dist = manhattanHeuristic(startPosition, tmp_problem)
            if tmp_dist < dist:
                dist = tmp_dist
                p = tmp_problem

        return astar(p, manhattanHeuristic)
Example #21
    def step(self, agent):
        super().step(agent)
        if self.finished:
            return
        self.interrupted = False
        # wait certain amount of ticks until issuing next step
        # while not (agent.memory.get_time() - self.last_stepped_time) > self.throttling_tick:
        #    pass

        # replace blocks if possible
        R = self.replace.copy()
        self.replace.clear()
        for (pos, idm) in R:
            agent.set_held_item(idm)
            if agent.place_block(*pos):
                logging.info("Move: replaced {}".format((pos, idm)))
            else:
                # try again later
                self.replace.add((pos, idm))
        if len(self.replace) > 0:
            logging.info("Replace remaining: {}".format(self.replace))

        # check if finished
        if manhat_dist(tuple(agent.pos), self.target) <= self.approx:
            if len(self.replace) > 0:
                logging.error(
                    "Move finished with non-empty replace set: {}".format(
                        self.replace))
            self.finished = True
            if self.memid is not None:
                locmemid = agent.memory.add_location(self.target)
                locmem = agent.memory.get_location_by_id(locmemid)
                agent.memory.update_recent_entities(mems=[locmem])
                agent.memory.add_triple(subj=self.memid,
                                        pred_text="task_effect_",
                                        obj=locmemid)
                chat_mem_triples = agent.memory.get_triples(
                    subj=None, pred_text="chat_effect_", obj=self.memid)
                if len(chat_mem_triples) > 0:
                    chat_memid = chat_mem_triples[0][0]
                    agent.memory.add_triple(subj=chat_memid,
                                            pred_text="chat_effect_",
                                            obj=locmemid)
            return

        # get path
        if self.path is None or tuple(agent.pos) != self.path[-1]:
            self.path = search.astar(agent, self.target, self.approx)
            if self.path is None:
                self.handle_no_path(agent)
                return

        # take a step on the path
        assert tuple(agent.pos) == self.path.pop()
        step = tuple(self.path[-1] - agent.pos)
        step_fn = getattr(agent, self.STEP_FNS[step])
        step_fn()

        self.last_stepped_time = agent.memory.get_time()
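The STEP_FNS table used for dispatch above is not part of the excerpt. A minimal sketch of the pattern it relies on, with hypothetical method names (not the original mapping), showing how a unit step vector is turned into a method call on the agent via getattr:

# Hypothetical sketch of getattr-based step dispatch: unit step vectors map to
# movement method names on the agent.
STEP_FNS_SKETCH = {
    (1, 0, 0): "step_pos_x",
    (-1, 0, 0): "step_neg_x",
    (0, 0, 1): "step_pos_z",
    (0, 0, -1): "step_neg_z",
}

class AgentStub:
    """Toy agent with just enough methods to demonstrate the dispatch."""
    def step_pos_x(self):
        print("stepped +x")
    def step_neg_x(self):
        print("stepped -x")
    def step_pos_z(self):
        print("stepped +z")
    def step_neg_z(self):
        print("stepped -z")

agent = AgentStub()
step = (1, 0, 0)
getattr(agent, STEP_FNS_SKETCH[step])()  # prints "stepped +x"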
Example #22
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     pos = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = ContestProblem(gameState)
     return search.astar(problem)
Example #23
 def getroute(self, point1, point2, gameState):
     x1, y1 = point1
     x2, y2 = point2
     walls = gameState.getWalls()
     assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
     assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
     prob = PositionSearchProblem(gameState,
                                  start=point1,
                                  goal=point2,
                                  warn=False,
                                  visualize=False)
     search.astar(prob, cornersHeuristic)
     c = prob._expanded
     self._expanded += c
     search.bfs(prob)
     self.gap += (prob._expanded - c)
     return search.astar(prob, cornersHeuristic)
Example #24
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     return search.astar(problem)
Example #25
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        problem = AnyFoodSearchProblem(gameState)

        return search.astar(problem)
Example #26
def mazePath(point1, point2, gameState):
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(
        gameState, start=point1, goal=point2, warn=False, visualize=False)
    return search.astar(prob, manhattanHeuristic)
Example #27
 def fetch(self, block_name):
     """Mine and return a block to the player."""
     imag_bot = _ImaginaryBot(self._pos, self._inventory)
     block_id = getattr(block, block_name).id
     block_loc = self._get_block_loc(block_id)
     mine_prob = _MineProblem(imag_bot, block_loc, block_id)
     mine_actions = astar(mine_prob, _mine_heuristic)
     self.take_actions(mine_actions, _DELAY)
     imag_bot = _ImaginaryBot(self._pos, self._inventory)
     player_loc = _player_loc()
     return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
     return_actions = astar(return_prob, _return_heuristic)
     imag_bot.take_actions(return_actions)
     return_actions.append({
         'func': '_place',
         'args': (imag_bot.get_pos() + player_loc) / 2,
         'kwargs': {'block': block_id}
     })
     self.take_actions(return_actions, _DELAY)
Example #28
    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.
        """
        # Here are some useful elements of the startState
        problem = AnyFoodSearchProblem(gameState)

        "*** YOUR CODE HERE ***"
        return search.astar(problem)
Example #29
def muhDistance(point1, point2, gameState):
    """
    A cheap copy of the mazeDistance helper; hopefully useful.
    """
    prob = PositionSearchProblem(gameState,
                                 start=point1,
                                 goal=point2,
                                 warn=False,
                                 visualize=False)
    return len(search.astar(prob, manhattanHeuristic))
Example #30
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     global randomTarget
     # startPosition = gameState.getPacmanPosition(self.index)
     # food = gameState.getFood().asList()
     # walls = gameState.getWalls()
     problem = MultiFSP(gameState, self.index)
     random = RandomFSP(gameState, self.index)
     # print(randomTarget)
     "*** YOUR CODE HERE ***"
     path = search.astar(problem)
     if randomTarget[self.index]:
         # print("random")
         path = search.astar(random)
         # randomTarget[self.index] = False
     return path
Example #31
 def getAction(self, state):
     """
     From game.py:
     The Agent will receive a GameState and must return an action from
     Directions.{North, South, East, West, Stop}
     """
     startPosition = state.getPacmanPosition()
     food = state.getFood()
     walls = state.getWalls()
     problem = AnyFoodSearchProblem((startPosition, food))
     return search.astar(problem, 0)
Example #32
    def get_target(self, agent):
        p = agent.pos
        for i, c in enumerate(sorted(self.xyz_remaining, key=lambda c: util.manhat_dist(p, c))):
            path = search.astar(agent, c, approx=2)
            if path is not None:
                if i > 0:
                    logging.debug("Destroy get_target wasted {} astars".format(i))
                return c

        # No path to any of the blocks
        return None
Example #33
 def eatCapsuleAction(self, gamestate):
     print "Eating capsule"
     ez = self.genExclusionZones(gamestate)
     prob = PacmanPosSearch(self.getMyPos(gamestate), self.getCapsules(gamestate), gamestate, ez)
     #TODO: add a heuristic using dist to nearest capsule
     path, _ = search.astar(prob) #use a-star, null heuristic
     if not path:
         # no safe path to a capsule; fall back to going home
         return HLA.goHome(self, gamestate)
     return path[0]
Example #34
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     # We can use BFS, UCS, or A* here; they will all return a path to the nearest dot.
     return search.astar(problem,search.nullHeuristic)
Example #35
def shortestPath(problem, newGoal, pacmanPos):
    """
    This is a helper function for my foodHeuristic. It finds the lowest-cost
    path from Pacman's position to a dot, taking the walls into account, so
    following that path is the cheapest way to reach the dot.
    """
    updateProblem = PositionSearchProblem(problem.startingGameState,
                                          goal=newGoal,
                                          start=pacmanPos,
                                          warn=False)
    path_length = len(search.astar(updateProblem, manhattanHeuristic))
    return path_length
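The foodHeuristic that this helper serves is not included in the excerpt. A minimal sketch of one way it could use shortestPath, assuming the usual (pacmanPosition, foodGrid) state layout of FoodSearchProblem (an assumption): the true maze distance to the farthest remaining dot, which never overestimates the real cost.

# Hypothetical sketch of a foodHeuristic built on shortestPath (the original
# foodHeuristic is not shown): Pacman must at least walk to the farthest
# remaining dot, so this estimate is admissible.
def foodHeuristicSketch(state, problem):
    position, foodGrid = state
    foodList = foodGrid.asList()
    if not foodList:
        return 0
    return max(shortestPath(problem, food, position) for food in foodList)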
Example #36
 def getAction(self, state):
     """
     From game.py:
     The Agent will receive a GameState and must return an action from
     Directions.{North, South, East, West, Stop}
     """
     "*** YOUR CODE HERE ***"
     pos = state.getPacmanPosition()
     problem = AnyFoodSearchProblem(state)
     approxHeuristicFn = lambda nextPos, prob: mazeDistance(nextPos, pos, state)
     return search.astar(problem, approxHeuristicFn)[0]
Example #37
 def fetch(self, block_name):
     """Mine and return a block to the player."""
     imag_bot = _ImaginaryBot(self._pos, self._inventory)
     block_id = getattr(block, block_name).id
     block_loc = self._get_block_loc(block_id)
     mine_prob = _MineProblem(imag_bot, block_loc, block_id)
     mine_actions = astar(mine_prob, _mine_heuristic)
     self.take_actions(mine_actions, _DELAY)
     imag_bot = _ImaginaryBot(self._pos, self._inventory)
     player_loc = _player_loc()
     return_prob = _ReturnProblem(imag_bot, block_id, player_loc)
     return_actions = astar(return_prob, _return_heuristic)
     imag_bot.take_actions(return_actions)
     return_actions.append({
         'func': '_place',
         'args': (imag_bot.get_pos() + player_loc) / 2,
         'kwargs': {
             'block': block_id
         }
     })
     self.take_actions(return_actions, _DELAY)
Example #38
    def find_path_to_closest_dot(self, game_state):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        game_state.
        """
        # Here are some useful elements of the start_state
        start_position = game_state.get_pacman_position()
        food = game_state.get_food()
        walls = game_state.get_walls()
        problem = AnyFoodSearchProblem(game_state)

        return search.astar(problem)
Example #39
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built.  The gameState can be any game state -- Pacman's position
    in that state is ignored.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
    return len(search.astar(prob, manhattanHeuristic))
Example #40
 def findPathToClosestDot(self, gameState):
     """
     Returns a path (a list of actions) to the closest dot, starting from
     gameState.
     """
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     
     "*** YOUR CODE HERE ***"
     return search.astar(problem)
Example #41
def foodDistance(pointFrom, pointGo, gameState):
  """
  Returns the maze distance between any two points, using the search functions
  you have already built.  The gameState can be any game state -- Pacman's position
  in that state is ignored.
  
  Example usage: foodDistance( (2,4), (5,6), gameState)
  
  This might be a useful helper function for your ApproximateSearchAgent.
  """
  x1, y1 = pointFrom
  x2, y2 = pointGo
  walls = gameState.getWalls()
  prob = PositionSearchProblem(gameState, start=pointFrom, goal=pointGo, warn=False)
  return len(search.astar(prob))
Example #42
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built.  The gameState can be any game state -- Pacman's position
    in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    return len(search.astar(prob, manhattanHeuristic))
Example #43
    def getAction(self, state):
        """
        From game.py:
        The Agent will receive a GameState and must return an action from
        Directions.{North, South, East, West, Stop}
        """
        "*** YOUR CODE HERE ***"
        pacpos = state.getPacmanPosition()
        print pacpos
        if (pacpos[0] == 1 and pacpos[1] == 3) or (pacpos[0] == 14 and pacpos[1] == 9 and state.getFood()[14][10]):
            return Directions.NORTH
#        if pacpos[0] == 15 and pacpos[1] == 1:
#            return Directions.EAST
        legalActions = state.getLegalActions()
        problem = AnyFoodSearchProblem(state)
        path = search.astar(problem)
        return path[0]
Example #44
 def findPathToClosestDot(self, gameState):
     "Returns a path (a list of actions) to the closest dot, starting from gameState"
     # Here are some useful elements of the startState
     startPosition = gameState.getPacmanPosition()
     food = gameState.getFood()
     walls = gameState.getWalls()
     problem = AnyFoodSearchProblem(gameState)
     "*** YOUR CODE HERE ***"
     minDist=9999
     for eachFood in food.asList():
         dist=abs(startPosition[0] - eachFood[0]) + abs(startPosition[1] - eachFood[1])
         if dist<minDist:
              minDist=dist
              closestFood=eachFood
     prob=problem
     prob.goal=closestFood
     directions = search.astar(prob,manhattanHeuristic)
     return directions
Example #45
    def eatFoodAction(self, gamestate):
        print "Eating some dope ass food"
        ez = self.genExclusionZones(gamestate)
        teammate_targets=set()
        for teammate in self.getTeam(gamestate):
            if teammate!=self.index:
                teammate_targets.add(self.data.get_food_target(teammate))

        goals = [pos for pos in self.getFood(gamestate).asList() if pos not in teammate_targets]

        prob = PacmanPosSearch(self.getMyPos(gamestate), goals, gamestate, ez)

        path, target = search.astar(prob)
        self.data.set_food_target(self.index, target)
        if path is None:
            #no good food to eat, just go home
            return HLA.goHome(self, gamestate)
        else:
            return path[0]
Example #46
def solve_astar(start_state, targets_list):
    start_time = time.time()

    assert type(start_state)        is Gamestate
    assert type(targets_list)       is list
    assert type(targets_list[0])    is list
    assert type(targets_list[0][0]) is int

    all_actions = []
    cur_state = start_state

    for targets in targets_list:
        actions, cur_state = search.astar(cur_state, search.heuristic_3, targets, q=True)
        all_actions += actions
        cur_state.print_board()

    cur_state.print_board()
    assert cur_state.is_goal_state()

    print len(all_actions), "moves"
    print "solved in %s seconds" % (time.time() - start_time)

    return all_actions
Example #47
 def __init__(self):
     self.searchFunction = lambda prob: search.astar(prob, cornersAndCapsulesHeuristic)
     self.searchType = CornersAndCapsulesProblem
Example #48
		solution.display()
	else:
		print 'No solution found!!'
	'''



	n = 5
	strategy = search.astar

	puzzle = game.NPuzzle(n)
	puzzle.randomStartState()
	puzzle.randomGoalState(47)
	puzzle.startState.display()
	puzzle.goalState.display()

	if strategy == search.bfs:
		solution = search.breadthFirstSearch(puzzle)
	elif strategy == search.dfs:
		solution  = search.depthFirstSearch(puzzle)
	elif strategy == search.ucs:
		solution  = search.uniformCostSearch(puzzle)
	elif strategy == search.dls:
		solution = search.depthLimitedSearch(puzzle,6)
	elif strategy == search.astar:
		solution = search.astar(puzzle, game.manhattanDistance)

	if solution != None:
		solution.display()
	else:
		print 'No solution found!!'
Example #49
def shortest_path(start, goal, problem):
    # print "searching shortest path"
    new_problem = PositionSearchProblem(problem.startingGameState,
                                        start = start, goal = goal, warn = False)
    return len(search.astar(new_problem, heuristic = manhattanHeuristic))
Example #50
 def foodSearchHeuristic(position, food):
     p = PositionSearchProblem(problem.startingGameState, start=position, goal=food, warn=False, visualize=False)
     return len(search.astar(p))