Example no. 1
    def get_action(self, perception):
        """ This is the main method for all your agents

        Along with the __init__, you must at least implement this method in
        all your agents to make them work properly.

        This method receives a perception from the environment and returns
        an action after performing an A* search with the agent's heuristic.
        """
        state = self.__state_from_perception(perception)
        i, j = state[0]  # (row, col) of the agent on the grid
        grid, _ = perception

        # if gas station and tank not full, refill
        if grid[i][j] == self.player_number + 7 and state[1] < self.tank_capacity:
            return 'REFILL'

        self.start_agent(perception, self.problem_reference,
                         tank_capacity=self.tank_capacity)
        node = util.a_star(self.problem, self.heuristic)
        if not node:  # Search did not find any action
            return 'STOP'
        # walk back up the search tree to recover the first action on the path
        action = node.action
        last_action = None
        while node.parent is not None:
            node = node.parent
            last_action = action
            action = node.action
        return last_action
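Every one of these agents recovers the first action by walking parent links from the goal node back to the root. For reference, here is a minimal sketch of that pattern factored into a standalone helper; the name first_action is hypothetical, and it only assumes that search nodes expose .action and .parent as in the examples in this section.

def first_action(node, default='STOP'):
    """Sketch: return the first action on the path leading to `node`.

    Assumes `node` is the goal node returned by the search (or a falsy
    value when the search failed) and that nodes expose `.action` and
    `.parent`, as in the examples in this section.
    """
    if not node:
        return default
    action, last_action = node.action, None
    while node.parent is not None:
        node = node.parent
        last_action = action
        action = node.action
    # last_action now holds the action taken from the root of the tree
    return last_action if last_action is not None else default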
Example no. 2
    def get_action(self, perception):
        """ This is the main method for all your agents

        Along with the __init__, you must at least implement this method in
        all your agents to make them work properly.

        This method receives a perception from the environment and returns
        an action after performing an A* search with manhattan_distance as
        the heuristic.
        """
        self.start_agent(perception,
                         self.problem_reference,
                         0,
                         tank_capacity=self.tank_capacity)
        node = util.a_star(self.problem, self.manhattan_distance)

        if not node:  # Search did not find any action
            people_numbers = [3, 6, 7]  # grid values that represent people
            grid = perception[0]
            for i in range(len(grid)):
                for j in range(len(grid[0])):
                    if grid[i][j] in people_numbers:
                        self.problem.pp = self.problem.ppAlcan
                        node = util.a_star(self.problem,
                                           self.manhattan_distance)
                        if not node:
                            return 'STOP'
                        action = node.action
                        last_action = None
                        while node.parent is not None:
                            node = node.parent
                            last_action = action
                            action = node.action
                        return last_action
            return 'STOP'

        action = node.action
        last_action = None
        while node.parent is not None:
            node = node.parent
            last_action = action
            action = node.action
        return last_action
Example no. 3
def part2():
    with open('input/11.txt') as f:
        original_path = f.read().strip().split(',')
        goal = furthest_from_origin(original_path)
        print(goal)

        # straight-line (Euclidean) distance from p to the goal
        def heuristic(p):
            x, y = p
            gx, gy = goal
            return math.hypot((gx - x), (gy - y))

        shortest_path = a_star((0, 0), goal, moves, heuristic)
        # subtract one because the path includes the start state
        print(len(shortest_path) - 1)
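Examples 3 and 7 call a module-level a_star(start, goal, moves, heuristic) whose body is not shown. The sketch below is one way such a function could be written, assuming (based only on the call sites) that moves(state) yields neighbouring states with unit step cost and that the function returns the full list of states from start to goal, or None when the goal is unreachable.

import heapq

def a_star(start, goal, moves, heuristic):
    """Sketch of A* over unit-cost moves (assumed signature, see above)."""
    open_heap = [(heuristic(start), 0, start)]  # (f, g, state)
    parents = {start: None}
    best_g = {start: 0}
    while open_heap:
        _, g, state = heapq.heappop(open_heap)
        if state == goal:
            # rebuild the path by following parent links back to the start
            path = []
            while state is not None:
                path.append(state)
                state = parents[state]
            return list(reversed(path))
        if g > best_g.get(state, float('inf')):
            continue  # stale heap entry
        for neighbour in moves(state):
            new_g = g + 1
            if new_g < best_g.get(neighbour, float('inf')):
                best_g[neighbour] = new_g
                parents[neighbour] = state
                heapq.heappush(open_heap,
                               (new_g + heuristic(neighbour), new_g, neighbour))
    return None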
Example no. 4
    def get_action(self, perception):
        """ Receives a perception, does a search and returns an action """
        self.start_agent(perception,
                         self.problem_reference,
                         tank_capacity=self.tank_capacity)
        node = util.a_star(self.problem, self.heuristic)
        if not node:  # Search did not find any action
            return 'STOP'
        action = node.action
        last_action = None
        while node.parent is not None:
            node = node.parent
            last_action = action
            action = node.action
        return last_action
Example no. 5
    def get_action(self, perception):
        """
        Copied from GetClosestPersonOrRefillAgent
        Receives a perception, does a search and returns an action
        """
        self.start_agent(perception,
                         self.problem_reference,
                         tank_capacity=self.tank_capacity)
        node = util.a_star(self.problem, self.avg_manhattan_distance)
        if not node:  # Search did not find any action
            return 'STOP'
        action = node.action
        last_action = None
        while node.parent is not None:
            node = node.parent
            last_action = action
            action = node.action
        return last_action
Example no. 6
    def get_action(self, perception):
        """ This is the main method for all your agents

        Along with the __init__, you must at least implement this method in
        all your agents to make them work properly.

        This method receives a perception from the environment and returns
        an action after performing an A* search with manhattan_distance as
        the heuristic.
        """
        self.start_agent(perception, self.problem_reference,
                         tank_capacity=self.tank_capacity)
        node = util.a_star(self.problem, self.manhattan_distance)
        if not node:  # Search did not find any action
            return 'STOP'
        action = node.action
        last_action = None
        while node.parent is not None:
            node = node.parent
            last_action = action
            action = node.action
        return last_action
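The agents above pass self.manhattan_distance (and, in Example 5, self.avg_manhattan_distance) to util.a_star without showing those methods. For reference, here is a minimal sketch of a plain Manhattan-distance heuristic between two grid cells; taking (row, column) tuples is an assumption, as the real heuristics may operate on whole search states or nodes instead.

def manhattan_distance(cell_a, cell_b):
    """Sketch: Manhattan (taxicab) distance between two grid cells.
    The (row, col) tuple arguments are an assumption, not the agents'
    actual interface."""
    ai, aj = cell_a
    bi, bj = cell_b
    return abs(ai - bi) + abs(aj - bj)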
Example no. 7
def test():
    def heuristic(p):
        x, y = p
        return math.hypot(2 - x, 2 - y)

    print(a_star((0, 0), (2, 2), moves, heuristic))