Example #1
 def search(self, strategy: 'Strategy') -> '[State, ...]':
     print('Starting search with strategy {}.'.format(strategy), file=sys.stderr, flush=True)
     strategy.add_to_frontier(self.initial_state)
     
     iterations = 0
     while True:
         if iterations == 1000:
             print(strategy.search_status(), file=sys.stderr, flush=True)
             iterations = 0
         
         if memory.get_usage() > memory.max_usage:
             print('Maximum memory usage exceeded.', file=sys.stderr, flush=True)
             return None
         
         if strategy.frontier_empty():
             return None
         
         leaf = strategy.get_and_remove_leaf()
         
         if leaf.is_goal_state():
             return leaf.extract_plan()
         
         strategy.add_to_explored(leaf)
         for child_state in leaf.get_children(): # The list of expanded states is shuffled randomly; see state.py.
             if not strategy.is_explored(child_state) and not strategy.in_frontier(child_state):
                 strategy.add_to_frontier(child_state)
         
         iterations += 1
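The loop above only touches the strategy through a handful of calls: add_to_frontier, frontier_empty, get_and_remove_leaf, add_to_explored, is_explored, in_frontier and search_status. A minimal breadth-first sketch of that interface follows; everything beyond those method names is an assumption made for illustration, not the project's actual Strategy class.

from collections import deque
import time


class StrategyBFS:
    """Sketch only: a FIFO frontier plus an explored set, matching the calls
    made by search() above. The real Strategy classes track more state."""

    def __init__(self):
        self.frontier = deque()
        self.frontier_set = set()  # O(1) membership checks for in_frontier()
        self.explored = set()
        self.start_time = time.perf_counter()

    def add_to_frontier(self, state):
        self.frontier.append(state)
        self.frontier_set.add(state)

    def get_and_remove_leaf(self):
        state = self.frontier.popleft()
        self.frontier_set.remove(state)
        return state

    def frontier_empty(self) -> bool:
        return not self.frontier

    def in_frontier(self, state) -> bool:
        return state in self.frontier_set

    def add_to_explored(self, state):
        self.explored.add(state)

    def is_explored(self, state) -> bool:
        return state in self.explored

    def search_status(self) -> str:
        return '#Explored: {:6}, #Frontier: {:6}, Time: {:3.2f} s'.format(
            len(self.explored), len(self.frontier),
            time.perf_counter() - self.start_time)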
Example #2
 def search_status(self) -> "str":
     return "#Explored: {:6}, #Frontier: {:6}, #Generated: {:6}, Time: {:3.2f} s, Alloc: {:4.2f} MB, MaxAlloc: {:4.2f} MB".format(
         self.explored_count(),
         self.frontier_count(),
         self.explored_count() + self.frontier_count(),
         self.time_spent(),
         memory.get_usage(),
         memory.max_usage,
     )
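Both the status line and the memory guard in the search loops rely on a small memory helper exposing get_usage() (current allocation in MB) and a max_usage limit. A possible sketch of that module is given below, assuming psutil is available and that max_usage is a plain module-level constant; the project's real memory module may differ.

# memory.py -- sketch only
import psutil

_process = psutil.Process()

max_usage = 2048.0  # assumed limit in MB; the real value is project-specific


def get_usage() -> float:
    """Current resident set size of this process, in MB."""
    return _process.memory_info().rss / (1024 * 1024)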
Example #3
    def search2(self, strategy, goalstate, display=False, msg="",
                max_time=300, allow_pull=True) -> '[State, ...]':
        start = time.perf_counter()

        if msg == "":
            print('Starting search with strategy {}.'.format(strategy),
                  file=sys.stderr,
                  flush=True)

        else:
            # Keep only the part of the strategy repr from "using" onwards
            strategy_repr = repr(strategy)
            search_method = strategy_repr[strategy_repr.find("using"):]
            print('Starting search for: ' + msg + " | " + search_method,
                  file=sys.stderr,
                  flush=True)

        strategy.add_to_frontier(self.initial_state)

        iterations = 0
        while True:

            if display:
                if iterations >= 1:
                    print("\033[H\033[J")  # Stack overflow to clear screen
                    print(leaf)  # Print state
                    input()  # Wait for user input

            if iterations >= 1000:
                print(strategy.search_status(), file=sys.stderr, flush=True)
                iterations = 0

            if iterations % 100 == 0:
                if (time.perf_counter() - start) > max_time:
                    return None, None

            if memory.get_usage() > memory.max_usage:
                print('Maximum memory usage exceeded.',
                      file=sys.stderr,
                      flush=True)
                return None, None

            if strategy.frontier_empty():
                return None, None

            leaf = strategy.get_and_remove_leaf()

            if leaf.is_goal_state2(goalstate, allow_pull):
                return leaf.extract_plan(), leaf

            strategy.add_to_explored(leaf)
            for child_state in leaf.get_children():
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state, goalstate)

            iterations += 1
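With the failure paths above returning a (None, None) pair like the timeout path, callers can always unpack the result. A hypothetical call site, with the client, strategy and goalstate objects assumed to exist:

import sys

plan, goal_leaf = client.search2(strategy, goalstate, msg="route agent 0", max_time=60)
if plan is None:
    print('search2 found no plan (timeout, memory limit, or empty frontier).',
          file=sys.stderr, flush=True)
else:
    print('Plan of length {} found.'.format(len(plan)), file=sys.stderr, flush=True)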
Example #4
 def search_status(self) -> 'str':
     return '#Explored: {:4}, ' \
            '#Frontier: {:3}, ' \
            'Time: {:3.2f} s, ' \
            'Alloc: {:4.2f} MB, ' \
            'MaxAlloc: {:4.2f} MB'.format(self.explored_count(),
                                          self.frontier_count(),
                                          self.time_spent(),
                                          memory.get_usage(),
                                          memory.max_usage)
def create_dijkstras_map(state_: State):

    result_dict = defaultdict(int)

    for goal_location in state_.goal_positions.keys():
        preprocessing_current_state = State(state_)
        strategy = StrategyBFS()
        strategy.reset_strategy()
        preprocessing_current_state._dijkstras_location = goal_location
        strategy.add_to_frontier(preprocessing_current_state)

        iterations = 0
        while True:

            if iterations == 1000:
                if testing:
                    print(f"dijkstras {strategy.search_status()}",
                          file=sys.stderr,
                          flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                if testing:
                    print('Maximum memory usage exceeded.',
                          file=sys.stderr,
                          flush=True)
                raise Exception('Maximum mem used')

            if strategy.frontier_empty():
                # print('Done with dijkstras', file=sys.stderr, flush=True)
                break

            leaf = strategy.get_and_remove_leaf()

            # Add element to dict
            result_dict[(goal_location, leaf._dijkstras_location)] = leaf.g

            strategy.add_to_explored(leaf)

            # The list of expanded states is shuffled randomly; see state.py.
            for child_state in leaf.get_children_dijkstras():
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state)
            iterations += 1

    return result_dict
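The returned dict maps (goal_location, cell) pairs to exact BFS distances, so a distance heuristic reduces to a dictionary lookup. A sketch of such a lookup follows; dijkstras_map, initial_state and the cell argument are assumptions for illustration. Note that because the result is a defaultdict(int), cells the BFS never reached silently read as distance 0.

# Sketch only: precompute the map once, then look distances up in a heuristic.
dijkstras_map = create_dijkstras_map(initial_state)


def h_goal_distance(cell, goal_location) -> int:
    """Exact grid distance from cell to goal_location (0 if unreachable)."""
    return dijkstras_map[(goal_location, cell)]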
Example #6
    def search(self, strategy: "Strategy") -> "[State, ...]":
        print(
            "Starting search with strategy {}.".format(strategy),
            file=sys.stderr,
            flush=True,
        )
        strategy.add_to_frontier(self.initial_state)
        iterations = 0
        # prints dimension (NEW)
        print(
            "max columns are: ",
            self.initial_state.MAX_COL,
            "max rows are: ",
            self.initial_state.MAX_ROW,
            file=sys.stderr,
            flush=True,
        )
        while True:
            if iterations == 1000:
                print(strategy.search_status(), file=sys.stderr, flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                print("Maximum memory usage exceeded.", file=sys.stderr, flush=True)
                return None

            if strategy.frontier_empty():
                return None

            leaf = strategy.get_and_remove_leaf()

            if leaf.is_goal_state():
                return leaf.extract_plan()

            strategy.add_to_explored(leaf)
            # The list of expanded states is shuffled randomly; see state.py.
            for child_state in leaf.get_children():
                if not strategy.is_explored(child_state) and not strategy.in_frontier(
                    child_state
                ):
                    strategy.add_to_frontier(child_state)

            iterations += 1
Example #7
    def search3(self, strategy, goalstate) -> '[State, ...]':
        print('Starting search with strategy {}.'.format(strategy),
              file=sys.stderr,
              flush=True)
        strategy.add_to_frontier(self.initial_state)

        iterations = 0
        while True:

            if iterations >= 1:
                print("\033[H\033[J")  # Stack overflow to clear screen
                print(leaf)  # Print state
                input()  # Wait for user input

            if iterations >= 1000:
                print(strategy.search_status(), file=sys.stderr, flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                print('Maximum memory usage exceeded.',
                      file=sys.stderr,
                      flush=True)
                return None, None

            if strategy.frontier_empty():
                return None, None

            leaf = strategy.get_and_remove_leaf()

            if leaf.is_goal_state2(goalstate):
                return leaf.extract_plan(), leaf

            strategy.add_to_explored(leaf)
            for child_state in leaf.get_children():
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state, goalstate)

            iterations += 1
Example #8
 def search(initial_state: 'State', frontier: 'Frontier') -> '[[Action, ...], ...]':
     '''
     Implements the Graph-Search algorithm from R&N figure 3.7.
     '''
     
     start_time = time.perf_counter()
     iterations = 0
     
     print('Starting {}.'.format(frontier.get_name()), file=sys.stderr, flush=True)
     
     frontier.add(initial_state)
     explored = set()
     
     while True:
         if iterations == 1000:
             SearchClient.print_search_status(start_time, explored, frontier)
             iterations = 0
         
         if memory.get_usage() > memory.max_usage:
             SearchClient.print_search_status(start_time, explored, frontier)
             print('Maximum memory usage exceeded.', file=sys.stderr, flush=True)
             return None
         
         if frontier.is_empty():
             SearchClient.print_search_status(start_time, explored, frontier)
             return None
         
         leaf_state = frontier.pop()
         
         if leaf_state.is_goal_state():
             SearchClient.print_search_status(start_time, explored, frontier)
             return leaf_state.extract_plan()
         
         explored.add(leaf_state)
         for state in leaf_state.get_expanded_states():
             if state not in explored and not frontier.contains(state):
                 frontier.add(state)
         
         iterations += 1
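This version talks to a Frontier object rather than a Strategy; the only calls it makes are add, pop, is_empty, contains, size and get_name. A minimal breadth-first sketch of that interface, assumed for illustration:

from collections import deque


class FrontierBFS:
    """Sketch of the Frontier interface used by search() above."""

    def __init__(self):
        self.queue = deque()
        self.members = set()  # O(1) membership for contains()

    def add(self, state):
        self.queue.append(state)
        self.members.add(state)

    def pop(self):
        state = self.queue.popleft()
        self.members.remove(state)
        return state

    def is_empty(self) -> bool:
        return not self.queue

    def size(self) -> int:
        return len(self.queue)

    def contains(self, state) -> bool:
        return state in self.members

    def get_name(self) -> str:
        return 'breadth-first search'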
    def search(self, strategy_: 'Strategy') -> '[State, ...]':
        print('Starting search with strategy {}.'.format(strategy_),
              file=sys.stderr,
              flush=True)
        strategy_.add_to_frontier(self.initial_state)

        iterations = 0
        while True:
            if iterations == 1000:
                print(strategy_.search_status(), file=sys.stderr, flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                print('Maximum memory usage exceeded.',
                      file=sys.stderr,
                      flush=True)
                return None

            if strategy_.frontier_empty():
                return None

            leaf = strategy_.get_and_remove_leaf()
            if self.debug:
                print(leaf, file=sys.stderr, flush=True)
            # if leaf.is_goal_state(): todo
            #     return leaf.extract_plan()
            if strategy_.is_goal_state(leaf):
                return leaf.extract_plan()

            strategy_.add_to_explored(leaf)
            for child_state in leaf.get_children():
                if not strategy_.is_explored(
                        child_state) and not strategy_.in_frontier(
                            child_state):
                    strategy_.add_to_frontier(child_state)

            iterations += 1
    def search_to_box(self, world_state: 'State', box_loc, box_id):
        if len(world_state.reverse_agent_dict()[self.agent_char]) < 1:
            raise Exception("No values for agent ")

        self.world_state = State(world_state)

        if self.strategy == StrategyBestFirst:
            strategy = self.strategy(
                heuristic.AStar(self.world_state,
                                heuristic_func.h_goalassigner_to_box,
                                agent_char=self.agent_char,
                                box_loc=box_loc,
                                box_id=box_id))
        else:
            strategy = self.strategy()
        # In case there has been a previous search we need to clear the elements in the strategy object
        strategy.reset_strategy()

        removed_dict = {
            k: v
            for k, v in self.world_state.agents.items()
            if v[0][1] == self.agent_char
        }

        self.world_state.agents = defaultdict(list, removed_dict)

        self.current_box_id = box_id

        removed_dict = {
            k: v
            for k, v in self.world_state.boxes.items() if k == box_loc
        }

        self.world_state.boxes = defaultdict(list, removed_dict)

        strategy.add_to_frontier(state=self.world_state)

        iterations = 0
        _counter = 0
        while True:

            if _counter == _cfg.max_search_depth:
                return False

            if iterations == 1000:
                if _cfg.testing:
                    print(f"search to box {strategy.search_status()}",
                          file=sys.stderr,
                          flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                print('Maximum memory usage exceeded.',
                      file=sys.stderr,
                      flush=True)
                return None

            if strategy.frontier_empty():

                return False

            leaf = strategy.get_and_remove_leaf()

            # A heuristic value of 1 means the agent is now adjacent to the box
            if strategy.heuristic.h(leaf) == 1:
                self._convert_plan_to_action_list(leaf.extract_plan())
                return True

            strategy.add_to_explored(leaf)
            # Pop and re-add an arbitrary element of the explored set (net effect: no-op)
            x = strategy.explored.pop()
            strategy.explored.add(x)
            for child_state in leaf.get_children(self.agent_char):
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state)
                # elif strategy.is_explored(child_state):
                #     if child_state.g <= strategy.explored_g(child_state):
                #         strategy.add_to_frontier(child_state)

            iterations += 1
            _counter += 1
    def search_conflict_bfs_not_in_list(self,
                                        world_state: 'State',
                                        agent_collision_internal_id,
                                        agent_collision_box,
                                        box_id,
                                        coordinates: list,
                                        move_action_allowed=True):
        '''
        This search method uses BFS to find the first location the agent can move to that is not in
        the list of coordinates.

        :param agent_collision_box: id of the box involved in the collision
        :param agent_collision_internal_id: id of the agent involved in the collision
        :param world_state: the current world state
        :param coordinates: list of coordinates that the agent is not allowed to be in
        :return: True if the agent's plan was updated to avoid the given coordinates, otherwise False
            (or None if the memory limit was exceeded)
        '''
        d = {
            'world_state': world_state,
            'agent_collision_internal_id': agent_collision_internal_id,
            'agent_collision_box': agent_collision_box,
            'box_id': box_id,
            'coordinates': coordinates,
            'move_action_allowed': move_action_allowed
        }
        #

        self.world_state = State(world_state)
        if box_id is None:
            move_action_allowed = True
        else:
            # fix for missing boxes in search
            self.current_box_id = box_id

        self.goal_job_id = None

        # If the assigned box is more than one step away, ignore it and allow plain moves
        if (box_id is not None) and (utils.cityblock_distance(
                utils._get_agt_loc(self.world_state, self.agent_char),
                utils._get_box_loc(self.world_state, box_id)) > 1):

            locc_agt = utils._get_agt_loc(self.world_state, self.agent_char)
            locc_box = utils._get_box_loc(self.world_state, box_id)

            box_id = None
            move_action_allowed = True

        # If the agent is asked to move out of someone's plan, then include this someone in the planning
        if agent_collision_internal_id is not None:
            self.world_state.redirecter_search = True

        removed_dict = {
            k: v
            for k, v in self.world_state.agents.items()
            if (v[0][1] == self.agent_char) or (
                v[0][2] == agent_collision_internal_id)
        }

        self.world_state.agents = defaultdict(list, removed_dict)

        removed_dict = {
            k: v
            for k, v in self.world_state.boxes.items()
            if (v[0][2] == self.current_box_id) or (
                v[0][2] == agent_collision_box)
        }

        self.world_state.boxes = defaultdict(list, removed_dict)

        strategy = StrategyBFS()

        # In case there has been a previous search we need to clear the elements in the strategy object
        strategy.reset_strategy()

        # Fix error with state not knowing which box is allowed to be moved
        self.world_state.sub_goal_box = box_id

        strategy.add_to_frontier(state=self.world_state)

        iterations = 0
        _counter = 0
        while True:

            if _counter == _cfg.max_search_depth:
                return False

            if iterations == 1000:
                if _cfg.testing:
                    print(f"bfs not in list {strategy.search_status()}",
                          file=sys.stderr,
                          flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                if _cfg.testing:
                    print('Maximum memory usage exceeded.',
                          file=sys.stderr,
                          flush=True)
                return None

            if strategy.frontier_empty():
                if _cfg.testing:
                    print('Empty frontier BFS NOT IN LIST',
                          file=sys.stderr,
                          flush=True)
                    print(f'{self.agent_char}', file=sys.stderr, flush=True)
                # TODO: Could not find a location where the agent is not "in the way";
                # return something that triggers the other collision object to move out of the way.

                return False

            leaf = strategy.get_and_remove_leaf()

            agt_loc = utils._get_agt_loc(leaf, self.agent_char)

            if box_id is not None:
                box_loc = utils._get_box_loc(leaf, self.current_box_id)

            if (box_id is None) and (agt_loc not in coordinates):
                self._reset_plan()
                self._convert_plan_to_action_list(leaf.extract_plan())
                return True
                #break
            else:
                if agt_loc not in coordinates and box_loc not in coordinates:
                    self._reset_plan()
                    self._convert_plan_to_action_list(leaf.extract_plan())
                    return True

            strategy.add_to_explored(leaf)
            x = strategy.explored.pop()
            strategy.explored.add(x)

            # The list of expanded states is shuffled randomly; see state.py.
            for child_state in leaf.get_children(
                    self.agent_char, move_allowed=move_action_allowed):

                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state)
            iterations += 1
            _counter += 1
    def search_replanner_heuristic(self,
                                   world_state: 'State',
                                   blocked_locations: list,
                                   agent_to,
                                   box_from=None,
                                   box_to=None):
        self.world_state = State(world_state)

        # Add blocked locations as walls, so the search does not expand into those locations
        for loc in blocked_locations:
            row, col = loc.split(",")
            self.world_state.walls[f'{row},{col}'] = True

        # TODO: Currently: quick fix to solve agent allowed moves problem - rewrite to allow for agents to move unassigned boxes
        if box_from is not None:
            self.world_state.sub_goal_box = self.world_state.boxes[box_from][
                0][2]

        # finding our initial location and removing all other elements to increase speed and simplify world
        for key, value in self.world_state.agents.items():
            if value[0][1] == self.agent_char:
                location = key

        removed_dict = {
            k: v
            for k, v in self.world_state.agents.items()
            if v[0][1] == self.agent_char
        }
        self.world_state.agents = defaultdict(list, removed_dict)

        if box_from is None:
            strategy = StrategyBestFirst(
                heuristic.AStar(self.world_state,
                                heuristic_func.h_replanner_pos,
                                agent_to=agent_to,
                                agent_char=self.agent_char))
        else:
            strategy = StrategyBestFirst(
                heuristic.AStar(self.world_state,
                                heuristic_func.h_replanner_pos,
                                agent_to=agent_to,
                                agent_char=self.agent_char,
                                box_id=self.world_state.boxes[box_from][0][2],
                                box_to=box_to))
        # In case there has been a previous search we need to clear the elements in the strategy object
        strategy.reset_strategy()

        strategy.add_to_frontier(state=self.world_state)

        iterations = 0
        _counter = 0
        while True:

            if _counter == _cfg.max_search_depth:
                for loc in blocked_locations:
                    row, col = loc.split(",")
                    self.world_state.walls.pop(f'{row},{col}')
                return None

            if iterations == 1000:
                if _cfg.testing:
                    print(f"replanner H{strategy.search_status()}",
                          file=sys.stderr,
                          flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                if _cfg.testing:
                    print('Maximum memory usage exceeded.',
                          file=sys.stderr,
                          flush=True)
                for loc in blocked_locations:
                    row, col = loc.split(",")
                    self.world_state.walls.pop(f'{row},{col}')
                return None

            if strategy.frontier_empty():
                if _cfg.testing:
                    print('Empty frontier search heuristic',
                          file=sys.stderr,
                          flush=True)
                # finished searchspace without sol
                for loc in blocked_locations:
                    row, col = loc.split(",")
                    self.world_state.walls.pop(f'{row},{col}')
                return None

            leaf = strategy.get_and_remove_leaf()

            # h=0 is the same as goal
            # TODO: Update this to work with something else
            if strategy.heuristic.h(leaf) == 0:
                _temp_plan = leaf.extract_plan()
                for loc in blocked_locations:
                    row, col = loc.split(",")
                    self.world_state.walls.pop(f'{row},{col}')
                return [x.action for x in _temp_plan]

            strategy.add_to_explored(leaf)

            # TODO: What is going on with x here?
            #x = strategy.explored.pop()
            #strategy.explored.add(x)
            for child_state in leaf.get_children(self.agent_char):
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(
                            child_state) and not (child_state.g >
                                                  _cfg.max_replanning_steps):
                    strategy.add_to_frontier(child_state)
            iterations += 1
            _counter += 1
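search_replanner_heuristic repeats the same wall cleanup loop before every exit path (depth cutoff, memory limit, empty frontier and goal). A context manager would guarantee the cleanup runs exactly once however the search exits; the following is only a sketch, assuming walls is a plain dict keyed by 'row,col' strings.

from contextlib import contextmanager


@contextmanager
def temporary_walls(world_state, blocked_locations):
    """Mark blocked locations as walls for the duration of a search and
    always remove them again, even on early returns or exceptions."""
    for loc in blocked_locations:
        row, col = loc.split(",")
        world_state.walls[f'{row},{col}'] = True
    try:
        yield world_state
    finally:
        for loc in blocked_locations:
            row, col = loc.split(",")
            world_state.walls.pop(f'{row},{col}')

With that helper, the body of the method could run inside a single with temporary_walls(self.world_state, blocked_locations): block and drop the four copies of the cleanup loop.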
    def search_position(self, world_state: 'State', agent_to):

        if len(world_state.reverse_agent_dict()[self.agent_char]) < 2:
            raise Exception("No values for agent ")

        self.world_state = State(world_state)

        # TODO: CHECK IF STRATEGY REFERS TO THE SAME OBJECT BEFORE IMPLEMENTING MULTI PROC

        if self.strategy == StrategyBestFirst:
            strategy = self.strategy(
                heuristic.AStar(self.world_state,
                                heuristic_func.h_goalassigner_pos,
                                agent_char=self.agent_char,
                                agent_to=agent_to))
        else:
            strategy = self.strategy()
        # In case there has been a previous search we need to clear the elements in the strategy object
        strategy.reset_strategy()

        # finding our initial location and removing all other elements to increase speed and simplify world
        for key, value in self.world_state.agents.items():
            if value[0][1] == self.agent_char:
                location = key

        removed_dict = {
            k: v
            for k, v in self.world_state.agents.items()
            if v[0][1] == self.agent_char
        }

        self.world_state.agents = defaultdict(list, removed_dict)

        # Removing all boxes to simplify the world
        while len(self.world_state.boxes) > 0:
            self.world_state.boxes.popitem()
        strategy.add_to_frontier(state=self.world_state)

        iterations = 0
        _counter = 0
        while True:

            if _counter == _cfg.max_search_depth:
                return False

            if iterations == 1000:
                if _cfg.testing:
                    print(f"search pos {strategy.search_status()}",
                          file=sys.stderr,
                          flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                if _cfg.testing:
                    print('Maximum memory usage exceeded.',
                          file=sys.stderr,
                          flush=True)
                return None

            if strategy.frontier_empty():
                if _cfg.testing:
                    print(f'{self.agent_char}', file=sys.stderr, flush=True)
                    print('Empty frontier search pos',
                          file=sys.stderr,
                          flush=True)

                return None

            leaf = strategy.get_and_remove_leaf()

            if leaf.is_sub_goal_state_agent(agent_to, self.agent_char):
                self._convert_plan_to_action_list(leaf.extract_plan())
                break

            strategy.add_to_explored(leaf)
            x = strategy.explored.pop()
            strategy.explored.add(x)
            for child_state in leaf.get_children(self.agent_char):
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state)
            iterations += 1
            _counter += 1
    def search_with_box(self, world_state: 'State', boxes_visible: list):

        # Get current location of box trying to move
        box_from = _get_box_loc(world_state, self.current_box_id)

        # In case where

        if _cfg.testing:
            print(
                f'Search with box: box_from: {box_from}, box_id = {self.current_box_id}',
                file=sys.stderr)

        if world_state.boxes[box_from][0][0] != self.agent_color:
            raise Exception("Agent cannot move this box")

        self.world_state = State(world_state)

        if self.strategy == StrategyBestFirst:
            strategy = self.strategy(
                heuristic.AStar(self.world_state,
                                heuristic_func.h_goalassigner_with_box,
                                agent_char=self.agent_char,
                                goal_loc=self.goal_job_id,
                                box_id=self.current_box_id))
        else:
            strategy = self.strategy()
        # In case there has been a previous search we need to clear the elements in the strategy object
        strategy.reset_strategy()

        # Update state so the agent is only aware of itself
        removed_dict = {
            k: v
            for k, v in self.world_state.agents.items()
            if v[0][1] == self.agent_char
        }
        self.world_state.agents = defaultdict(list, removed_dict)

        #TODO: boxes_visible added so we can tell agent if there are goal dependencies to worry about (they have to be visible)
        removed_dict = {
            k: v
            for k, v in self.world_state.boxes.items()
            if (k == box_from) or (k in boxes_visible)
        }
        self.world_state.boxes = defaultdict(list, removed_dict)

        # make sure we only move this box
        self.world_state.sub_goal_box = self.current_box_id

        strategy.add_to_frontier(state=self.world_state)

        iterations = 0
        _counter = 0
        while True:

            if _counter == _cfg.max_search_depth:
                return False

            if iterations == 1000:
                if _cfg.testing:
                    print(f"with box{strategy.search_status()}",
                          file=sys.stderr,
                          flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                if _cfg.testing:
                    print('Maximum memory usage exceeded.',
                          file=sys.stderr,
                          flush=True)
                return None

            if strategy.frontier_empty():
                if _cfg.testing:
                    print(f'{self.agent_char}', file=sys.stderr, flush=True)
                    print('Empty frontier search with box',
                          file=sys.stderr,
                          flush=True)
                return None

            leaf = strategy.get_and_remove_leaf()

            if leaf.is_sub_goal_state_box(self.goal_job_id,
                                          self.current_box_id):
                self._convert_plan_to_action_list(leaf.extract_plan())
                break

            strategy.add_to_explored(leaf)
            x = strategy.explored.pop()
            strategy.explored.add(x)

            for child_state in leaf.get_children(
                    self.agent_char
            ):  # The list of expanded states is shuffled randomly; see state.py.
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state)
            iterations += 1
            _counter += 1
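search_to_box, search_position, search_with_box and the conflict search above all repeat the same bookkeeping: a status print every 1000 iterations, the memory guard, the empty-frontier check and a depth cutoff. The sketch below factors that loop out; every name other than the Strategy calls is an assumption, and the real methods keep their goal tests and expansions inline.

import sys

import memory  # the project's memory helper, as used in the examples above


def run_search_loop(strategy, is_goal, expand, max_depth, status_label=""):
    """Sketch of the bookkeeping shared by the search_* methods above.
    Returns the goal leaf, or None on memory limit, empty frontier or cutoff."""
    iterations = 0
    depth = 0
    while True:
        if depth == max_depth:
            return None
        if iterations == 1000:
            print(f"{status_label} {strategy.search_status()}",
                  file=sys.stderr, flush=True)
            iterations = 0
        if memory.get_usage() > memory.max_usage:
            print('Maximum memory usage exceeded.', file=sys.stderr, flush=True)
            return None
        if strategy.frontier_empty():
            return None
        leaf = strategy.get_and_remove_leaf()
        if is_goal(leaf):
            return leaf
        strategy.add_to_explored(leaf)
        for child_state in expand(leaf):
            if not strategy.is_explored(child_state) and not strategy.in_frontier(child_state):
                strategy.add_to_frontier(child_state)
        iterations += 1
        depth += 1

A caller such as search_to_box would then only supply its own pieces, for example run_search_loop(strategy, lambda s: strategy.heuristic.h(s) == 1, lambda s: s.get_children(self.agent_char), _cfg.max_search_depth, "search to box").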
Example #15
 def print_search_status(start_time: 'int', explored: '{State, ...}', frontier: 'Frontier') -> None:
     status_template = '#Explored: {:8,}, #Frontier: {:8,}, #Generated: {:8,}, Time: {:3.3f} s\n[Alloc: {:4.2f} MB, MaxAlloc: {:4.2f} MB]'
     elapsed_time = time.perf_counter() - start_time
     print(status_template.format(len(explored), frontier.size(), len(explored) + frontier.size(), elapsed_time, memory.get_usage(), memory.max_usage), file=sys.stderr, flush=True)
    def search(self, initial_state, goal_test_str='standard'):
        self.initial_state = initial_state
        strategy = strategy_factory(name=self.strategy_str,
                                    initial_state=initial_state,
                                    weight_value=5,
                                    heuristic_function=self.heuristic_function)
        self.strategy = strategy

        if goal_test_str == "dl_conflict_solved":

            def goal_test(state):
                # Every box must stand on a goal cell matching its letter
                for pos, box in state.boxes.items():
                    goal = state.goals.get(pos)
                    if goal is None or goal != box.lower():
                        return False
                agent_goal = state.goals.get(
                    (state.agent_row, state.agent_col))
                #print(agent_goal)
                if agent_goal is None:  # or agent_goal not in "abcdefghijklmnopqrstuvwxyz":  or agent_goal is 'agent':
                    return False

                return True

        else:

            def goal_test(state):
                return state.is_subgoal_state()

        print('Starting search with strategy {}.'.format(strategy),
              file=sys.stderr,
              flush=True)
        strategy.add_to_frontier(initial_state)
        self.solution = None
        iterations = 0
        while True:
            if iterations == 1000:
                print(strategy.search_status(), file=sys.stderr, flush=True)
                iterations = 0

            if memory.get_usage() > memory.max_usage:
                print('Maximum memory usage exceeded.',
                      file=sys.stderr,
                      flush=True)
                self.solution = []
                return None

            if strategy.frontier_empty():
                self.solution = []
                return None

            leaf = strategy.get_and_remove_leaf()
            if goal_test(leaf):
                self.solution = deque(leaf.extract_plan())
                return

            strategy.add_to_explored(leaf)
            for child_state in leaf.get_children():
                if not strategy.is_explored(
                        child_state) and not strategy.in_frontier(child_state):
                    strategy.add_to_frontier(child_state)

            iterations += 1
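The method above stores its result in self.solution instead of returning it, so a caller reads the deque afterwards. A hypothetical usage, with the surrounding class, its constructor and the heuristic all assumed:

# Hypothetical usage; Planner, my_heuristic and initial_state are assumptions.
planner = Planner(strategy_str='best-first', heuristic_function=my_heuristic)

planner.search(initial_state)  # default goal test: state.is_subgoal_state()
# planner.search(initial_state, goal_test_str='dl_conflict_solved')  # alternative goal test

if planner.solution:  # empty list/deque means the search failed
    next_state = planner.solution.popleft()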