def initialize_problem(self, problem_instance):
    """
    Initialize the frontier and the closed list for the given problem instance, and push the
    starting M* state built from the agents' single-agent start states.
    """
    self._solver_settings.initialize_heuristic(problem_instance)
    self._frontier = StatesQueue()
    self._closed_list = MStarStatesQueue()
    self._n_of_generated_nodes = 1
    self._n_of_expanded_nodes = 0

    single_agents_states = []
    for agent in problem_instance.get_agents():
        s = SingleAgentState(problem_instance.get_map(), agent.get_goal(), agent.get_start(),
                             self._solver_settings)
        single_agents_states.append(s)

    starter_state = MStarState(single_agents_states, self._solver_settings)
    self._frontier.add(starter_state)
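# Note (illustrative, not part of the repository): the frontier and closed list above rely on
# the StatesQueue / MStarStatesQueue containers. As a hedged sketch only, a minimal open-list
# container ordered by f-value could look like the class below; the f_value() accessor on the
# stored states is an assumption made for this example.
import heapq
import itertools


class MinimalStatesQueue:
    """Illustrative sketch of an open list ordered by f-value (not the repository's class)."""

    def __init__(self):
        self._heap = []
        self._counter = itertools.count()  # tie-breaker so equal f-values never compare states

    def add(self, state):
        # Assumes each stored state exposes an f_value() method (g + h).
        heapq.heappush(self._heap, (state.f_value(), next(self._counter), state))

    def pop(self):
        # Return the state with the lowest f-value.
        return heapq.heappop(self._heap)[2]

    def is_empty(self):
        return len(self._heap) == 0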
def initialize_problem(self, problem_instance):
    """
    Initialize the frontier and the visited list for the given problem instance, and push the
    starting multi-agent state built from the agents' single-agent start states.
    """
    self._solver_settings.initialize_heuristic(problem_instance)
    self._frontier = []
    self._visited_list = StatesQueue()
    self._n_of_generated_nodes = 1
    self._n_of_expanded_nodes = 0

    single_agents_states = []
    for agent in problem_instance.get_agents():
        s = SingleAgentState(problem_instance.get_map(), agent.get_goal(), agent.get_start(),
                             self._solver_settings)
        single_agents_states.append(s)

    # First create the single-agent states, then combine them into a multi-agent state:
    # the multi-agent state is the search node that the A* algorithm expands.
    starter_state = MultiAgentState(single_agents_states, self._solver_settings)
    self._frontier.append(starter_state)
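# Note (illustrative, not the repository's solve method): the frontier built above is consumed
# by the usual best-first loop. The sketch below only shows that flow; the MultiAgentState
# accessors f_value(), is_completed(), expand() and get_paths_to_root() are assumptions made
# for this example, and duplicate detection against the visited list is omitted for brevity.
def solve_sketch(self, problem_instance):
    """Hedged sketch of an A* loop over the frontier initialized in initialize_problem."""
    self.initialize_problem(problem_instance)

    while self._frontier:
        # Pop the multi-agent state with the lowest f-value (assumed f_value() accessor).
        self._frontier.sort(key=lambda state: state.f_value())
        current = self._frontier.pop(0)
        self._visited_list.add(current)
        self._n_of_expanded_nodes += 1

        if current.is_completed():               # assumed goal test on the joint state
            return current.get_paths_to_root()   # assumed path-reconstruction helper

        for successor in current.expand():       # assumed joint expansion of all agents
            self._n_of_generated_nodes += 1
            self._frontier.append(successor)

    return None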
def initialize_table(self):
    """
    Initialize the table. For each agent it creates two queue structures (an open list and a
    closed list) that are stored in the respective dictionaries under the goal-position key.
    Then a reverse search from the goal to the start is run using the Manhattan heuristic.
    In this way the closed list is initialized and stores all the positions from the goal to
    the start position. (Remember that the search has goal and start inverted.)
    """
    from MAPFSolver.Utilities.SingleAgentState import SingleAgentState
    from MAPFSolver.Utilities.SolverSettings import SolverSettings

    for agent in self._problem_instance.get_agents():
        goal_pos = agent.get_goal()
        self._open_lists[goal_pos] = StatesQueue()
        self._closed_lists[goal_pos] = StatesQueue()

        solver_settings = SolverSettings(heuristic="Manhattan")
        solver_settings.initialize_heuristic(self._problem_instance)

        # The search is reversed: the agent's start plays the role of the goal and the agent's
        # goal plays the role of the start, so the two arguments are passed swapped.
        starter_state = SingleAgentState(self._problem_instance.get_map(), agent.get_start(),
                                         goal_pos, solver_settings)
        self._open_lists[goal_pos].add(starter_state)

        self.resume_rra_star(agent.get_start(), goal_pos)
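# Note (illustrative, not the repository's implementation): the call to resume_rra_star above
# restarts the reverse search until the requested position has been expanded. As a hedged
# sketch only, a resumable RRA* step typically looks like the function below; the queue
# methods pop()/is_empty() and the state accessors get_position() and expand() are assumptions,
# and the real resume_rra_star may handle re-expansion and duplicate states differently.
def resume_rra_star_sketch(self, target_position, goal_pos):
    """Hedged sketch: run the reverse search until target_position has been expanded."""
    open_list = self._open_lists[goal_pos]
    closed_list = self._closed_lists[goal_pos]

    while not open_list.is_empty():
        # Pop the most promising state of the reverse search (lowest f-value assumed first).
        current = open_list.pop()
        closed_list.add(current)

        # Once the requested cell has been expanded, its true distance from the agent's goal
        # (the start of this reversed search) is known, so the search can pause here and be
        # resumed later for other cells.
        if current.get_position() == target_position:
            return True

        for successor in current.expand():  # assumed single-agent expansion
            open_list.add(successor)

    return False  # target_position is not reachable from goal_pos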