def mdr(paths_file: str, adv_agent_id: int, adv_agent_ds: int, out_file_name: str = None, robust_mode: str = 'DISABLE'):
    """Run Max-Damage-Route (MDR) search for one adversarial agent over a saved paths file.

    Loads the serialized paths, marks agent `adv_agent_id` as adversarial with
    `adv_agent_ds` damage steps, runs the MDR finder, dumps the resulting paths
    and logs/returns the makespan comparison.

    :param paths_file: path to a serialized paths file readable by paths_serializer.
    :param adv_agent_id: id of the agent to make adversarial (must exist in the file).
    :param adv_agent_ds: number of damage steps assigned to the adversarial agent.
    :param out_file_name: output path; derived from the input name when falsy.
    :param robust_mode: name of an mdr_finder.RobustPathMode member (e.g. 'DISABLE').
    :return: (info, original_makespan, mdr_makespan) — `info` is whatever mdrf.find() reports.
    """
    # Raise the MAPF module's log level so its DEBUG spam is suppressed during the run.
    logging.getLogger('vgmapf.problems.mapf.multi_agent_pathfinding').setLevel(logging.INFO)
    paths_file = pathlib.Path(paths_file)
    lp = paths_serializer.load(paths_file)
    # Raises IndexError if adv_agent_id is not present in the loaded file.
    adv_agent = [a for a in lp.agents if a.id == adv_agent_id][0]
    adv_agent.is_adversarial = True
    adv_agent.damage_steps = adv_agent_ds
    # Resolve the mode name string to the enum member; AttributeError on a bad name.
    robust_mode = getattr(mdr_finder.RobustPathMode, robust_mode)
    if not out_file_name:
        out_file_name = paths_file.parent / (
                paths_file.stem + f'-mdr-a_{adv_agent_id}-ds_{adv_agent_ds}' + paths_file.suffix)
    paths = {a.id: a.path for a in lp.agents}
    original_makespan = mdr_finder.get_makespan(paths, lp.agents)
    mdrf = mdr_finder.MaxDamageRouteFinder(
        lp.grid, lp.agents, astar.Searcher,
        lambda agnt: dict(h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)),
        robust_mode)
    goal_state, info = mdrf.find()
    # From here on `paths` refers to the MDR result, not the original paths.
    paths = goal_state.paths
    for aid, p in paths.items():
        p_cells = [x.cell for x in p]
        LOG.info(f'Agent [{aid:02d}], path length: {len(p)}')
        print(lp.grid.to_str(p_cells, p_cells[0], p_cells[-1], path_chr=str(aid)[0]))
    mdr_makespan = mdr_finder.get_makespan(paths, lp.agents)
    paths_serializer.dump(out_file_name, lp.agents, [paths[a.id] for a in lp.agents], lp.grid)
    LOG.info(f'Original makespan: {original_makespan} | MDR makespan: {mdr_makespan} | MDR info: {info}')
    return info, original_makespan, mdr_makespan
def _stage_25_kamikaze(kamikaze_on_robust_paths_dir, rc, p):
    """Stage 2.5 worker: run the kamikaze planner against one robust-paths file.

    Parses the adversarial-agent parameters out of the file stem (expected form:
    `robust_path-<org>-agent_<id>-ds_<ds>-rr_<rr>`), runs the planner for each
    robust mode, dumps the result paths with the result row as metadata, and
    returns the populated `_stage_25_Result` row.

    :param kamikaze_on_robust_paths_dir: output directory for kamikaze route files.
    :param rc: run config; only `rc.map_file_name` is read here.
    :param p: pathlib.Path of the robust paths file to attack.
    :return: a `_stage_25_Result` row (success flag, costs, collision info or comment).
    """
    LOG.info(f'STARTED kamikaze on {p}')
    # Stem format assumed: robust_path-<org>-agent_<id>-ds_<ds>-rr_<rr> — TODO confirm
    parts = p.stem.split('-')
    org_path_name = parts[1]
    adv_agent_id = int(parts[2].split('_')[1])
    adv_agent_ds = int(parts[3].split('_')[1])
    robust_radius = int(parts[4].split('_')[1])
    lp = paths_serializer.load(p)
    result_row = _stage_25_Result(
        rc.map_file_name, p.name, adv_agent_id, adv_agent_ds, robust_radius
    )
    for robust_mode in [RobustPathMode.OFFLINE]:
        kamikaze_route_path = kamikaze_on_robust_paths_dir / f'kamikaze-{robust_mode.name}-{org_path_name}-agent_{adv_agent_id}-ds_{adv_agent_ds}-rr_{robust_radius}{p.suffix}'
        # Stays None on failure so the dump below writes metadata without paths.
        kamikaze_paths = None
        try:
            with benchmark_utils.time_it(f'Running kamikaze with robust_mode={robust_mode}') as ti:
                kp = kamikaze_planner.KamikazePlanner(
                    lp.grid, lp.agents, astar.Searcher,
                    lambda agnt: dict(
                        h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)
                    ),
                    robust_mode=robust_mode, robust_radius=robust_radius
                )
                goal_state, plan_metadata = kp.find()
            assert isinstance(goal_state, kamikaze_planner.KamikazeState)
            kamikaze_paths = goal_state.paths
            result_row.is_kamikaze_successful = True
            result_row.kamikaze_cost = goal_state.g()
            result_row.kamikaze_expanded_nodes = plan_metadata.expanded_states
            result_row.kamikaze_visited_nodes = plan_metadata.visited_states
            result_row.kamikaze_run_time_seconds = ti.getElapsed()
            collision = goal_state.get_collision()
            result_row.collision_target_agent_id = collision.target_agent_id
            result_row.collision_step = collision.step
            result_row.collision_cell = str(collision.cell)
        except kamikaze_planner.NotFoundError:
            # Planner proved no kamikaze route exists — an expected outcome, not an error.
            result_row.is_kamikaze_successful = False
        except Exception as e:
            # Unexpected failure: record it in the row and keep the batch going.
            LOG.error(e, exc_info=True)
            result_row.comment = str(e)
        paths_serializer.dump(kamikaze_route_path, lp.agents,
                              [kamikaze_paths[a.id] for a in lp.agents] if kamikaze_paths else None,
                              lp.grid, metadata=vars(result_row))
    LOG.info(f'FINISHED kamikaze on {p}')
    return result_row
def _stage_3_normal_mdr(mdr_on_normal_paths_dir, adv_agent_id, adv_agent_ds, rc, p):
    """Stage 3 worker: run MDR for one adversarial agent over one normal paths file.

    Marks agent `adv_agent_id` adversarial with `adv_agent_ds` damage steps
    (resetting all others), runs the MDR finder, dumps the result paths with the
    result row as metadata, and returns the `_stage_3_Result` row.

    :param mdr_on_normal_paths_dir: output directory for the MDR route file.
    :param adv_agent_id: id of the agent to make adversarial.
    :param adv_agent_ds: damage steps for the adversarial agent.
    :param rc: run config; only `rc.map_file_name` is read here.
    :param p: pathlib.Path of the normal paths file to process.
    :return: `_stage_3_Result` (full metrics on success, `comment` set on failure).
    """
    LOG.info(f'STARTED MDR on {p} with agent [{adv_agent_id}] and DS={adv_agent_ds}')
    lp = paths_serializer.load(p)
    # Raises IndexError if adv_agent_id is not present in the loaded file.
    adv_agent = [a for a in lp.agents if a.id == adv_agent_id][0]
    adv_agent.is_adversarial = True
    adv_agent.damage_steps = adv_agent_ds
    # Ensure exactly one adversarial agent regardless of flags stored in the file.
    for a in lp.agents:
        if a is not adv_agent:
            a.is_adversarial = False
            a.damage_steps = 0
    paths = {a.id: a.path for a in lp.agents}
    original_makespan = mdr_finder.get_makespan(paths, lp.agents)
    mdr_route_path = mdr_on_normal_paths_dir / f'mdr-{p.stem}-agent_{adv_agent.id}-ds_{adv_agent_ds}{p.suffix}'
    # Stays None on failure so the dump below writes metadata without paths.
    mdr_paths = None
    try:
        with benchmark_utils.time_it(f'Running MDR for adv agent:{adv_agent.id}, ds: {adv_agent_ds}') as ti:
            mdrf = mdr_finder.MaxDamageRouteFinder(
                lp.grid, lp.agents, astar.Searcher,
                lambda agnt: dict(h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)))
            goal_state, mdr_run_info = mdrf.find()
        mdr_paths = goal_state.paths
        mdr_run_time = ti.getElapsed()
        mdr_makespan = mdr_finder.get_makespan(mdr_paths, lp.agents)
        result = _stage_3_Result(
            rc.map_file_name,
            p.name,
            adv_agent.id,
            adv_agent_ds,
            original_makespan,
            mdr_makespan,
            mdr_run_info.expanded_states,
            mdr_run_info.visited_states,
            mdr_run_time,
        )
    except Exception as e:
        # exc_info=True for a full traceback, consistent with the other stage workers.
        LOG.error(e, exc_info=True)
        result = _stage_3_Result(
            rc.map_file_name, p.name, adv_agent.id, adv_agent_ds, original_makespan,
            comment=str(e)
        )
    paths_serializer.dump(mdr_route_path, lp.agents,
                          [mdr_paths[a.id] for a in lp.agents] if mdr_paths else None,
                          lp.grid, metadata=vars(result))
    return result
def _stage_1_normal_paths(normal_paths_dir, grid, rc, permutation_idx):
    """Stage 1 worker: compute and save one MAPF solution for one agent permutation.

    Shuffles `rc.agents` (except for permutation 0), solves MAPF with A*, validates
    the paths, and saves them to `<normal_paths_dir>/<idx>.path` with per-agent
    metadata. Best-effort: any exception is logged and swallowed so the batch
    continues.

    :param normal_paths_dir: output directory for the `.path` file.
    :param grid: the 2D grid to plan on.
    :param rc: run config; reads `rc.agents` (mutated by shuffle) and `rc.permutations`.
    :param permutation_idx: zero-based permutation index (used for shuffle gate and file name).
    """
    try:
        with benchmark_utils.time_it(f'Building path #{permutation_idx}'):
            LOG.info(f'STARTED permutation {permutation_idx + 1:03d}/{rc.permutations:03d}')
            # Permutation 0 keeps the configured agent order as a baseline.
            if permutation_idx > 0:
                random.shuffle(rc.agents)
            agents = [agent.Agent(**a) for a in rc.agents]
            # Inner timer measures only the pathfinding itself, for the metadata below.
            with benchmark_utils.time_it() as t:
                mf = multi_agent_pathfinding.MapfFinder(grid, agents)
                mf.find_paths(astar.Searcher,
                              lambda agnt: dict(h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)))
            mf.validate_paths()
            out_path = normal_paths_dir / f'{permutation_idx:03d}.path'
            mf.save_paths(out_path, metadata=dict(
                mapf_run_time_sec=t.getElapsed(),
                makespan=mf.agents_repo.get_makespan(only_non_adversarial=False),
                agents_metadata=[
                    dict(
                        id=a.id,
                        start_cell=a.start_cell,
                        goal_cell=a.goal_cell,
                        path_cost=a.path_cost,
                        path_expanded_nodes=a.expanded_nodes,
                        motion_equation=a.motion_equation.name,
                        start_policy=a.start_policy.name,
                        # NOTE(review): 'goal_polciy' is misspelled, but it is a persisted
                        # metadata key — fix only in tandem with whatever reads these files.
                        goal_polciy=a.goal_policy.name,
                    )
                    for a in agents
                ]
            ))
            LOG.info(f'FINISHED permutation {permutation_idx + 1:03d}/{rc.permutations:03d} => {out_path}')
    except Exception as e:
        # Best-effort worker: log with traceback and let the rest of the batch run.
        LOG.error(e, exc_info=True)
def _stage_4_robust_mdr(mdr_on_robust_paths_dir, rc, p):
    """Stage 4 worker: run MDR against one robust paths file and record the metrics.

    Parses the adversarial parameters from the file stem (expected form:
    `robust_path-<org>-agent_<id>-ds_<ds>-rr_<rr>`), loads the matching stage-1
    original paths file for its makespan, runs MDR for each robust mode, stores the
    per-mode metrics on the result row, dumps the route file with the row as
    metadata, and returns the row.

    :param mdr_on_robust_paths_dir: output directory for MDR route files.
    :param rc: run config; only `rc.map_file_name` is read here.
    :param p: pathlib.Path of the robust paths file to process.
    :return: `_stage_4_Result` with per-mode MDR metrics (or `comment` on failure).
    """
    # TODO: only calculate robust MDR paths for paths where MDR had a big impact on the original path
    LOG.info(f'STARTED robust MDR on {p}')
    # Stem format: robust_path-{org}-agent_{id}-ds_{ds}-rr_{rr}
    parts = p.stem.split('-')
    org_path_name = parts[1]
    adv_agent_id = int(parts[2].split('_')[1])
    adv_agent_ds = int(parts[3].split('_')[1])
    robust_radius = int(parts[4].split('_')[1])
    # The stage-1 (non-robust) file with the same base name gives the baseline makespan.
    org_path_file_path = p.parent.parent / STAGE_01_NORMAL_PATHS / (org_path_name + p.suffix)
    org_lp = paths_serializer.load(org_path_file_path)
    org_makespan = org_lp.get_makespan()
    lp = paths_serializer.load(p)
    paths = {a.id: a.path for a in lp.agents}
    original_robust_makespan = mdr_finder.get_makespan(paths, lp.agents)
    result_row = _stage_4_Result(
        rc.map_file_name, p.name, adv_agent_id, adv_agent_ds,
        org_makespan, original_robust_makespan, robust_radius
    )
    for robust_mode in [RobustPathMode.ONLINE_CONST]:
        mdr_route_path = mdr_on_robust_paths_dir / f'mdr-{robust_mode.name}-{org_path_name}-agent_{adv_agent_id}-ds_{adv_agent_ds}-rr_{robust_radius}{p.suffix}'
        # Stays None on failure so the dump below writes metadata without paths.
        mdr_paths = None
        try:
            with benchmark_utils.time_it(f'Running MDR with robust_mode={robust_mode}') as ti:
                mdrf = mdr_finder.MaxDamageRouteFinder(
                    lp.grid, lp.agents, astar.Searcher,
                    lambda agnt: dict(
                        h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)
                    ),
                    robust_mode=robust_mode, robust_radius=robust_radius
                )
                goal_state, mdr_run_info = mdrf.find()
            mdr_paths = goal_state.paths
            # BUGFIX: expanded/visited were swapped here (expanded_nodes was fed
            # visited_states and vice versa), unlike the stage-2.5/3 workers which
            # map expanded->expanded_states and visited->visited_states.
            mdr_results = {
                f'ms_mdr_{robust_mode.name.lower()}_path': mdr_finder.get_makespan(mdr_paths, lp.agents),
                f'mdr_{robust_mode.name.lower()}_expanded_nodes': mdr_run_info.expanded_states,
                f'mdr_{robust_mode.name.lower()}_visited_nodes': mdr_run_info.visited_states,
                f'mdr_{robust_mode.name.lower()}_run_time_seconds': ti.getElapsed()
            }
            for k, v in mdr_results.items():
                setattr(result_row, k, v)
        except Exception as e:
            LOG.error(e, exc_info=True)
            result_row.comment = str(e)
        paths_serializer.dump(mdr_route_path, lp.agents,
                              [mdr_paths[a.id] for a in lp.agents] if mdr_paths else None,
                              lp.grid, metadata=vars(result_row))
    LOG.info(f'FINISHED robust MDR on {p}')
    return result_row
def _stage_2_normal_robust(robust_paths_dir, grid, max_adv_agent_ds, p):
    """Stage 2 worker: build robust path files for every (agent, ds, radius) combo.

    For each agent in the loaded paths file, treats it as the single adversarial
    agent and, for every damage-step count up to `max_adv_agent_ds` and every
    robust radius up to 2*ds, re-solves MAPF with that radius and saves the result.
    Failures of a single combination are logged and skipped; any other failure is
    logged and swallowed (best-effort worker).

    :param robust_paths_dir: output directory for robust `.path` files.
    :param grid: the 2D grid to plan on.
    :param max_adv_agent_ds: inclusive upper bound on damage steps to try.
    :param p: pathlib.Path of the normal paths file to process.
    """
    try:
        LOG.info(f'STARTED normal robust on {p}')
        lp = paths_serializer.load(p)
        for org_adv_agent in lp.agents:
            adv_agent_id = org_adv_agent.id
            # Fresh clones per adversarial candidate so earlier iterations' path
            # resets don't leak into later ones.
            agents = [a.clone(clear_path=False) for a in lp.agents]
            adv_agent = [a for a in agents if a.id == adv_agent_id][0]
            LOG.info(f'STARTED Robust paths with agent {adv_agent.id}')
            for adv_agent_ds in range(1, max_adv_agent_ds + 1):
                # Radius sweep: 1 .. 2*ds (inclusive).
                for robust_radius in range(1, 2*adv_agent_ds+1):
                    LOG.info(f'STARTED Robust paths with agent {adv_agent.id} and DS={adv_agent_ds} '
                             f'and Robust Radius={robust_radius}')
                    adv_agent.is_adversarial = True
                    adv_agent.damage_steps = adv_agent_ds
                    # Non-adversarial agents are reset and get their paths recomputed.
                    for a in agents:
                        if a is not adv_agent:
                            a.is_adversarial = False
                            a.damage_steps = 0
                            a.path = None
                    try:
                        with benchmark_utils.time_it() as t:
                            mf = multi_agent_pathfinding.MapfFinder(
                                grid, agents, adv_agent_radiuses={adv_agent.id: robust_radius})
                            mf.find_paths(astar.Searcher, lambda agnt: dict(
                                h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)))
                        mf.validate_paths()
                        out_path = robust_paths_dir / f'robust_path-{p.stem}-agent_{adv_agent.id}-ds_{adv_agent_ds}' \
                                                      f'-rr_{robust_radius}{p.suffix}'
                        mf.save_paths(out_path, metadata=dict(
                            mapf_run_time_sec=t.getElapsed(),
                            makespan=mf.agents_repo.get_makespan(only_non_adversarial=False),
                            adv_radiuses=mf.adv_agent_radiuses,
                            agents=[
                                dict(
                                    id=a.id,
                                    start_cell=a.start_cell,
                                    goal_cell=a.goal_cell,
                                    path_cost=a.path_cost,
                                    path_expanded_nodes=a.expanded_nodes,
                                    motion_equation=a.motion_equation.name,
                                    start_policy=a.start_policy.name,
                                    # NOTE(review): 'goal_polciy' is misspelled, but it is a
                                    # persisted metadata key — fix only in tandem with readers.
                                    goal_polciy=a.goal_policy.name,
                                    is_adversarial=a.is_adversarial,
                                    damage_steps=a.damage_steps,
                                )
                                for a in agents
                            ]
                        ))
                    except Exception as e:
                        # One infeasible/failed combination should not stop the sweep.
                        LOG.error(
                            f'Failed creating robust route for {org_adv_agent}, ds={adv_agent_ds}, rr={robust_radius}:'
                            f' {e}, moving on...')
        LOG.info(f'FINISHED normal robust on {p}')
    except Exception as e:
        # Best-effort worker: swallow so sibling files still get processed.
        LOG.error(e)
def mapf(run_config_file_name: str, out_file_name: str = None, random_seed=None, permutations: int = None, map_file_name: str = None):
    """Run the full MAPF pipeline for every configured agent permutation.

    Loads the run config (with optional overrides), solves MAPF per permutation,
    saves and validates the paths, and — when `rc.robust_route` is
    RobustPathMode.OFFLINE — additionally re-plans non-adversarial agents with
    robust radiuses (2 * damage_steps per adversarial agent) and logs the
    makespan difference.

    :param run_config_file_name: config file loadable by `config.load`.
    :param out_file_name: base output path; a timestamped default is built when absent.
    :param random_seed: seed for `random.seed` (None → nondeterministic).
    :param permutations: optional override of `rc.permutations`.
    :param map_file_name: optional override of `rc.map_file_name`.
    :return: the directory that the output path files were written to.
    """
    rc = config.load(run_config_file_name)
    # CLI overrides take precedence over the config file.
    if permutations is not None:
        rc.permutations = permutations
    if map_file_name:
        rc.map_file_name = map_file_name
    random.seed(random_seed)
    if out_file_name:
        base_out_path = pathlib.Path(out_file_name)
    else:
        timestamp = time_utils.get_current_time_stamp()
        base_out_path = pathlib.Path(__file__).parent.joinpath(
            'routes', timestamp, f'paths-{timestamp}.path'
        )
    base_out_path.parent.mkdir(parents=True, exist_ok=True)
    g = grid2d.Grid2D.from_file(pathlib.Path(rc.map_file_name))
    _update_start_and_goal_cells(rc, g)
    LOG.info(f'STARTING mapf test, run_config: {rc}, base_out_path: {base_out_path}')
    for permutation_idx in range(rc.permutations):
        with benchmark_utils.time_it(f'Building path #{permutation_idx}'):
            LOG.info(f'STARTED permutation {permutation_idx + 1:03d}/{rc.permutations:03d}')
            # Permutation 0 keeps the configured agent order as a baseline.
            if permutation_idx > 0:
                random.shuffle(rc.agents)
            agents = [agent.Agent(**a) for a in rc.agents]
            mf = multi_agent_pathfinding.MapfFinder(g, agents)
            mf.find_paths(astar.Searcher,
                          lambda agnt: dict(h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)))
            for a in mf.agents:
                LOG.debug(
                    f"[{permutation_idx + 1:03d}/{rc.permutations:03d}]:: Agent: {a.id}, path len: {len(a.path)} "
                    f"path cost: {a.path_cost}, expanded nodes: {a.expanded_nodes}")
                print(g.to_str(a.cells_path(), a.start_cell, a.goal_cell, path_chr=str(a.id)[0]))
            out_path_base = base_out_path.parent / (
                    base_out_path.stem + f'-{permutation_idx:03d}' + base_out_path.suffix)
            mf.save_paths(out_path_base)
            LOG.info(f'FINISHED permutation {permutation_idx + 1:03d}/{rc.permutations:03d} => {out_path_base}')
            mf.validate_paths()
            robust_route = RobustPathMode(rc.robust_route)
            if robust_route == RobustPathMode.OFFLINE:
                makespan_original = mf.agents_repo.get_makespan()
                # Clear non-adversarial agents so only they are re-planned; the
                # adversarial agents keep their paths from the first solve.
                for agnt in agents:
                    if not agnt.is_adversarial:
                        agnt.path = None
                        agnt.path_cost = 0
                        agnt.expanded_nodes = 0
                # Robust radius convention: twice the damage steps per adversarial agent.
                mf_robust = multi_agent_pathfinding.MapfFinder(
                    g, agents,
                    adv_agent_radiuses={a.id: a.damage_steps * 2 for a in agents if a.is_adversarial})
                mf_robust.find_paths(astar.Searcher,
                                     lambda agnt: dict(h_func=mapf_heuristics.get_good_manhatten_like_heuristic(agnt)))
                for a in mf_robust.agents:
                    LOG.debug(
                        f"[{permutation_idx + 1:03d}/{rc.permutations:03d}]:: Agent: {a.id}, path len: {len(a.path)} "
                        f"path cost: {a.path_cost}, expanded nodes: {a.expanded_nodes}")
                    print(g.to_str(a.cells_path(), a.start_cell, a.goal_cell, path_chr=str(a.id)[0]))
                out_path_robust = base_out_path.parent / (
                        base_out_path.stem + f'-{permutation_idx:03d}-robust' + base_out_path.suffix)
                mf_robust.save_paths(out_path_robust)
                LOG.info(f'FINISHED permutation {permutation_idx + 1:03d}/{rc.permutations:03d} => {out_path_robust}')
                mf_robust.validate_paths()
                makespan_robust = mf_robust.agents_repo.get_makespan()
                LOG.info(f'The difference in makespan is {makespan_robust - makespan_original}')
    return base_out_path.parent
def test_cbs(run_config_file_name: str, out_file_name: str = None, random_seed=None, permutations=None):
    """Run the CBS MAPF solver for every configured agent permutation.

    Loads the run config, fills in missing start/goal cells (random free cells,
    with neighbors found around a shared anchor when agents don't specify their
    own), then for each permutation solves with CBS, saves and validates the
    paths.

    :param run_config_file_name: config file loadable by `config.load`.
    :param out_file_name: base output path; defaults next to the map file with a timestamp.
    :param random_seed: seed for `random.seed` (None → nondeterministic).
    :param permutations: optional override of `rc.permutations`.
    """
    rc = config.load(run_config_file_name)
    if permutations is not None:
        rc.permutations = permutations
    random.seed(random_seed)
    if out_file_name:
        base_out_path = pathlib.Path(out_file_name)
    else:
        base_out_path = pathlib.Path(rc.map_file_name).parent.joinpath(
            f'routes-{time_utils.get_current_time_stamp()}.csv')
    g = grid2d.Grid2D.from_file(pathlib.Path(rc.map_file_name))
    # Anchor cells used when per-agent cells are not configured.
    if not rc.start:
        rc.start = g.get_random_free_cell()
    if not rc.end:
        rc.end = g.get_random_free_cell({rc.start})
    agent_count = len(rc.agents)
    # If ANY agent specifies a start (resp. goal), all agents are assumed to
    # specify it — mixed configs are not handled here. TODO confirm
    agents_have_start = False
    agents_have_end = False
    for a in rc.agents:
        if a.get('start_cell'):
            agents_have_start = True
        if a.get('goal_cell'):
            agents_have_end = True
    if not agents_have_start:
        # Cluster the agents around the shared anchor start cell.
        start_cells = [rc.start] + g.find_free_cells_around(rc.start, agent_count - 1)
    else:
        start_cells = [a['start_cell'] for a in rc.agents]
    if not agents_have_end:
        # Goal cells must avoid the chosen start cells.
        end_cells = [rc.end] + g.find_free_cells_around(rc.end, agent_count - 1, set(start_cells))
    else:
        end_cells = [a['goal_cell'] for a in rc.agents]
    for a, sc, gc in zip(rc.agents, start_cells, end_cells):
        a['start_cell'] = sc
        a['goal_cell'] = gc
    LOG.info(f'STARTING mapf test, run_config: {rc}, base_out_path: {base_out_path}')
    for permutation_idx in range(rc.permutations):
        LOG.info(f'STARTED permutation {permutation_idx:03d}/{rc.permutations:03d}')
        # random.shuffle(rc.agents)
        agents = [agent.Agent(**a) for a in rc.agents]
        cbs_finder = cbs.CbsMafpFinder(g)
        agents_repo, total_cost = cbs_finder.find_path(
            agent_repository.AgentRepository(agents), astar.Searcher,
            lambda agnt: dict(
                h_func=mapf_heuristics.get_good_manhatten_like_heuristic(
                    agnt)))
        for a in agents_repo.agents:
            LOG.debug(
                f"[{permutation_idx:03d}/{rc.permutations:3d}]:: Agent: {a.id}, path len: {len(a.path)} path cost: "
                f"{a.path_cost}, expanded nodes: {a.expanded_nodes}")
            print(g.to_str(a.cells_path(), a.start_cell, a.goal_cell, path_chr=str(a.id)[0]))
        out_path = base_out_path.parent / (base_out_path.stem + f'-{permutation_idx:03d}' + base_out_path.suffix)
        cbs_finder.save_paths(agents_repo, out_path)
        LOG.info(f'FINISHED permutation {permutation_idx:03d}/{rc.permutations:03d} => {out_path}')
        cbs_finder.validate_paths(g, agents_repo)