def _astar(self, global_plan, start_time, max_time):
    """Time-expanded A* search from self.start to self.goal.

    Avoids moves that conflict with paths in global_plan (delegated to
    self._successors). Each open-set entry is (f-score, time_step, position);
    waiting in place is allowed (successor == cur is never pruned).

    :param global_plan: paths of higher-priority agents to avoid
    :param start_time: wall-clock start (timeit.default_timer()), or None
                       to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :raises util.NoPathsFoundException: when the open set empties without
                                        reaching the goal
    :returns: the reconstructed path (via self._reverse_path)
    """
    closed_set = set()
    open_set = []
    came_from = {}
    # g keys on position only, not (time, position); assumes revisiting a
    # position at a later time never helps except for in-place waits.
    g = {self.start: 0}
    heapq.heappush(open_set, (0, 0, self.start))
    while open_set:
        time = timeit.default_timer()
        if start_time is not None and (time - start_time) > max_time:
            raise TimeExceeded()
        _, time_step, cur = heapq.heappop(open_set)
        if cur == self.goal:
            return self._reverse_path((time_step, cur), came_from)
        closed_set.add(cur)
        for successor in self._successors(cur, time_step, global_plan,
                                          start_time, max_time):
            # Skip successor in closed list; waits (successor == cur) are
            # exempt so the agent can idle to let others pass.
            if successor in closed_set and successor != cur:
                continue
            score = g[cur] + 1  # uniform move cost of 1 per step
            # Ignore path if it is a longer variant (again, waits exempt)
            if successor in g and score >= g[successor] \
                    and successor != cur:
                continue
            # came_from is keyed on (time, position) so the path can be
            # reversed through repeated positions.
            came_from[time_step + 1, successor] = (time_step, cur)
            g[successor] = score
            heapq.heappush(open_set,
                           (score + self.h.dist(successor),
                            time_step + 1, successor))
    raise util.NoPathsFoundException()
def poc(agents, start_time=None, max_time=1):
    """Plan paths for all agents and iteratively resolve conflicts.

    Each agent first plans independently; while any paths conflict, the
    conflicts are grouped (find_conflicts), wrapped in Conflict objects,
    attached to the involved agents, and resolved one at a time.

    :param agents: planning agents exposing .plan/.path/.conflicts/
                   .old_conflicts/.stable_prio
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :returns: dict with final 'paths', 'initial' conflict count and number
              of conflicts 'solved'
    """
    paths = []
    for agent in agents:
        agent.plan(start_time=start_time, max_time=max_time)
        paths.append(agent.path)
    count = 0
    conflicts = util.paths_conflict(paths)
    init_conflicts = len(find_conflicts(agents, conflicts))
    while conflicts:
        time = timeit.default_timer()
        if start_time is not None and (time - start_time) > max_time:
            raise TimeExceeded()
        count += 1
        # Add the conflicts to the agents
        conflict_sets = find_conflicts(agents, conflicts)
        for conflict in conflict_sets:
            c = Conflict(conflict[0], conflict[1], conflict_sets[conflict])
            for agent in c.agents:
                agent.conflicts.add(c)
        # Get the agents to resolve a conflict
        for agent in agents:
            try:
                # NOTE(review): .add() above suggests a set, but [0] needs an
                # indexable container — presumably a custom ordered set; a
                # plain set would raise TypeError here, not IndexError.
                conflict = agent.conflicts[0]
            except IndexError:
                continue
            # Clear agents from each others priorities before resolving
            for agent1 in conflict.agents:
                agent1.stable_prio = list(set(agent1.stable_prio))
                for agent2 in conflict.agents:
                    try:
                        agent1.stable_prio.remove(agent2)
                    except ValueError:
                        pass  # Ignore if agent is not in the list
            conflict.resolve(start_time=start_time, max_time=max_time)
            for agent in conflict.agents:
                agent.conflicts.remove(conflict)
                agent.old_conflicts.add(conflict)
        # Calculate the final paths
        paths = [agent.path for agent in agents]
        conflicts = util.paths_conflict(paths)
    # Find number of conflicts solved
    conflicts = set()
    for agent in agents:
        conflicts.update(agent.old_conflicts)
    return {
        'paths': paths,
        'initial': init_conflicts,
        'solved': len(conflicts),
    }
def successor_states(world, current, goals, start_time, max_time):
    """Yield (cost, joint_state) successors of a joint multi-agent state.

    Enumerates candidate joint moves via rec_successor_states, skips any
    joint move whose per-agent transitions conflict, and scores each
    survivor by the number of agents not yet at their goal.

    :param world: the world/grid passed through to rec_successor_states
    :param current: tuple of current per-agent positions
    :param goals: tuple of per-agent goal positions
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :yields: (score, successor_tuple) pairs
    """
    for succ in rec_successor_states(world, current, 0):
        time = timeit.default_timer()
        if start_time is not None and (time - start_time) > max_time:
            raise TimeExceeded()
        # zip(current, succ) pairs each agent's (from, to) move
        if util.paths_conflict(tuple(zip(current, succ))):
            print('Conflicting paths found', current, succ)
            continue
        # Cost = number of agents that are not on their goal in succ
        score = sum(1 for i in range(len(current)) if goals[i] != succ[i])
        yield score, tuple(succ)
def standard_algorithm(agents, world, starts, goals, start_time=None,
                       max_time=None):
    """Coupled A* over the joint state space of all agents.

    Searches tuples of per-agent positions from `starts` to `goals`,
    using one RRAstar heuristic per agent combined by heur_dist.

    :param agents: the number of agents (an int; indexes starts/goals)
    :param world: the world/grid used for successor generation
    :param starts: iterable of per-agent start positions
    :param goals: iterable of per-agent goal positions
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :returns: the reconstructed joint path, or None if no path exists
    """
    starts = tuple(starts)
    goals = tuple(goals)
    closed_set = set()
    open_set = []
    came_from = {}
    g = {starts: 0}
    count = 0  # tie-breaker so the heap never compares states directly
    heapq.heappush(open_set, (0, count, starts))
    # Set up heuristics: one reverse-resumable A* per agent
    heur = {}
    for i in range(agents):
        heur[goals[i]] = RRAstar(world, starts[i], goals[i])
    # Display predicted cost
    pred_cost = heur_dist(heur, goals, starts)
    print(f'predicted cost: {pred_cost}')
    while open_set:
        time = timeit.default_timer()
        if start_time is not None and (time - start_time) > max_time:
            raise TimeExceeded()
        f, _, current = heapq.heappop(open_set)
        if current == goals:
            return reconstruct_path(came_from, current)
        closed_set.add(current)
        for cost, neighbour in successor_states(world, current, goals,
                                                start_time, max_time):
            if neighbour in closed_set:
                continue
            score = g[current] + cost
            # We found a longer path, ignore it
            if neighbour in g and score >= g[neighbour]:
                continue
            came_from[neighbour] = current
            g[neighbour] = score
            count += 1
            # Bug fix: f-value must be g(neighbour) + h(neighbour); the
            # heuristic was previously evaluated at `current`, which breaks
            # A*'s ordering guarantee.
            heapq.heappush(open_set,
                           (score + heur_dist(heur, goals, neighbour),
                            count, neighbour))
    return None
def dimpp(agents, start_time, max_time):
    """Iterative prioritised planning with rotating start agent (DiMPP-style).

    For each rotation offset `a`, agents plan in the order a, a+1, ... mod n.
    Conflicts are first attacked by inserting a single wait step at the
    earliest conflict time, then by a full constrained replan. If a rotation
    fails, the next start agent is tried.

    :param agents: list of planning agents
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :raises util.NoPathsFoundException: when every rotation fails
    :returns: dict with conflict-free 'paths' (ordered to match `agents`),
              plus 'initial'/'solved' counters (always 0 here)
    """
    n = len(agents)
    for a in range(n):
        agents[a].plan([], start_time, max_time)
        global_plan = [agents[a].path]
        try:
            for i in range(1, n):
                cur_time = timeit.default_timer()
                if start_time is not None and (cur_time - start_time) > max_time:
                    raise TimeExceeded()
                j = (a + i) % n
                agents[j].plan([], start_time, max_time)
                # Go to next agent if there are no conflicts
                conflicts = util.paths_conflict(global_plan + [agents[j].path])
                if not conflicts:
                    global_plan.append(agents[j].path)
                    continue
                # Try to insert a wait in the plan at the first conflict
                time = min(conflicts, key=lambda c: c['time'])['time']
                if time < len(agents[j].path):
                    agents[j].path.insert(time, agents[j].path[time])
                # Check if solved
                conflicts = util.paths_conflict(global_plan + [agents[j].path])
                if not conflicts:
                    global_plan.append(agents[j].path)
                    continue
                # If still not solved replan with constraints
                agents[j].plan(global_plan, start_time, max_time)
                conflicts = util.paths_conflict(global_plan + [agents[j].path])
                # If there are still conflicts then finding a solution
                # didn't work
                if conflicts:
                    raise util.NoPathsFoundException()
                global_plan.append(agents[j].path)
        except util.NoPathsFoundException:
            continue  # this rotation failed, try the next start agent
        # If this is reached then we should have a good solution
        conflicts = util.paths_conflict(global_plan)
        assert len(conflicts) == 0
        # Rotate the plan back so index 0 corresponds to agents[0]
        global_plan = global_plan[n - a:] + global_plan[:n - a]
        assert len(global_plan) == n
        assert global_plan[0][0] == agents[0].start
        return {'paths': global_plan, 'initial': 0, 'solved': 0}
    # If we go through all agents and no plan has been found then we've failed
    # (instantiate the exception for consistency with the raise above)
    raise util.NoPathsFoundException()
def _successors(self, pos, time, global_plan, start_time, max_time):
    """Return the moves from `pos` that do not conflict with global_plan.

    Candidates are `pos` itself (waiting) plus its world neighbours; a
    candidate is kept only if the move (pos -> successor) at `time`
    conflicts with no path in global_plan. Paths that have already ended
    are treated as the agent waiting on its final position forever.

    :param pos: current position
    :param time: current time step into the other paths
    :param global_plan: list of paths of higher-priority agents
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :returns: list of non-conflicting successor positions
    """
    successors = [pos] + self.world.neighbours(pos)
    filtered = []
    for successor in successors:
        for other_path in global_plan:
            cur_time = timeit.default_timer()
            if start_time is not None and (cur_time - start_time) > max_time:
                raise TimeExceeded()
            # Compare lengths directly instead of slicing just to count
            if len(other_path) - time >= 2:
                if util.moves_conflict(other_path[time:time + 2],
                                       (pos, successor)):
                    break
            # Path already finished: the other agent sits on its last cell
            elif util.moves_conflict((other_path[-1], other_path[-1]),
                                     (pos, successor)):
                break
        else:
            # No break occurred: successor conflicts with nobody
            filtered.append(successor)
    return filtered
def _successors(self, pos, time, start_time=None, max_time=None):
    """Return the moves from `pos` not conflicting with higher-priority agents.

    Candidates are `pos` itself (waiting) plus its world neighbours; a
    candidate is kept only if the move (pos -> successor) at `time`
    conflicts with no agent in self._actual_prio. Agents whose paths have
    already ended are treated as parked on their final position.

    :param pos: current position
    :param time: current time step into the other agents' paths
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :returns: list of non-conflicting successor positions
    """
    successors = [pos] + self.world.neighbours(pos)
    filtered = []
    for successor in successors:
        for other_agent in self._actual_prio:
            cur_time = timeit.default_timer()
            if start_time is not None and (cur_time - start_time) > max_time:
                raise TimeExceeded()
            path = other_agent.path
            # Compare lengths directly instead of slicing just to count
            if len(path) - time >= 2:
                paths = [path[time:time + 2], (pos, successor)]
            else:
                # Finished path: the other agent sits on its last cell
                paths = [[path[-1]], (pos, successor)]
            if util.paths_conflict(paths):
                break
        else:
            # No break occurred: successor conflicts with nobody
            filtered.append(successor)
    return filtered
def resolve(self, start_time=None, max_time=1):
    """Resolve this conflict by trying every priority ordering of its agents.

    Each permutation of self.agents is planned with prefix priorities and
    scored by total path length; the shortest ordering is committed to the
    agents' stable priorities and stored as self.solution.

    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    """
    # If this is not the first conflict for an agent then don't bother
    for agent in self.agents:
        if agent.conflicts[0] != self:
            return
    # Check if this conflict has occurred before.
    # NOTE(review): only the last agent's old_conflicts end up deciding
    # `out` — presumably sufficient since all involved agents record the
    # same conflicts; confirm against the Conflict bookkeeping.
    out = None
    for conflict in agent.old_conflicts:
        if conflict == self:
            out = conflict.solution
            break
    # Generate all possible orderings
    orderings = itertools.permutations(self.agents)
    best_ordering = None
    # Bug fix: use an infinite sentinel instead of 9999 so an ordering is
    # always selected even when total path length exceeds 9999 (previously
    # best_ordering could stay None and crash below).
    best_length = float('inf')
    for ordering in orderings:
        time = timeit.default_timer()
        if start_time is not None and (time - start_time) > max_time:
            raise TimeExceeded()
        # Skip ordering if it occurred before
        if ordering == out:
            continue
        # Replan for agents according to this permutation
        for agent_idx in range(len(ordering)):
            ordering[agent_idx].priorities = list(ordering[:agent_idx])
            ordering[agent_idx].plan(start_time=start_time,
                                     max_time=max_time)
        # Find the total path length of the new solution
        length = sum(len(agent.path) for agent in self.agents)
        if length < best_length:
            best_length = length
            best_ordering = ordering
    # Finally, let the agents replan with the added priorities
    for idx in range(len(best_ordering)):
        best_ordering[idx].stable_prio += best_ordering[:idx]
        best_ordering[idx].plan(start_time=start_time, max_time=max_time)
    self.solution = best_ordering
def version1(agents, start_time, max_time, visualize=False):
    """Plan all agents, then loop resolving conflicts until none remain.

    Optionally exports a PNG visualisation of the conflicting paths on
    every iteration and once at the end.

    :param agents: planning agents exposing .plan/.path/.conflicts/
                   .resolved_conflicts
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :param visualize: when True, write conflict_XXXXX.png images
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :returns: dict with 'paths', 'initial' conflict count, 'solved' count
              and the 'sizes' (agent counts) of the solved conflicts
    """
    paths = []
    for agent in agents:
        agent.plan(start_time=start_time, max_time=max_time)
        paths.append(agent.path)
    if visualize:
        vis = visualisation.Visualisation(agents[0].world, len(agents),
                                          scale=20)
    count = 0
    conflicts = util.paths_conflict(paths)
    init_conflicts = len(convert_conflicts(agents, conflicts))
    while conflicts:
        time = timeit.default_timer()
        if start_time is not None and (time - start_time) > max_time:
            raise TimeExceeded()
        if visualize:
            print('Exporting conflicts')
            im = vis.draw_paths_with_conflicts(paths, conflicts)
            im.save(f'conflict_{count:05}.png')
        count += 1
        conflict_objs = convert_conflicts(agents, conflicts)
        # Add conflicts to agents
        for agent in agents:
            agent.conflicts.clear()
        for conflict in conflict_objs.values():
            for agent in conflict.agents:
                agent.conflicts.add(conflict)
        # Get the agents to resolve the conflicts
        for agent in agents:
            try:
                conflict = agent.conflicts[0]
            except IndexError:
                continue  # Agent has no conflicts
            conflict.resolve(agents, start_time, max_time)
        # Update the list of conflicts
        paths = [agent.path for agent in agents]
        conflicts = util.paths_conflict(paths)
    # Final visualisation
    if visualize:
        print('Exporting final conflicts')
        im = vis.draw_paths_with_conflicts(paths, conflicts)
        im.save(f'conflict_{count:05}.png')
    # Find number of conflicts solved
    conflicts = set()
    for agent in agents:
        conflicts.update(agent.resolved_conflicts)
    # Bug fix: materialize the sizes — a generator here was single-use and
    # lazily bound, so consumers could exhaust or never evaluate it.
    sizes = [len(c.agents) for c in conflicts]
    return {'paths': paths,
            'initial': init_conflicts,
            'solved': len(conflicts),
            'sizes': sizes,
            }
def resolve(self, agents, start_time, max_time):
    """Resolve this conflict by negotiation: collect and score proposals.

    Repeatedly asks the involved agents for proposals, replans under each
    proposal, and has every agent vote (evaluate) on the resulting global
    paths. Loops until a proposal fully solves the conflict (no
    ConflictPartiallySolved raised) or no new proposals arrive.

    :param agents: all agents in the problem (used for global conflict
                   checks), a superset of self.agents
    :param start_time: wall-clock start, or None to disable the time limit
    :param max_time: maximum allowed wall-clock seconds
    :raises TimeExceeded: when the wall-clock budget is exhausted
    :raises ConflictNotSolved: when no proposal scored better than -inf
    """
    # Don't try to solve a conflict after having already done so
    if self.solution:
        return
    # If this is not the first conflict for an agent then don't bother
    for agent in self.agents:
        if agent.conflicts[0] != self:
            return
        agent.current_conflict = self
    best_score = -float('inf')
    partially_solved = False
    while best_score == -float('inf') or partially_solved:
        time = timeit.default_timer()
        if start_time is not None and (time - start_time) > max_time:
            raise TimeExceeded()
        partially_solved = True
        proposals = tuple(filter(lambda p: p is not None,
                                 (agent.propose(self)
                                  for agent in self.agents)))
        self.proposals += proposals
        # If no proposals have been made then go with the best one so far
        if len(proposals) == 0:
            self.solution = max(self.proposals, key=lambda p: p['score'])
            if self.solution['score'] == -float('inf'):
                raise ConflictNotSolved()
            break
        if len(proposals) == 1:
            self.solution = proposals[0]
            break
        # Evaluate the proposals
        for proposal in proposals:
            self.proposal = proposal
            proposal['score'] = 0
            # Plan new paths
            # NOTE(review): proposal[0] is replanned `level` times — looks
            # like it may have been meant to iterate over the proposal's
            # agents per level; confirm against the proposal structure.
            for i in range(proposal['level']):
                proposal[0].plan(start_time, max_time)
            # Replanning for all agents shouldnt be too bad because
            # of caching
            for agent in self.agents:
                agent.plan(start_time, max_time)
            # Evaluate new paths: each involved agent votes on the global
            # conflict situation produced by this proposal
            try:
                for agent in self.agents:
                    paths = tuple(a.path for a in agents)
                    conflicts = util.paths_conflict(paths)
                    conflicts = convert_conflicts(agents, conflicts)
                    proposal['score'] += agent.evaluate(conflicts)
            except ConflictNotSolved:
                proposal['score'] = -float('inf')
                continue
            except ConflictPartiallySolved as e:
                proposal['score'] += e.args[0]
            else:
                partially_solved = False
        # Pick the proposal with the highest sum of votes
        self.solution = max(self.proposals, key=lambda p: p['score'])
        best_score = max(p['score'] for p in self.proposals)
        #print(f'Best score: {best_score}')
        self.leaf_proposals = sorted(
            (p for p in self.proposals if not p['children']),
            key=lambda p: p['score'])
    self.proposal = None
    # Tell the agents that we are done
    for agent in self.agents:
        agent.current_conflict = None
        agent.resolved_conflict(self)
        agent.plan(start_time, max_time)