def astar(world, start, goal, window):
    """Windowed, time-expanded A* search from ``start`` towards ``goal``.

    States are (time step, position) pairs and waiting in place is a legal
    move.  Moves inside the first ``window`` steps cost 1 (waiting on the
    goal is free); at the window boundary the remaining distance is
    estimated with the RRA* heuristic.  As soon as a state beyond the
    window is popped, the partial path to it is returned.

    Returns the path as a list of positions, one per time step.
    Raises util.NoPathsFoundException when the search space is exhausted.
    """
    closed_set = set()
    open_set = []
    came_from = {}
    g = {start: 0}
    heapq.heappush(open_set, (0, 0, start))
    # Resumable RRA* search used as the heuristic.
    h = RRAstar(world, start, goal)
    while open_set:
        _, time_step, cur = heapq.heappop(open_set)
        # Any state past the window is acceptable; walk the parent links
        # back to the start.
        if time_step > window:
            return reverse_path((time_step, cur), came_from)
        closed_set.add(cur)
        # The agent may also wait, hence cur itself is a successor.
        for successor in (world.neighbours(cur) + [cur]):
            if successor in closed_set and cur != successor:
                continue
            if time_step == window:
                # Final in-window step: fold in the estimated remaining
                # distance to the goal.
                score = g[cur] + h.dist(successor)
            elif cur == goal and successor == goal:
                # Waiting on the goal costs nothing.
                score = g[cur]
            else:
                score = g[cur] + 1
            # Ignore a path if it is a longer variant
            if successor in g and score >= g[successor] and successor != cur:
                continue
            came_from[time_step + 1, successor] = (time_step, cur)
            g[successor] = score
            if time_step == window:
                # The heuristic is already part of the score here.
                heapq.heappush(open_set, (score, time_step + 1, successor))
            else:
                heapq.heappush(
                    open_set,
                    (score + h.dist(successor), time_step + 1, successor))
    raise util.NoPathsFoundException()
class Agent:
    """An agent that plans a path from ``start`` to ``goal`` with a
    time-expanded A* search, treating the paths of all other agents in a
    ``global_plan`` as moving obstacles."""

    def __init__(self, world, start, goal):
        self.world = world
        self.start = start
        self.goal = goal
        # Resumable RRA* search used as the A* heuristic.
        self.h = RRAstar(world, start, goal)
        self.path = []

    def plan(self, global_plan, start_time, max_time):
        """Plan a path that avoids every path in ``global_plan``.

        ``start_time``/``max_time`` impose a wall-clock budget in seconds;
        pass ``start_time=None`` to search without a time limit.
        """
        self.path = self._astar(global_plan, start_time, max_time)

    def _astar(self, global_plan, start_time, max_time):
        """Time-expanded A* over (time step, position) states.

        Returns the path as a list of positions, one per time step.
        Raises TimeExceeded when the wall-clock budget runs out and
        util.NoPathsFoundException when no conflict-free path exists.
        """
        closed_set = set()
        open_set = []
        came_from = {}
        g = {self.start: 0}
        heapq.heappush(open_set, (0, 0, self.start))
        while open_set:
            now = timeit.default_timer()
            if start_time is not None and (now - start_time) > max_time:
                raise TimeExceeded()
            _, time_step, cur = heapq.heappop(open_set)
            if cur == self.goal:
                return self._reverse_path((time_step, cur), came_from)
            closed_set.add(cur)
            for successor in self._successors(cur, time_step, global_plan,
                                              start_time, max_time):
                # Skip successors in the closed list; waiting in place
                # (successor == cur) is always allowed.
                if successor in closed_set and successor != cur:
                    continue
                score = g[cur] + 1
                # Ignore path if it is a longer variant
                if successor in g and score >= g[successor] \
                        and successor != cur:
                    continue
                came_from[time_step + 1, successor] = (time_step, cur)
                g[successor] = score
                heapq.heappush(open_set, (score + self.h.dist(successor),
                                          time_step + 1, successor))
        raise util.NoPathsFoundException()

    def _successors(self, pos, time, global_plan, start_time, max_time):
        """Return the moves from ``pos`` at ``time`` (waiting included)
        that do not conflict with any path in ``global_plan``."""
        successors = [pos] + self.world.neighbours(pos)
        filtered = []
        for successor in successors:
            for other_path in global_plan:
                cur_time = timeit.default_timer()
                if start_time is not None and \
                        (cur_time - start_time) > max_time:
                    raise TimeExceeded()
                # Equivalent to len(other_path[time:]) >= 2 without
                # copying the tail of the path on every check.
                if len(other_path) - time >= 2:
                    if util.moves_conflict(other_path[time:time + 2],
                                           (pos, successor)):
                        break
                # Agents whose plan already ended stay parked on their
                # last position.
                elif util.moves_conflict((other_path[-1], other_path[-1]),
                                         (pos, successor)):
                    break
            else:
                filtered.append(successor)
        return filtered

    def _reverse_path(self, state, came_from):
        """Follow the parent links from ``state`` back to the start and
        return the positions in chronological order."""
        path = [state[1]]
        while state in came_from:
            state = came_from[state]
            path.append(state[1])
        path.reverse()
        return path

    def __repr__(self):
        return f'{self.start}-{self.goal}'
class Agent:
    """An agent for priority-based planning with conflict negotiation.

    Paths are planned with a time-expanded A* search around the paths of
    all agents that currently outrank this one; planned paths are cached
    per priority set so replanning a known situation is free.
    """

    def __init__(self, world, start, goal, weights=None, caching=True):
        self.world = world
        self.start = start
        self.goal = goal
        # Resumable RRA* search used as the A* heuristic.
        self.h = RRAstar(world, start, goal)
        self.path = []
        # Maps a frozenset of higher-priority agents to the path that was
        # planned around them.
        self.path_cache = {}
        self.conflicts = SortedListWithKey(key=lambda conflict: conflict.time)
        self.resolved_conflicts = []
        self.current_conflict = None
        self.higher_prio = frozenset()
        # Default evaluated lazily per instance instead of once at
        # class-definition time, so the default Weights instance is not
        # shared between agents.
        self.weights = Weights(1, 3) if weights is None else weights
        self.caching = caching

    def plan(self, start_time, max_time):
        """Plan a path around all higher-priority agents, reusing a
        cached path when it is still conflict-free."""
        self.old_path = self.path
        self.construct_higher_prio()
        # Check if there is a path in the cache for this prio set up
        if self.caching and self.higher_prio in self.path_cache:
            # Only reuse the cached path when the higher-priority paths
            # themselves are conflict-free.
            paths = [agent.path for agent in self.higher_prio]
            conflicts = util.paths_conflict(paths)
            if not conflicts:
                self.path = self.path_cache[self.higher_prio]
                return
        self.path = self._astar(start_time, max_time)
        # Update the cache
        self.path_cache[self.higher_prio] = self.path

    def construct_higher_prio(self):
        """Rebuild ``self.higher_prio`` from the resolved conflicts plus
        the currently proposed solution."""
        prio = []
        # Construct priorities from the conflict solutions
        for conflict in self.resolved_conflicts:
            for i in range(conflict.solution['level'] + 1):
                if conflict.solution[i] == self:
                    continue
                prio.append(conflict.solution[i])
        # Update with the current proposed solution
        if self.current_conflict is not None:
            for i in range(self.current_conflict.proposal['level'] + 1):
                if self.current_conflict.proposal[i] == self:
                    continue
                prio.append(self.current_conflict.proposal[i])
        self.higher_prio = frozenset(prio)

    def propose(self, conflict):
        """Propose a solution for ``conflict``.

        Only proposes to go first when no proposals exist yet; returns
        None otherwise (the implicit "no proposal" answer).
        """
        # If there are no proposals yet, propose to go first
        if len(conflict.proposals) == 0:
            proposal = {'score': None, 'level': 0, 0: self}
            return proposal

    def resolved_conflict(self, conflict):
        """Record ``conflict`` as permanently resolved."""
        self.resolved_conflicts.append(conflict)

    def evaluate(self, conflicts):
        """Score the outcome of the current proposal.

        Raises ConflictNotSolved when the conflict under negotiation is
        still present in ``conflicts``; otherwise returns a weighted score
        of the change in path length and in conflict involvement.
        """
        # The current conflict must be gone, otherwise it was not solved.
        if self.current_conflict in conflicts.values():
            raise ConflictNotSolved()
        score = 0
        # Change in path length
        score += (len(self.old_path) - len(self.path)) * self.weights.path_len
        # Change in the number of conflicts this agent is involved in
        filtered = [c for c in conflicts.values() if self in c.agents]
        score += (len(self.conflicts) - len(filtered)) * \
            self.weights.conflict_count
        return score

    def _astar(self, start_time, max_time):
        """Time-expanded A* over (time step, position) states.

        Returns the path as a list of positions, one per time step.
        Raises TimeExceeded when the wall-clock budget runs out and
        util.NoPathsFoundException when no conflict-free path exists.
        """
        closed_set = set()
        open_set = []
        came_from = {}
        g = {self.start: 0}
        heapq.heappush(open_set, (0, 0, self.start))
        while open_set:
            now = timeit.default_timer()
            if start_time is not None and (now - start_time) > max_time:
                raise TimeExceeded()
            _, time_step, cur = heapq.heappop(open_set)
            if cur == self.goal:
                return self._reverse_path((time_step, cur), came_from)
            closed_set.add(cur)
            for successor in self._successors(cur, time_step, start_time,
                                              max_time):
                # Skip successors in the closed list; waiting in place
                # (successor == cur) is always allowed.
                if successor in closed_set and successor != cur:
                    continue
                score = g[cur] + 1
                # Ignore a path if it is a longer variant
                if successor in g and score >= g[successor] \
                        and successor != cur:
                    continue
                came_from[time_step + 1, successor] = (time_step, cur)
                g[successor] = score
                heapq.heappush(open_set, (score + self.h.dist(successor),
                                          time_step + 1, successor))
        raise util.NoPathsFoundException()

    def _successors(self, pos, time, start_time, max_time):
        """Return the moves from ``pos`` at ``time`` (waiting included)
        that do not conflict with any higher-priority agent's path."""
        successors = [pos] + self.world.neighbours(pos)
        filtered = []
        for successor in successors:
            for other_agent in self.higher_prio:
                cur_time = timeit.default_timer()
                if start_time is not None and \
                        (cur_time - start_time) > max_time:
                    raise TimeExceeded()
                path = other_agent.path
                # Equivalent to len(path[time:]) >= 2 without copying the
                # tail of the path on every check.
                if len(path) - time >= 2:
                    if util.moves_conflict(path[time:time + 2],
                                           (pos, successor)):
                        break
                else:
                    # Agents whose plan already ended stay parked on
                    # their last position.
                    if util.moves_conflict((path[-1], path[-1]),
                                           (pos, successor)):
                        break
            else:
                filtered.append(successor)
        return filtered

    def _reverse_path(self, state, came_from):
        """Follow the parent links from ``state`` back to the start and
        return the positions in chronological order."""
        path = [state[1]]
        while state in came_from:
            state = came_from[state]
            path.append(state[1])
        path.reverse()
        return path

    def __repr__(self):
        return f'{self.start}-{self.goal}'
class Agent:
    """An agent that plans with a time-expanded A* search around the
    paths of all agents in its priority lists (``stable_prio`` plus the
    per-round ``priorities``)."""

    def __init__(self, world, start, goal):
        self.world = world
        self.start = start
        self.goal = goal
        # Resumable RRA* search used as the A* heuristic.
        self.h = RRAstar(world, start, goal)
        self.path = []
        self.conflicts = SortedListWithKey(key=lambda c: c.time)
        self.priorities = []
        self.stable_prio = []
        # Union of the two priority lists, rebuilt on every _astar call.
        self._actual_prio = set()
        self.old_conflicts = set()

    def plan(self, start_time=None, max_time=None):
        """Plan a path around all agents in the priority lists.

        ``start_time``/``max_time`` impose a wall-clock budget in seconds;
        pass ``start_time=None`` to search without a time limit.
        """
        self.path = self._astar(start_time, max_time)

    def _astar(self, start_time, max_time):
        """Time-expanded A* over (time step, position) states.

        Returns the path as a list of positions, one per time step.
        Raises TimeExceeded when the wall-clock budget runs out and
        util.NoPathsFoundException when no conflict-free path exists.
        """
        self._actual_prio = set(self.stable_prio + self.priorities)
        closed_set = set()
        open_set = []
        came_from = {}
        g = {self.start: 0}
        heapq.heappush(open_set, (0, 0, self.start))
        while open_set:
            now = timeit.default_timer()
            if start_time is not None and (now - start_time) > max_time:
                raise TimeExceeded()
            _, time_step, cur = heapq.heappop(open_set)
            if cur == self.goal:
                return self._reverse_path((time_step, cur), came_from)
            closed_set.add(cur)
            for successor in self._successors(cur, time_step, start_time,
                                              max_time):
                # Skip successors in the closed list; waiting in place
                # (successor == cur) is always allowed.
                if successor in closed_set and successor != cur:
                    continue
                score = g[cur] + 1
                # Ignore a path if it is longer
                if successor in g and score >= g[successor] \
                        and successor != cur:
                    continue
                came_from[time_step + 1, successor] = (time_step, cur)
                g[successor] = score
                heapq.heappush(
                    open_set,
                    (score + self.h.dist(successor), time_step + 1,
                     successor))
        raise util.NoPathsFoundException()

    def _successors(self, pos, time, start_time=None, max_time=None):
        """Return the moves from ``pos`` at ``time`` (waiting included)
        that do not conflict with any prioritised agent's path."""
        successors = [pos] + self.world.neighbours(pos)
        filtered = []
        for successor in successors:
            for other_agent in self._actual_prio:
                cur_time = timeit.default_timer()
                if start_time is not None and \
                        (cur_time - start_time) > max_time:
                    raise TimeExceeded()
                path = other_agent.path
                # Equivalent to len(path[time:]) >= 2 without copying the
                # tail of the path on every check.
                if len(path) - time >= 2:
                    paths = [path[time:time + 2], (pos, successor)]
                else:
                    # Agents whose plan already ended stay parked on
                    # their last position.
                    paths = [[path[-1]], (pos, successor)]
                if util.paths_conflict(paths):
                    break
            else:
                filtered.append(successor)
        return filtered

    def _reverse_path(self, state, came_from):
        """Follow the parent links from ``state`` back to the start and
        return the positions in chronological order."""
        path = [state[1]]
        while state in came_from:
            state = came_from[state]
            path.append(state[1])
        path.reverse()
        return path

    def __repr__(self):
        return f'{self.start}-{self.goal}'