def extract_order(evaluations, preimage):
    # Collect the stream results that certify the preimage facts, chaining
    # backward through each result's domain facts.
    stream_results = set()
    processed = set(map(fact_from_fd, preimage))
    queue = deque(processed)
    while queue:
        fact = queue.popleft()
        result = evaluations[evaluation_from_fact(fact)]
        if result is None:  # initial fact, not produced by a stream
            continue
        stream_results.add(result)
        for fact2 in result.instance.get_domain():
            if fact2 not in processed:
                processed.add(fact2)  # mark visited so shared facts are expanded once
                queue.append(fact2)

    # Partial order: a result's domain facts must be certified before it.
    orders = set()
    for result in stream_results:
        for fact in result.instance.get_domain():
            result2 = evaluations[evaluation_from_fact(fact)]
            if result2 is not None:
                orders.add((result2, result))  # result2 must come before result

    ordered_results = []
    for result in topological_sort(stream_results, orders):
        if isinstance(result, SynthStreamResult):
            ordered_results.extend(result.decompose())
        else:
            ordered_results.append(result)
    return ordered_results
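# topological_sort is imported from elsewhere in the package; the sketch below is
# only an illustration of the behavior assumed by extract_order above and by the
# reorder functions later in this module: Kahn's algorithm over the (v1, v2)
# precedence pairs, breaking ties among currently available vertices using
# priority_fn (lowest priority value first). The name _topological_sort_sketch
# is hypothetical and the sketch is not the imported implementation.
def _topological_sort_sketch(vertices, orders, priority_fn=lambda v: 0):
    from collections import defaultdict
    from heapq import heappush, heappop
    from itertools import count
    incoming, outgoing = defaultdict(set), defaultdict(set)
    for v1, v2 in orders:  # (v1, v2): v1 must precede v2
        outgoing[v1].add(v2)
        incoming[v2].add(v1)
    tiebreak = count()  # keeps the heap from comparing the vertices themselves
    queue = []
    for v in vertices:
        if not incoming[v]:
            heappush(queue, (priority_fn(v), next(tiebreak), v))
    ordering = []
    while queue:
        _, _, v1 = heappop(queue)
        ordering.append(v1)
        for v2 in outgoing[v1]:  # release vertices whose predecessors are all placed
            incoming[v2].discard(v1)
            if not incoming[v2]:
                heappush(queue, (priority_fn(v2), next(tiebreak), v2))
    return ordering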
def layer_reorder_stream_plan(stream_plan, **kwargs):
    if not stream_plan:
        return stream_plan
    stream_orders = get_partial_orders(stream_plan)
    reversed_orders = {(s2, s1) for s1, s2 in stream_orders}
    distances = compute_distances(stream_plan)
    priority_fn = lambda s: Score(not s.external.has_outputs, distances[s],
                                  -s.stats_heuristic())
    # Topologically sort the reversed partial order, then flip the result back.
    reverse_order = topological_sort(stream_plan, reversed_orders,
                                     priority_fn=priority_fn)
    return reverse_order[::-1]
def greedy_reorder_stream_plan(stream_plan, **kwargs):
    if not stream_plan:
        return stream_plan
    return topological_sort(stream_plan, get_partial_orders(stream_plan),
                            priority_fn=lambda s: s.get_statistics().overhead)
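# dynamic_programming below relies on two helpers that are not defined in this
# section. The sketches here are assumptions reconstructed from how they are used
# (the keyword fields cost/head/subset, and the pairwise dominance test over
# stats_fn values), not necessarily the canonical definitions.
from collections import namedtuple  # may duplicate an import at the module top

Subproblem = namedtuple('Subproblem', ['cost', 'head', 'subset'])

def compute_pruning_orders(vertices, stats_fn=Performance.get_statistics, **kwargs):
    # v1 dominates v2 when it is no worse on every statistic (e.g. p_success and
    # overhead), in which case ordering v1 before v2 can be imposed safely.
    dominates = lambda v1, v2: all(s1 <= s2 for s1, s2 in zip(stats_fn(v1), stats_fn(v2)))
    effort_orders = set()
    for i, v1 in enumerate(vertices):
        for v2 in vertices[i + 1:]:
            if dominates(v1, v2):
                effort_orders.add((v1, v2))  # Includes equality
            elif dominates(v2, v1):
                effort_orders.add((v2, v1))
    return effort_orders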
def dynamic_programming(store, vertices, valid_head_fn, stats_fn=Performance.get_statistics,
                        prune=True, greedy=False, **kwargs):
    # TODO: include context here as a weak constraint
    # TODO: works in the absence of partial orders
    # TODO: can also more manually reorder
    # 2^N rather than N!
    start_time = time.time()
    effort_orders = set()  # (v1, v2): v1 is cheaper than (dominates) v2
    if prune:
        effort_orders.update(compute_pruning_orders(vertices, stats_fn=stats_fn, **kwargs))
    _, out_priority_orders = neighbors_from_orders(effort_orders)  # out_priority_orders[v]: more expensive vertices ordered after v
    priority_ordering = topological_sort(vertices, effort_orders)[::-1]  # most expensive to cheapest
    # TODO: can break ties with index on action plan to prioritize doing the temporally first things
    # TODO: could the greedy strategy lead to premature choices
    # TODO: this starts to blow up - group together similar streams (e.g. collision streams) to decrease size
    # TODO: key grouping concerns are partial orders and ensuring feasibility (isomorphism)
    # TODO: flood-fill cheapest as soon as something that has no future dependencies has been found
    # TODO: do the forward version to take advantage of sink vertices
    subset = frozenset()
    queue = deque([subset])  # Acyclic because subsets
    subproblems = {subset: Subproblem(cost=0, head=None, subset=None)}
    while queue:  # searches backward from last to first
        if store.is_terminated():
            return vertices
        subset = queue.popleft()  # TODO: greedy/weighted A* version of this (heuristic is next cheapest stream)
        applied = set()
        # TODO: roll-out more than one step to cut the horizon
        # TODO: compute a heuristic that's the best case affordances from subsequent streams
        for v in priority_ordering:  # most expensive first
            if greedy and applied:
                break
            if (v not in subset) and valid_head_fn(v, subset) and not (out_priority_orders[v] & applied):
                applied.add(v)
                new_subset = frozenset([v]) | subset
                p_success, overhead = stats_fn(v)
                new_cost = overhead + p_success * subproblems[subset].cost
                subproblem = Subproblem(cost=new_cost, head=v, subset=subset)  # Adds new element to the front
                if new_subset not in subproblems:
                    queue.append(new_subset)
                    subproblems[new_subset] = subproblem
                elif new_cost < subproblems[new_subset].cost:
                    subproblems[new_subset] = subproblem

    # Reconstruct the ordering by following subproblem back-pointers from the full set.
    ordering = []
    subset = frozenset(vertices)
    while True:
        if subset not in subproblems:
            print(vertices)  # TODO: some sort of bug where the problem isn't solved?
        subproblem = subproblems[subset]
        if subproblem.head is None:
            break
        ordering.append(subproblem.head)
        subset = subproblem.subset
    #print('Streams: {} | Expected cost: {:.3f} | Time: {:.3f}'.format(
    #    len(ordering), compute_expected_cost(ordering, stats_fn=stats_fn), elapsed_time(start_time)))
    return ordering
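# The commented-out diagnostic above calls compute_expected_cost, which is not
# defined in this section. A minimal sketch, assuming it folds the same
# recurrence used for new_cost in the search (cost = overhead + p_success *
# cost of the remaining suffix), evaluated from the last stream backward:
def compute_expected_cost(stream_plan, stats_fn=Performance.get_statistics):
    expected_cost = 0.
    for result in reversed(stream_plan):
        p_success, overhead = stats_fn(result)
        expected_cost = overhead + p_success * expected_cost
    return expected_cost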