def replan_with_optimizers(evaluations, external_plan, domain, optimizers):
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return None
    optimizers = list(filter(lambda s: isinstance(s, ComponentStream), optimizers))
    if not optimizers:
        return None
    stream_plan, function_plan = partition_external_plan(external_plan)
    free_parameters = {o for r in stream_plan for o in r.output_objects}
    #free_parameters = {o for r in stream_plan for o in r.output_objects if isinstance(o, OptimisticObject)}
    initial_evaluations = {e: n for e, n in evaluations.items() if n.result == INIT_EVALUATION}
    #initial_evaluations = evaluations
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(filter(lambda f: evaluation_from_fact(f) not in initial_evaluations,
                                 result.get_certified()))
    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizers, initial_evaluations, free_parameters,
                              visited_facts, new_results)
    # TODO: ensure correct ordering
    # Keep only results whose external is an optimizer component stream
    new_results = list(filter(lambda r: isinstance(r.external, ComponentStream), new_results))
    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)
    optimizer_results = []
    for optimizer in {get_optimizer(r) for r in new_results}:  # None is like a unique optimizer
        relevant_results = [r for r in new_results if get_optimizer(r) == optimizer]
        optimizer_results.extend(combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))
    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    optimizer_plan = reschedule_stream_plan(initial_evaluations, goal_facts, copy.copy(domain),
                                            (stream_plan + optimizer_results), unique_binding=True)
    if not is_plan(optimizer_plan):
        return None
    return optimizer_plan + function_plan
def iterative_plan_streams(all_evaluations, externals, optimistic_solve_fn, complexity_limit, **effort_args):
    # Previously didn't have unique optimistic objects that could be constructed at arbitrary depths
    complexity_evals = {e: n for e, n in all_evaluations.items() if n.complexity <= complexity_limit}
    num_iterations = 0
    while True:
        num_iterations += 1
        results, exhausted = optimistic_process_streams(complexity_evals, externals,
                                                        complexity_limit, **effort_args)
        stream_plan, action_plan, cost, final_depth = hierarchical_plan_streams(
            complexity_evals, externals, results, optimistic_solve_fn, complexity_limit,
            depth=0, constraints=None, **effort_args)
        print('Attempt: {} | Results: {} | Depth: {} | Success: {}'.format(
            num_iterations, len(results), final_depth, is_plan(action_plan)))
        if is_plan(action_plan):
            return stream_plan, action_plan, cost
        if final_depth == 0:
            status = INFEASIBLE if exhausted else FAILED
            return status, status, cost
def replan_with_optimizers(evaluations, external_plan, domain, externals):
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return external_plan
    optimizer_streams = list(filter(lambda s: type(s) in [VariableStream, ConstraintStream], externals))
    if not optimizer_streams:
        return external_plan
    stream_plan, function_plan = partition_external_plan(external_plan)
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(filter(lambda f: evaluation_from_fact(f) not in evaluations,
                                 result.get_certified()))
    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizer_streams, evaluations, visited_facts, new_results)
    # list(...) so the filtered results can be concatenated under Python 3
    variable_results = list(filter(lambda r: isinstance(r.external, VariableStream), new_results))
    constraint_results = list(filter(lambda r: isinstance(r.external, ConstraintStream), new_results))
    new_results = variable_results + constraint_results  # TODO: ensure correct ordering
    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)
    optimizer_results = []
    for optimizer in {get_optimizer(r) for r in new_results}:  # None is like a unique optimizer
        relevant_results = [r for r in new_results if get_optimizer(r) == optimizer]
        optimizer_results.extend(combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))
    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    combined_plan = reschedule_stream_plan(evaluations, goal_facts, copy.copy(domain),
                                           (stream_plan + optimizer_results),
                                           unique_binding=True, unit_efforts=True)
    if not is_plan(combined_plan):
        return external_plan
    return combined_plan + function_plan
def decompose_stream_plan(stream_plan):
    if not is_plan(stream_plan):
        return stream_plan
    new_stream_plan = []
    for result in stream_plan:
        new_stream_plan.extend(decompose_result(result))
    return new_stream_plan
def is_refined(stream_plan):
    # TODO: lazily expand the shared objects in some cases to prevent increase in size
    if not is_plan(stream_plan):
        return True
    # TODO: some of these opt_index equal None
    return all((result.opt_index is None) or (result.opt_index == 0)
               for result in stream_plan)
def value_from_obj_plan(obj_plan):
    if not is_plan(obj_plan):
        return obj_plan
    #return [(action,) + tuple(values_from_objects(args)) for action, args in obj_plan]
    #return [(action, tuple(values_from_objects(args))) for action, args in obj_plan]
    value_plan = []
    for action in obj_plan:
        if len(action) == 3:
            name, inputs, outputs = action
            new_inputs = params_from_objects(inputs)  # values_from_objects
            new_outputs = outputs
            if isinstance(new_outputs, collections.Sequence):
                new_outputs = params_from_objects(new_outputs)  # values_from_objects
            new_action = (name, new_inputs, new_outputs)
        elif isinstance(action, DurativeAction):
            name, args, start, duration = action
            name, index = name[:-2], int(name[-1])
            if index != 0:
                continue
            new_action = DurativeAction(name, tuple(map(param_from_object, args)), start, duration)
        else:
            new_action = transform_action_args(action, param_from_object)  # values_from_objects
        value_plan.append(new_action)
    return value_plan
def hierarchical_plan_streams(evaluations, externals, results, optimistic_solve_fn, complexity_limit,
                              depth, constraints, **effort_args):
    if MAX_DEPTH <= depth:
        return OptSolution(None, None, INF), depth
    stream_plan, opt_plan, cost = optimistic_solve_fn(evaluations, results, constraints)
    if not is_plan(opt_plan) or is_refined(stream_plan):
        return OptSolution(stream_plan, opt_plan, cost), depth
    #action_plan, preimage_facts = opt_plan
    #dump_plans(stream_plan, action_plan, cost)
    #create_visualizations(evaluations, stream_plan, depth)
    #print(depth, get_length(stream_plan))
    #print('Stream plan ({}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
    #    get_length(stream_plan), compute_plan_effort(stream_plan), stream_plan,
    #    get_length(action_plan), cost, str_from_plan(action_plan)))
    #if is_plan(stream_plan):
    #    for result in stream_plan:
    #        effort = compute_result_effort(result, unit_efforts=True)
    #        if effort != 0:
    #            print(result, effort)
    #print()

    # TODO: identify control parameters that can be separated across actions
    new_depth = depth + 1
    new_results, bindings = optimistic_stream_evaluation(evaluations, stream_plan)
    if not (CONSTRAIN_STREAMS or CONSTRAIN_PLANS):
        return OptSolution(FAILED, FAILED, INF), new_depth
    #if CONSTRAIN_STREAMS:
    #    next_results = compute_stream_results(evaluations, new_results, externals, complexity_limit, **effort_args)
    #else:
    next_results, _ = optimistic_process_streams(evaluations, externals, complexity_limit, **effort_args)
    next_constraints = None
    if CONSTRAIN_PLANS:
        next_constraints = compute_skeleton_constraints(opt_plan, bindings)
    return hierarchical_plan_streams(evaluations, externals, next_results, optimistic_solve_fn,
                                     complexity_limit, new_depth, next_constraints, **effort_args)
def solve_restart(problem, max_time=INF, max_restarts=0, iteration_time=INF, abort=True, **kwargs):
    # TODO: iteratively lower the cost bound
    # TODO: a sequence of different planner configurations
    # TODO: reset objects and/or streams
    if (max_restarts >= 1) and (iteration_time == INF):
        iteration_time = min(2 * 60, iteration_time)
    assert (max_restarts == 0) or (iteration_time != INF)
    assert max_restarts >= 0
    start_time = time.time()
    for attempt in irange(1 + max_restarts):
        iteration_start_time = time.time()
        if elapsed_time(start_time) > max_time:
            break
        if attempt >= 1:
            print(SEPARATOR)
        #solution = planner_fn(problem)  # Or include the problem in the lambda
        remaining_time = min(iteration_time, max_time - elapsed_time(start_time))
        solution = solve(problem, max_time=remaining_time, **kwargs)
        plan, cost, certificate = solution
        if is_plan(plan):  # TODO: INFEASIBLE
            return solution
        if abort and (elapsed_time(iteration_start_time) < remaining_time):
            break  # TODO: return the cause of failure
    certificate = Certificate(all_facts=[], preimage_facts=[])  # TODO: aggregate
    return Solution(None, INF, certificate)
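# A minimal usage sketch for solve_restart, assuming `problem` is a PDDLStream problem built
# elsewhere (hypothetical here); extra keyword arguments are simply forwarded to solve().
def _example_solve_restart(problem):
    # Allow up to two restarts of 30 seconds each, within a 120-second overall budget.
    plan, cost, certificate = solve_restart(problem, max_time=120, max_restarts=2, iteration_time=30)
    if is_plan(plan):
        print('Found a plan of cost {}'.format(cost))
    return plan, cost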
def stream_plan_complexity(evaluations, stream_plan, stream_calls, complexity_op=COMPLEXITY_OP):
    if not is_plan(stream_plan):
        return INF
    # TODO: difference between a result having a particular complexity and the next result having something
    #optimistic_facts = {}
    optimistic_facts = {fact: evaluations[evaluation_from_fact(fact)].complexity
                        for fact in stream_plan_preimage(stream_plan)}
    result_complexities = []
    #complexity = 0
    for i, result in enumerate(stream_plan):
        #if result.external.get_complexity(num_calls=INF) == 0: # TODO: skip if true
        result_complexity = complexity_op([0] + [optimistic_facts[fact]
                                                 #optimistic_complexity(evaluations, optimistic_facts, fact)
                                                 for fact in result.get_domain()])
        #if stream_calls is None:
        #    num_calls = result.instance.num_calls
        #else:
        num_calls = stream_calls[i]
        result_complexity += result.external.get_complexity(num_calls)
        result_complexities.append(result_complexity)
        #complexity = complexity_op(complexity, result_complexity)
        for fact in result.get_certified():
            if fact not in optimistic_facts:
                optimistic_facts[fact] = result_complexity
    complexity = complexity_op([0] + result_complexities)
    return complexity
def hierarchical_plan_streams(evaluations, externals, results, optimistic_solve_fn, complexity_limit,
                              depth, constraints, **effort_args):
    if MAX_DEPTH <= depth:
        return None, INF, depth
    combined_plan, cost = optimistic_solve_fn(evaluations, results, constraints)
    if not is_plan(combined_plan):
        return combined_plan, cost, depth
    stream_plan, action_plan = separate_plan(combined_plan, stream_only=False)
    #dump_plans(stream_plan, action_plan, cost)
    #create_visualizations(evaluations, stream_plan, depth)
    #print(depth, get_length(stream_plan))
    if is_refined(stream_plan):
        return combined_plan, cost, depth
    new_results, bindings = optimistic_stream_evaluation(evaluations, stream_plan)
    if not CONSTRAIN_STREAMS and not CONSTRAIN_PLANS:
        return None, INF, depth + 1
    if CONSTRAIN_STREAMS:
        next_results = compute_stream_results(evaluations, new_results, externals, **effort_args)
    else:
        next_results, _ = optimistic_process_streams(evaluations, externals, complexity_limit, **effort_args)
    next_constraints = None
    if CONSTRAIN_PLANS:
        next_constraints = compute_skeleton_constraints(action_plan, bindings)
    return hierarchical_plan_streams(evaluations, externals, next_results, optimistic_solve_fn,
                                     complexity_limit, depth + 1, next_constraints, **effort_args)
def stream_plan_complexity(evaluations, stream_plan, stream_calls=None):
    if not is_plan(stream_plan):
        return INF
    # TODO: difference between a result having a particular complexity and the next result having something
    optimistic_facts = {}
    total_complexity = 0
    for i, result in enumerate(stream_plan):
        result_complexity = 0
        for fact in result.get_domain():
            evaluation = evaluation_from_fact(fact)
            if evaluation in evaluations:
                fact_complexity = evaluations[evaluation].complexity
            else:
                fact_complexity = optimistic_facts[fact]
            result_complexity = COMPLEXITY_OP(result_complexity, fact_complexity)
        if stream_calls is None:
            result_complexity += result.instance.num_calls + 1
        elif i < len(stream_calls):
            result_complexity += stream_calls[i] + 1
        else:
            result_complexity += 1
        for fact in result.get_certified():
            if fact not in optimistic_facts:
                optimistic_facts[fact] = result_complexity
        total_complexity = COMPLEXITY_OP(total_complexity, result_complexity)
    return total_complexity
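# A self-contained sketch of the complexity propagation above, assuming COMPLEXITY_OP behaves
# like max. It mirrors the loop with plain tuples instead of the library's Result objects:
# each hypothetical result's complexity is the max complexity of its domain facts plus one per
# call, and its certified facts inherit that complexity for downstream results.
def _example_complexity_propagation():
    # (domain facts, certified facts, calls already made) for a hypothetical two-stream plan
    plan = [([], [('p', '?x')], 0),
            ([('p', '?x')], [('q', '?x')], 0)]
    optimistic_facts, total = {}, 0
    for domain, certified, num_calls in plan:
        complexity = max([0] + [optimistic_facts[f] for f in domain]) + num_calls + 1
        for f in certified:
            optimistic_facts.setdefault(f, complexity)
        total = max(total, complexity)
    # First stream: 0 + 1 = 1; second inherits 1 from ('p', '?x') and adds 1 -> 2
    return total  # == 2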
def hierarchical_plan_streams(evaluations, externals, results, optimistic_solve_fn, complexity_limit,
                              depth, constraints, **effort_args):
    if MAX_DEPTH <= depth:
        return None, INF, depth
    combined_plan, cost = optimistic_solve_fn(evaluations, results, constraints)
    if not is_plan(combined_plan):
        return combined_plan, cost, depth
    stream_plan, action_plan = separate_plan(combined_plan, stream_only=False)
    #dump_plans(stream_plan, action_plan, cost)
    #create_visualizations(evaluations, stream_plan, depth)
    #print(depth, get_length(stream_plan))
    #print('Stream plan ({}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
    #    get_length(stream_plan), compute_plan_effort(stream_plan), stream_plan,
    #    get_length(action_plan), cost, str_from_plan(action_plan)))
    #if is_plan(stream_plan):
    #    for result in stream_plan:
    #        effort = compute_result_effort(result, unit_efforts=True)
    #        if effort != 0:
    #            print(result, effort)
    #print()
    if is_refined(stream_plan):
        return combined_plan, cost, depth
    new_results, bindings = optimistic_stream_evaluation(evaluations, stream_plan)
    if not CONSTRAIN_STREAMS and not CONSTRAIN_PLANS:
        return None, INF, depth + 1
    if CONSTRAIN_STREAMS:
        next_results = compute_stream_results(evaluations, new_results, externals, **effort_args)
    else:
        next_results, _ = optimistic_process_streams(evaluations, externals, complexity_limit, **effort_args)
    next_constraints = None
    if CONSTRAIN_PLANS:
        next_constraints = compute_skeleton_constraints(action_plan, bindings)
    return hierarchical_plan_streams(evaluations, externals, next_results, optimistic_solve_fn,
                                     complexity_limit, depth + 1, next_constraints, **effort_args)
def combine_optimizers_greedy(evaluations, external_plan):
    if not is_plan(external_plan):
        return external_plan
    # The key thing is that a variable must be grounded before it can be used in a non-stream thing
    # TODO: construct variables in order
    # TODO: graph cut algorithm to minimize the number of constraints that are excluded
    # TODO: reorder to ensure that constraints are done first since they are likely to fail as tests
    incoming_edges, outgoing_edges = neighbors_from_orders(get_partial_orders(external_plan))
    queue = []
    functions = []
    for v in external_plan:
        if not incoming_edges[v]:
            (functions if isinstance(v, FunctionResult) else queue).append(v)
    current = []
    ordering = []
    while queue:
        optimizer = get_optimizer(current[-1]) if current else None
        for v in queue:
            if optimizer == get_optimizer(v):
                current.append(v)
                break
        else:
            ordering.extend(combine_optimizer_plan(current, functions))
            current = [queue[0]]
        v1 = current[-1]
        queue.remove(v1)
        for v2 in outgoing_edges[v1]:
            incoming_edges[v2].remove(v1)
            if not incoming_edges[v2]:
                (functions if isinstance(v2, FunctionResult) else queue).append(v2)
    ordering.extend(combine_optimizer_plan(current, functions))
    return ordering + functions
def value_from_obj_plan(obj_plan):
    if not is_plan(obj_plan):
        return obj_plan
    value_plan = []
    for action in obj_plan:
        # TODO: I shouldn't need this decomposition any more, right?
        if isinstance(action, StreamAction):
            name, inputs, outputs = action
            new_inputs = params_from_objects(inputs)
            new_outputs = outputs
            #if isinstance(new_outputs, collections.Sequence): # TODO: what was this for?
            new_outputs = params_from_objects(new_outputs)
            new_action = StreamAction(name, new_inputs, new_outputs)
        elif isinstance(action, DurativeAction):
            name, args, start, duration = action
            name, index = name[:-2], int(name[-1])
            if index != 0:
                continue
            new_action = DurativeAction(name, tuple(map(param_from_object, args)), start, duration)
        elif isinstance(action, Action):
            new_action = transform_action_args(action, param_from_object)  # values_from_objects
        elif isinstance(action, Assignment):
            new_action = transform_action_args(action, param_from_object)
        else:
            raise ValueError(action)
        value_plan.append(new_action)
    return value_plan
def compute_plan_effort(stream_plan, **kwargs):
    # TODO: compute effort in the delete relaxation way
    if not is_plan(stream_plan):
        return INF
    if not stream_plan:
        return 0
    return sum(result.get_effort(**kwargs) for result in stream_plan)
def add_plan(self, plan, cost):
    # TODO: double-check that plan is a solution
    if not is_plan(plan) or (self.best_cost <= cost):
        return
    solution = Solution(plan, cost)
    self.best_plan, self.best_cost = solution
    self.solutions.append(solution)
def process_stream_plan(store, domain, disabled, stream_plan, action_plan, cost,
                        bind=True, max_failures=0):
    # Bad old implementation of this method
    # The only advantage of this vs skeleton is that this can avoid the combinatorial growth in bindings
    if not is_plan(stream_plan):
        return
    stream_plan = [result for result in stream_plan if result.optimistic]
    free_objects = get_free_objects(stream_plan)
    bindings = {}
    bound_plan = []
    for i, opt_result in enumerate(stream_plan):
        if (store.best_cost <= cost) or (max_failures < (i - len(bound_plan))):
            # TODO: this terminates early when bind=False
            break
        opt_inputs = [inp for inp in opt_result.instance.input_objects if inp in free_objects]
        if (not bind and opt_inputs) or not all(inp in bindings for inp in opt_inputs):
            continue
        bound_result = opt_result.remap_inputs(bindings)
        bound_instance = bound_result.instance
        if bound_instance.enumerated or not is_instance_ready(store.evaluations, bound_instance):
            continue
        # TODO: could remove disabled and just use complexity_limit
        new_results = process_instance(store, domain, bound_instance)
        if not bound_instance.enumerated:
            disabled.add(bound_instance)
        if not new_results:
            continue
        bound_plan.append(new_results[0])
        bindings = update_bindings(bindings, bound_result, bound_plan[-1])
        cost = update_cost(cost, opt_result, bound_plan[-1])
    if len(stream_plan) == len(bound_plan):
        store.add_plan(bind_action_plan(action_plan, bindings), cost)
def compute_plan_effort(stream_plan, **kwargs):
    if not is_plan(stream_plan):
        return INF
    if not stream_plan:
        return 0
    return sum(compute_result_effort(result, **kwargs) for result in stream_plan)
def reorder_stream_plan(stream_plan, **kwargs):
    if not is_plan(stream_plan):
        return stream_plan
    stream_orders = get_partial_orders(stream_plan)
    in_stream_orders, out_stream_orders = neighbors_from_orders(stream_orders)
    valid_combine = lambda v, subset: out_stream_orders[v] <= subset
    #valid_combine = lambda v, subset: in_stream_orders[v] & subset
    return dynamic_programming(stream_plan, valid_combine, get_stream_stats, **kwargs)
def compute_expected_cost(stream_plan, stats_fn=Performance.get_statistics):
    if not is_plan(stream_plan):
        return INF
    expected_cost = 0.
    for result in reversed(stream_plan):
        p_success, overhead = stats_fn(result)
        expected_cost = overhead + p_success * expected_cost
    return expected_cost
def test_init_goal(problem, **kwargs):
    # With actions and streams disabled, a solution can only be the empty plan, so
    # is_plan distinguishes a goal satisfied by init ([]) from an unreachable goal (None)
    problem = create_simplified_problem(problem, use_actions=False, use_streams=False, new_goal=None)
    plan, cost, certificate = solve(problem, **kwargs)
    assert not plan
    is_goal = is_plan(plan)
    return is_goal, certificate
def compute_expected_cost(stream_plan, stats_fn=get_stream_stats):
    # TODO: prioritize cost functions as they can prune when we have a better plan
    if not is_plan(stream_plan):
        return INF
    expected_cost = 0
    for result in reversed(stream_plan):
        p_success, overhead = stats_fn(result)
        expected_cost = overhead + p_success * expected_cost
    return expected_cost
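# A minimal worked example of the expected-cost recurrence above, using hypothetical
# (p_success, overhead) numbers rather than the library's stats functions: streams run in
# order and each later stream only runs if the earlier ones succeed, so processing the plan
# backwards gives expected_cost = overhead + p_success * expected_cost_of_remainder.
def _example_expected_cost():
    stats = [(0.9, 1.0), (0.5, 2.0), (0.25, 4.0)]  # hypothetical three-stream plan
    expected_cost = 0.
    for p_success, overhead in reversed(stats):
        expected_cost = overhead + p_success * expected_cost
    # 4.0 -> 2.0 + 0.5*4.0 = 4.0 -> 1.0 + 0.9*4.0 = 4.6
    return expected_cost  # == 4.6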
def solve_incremental(problem, constraints=PlanConstraints(),
                      unit_costs=False, success_cost=INF,
                      max_iterations=INF, max_time=INF,
                      start_complexity=0, complexity_step=1, max_complexity=INF,
                      verbose=False, **search_args):
    """
    Solves a PDDLStream problem by alternating between applying all possible streams and searching
    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the set of legal solutions
    :param max_time: the maximum amount of time to apply streams
    :param max_iterations: the maximum amount of search iterations
    :param unit_costs: use unit action costs rather than numeric costs
    :param success_cost: an exclusive (strict) upper bound on plan cost to terminate
    :param start_complexity: the stream complexity on the first iteration
    :param complexity_step: the increase in the complexity limit after each iteration
    :param max_complexity: the maximum stream complexity
    :param verbose: if True, this prints the result of each stream application
    :param search_args: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions (or None),
        cost is the cost of the plan, and evaluations is init but expanded using stream applications
    """
    # max_complexity = 0 => current
    # complexity_step = INF => exhaustive
    # success_cost = terminate_cost = decision_cost
    evaluations, goal_expression, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    store = SolutionStore(evaluations, max_time, success_cost, verbose)  # TODO: include other info here?
    ensure_no_fluent_streams(externals)
    if UPDATE_STATISTICS:
        load_stream_statistics(externals)
    num_iterations = num_calls = 0
    complexity_limit = start_complexity
    instantiator = Instantiator(externals, evaluations)
    num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
    while not store.is_terminated() and (num_iterations <= max_iterations) and (complexity_limit <= max_complexity):
        num_iterations += 1
        print('Iteration: {} | Complexity: {} | Calls: {} | Evaluations: {} | Solved: {} | Cost: {} | Time: {:.3f}'.format(
            num_iterations, complexity_limit, num_calls, len(evaluations),
            store.has_solution(), store.best_cost, store.elapsed_time()))
        plan, cost = solve_finite(evaluations, goal_expression, domain,
                                  max_cost=min(store.best_cost, constraints.max_cost), **search_args)
        if is_plan(plan):
            store.add_plan(plan, cost)
        if not instantiator:
            break
        if complexity_step is None:
            # TODO: option to select the next k-smallest complexities
            complexity_limit = instantiator.min_complexity()
        else:
            complexity_limit += complexity_step
        num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
    #retrace_stream_plan(store, domain, goal_expression)
    #print('Final queue size: {}'.format(len(instantiator)))
    if UPDATE_STATISTICS:
        write_stream_statistics(externals, verbose)
    return store.extract_solution()
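# A minimal usage sketch for solve_incremental, assuming `problem` is a PDDLStream problem
# built elsewhere (hypothetical here); only parameters visible in the signature above are used,
# and the return value is unpacked as described by the docstring.
def _example_solve_incremental(problem):
    plan, cost, evaluations = solve_incremental(problem, unit_costs=True,
                                                complexity_step=1, max_time=60, verbose=True)
    if is_plan(plan):
        print('Solved with cost {}: {}'.format(cost, plan))
    return plan, cost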
def check_dominated(skeleton_queue, stream_plan):
    if not is_plan(stream_plan):
        return True
    for skeleton in skeleton_queue.skeletons:
        # TODO: has stream_plans and account for different output object values
        if frozenset(stream_plan) <= frozenset(skeleton.stream_plan):
            print(stream_plan)
            print(skeleton.stream_plan)
            raise NotImplementedError()
def solve_skeleton(evaluations={}, opt_solutions=[], max_time=INF, success_cost=0,
                   max_complexity=INF, reorder=False, visualize=True):
    store = SolutionStore(evaluations=evaluations, max_time=max_time,
                          success_cost=success_cost, verbose=True)
    skeleton_queue = SkeletonQueue(store, domain=None, disable=True)
    for opt_solution in opt_solutions:  # TODO: add and then process later
        stream_plan, opt_plan, cost = opt_solution
        #stream_plan = replan_with_optimizers(evaluations, stream_plan, domain, externals) or stream_plan
        stream_plan = combine_optimizers(evaluations, stream_plan)
        #stream_plan = get_synthetic_stream_plan(stream_plan, # evaluations
        #                                        [s for s in synthesizers if not s.post_only])
        #stream_plan = recover_optimistic_outputs(stream_plan)
        if reorder:
            # TODO: this blows up memory wise for long stream plans
            stream_plan = reorder_stream_plan(store, stream_plan)
        num_optimistic = sum(r.optimistic for r in stream_plan) if stream_plan else 0
        action_plan = opt_plan.action_plan if is_plan(opt_plan) else opt_plan
        print('Stream plan ({}, {}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
            get_length(stream_plan), num_optimistic, compute_plan_effort(stream_plan), stream_plan,
            get_length(action_plan), cost, str_from_plan(action_plan)))
        if is_plan(stream_plan) and visualize:
            num_iterations = 0
            log_plans(stream_plan, action_plan, num_iterations)
            #create_visualizations(evaluations, stream_plan, num_iterations)
            visualize_stream_plan(stream_plan)

        ################

        #optimizer_plan = replan_with_optimizers(evaluations, stream_plan, domain, optimizers)
        #optimizer_plan = None
        #if optimizer_plan is not None:
        #    # TODO: post process a bound plan
        #    print('Optimizer plan ({}, {:.3f}): {}'.format(
        #        get_length(optimizer_plan), compute_plan_effort(optimizer_plan), optimizer_plan))
        #    skeleton_queue.new_skeleton(optimizer_plan, opt_plan, cost)

        # TODO: these are quite different
        #skeleton_queue.process(stream_plan, opt_plan, cost, complexity_limit=INF, max_time=INF)
        skeleton_queue.process(stream_plan, opt_plan, cost, complexity_limit=max_complexity, max_time=0)
    return store.extract_solution()
def decompose_stream_plan(stream_plan):
    if not is_plan(stream_plan):
        return stream_plan
    new_stream_plan = []
    for result in stream_plan:
        if isinstance(result, SynthStreamResult):
            new_stream_plan.extend(result.decompose())
        else:
            new_stream_plan.append(result)
    return new_stream_plan
def process(self, stream_plan, action_plan, cost, complexity_limit, max_time=0):
    # TODO: manually add stream_plans for synthesizers/optimizers
    start_time = time.time()
    if is_plan(stream_plan):
        #print([result for result in stream_plan if result.optimistic])
        #raw_input('New skeleton')
        self.new_skeleton(stream_plan, action_plan, cost)
        self.greedily_process()
    elif stream_plan is INFEASIBLE:
        # TODO: use complexity_limit
        self.process_until_new()
    self.timed_process(max_time - elapsed_time(start_time))
    self.accelerate_best_bindings()
def recover_optimistic_outputs(stream_plan):
    if not is_plan(stream_plan):
        return stream_plan
    new_mapping = {}
    new_stream_plan = []
    for result in stream_plan:
        new_result = result.remap_inputs(new_mapping)
        new_stream_plan.append(new_result)
        if isinstance(new_result, StreamResult):
            opt_result = new_result.instance.opt_results[0]  # TODO: empty if disabled
            new_mapping.update(safe_zip(new_result.output_objects, opt_result.output_objects))
    return new_stream_plan
def reorder_combined_plan(evaluations, combined_plan, action_info, domain, **kwargs):
    if not is_plan(combined_plan):
        return combined_plan
    stream_plan, action_plan = separate_plan(combined_plan)
    orders = get_combined_orders(evaluations, stream_plan, action_plan, domain)
    _, out_orders = neighbors_from_orders(orders)
    valid_combine = lambda v, subset: out_orders[v] <= subset

    def stats_fn(operator):
        if isinstance(operator, Result):
            return get_stream_stats(operator)
        name, _ = operator
        info = action_info[name]
        return info.p_success, info.overhead

    return dynamic_programming(combined_plan, valid_combine, stats_fn, **kwargs)
def process(self, stream_plan, action_plan, cost, complexity_limit, max_time=0, accelerate=False):
    start_time = time.time()
    if is_plan(stream_plan):
        self.new_skeleton(stream_plan, action_plan, cost)
        self.greedily_process()
    elif (stream_plan is INFEASIBLE) and not self.process_until_new():
        # Move this after process_complexity
        return INFEASIBLE
    if not self.queue:
        return FAILED
    # TODO: add and process
    self.timed_process(max_time=(max_time - elapsed_time(start_time)))
    self.process_complexity(complexity_limit)
    if accelerate:
        self.accelerate_best_bindings()
    return FAILED