def extract_order(evaluations, preimage):
    """Return the stream results supporting *preimage*, in dependency order.

    Walks backwards from the preimage facts through each result's domain to
    collect every supporting stream result, builds the partial order
    (producer before consumer), and topologically sorts it, expanding any
    SynthStreamResult into its component results.

    Fix: previously only the initial preimage facts were in ``processed``, so
    a domain fact reachable along multiple paths was enqueued and re-expanded
    once per path (redundant work, potentially exponential in deep plans).
    Facts are now marked processed when enqueued, so each is expanded once.
    """
    stream_results = set()
    processed = set(map(fact_from_fd, preimage))
    queue = deque(processed)
    while queue:
        fact = queue.popleft()
        result = evaluations[evaluation_from_fact(fact)]
        if result is None:
            # Fact is an initial evaluation, not produced by a stream.
            continue
        stream_results.add(result)
        for domain_fact in result.instance.get_domain():
            if domain_fact not in processed:
                processed.add(domain_fact)  # mark on enqueue: expand each fact once
                queue.append(domain_fact)
    # Producer-before-consumer ordering constraints between results.
    orders = set()
    for result in stream_results:
        for fact in result.instance.get_domain():
            result2 = evaluations[evaluation_from_fact(fact)]
            if result2 is not None:
                orders.add((result2, result))
    ordered_results = []
    for result in topological_sort(stream_results, orders):
        if isinstance(result, SynthStreamResult):
            ordered_results.extend(result.decompose())
        else:
            ordered_results.append(result)
    return ordered_results
def simultaneous_stream_plan(evaluations, goal_expression, domain, stream_results,
                             negated, unit_costs=True, **kwargs):
    """Plan streams and actions together by compiling streams into actions.

    Solves a single finite problem over a domain augmented with synthetic
    stream actions, then splits the solution back into a stream plan and an
    action plan. Returns (stream plan + function plan, action plan, cost),
    or (None, None, INF) when the planner fails. Negated streams are not
    supported here.
    """
    if negated:
        raise NotImplementedError()
    # Seed every known evaluation, then overlay optimistic function results.
    function_evaluations = {evaluation: None for evaluation in evaluations}
    for result in stream_results:
        if not isinstance(result, FunctionResult):
            continue
        for fact in result.get_certified():
            function_evaluations[evaluation_from_fact(fact)] = result
    new_domain, stream_result_from_name = add_stream_actions(domain, stream_results)
    combined_plan, _ = solve_finite(function_evaluations, goal_expression,
                                    new_domain, unit_costs=unit_costs, **kwargs)
    if combined_plan is None:
        return None, None, INF  # TODO: return plan cost
    # Partition the combined plan: synthetic stream actions vs. real actions.
    stream_plan, action_plan = [], []
    for name, args in combined_plan:
        if name in stream_result_from_name:
            stream_plan.append(stream_result_from_name[name])
        else:
            action_plan.append((name, args))
    action_cost = len(action_plan)
    function_plan = set()
    if not unit_costs:
        # Recompute the true cost and gather the function results it depends on.
        action_cost = 0
        results_from_head = get_results_from_head(function_evaluations)
        for name, args in action_plan:
            action = find(lambda a: a.name == name, domain.actions)
            pddl_args = tuple(map(pddl_from_object, args))
            function_plan.update(
                extract_function_results(results_from_head, action, pddl_args))
            action_cost += get_cost(domain, results_from_head, name, args)
    return (stream_plan + list(function_plan)), action_plan, action_cost
def add_certified(evaluations, result):
    """Record *result*'s certified facts in *evaluations* (fact -> producing result).

    Returns the evaluations that were not already present, in certification order.
    """
    added = []
    for fact in result.get_certified():
        evaluation = evaluation_from_fact(fact)
        if evaluation in evaluations:
            continue  # keep the first producer of each evaluation
        evaluations[evaluation] = result
        added.append(evaluation)
    return added
def proccess_stream_plan(key, sampling_problem, queue, evaluations, store):
    # NOTE(review): name is misspelled ("proccess") but kept — callers use it.
    """Pop one skeleton from the sampling queue and advance it by one stream.

    Evaluates the first stream of the skeleton's remaining stream plan, pushes
    a new skeleton onto *queue* for every result obtained (with bindings and
    cost updated), and re-queues the current skeleton with an incremented
    attempt count while its instance is not enumerated. A skeleton with an
    empty stream plan is a finished plan and is handed to *store*.
    """
    instance, num_processed, bindings, stream_plan, action_plan, cost = sampling_problem
    if not stream_plan:
        # All streams bound: the action plan is complete.
        store.add_plan(bind_plan(bindings, action_plan), cost)
        return
    if store.best_cost <= cost:
        # Already beaten by a found plan; prune this skeleton.
        instance.disabled = False  # TODO: only disable if not used elsewhere
        # TODO: could just hash instances
        return
    opt_result = stream_plan[0]  # TODO: could do several at once but no real point
    # Invariant: the instance's domain must already be satisfied.
    assert (not any(evaluation_from_fact(f) not in evaluations
                    for f in instance.get_domain()))
    # TODO: hash combinations to prevent repeats
    # Replay results produced since this skeleton was last processed.
    results = []
    for i in range(num_processed, len(instance.results_history)):
        results.extend(instance.results_history[i])
    if not results and not instance.enumerated:
        #print(key.attempts, key.length)
        results = instance.next_results(verbose=store.verbose)
    for result in results:
        add_certified(evaluations, result)
        if (type(result) is PredicateResult) and (opt_result.value != result.value):
            # Predicate evaluated to a different value than optimistically assumed.
            continue  # TODO: check if satisfies target certified
        new_bindings = bindings.copy()
        if isinstance(result, StreamResult):
            # Bind the optimistic output placeholders to the real objects.
            for opt, obj in zip(opt_result.output_objects, result.output_objects):
                assert (opt not in new_bindings)  # TODO: return failure if conflicting bindings
                new_bindings[opt] = obj
        new_stream_plan = stream_plan[1:]
        new_cost = cost
        if type(result) is FunctionResult:
            # Correct the plan cost by the real-vs-optimistic function value.
            new_cost += (result.value - opt_result.value)
        # Child skeleton starts with zero attempts (highest priority tier).
        new_key = SkeletonKey(0, len(new_stream_plan))
        new_skeleton = Skeleton(
            instantiate_first(new_bindings, new_stream_plan), 0,
            new_bindings, new_stream_plan, action_plan, new_cost)
        heappush(queue, HeapElement(new_key, new_skeleton))
    if (key.attempts == 0) and isinstance(opt_result, SynthStreamResult):  # TODO: only add if failure?
        # On first attempt, also try the decomposed form of a synthesized stream.
        new_stream_plan = opt_result.decompose() + stream_plan[1:]
        new_key = SkeletonKey(0, len(new_stream_plan))
        new_skeleton = Skeleton(instantiate_first(bindings, new_stream_plan), 0,
                                bindings, new_stream_plan, action_plan, cost)
        heappush(queue, HeapElement(new_key, new_skeleton))
    if not instance.enumerated:
        # Re-queue this skeleton to sample further results later.
        new_key = SkeletonKey(key.attempts + 1, len(stream_plan))  # TODO: compute expected sampling effort required
        new_skeleton = Skeleton(instance, len(instance.results_history),
                                bindings, stream_plan, action_plan, cost)
        heappush(queue, HeapElement(new_key, new_skeleton))
def optimistic_process_streams(evaluations, streams, double_bindings=None):
    """Exhaustively instantiate streams optimistically, returning their results.

    Drains the instantiator's queue; each optimistic result's certified facts
    are fed back so further instances can be grounded on them. Instances that
    fail the double-binding test are skipped.
    """
    instantiator = Instantiator(evaluations, streams)
    results = []
    while instantiator.stream_queue:
        instance = instantiator.stream_queue.popleft()
        if not is_double_bound(instance, double_bindings):
            continue
        for result in instance.next_optimistic():
            # Feed certified facts back to ground downstream instances.
            for fact in result.get_certified():
                instantiator.add_atom(evaluation_from_fact(fact))
            results.append(result)  # TODO: don't readd if all repeated facts?
    return results
def process_immediate_stream_plan(evaluations, stream_plan, disabled, verbose):
    """Evaluate each stream in *stream_plan* whose domain is already satisfied.

    Qualifying instances are disabled and queried for results; their certified
    facts are collected and merged into *evaluations* only AFTER the loop, so
    every instance in this pass is grounded against the evaluations available
    at entry (the commented-out in-loop add documents this deliberate deferral).

    Fix: the returned list previously could contain duplicates and evaluations
    that already existed; it now holds each genuinely new evaluation exactly
    once, in first-seen order.
    """
    new_evaluations = []
    seen = set()  # dedupes certified facts shared across results/instances
    for opt_result in stream_plan:
        instance = opt_result.instance
        # Only process instances whose entire domain is already evaluated.
        if set(map(evaluation_from_fact, instance.get_domain())) <= evaluations:
            disable_stream_instance(instance, disabled)
            for result in instance.next_results(verbose=verbose):
                for fact in result.get_certified():
                    evaluation = evaluation_from_fact(fact)
                    #evaluations.add(evaluation) # To be used on next iteration
                    if (evaluation not in evaluations) and (evaluation not in seen):
                        seen.add(evaluation)
                        new_evaluations.append(evaluation)
    evaluations.update(new_evaluations)
    return new_evaluations
def evaluations_from_stream_plan(evaluations, stream_plan):
    """Map each evaluation to the stream result that first certifies it.

    Existing evaluations map to None. Stream results are applied in plan
    order; a result contributes only if its effort is finite and its entire
    domain is already optimistically reachable.
    """
    result_from_evaluation = {evaluation: None for evaluation in evaluations}
    reachable = set(evaluations)
    for result in stream_plan:
        if not isinstance(result, StreamResult):
            continue
        instance = result.instance
        if instance.get_effort() == INF:
            continue  # unreachable result: skip entirely
        assert not instance.disabled
        assert not instance.enumerated
        domain = set(map(evaluation_from_fact, instance.get_domain()))
        if not domain.issubset(reachable):
            continue  # domain not yet achievable in plan order
        for fact in result.get_certified():
            evaluation = evaluation_from_fact(fact)
            if evaluation not in result_from_evaluation:
                # First achiever wins.
                result_from_evaluation[evaluation] = result
                reachable.add(evaluation)
    return result_from_evaluation
def get_combined_orders(evaluations, stream_plan, action_plan, domain):
    """Compute ordering constraints among stream results and plan actions.

    Returns a set of (earlier, later) pairs: the total order over actions,
    plus stream->stream and stream->action edges wherever one instance's
    add/del effects intersect another's preconditions. Returns None when
    there is no action plan.
    """
    if action_plan is None:
        return None
    # TODO: could just do this within relaxed
    # TODO: do I want to strip the fluents and just do the partial ordering?
    stream_instances = get_stream_instances(stream_plan)
    # Negated predicate results become negated initial atoms of the task.
    negative_results = filter(
        lambda r: isinstance(r, PredicateResult) and (r.value == False),
        stream_plan)
    negative_init = set(
        get_init((evaluation_from_fact(f) for r in negative_results
                  for f in r.get_certified()), negated=True))
    #negated_from_name = {r.instance.external.name for r in negative_results}
    opt_evaluations = evaluations_from_stream_plan(evaluations, stream_plan)
    goal_expression = ('and', )  # trivial goal: only instantiation matters here
    task = task_from_domain_problem(
        domain,
        get_problem(opt_evaluations, goal_expression, domain, unit_costs=True))
    action_instances = get_action_instances(task, action_plan)
    replace_derived(task, negative_init, action_instances)
    #combined_instances = stream_instances + action_instances
    orders = set()
    # Actions keep their given total order.
    for i, a1 in enumerate(action_plan):
        for a2 in action_plan[i + 1:]:
            orders.add((a1, a2))
    # TODO: just store first achiever here
    # Stream -> later stream: producer's add effects feed consumer's preconditions.
    for i, instance1 in enumerate(stream_instances):
        for j in range(i + 1, len(stream_instances)):
            effects = {e for _, e in instance1.add_effects}
            if effects & set(stream_instances[j].precondition):
                orders.add((stream_plan[i], stream_plan[j]))
    # Stream -> action: add effects plus negated delete effects vs. preconditions.
    for i, instance1 in enumerate(stream_instances):
        for j, instance2 in enumerate(action_instances):
            effects = {e for _, e in instance1.add_effects} | \
                      {e.negate() for _, e in instance1.del_effects}
            if effects & set(instance2.precondition):
                orders.add((stream_plan[i], action_plan[j]))
    return orders
def optimistic_process_stream_plan(evaluations, stream_plan):
    """Optimistically re-ground *stream_plan*, returning (results, bindings).

    For each planned result, grounds its instance over the accumulated
    optimistic evaluations and bindings, collects every optimistic result,
    and records output-object bindings (placeholder -> candidate objects).
    """
    # TODO: can also use the instantiator and operate directly on the outputs
    # TODO: could bind by just using new_evaluations
    evaluations = set(evaluations)
    opt_evaluations = set(evaluations)
    opt_bindings = defaultdict(list)
    opt_results = []
    for opt_result in stream_plan:  # TODO: could just do first step
        grounded = optimistic_stream_grounding(
            opt_result.instance, opt_bindings, evaluations, opt_evaluations)
        for instance in grounded:
            results = instance.next_optimistic()
            # Extend the optimistic closure with every certified fact.
            for r in results:
                opt_evaluations.update(map(evaluation_from_fact, r.get_certified()))
            opt_results.extend(results)
            for result in results:
                if not isinstance(result, StreamResult):  # Could not add if same value
                    continue
                for opt, obj in zip(opt_result.output_objects, result.output_objects):
                    opt_bindings[opt].append(obj)
    return opt_results, opt_bindings
def process_stream_plan(evaluations, stream_plan, disabled, verbose,
                        quick_fail=True, layers=False, max_values=INF):
    """Execute *stream_plan*, mixing real evaluation with optimistic grounding.

    Streams are processed front-to-back: grounded instances are evaluated for
    real (their certified facts added to *evaluations*, instances disabled),
    while the remainder are advanced optimistically. Failed SynthStreamResults
    are replaced in-queue by their decomposition. Returns (opt_results,
    opt_bindings), or (None, None) on failure.
    """
    # TODO: can also use the instantiator and operate directly on the outputs
    # TODO: could bind by just using new_evaluations
    plan_index = get_stream_plan_index(stream_plan)
    # Which results produce each output object; objects with multiple producers
    # identify "shared output" streams that are held back from real evaluation.
    streams_from_output = defaultdict(list)
    for result in stream_plan:
        if isinstance(result, StreamResult):
            for obj in result.output_objects:
                streams_from_output[obj].append(result)
    shared_output_streams = {
        s for streams in streams_from_output.values()
        if 1 < len(streams) for s in streams
    }
    #shared_output_streams = {}
    print(shared_output_streams)
    print(plan_index)
    opt_bindings = defaultdict(list)
    opt_evaluations = set()
    opt_results = []
    failed = False
    stream_queue = deque(stream_plan)
    # quick_fail stops the loop at the first failure; otherwise drain the queue.
    while stream_queue and implies(quick_fail, not failed):
        opt_result = stream_queue.popleft()
        real_instances, opt_instances = ground_stream_instances(
            opt_result.instance, opt_bindings, evaluations, opt_evaluations,
            plan_index)
        # First step iff every input is already a concrete Object (no placeholders).
        first_step = all(
            isinstance(o, Object) for o in opt_result.instance.input_objects)
        # How many instances to evaluate for real; shared-output streams are
        # deferred (0) unless layering or at the first step.
        num_instances = min(len(real_instances), max_values) \
            if (layers or first_step or (opt_result not in shared_output_streams)) else 0
        opt_instances += real_instances[num_instances:]
        real_instances = real_instances[:num_instances]
        new_results = []
        local_failure = False
        for instance in real_instances:
            results = instance.next_results(verbose=verbose)
            for result in results:
                add_certified(evaluations, result)
            disable_stream_instance(instance, disabled)
            local_failure |= not results
            # A predicate must reproduce its optimistically assumed value.
            if isinstance(opt_result, PredicateResult) and not any(
                    opt_result.value == r.value for r in results):
                local_failure = True  # TODO: check for instance?
            new_results += results
        for instance in opt_instances:
            #print(instance, instance.opt_index)
            results = instance.next_optimistic()
            opt_evaluations.update(
                evaluation_from_fact(f) for r in results
                for f in r.get_certified())
            opt_results += results
            local_failure |= not results
            new_results += results
        # Record candidate bindings for the optimistic output placeholders.
        for result in new_results:
            if isinstance(result, StreamResult):  # Could not add if same value
                for opt, obj in zip(opt_result.output_objects,
                                    result.output_objects):
                    opt_bindings[opt].append(obj)
        if local_failure and isinstance(opt_result, SynthStreamResult):
            # Retry a failed synthesized stream via its decomposition,
            # pushed to the FRONT of the queue in original order.
            stream_queue.extendleft(reversed(opt_result.decompose()))
            failed = False  # TODO: check if satisfies target certified
        else:
            failed |= local_failure
    if verbose:
        print('Success: {}'.format(not failed))
    if failed:
        return None, None
    # TODO: just return binding
    # TODO: could also immediately switch to binding if plan_index == 0 afterwards
    return opt_results, opt_bindings
def static_opt_gen_fn(*input_values):
    """Yield the stream's optimistic outputs for *input_values*.

    Stops immediately (yields nothing) when the corresponding instance's
    domain is already fully covered by the closed-over evaluations —
    presumably because the real generator handles that case; closure over
    ``stream`` and ``evaluations``.
    """
    objects = tuple(map(Object.from_value, input_values))
    instance = stream.get_instance(objects)
    fully_evaluated = all(evaluation_from_fact(fact) in evaluations
                          for fact in instance.get_domain())
    if fully_evaluated:
        return
    yield from stream.opt_gen_fn(*input_values)
def static_fn(*input_values):
    """Return placeholder FutureValues for the stream's outputs.

    Returns None when the instance's domain is already fully covered by the
    closed-over evaluations; otherwise one FutureValue per declared output.
    Closure over ``stream`` and ``evaluations``.
    """
    objects = tuple(map(Object.from_value, input_values))
    instance = stream.get_instance(objects)
    already_evaluated = all(evaluation_from_fact(fact) in evaluations
                            for fact in instance.get_domain())
    if already_evaluated:
        return None
    return tuple(FutureValue(stream.name, input_values, output)
                 for output in stream.outputs)