def examine_instantiated(problem, constraints=PlanConstraints(), unit_costs=False, max_time=INF, verbose=False, **search_args):
    """Ground a PDDLStream problem using DEBUG stream outputs and return the
    instantiated (grounded) task without searching for a plan.

    :param problem: a PDDLStream problem tuple
        (domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
    :param constraints: PlanConstraints on the available solutions
    :param unit_costs: use unit action costs rather than numeric costs
    :param max_time: the maximum amount of time to apply streams
    :param verbose: if True, this prints the result of each stream application
    :param search_args: unused in the body — kept for signature parity with the solvers
    :return: the converted instantiated task
    """
    # Rebuild the problem with DEBUG placeholder stream outputs so streams can
    # be applied without invoking real samplers.
    domain_pddl, constant_map, stream_pddl, _, init, goal = problem
    stream_map = DEBUG
    problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
    evaluations, goal_exp, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    store = SolutionStore(evaluations, max_time, success_cost=INF, verbose=verbose)
    #externals = compile_fluents_as_attachments(domain, externals) #
    instantiator = Instantiator(externals, evaluations)
    # Apply streams without a complexity bound to saturate the evaluations.
    process_stream_queue(instantiator, store, complexity_limit=INF, verbose=verbose)
    #plan, cost = solve_finite(evaluations, goal_exp, domain, max_cost=max_cost, **search_args)
    debug = False
    assert not isinstance(domain, SimplifiedDomain)
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    with Verbose(debug):
        instantiated = instantiate_task(task)
        instantiated = convert_instantiated(instantiated)
    return instantiated
def solve_current(problem, constraints=PlanConstraints(), unit_costs=False, verbose=False, **search_args):
    """Solve a PDDLStream problem without applying any streams.

    Will fail if the problem requires stream applications.

    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the available solutions
    :param unit_costs: use unit action costs rather than numeric costs
    :param verbose: if True, this prints the result of each stream application
    :param search_args: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of
        actions (or None), cost is the cost of the plan, and evaluations is
        init but expanded using stream applications
    """
    evaluations, goal_formula, task_domain, external_streams = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    # Evaluate only functions (no generator streams) before searching once.
    queue = Instantiator(external_streams, evaluations)
    process_function_queue(queue, evaluations, verbose=verbose)
    solution, solution_cost = solve_finite(
        evaluations, goal_formula, task_domain,
        max_cost=constraints.max_cost, **search_args)
    return revert_solution(solution, solution_cost, evaluations)
def examine_instantiated(problem, constraints=PlanConstraints(), unit_costs=False, unique=False, verbose=False, debug=False):
    """Optimistically ground a PDDLStream problem and return its instantiation.

    Streams are replaced by debug placeholder samplers (unique or shared) and
    processed optimistically without a complexity bound; the resulting task is
    then instantiated and its axioms reinstantiated.

    :param problem: a PDDLStream problem tuple
        (domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
    :param constraints: PlanConstraints on the available solutions
    :param unit_costs: use unit action costs rather than numeric costs
    :param unique: if True, use DEBUG (unique) stream outputs; otherwise SHARED_DEBUG
    :param verbose: unused in the current body (only referenced in commented-out code)
    :param debug: if True, print verbose output while instantiating the task
    :return: None if instantiation failed, otherwise a tuple (results, instantiated)
    """
    # TODO: refactor to an analysis file
    domain_pddl, constant_map, stream_pddl, _, init, goal = problem
    stream_map = DEBUG if unique else SHARED_DEBUG # DEBUG_MODES
    problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
    evaluations, goal_exp, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    assert not isinstance(domain, SimplifiedDomain)
    # store = SolutionStore(evaluations, max_time, success_cost=INF, verbose=verbose)
    # instantiator = Instantiator(externals, evaluations)
    # process_stream_queue(instantiator, store, complexity_limit=INF, verbose=verbose)
    # results = [] # TODO: extract from process_stream_queue
    #set_unique(externals)
    # domain.actions[:] = [] # TODO: only instantiate axioms
    # TODO: drop all fluents and instantiate
    # TODO: relaxed planning version of this
    results, exhausted = optimistic_process_streams(evaluations, externals, complexity_limit=INF, max_effort=None)
    evaluations = evaluations_from_stream_plan(evaluations, results, max_effort=None)
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    with Verbose(debug):
        instantiated = instantiate_task(task)
        if instantiated is None:
            return None
        # TODO: reinstantiate actions?
        instantiated.axioms[:] = [reinstantiate_axiom(axiom) for axiom in instantiated.axioms]
        instantiated = convert_instantiated(instantiated)
    return results, instantiated
def solve_incremental(problem, constraints=PlanConstraints(), unit_costs=False, success_cost=INF,
                      max_iterations=INF, max_time=INF,
                      start_complexity=0, complexity_step=1, max_complexity=INF,
                      verbose=False, **search_args):
    """
    Solves a PDDLStream problem by alternating between applying all possible
    streams and searching.

    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the set of legal solutions
    :param max_time: the maximum amount of time to apply streams
    :param max_iterations: the maximum amount of search iterations
    :param unit_costs: use unit action costs rather than numeric costs
    :param success_cost: an exclusive (strict) upper bound on plan cost to terminate
    :param start_complexity: the stream complexity on the first iteration
    :param complexity_step: the increase in the complexity limit after each iteration
    :param max_complexity: the maximum stream complexity
    :param verbose: if True, this prints the result of each stream application
    :param search_args: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but
        expanded using stream applications
    """
    # max_complexity = 0 => current
    # complexity_step = INF => exhaustive
    # success_cost = terminate_cost = decision_cost
    evaluations, goal_expression, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    store = SolutionStore(evaluations, max_time, success_cost, verbose) # TODO: include other info here?
    ensure_no_fluent_streams(externals)
    if UPDATE_STATISTICS:
        load_stream_statistics(externals)
    num_iterations = num_calls = 0
    complexity_limit = start_complexity
    instantiator = Instantiator(externals, evaluations)
    # Seed the evaluations with streams up to the initial complexity limit.
    num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
    # Alternate search and stream application, raising the complexity limit
    # each round until terminated, out of iterations, or out of complexity.
    while not store.is_terminated() and (num_iterations <= max_iterations) and (complexity_limit <= max_complexity):
        num_iterations += 1
        print('Iteration: {} | Complexity: {} | Calls: {} | Evaluations: {} | Solved: {} | Cost: {} | Time: {:.3f}'.format(
            num_iterations, complexity_limit, num_calls, len(evaluations),
            store.has_solution(), store.best_cost, store.elapsed_time()))
        plan, cost = solve_finite(evaluations, goal_expression, domain,
                                  max_cost=min(store.best_cost, constraints.max_cost), **search_args)
        if is_plan(plan):
            store.add_plan(plan, cost)
        if not instantiator:
            break # No remaining stream instances to apply
        if complexity_step is None:
            # TODO: option to select the next k-smallest complexities
            complexity_limit = instantiator.min_complexity()
        else:
            complexity_limit += complexity_step
        num_calls += process_stream_queue(instantiator, store, complexity_limit, verbose=verbose)
    #retrace_stream_plan(store, domain, goal_expression)
    #print('Final queue size: {}'.format(len(instantiator)))
    if UPDATE_STATISTICS:
        write_stream_statistics(externals, verbose)
    return store.extract_solution()
def solve_current(problem, **search_kwargs):
    """Solve a PDDLStream problem without applying any streams.

    Will fail if the problem requires stream applications.

    :param problem: a PDDLStream problem
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of
        actions (or None), cost is the cost of the plan, and evaluations is
        init but expanded using stream applications
    """
    # Parse once, search once: streams are deliberately never evaluated here.
    evaluations, goal_formula, task_domain, _ = parse_problem(problem)
    solution, solution_cost = solve_finite(
        evaluations, goal_formula, task_domain, **search_kwargs)
    return revert_solution(solution, solution_cost, evaluations)
def solve_exhaustive(problem, max_time=300, verbose=True, **search_kwargs):
    """Apply every possible stream instance, then search once.

    Requires a finite max_time when there are infinitely many stream instances.

    :param problem: a PDDLStream problem
    :param max_time: the maximum amount of time to apply streams
    :param verbose: if True, this prints the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of
        actions (or None), cost is the cost of the plan, and evaluations is
        init but expanded using stream applications
    """
    started = time.time()
    evaluations, goal_formula, task_domain, external_streams = parse_problem(problem)
    ensure_no_fluent_streams(external_streams)
    queue = Instantiator(evaluations, external_streams)
    # Drain the stream queue until it empties or the time budget is spent.
    while queue.stream_queue and (elapsed_time(started) < max_time):
        process_stream_queue(queue, evaluations, verbose=verbose)
    solution, solution_cost = solve_finite(
        evaluations, goal_formula, task_domain, **search_kwargs)
    return revert_solution(solution, solution_cost, evaluations)
def solve_incremental(problem, max_time=INF, max_cost=INF, layers=1, verbose=True, **search_kwargs):
    """
    Solves a PDDLStream problem by alternating between applying all possible
    streams and searching.

    :param problem: a PDDLStream problem
    :param max_time: the maximum amount of time to apply streams
    :param max_cost: a strict upper bound on plan cost
    :param layers: the number of stream application layers per iteration
    :param verbose: if True, this prints the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but
        expanded using stream applications
    """
    store = SolutionStore(max_time, max_cost, verbose) # TODO: include other info here?
    evaluations, goal_expression, domain, externals = parse_problem(problem)
    ensure_no_fluent_streams(externals)
    #load_stream_statistics(externals)
    instantiator = Instantiator(evaluations, externals)
    num_iterations = 0
    # Each iteration: evaluate functions, search, then apply `layers` layers of streams.
    while not store.is_terminated():
        num_iterations += 1
        print('Iteration: {} | Evaluations: {} | Cost: {} | Time: {:.3f}'.format(
            num_iterations, len(evaluations), store.best_cost, store.elapsed_time()))
        function_process_stream_queue(instantiator, evaluations, store)
        plan, cost = solve_finite(evaluations, goal_expression, domain, **search_kwargs)
        store.add_plan(plan, cost)
        if not instantiator.stream_queue:
            break # No remaining stream instances to apply
        layered_process_stream_queue(instantiator, evaluations, store, layers)
    #write_stream_statistics(externals, verbose)
    return revert_solution(store.best_plan, store.best_cost, evaluations)
def solve_exhaustive(problem, constraints=PlanConstraints(), unit_costs=False, max_time=300, verbose=False, **search_args):
    """
    Solves a PDDLStream problem by applying all possible streams and searching once.
    Requires a finite max_time when there are infinitely many stream instances.

    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the available solutions
    :param unit_costs: use unit action costs rather than numeric costs
    :param max_time: the maximum amount of time to apply streams
    :param verbose: if True, this prints the result of each stream application
    :param search_args: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but
        expanded using stream applications
    """
    start_time = time.time()
    evaluations, goal_expression, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    ensure_no_fluent_streams(externals)
    if UPDATE_STATISTICS:
        load_stream_statistics(externals)
    instantiator = Instantiator(externals, evaluations)
    # Pop and process stream instances until the queue drains or time runs out.
    while instantiator.stream_queue and (elapsed_time(start_time) < max_time):
        process_instance(instantiator, evaluations, instantiator.pop_stream(), verbose=verbose)
    # NOTE(review): reconstructed as a single post-loop call — functions appear
    # to be evaluated once after the stream loop; confirm original nesting.
    process_function_queue(instantiator, evaluations, verbose=verbose)
    plan, cost = solve_finite(evaluations, goal_expression, domain,
                              max_cost=constraints.max_cost, **search_args)
    if UPDATE_STATISTICS:
        write_stream_statistics(externals, verbose)
    return revert_solution(plan, cost, evaluations)
def solve_abstract(problem, constraints=PlanConstraints(), stream_info={}, replan_actions=set(),
                   unit_costs=False, success_cost=INF,
                   max_time=INF, max_iterations=INF, max_memory=INF,
                   initial_complexity=0, complexity_step=1, max_complexity=INF,
                   max_skeletons=INF, search_sample_ratio=0, bind=True, max_failures=0,
                   unit_efforts=False, max_effort=INF, effort_weight=None, reorder=True,
                   visualize=False, verbose=True, **search_kwargs):
    """
    Solves a PDDLStream problem by first planning with optimistic stream
    outputs and then querying streams.

    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the set of legal solutions
    :param stream_info: a dictionary from stream name to StreamInfo altering how
        individual streams are handled
    :param replan_actions: the actions declared to induce replanning for the
        purpose of deferred stream evaluation
    :param unit_costs: use unit action costs rather than numeric costs
    :param success_cost: the exclusive (strict) upper bound on plan cost to
        successfully terminate
    :param max_time: the maximum runtime
    :param max_iterations: the maximum number of search iterations
    :param max_memory: the maximum amount of memory
    :param initial_complexity: the initial stream complexity limit
    :param complexity_step: the increase in the stream complexity limit per iteration
    :param max_complexity: the maximum stream complexity limit
    :param max_skeletons: the maximum number of plan skeletons
        (max_skeletons=None indicates not adaptive)
    :param search_sample_ratio: the desired ratio of sample time / search time
        when max_skeletons!=None
    :param bind: if True, propagates parameter bindings when max_skeletons=None
    :param max_failures: the maximum number of stream failures before switching
        phases when max_skeletons=None
    :param unit_efforts: use unit stream efforts rather than estimated numeric efforts
    :param max_effort: the maximum amount of stream effort
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param reorder: if True, reorder stream plans to minimize the expected
        sampling overhead
    :param visualize: if True, draw the constraint network and stream plan as a
        graphviz file
    :param verbose: if True, print the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of
        actions (or None), cost is the cost of the plan (INF if no plan), and
        evaluations is init expanded using stream applications
    """
    # TODO: select whether to search or sample based on expected success rates
    # TODO: no optimizers during search with relaxed_stream_plan
    # TODO: locally optimize only after a solution is identified
    # TODO: replan with a better search algorithm after feasible
    # TODO: change the search algorithm and unit costs based on the best cost
    use_skeletons = (max_skeletons is not None)
    #assert implies(use_skeletons, search_sample_ratio > 0)
    eager_disabled = (effort_weight is None) # No point if no stream effort biasing
    num_iterations = eager_calls = 0
    complexity_limit = initial_complexity
    evaluations, goal_exp, domain, externals = parse_problem(
        problem, stream_info=stream_info, constraints=constraints,
        unit_costs=unit_costs, unit_efforts=unit_efforts)
    identify_non_producers(externals)
    enforce_simultaneous(domain, externals)
    compile_fluent_streams(domain, externals)
    # TODO: make effort_weight be a function of the current cost
    # if (effort_weight is None) and not has_costs(domain):
    #     effort_weight = 1

    load_stream_statistics(externals)
    if visualize and not has_pygraphviz():
        visualize = False
        print('Warning, visualize=True requires pygraphviz. Setting visualize=False')
    if visualize:
        reset_visualizations()
    streams, functions, negative, optimizers = partition_externals(externals, verbose=verbose)
    eager_externals = list(filter(lambda e: e.info.eager, externals))
    positive_externals = streams + functions + optimizers
    has_optimizers = bool(optimizers) # TODO: deprecate
    assert implies(has_optimizers, use_skeletons)

    ################

    store = SolutionStore(evaluations, max_time, success_cost, verbose, max_memory=max_memory)
    skeleton_queue = SkeletonQueue(store, domain, disable=not has_optimizers)
    disabled = set() # Max skeletons after a solution
    # Main loop: eagerly apply streams, plan optimistically, then either
    # process the stream plan directly or hand it to the skeleton queue.
    while (not store.is_terminated()) and (num_iterations < max_iterations) and (complexity_limit <= max_complexity):
        num_iterations += 1
        eager_instantiator = Instantiator(eager_externals, evaluations) # Only update after an increase?
        if eager_disabled:
            push_disabled(eager_instantiator, disabled)
        if eager_externals:
            eager_calls += process_stream_queue(eager_instantiator, store,
                                                complexity_limit=complexity_limit, verbose=verbose)

        ################

        print('\nIteration: {} | Complexity: {} | Skeletons: {} | Skeleton Queue: {} | Disabled: {} | Evaluations: {} | '
              'Eager Calls: {} | Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
            num_iterations, complexity_limit, len(skeleton_queue.skeletons), len(skeleton_queue), len(disabled),
            len(evaluations), eager_calls, store.best_cost, store.search_time, store.sample_time,
            store.elapsed_time()))
        optimistic_solve_fn = get_optimistic_solve_fn(
            goal_exp, domain, negative,
            replan_actions=replan_actions, reachieve=use_skeletons,
            max_cost=min(store.best_cost, constraints.max_cost),
            max_effort=max_effort, effort_weight=effort_weight, **search_kwargs)
        # TODO: just set unit effort for each stream beforehand
        if (max_skeletons is None) or (len(skeleton_queue.skeletons) < max_skeletons):
            disabled_axioms = create_disabled_axioms(skeleton_queue) if has_optimizers else []
            if disabled_axioms:
                domain.axioms.extend(disabled_axioms)
            stream_plan, opt_plan, cost = iterative_plan_streams(
                evaluations, positive_externals, optimistic_solve_fn, complexity_limit, max_effort=max_effort)
            for axiom in disabled_axioms:
                domain.axioms.remove(axiom)
        else:
            stream_plan, opt_plan, cost = OptSolution(INFEASIBLE, INFEASIBLE, INF) # TODO: apply elsewhere

        ################

        #stream_plan = replan_with_optimizers(evaluations, stream_plan, domain, externals) or stream_plan
        stream_plan = combine_optimizers(evaluations, stream_plan)
        #stream_plan = get_synthetic_stream_plan(stream_plan, # evaluations
        #                                       [s for s in synthesizers if not s.post_only])
        #stream_plan = recover_optimistic_outputs(stream_plan)
        if reorder:
            # TODO: this blows up memory wise for long stream plans
            stream_plan = reorder_stream_plan(store, stream_plan)

        num_optimistic = sum(r.optimistic for r in stream_plan) if stream_plan else 0
        action_plan = opt_plan.action_plan if is_plan(opt_plan) else opt_plan
        print('Stream plan ({}, {}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
            get_length(stream_plan), num_optimistic, compute_plan_effort(stream_plan), stream_plan,
            get_length(action_plan), cost, str_from_plan(action_plan)))
        if is_plan(stream_plan) and visualize:
            log_plans(stream_plan, action_plan, num_iterations)
            create_visualizations(evaluations, stream_plan, num_iterations)

        ################

        # Nothing left to try anywhere: infeasible and all queues exhausted.
        if (stream_plan is INFEASIBLE) and (not eager_instantiator) and (not skeleton_queue) and (not disabled):
            break
        if not is_plan(stream_plan):
            print('No plan: increasing complexity from {} to {}'.format(
                complexity_limit, complexity_limit + complexity_step))
            complexity_limit += complexity_step
            if not eager_disabled:
                reenable_disabled(evaluations, domain, disabled)

        #print(stream_plan_complexity(evaluations, stream_plan))
        if not use_skeletons:
            process_stream_plan(store, domain, disabled, stream_plan, opt_plan, cost,
                                bind=bind, max_failures=max_failures)
            continue

        ################

        #optimizer_plan = replan_with_optimizers(evaluations, stream_plan, domain, optimizers)
        optimizer_plan = None
        if optimizer_plan is not None:
            # TODO: post process a bound plan
            print('Optimizer plan ({}, {:.3f}): {}'.format(
                get_length(optimizer_plan), compute_plan_effort(optimizer_plan), optimizer_plan))
            skeleton_queue.new_skeleton(optimizer_plan, opt_plan, cost)

        allocated_sample_time = (search_sample_ratio * store.search_time) - store.sample_time \
            if len(skeleton_queue.skeletons) <= max_skeletons else INF
        if skeleton_queue.process(stream_plan, opt_plan, cost, complexity_limit, allocated_sample_time) is INFEASIBLE:
            break

    ################

    summary = store.export_summary()
    summary.update({
        'iterations': num_iterations,
        'complexity': complexity_limit,
        'skeletons': len(skeleton_queue.skeletons),
    })
    print('Summary: {}'.format(str_from_object(summary, ndigits=3))) # TODO: return the summary

    write_stream_statistics(externals, verbose)
    return store.extract_solution()
def solve_focused(problem, stream_info={}, action_info={}, dynamic_streams=[],
                  max_time=INF, max_cost=INF, unit_costs=False,
                  commit=True, effort_weight=None, eager_layers=1,
                  visualize=False, verbose=True, postprocess=False, **search_kwargs):
    """
    Solves a PDDLStream problem by first hypothesizing stream outputs and then
    determining whether they exist.

    :param problem: a PDDLStream problem
    :param action_info: a dictionary from stream name to ActionInfo for planning and execution
    :param stream_info: a dictionary from stream name to StreamInfo altering how
        individual streams are handled
    :param max_time: the maximum amount of time to apply streams
    :param max_cost: a strict upper bound on plan cost
    :param commit: if True, it commits to instantiating a particular partial plan-skeleton
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param eager_layers: the number of eager stream application layers per iteration
    :param visualize: if True, it draws the constraint network and stream plan
        as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but
        expanded using stream applications
    """
    # TODO: return to just using the highest level samplers at the start
    start_time = time.time()
    num_iterations = 0
    best_plan = None; best_cost = INF
    evaluations, goal_expression, domain, stream_name, externals = parse_problem(problem)
    action_info = get_action_info(action_info)
    update_stream_info(externals, stream_info)
    load_stream_statistics(stream_name, externals)
    eager_externals = filter(lambda e: e.info.eager, externals)
    disabled = []
    if visualize:
        clear_visualizations()
    # NOTE(review): `filter` returns a list only on Python 2; the `functions +
    # negative` concatenation below relies on that — confirm target version.
    #functions = filter(lambda s: isinstance(s, Function), externals)
    functions = filter(lambda s: type(s) is Function, externals)
    negative = filter(lambda s: type(s) is Predicate and s.is_negative(), externals)
    streams = filter(lambda s: s not in (functions + negative), externals)
    stream_results = []
    depth = 1
    sampling_queue = []
    while elapsed_time(start_time) < max_time:
        search_time = time.time()
        # TODO: allocate more sampling effort to maintain the balance
        # TODO: total search time vs most recent search time?
        if stream_results is None:
            stream_plan, action_plan, cost = None, None, INF
        else:
            num_iterations += 1
            print('\nIteration: {} | Depth: {} | Evaluations: {} | Cost: {} | Time: {:.3f}'.format(
                num_iterations, depth, len(evaluations), best_cost, elapsed_time(start_time)))
            # TODO: constrain to use previous plan to some degree
            eagerly_evaluate(evaluations, eager_externals, eager_layers,
                             max_time - elapsed_time(start_time), verbose)
            stream_results += optimistic_process_streams(
                evaluations_from_stream_plan(evaluations, stream_results), functions)
            # TODO: warning check if using simultaneous_stream_plan or relaxed_stream_plan with non-eager functions
            solve_stream_plan = relaxed_stream_plan if effort_weight is None else simultaneous_stream_plan
            #solve_stream_plan = sequential_stream_plan if effort_weight is None else simultaneous_stream_plan
            combined_plan, cost = solve_stream_plan(evaluations, goal_expression, domain, stream_results,
                                                    negative, max_cost=best_cost,
                                                    unit_costs=unit_costs, **search_kwargs)
            combined_plan = reorder_combined_plan(evaluations, combined_plan, action_info, domain)
            print('Combined plan: {}'.format(combined_plan))
            stream_plan, action_plan = separate_plan(combined_plan, action_info)
            stream_plan = reorder_stream_plan(stream_plan) # TODO: is this strictly redundant?
            stream_plan = get_synthetic_stream_plan(stream_plan, dynamic_streams)
            print('Stream plan: {}\n'
                  'Action plan: {}'.format(stream_plan, action_plan))
        if stream_plan is None:
            # Search failed: reset disabled streams and recurse at depth 0.
            if disabled or (depth != 0):
                if depth == 0:
                    reset_disabled(disabled)
                stream_results = optimistic_process_streams(evaluations, streams)
                depth = 0 # Recurse on problems
            else:
                break
        elif len(stream_plan) == 0:
            # Fully bound plan: record it if it improves on the incumbent.
            if cost < best_cost:
                best_plan = action_plan; best_cost = cost
                if best_cost < max_cost:
                    break
            stream_results = None
        else:
            """
            sampling_key = SkeletonKey(0, len(stream_plan))
            sampling_problem = Skeleton({}, stream_plan, action_plan, cost)
            heappush(sampling_queue, (sampling_key, sampling_problem))
            greedily_process_queue(sampling_queue, evaluations, disabled, max_cost, True, 0, verbose)
            depth += 1
            stream_results = None
            """
            if visualize:
                create_visualizations(evaluations, stream_plan, num_iterations)
            option = True
            if option:
                # TODO: can instantiate all but subtract stream_results
                # TODO: can even pass a subset of the fluent state
                # TODO: can just compute the stream plan preimage
                # TODO: replan constraining the initial state and plan skeleton
                # TODO: reuse subproblems
                # TODO: always start from the initial state (i.e. don't update)
                old_evaluations = set(evaluations)
                stream_results, _ = process_stream_plan(evaluations, stream_plan, disabled, verbose)
                new_evaluations = set(evaluations) - old_evaluations
                if stream_results is not None:
                    new_instances = [r.instance for r in stream_results]
                    stream_results = optimistic_process_streams(new_evaluations, streams, new_instances)
            if not commit:
                stream_results = None
            depth += 1
    reset_disabled(disabled)
    if postprocess and (not unit_costs) and (best_plan is not None):
        best_plan = locally_optimize(evaluations, best_plan, goal_expression, domain,
                                     functions, negative, dynamic_streams, verbose)
    write_stream_statistics(stream_name, externals)
    return revert_solution(best_plan, best_cost, evaluations)
def solve_focused(problem, stream_info={}, action_info={}, synthesizers=[],
                  max_time=INF, max_cost=INF, unit_costs=False, unit_efforts=False,
                  effort_weight=None, eager_layers=1, search_sampling_ratio=1,
                  use_skeleton=True, visualize=False, verbose=True, postprocess=False, **search_kwargs):
    """
    Solves a PDDLStream problem by first hypothesizing stream outputs and then
    determining whether they exist.

    :param problem: a PDDLStream problem
    :param action_info: a dictionary from stream name to ActionInfo for planning and execution
    :param stream_info: a dictionary from stream name to StreamInfo altering how
        individual streams are handled
    :param synthesizers: a list of StreamSynthesizer objects
    :param max_time: the maximum amount of time to apply streams
    :param max_cost: a strict upper bound on plan cost
    :param unit_costs: use unit costs rather than numeric costs
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param eager_layers: the number of eager stream application layers per iteration
    :param search_sampling_ratio: the desired ratio of search time / sample time
    :param visualize: if True, it draws the constraint network and stream plan
        as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param postprocess: postprocess the stream plan to find a better solution
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but
        expanded using stream applications
    """
    # TODO: return to just using the highest level samplers at the start
    # TODO: select whether to search or sample based on expected success rates
    solve_stream_plan_fn = relaxed_stream_plan
    #solve_stream_plan_fn = relaxed_stream_plan if effort_weight is None else simultaneous_stream_plan
    #solve_stream_plan_fn = sequential_stream_plan # simultaneous_stream_plan | sequential_stream_plan
    #solve_stream_plan_fn = incremental_stream_plan # incremental_stream_plan | exhaustive_stream_plan
    # TODO: warning check if using simultaneous_stream_plan or sequential_stream_plan with non-eager functions
    # TODO: no optimizers during search with relaxed_stream_plan
    num_iterations = 0
    search_time = sample_time = 0
    store = SolutionStore(max_time, max_cost, verbose) # TODO: include other info here?
    evaluations, goal_expression, domain, externals = parse_problem(
        problem, stream_info)
    unit_costs |= not has_costs(domain)
    full_action_info = get_action_info(action_info)
    load_stream_statistics(externals + synthesizers)
    if visualize and not has_pygraphviz():
        visualize = False
        print('Warning, visualize=True requires pygraphviz. Setting visualize=False')
    if visualize:
        reset_visualizations()
    eager_externals = list(filter(lambda e: e.info.eager, externals))
    streams, functions, negative = partition_externals(externals)
    if verbose:
        print('Streams: {}\nFunctions: {}\nNegated: {}'.format(
            streams, functions, negative))
    queue = SkeletonQueue(store, evaluations, goal_expression, domain)
    disabled = set()
    # Main loop: eager streams, optimistic search, then sampling for a time
    # budget derived from search_sampling_ratio.
    while not store.is_terminated():
        start_time = time.time()
        num_iterations += 1
        print(
            '\nIteration: {} | Queue: {} | Evaluations: {} | Cost: {} '
            '| Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.
            format(num_iterations, len(queue), len(evaluations), store.best_cost,
                   search_time, sample_time, store.elapsed_time()))
        layered_process_stream_queue(
            Instantiator(evaluations, eager_externals), evaluations, store, eager_layers)
        solve_stream_plan = lambda sr: solve_stream_plan_fn(
            evaluations, goal_expression, domain, sr, negative,
            max_cost=store.best_cost,
            #max_cost=min(store.best_cost, max_cost),
            unit_costs=unit_costs, unit_efforts=unit_efforts,
            effort_weight=effort_weight, **search_kwargs)
        #combined_plan, cost = solve_stream_plan(optimistic_process_streams(evaluations, streams + functions))
        combined_plan, cost = iterative_solve_stream_plan(
            evaluations, streams, functions, solve_stream_plan)
        if action_info:
            combined_plan = reorder_combined_plan(evaluations, combined_plan,
                                                  full_action_info, domain)
            print('Combined plan: {}'.format(combined_plan))
        stream_plan, action_plan = separate_plan(combined_plan, full_action_info)
        #stream_plan = replan_with_optimizers(evaluations, stream_plan, domain, externals)
        stream_plan = combine_optimizers(evaluations, stream_plan)
        #stream_plan = get_synthetic_stream_plan(stream_plan, # evaluations
        #                                       [s for s in synthesizers if not s.post_only])
        stream_plan = reorder_stream_plan(stream_plan) # TODO: is this redundant when combined_plan?
        dump_plans(stream_plan, action_plan, cost)
        if (stream_plan is not None) and visualize:
            log_plans(stream_plan, action_plan, num_iterations)
            create_visualizations(evaluations, stream_plan, num_iterations)
        search_time += elapsed_time(start_time)

        # TODO: more generally just add the original plan skeleton to the plan
        # TODO: cutoff search exploration time at a certain point
        start_time = time.time()
        allocated_sample_time = search_sampling_ratio * search_time - sample_time
        if use_skeleton:
            terminate = not process_skeleton_queue(store, queue, stream_plan,
                                                   action_plan, cost, allocated_sample_time)
        else:
            terminate = not process_disabled(
                store, evaluations, domain, disabled, stream_plan,
                action_plan, cost, allocated_sample_time, effort_weight is not None)
        sample_time += elapsed_time(start_time)
        if terminate:
            break

    if postprocess and (not unit_costs): # and synthesizers
        locally_optimize(evaluations, store, goal_expression, domain,
                         functions, negative, synthesizers, visualize)
    write_stream_statistics(externals + synthesizers, verbose)
    return revert_solution(store.best_plan, store.best_cost, evaluations)
def solve_focused(problem, stream_info={}, action_info={}, synthesizers=[],
                  max_time=INF, max_cost=INF, unit_costs=None,
                  effort_weight=None, eager_layers=1,
                  visualize=False, verbose=True, postprocess=False, **search_kwargs):
    """
    Solves a PDDLStream problem by first hypothesizing stream outputs and then determining whether they exist
    :param problem: a PDDLStream problem
    :param action_info: a dictionary from stream name to ActionInfo for planning and execution
    :param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
    :param synthesizers: a list of StreamSynthesizer objects used to fuse stream plans
    :param max_time: the maximum amount of time to apply streams
    :param max_cost: a strict upper bound on plan cost
    :param unit_costs: use unit action costs rather than numeric costs; if None, inferred from the domain
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param eager_layers: the number of eager stream application layers per iteration
    :param visualize: if True, it draws the constraint network and stream plan as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param postprocess: if True, attempts to locally optimize the best solution before returning
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but expanded
        using stream applications
    """
    # TODO: return to just using the highest level samplers at the start
    search_sampling_ratio = 1  # desired ratio of search time to sampling time
    solve_stream_plan_fn = relaxed_stream_plan if effort_weight is None else simultaneous_stream_plan
    # TODO: warning check if using simultaneous_stream_plan or sequential_stream_plan with non-eager functions
    num_iterations = 0
    search_time = sample_time = 0
    store = SolutionStore(max_time, max_cost, verbose)  # TODO: include other info here?
    evaluations, goal_expression, domain, stream_name, externals = parse_problem(
        problem, stream_info)
    compile_to_exogenous(evaluations, domain, externals)
    if unit_costs is None:
        # Default to unit costs when the domain declares no numeric costs
        unit_costs = not has_costs(domain)
    full_action_info = get_action_info(action_info)
    load_stream_statistics(stream_name, externals + synthesizers)
    if visualize:
        clear_visualizations()
    # TODO: somehow Functions became no longer eager?
    eager_externals = list(
        filter(lambda e: e.info.eager or type(e) == Function, externals))
    streams, functions, negative = partition_externals(externals)
    queue = SkeletonQueue(store, evaluations)
    # TODO: decide max_sampling_time based on total search_time or likelihood estimates
    # TODO: switch to searching if believe chance of search better than sampling
    while not store.is_terminated():
        num_iterations += 1
        print(
            '\nIteration: {} | Queue: {} | Evaluations: {} | Cost: {} '
            '| Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.
            format(num_iterations, len(queue), len(evaluations),
                   store.best_cost, search_time, sample_time,
                   store.elapsed_time()))
        # --- Search phase: eagerly apply streams, then plan optimistically ---
        start_time = time.time()
        layered_process_stream_queue(
            Instantiator(evaluations, eager_externals), evaluations, store,
            eager_layers)
        solve_stream_plan = lambda sr: solve_stream_plan_fn(
            evaluations,
            goal_expression,
            domain,
            sr,
            negative,
            max_cost=store.best_cost,
            #max_cost=min(store.best_cost, max_cost),
            unit_costs=unit_costs,
            **search_kwargs)
        #combined_plan, cost = solve_stream_plan(populate_results(evaluations, streams + functions))
        combined_plan, cost = iterative_solve_stream_plan(
            evaluations, streams, functions, solve_stream_plan)
        if action_info:
            combined_plan = reorder_combined_plan(evaluations, combined_plan,
                                                  full_action_info, domain)
            print('Combined plan: {}'.format(combined_plan))
        stream_plan, action_plan = separate_plan(combined_plan,
                                                 full_action_info)
        stream_plan = reorder_stream_plan(
            stream_plan)  # TODO: is this strictly redundant?
        stream_plan = get_synthetic_stream_plan(stream_plan, synthesizers)
        print('Stream plan: {}\n'
              'Action plan: {}'.format(stream_plan, action_plan))
        search_time += elapsed_time(start_time)

        # --- Sample phase: bind the stream plan (or retry queued skeletons) ---
        start_time = time.time()
        if stream_plan is None:
            if not queue:
                # No optimistic plan and no skeletons left to bind: give up
                break
            queue.process_until_success()
            #queue.fairly_process()
        else:
            if visualize:
                create_visualizations(evaluations, stream_plan,
                                      num_iterations)
            queue.new_skeleton(stream_plan, action_plan, cost)
            queue.greedily_process()
        sample_time += elapsed_time(start_time)

        # Spend any remaining sampling budget implied by search_sampling_ratio
        start_time = time.time()
        queue.timed_process(search_sampling_ratio * search_time - sample_time)
        sample_time += elapsed_time(start_time)

    if postprocess and (not unit_costs):  # and synthesizers
        locally_optimize(evaluations, store, goal_expression, domain,
                         functions, negative, synthesizers)
    write_stream_statistics(stream_name, externals + synthesizers, verbose)
    return revert_solution(store.best_plan, store.best_cost, evaluations)
def solve_committed(problem, max_time=INF, effort_weight=None, visualize=False,
                    verbose=True, **kwargs):
    """
    Solves a PDDLStream problem by committing to a stream plan: once a plan with
    unbound stream outputs is found, the streams on that plan are evaluated and
    the resulting facts are fed back into the instantiator until the plan either
    binds fully (cost improves) or fails (commitment is reset).
    :param problem: a PDDLStream problem
    :param max_time: the maximum runtime
    :param effort_weight: if not None, plan streams and actions simultaneously
        weighting stream effort; otherwise plan streams sequentially
    :param visualize: if True, it draws the constraint network and stream plan as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param kwargs: keyword args for the stream-plan solver / search subroutine
    :return: a tuple (plan, cost, evaluations)
    """
    # TODO: constrain plan skeleton
    # TODO: constrain ususable samples
    # TODO: recursively consider previously exposed binding levels
    # TODO: parameter for how many times to consider a plan skeleton
    # TODO: constrain to use previous plan skeleton
    # TODO: only use stream instances on plan
    # TODO: identify subset of state to include to further constrain (requires inverting axioms)
    # TODO: recurse to previous problems
    start_time = time.time()
    num_iterations = 0
    best_plan = None
    best_cost = INF
    evaluations, goal_expression, domain, streams = parse_problem(problem)
    constraint_solver = ConstraintSolver(problem[3])
    disabled = []
    if visualize:
        clear_visualizations()
    committed = False
    instantiator = Instantiator(evaluations, streams)
    #stream_results = []
    #while instantiator.stream_queue and (elapsed_time(start_time) < max_time):
    #    stream_results += optimistic_process_stream_queue(instantiator, prioritized=False)
    # TODO: queue to always consider functions
    # TODO: can always append functions
    # Subproblems are which streams you can use
    while elapsed_time(start_time) < max_time:
        num_iterations += 1
        print('\nIteration: {} | Evaluations: {} | Cost: {} | Time: {:.3f}'.
              format(num_iterations, len(evaluations), best_cost,
                     elapsed_time(start_time)))
        # Optimistically instantiate all currently reachable streams
        stream_results = []
        while instantiator.stream_queue and (elapsed_time(start_time) <
                                             max_time):
            stream_results += optimistic_process_stream_queue(instantiator)
        solve_stream_plan = sequential_stream_plan if effort_weight is None else simultaneous_stream_plan
        #solve_stream_plan = relaxed_stream_plan
        # TODO: constrain to use previous plan to some degree
        stream_plan, action_plan, cost = solve_stream_plan(
            evaluations, goal_expression, domain, stream_results, **kwargs)
        print('Stream plan: {}\n'
              'Action plan: {}'.format(stream_plan, action_plan))
        if stream_plan is None:
            if committed or disabled:
                # Un-commit and retry from a fresh instantiator before giving up
                if not committed:
                    reset_disabled(disabled)
                committed = False
                instantiator = Instantiator(evaluations, streams)
            else:
                break
        elif (len(stream_plan) == 0) and (cost < best_cost):
            # Fully bound plan that improves on the incumbent: done
            best_plan = action_plan
            best_cost = cost
            break
        else:
            if visualize:
                create_visualizations(evaluations, stream_plan,
                                      num_iterations)
            # TODO: use set of intended stream instances here instead
            #stream_results = []
            committed = True
            constraint_facts = constraint_solver.solve(
                get_optimistic_constraints(evaluations, stream_plan),
                verbose=verbose)
            if constraint_facts:
                # BUGFIX: materialize the map. In Python 3, map() returns an
                # iterator; evaluations.update() would exhaust it, so the
                # instantiator.add_atom loop below would silently see nothing.
                new_evaluations = list(
                    map(evaluation_from_fact, constraint_facts))
                evaluations.update(new_evaluations)
            else:
                #new_evaluations = process_stream_plan(evaluations, stream_plan, disabled, verbose)
                new_evaluations = process_immediate_stream_plan(
                    evaluations, stream_plan, disabled, verbose)
            for evaluation in new_evaluations:
                instantiator.add_atom(
                    evaluation)  # TODO: return things to try next
            #while instantiator.stream_queue and (elapsed_time(start_time) < max_time):
            #    stream_results += optimistic_process_stream_queue(instantiator, prioritized=False)
            #stream_results = stream_plan # TODO: would need to prune disabled
            # TODO: don't include streams that aren't performable?
    # TODO: could also only include the previous stream plan
    # TODO: need to be careful if I only instantiate one that I am not unable to find a plan
    # TODO: need to always propagate this a little
    return revert_solution(best_plan, best_cost, evaluations)
def solve_focused(problem, constraints=PlanConstraints(),
                  stream_info={}, action_info={}, synthesizers=[],
                  max_time=INF, max_iterations=INF, max_skeletons=INF,
                  unit_costs=False, success_cost=INF,
                  complexity_step=1,
                  unit_efforts=False, max_effort=INF, effort_weight=None,
                  reorder=True, search_sample_ratio=0,
                  visualize=False, verbose=True, **search_kwargs):
    """
    Solves a PDDLStream problem by first hypothesizing stream outputs and then determining whether they exist
    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the set of legal solutions
    :param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
    :param action_info: a dictionary from stream name to ActionInfo for planning and execution
    :param synthesizers: a list of StreamSynthesizer objects
    :param max_time: the maximum amount of time to apply streams
    :param max_iterations: the maximum number of search iterations
    :param max_skeletons: the maximum number of plan skeletons to consider
    :param unit_costs: use unit action costs rather than numeric costs
    :param success_cost: an exclusive (strict) upper bound on plan cost to terminate
    :param unit_efforts: use unit stream efforts rather than estimated numeric efforts
    :param complexity_step: the increase in the effort limit after each failure
    :param max_effort: the maximum amount of effort to consider for streams
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param reorder: if True, stream plans are reordered to minimize the expected sampling overhead
    :param search_sample_ratio: the desired ratio of search time / sample time
    :param visualize: if True, it draws the constraint network and stream plan as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan,
        and evaluations is init but expanded using stream applications
    """
    # TODO: select whether to search or sample based on expected success rates
    # TODO: no optimizers during search with relaxed_stream_plan
    num_iterations = search_time = sample_time = eager_calls = 0
    complexity_limit = float(INITIAL_COMPLEXITY)
    eager_disabled = effort_weight is None  # No point if no stream effort biasing
    evaluations, goal_exp, domain, externals = parse_problem(
        problem, stream_info=stream_info, constraints=constraints,
        unit_costs=unit_costs, unit_efforts=unit_efforts)
    store = SolutionStore(evaluations, max_time, success_cost, verbose)
    full_action_info = get_action_info(action_info)
    load_stream_statistics(externals + synthesizers)
    if visualize and not has_pygraphviz():
        visualize = False
        print('Warning, visualize=True requires pygraphviz. Setting visualize=False')
    if visualize:
        reset_visualizations()
    streams, functions, negative = partition_externals(externals, verbose=verbose)
    eager_externals = list(filter(lambda e: e.info.eager, externals))
    skeleton_queue = SkeletonQueue(store, goal_exp, domain)
    disabled = set()
    while (not store.is_terminated()) and (num_iterations < max_iterations):
        start_time = time.time()
        num_iterations += 1
        # --- Eager stream application (up to the current complexity limit) ---
        eager_instantiator = Instantiator(eager_externals, evaluations)  # Only update after an increase?
        if eager_disabled:
            push_disabled(eager_instantiator, disabled)
        eager_calls += process_stream_queue(eager_instantiator, store,
                                            complexity_limit=complexity_limit, verbose=verbose)

        print('\nIteration: {} | Complexity: {} | Skeletons: {} | Skeleton Queue: {} | Disabled: {} | Evaluations: {} | '
              'Eager Calls: {} | Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
            num_iterations, complexity_limit, len(skeleton_queue.skeletons), len(skeleton_queue), len(disabled),
            len(evaluations), eager_calls, store.best_cost, search_time, sample_time, store.elapsed_time()))
        # --- Optimistic planning over hypothesized stream outputs ---
        optimistic_solve_fn = get_optimistic_solve_fn(goal_exp, domain, negative,
                                                      max_cost=min(store.best_cost, constraints.max_cost),
                                                      unit_efforts=unit_efforts, max_effort=max_effort,
                                                      effort_weight=effort_weight, **search_kwargs)
        if (max_skeletons is not None) and (len(skeleton_queue.skeletons) < max_skeletons):
            combined_plan, cost = iterative_plan_streams(evaluations, externals, optimistic_solve_fn, complexity_limit,
                                                         unit_efforts=unit_efforts, max_effort=max_effort)
        else:
            # Skeleton budget exhausted: skip search this iteration
            combined_plan, cost = INFEASIBLE, INF
        if action_info:
            combined_plan = reorder_combined_plan(evaluations, combined_plan, full_action_info, domain)
            print('Combined plan: {}'.format(combined_plan))
        stream_plan, action_plan = separate_plan(combined_plan, full_action_info)
        #stream_plan = replan_with_optimizers(evaluations, stream_plan, domain, externals)
        stream_plan = combine_optimizers(evaluations, stream_plan)
        #stream_plan = get_synthetic_stream_plan(stream_plan, # evaluations
        #                                       [s for s in synthesizers if not s.post_only])
        if reorder:
            stream_plan = reorder_stream_plan(stream_plan)  # This may be redundant when using reorder_combined_plan

        print('Stream plan ({}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.format(
            get_length(stream_plan), compute_plan_effort(stream_plan), stream_plan,
            get_length(action_plan), cost, str_from_plan(action_plan)))
        if is_plan(stream_plan) and visualize:
            log_plans(stream_plan, action_plan, num_iterations)
            create_visualizations(evaluations, stream_plan, num_iterations)
        search_time += elapsed_time(start_time)

        # Nothing left to search or sample: terminate
        if (stream_plan is INFEASIBLE) and (not eager_instantiator) and (not skeleton_queue) and (not disabled):
            break
        # --- Sampling phase ---
        start_time = time.time()
        if not is_plan(stream_plan):
            # Failure: relax the complexity limit for the next iteration
            complexity_limit += complexity_step
            if not eager_disabled:
                reenable_disabled(evaluations, domain, disabled)
        elif not stream_plan:
            # Empty stream plan: the action plan is already fully bound
            store.add_plan(action_plan, cost)

        if max_skeletons is None:
            process_stream_plan(store, domain, disabled, stream_plan)
        else:
            allocated_sample_time = (search_sample_ratio * search_time) - sample_time
            skeleton_queue.process(stream_plan, action_plan, cost, complexity_limit, allocated_sample_time)
        sample_time += elapsed_time(start_time)

    write_stream_statistics(externals + synthesizers, verbose)
    return store.extract_solution()
def solve_serialized(initial_problem, stream_info={}, unit_costs=False,
                     unit_efforts=False, verbose=True,
                     retain_facts=True, **kwargs):
    """
    Solves a PDDLStream problem by serializing its goal into a sequence of
    incremental subgoals, solving each with solve_focused, and chaining the
    resulting plans and states together.
    :param initial_problem: a PDDLStream problem
    :param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
    :param unit_costs: use unit action costs rather than numeric costs
    :param unit_efforts: use unit stream efforts rather than estimated numeric efforts
    :param verbose: if True, this prints the result of each stream application
    :param retain_facts: if True, carry all certified facts between subproblems;
        otherwise rebuild the state from static facts plus the preimage
    :param kwargs: keyword args forwarded to solve_focused
    :return: a tuple (plan, cost, certificate); plan is None upon failure
    """
    # TODO: be careful of CanMove deadends
    domain_pddl, constant_map, stream_pddl, stream_map, init, goal = initial_problem
    _, _, domain, streams = parse_problem(initial_problem, stream_info,
                                          constraints=None,
                                          unit_costs=unit_costs,
                                          unit_efforts=unit_efforts)
    static_init, _ = partition_facts(
        domain, init)  # might not be able to reprove static_int
    #global_all, global_preimage = [], []
    global_plan = []
    global_cost = 0
    state = list(init)
    goals = serialize_goal(goal)
    # TODO: instead just track how the true init updates
    for i in range(len(goals)):
        # TODO: option in algorithms to pass in existing facts
        for stream in streams:
            stream.reset()
        # Conjoin all subgoals seen so far, so earlier goals stay achieved
        goal = And(*goals[:i + 1])
        print('Goal:', str_from_object(goal))
        # No strict need to reuse streams because generator functions
        #local_problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, state, goal)
        # NOTE(review): passes parsed `streams` in the stream_pddl slot and None
        # as stream_map, unlike the commented-out version above — verify this is
        # the intended PDDLProblem calling convention.
        local_problem = PDDLProblem(domain_pddl, constant_map, streams, None,
                                    state, goal)
        with Verbose(verbose):
            solution = solve_focused(local_problem,
                                     stream_info=stream_info,
                                     unit_costs=unit_costs,
                                     unit_efforts=unit_efforts,
                                     verbose=True,
                                     **kwargs)
        print_solution(solution)
        local_plan, local_cost, local_certificate = solution
        if local_plan is None:
            # TODO: replan upon failure
            global_certificate = Certificate(all_facts={}, preimage_facts=None)
            return None, INF, global_certificate

        if retain_facts:
            state = local_certificate.all_facts
        else:
            _, fluent_facts = partition_facts(domain, state)
            state = static_init + fluent_facts + local_certificate.preimage_facts  # TODO: include functions
        #print('State:', state)
        # TODO: indicate when each fact is used
        # TODO: record failed facts

        global_plan.extend(
            local_plan)  # TODO: compute preimage of the executed plan
        global_cost += local_cost
        static_state, _ = partition_facts(domain, state)
        #global_all.extend(partition_facts(domain, local_certificate.all_facts)[0])
        #global_preimage.extend(static_state)
        print('Static:', static_state)
        # Advance the state by simulating the plan just found
        state = apply_actions(domain, state, local_plan, unit_costs=unit_costs)
        print(SEPARATOR)
        #user_input('Continue?')
        # TODO: could also just test the goal here
        # TODO: constrain future plan skeletons
    global_certificate = Certificate(all_facts={}, preimage_facts=None)
    return global_plan, global_cost, global_certificate
def solve_incremental(problem, constraints=PlanConstraints(),
                      unit_costs=False, success_cost=INF,
                      max_iterations=INF, max_time=INF, max_memory=INF,
                      initial_complexity=0, complexity_step=1, max_complexity=INF,
                      verbose=False, **search_kwargs):
    """
    Solves a PDDLStream problem by alternating between applying all possible streams and searching
    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the set of legal solutions
    :param unit_costs: use unit action costs rather than numeric costs
    :param success_cost: the exclusive (strict) upper bound on plan cost to successfully terminate
    :param max_time: the maximum runtime
    :param max_iterations: the maximum number of search iterations
    :param max_memory: the maximum amount of memory
    :param initial_complexity: the initial stream complexity limit
    :param complexity_step: the increase in the stream complexity limit per iteration
    :param max_complexity: the maximum stream complexity limit
    :param verbose: if True, print the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan (INF if no plan), and evaluations is init expanded
        using stream applications
    """
    # max_complexity = 0 => current
    # complexity_step = INF => exhaustive
    # success_cost = terminate_cost = decision_cost
    # TODO: warning if optimizers are present
    evaluations, goal_expression, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    store = SolutionStore(
        evaluations, max_time, success_cost, verbose,
        max_memory=max_memory)  # TODO: include other info here?
    if UPDATE_STATISTICS:
        load_stream_statistics(externals)
    static_externals = compile_fluents_as_attachments(domain, externals)
    num_iterations = num_calls = 0
    complexity_limit = initial_complexity
    instantiator = Instantiator(static_externals, evaluations)
    # Seed the evaluations by applying streams up to the initial complexity
    num_calls += process_stream_queue(instantiator, store, complexity_limit,
                                      verbose=verbose)
    while not store.is_terminated() and (num_iterations < max_iterations) and (
            complexity_limit <= max_complexity):
        num_iterations += 1
        print(
            'Iteration: {} | Complexity: {} | Calls: {} | Evaluations: {} | Solved: {} | Cost: {:.3f} | '
            'Search Time: {:.3f} | Sample Time: {:.3f} | Time: {:.3f}'.format(
                num_iterations, complexity_limit, num_calls, len(evaluations),
                store.has_solution(), store.best_cost, store.search_time,
                store.sample_time, store.elapsed_time()))
        # Search over the current (finite) set of evaluations
        plan, cost = solve_finite(evaluations,
                                  goal_expression,
                                  domain,
                                  max_cost=min(store.best_cost,
                                               constraints.max_cost),
                                  **search_kwargs)
        if is_plan(plan):
            store.add_plan(plan, cost)
        if not instantiator:
            # No more streams to apply: the evaluation set is saturated
            break
        if complexity_step is None:
            # TODO: option to select the next k-smallest complexities
            complexity_limit = instantiator.min_complexity()
        else:
            complexity_limit += complexity_step
        num_calls += process_stream_queue(instantiator, store,
                                          complexity_limit, verbose=verbose)
    #retrace_stream_plan(store, domain, goal_expression)
    #print('Final queue size: {}'.format(len(instantiator)))

    summary = store.export_summary()
    summary.update({
        'iterations': num_iterations,
        'complexity': complexity_limit,
    })
    print('Summary: {}'.format(str_from_object(
        summary, ndigits=3)))  # TODO: return the summary
    if UPDATE_STATISTICS:
        write_stream_statistics(externals, verbose)
    return store.extract_solution()
def solve_focused(problem, max_time=INF, stream_info={}, effort_weight=None,
                  eager_iterations=1, visualize=False, verbose=True, **kwargs):
    """
    Solves a PDDLStream problem by optimistically instantiating streams,
    planning over the hypothesized outputs, and then evaluating the streams
    appearing on the chosen plan.
    :param problem: a PDDLStream problem
    :param max_time: the maximum runtime
    :param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
    :param effort_weight: if not None, plan streams and actions simultaneously
        weighting stream effort; otherwise plan streams sequentially
    :param eager_iterations: the number of eager stream evaluation passes per iteration
    :param visualize: if True, it draws the constraint network and stream plan as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param kwargs: keyword args for the stream-plan solver / search subroutine
    :return: a tuple (plan, cost, evaluations)
    """
    # TODO: eager, negative, context, costs, bindings
    start_time = time.time()
    num_iterations = 0
    best_plan = None
    best_cost = INF
    evaluations, goal_expression, domain, externals = parse_problem(problem)
    update_stream_info(externals, stream_info)
    # BUGFIX: materialize the filter. In Python 3, filter() returns a one-shot
    # iterator that would be exhausted after the first eagerly_evaluate call
    # below, silently disabling eager evaluation on later iterations. This also
    # matches the list(filter(...)) usage in the other solve_focused variants.
    eager_externals = list(filter(lambda e: e.info.eager, externals))
    constraint_solver = ConstraintSolver(problem[3])
    disabled = []
    if visualize:
        clear_visualizations()
    while elapsed_time(start_time) < max_time:
        num_iterations += 1
        print('\nIteration: {} | Evaluations: {} | Cost: {} | Time: {:.3f}'.
              format(num_iterations, len(evaluations), best_cost,
                     elapsed_time(start_time)))
        eagerly_evaluate(evaluations, eager_externals, eager_iterations,
                         max_time - elapsed_time(start_time), verbose)
        # TODO: version that just calls one of the incremental algorithms
        instantiator = Instantiator(evaluations, externals)
        # Optimistically instantiate all currently reachable streams
        stream_results = []
        while instantiator.stream_queue and (elapsed_time(start_time) <
                                             max_time):
            stream_results += optimistic_process_stream_queue(instantiator)
        # exhaustive_stream_plan | incremental_stream_plan | simultaneous_stream_plan | sequential_stream_plan | relaxed_stream_plan
        solve_stream_plan = sequential_stream_plan if effort_weight is None else simultaneous_stream_plan
        #solve_stream_plan = simultaneous_stream_plan
        stream_plan, action_plan, cost = solve_stream_plan(
            evaluations, goal_expression, domain, stream_results, **kwargs)
        print('Stream plan: {}\n'
              'Action plan: {}'.format(stream_plan, action_plan))
        if stream_plan is None:
            if not disabled:
                break
            reset_disabled(disabled)
        elif (len(stream_plan) == 0) and (cost < best_cost):
            # Fully bound plan that improves on the incumbent: done
            best_plan = action_plan
            best_cost = cost
            break
        else:
            if visualize:
                create_visualizations(evaluations, stream_plan,
                                      num_iterations)
            constraint_facts = constraint_solver.solve(
                get_optimistic_constraints(evaluations, stream_plan),
                verbose=verbose)
            if constraint_facts:
                evaluations.update(map(evaluation_from_fact, constraint_facts))
            else:
                #process_stream_plan(evaluations, stream_plan, disabled, verbose)
                process_immediate_stream_plan(evaluations, stream_plan,
                                              disabled, verbose)

    return revert_solution(best_plan, best_cost, evaluations)
def solve_focused(problem, constraints=PlanConstraints(),
                  stream_info={}, replan_actions=set(),
                  max_time=INF, max_iterations=INF,
                  initial_complexity=0, complexity_step=1,
                  max_skeletons=INF, bind=True, max_failures=0,
                  unit_costs=False, success_cost=INF,
                  unit_efforts=False, max_effort=INF, effort_weight=None,
                  reorder=True, search_sample_ratio=0,
                  visualize=False, verbose=True, **search_kwargs):
    """
    Solves a PDDLStream problem by first hypothesizing stream outputs and then determining whether they exist
    :param problem: a PDDLStream problem
    :param constraints: PlanConstraints on the set of legal solutions
    :param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
    :param replan_actions: a set of action names after which to trigger replanning
    :param max_time: the maximum amount of time to apply streams
    :param max_iterations: the maximum number of search iterations
    :param max_skeletons: the maximum number of plan skeletons to consider (None disables skeletons)
    :param bind: if True, bind new stream results to existing plan skeletons
    :param max_failures: the maximum number of stream failures before switching behavior
    :param unit_costs: use unit action costs rather than numeric costs
    :param success_cost: an exclusive (strict) upper bound on plan cost to terminate
    :param unit_efforts: use unit stream efforts rather than estimated numeric efforts
    :param initial_complexity: the initial complexity limit
    :param complexity_step: the increase in the complexity limit after each failure
    :param max_effort: the maximum amount of effort to consider for streams
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param reorder: if True, stream plans are reordered to minimize the expected sampling overhead
    :param search_sample_ratio: the desired ratio of search time / sample time
    :param visualize: if True, it draws the constraint network and stream plan as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but expanded using stream
        applications
    """
    # TODO: select whether to search or sample based on expected success rates
    # TODO: no optimizers during search with relaxed_stream_plan
    # TODO: locally optimize only after a solution is identified
    # TODO: replan with a better search algorithm after feasible
    num_iterations = search_time = sample_time = eager_calls = 0
    complexity_limit = initial_complexity
    # TODO: make effort_weight be a function of the current cost
    # TODO: change the search algorithm and unit costs based on the best cost
    eager_disabled = effort_weight is None  # No point if no stream effort biasing
    evaluations, goal_exp, domain, externals = parse_problem(
        problem, stream_info=stream_info, constraints=constraints,
        unit_costs=unit_costs, unit_efforts=unit_efforts)
    store = SolutionStore(evaluations, max_time, success_cost, verbose)
    load_stream_statistics(externals)
    if visualize and not has_pygraphviz():
        visualize = False
        print(
            'Warning, visualize=True requires pygraphviz. Setting visualize=False'
        )
    if visualize:
        reset_visualizations()
    streams, functions, negative, optimizers = partition_externals(
        externals, verbose=verbose)
    eager_externals = list(filter(lambda e: e.info.eager, externals))
    use_skeletons = max_skeletons is not None
    has_optimizers = bool(optimizers)
    # Optimizers are only supported in combination with plan skeletons
    assert implies(has_optimizers, use_skeletons)
    skeleton_queue = SkeletonQueue(store, domain, disable=not has_optimizers)
    disabled = set()  # Max skeletons after a solution
    while (not store.is_terminated()) and (num_iterations < max_iterations):
        start_time = time.time()
        num_iterations += 1
        # --- Eager stream application (up to the current complexity limit) ---
        eager_instantiator = Instantiator(
            eager_externals, evaluations)  # Only update after an increase?
        if eager_disabled:
            push_disabled(eager_instantiator, disabled)
        eager_calls += process_stream_queue(eager_instantiator,
                                            store,
                                            complexity_limit=complexity_limit,
                                            verbose=verbose)

        print(
            '\nIteration: {} | Complexity: {} | Skeletons: {} | Skeleton Queue: {} | Disabled: {} | Evaluations: {} | '
            'Eager Calls: {} | Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'
            .format(num_iterations, complexity_limit,
                    len(skeleton_queue.skeletons), len(skeleton_queue),
                    len(disabled), len(evaluations), eager_calls,
                    store.best_cost, search_time, sample_time,
                    store.elapsed_time()))
        # --- Optimistic planning over hypothesized stream outputs ---
        optimistic_solve_fn = get_optimistic_solve_fn(
            goal_exp, domain, negative,
            replan_actions=replan_actions, reachieve=use_skeletons,
            max_cost=min(store.best_cost, constraints.max_cost),
            max_effort=max_effort, effort_weight=effort_weight, **search_kwargs)
        # TODO: just set unit effort for each stream beforehand
        if (max_skeletons is None) or (len(skeleton_queue.skeletons) <
                                       max_skeletons):
            # Temporarily install axioms that disable already-tried optimizer results
            disabled_axioms = create_disabled_axioms(
                skeleton_queue) if has_optimizers else []
            if disabled_axioms:
                domain.axioms.extend(disabled_axioms)
            stream_plan, action_plan, cost = iterative_plan_streams(
                evaluations, (streams + functions + optimizers),
                optimistic_solve_fn, complexity_limit, max_effort=max_effort)
            for axiom in disabled_axioms:
                domain.axioms.remove(axiom)
        else:
            # Skeleton budget exhausted: skip search this iteration
            stream_plan, action_plan, cost = INFEASIBLE, INFEASIBLE, INF
        #stream_plan = replan_with_optimizers(evaluations, stream_plan, domain, externals) or stream_plan
        stream_plan = combine_optimizers(evaluations, stream_plan)
        #stream_plan = get_synthetic_stream_plan(stream_plan, # evaluations
        #                                       [s for s in synthesizers if not s.post_only])
        if reorder:
            stream_plan = reorder_stream_plan(
                stream_plan
            )  # This may be redundant when using reorder_combined_plan

        num_optimistic = sum(r.optimistic
                             for r in stream_plan) if stream_plan else 0
        print('Stream plan ({}, {}, {:.3f}): {}\nAction plan ({}, {:.3f}): {}'.
              format(get_length(stream_plan), num_optimistic,
                     compute_plan_effort(stream_plan), stream_plan,
                     get_length(action_plan), cost, str_from_plan(action_plan)))
        if is_plan(stream_plan) and visualize:
            log_plans(stream_plan, action_plan, num_iterations)
            create_visualizations(evaluations, stream_plan, num_iterations)
        search_time += elapsed_time(start_time)

        # Nothing left to search or sample: terminate
        if (stream_plan is INFEASIBLE) and (not eager_instantiator) and (
                not skeleton_queue) and (not disabled):
            break
        # --- Sampling phase ---
        start_time = time.time()
        if not is_plan(stream_plan):
            # Failure: relax the complexity limit for the next iteration
            complexity_limit += complexity_step
            if not eager_disabled:
                reenable_disabled(evaluations, disabled)

        #print(stream_plan_complexity(evaluations, stream_plan))
        if use_skeletons:
            #optimizer_plan = replan_with_optimizers(evaluations, stream_plan, domain, optimizers)
            optimizer_plan = None
            if optimizer_plan is not None:
                # TODO: post process a bound plan
                print('Optimizer plan ({}, {:.3f}): {}'.format(
                    get_length(optimizer_plan),
                    compute_plan_effort(optimizer_plan), optimizer_plan))
                skeleton_queue.new_skeleton(optimizer_plan, action_plan, cost)
            # Unlimited sampling once the skeleton budget has been exceeded
            allocated_sample_time = (search_sample_ratio * search_time) - sample_time \
                if len(skeleton_queue.skeletons) <= max_skeletons else INF
            skeleton_queue.process(stream_plan, action_plan, cost,
                                   complexity_limit, allocated_sample_time)
        else:
            process_stream_plan(store, domain, disabled, stream_plan,
                                action_plan, cost,
                                bind=bind, max_failures=max_failures)
        sample_time += elapsed_time(start_time)

    write_stream_statistics(externals, verbose)
    return store.extract_solution()