def solve_optimistic_sequential(domain, stream_domain, applied_results, all_results,
                                opt_evaluations, node_from_atom, goal_expression,
                                effort_weight, debug=False, **kwargs):
    """Plan sequentially over the optimistic evaluations.

    Instantiates a task from opt_evaluations/goal_expression, augments it with
    stream efforts (and optimizer effects/axioms when optimizer results are
    applied), converts it to SAS+, and runs the search.

    Returns a 4-tuple (instantiated, action_instances, plan, cost); the middle
    entries are None and cost is INF when instantiation or search fails.
    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    confirm block structure against the original file.
    """
    # Temporal (SimplifiedDomain) problems are delegated to the temporal solver.
    if isinstance(stream_domain, SimplifiedDomain):
        return solve_optimistic_temporal(domain, stream_domain, applied_results, all_results,
                                         opt_evaluations, node_from_atom, goal_expression,
                                         effort_weight, debug=debug, **kwargs)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain)  # begin_metric
    with Verbose():
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return instantiated, None, None, INF
    cost_from_action = add_stream_efforts(node_from_atom, instantiated, effort_weight)
    if using_optimizers(applied_results):
        add_optimizer_effects(instantiated, node_from_atom)
        # TODO: reachieve=False when using optimizers or should add applied facts
        instantiate_optimizer_axioms(instantiated, domain, all_results)
    action_from_name = rename_instantiated_actions(instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True
    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    renamed_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
    if renamed_plan is None:
        return instantiated, None, None, INF
    # Map renamed search actions back to their instantiated counterparts.
    action_instances = [action_from_name[name] for name, _ in renamed_plan]
    cost = get_plan_cost(action_instances, cost_from_action)
    #plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)
    plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))
    return instantiated, action_instances, plan, cost
def replace_derived(task, negative_init, action_instances):
    """Replace derived preconditions of each action instance with an axiom plan.

    For every instance, recomputes the Prolog model from the current task.init,
    extracts achieving axioms for the instance's preconditions, substitutes the
    derived literals (substitute_derived), and applies the action to advance
    task.init. Temporarily clears task.actions and strips function assignments
    from task.init; both are restored before returning.
    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """
    import pddl_to_prolog
    import build_model
    import axiom_rules
    import pddl
    original_actions = task.actions
    original_init = task.init
    task.actions = []
    # Function assignments (numeric fluents) are removed from init for modeling.
    function_assignments = {f.fluent: f.expression for f in task.init
                            if isinstance(f, pddl.f_expression.FunctionAssignment)}
    task.init = (set(task.init) | {a.negate() for a in negative_init}) - set(function_assignments)
    for instance in action_instances:
        #axiom_plan = extract_axiom_plan(task, instance, negative_from_name={}) # TODO: refactor this
        # TODO: just instantiate task?
        with Verbose(False):
            model = build_model.compute_model(pddl_to_prolog.translate(task))  # Changes based on init
        #fluent_facts = instantiate.get_fluent_facts(task, model)
        fluent_facts = MockSet()
        instantiated_axioms = instantiate_axioms(model, task.init, fluent_facts)
        goal_list = []  # TODO: include the goal?
        with Verbose(False):  # TODO: helpful_axioms prunes axioms that are already true (e.g. not Unsafe)
            helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms([instance], instantiated_axioms, goal_list)
        axiom_from_atom = get_achieving_axioms(task.init | negative_init | set(axiom_init),
                                               helpful_axioms)  # negated_from_name=negated_from_name)
        axiom_plan = []
        extract_axioms(axiom_from_atom, instance.precondition, axiom_plan)
        substitute_derived(axiom_plan, instance)
        assert(is_applicable(task.init, instance))
        apply_action(task.init, instance)
    # Restore the task's mutated state.
    task.actions = original_actions
    task.init = original_init
def solve_optimistic_sequential(domain, stream_domain, applied_results, all_results,
                                opt_evaluations, node_from_atom, goal_expression,
                                effort_weight, debug=False, **kwargs):
    """Plan sequentially over the optimistic evaluations (no temporal plan).

    Returns (instantiated, action_instances, temporal_plan, cost) where
    temporal_plan is always None here; cost is INF on failure.
    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """
    #print(sorted(map(fact_from_evaluation, opt_evaluations)))
    temporal_plan = None
    problem = get_problem(opt_evaluations, goal_expression, stream_domain)  # begin_metric
    with Verbose(verbose=False):
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return instantiated, None, temporal_plan, INF
    # Snapshot base action costs before efforts are folded in.
    cost_from_action = {action: action.cost for action in instantiated.actions}
    add_stream_efforts(node_from_atom, instantiated, effort_weight)
    if using_optimizers(applied_results):
        add_optimizer_effects(instantiated, node_from_atom)
        # TODO: reachieve=False when using optimizers or should add applied facts
        instantiate_optimizer_axioms(instantiated, domain, all_results)
    action_from_name = rename_instantiated_actions(instantiated, RENAME_ACTIONS)
    # TODO: the action unsatisfiable conditions are pruned
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True
    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    renamed_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
    if renamed_plan is None:
        return instantiated, None, temporal_plan, INF
    # When actions were not renamed, reconstruct the '(name arg1 arg2)' key form.
    action_instances = [action_from_name[name if RENAME_ACTIONS else '({} {})'.format(name, ' '.join(args))]
                        for name, args in renamed_plan]
    cost = get_plan_cost(action_instances, cost_from_action)
    return instantiated, action_instances, temporal_plan, cost
def extract_axiom_plan(task, action_instance, negative_from_name, static_state=set()):
    """Extract a plan of axiom applications achieving the derived preconditions
    of action_instance from task.init.

    Returns None when the non-derived preconditions don't already hold, [] when
    no axioms are necessary, otherwise the extracted axiom plan. Temporarily
    mutates task.init/actions/axioms and restores them before returning.
    NOTE(review): `static_state=set()` is a mutable default — read-only here,
    but consider frozenset(). Indentation reconstructed from a mangled source.
    """
    import pddl_to_prolog
    import build_model
    import axiom_rules
    import instantiate
    axioms_from_name = get_derived_predicates(task.axioms)
    derived_preconditions = {l for l in action_instance.precondition if l.predicate in axioms_from_name}
    nonderived_preconditions = {l for l in action_instance.precondition if l not in derived_preconditions}
    if not conditions_hold(task.init, nonderived_preconditions):
        return None
    axiom_from_action = get_necessary_axioms(action_instance, task.axioms, negative_from_name)
    if not axiom_from_action:
        return []
    # Collect the (renamed) literals each axiom conditions on, per predicate.
    conditions_from_predicate = defaultdict(set)
    for axiom, mapping in axiom_from_action.values():
        for literal in get_literals(axiom.condition):
            conditions_from_predicate[literal.predicate].add(literal.rename_variables(mapping))
    original_init = task.init
    original_actions = task.actions
    original_axioms = task.axioms
    # Restrict init to atoms that can matter for the necessary axioms.
    task.init = {atom for atom in task.init if is_useful_atom(atom, conditions_from_predicate)}
    # TODO: store map from predicate to atom
    task.actions = axiom_from_action.keys()
    task.axioms = []
    # TODO: maybe it would just be better to drop the negative throughout this process until this end
    with Verbose(False):
        model = build_model.compute_model(pddl_to_prolog.translate(task))  # Changes based on init
    task.actions = original_actions
    task.axioms = original_axioms
    opt_facts = instantiate.get_fluent_facts(task, model) | (task.init - static_state)
    mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
    instantiated_axioms = instantiate_necessary_axioms(model, static_state, mock_fluent, axiom_from_action)
    goal_list = []
    with Verbose(False):
        helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms(
            [action_instance], instantiated_axioms, goal_list)
    axiom_init = set(axiom_init)
    axiom_effects = {axiom.effect for axiom in helpful_axioms}
    #assert len(axiom_effects) == len(axiom_init)
    # Default any undetermined derived atom to its negated positive form.
    for pre in list(derived_preconditions) + list(axiom_effects):
        if (pre not in axiom_init) and (pre.negate() not in axiom_init):
            axiom_init.add(pre.positive().negate())
    axiom_from_atom = get_achieving_axioms(task.init | axiom_init, helpful_axioms, negative_from_name)
    axiom_plan = []  # Could always add all conditions
    success = extract_axioms(axiom_from_atom, derived_preconditions, axiom_plan, negative_from_name)
    task.init = original_init
    #if not success:
    #    return None
    return axiom_plan
def solve_optimistic_temporal(domain, stream_domain, applied_results, all_results, opt_evaluations, node_from_atom, goal_expression, effort_weight, debug=False, **kwargs): assert domain is stream_domain #assert len(applied_results) == len(all_results) problem = get_problem(opt_evaluations, goal_expression, domain) with Verbose(): instantiated = instantiate_task(task_from_domain_problem(domain, problem)) if instantiated is None: return instantiated, None, None, INF problem = get_problem_pddl(opt_evaluations, goal_expression, domain.pddl) pddl_plan, makespan = solve_tfd(domain.pddl, problem, debug=debug) if pddl_plan is None: return instantiated, None, pddl_plan, makespan instance_from_action_args = defaultdict(list) for instance in instantiated.actions: tokens = instance.name.strip('()').split(' ') name, args = tokens[0], tuple(tokens[1:]) instance_from_action_args[name, args].append(instance) #instance.action, instance.var_mapping action_instances = [] for action in pddl_plan: instances = instance_from_action_args[action.name, action.args] assert len(instances) == 1 # TODO: support 2 <= case action_instances.append(instances[0]) plan = obj_from_pddl_plan(pddl_plan) return instantiated, action_instances, plan, makespan
def examine_instantiated(problem, constraints=PlanConstraints(), unit_costs=False, unique=False,
                         verbose=False, debug=False):
    """Instantiate a PDDLProblem (with debug stream maps) for analysis.

    Optimistically processes all streams, folds the results into the
    evaluations, instantiates the resulting task, reinstantiates its axioms,
    and returns (results, instantiated) — or None if instantiation fails.
    NOTE(review): `constraints=PlanConstraints()` is a shared mutable default —
    confirm PlanConstraints is never mutated by callees.
    """
    # TODO: refactor to an analysis file
    domain_pddl, constant_map, stream_pddl, _, init, goal = problem
    stream_map = DEBUG if unique else SHARED_DEBUG  # DEBUG_MODES
    problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
    evaluations, goal_exp, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    assert not isinstance(domain, SimplifiedDomain)
    #store = SolutionStore(evaluations, max_time, success_cost=INF, verbose=verbose)
    #instantiator = Instantiator(externals, evaluations)
    #process_stream_queue(instantiator, store, complexity_limit=INF, verbose=verbose)
    #results = [] # TODO: extract from process_stream_queue
    #set_unique(externals)
    #domain.actions[:] = [] # TODO: only instantiate axioms
    # TODO: drop all fluents and instantiate
    # TODO: relaxed planning version of this
    results, exhausted = optimistic_process_streams(evaluations, externals, complexity_limit=INF, max_effort=None)
    evaluations = evaluations_from_stream_plan(evaluations, results, max_effort=None)
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    with Verbose(debug):
        instantiated = instantiate_task(task)
        if instantiated is None:
            return None
        # TODO: reinstantiate actions?
        instantiated.axioms[:] = [reinstantiate_axiom(axiom) for axiom in instantiated.axioms]
        instantiated = convert_instantiated(instantiated)
    return results, instantiated
def solve_optimistic_temporal(domain, stream_domain, applied_results, all_results,
                              opt_evaluations, node_from_atom, goal_expression,
                              effort_weight, debug=False, **kwargs):
    """Plan temporally over the optimistic evaluations using TFD.

    Returns (instantiated, action_instances, temporal_plan, makespan); the
    makespan occupies the cost slot. Each TFD plan step is matched back to an
    instantiated action by (name, args); ambiguous matches are dumped for
    debugging before taking the first candidate.

    Fix: the inner diagnostic loop used `for action in instances`, shadowing
    the outer loop variable `action`; renamed to `candidate`.
    """
    # TODO: assert that the unused parameters are off
    assert domain is stream_domain
    #assert len(applied_results) == len(all_results)
    problem = get_problem(opt_evaluations, goal_expression, domain)
    with Verbose():
        instantiated = instantiate_task(task_from_domain_problem(domain, problem))
    if instantiated is None:
        return instantiated, None, None, INF
    problem = get_problem_pddl(opt_evaluations, goal_expression, domain.pddl)
    pddl_plan, makespan = solve_tfd(domain.pddl, problem, debug=debug, **kwargs)
    if pddl_plan is None:
        return instantiated, None, pddl_plan, makespan
    # Index instantiated actions by (name, args).
    instance_from_action_args = defaultdict(list)
    for instance in instantiated.actions:
        name, args = parse_action(instance)
        instance_from_action_args[name, args].append(instance)
        #instance.action, instance.var_mapping
    action_instances = []
    for action in pddl_plan:
        instances = instance_from_action_args[action.name, action.args]
        if len(instances) != 1:
            # Dump all candidates to aid debugging ambiguous/missing matches.
            # NOTE: instances[0] below still raises IndexError when empty.
            for candidate in instances:
                candidate.dump()
        #assert len(instances) == 1 # TODO: support 2 <= case
        action_instances.append(instances[0])
    temporal_plan = obj_from_pddl_plan(pddl_plan)  # pddl_plan is sequential
    return instantiated, action_instances, temporal_plan, makespan
def extract_axiom_plan(task, goals, negative_from_name, static_state=frozenset()):
    """Extract a plan of axiom applications achieving the derived goals.

    Computes the axioms necessary for the derived subset of `goals`,
    instantiates them against a restricted init, and delegates the actual
    extraction to extraction_helper. Temporarily mutates
    task.init/actions/axioms and restores them before returning.

    Fix: default `static_state` changed from the mutable `set()` to
    `frozenset()` — it is only read here (`-` and membership), so behavior is
    unchanged while avoiding the shared-mutable-default pitfall.
    """
    import pddl_to_prolog
    import build_model
    import instantiate
    # TODO: only reinstantiate the negative axioms
    axioms_from_name = get_derived_predicates(task.axioms)
    derived_goals = {l for l in goals if l.predicate in axioms_from_name}
    axiom_from_action = get_necessary_axioms(derived_goals, task.axioms, negative_from_name)
    if not axiom_from_action:
        return []
    # Collect the (renamed) literals each axiom conditions on, per predicate.
    conditions_from_predicate = defaultdict(set)
    for axiom, mapping in axiom_from_action.values():
        for literal in get_literals(axiom.condition):
            conditions_from_predicate[literal.predicate].add(literal.rename_variables(mapping))
    original_init = task.init
    original_actions = task.actions
    original_axioms = task.axioms
    # TODO: retrieve initial state based on if helpful
    task.init = {atom for atom in task.init if is_useful_atom(atom, conditions_from_predicate)}
    # TODO: store map from predicate to atom
    task.actions = axiom_from_action.keys()
    task.axioms = []
    # TODO: maybe it would just be better to drop the negative throughout this process until this end
    with Verbose(False):
        model = build_model.compute_model(pddl_to_prolog.translate(task))  # Changes based on init
    task.actions = original_actions
    task.axioms = original_axioms
    opt_facts = instantiate.get_fluent_facts(task, model) | (task.init - static_state)
    mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
    instantiated_axioms = instantiate_necessary_axioms(model, static_state, mock_fluent, axiom_from_action)
    axiom_plan = extraction_helper(task.init, instantiated_axioms, derived_goals, negative_from_name)
    task.init = original_init
    return axiom_plan
def instantiate_optimizer_axioms(instantiated, domain, results):
    """Instantiate UNSATISFIABLE axioms against a temporary domain and fold
    them into `instantiated` (extends axioms, updates atoms in place)."""
    # Needed for instantiating axioms before adding stream action effects
    # Otherwise, FastDownward will prune these unreachable axioms
    # TODO: compute this first and then apply the eager actions
    stream_init = {fd_from_fact(result.stream_fact)
                   for result in results if isinstance(result, StreamResult)}
    evaluations = list(map(evaluation_from_fd, stream_init | instantiated.atoms))
    # Temporary domain with only the UNSATISFIABLE predicate and its axioms.
    temp_domain = make_domain(predicates=[make_predicate(UNSATISFIABLE, [])],
                              axioms=[ax for ax in domain.axioms if ax.name == UNSATISFIABLE])
    temp_problem = get_problem(evaluations, Not((UNSATISFIABLE, )), temp_domain)
    # TODO: UNSATISFIABLE might be in atoms making the goal always infeasible
    with Verbose():
        # TODO: the FastDownward instantiation prunes static preconditions
        use_fd = False if using_optimizers(results) else FD_INSTANTIATE
        new_instantiated = instantiate_task(task_from_domain_problem(temp_domain, temp_problem),
                                            use_fd=use_fd, check_infeasible=False, prune_static=False)
        assert new_instantiated is not None
    instantiated.axioms.extend(new_instantiated.axioms)
    instantiated.atoms.update(new_instantiated.atoms)
def extraction_helper(state, instantiated_axioms, goals, negative_from_name={}):
    """Extract an achieving axiom plan for `goals` from `state`.

    Runs handle_axioms, defaults undetermined atoms to their negated positive
    form, then extracts axioms via a synthetic goal action. Prints a warning
    (but still returns the partial plan) on failure.
    NOTE(review): `negative_from_name={}` is a mutable default — read-only here.
    """
    # TODO: filter instantiated_axioms that aren't applicable?
    with Verbose(False):
        helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms([], instantiated_axioms, goals)
    axiom_init = set(axiom_init)
    axiom_effects = {axiom.effect for axiom in helpful_axioms}
    #assert len(axiom_effects) == len(axiom_init)
    for pre in list(goals) + list(axiom_effects):
        if pre.positive() not in axiom_init:
            axiom_init.add(pre.positive().negate())
    # Synthetic action whose preconditions are the goals, to anchor extraction.
    goal_action = pddl.PropositionalAction(GOAL_NAME, goals, [], None)
    axiom_from_atom, _ = get_achieving_axioms(state | axiom_init, helpful_axioms + [goal_action],
                                              negative_from_name)
    axiom_plan = []  # Could always add all conditions
    success = extract_axioms(state | axiom_init, axiom_from_atom, goals, axiom_plan, negative_from_name)
    if not success:
        print('Warning! Could not extract an axiom plan')
        #return None
    return axiom_plan
def abstrips_solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **kwargs):
    """Solve sas_task by ABSTRIPS-style refinement over `hierarchy` levels.

    Each level prunes the remaining hierarchy from a copy of the task, adds the
    previous plan as subgoals, and re-searches. Falls back to plain
    solve_from_task when hierarchy is empty. Returns (plan, cost).
    NOTE(review): run_search is called with debug=True; output is gated by the
    surrounding Verbose(debug), so this appears intentional — confirm.
    NOTE(review): `hierarchy=[]` is a mutable default — read-only here.
    """
    # Like partial order planning in terms of precondition order
    # TODO: add achieve subgoal actions
    # TODO: most generic would be a heuristic on each state
    if not hierarchy:
        return solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
    start_time = time()
    plan, cost = None, INF
    with Verbose(debug):
        print('\n' + 50*'-' + '\n')
        last_plan = []
        for level in range(len(hierarchy)+1):
            local_sas_task = deepcopy(sas_task)
            prune_hierarchy_pre_eff(local_sas_task, hierarchy[level:])  # TODO: break if no pruned
            add_subgoals(local_sas_task, last_plan)
            write_sas_task(local_sas_task, temp_dir)
            plan, cost = parse_solution(run_search(temp_dir, debug=True, **kwargs))
            if (level == len(hierarchy)) or (plan is None):
                # TODO: fall back on standard search
                break
            last_plan = [name_from_action(action, args) for action, args in plan]
        if clean:
            safe_rm_dir(temp_dir)
        print('Total runtime:', time() - start_time)
    return plan, cost
def extraction_helper(init, instantiated_axioms, goals, negative_from_name={}):
    """Extract an achieving axiom plan for the derived subset of `goals`.

    Filters `goals` down to those whose predicate is derived by some
    instantiated axiom, runs handle_axioms, defaults undetermined atoms to
    their negated positive form, and extracts the achieving axioms. Prints a
    warning (but still returns the partial plan) on failure.

    Fix: the failure message read "Could extract an axiom plan" — missing
    "not" (compare the sibling extraction_helper's message).
    NOTE(review): `negative_from_name={}` is a mutable default — read-only here.
    """
    import axiom_rules
    # TODO: filter instantiated_axioms that aren't applicable?
    derived_predicates = {instance.effect.predicate for instance in instantiated_axioms}
    derived_goals = {l for l in goals if l.predicate in derived_predicates}
    if not derived_goals:
        # No derived goals means an empty axiom plan suffices.
        return []
    with Verbose(False):
        helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms(
            [], instantiated_axioms, derived_goals)
    axiom_init = set(axiom_init)
    axiom_effects = {axiom.effect for axiom in helpful_axioms}
    #assert len(axiom_effects) == len(axiom_init)
    for pre in list(derived_goals) + list(axiom_effects):
        if pre.positive() not in axiom_init:
            axiom_init.add(pre.positive().negate())
    axiom_from_atom = get_achieving_axioms(init | axiom_init, helpful_axioms, negative_from_name)
    axiom_plan = []  # Could always add all conditions
    success = extract_axioms(axiom_from_atom, derived_goals, axiom_plan, negative_from_name)
    if not success:
        print('Warning! Could not extract an axiom plan')
        #return None
    return axiom_plan
def abstrips_solve_from_task_sequential(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False,
                                        hierarchy=[], subgoal_horizon=1, **kwargs):
    """Solve sas_task by hierarchical refinement, planning subgoal-by-subgoal.

    Like abstrips_solve_from_task, but each level's plan is achieved
    sequentially via plan_subgoals, sliced by subgoal_horizon. Returns
    (plan, cost).
    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    in particular, whether the hierarchy_horizon slicing sits inside the
    `last_plan is not None` branch should be confirmed (hierarchy[level-1]
    is only meaningful for level >= 1, which that branch implies).
    NOTE(review): `hierarchy=[]` is a mutable default — read-only here.
    """
    # TODO: version that plans for each goal individually
    # TODO: can reduce to goal serialization if binary flag for each subgoal
    if not hierarchy:
        return solve_from_task(sas_task, temp_dir=temp_dir, clean=clean, debug=debug, **kwargs)
    start_time = time()
    plan, cost = None, INF
    with Verbose(debug):
        last_plan = None
        for level in range(len(hierarchy) + 1):
            local_sas_task = deepcopy(sas_task)
            prune_hierarchy_pre_eff(local_sas_task, hierarchy[level:])  # TODO: break if no pruned
            # The goal itself is effectively a subgoal
            # Handle this subgoal horizon
            subgoal_plan = [local_sas_task.goal.pairs[:]]
            # TODO: do I want to consider the "subgoal action" as a real action?
            if last_plan is not None:
                subgoal_var = add_subgoals(local_sas_task, last_plan)
                subgoal_plan = [[(subgoal_var, val)] for val in range(
                    1, local_sas_task.variables.ranges[subgoal_var], subgoal_horizon)] + subgoal_plan
                hierarchy_horizon = min(hierarchy[level-1].horizon, len(subgoal_plan))
                subgoal_plan = subgoal_plan[:hierarchy_horizon]
            plan, cost = plan_subgoals(local_sas_task, subgoal_plan, temp_dir, **kwargs)
            if (level == len(hierarchy)) or (plan is None):
                # TODO: fall back on normal
                # TODO: search in space of subgoals
                break
            last_plan = [name_from_action(action, args) for action, args in plan]
        if clean:
            safe_rm_dir(temp_dir)
        print('Total runtime:', time() - start_time)
    # TODO: record which level of abstraction each operator is at when returning
    # TODO: return instantiated actions here rather than names (including pruned pre/eff)
    return plan, cost
def examine_instantiated(problem, constraints=PlanConstraints(), unit_costs=False, max_time=INF,
                         verbose=False, **search_args):
    """Parse a PDDLProblem with the DEBUG stream map, exhaustively process its
    streams, and return the converted instantiated task.

    Fix: `Instantiator(externals, evaluations)` was commented out while
    `process_stream_queue(instantiator, ...)` still referenced it, raising
    NameError on every call; the construction is restored.
    NOTE(review): `constraints=PlanConstraints()` is a shared mutable default —
    confirm PlanConstraints is never mutated by callees.
    """
    domain_pddl, constant_map, stream_pddl, _, init, goal = problem
    stream_map = DEBUG
    problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)
    evaluations, goal_exp, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    store = SolutionStore(evaluations, max_time, success_cost=INF, verbose=verbose)
    #externals = compile_fluents_as_attachments(domain, externals)
    instantiator = Instantiator(externals, evaluations)  # was commented out -> NameError below
    process_stream_queue(instantiator, store, complexity_limit=INF, verbose=verbose)
    #plan, cost = solve_finite(evaluations, goal_exp, domain, max_cost=max_cost, **search_args)
    debug = False
    assert not isinstance(domain, SimplifiedDomain)
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    with Verbose(debug):
        instantiated = instantiate_task(task)
        instantiated = convert_instantiated(instantiated)
    return instantiated
def sas_from_pddl(task, debug=False):
    """Instantiate `task` and translate it into a SAS+ task.

    The task's cost-metric flag is carried over onto the SAS+ task.
    Output during translation is shown only when `debug` is true.
    """
    #normalize.normalize(task)
    #sas_task = translate.pddl_to_sas(task)
    with Verbose(debug):
        instantiated = instantiate_task(task)
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = task.use_min_cost_metric  # TODO: are these sometimes not equal?
    return sas_task
def serialized_solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **kwargs):
    """Solve sas_task by serializing its goal: plan for incrementally larger
    prefixes of the goal pairs via plan_subgoals. Returns (plan, cost).
    NOTE(review): the `hierarchy` parameter is accepted but unused here;
    `hierarchy=[]` is also a mutable default.
    """
    # TODO: specify goal grouping / group by predicate & objects
    # TODO: version that solves for all subgoals at once
    start_time = time()
    with Verbose(debug):
        print('\n' + 50*'-' + '\n')
        # One subgoal per goal-pair prefix: [g0], [g0, g1], ...
        subgoal_plan = [sas_task.goal.pairs[:i+1] for i in range(len(sas_task.goal.pairs))]
        plan, cost = plan_subgoals(sas_task, subgoal_plan, temp_dir, **kwargs)
        if clean:
            safe_rm_dir(temp_dir)
        print('Total runtime:', time() - start_time)
    return plan, cost
def solve_from_pddl(domain_pddl, problem_pddl, temp_dir=TEMP_DIR, clean=False, debug=False, **kwargs):
    """Write the PDDL pair to temp_dir, translate, run the search, and return
    the parsed solution. temp_dir is removed afterwards when `clean` is true."""
    # TODO: combine with solve_from_task
    start_time = time()
    with Verbose(debug):
        write_pddl(domain_pddl, problem_pddl, temp_dir)
        #run_translate(temp_dir, verbose)
        translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, debug)
        solution = run_search(temp_dir, debug=debug, **kwargs)
        if clean:
            safe_rm_dir(temp_dir)
        print('Total runtime:', time() - start_time)
    return parse_solution(solution)
def solve_from_task(task, temp_dir=TEMP_DIR, clean=False, debug=False, **kwargs):
    """Translate `task`, run the search in temp_dir, and parse the solution.

    temp_dir is removed afterwards when `clean` is true; all output (including
    the runtime report) is shown only when `debug` is true.
    """
    started = time()
    with Verbose(debug):
        translate_and_write_task(task, temp_dir)
        search_output = run_search(temp_dir, debug=True, **kwargs)
        if clean:
            safe_rm_dir(temp_dir)
        print('Total runtime:', time() - started)
    return parse_solution(search_output)
def relaxed_stream_plan(evaluations, goal_expression, domain, stream_results, negative,
                        unit_costs, unit_efforts, effort_weight, debug=False, **kwargs):
    """Compute a combined (stream + action) plan over optimistically applied
    stream results.

    Partitions stream results into applied/deferred, instantiates and solves
    the optimistic task, then recovers the supporting stream plan. Returns
    (combined_plan, cost), or (None, INF) on failure.
    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    in particular, confirm whether add_optimizer_axioms belongs inside the
    preceding `if` (it is placed at the outer level here).
    """
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    applied_results, deferred_results = partition_results(
        evaluations, stream_results, apply_now=lambda r: not r.external.info.simultaneous)
    stream_domain, result_from_name = add_stream_actions(domain, deferred_results)
    node_from_atom = get_achieving_streams(evaluations, applied_results)
    opt_evaluations = apply_streams(evaluations, applied_results)  # if n.effort < INF
    if any(map(is_optimizer_result, stream_results)):
        goal_expression = augment_goal(stream_domain, goal_expression)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain, unit_costs)  # begin_metric
    with Verbose(debug):
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF
    if (effort_weight is not None) or any(map(is_optimizer_result, applied_results)):
        add_stream_costs(node_from_atom, instantiated, unit_efforts, effort_weight)
    add_optimizer_axioms(stream_results, instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True
    #sas_task = sas_from_pddl(task)
    #action_plan, _ = serialized_solve_from_task(sas_task, debug=debug, **kwargs)
    action_plan, _ = abstrips_solve_from_task(sas_task, debug=debug, **kwargs)
    #action_plan, _ = abstrips_solve_from_task_sequential(sas_task, debug=debug, **kwargs)
    if action_plan is None:
        return None, INF
    # Recover the stream plan that supports the found action plan.
    applied_plan, function_plan = partition_external_plan(recover_stream_plan(
        evaluations, goal_expression, stream_domain, applied_results, action_plan,
        negative, unit_costs))
    deferred_plan, action_plan = partition_plan(action_plan, result_from_name)
    stream_plan = applied_plan + deferred_plan + function_plan
    action_plan = obj_from_pddl_plan(action_plan)
    cost = get_plan_cost(opt_evaluations, action_plan, domain, unit_costs)
    combined_plan = stream_plan + action_plan
    return combined_plan, cost
def solve_sequential(evaluations, goal_exp, domain, unit_costs=False, debug=False, **search_args):
    """Build a task from the evaluations/goal and solve it sequentially.

    Domains with attachments are instantiated and handed to solve_pyplanners;
    all other domains are translated to SAS+ and solved hierarchically.
    """
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    if not has_attachments(domain):
        sas_task = sas_from_pddl(task, debug=debug)
        return abstrips_solve_from_task(sas_task, debug=debug, **search_args)
    with Verbose(debug):
        instantiated = instantiate_task(task)
    return solve_pyplanners(instantiated, **search_args)
def solve_finite(evaluations, goal_exp, domain, unit_costs=False, debug=False, **search_args):
    """Solve a finite planning problem, dispatching on the domain kind.

    SimplifiedDomain -> TFD; domains with attachments -> pyplanners;
    otherwise -> SAS+ translation and hierarchical search.
    Returns (plan, cost) with the PDDL plan converted via obj_from_pddl_plan.
    """
    if isinstance(domain, SimplifiedDomain):
        problem = get_problem_pddl(evaluations, goal_exp, domain.pddl)
        pddl_plan, cost = solve_tfd(domain.pddl, problem, debug=debug)
    else:
        problem = get_problem(evaluations, goal_exp, domain, unit_costs)
        task = task_from_domain_problem(domain, problem)
        if has_attachments(domain):
            with Verbose(debug):
                instantiated = instantiate_task(task)
            pddl_plan, cost = solve_pyplanners(instantiated, **search_args)
        else:
            sas_task = sas_from_pddl(task, debug=debug)
            pddl_plan, cost = abstrips_solve_from_task(sas_task, debug=debug, **search_args)
    plan = obj_from_pddl_plan(pddl_plan)
    return plan, cost
def instantiate_optimizer_axioms(instantiated, evaluations, goal_expression, domain, results):
    """Re-instantiate axioms against a stream-augmented domain and replace
    `instantiated`'s axioms (in place), updating its atoms."""
    # Needed for instantiating axioms before adding stream action effects
    # Otherwise, FastDownward will prune these unreachable axioms
    # TODO: compute this first and then apply the eager actions
    #stream_evaluations = set(map(evaluation_from_fact, get_stream_facts(applied_results)))
    stream_domain, result_from_name = add_stream_actions(domain, results)
    # Need unit_costs=True otherwise obtain an instantiation error
    problem = get_problem(evaluations, goal_expression, stream_domain, unit_costs=True)
    with Verbose():
        new_instantiated = instantiate_task(
            task_from_domain_problem(stream_domain, problem))
    instantiated.axioms[:] = new_instantiated.axioms
    instantiated.atoms.update(new_instantiated.atoms)
def solve_from_task(sas_task, temp_dir=TEMP_DIR, clean=False, debug=False, hierarchy=[], **kwargs):
    """Write sas_task to temp_dir, run the search, and return the parsed
    solution. temp_dir is removed afterwards when `clean` is true.
    NOTE(review): run_search is called with debug=True; output is gated by the
    surrounding Verbose(debug), so this appears intentional — confirm.
    NOTE(review): the `hierarchy` parameter is accepted but unused here;
    `hierarchy=[]` is also a mutable default.
    """
    # TODO: can solve using another planner and then still translate using FastDownward
    # Can apply plan constraints (skeleton constraints) here as well
    start_time = time()
    with Verbose(debug):
        print('\n' + 50*'-' + '\n')
        write_sas_task(sas_task, temp_dir)
        solution = run_search(temp_dir, debug=True, **kwargs)
        if clean:
            safe_rm_dir(temp_dir)
        print('Total runtime:', time() - start_time)
    #for axiom in sas_task.axioms:
    #    # TODO: return the set of axioms here as well
    #    var, value = axiom.effect
    #    print(sas_task.variables.value_names[var])
    #    axiom.dump()
    return parse_solution(solution)
def solve_from_pddl(domain_pddl, problem_pddl, temp_dir=TEMP_DIR, clean=False, debug=False, **search_kwargs):
    """Write the PDDL pair to temp_dir, translate, run the search, and return
    the RAW search output (not parsed — unlike the sibling version that calls
    parse_solution). temp_dir is removed afterwards when `clean` is true."""
    # TODO: combine with solve_from_task
    #return solve_tfd(domain_pddl, problem_pddl)
    start_time = time()
    with Verbose(debug):
        write_pddl(domain_pddl, problem_pddl, temp_dir)
        #run_translate(temp_dir, verbose)
        translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, debug)
        solution = run_search(temp_dir, debug=debug, **search_kwargs)
        if clean:
            safe_rm_dir(temp_dir)
        print('Total runtime: {:.3f}'.format(elapsed_time(start_time)))
    return solution
def translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, verbose):
    """Parse the PDDL domain/problem pair into a task and write its
    translation into temp_dir; translation output is gated by `verbose`."""
    parsed_domain = parse_domain(domain_pddl)
    parsed_problem = parse_problem(parsed_domain, problem_pddl)
    task = task_from_domain_problem(parsed_domain, parsed_problem)
    with Verbose(verbose):
        translate_and_write_task(task, temp_dir)
def relaxed_stream_plan(evaluations, goal_expression, domain, all_results, negative, unit_efforts,
                        effort_weight, max_effort, simultaneous=False, reachieve=True,
                        unit_costs=False, debug=False, **kwargs):
    """Compute a combined (stream + action) plan over optimistically applied
    stream results, optionally re-achieving already-achieved results.

    Returns (combined_plan, cost), or (None, INF) when instantiation or search
    fails.
    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    in particular, confirm whether add_optimizer_axioms belongs inside the
    preceding `if` (it is placed at the outer level here).
    """
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    applied_results, deferred_results = partition_results(
        evaluations, all_results, apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, result_from_name = add_stream_actions(domain, deferred_results)
    opt_evaluations = apply_streams(evaluations, applied_results)  # if n.effort < INF
    if reachieve:
        # Treat already-achieved results as applied and strip them from init.
        achieved_results = {r for r in evaluations.values() if isinstance(r, Result)}
        init_evaluations = {e for e, r in evaluations.items() if r not in achieved_results}
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations  # For clarity
    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(evaluations, applied_results,
                                           unit_efforts=unit_efforts, max_effort=max_effort)
    if using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain, unit_costs)  # begin_metric
    with Verbose(debug):
        instantiated = instantiate_task(
            task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF
    # Snapshot base action costs before efforts are folded in.
    cost_from_action = {action: action.cost for action in instantiated.actions}
    if (effort_weight is not None) or using_optimizers(applied_results):
        add_stream_efforts(node_from_atom, instantiated, effort_weight, unit_efforts=unit_efforts)
    add_optimizer_axioms(all_results, instantiated)
    action_from_name = rename_instantiated_actions(instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True
    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    action_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
    if action_plan is None:
        return None, INF
    action_instances = [action_from_name[name] for name, _ in action_plan]
    cost = get_plan_cost(action_instances, cost_from_action, unit_costs)
    axiom_plans = recover_axioms_plans(instantiated, action_instances)
    applied_plan, function_plan = partition_external_plan(
        recover_stream_plan(evaluations, opt_evaluations, goal_expression, stream_domain,
                            node_from_atom, action_instances, axiom_plans, negative, unit_costs))
    #action_plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)
    action_plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))
    deferred_plan, action_plan = partition_plan(action_plan, result_from_name)
    stream_plan = applied_plan + deferred_plan + function_plan
    combined_plan = stream_plan + action_plan
    return combined_plan, cost
def recover_stream_plan(evaluations, goal_expression, domain, stream_results,
                        action_plan, negative, unit_costs, optimize=True):
    """Recover the stream plan that supports an optimistic action plan.

    Replays action_plan forward through both the optimistic task (stream
    certified facts assumed true) and the real task, extracting the axiom
    plans, function results, and negative-predicate tests needed, then
    extracts (and optionally reschedules) the supporting streams from the
    plan preimage.

    :param evaluations: current evaluations (real state)
    :param goal_expression: the goal formula
    :param domain: the parsed PDDL domain
    :param stream_results: optimistic stream results available for support
    :param action_plan: sequence of (name, args) pairs from the planner
    :param negative: negative (test) predicates
    :param unit_costs: whether costs are uniform (skips function results)
    :param optimize: if True, attempt to reschedule a cheaper stream plan
    :return: list of stream/function results supporting the action plan
    :raises NotImplementedError: on existential preconditions or conditional
        effects involving derived/certified predicates
    :raises RuntimeError: when an action cannot be grounded or applied
    """
    import pddl_to_prolog
    import build_model
    import pddl
    import axiom_rules
    import instantiate
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the cartesian produce of types (slow)
    # Added effects cancel out removed effects

    opt_evaluations = evaluations_from_stream_plan(evaluations, stream_results)
    opt_task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs))
    real_task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs))
    function_assignments = {fact.fluent: fact.expression for fact in opt_task.init  # init_facts
                            if isinstance(fact, pddl.f_expression.FunctionAssignment)}
    type_to_objects = instantiate.get_objects_by_type(opt_task.objects, opt_task.types)
    results_from_head = get_results_from_head(opt_evaluations)

    # Ground each plan step: collect every action schema instantiation that
    # matches the (name, args) pair; applicability is resolved later.
    action_instances = []
    for name, args in action_plan:  # TODO: negative atoms in actions
        candidates = []
        for action in opt_task.actions:
            if action.name != name:
                continue
            if len(action.parameters) != len(args):
                raise NotImplementedError('Existential quantifiers are not currently '
                                          'supported in preconditions: {}'.format(name))
            variable_mapping = {p.name: a for p, a in zip(action.parameters, args)}
            instance = action.instantiate(variable_mapping, set(), MockSet(), type_to_objects,
                                          opt_task.use_min_cost_metric, function_assignments)
            assert (instance is not None)
            candidates.append(((action, args), instance))
        if not candidates:
            raise RuntimeError('Could not find an applicable action {}'.format(name))
        action_instances.append(candidates)
    # The goal acts as a final pseudo-action with no associated (action, args).
    action_instances.append([(None, get_goal_instance(opt_task.goal))])

    axioms_from_name = get_derived_predicates(opt_task.axioms)
    negative_from_name = {n.name: n for n in negative}
    opt_task.actions = []
    opt_state = set(opt_task.init)
    real_state = set(real_task.init)
    preimage_plan = []
    function_plan = set()
    for layer in action_instances:
        for pair, instance in layer:
            # Check only non-derived preconditions here; derived ones are
            # discharged by the axiom plan extracted below.
            nonderived_preconditions = [l for l in instance.precondition if l.predicate not in axioms_from_name]
            #nonderived_preconditions = instance.precondition
            if not conditions_hold(opt_state, nonderived_preconditions):
                continue
            opt_task.init = opt_state
            original_axioms = opt_task.axioms
            axiom_from_action = get_necessary_axioms(instance, original_axioms, negative_from_name)
            opt_task.axioms = []
            opt_task.actions = axiom_from_action.keys()
            # TODO: maybe it would just be better to drop the negative throughout this process until this end
            with Verbose(False):
                model = build_model.compute_model(pddl_to_prolog.translate(opt_task))  # Changes based on init
            opt_task.axioms = original_axioms

            opt_facts = instantiate.get_fluent_facts(opt_task, model) | (opt_state - real_state)
            mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
            instantiated_axioms = instantiate_necessary_axioms(model, real_state, mock_fluent, axiom_from_action)
            with Verbose(False):
                helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms([instance], instantiated_axioms, [])
            axiom_from_atom = get_achieving_axioms(opt_state, helpful_axioms, axiom_init, negative_from_name)
            axiom_plan = []  # Could always add all conditions
            extract_axioms(axiom_from_atom, instance.precondition, axiom_plan)
            # TODO: test if no derived solution

            # TODO: compute required stream facts in a forward way and allow opt facts that are already known required
            for effects in [instance.add_effects, instance.del_effects]:
                # Iterate from the end so that effects.pop(i) does not shift
                # the indices of entries not yet visited.
                # FIX: was enumerate(effects[::-1]), whose indices refer to the
                # reversed copy and therefore mutated the wrong entries.
                for i, (conditions, effect) in reversed(list(enumerate(effects))):
                    if any(c.predicate in axioms_from_name for c in conditions):
                        raise NotImplementedError('Conditional effects cannot currently involve derived predicates')
                    if conditions_hold(real_state, conditions):
                        # Holds in real state
                        effects[i] = ([], effect)
                    elif not conditions_hold(opt_state, conditions):
                        # Does not hold in optimistic state
                        effects.pop(i)
                    else:
                        # TODO: handle more general case where can choose to achieve particular conditional effects
                        raise NotImplementedError('Conditional effects cannot currently involve certified predicates')
            #if any(conditions for conditions, _ in instance.add_effects + instance.del_effects):
            #    raise NotImplementedError('Conditional effects are not currently supported: {}'.format(instance.name))

            # TODO: add axiom init to reset state?
            apply_action(opt_state, instance)
            apply_action(real_state, instance)
            preimage_plan.extend(axiom_plan + [instance])
            if not unit_costs and (pair is not None):
                function_plan.update(extract_function_results(results_from_head, *pair))
            break
        else:
            raise RuntimeError('No action instances are applicable')

    # Facts required before the plan that the real initial state does not provide.
    preimage = plan_preimage(preimage_plan, set())
    preimage -= set(real_task.init)
    negative_preimage = set(filter(lambda a: a.predicate in negative_from_name, preimage))
    preimage -= negative_preimage
    # visualize_constraints(map(fact_from_fd, preimage))
    # TODO: prune with rules
    # TODO: linearization that takes into account satisfied goals at each level
    # TODO: can optimize for all streams & axioms all at once
    for literal in negative_preimage:
        # Renamed from 'negative' to avoid shadowing the parameter.
        negative_external = negative_from_name[literal.predicate]
        instance = negative_external.get_instance(map(obj_from_pddl, literal.args))
        value = not literal.negated
        if instance.enumerated:
            assert (instance.value == value)
        else:
            function_plan.add(PredicateResult(instance, value, opt_index=instance.opt_index))

    node_from_atom = get_achieving_streams(evaluations, stream_results)
    preimage_facts = list(map(fact_from_fd, filter(lambda l: not l.negated, preimage)))
    stream_plan = []
    extract_stream_plan(node_from_atom, preimage_facts, stream_plan)
    if not optimize:  # TODO: detect this based on unique or not
        return stream_plan + list(function_plan)

    # TODO: search in space of partially ordered plans
    # TODO: local optimization - remove one and see if feasible
    # Reschedule: search for a (possibly cheaper) stream plan achieving the
    # same preimage facts; fall back to the greedy extraction on failure.
    reschedule_problem = get_problem(evaluations, And(*preimage_facts), domain, unit_costs=True)
    reschedule_task = task_from_domain_problem(domain, reschedule_problem)
    reschedule_task.actions, stream_result_from_name = get_stream_actions(stream_results)
    new_plan, _ = solve_from_task(reschedule_task, planner='max-astar', debug=False)
    # TODO: investigate admissible heuristics
    if new_plan is None:
        return stream_plan + list(function_plan)
    new_stream_plan = [stream_result_from_name[name] for name, _ in new_plan]
    return new_stream_plan + list(function_plan)
def solve_serialized(initial_problem, stream_info=None, unit_costs=False,
                     unit_efforts=False, verbose=True, retain_facts=True, **kwargs):
    """Solve a PDDLStream problem by serializing its goal into a sequence of
    incremental subgoals and solving each with solve_focused.

    :param initial_problem: (domain_pddl, constant_map, stream_pddl,
        stream_map, init, goal) tuple
    :param stream_info: optional mapping of stream names to StreamInfo;
        defaults to an empty mapping (was a mutable default argument, which
        is shared across calls - now created per call)
    :param retain_facts: if True, carry all certified facts between subgoals;
        otherwise keep only static + fluent + preimage facts
    :return: (plan, cost, certificate), or (None, INF, certificate) on failure
    """
    if stream_info is None:
        stream_info = {}
    # TODO: be careful of CanMove deadends
    domain_pddl, constant_map, stream_pddl, stream_map, init, goal = initial_problem
    _, _, domain, streams = parse_problem(
        initial_problem, stream_info, constraints=None,
        unit_costs=unit_costs, unit_efforts=unit_efforts)
    static_init, _ = partition_facts(domain, init)  # might not be able to reprove static_int
    #global_all, global_preimage = [], []
    global_plan = []
    global_cost = 0
    state = list(init)
    goals = serialize_goal(goal)
    # TODO: instead just track how the true init updates
    for i in range(len(goals)):
        # TODO: option in algorithms to pass in existing facts
        for stream in streams:
            stream.reset()
        # Conjoin all subgoals up to and including the current one.
        goal = And(*goals[:i + 1])
        print('Goal:', str_from_object(goal))
        # No strict need to reuse streams because generator functions
        #local_problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, state, goal)
        local_problem = PDDLProblem(domain_pddl, constant_map, streams, None, state, goal)
        with Verbose(verbose):
            solution = solve_focused(local_problem, stream_info=stream_info,
                                     unit_costs=unit_costs, unit_efforts=unit_efforts,
                                     verbose=True, **kwargs)
        print_solution(solution)
        local_plan, local_cost, local_certificate = solution
        if local_plan is None:
            # TODO: replan upon failure
            global_certificate = Certificate(all_facts={}, preimage_facts=None)
            return None, INF, global_certificate

        if retain_facts:
            state = local_certificate.all_facts
        else:
            _, fluent_facts = partition_facts(domain, state)
            state = static_init + fluent_facts + local_certificate.preimage_facts  # TODO: include functions
        #print('State:', state)
        # TODO: indicate when each fact is used
        # TODO: record failed facts

        global_plan.extend(local_plan)  # TODO: compute preimage of the executed plan
        global_cost += local_cost

        static_state, _ = partition_facts(domain, state)
        #global_all.extend(partition_facts(domain, local_certificate.all_facts)[0])
        #global_preimage.extend(static_state)
        print('Static:', static_state)
        # Advance the state by simulating the executed local plan.
        state = apply_actions(domain, state, local_plan, unit_costs=unit_costs)
        print(SEPARATOR)
        #user_input('Continue?')
        # TODO: could also just test the goal here
        # TODO: constrain future plan skeletons
    # NOTE(review): all_facts={} mirrors the failure branch - presumably a
    # placeholder certificate; confirm whether real facts should be returned.
    global_certificate = Certificate(all_facts={}, preimage_facts=None)
    return global_plan, global_cost, global_certificate
def sas_from_pddl(task, debug=False):
    """Instantiate a PDDL task and translate it into a SAS+ task.

    :param task: the PDDL task to ground and translate
    :param debug: if True, show the translator's verbose output
    :return: the resulting SAS+ task
    """
    with Verbose(debug):
        instantiated = instantiate_task(task)
        translated = sas_from_instantiated(instantiated)
    return translated
def plan_streams(evaluations, goal_expression, domain, all_results, negative,
                 effort_weight, max_effort, simultaneous=False, reachieve=True,
                 debug=False, **kwargs):
    """Jointly plan streams and actions in the optimistic problem.

    Returns (combined_plan, cost) where combined_plan is the recovered stream
    plan followed by the action plan, or (None, INF) when instantiation or
    search fails.
    """
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    #reachieve = reachieve and not using_optimizers(all_results)
    # Partition results: apply eagerly now vs. defer as explicit stream actions.
    applied_results, deferred_results = partition_results(
        evaluations, all_results,
        apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, deferred_from_name = add_stream_actions(domain, deferred_results)

    if reachieve and not using_optimizers(all_results):
        # Also re-plan the streams whose results were already achieved.
        achieved_results = {n.result for n in evaluations.values() if isinstance(n.result, Result)}
        init_evaluations = {e for e, n in evaluations.items() if n.result not in achieved_results}
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations  # For clarity
    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(evaluations, applied_results,  # TODO: apply to all_results?
                                           max_effort=max_effort)
    # Optimistic state: every atom reachable via the achieving streams.
    opt_evaluations = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()}
    if using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain)  # begin_metric
    with Verbose(debug):
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF

    if using_optimizers(all_results):
        # TODO: reachieve=False when using optimizers or should add applied facts
        instantiate_optimizer_axioms(instantiated, evaluations, goal_expression, domain, all_results)
    # Record each action's original cost before effort terms are mixed in.
    cost_from_action = {action: action.cost for action in instantiated.actions}
    add_stream_efforts(node_from_atom, instantiated, effort_weight)
    if using_optimizers(applied_results):
        add_optimizer_effects(instantiated, node_from_atom)
    action_from_name = rename_instantiated_actions(instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True  # enable the cost metric in the SAS+ task

    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    action_plan, raw_cost = solve_from_task(sas_task, debug=debug, **kwargs)
    #print(raw_cost)
    if action_plan is None:
        return None, INF

    action_instances = [action_from_name[name] for name, _ in action_plan]
    simplify_conditional_effects(instantiated.task, action_instances)
    stream_plan, action_instances = recover_simultaneous(
        applied_results, negative, deferred_from_name, action_instances)
    cost = get_plan_cost(action_instances, cost_from_action)
    axiom_plans = recover_axioms_plans(instantiated, action_instances)

    # NOTE(review): this recover_stream_plan takes 9 positional args and
    # differs from the 8-parameter definition earlier in this file -
    # presumably imported from another module; verify which binding is in scope.
    stream_plan = recover_stream_plan(evaluations, stream_plan, opt_evaluations,
                                      goal_expression, stream_domain, node_from_atom,
                                      action_instances, axiom_plans, negative)
    #action_plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)
    action_plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))
    combined_plan = stream_plan + action_plan
    return combined_plan, cost