def extract_axiom_plan(task, goals, negative_from_name, static_state=set()):
    import pddl_to_prolog
    import build_model
    import instantiate
    # TODO: only reinstantiate the negative axioms
    axioms_from_name = get_derived_predicates(task.axioms)
    derived_goals = {l for l in goals if l.predicate in axioms_from_name}
    axiom_from_action = get_necessary_axioms(derived_goals, task.axioms, negative_from_name)
    if not axiom_from_action:
        return []
    conditions_from_predicate = defaultdict(set)
    for axiom, mapping in axiom_from_action.values():
        for literal in get_literals(axiom.condition):
            conditions_from_predicate[literal.predicate].add(literal.rename_variables(mapping))

    original_init = task.init
    original_actions = task.actions
    original_axioms = task.axioms
    # TODO: retrieve initial state based on if helpful
    task.init = {atom for atom in task.init if is_useful_atom(atom, conditions_from_predicate)}
    # TODO: store map from predicate to atom
    task.actions = axiom_from_action.keys()
    task.axioms = []
    # TODO: maybe it would just be better to drop the negative throughout this process until this end
    with Verbose(False):
        model = build_model.compute_model(pddl_to_prolog.translate(task))  # Changes based on init
    task.actions = original_actions
    task.axioms = original_axioms

    opt_facts = instantiate.get_fluent_facts(task, model) | (task.init - static_state)
    mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
    instantiated_axioms = instantiate_necessary_axioms(model, static_state, mock_fluent, axiom_from_action)
    axiom_plan = extraction_helper(task.init, instantiated_axioms, derived_goals, negative_from_name)
    task.init = original_init
    return axiom_plan
def replace_derived(task, negative_init, action_instances):
    import pddl_to_prolog
    import build_model
    import axiom_rules
    import pddl

    original_actions = task.actions
    original_init = task.init
    task.actions = []
    function_assignments = {f.fluent: f.expression for f in task.init
                            if isinstance(f, pddl.f_expression.FunctionAssignment)}
    task.init = (set(task.init) | {a.negate() for a in negative_init}) - set(function_assignments)
    for instance in action_instances:
        #axiom_plan = extract_axiom_plan(task, instance, negative_from_name={}) # TODO: refactor this

        # TODO: just instantiate task?
        with Verbose(False):
            model = build_model.compute_model(pddl_to_prolog.translate(task))  # Changes based on init
        # fluent_facts = instantiate.get_fluent_facts(task, model)
        fluent_facts = MockSet()
        instantiated_axioms = instantiate_axioms(model, task.init, fluent_facts)
        goal_list = []  # TODO: include the goal?
        with Verbose(False):  # TODO: helpful_axioms prunes axioms that are already true (e.g. not Unsafe)
            helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms([instance], instantiated_axioms, goal_list)
        axiom_from_atom = get_achieving_axioms(task.init | negative_init | set(axiom_init),
                                               helpful_axioms)  # negated_from_name=negated_from_name)
        axiom_plan = []
        extract_axioms(axiom_from_atom, instance.precondition, axiom_plan)

        substitute_derived(axiom_plan, instance)
        assert(is_applicable(task.init, instance))
        apply_action(task.init, instance)
    task.actions = original_actions
    task.init = original_init
def extract_axiom_plan(task, action_instance, negative_from_name, static_state=set()):
    import pddl_to_prolog
    import build_model
    import axiom_rules
    import instantiate

    axioms_from_name = get_derived_predicates(task.axioms)
    derived_preconditions = {l for l in action_instance.precondition if l.predicate in axioms_from_name}
    nonderived_preconditions = {l for l in action_instance.precondition if l not in derived_preconditions}
    if not conditions_hold(task.init, nonderived_preconditions):
        return None
    axiom_from_action = get_necessary_axioms(action_instance, task.axioms, negative_from_name)
    if not axiom_from_action:
        return []
    conditions_from_predicate = defaultdict(set)
    for axiom, mapping in axiom_from_action.values():
        for literal in get_literals(axiom.condition):
            conditions_from_predicate[literal.predicate].add(literal.rename_variables(mapping))

    original_init = task.init
    original_actions = task.actions
    original_axioms = task.axioms
    task.init = {atom for atom in task.init if is_useful_atom(atom, conditions_from_predicate)}
    # TODO: store map from predicate to atom
    task.actions = axiom_from_action.keys()
    task.axioms = []
    # TODO: maybe it would just be better to drop the negative throughout this process until this end
    with Verbose(False):
        model = build_model.compute_model(pddl_to_prolog.translate(task))  # Changes based on init
    task.actions = original_actions
    task.axioms = original_axioms

    opt_facts = instantiate.get_fluent_facts(task, model) | (task.init - static_state)
    mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
    instantiated_axioms = instantiate_necessary_axioms(model, static_state, mock_fluent, axiom_from_action)

    goal_list = []
    with Verbose(False):
        helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms(
            [action_instance], instantiated_axioms, goal_list)
    axiom_init = set(axiom_init)
    axiom_effects = {axiom.effect for axiom in helpful_axioms}
    #assert len(axiom_effects) == len(axiom_init)
    for pre in list(derived_preconditions) + list(axiom_effects):
        if (pre not in axiom_init) and (pre.negate() not in axiom_init):
            axiom_init.add(pre.positive().negate())

    axiom_from_atom = get_achieving_axioms(task.init | axiom_init, helpful_axioms, negative_from_name)
    axiom_plan = []  # Could always add all conditions
    success = extract_axioms(axiom_from_atom, derived_preconditions, axiom_plan, negative_from_name)
    task.init = original_init
    #if not success:
    #    return None
    return axiom_plan
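# Hedged usage sketch, not part of the sources above: a hypothetical helper that
# replays a ground action sequence, calling extract_axiom_plan per instance in the
# same way as the commented-out call in replace_derived. extract_axiom_plan returns
# None when a non-derived precondition fails, [] when no axioms are needed, and
# otherwise a list of supporting axiom instances. A full replay would also advance
# task.init with apply_action between instances, as replace_derived does.
def recover_axiom_plans(task, action_instances):
    full_plan = []
    for instance in action_instances:
        axiom_plan = extract_axiom_plan(task, instance, negative_from_name={})
        if axiom_plan is None:
            return None  # some non-derived precondition does not hold in task.init
        full_plan.extend(axiom_plan + [instance])
    return full_plan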
def explore(task):
    if DEBUG:
        print("DEBUG: Exploring Task Step [1]: create logic program 'prog'")
    prog = pddl_to_prolog.translate(task)
    # prog.dump()
    if DEBUG:
        print("DEBUG: Exploring Task Step [2]: build model 'model'")
    model = build_model.compute_model(prog)
    # print("instantiate.explore task dumps task")
    # task.dump()
    if DEBUG:
        print("DEBUG: Exploring Task Step [3]: instantiate model")
    with timers.timing("Completing instantiation"):
        return instantiate(task, model)
def explore(task, max_num_actions, pg_generator):
    if pg_generator is None:
        prog = pddl_to_prolog.translate(task)
        model = build_model.compute_model(prog)
        with timers.timing("Completing instantiation"):
            return instantiate(task, model)
    else:
        while True:
            relaxed_reachable, fluent_facts, instantiated_actions, reachable_action_parameters = next(
                pg_generator)
            if len(instantiated_actions) >= max_num_actions:
                return (relaxed_reachable, fluent_facts, instantiated_actions, [],
                        reachable_action_parameters)
def pop(self):
    result = self.queue[self.queue_pos]
    self.queue_pos += 1
    return result

def popped_elements(self):
    return self.queue[:self.queue_pos]


def compute_model(prog):
    rules = convert_rules(prog)
    unifier = Unifier(rules)
    # unifier.dump()
    queue = Queue([fact.atom for fact in prog.facts])
    print("Starting instantiation [%d rules]..." % len(rules))
    while queue:
        next_atom = queue.pop()
        matches = unifier.unify(next_atom)
        for rule, cond_index in matches:
            rule.update_index(next_atom, cond_index)
            rule.fire(next_atom, cond_index, queue.push)
    return queue.queue


if __name__ == "__main__":
    import pddl_to_prolog
    print("Parsing...")
    task = pddl.open()
    print("Writing rules...")
    prog = pddl_to_prolog.translate(task)
    print("Computing model...")
    for atom in compute_model(prog):
        print(atom)
def recover_stream_plan(evaluations, goal_expression, domain, stream_results, action_plan, negative,
                        unit_costs, optimize=True):
    import pddl_to_prolog
    import build_model
    import pddl
    import axiom_rules
    import instantiate
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the cartesian product of types (slow)
    # Added effects cancel out removed effects

    opt_evaluations = evaluations_from_stream_plan(evaluations, stream_results)
    opt_task = task_from_domain_problem(
        domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs))
    real_task = task_from_domain_problem(
        domain, get_problem(evaluations, goal_expression, domain, unit_costs))
    function_assignments = {fact.fluent: fact.expression for fact in opt_task.init  # init_facts
                            if isinstance(fact, pddl.f_expression.FunctionAssignment)}
    type_to_objects = instantiate.get_objects_by_type(opt_task.objects, opt_task.types)
    results_from_head = get_results_from_head(opt_evaluations)

    action_instances = []
    for name, args in action_plan:  # TODO: negative atoms in actions
        candidates = []
        for action in opt_task.actions:
            if action.name != name:
                continue
            if len(action.parameters) != len(args):
                raise NotImplementedError('Existential quantifiers are not currently '
                                          'supported in preconditions: {}'.format(name))
            variable_mapping = {p.name: a for p, a in zip(action.parameters, args)}
            instance = action.instantiate(variable_mapping, set(), MockSet(), type_to_objects,
                                          opt_task.use_min_cost_metric, function_assignments)
            assert (instance is not None)
            candidates.append(((action, args), instance))
        if not candidates:
            raise RuntimeError('Could not find an applicable action {}'.format(name))
        action_instances.append(candidates)
    action_instances.append([(None, get_goal_instance(opt_task.goal))])

    axioms_from_name = get_derived_predicates(opt_task.axioms)
    negative_from_name = {n.name: n for n in negative}
    opt_task.actions = []
    opt_state = set(opt_task.init)
    real_state = set(real_task.init)
    preimage_plan = []
    function_plan = set()
    for layer in action_instances:
        for pair, instance in layer:
            nonderived_preconditions = [l for l in instance.precondition
                                        if l.predicate not in axioms_from_name]
            #nonderived_preconditions = instance.precondition
            if not conditions_hold(opt_state, nonderived_preconditions):
                continue
            opt_task.init = opt_state
            original_axioms = opt_task.axioms
            axiom_from_action = get_necessary_axioms(instance, original_axioms, negative_from_name)
            opt_task.axioms = []
            opt_task.actions = axiom_from_action.keys()
            # TODO: maybe it would just be better to drop the negative throughout this process until this end
            with Verbose(False):
                model = build_model.compute_model(pddl_to_prolog.translate(opt_task))  # Changes based on init
            opt_task.axioms = original_axioms

            opt_facts = instantiate.get_fluent_facts(opt_task, model) | (opt_state - real_state)
            mock_fluent = MockSet(lambda item: (item.predicate in negative_from_name) or (item in opt_facts))
            instantiated_axioms = instantiate_necessary_axioms(model, real_state, mock_fluent, axiom_from_action)
            with Verbose(False):
                helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms([instance], instantiated_axioms, [])
            axiom_from_atom = get_achieving_axioms(opt_state, helpful_axioms, axiom_init, negative_from_name)
            axiom_plan = []  # Could always add all conditions
            extract_axioms(axiom_from_atom, instance.precondition, axiom_plan)
            # TODO: test if no derived solution

            # TODO: compute required stream facts in a forward way and allow opt facts that are already known required
            for effects in [instance.add_effects, instance.del_effects]:
                for i, (conditions, effect) in enumerate(effects[::-1]):
                    if any(c.predicate in axioms_from_name for c in conditions):
                        raise NotImplementedError('Conditional effects cannot currently involve derived predicates')
                    if conditions_hold(real_state, conditions):
                        # Holds in real state
                        effects[i] = ([], effect)
                    elif not conditions_hold(opt_state, conditions):
                        # Does not hold in optimistic state
                        effects.pop(i)
                    else:
                        # TODO: handle more general case where can choose to achieve particular conditional effects
                        raise NotImplementedError('Conditional effects cannot currently involve certified predicates')
            #if any(conditions for conditions, _ in instance.add_effects + instance.del_effects):
            #    raise NotImplementedError('Conditional effects are not currently supported: {}'.format(instance.name))

            # TODO: add axiom init to reset state?
            apply_action(opt_state, instance)
            apply_action(real_state, instance)
            preimage_plan.extend(axiom_plan + [instance])
            if not unit_costs and (pair is not None):
                function_plan.update(extract_function_results(results_from_head, *pair))
            break
        else:
            raise RuntimeError('No action instances are applicable')

    preimage = plan_preimage(preimage_plan, set())
    preimage -= set(real_task.init)
    negative_preimage = set(filter(lambda a: a.predicate in negative_from_name, preimage))
    preimage -= negative_preimage
    # visualize_constraints(map(fact_from_fd, preimage))
    # TODO: prune with rules
    # TODO: linearization that takes into account satisfied goals at each level
    # TODO: can optimize for all streams & axioms all at once
    for literal in negative_preimage:
        negative = negative_from_name[literal.predicate]
        instance = negative.get_instance(map(obj_from_pddl, literal.args))
        value = not literal.negated
        if instance.enumerated:
            assert (instance.value == value)
        else:
            function_plan.add(PredicateResult(instance, value, opt_index=instance.opt_index))

    node_from_atom = get_achieving_streams(evaluations, stream_results)
    preimage_facts = list(map(fact_from_fd, filter(lambda l: not l.negated, preimage)))
    stream_plan = []
    extract_stream_plan(node_from_atom, preimage_facts, stream_plan)
    if not optimize:  # TODO: detect this based on unique or not
        return stream_plan + list(function_plan)

    # TODO: search in space of partially ordered plans
    # TODO: local optimization - remove one and see if feasible
    reschedule_problem = get_problem(evaluations, And(*preimage_facts), domain, unit_costs=True)
    reschedule_task = task_from_domain_problem(domain, reschedule_problem)
    reschedule_task.actions, stream_result_from_name = get_stream_actions(stream_results)
    new_plan, _ = solve_from_task(reschedule_task, planner='max-astar', debug=False)
    # TODO: investigate admissible heuristics
    if new_plan is None:
        return stream_plan + list(function_plan)
    new_stream_plan = [stream_result_from_name[name] for name, _ in new_plan]
    return new_stream_plan + list(function_plan)
def explore(task):
    prog = pddl_to_prolog.translate(task)
    model = build_model.compute_model(prog)
    return instantiate(task, model)
def pop(self):
    result = self.queue[self.queue_pos]
    self.queue_pos += 1
    return result

def popped_elements(self):
    return self.queue[:self.queue_pos]


def compute_model(prog):
    rules = convert_rules(prog)
    unifier = Unifier(rules)
    # unifier.dump()
    queue = Queue([fact.atom for fact in prog.facts])
    print("Starting instantiation [%d rules]..." % len(rules))
    while queue:
        next_atom = queue.pop()
        matches = unifier.unify(next_atom)
        for rule, cond_index in matches:
            rule.update_index(next_atom, cond_index)
            rule.fire(next_atom, cond_index, queue.push)
    return queue.queue


if __name__ == "__main__":
    import pddl_to_prolog
    print("Parsing...")
    task = pddl.open()
    print("Writing rules...")
    prog = pddl_to_prolog.translate(task)
    print("Computing model...")
    for atom in compute_model(prog):
        print(atom)
def explore(task):
    prog = pddl_to_prolog.translate(task)
    model = build_model.compute_model(prog)
    with timers.timing("Completing instantiation"):
        return instantiate(task, model)
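# Hedged usage sketch (assumption, not taken from the sources above): explore()
# expects a parsed and normalized task, mirroring the parsing and normalization
# steps of the main() routine further below. The file names are placeholders, and
# the unpacked names follow the tuple returned by the partial-grounding explore()
# variant earlier.
if __name__ == "__main__":
    import pddl_parser
    import normalize
    task = pddl_parser.open(domain_filename="domain.pddl", task_filename="problem.pddl")
    normalize.normalize(task)
    relaxed_reachable, fluent_facts, actions, axioms, reachable_action_parameters = explore(task)
    print("Relaxed reachable: %s, %d atoms, %d actions" %
          (relaxed_reachable, len(fluent_facts), len(actions)))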
def create_partial_grounding_generator(task, action_prioritizer):
    prog = pddl_to_prolog.translate(task)
    model = build_model.partial_grounding_compute_model(
        prog, action_prioritizer=action_prioritizer)
    return incremental_instantiate(task, model)
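# Hedged usage sketch (assumption, not taken from the sources above): the
# generator returned here plausibly serves as the pg_generator consumed via
# next(pg_generator) in the explore(task, max_num_actions, pg_generator) variant
# shown earlier, yielding progressively larger groundings until enough actions
# have been instantiated. 'prioritizer' is a placeholder for the caller-supplied
# action_prioritizer object.
#
# pg_generator = create_partial_grounding_generator(task, prioritizer)
# result = explore(task, max_num_actions=10000, pg_generator=pg_generator)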
def _explore(task, add_fluents=set()):
    prog = pddl_to_prolog.translate(task, add_fluents)
    model = build_model.compute_model(prog)
    with timers.timing("Completing instantiation"):
        return instantiate(task, model, add_fluents)
def main():
    timer = timers.Timer()
    with timers.timing("Parsing", True):
        task = pddl_parser.open(
            domain_filename=options.domain, task_filename=options.task)

    print('Processing task', task.task_name)

    with timers.timing("Normalizing task"):
        normalize.normalize(task)

    if options.unit_cost:
        transform_into_unit_cost(task)

    perform_sanity_checks(task)

    if options.build_datalog_model:
        print("Building Datalog model...")
        prog = pddl_to_prolog.translate(task, options.keep_action_predicates, options.add_inequalities)
        prog.rename_free_variables()
        if not options.keep_duplicated_rules:
            prog.remove_duplicated_rules()
        with open(options.datalog_file, 'w') as f:
            #prog.dump(f)
            prog.dump(f)

    with timers.timing("Compiling types into unary predicates"):
        g = compile_types.compile_types(task)

    with timers.timing("Checking static predicates"):
        static_pred = static_predicates.check(task)

    assert isinstance(task.goal, pddl.Conjunction) or \
        isinstance(task.goal, pddl.Atom) or \
        isinstance(task.goal, pddl.NegatedAtom), \
        "Goal is not conjunctive."

    if options.ground_state_representation:
        with timers.timing("Generating complete initial state"):
            reachability.generate_overapproximated_reachable_atoms(task, g)
        get_initial_state_size(static_pred, task)
        if options.verbose_data:
            print("%s %s: initial state size %d : time %s" % (
                os.path.basename(os.path.dirname(options.domain)),
                os.path.basename(options.task),
                len(task.init), timer))
        test_if_experiment(options.test_experiment)

    # Preprocess a dict of supertypes for every type from the TypeGraph
    types_dict = get_types_dict(g)

    # Sets output file from options
    if os.path.isfile(options.output_file):
        print("WARNING: file %s already exists, it will be overwritten" % options.output_file)
    output = open(options.output_file, "w")
    sys.stdout = output

    remove_functions_from_initial_state(task)
    remove_predicates.remove_unused_predicate_symbols(task)

    if is_trivially_unsolvable(task, static_pred):
        output_trivially_unsolvable_task()
        sys.exit(0)

    remove_static_predicates_from_goal(task, static_pred)

    print_names_and_representation(task.domain_name, task.task_name)
    type_index = {}
    print_types(task, type_index)
    predicate_index = {}
    print_predicates(task, predicate_index, type_index)
    object_index = {}
    print_objects(task, object_index, type_index, types_dict)
    atom_index = {}
    print_initial_state(task, atom_index, object_index, predicate_index)
    print_goal(task, atom_index, object_index, predicate_index)
    print_action_schemas(task, object_index, predicate_index, type_index)

    test_if_experiment(options.test_experiment)
    return