def replan_with_optimizers(evaluations, external_plan, domain, optimizers):
    # Try to replace the sampled streams in external_plan with optimizer streams
    # that certify the same goal facts; returns None if no such plan is found.
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return None
    optimizers = list(
        filter(lambda s: isinstance(s, ComponentStream), optimizers))
    if not optimizers:
        return None
    stream_plan, function_plan = partition_external_plan(external_plan)
    free_parameters = {o for r in stream_plan for o in r.output_objects}
    #free_parameters = {o for r in stream_plan for o in r.output_objects if isinstance(o, OptimisticObject)}
    initial_evaluations = {
        e: n
        for e, n in evaluations.items() if n.result == INIT_EVALUATION
    }
    #initial_evaluations = evaluations
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(
            filter(
                lambda f: evaluation_from_fact(f) not in initial_evaluations,
                result.get_certified()))

    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizers, initial_evaluations,
                              free_parameters, visited_facts, new_results)
    # TODO: ensure correct ordering
    new_results = list(
        filter(lambda r: isinstance(r, ComponentStream), new_results))

    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)

    optimizer_results = []
    # None acts as a unique optimizer
    for optimizer in {get_optimizer(r) for r in new_results}:
        relevant_results = [r for r in new_results if get_optimizer(r) == optimizer]
        optimizer_results.extend(combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))

    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    optimizer_plan = reschedule_stream_plan(initial_evaluations,
                                            goal_facts,
                                            copy.copy(domain),
                                            (stream_plan + optimizer_results),
                                            unique_binding=True)
    if not is_plan(optimizer_plan):
        return None
    return optimizer_plan + function_plan
Example 2
def create_disable_axiom(external_plan, use_parameters=True):
    # Build an axiom that derives UNSATISFIABLE whenever the (parameterized) stream
    # facts of this failed plan hold, blocking the infeasible constraint skeleton.
    # TODO: express constraint mutexes upfront
    # TODO: investigate why use_parameters=True hurts satisfaction
    # TODO: better mix optimization and sampling by determining a splitting point
    # TODO: be careful about the shared objects as parameters
    # TODO: need to block functions & predicates
    stream_plan, _ = partition_external_plan(external_plan)
    assert stream_plan
    #component_plan = stream_plan
    [unsatisfiable] = stream_plan[-1].get_unsatisfiable()
    component_plan = list(flatten(
        r.get_components() for r in stream_plan[:-1])) + list(unsatisfiable)
    increase_free_variables(component_plan)
    #output_objects = get_free_objects(component_plan) if use_parameters else set()
    constraints = [result.stream_fact for result in component_plan]
    optimistic_objects = {
        o
        for f in constraints for o in get_args(f)
        if isinstance(o, OptimisticObject)
    }  # TODO: consider case when variables are free
    #assert optimistic_objects <= output_objects
    #free_objects = list(optimistic_objects & output_objects) # TODO: need to return all variables
    free_objects = optimistic_objects
    parameters = ['?p{}'.format(i) for i in range(len(free_objects))]
    param_from_obj = get_mapping(free_objects, parameters)
    preconditions = substitute_expression(constraints, param_from_obj)
    effect = (UNSATISFIABLE, )
    axiom = make_axiom(parameters, preconditions, effect)
    #axiom.dump()
    return axiom
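The substitution step above maps each free optimistic object to a fresh axiom parameter. A minimal standalone sketch of that idea, using invented fact tuples rather than pddlstream's OptimisticObject/substitute_expression machinery:

# Toy illustration of turning free objects into axiom parameters (names invented;
# this is not the pddlstream API).
constraints = [('collision-free', '#o1', '#o2'), ('stable', '#o1')]
free_objects = ['#o1', '#o2']
parameters = ['?p{}'.format(i) for i in range(len(free_objects))]
param_from_obj = dict(zip(free_objects, parameters))
preconditions = [tuple(param_from_obj.get(arg, arg) for arg in fact) for fact in constraints]
# preconditions == [('collision-free', '?p0', '?p1'), ('stable', '?p0')]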
Example 3
    def __init__(self, optimizer, external_plan):
        optimizer.streams.append(self)
        self.optimizer = optimizer
        self.stream_plan, self.function_plan = partition_external_plan(
            external_plan)
        inputs, domain, outputs, certified, functions, self.macro_from_micro, \
            self.input_objects, self.output_objects, self.fluent_facts = get_cluster_values(external_plan)

        # Warm-start the optimizer with the values of any outputs that have already been sampled.
        hint = {}
        for result, mapping in safe_zip(self.stream_plan,
                                        self.macro_from_micro):
            if isinstance(result, StreamResult):
                for param, obj in safe_zip(result.external.outputs,
                                           result.output_objects):
                    if isinstance(obj, Object):
                        hint[mapping[param]] = obj.value
        self.objectives = certified + functions
        gen_fn = get_list_gen_fn(optimizer.procedure,
                                 inputs,
                                 outputs,
                                 self.objectives,
                                 hint=hint)
        #assert len(self.get_cluster_plans()) == 1
        super(OptimizerStream,
              self).__init__(optimizer.name, gen_fn, inputs, domain, outputs,
                             certified, optimizer.info)
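The hint loop above records output values that were already sampled, keyed by the renamed (macro) parameters, so the optimizer can warm-start from them. A rough self-contained sketch with invented data, not the pddlstream API:

# Hypothetical data: each output parameter with its sampled value (None if still
# optimistic) and the renaming into the combined optimizer's parameters.
stream_outputs = [('?q', 1.25), ('?t', None)]
macro_from_micro = {'?q': '?x0', '?t': '?x1'}
hint = {macro_from_micro[param]: value
        for param, value in stream_outputs if value is not None}
# hint == {'?x0': 1.25}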
Example 4
def create_disable_axiom(external_plan):
    # TODO: express constraint mutexes upfront
    stream_plan, _ = partition_external_plan(external_plan)
    #print(stream_plan)
    parameters = []
    preconditions = [result.stream_fact for result in stream_plan]
    derived = (UNSATISFIABLE, )
    # TODO: add parameters in the event that the same skeleton can be blocked twice
    return make_axiom(parameters, preconditions, derived)
Example 5
def replan_with_optimizers(evaluations, external_plan, domain, externals):
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return external_plan
    optimizer_streams = list(
        filter(lambda s: type(s) in [VariableStream, ConstraintStream],
               externals))
    if not optimizer_streams:
        return external_plan
    stream_plan, function_plan = partition_external_plan(external_plan)
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(
            filter(lambda f: evaluation_from_fact(f) not in evaluations,
                   result.get_certified()))

    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizer_streams, evaluations,
                              visited_facts, new_results)
    # filter() returns an iterator in Python 3, so materialize lists before concatenating.
    variable_results = [r for r in new_results if isinstance(r.external, VariableStream)]
    constraint_results = [r for r in new_results if isinstance(r.external, ConstraintStream)]
    new_results = variable_results + constraint_results  # TODO: ensure correct ordering

    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)

    optimizer_results = []
    # None acts as a unique optimizer
    for optimizer in {get_optimizer(r) for r in new_results}:
        relevant_results = [r for r in new_results if get_optimizer(r) == optimizer]
        optimizer_results.extend(combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))

    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    combined_plan = reschedule_stream_plan(evaluations,
                                           goal_facts,
                                           copy.copy(domain),
                                           (stream_plan + optimizer_results),
                                           unique_binding=True,
                                           unit_efforts=True)
    if not is_plan(combined_plan):
        return external_plan
    return combined_plan + function_plan
Example 6
    def __init__(self, optimizer, external_plan):
        optimizer.streams.append(self)
        self.optimizer = optimizer
        self.stream_plan, self.function_plan = partition_external_plan(
            external_plan)
        inputs, domain, outputs, certified, functions, self.macro_from_micro, \
            self.input_objects, self.output_objects, self.fluent_facts = get_cluster_values(external_plan)
        gen_fn = get_gen_fn(optimizer.procedure, inputs, outputs,
                            certified | functions)
        super(OptimizerStream,
              self).__init__(optimizer.name, gen_fn, inputs, domain, outputs,
                             certified, optimizer.info)
Example 7
    def __init__(self, optimizer, external_plan):
        optimizer.streams.append(self)
        self.optimizer = optimizer
        self.stream_plan, self.function_plan = partition_external_plan(external_plan)
        inputs, domain, outputs, certified, functions, self.macro_from_micro, \
            self.input_objects, self.output_objects, self.fluent_facts = get_cluster_values(external_plan)

        hint = self.create_hint()
        self.objectives = certified + functions
        gen_fn = get_list_gen_fn(optimizer.procedure, inputs, outputs, self.objectives, hint=hint)
        #assert len(self.get_cluster_plans()) == 1
        super(OptimizerStream, self).__init__(optimizer.name, gen_fn, inputs, domain, outputs,
                                              certified, optimizer.info)
Example 8
def combine_optimizers(evaluations, external_plan):
    if not is_plan(external_plan):
        return external_plan
    stream_plan, function_plan = partition_external_plan(external_plan)
    optimizers = {get_optimizer(r)
                  for r in stream_plan}  # None is like a unique optimizer
    if len(optimizers - {None}) == 0:
        return external_plan

    print('Constraint plan: {}'.format(external_plan))
    combined_results = []
    for optimizer in optimizers:
        relevant_results = [
            r for r in stream_plan if get_optimizer(r) == optimizer
        ]
        combined_results.extend(
            combine_optimizer_plan(relevant_results, function_plan))
    return sequence_results(evaluations, combined_results + function_plan)
Example 9
def combine_optimizers(evaluations, external_plan):
    if not is_plan(external_plan):
        return external_plan
    stream_plan, function_plan = partition_external_plan(external_plan)
    optimizers = {get_optimizer(r)
                  for r in stream_plan}  # None is like a unique optimizer
    if len(optimizers - {None}) == 0:
        return external_plan

    print('Constraint plan: {}'.format(external_plan))
    combined_results = []
    for optimizer in optimizers:
        relevant_results = [
            r for r in stream_plan if get_optimizer(r) == optimizer
        ]
        combined_results.extend(
            combine_optimizer_plan(relevant_results, function_plan))
    combined_results.extend(function_plan)

    current_facts = set()
    for result in combined_results:
        current_facts.update(
            filter(lambda f: evaluation_from_fact(f) in evaluations,
                   result.get_domain()))
    combined_plan = []
    while combined_results:
        for result in combined_results:
            if set(result.get_domain()) <= current_facts:
                combined_plan.append(result)
                current_facts.update(result.get_certified())
                combined_results.remove(result)
                break
        else:  # TODO: can also just try one cluster and return
            raise RuntimeError('Unable to order the remaining optimizer results')
            #return None
    return combined_plan
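The while-loop above greedily orders the combined results so that each result's domain facts are achieved before it is applied. A standalone sketch of the same idea on invented toy data (plain dicts, not pddlstream Result objects):

results = [
    {'name': 's2', 'domain': {'b'}, 'certified': {'c'}},
    {'name': 's1', 'domain': {'a'}, 'certified': {'b'}},
]
current_facts = {'a'}  # facts already available
ordered = []
while results:
    for result in results:
        if result['domain'] <= current_facts:
            ordered.append(result['name'])
            current_facts |= result['certified']
            results.remove(result)
            break
    else:
        raise RuntimeError('Remaining results cannot be ordered')
# ordered == ['s1', 's2']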
Example 10
def relaxed_stream_plan(evaluations,
                        goal_expression,
                        domain,
                        all_results,
                        negative,
                        unit_efforts,
                        effort_weight,
                        max_effort,
                        simultaneous=False,
                        reachieve=True,
                        unit_costs=False,
                        debug=False,
                        **kwargs):
    # Build an optimistic planning problem from the stream results, solve it, and
    # recover the stream plan that supports the resulting action plan.
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    applied_results, deferred_results = partition_results(
        evaluations,
        all_results,
        apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, result_from_name = add_stream_actions(
        domain, deferred_results)
    opt_evaluations = apply_streams(evaluations,
                                    applied_results)  # if n.effort < INF

    if reachieve:
        achieved_results = {
            r
            for r in evaluations.values() if isinstance(r, Result)
        }
        init_evaluations = {
            e
            for e, r in evaluations.items() if r not in achieved_results
        }
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations  # For clarity
    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(evaluations,
                                           applied_results,
                                           unit_efforts=unit_efforts,
                                           max_effort=max_effort)
    if using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain,
                                                    goal_expression)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain,
                          unit_costs)  # begin_metric

    with Verbose(debug):
        instantiated = instantiate_task(
            task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF
    cost_from_action = {action: action.cost for action in instantiated.actions}
    if (effort_weight is not None) or using_optimizers(applied_results):
        add_stream_efforts(node_from_atom,
                           instantiated,
                           effort_weight,
                           unit_efforts=unit_efforts)
    add_optimizer_axioms(all_results, instantiated)
    action_from_name = rename_instantiated_actions(instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True

    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    action_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
    if action_plan is None:
        return None, INF
    action_instances = [action_from_name[name] for name, _ in action_plan]
    cost = get_plan_cost(action_instances, cost_from_action, unit_costs)
    axiom_plans = recover_axioms_plans(instantiated, action_instances)

    applied_plan, function_plan = partition_external_plan(
        recover_stream_plan(evaluations, opt_evaluations, goal_expression,
                            stream_domain, node_from_atom, action_instances,
                            axiom_plans, negative, unit_costs))
    #action_plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)
    action_plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))

    deferred_plan, action_plan = partition_plan(action_plan, result_from_name)
    stream_plan = applied_plan + deferred_plan + function_plan
    combined_plan = stream_plan + action_plan
    return combined_plan, cost