Example #1
def extract_order(evaluations, preimage):
    stream_results = set()
    processed = set(map(fact_from_fd, preimage))
    queue = deque(processed)
    while queue:
        fact = queue.popleft()
        result = evaluations[evaluation_from_fact(fact)]
        if result is None:
            continue
        stream_results.add(result)
        for fact2 in result.instance.get_domain():
            if fact2 not in processed:
                queue.append(fact2)

    orders = set()
    for result in stream_results:
        for fact in result.instance.get_domain():
            result2 = evaluations[evaluation_from_fact(fact)]
            if result2 is not None:
                orders.add((result2, result))

    ordered_results = []
    for result in topological_sort(stream_results, orders):
        if isinstance(result, SynthStreamResult):
            ordered_results.extend(result.decompose())
        else:
            ordered_results.append(result)
    return ordered_results
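The topological_sort helper used above is not shown here. As an illustration only (not the pddlstream implementation), a minimal Kahn's-algorithm sketch over the (producer, consumer) pairs collected in orders could look like this; topological_sort_sketch is a hypothetical name:

from collections import defaultdict, deque

def topological_sort_sketch(nodes, edges):
    # edges are (u, v) pairs meaning u must come before v
    predecessors = defaultdict(set)
    successors = defaultdict(set)
    for u, v in edges:
        predecessors[v].add(u)
        successors[u].add(v)
    queue = deque(node for node in nodes if not predecessors[node])
    ordering = []
    while queue:
        node = queue.popleft()
        ordering.append(node)
        for successor in successors[node]:
            predecessors[successor].discard(node)
            if not predecessors[successor]:
                queue.append(successor)
    assert len(ordering) == len(nodes), 'Cycle among stream results'
    return ordering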
Example #2
def shorten_stream_plan(evaluations, stream_plan, target_facts):
    all_subgoals = set(target_facts) | set(
        flatten(r.instance.get_domain() for r in stream_plan))
    evaluation_subgoals = set(
        filter(evaluations.__contains__, map(evaluation_from_fact,
                                             all_subgoals)))
    open_subgoals = set(
        filter(lambda f: evaluation_from_fact(f) not in evaluations,
               all_subgoals))
    results_from_fact = {}
    for result in stream_plan:
        for fact in result.get_certified():
            results_from_fact.setdefault(fact, []).append(result)

    for removed_result in reversed(stream_plan):  # TODO: only do in order?
        certified_subgoals = open_subgoals & set(
            removed_result.get_certified())
        if not certified_subgoals:  # Could combine with following
            new_stream_plan = stream_plan[:]
            new_stream_plan.remove(removed_result)
            return new_stream_plan
        if all(2 <= len(results_from_fact[fact])
               for fact in certified_subgoals):
            node_from_atom = get_achieving_streams(
                evaluation_subgoals,
                set(stream_plan) - {removed_result})
            if all(fact in node_from_atom for fact in target_facts):
                new_stream_plan = []
                extract_stream_plan(node_from_atom, target_facts,
                                    new_stream_plan)
                return new_stream_plan
    return None
Example #3
def create_visualizations(evaluations, stream_plan, iteration):
    # TODO: place it in the temp_dir?
    # TODO: decompose any joint streams
    for result in stream_plan:
        create_synthesizer_visualizations(result, iteration)
    filename = ITERATION_TEMPLATE.format(iteration)
    # visualize_stream_plan(stream_plan, path)

    constraints = set()  # TODO: approximates needed facts using produced ones
    for stream in stream_plan:
        constraints.update(
            filter(lambda f: evaluation_from_fact(f) not in evaluations,
                   stream.get_certified()))
    print('Constraints:', str_from_object(constraints))
    visualize_constraints(constraints,
                          os.path.join(CONSTRAINT_NETWORK_DIR, filename))

    from pddlstream.retired.synthesizer import decompose_stream_plan
    decomposed_plan = decompose_stream_plan(stream_plan)
    if len(decomposed_plan) != len(stream_plan):
        visualize_stream_plan(decomposed_plan,
                              os.path.join(STREAM_PLAN_DIR, filename))

    #visualize_stream_plan_bipartite(stream_plan, os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
    visualize_stream_plan(stream_plan,
                          os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
Example #4
def replan_with_optimizers(evaluations, external_plan, domain, optimizers):
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return None
    optimizers = list(
        filter(lambda s: isinstance(s, ComponentStream), optimizers))
    if not optimizers:
        return None
    stream_plan, function_plan = partition_external_plan(external_plan)
    free_parameters = {o for r in stream_plan for o in r.output_objects}
    #free_parameters = {o for r in stream_plan for o in r.output_objects if isinstance(o, OptimisticObject)}
    initial_evaluations = {
        e: n
        for e, n in evaluations.items() if n.result == INIT_EVALUATION
    }
    #initial_evaluations = evaluations
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(
            filter(
                lambda f: evaluation_from_fact(f) not in initial_evaluations,
                result.get_certified()))

    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizers, initial_evaluations,
                              free_parameters, visited_facts, new_results)
    # TODO: ensure correct ordering
    new_results = list(
        filter(lambda r: isinstance(r, ComponentStream), new_results))

    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)

    optimizer_results = []
    for optimizer in {get_optimizer(r)
                      for r in new_results
                      }:  # None is like a unique optimizer
        relevant_results = [
            r for r in new_results if get_optimizer(r) == optimizer
        ]
        optimizer_results.extend(
            combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))

    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    optimizer_plan = reschedule_stream_plan(initial_evaluations,
                                            goal_facts,
                                            copy.copy(domain),
                                            (stream_plan + optimizer_results),
                                            unique_binding=True)
    if not is_plan(optimizer_plan):
        return None
    return optimizer_plan + function_plan
Example #5
def optimistic_process_streams(evaluations,
                               streams,
                               double_bindings=None,
                               unit_efforts=False,
                               max_effort=INF):
    # TODO: iteratively increase max_effort to bias towards easier streams to start
    # TODO: cut off instantiation using max_effort
    # TODO: make each repeated optimistic object have ordinal more effort
    # TODO: enforce that the search uses one optimistic object before claiming the next (like in my first version)
    # Can even fall back on converting streams to test streams
    # Additive max effort in case something requires a long sequence to achieve
    results = []
    #effort_from_fact = {}
    instantiator = Instantiator(evaluations, streams)
    while instantiator.stream_queue:
        instance = instantiator.stream_queue.popleft()
        if not is_double_bound(instance, double_bindings):
            continue
        effort = get_instance_effort(instance, unit_efforts)
        #op = sum # max | sum
        #total_effort = effort + op(effort_from_fact[fact] for fact in instance.get_domain())
        if max_effort <= effort:
            continue
        for stream_result in instance.next_optimistic():
            for fact in stream_result.get_certified():
                #effort_from_fact[fact] = min(effort_from_fact.get(fact, INF), effort)
                instantiator.add_atom(evaluation_from_fact(fact))
            results.append(
                stream_result)  # TODO: don't readd if all repeated facts?
    return results
Example #6
def retrace_instantiation(fact, streams, evaluations, free_parameters,
                          visited_facts, planned_results):
    # Makes two assumptions:
    # 1) Each stream achieves a "primary" fact that uses all of its inputs + outputs
    # 2) Outputs are only free parameters (no constants)
    if (evaluation_from_fact(fact) in evaluations) or (fact in visited_facts):
        return
    visited_facts.add(fact)
    for stream in streams:
        for cert in stream.certified:
            if get_prefix(fact) == get_prefix(cert):
                mapping = get_mapping(get_args(cert),
                                      get_args(fact))  # Should be same anyways
                if not all(p in mapping
                           for p in (stream.inputs + stream.outputs)):
                    # TODO: assumes another effect is sufficient for binding
                    # Create arbitrary objects for inputs/outputs that aren't mentioned
                    # Can lead to incorrect ordering
                    continue

                input_objects = tuple(mapping[p] for p in stream.inputs)
                output_objects = tuple(mapping[p] for p in stream.outputs)
                if not all(out in free_parameters for out in output_objects):
                    # Can only bind if free
                    continue
                instance = stream.get_instance(input_objects)
                for new_fact in instance.get_domain():
                    retrace_instantiation(new_fact, streams, evaluations,
                                          free_parameters, visited_facts,
                                          planned_results)
                planned_results.append(instance.get_result(output_objects))
Example #7
def stream_plan_complexity(evaluations, stream_plan, stream_calls, complexity_op=COMPLEXITY_OP):
    if not is_plan(stream_plan):
        return INF
    # TODO: difference between a result having a particular complexity and the next result having something
    #optimistic_facts = {}
    optimistic_facts = {fact: evaluations[evaluation_from_fact(fact)].complexity
                        for fact in stream_plan_preimage(stream_plan)}
    result_complexities = []
    #complexity = 0
    for i, result in enumerate(stream_plan):
        # if result.external.get_complexity(num_calls=INF) == 0: # TODO: skip if true
        result_complexity = complexity_op([0] + [optimistic_facts[fact]
                                                 #optimistic_complexity(evaluations, optimistic_facts, fact)
                                                 for fact in result.get_domain()])
        # if stream_calls is None:
        #     num_calls = result.instance.num_calls
        # else:
        num_calls = stream_calls[i]
        result_complexity += result.external.get_complexity(num_calls)
        result_complexities.append(result_complexity)
        #complexity = complexity_op(complexity, result_complexity)
        for fact in result.get_certified():
            if fact not in optimistic_facts:
                optimistic_facts[fact] = result_complexity
    complexity = complexity_op([0] + result_complexities)
    return complexity
Example #8
def stream_plan_complexity(evaluations, stream_plan, stream_calls=None):
    if not is_plan(stream_plan):
        return INF
    # TODO: difference between a result having a particular complexity and the next result having something
    optimistic_facts = {}
    total_complexity = 0
    for i, result in enumerate(stream_plan):
        result_complexity = 0
        for fact in result.get_domain():
            evaluation = evaluation_from_fact(fact)
            if evaluation in evaluations:
                fact_complexity = evaluations[evaluation].complexity
            else:
                fact_complexity = optimistic_facts[fact]
            result_complexity = COMPLEXITY_OP(result_complexity,
                                              fact_complexity)
        if stream_calls is None:
            result_complexity += result.instance.num_calls + 1
        elif i < len(stream_calls):
            result_complexity += stream_calls[i] + 1
        else:
            result_complexity += 1
        for fact in result.get_certified():
            if fact not in optimistic_facts:
                optimistic_facts[fact] = result_complexity
        total_complexity = COMPLEXITY_OP(total_complexity, result_complexity)
    return total_complexity
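To make the bookkeeping concrete, the following self-contained sketch can be pasted in the same module as the function above. The stub classes and the identity evaluation_from_fact stand-in are hypothetical, not pddlstream APIs, and it assumes COMPLEXITY_OP = max; the second result consumes a fact certified by the first, so its complexity builds on it:

COMPLEXITY_OP = max
INF = float('inf')

def is_plan(plan):
    return plan is not None

def evaluation_from_fact(fact):  # stand-in: facts double as evaluations
    return fact

class StubNode(object):
    def __init__(self, complexity):
        self.complexity = complexity

class StubInstance(object):
    def __init__(self, num_calls=0):
        self.num_calls = num_calls

class StubResult(object):
    def __init__(self, domain, certified):
        self._domain, self._certified = domain, certified
        self.instance = StubInstance()
    def get_domain(self):
        return self._domain
    def get_certified(self):
        return self._certified

evaluations = {'a': StubNode(complexity=0)}  # fact 'a' is already evaluated
r1 = StubResult(domain=['a'], certified=['b'])
r2 = StubResult(domain=['b'], certified=['c'])
print(stream_plan_complexity(evaluations, [r1, r2]))  # max(0, 0) + 1 = 1, then max(0, 1) + 1 = 2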
Example #9
def retrace_instantiation(fact, streams, evaluations, visited_facts,
                          planned_results):
    if (evaluation_from_fact(fact) in evaluations) or (fact in visited_facts):
        return
    visited_facts.add(fact)
    for stream in streams:
        for cert in stream.certified:
            if get_prefix(fact) == get_prefix(cert):
                mapping = get_mapping(get_args(cert),
                                      get_args(fact))  # Should be same anyways
                if not all(p in mapping
                           for p in (stream.inputs + stream.outputs)):
                    # TODO: assumes another effect is sufficient for binding
                    # Create arbitrary objects for inputs/outputs that aren't mentioned
                    # Can lead to incorrect ordering
                    continue

                input_objects = tuple(mapping[p] for p in stream.inputs)
                output_objects = tuple(mapping[p] for p in stream.outputs)
                if not all(
                        isinstance(out, OptimisticObject)
                        for out in output_objects):
                    # Can only bind if free
                    continue
                instance = stream.get_instance(input_objects)
                for new_fact in instance.get_domain():
                    retrace_instantiation(new_fact, streams, evaluations,
                                          visited_facts, planned_results)
                result = instance.get_result(output_objects)
                planned_results.append(result)
Example #10
def add_certified(evaluations, result):
    new_evaluations = []
    for fact in result.get_certified():
        evaluation = evaluation_from_fact(fact)
        if evaluation not in evaluations:
            evaluations[evaluation] = result
            new_evaluations.append(evaluation)
    return new_evaluations
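As a usage sketch only, pasted alongside the function above (the FactResult stub and identity evaluation_from_fact stand-in are hypothetical, not pddlstream APIs): only previously unseen facts are recorded and returned, so a repeated call with the same result yields nothing new.

def evaluation_from_fact(fact):  # stand-in: facts double as evaluations
    return fact

class FactResult(object):
    def __init__(self, certified):
        self._certified = certified
    def get_certified(self):
        return self._certified

evaluations = {}
result = FactResult([('safe', 'p0'), ('safe', 'p1')])
print(add_certified(evaluations, result))  # both facts are new -> two evaluations
print(add_certified(evaluations, result))  # already recorded -> []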
Example #11
 def static_opt_gen_fn(*input_values):
     instance = stream.get_instance(objects_from_values(input_values))
     if all(
             evaluation_from_fact(f) in evaluations
             for f in instance.get_domain()):
         return
     for output_values in stream.opt_gen_fn(*input_values):
         yield output_values
Example #12
 def static_fn(*input_values):
     instance = stream.get_instance(objects_from_values(input_values))
     if all(
             evaluation_from_fact(f) in evaluations
             for f in instance.get_domain()):
         return None
     return tuple(
         FutureValue(stream.name, input_values, o) for o in stream.outputs)
Example #13
def optimistic_process_instance(instantiator, instance):
    for result in instance.next_optimistic():
        new_facts = False
        complexity = instantiator.compute_complexity(instance)
        for fact in result.get_certified():
            new_facts |= instantiator.add_atom(evaluation_from_fact(fact), complexity)
        if isinstance(result, FunctionResult) or new_facts:
            yield result
Example #14
def add_facts(evaluations, facts, result=None):
    new_evaluations = []
    for fact in facts:
        evaluation = evaluation_from_fact(fact)
        if evaluation not in evaluations:
            evaluations[evaluation] = result
            new_evaluations.append(evaluation)
    return new_evaluations
Example #15
 def enable(self, evaluations, domain):
     if not self.disabled:
         return
     #if self._disabled_axiom is not None:
     #    self.external.disabled_instances.remove(self)
     #    domain.axioms.remove(self._disabled_axiom)
     #    self._disabled_axiom = None
     #super(StreamInstance, self).enable(evaluations, domain) # TODO: strange infinite loop bug if enabled?
     evaluations.pop(evaluation_from_fact(self.get_blocked_fact()), None)
Example #16
def replan_with_optimizers(evaluations, external_plan, domain, externals):
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return external_plan
    optimizer_streams = list(
        filter(lambda s: type(s) in [VariableStream, ConstraintStream],
               externals))
    if not optimizer_streams:
        return external_plan
    stream_plan, function_plan = partition_external_plan(external_plan)
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(
            filter(lambda f: evaluation_from_fact(f) not in evaluations,
                   result.get_certified()))

    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizer_streams, evaluations,
                              visited_facts, new_results)
    variable_results = list(
        filter(lambda r: isinstance(r.external, VariableStream), new_results))
    constraint_results = list(
        filter(lambda r: isinstance(r.external, ConstraintStream), new_results))
    new_results = variable_results + constraint_results  # TODO: ensure correct ordering

    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)

    optimizer_results = []
    for optimizer in {get_optimizer(r)
                      for r in new_results
                      }:  # None is like a unique optimizer
        relevant_results = [
            r for r in new_results if get_optimizer(r) == optimizer
        ]
        optimizer_results.extend(
            combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))

    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    combined_plan = reschedule_stream_plan(evaluations,
                                           goal_facts,
                                           copy.copy(domain),
                                           (stream_plan + optimizer_results),
                                           unique_binding=True,
                                           unit_efforts=True)
    if not is_plan(combined_plan):
        return external_plan
    return combined_plan + function_plan
Example #17
def optimistic_process_instance(instantiator, instance, verbose=False):
    for result in instance.next_optimistic():
        if verbose:
            print(result) # TODO: make a debug tools that reports the optimistic streams
        new_facts = False
        complexity = instantiator.compute_complexity(instance)
        for fact in result.get_certified():
            new_facts |= instantiator.add_atom(evaluation_from_fact(fact), complexity)
        if isinstance(result, FunctionResult) or new_facts:
            yield result
Example #18
 def accelerate_best_bindings(self):
     # TODO: reset the values for old streams
     for skeleton in self.skeletons:
         for _, result in sorted(skeleton.best_binding.bound_results.items(), key=itemgetter(0)):
             # TODO: just accelerate the facts within the plan preimage
             result.call_index = 0 # Pretends the fact was first
             new_complexity = result.compute_complexity(self.evaluations)
             for fact in result.get_certified():
                 evaluation = evaluation_from_fact(fact)
                 if new_complexity < self.evaluations[evaluation].complexity:
                     self.evaluations[evaluation] = EvaluationNode(new_complexity, result)
Example #19
def evaluations_from_stream_plan(evaluations, stream_results, max_effort=INF):
    opt_evaluations = set(evaluations)
    for result in stream_results:
        assert(not result.instance.disabled)
        assert(not result.instance.enumerated)
        domain = set(map(evaluation_from_fact, result.instance.get_domain()))
        assert(domain <= opt_evaluations)
        opt_evaluations.update(map(evaluation_from_fact, result.get_certified()))
    node_from_atom = get_achieving_streams(evaluations, stream_results)
    result_from_evaluation = {evaluation_from_fact(f): n.result
                              for f, n in node_from_atom.items() if n.effort < max_effort}
    return result_from_evaluation
Example #20
 def accelerate_best_bindings(self):
     # TODO: reset the values for old streams
     for skeleton in self.skeletons:
         for _, result in sorted(
                 skeleton.best_binding.bound_results.items(),
                 key=itemgetter(0)):
             # TODO: just accelerate the facts within the plan preimage
             for fact in result.get_certified():
                 evaluation = evaluation_from_fact(fact)
                 if evaluation in self.evaluations:  # In the event the fact is returned twice
                     del self.evaluations[evaluation]
             result.call_index = 0  # Pretends the fact was first
             add_certified(self.evaluations, result)
Example #21
def optimistic_process_streams(evaluations, streams, double_bindings=None):
    instantiator = Instantiator(evaluations, streams)
    stream_results = []
    while instantiator.stream_queue:
        stream_instance = instantiator.stream_queue.popleft()
        if not is_double_bound(stream_instance, double_bindings):
            continue
        for stream_result in stream_instance.next_optimistic():
            for fact in stream_result.get_certified():
                instantiator.add_atom(evaluation_from_fact(fact))
            stream_results.append(
                stream_result)  # TODO: don't readd if all repeated facts?
    return stream_results
Example #22
def create_visualizations(evaluations, stream_plan, iteration):
    # TODO: place it in the temp_dir?
    # TODO: decompose any joint streams
    for result in stream_plan:
        if isinstance(result, SynthStreamResult):
            create_synthesizer_visualizations(result, iteration)
    filename = ITERATION_TEMPLATE.format(POST_PROCESS if iteration is None else iteration)
    # visualize_stream_plan(stream_plan, path)
    constraints = set() # TODO: approximates needed facts using produced ones
    for stream in stream_plan:
        constraints.update(filter(lambda f: evaluation_from_fact(f) not in evaluations, stream.get_certified()))
    visualize_constraints(constraints, os.path.join(CONSTRAINT_NETWORK_DIR, filename))
    visualize_stream_plan_bipartite(decompose_stream_plan(stream_plan), os.path.join(STREAM_PLAN_DIR, filename))
    visualize_stream_plan_bipartite(stream_plan, os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
Example #23
def process_immediate_stream_plan(evaluations, stream_plan, disabled, verbose):
    new_evaluations = []
    for opt_result in stream_plan:
        instance = opt_result.instance
        if set(map(evaluation_from_fact,
                   instance.get_domain())) <= evaluations:
            disable_stream_instance(instance, disabled)
            for result in instance.next_results(verbose=verbose):
                for fact in result.get_certified():
                    evaluation = evaluation_from_fact(fact)
                    #evaluations.add(evaluation) # To be used on next iteration
                    new_evaluations.append(evaluation)
    evaluations.update(new_evaluations)
    return new_evaluations
Example #24
def evaluate_functions(evaluations, stream_results):
    stream_plan = []
    for opt_result in stream_results:
        instance = opt_result.instance
        # TODO: this seems like a bug (it also previously was ... <= evaluations)
        if isinstance(instance.external, Function) and \
                all(evaluation_from_fact(f) in evaluations for f in instance.get_domain()):
            new_results, new_facts = instance.next_results()
            assert not new_facts
            for result in new_results:
                add_certified(evaluations, result)
        else:
            stream_plan.append(opt_result)
    return stream_plan
Example #25
def plan_streams(evaluations, goal_expression, domain, all_results, negative, effort_weight, max_effort,
                 simultaneous=False, reachieve=True, replan_actions=set(), **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    #reachieve = reachieve and not using_optimizers(all_results)
    #for i, result in enumerate(all_results):
    #    print(i, result, result.get_effort())
    applied_results, deferred_results = partition_results(
        evaluations, all_results, apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, deferred_from_name = add_stream_actions(domain, deferred_results)

    if reachieve and not using_optimizers(all_results):
        achieved_results = {n.result for n in evaluations.values() if isinstance(n.result, Result)}
        init_evaluations = {e for e, n in evaluations.items() if n.result not in achieved_results}
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations # For clarity

    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(evaluations, applied_results, # TODO: apply to all_results?
                                           max_effort=max_effort)
    opt_evaluations = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()}
    if UNIVERSAL_TO_CONDITIONAL or using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)

    temporal = isinstance(stream_domain, SimplifiedDomain)
    optimistic_fn = solve_optimistic_temporal if temporal else solve_optimistic_sequential
    instantiated, action_instances, temporal_plan, cost = optimistic_fn(
        domain, stream_domain, applied_results, all_results, opt_evaluations,
        node_from_atom, goal_expression, effort_weight, **kwargs)
    if action_instances is None:
        return FAILED, FAILED, cost

    action_instances, axiom_plans = recover_axioms_plans(instantiated, action_instances)
    # TODO: extract out the minimum set of conditional effects that are actually required
    #simplify_conditional_effects(instantiated.task, action_instances)
    stream_plan, action_instances = recover_simultaneous(
        applied_results, negative, deferred_from_name, action_instances)

    action_plan = transform_plan_args(map(pddl_from_instance, action_instances), obj_from_pddl)
    replan_step = min([step+1 for step, action in enumerate(action_plan)
                       if action.name in replan_actions] or [len(action_plan)+1]) # step after action application

    stream_plan, opt_plan = recover_stream_plan(evaluations, stream_plan, opt_evaluations, goal_expression, stream_domain,
        node_from_atom, action_instances, axiom_plans, negative, replan_step)
    if temporal_plan is not None:
        # TODO: handle deferred streams
        assert all(isinstance(action, Action) for action in opt_plan.action_plan)
        opt_plan.action_plan[:] = temporal_plan
    return stream_plan, opt_plan, cost
Example #26
    def disable(self, evaluations, domain):
        #assert not self.disabled
        super(StreamInstance, self).disable(evaluations, domain)
        if not self.external.is_fluent(): # self.fluent_facts:
            if self.external.is_negated() and not self.successes:
                evaluations[evaluation_from_fact(self.get_blocked_fact())] = INTERNAL
            return

        if self.axiom_predicate is not None:
            return
        index = len(self.external.disabled_instances)
        self.external.disabled_instances.append(self)
        self.axiom_predicate = '_ax{}-{}'.format(self.external.blocked_predicate, index)
        evaluations[evaluation_from_fact(self.get_blocked_fact())] = INTERNAL
        # TODO: allow reporting back which components lead to failure

        import pddl
        static_fact = (self.axiom_predicate,) + self.external.inputs
        preconditions = [static_fact] + list(self.fluent_facts)
        self.disabled_axiom = pddl.Axiom(name=self.external.blocked_predicate,
                                         parameters=make_parameters(self.external.inputs),
                                         num_external_parameters=len(self.external.inputs),
                                         condition=make_preconditions(preconditions))
        domain.axioms.append(self.disabled_axiom)
Example #27
def sequence_results(evaluations, combined_results):
    current_facts = set()
    for result in combined_results:
        current_facts.update(filter(lambda f: evaluation_from_fact(f) in evaluations, result.get_domain()))
    combined_plan = []
    while combined_results:
        for result in combined_results:
            if set(result.get_domain()) <= current_facts:
                combined_plan.append(result)
                current_facts.update(result.get_certified())
                combined_results.remove(result)
                break
        else: # TODO: can also just try one cluster and return
            return None
    return combined_plan
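As an illustrative usage sketch, pasted alongside the function above (the StubResult class and identity evaluation_from_fact stand-in are hypothetical, not pddlstream APIs): results are greedily emitted once their domain facts are available, and None is returned when no remaining result can be applied.

def evaluation_from_fact(fact):  # stand-in: facts double as evaluations
    return fact

class StubResult(object):
    def __init__(self, name, domain, certified):
        self.name, self._domain, self._certified = name, domain, certified
    def get_domain(self):
        return self._domain
    def get_certified(self):
        return self._certified
    def __repr__(self):
        return self.name

evaluations = {'a': None}  # fact 'a' is already known
r1 = StubResult('r1', domain=['b'], certified=['c'])
r2 = StubResult('r2', domain=['a'], certified=['b'])
print(sequence_results(evaluations, [r1, r2]))  # -> [r2, r1]
print(sequence_results(evaluations, [StubResult('r3', ['missing'], [])]))  # -> None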
Example #28
def evaluations_from_stream_plan(evaluations, stream_plan):
    result_from_evaluation = {e: None for e in evaluations}
    opt_evaluations = set(evaluations)
    for result in stream_plan:
        if isinstance(result, StreamResult):
            effort = result.instance.get_effort()
            if effort == INF:
                continue
        assert (not result.instance.disabled)
        assert (not result.instance.enumerated)
        domain = set(map(evaluation_from_fact, result.instance.get_domain()))
        if not (domain <= opt_evaluations):
            continue
        for fact in result.get_certified():
            evaluation = evaluation_from_fact(fact)
            if evaluation not in result_from_evaluation:
                result_from_evaluation[evaluation] = result
                opt_evaluations.add(evaluation)
    return result_from_evaluation
Example #29
def simultaneous_stream_plan(evaluations,
                             goal_expression,
                             domain,
                             stream_results,
                             negated,
                             unit_costs=True,
                             **kwargs):
    if negated:
        raise NotImplementedError()
    function_evaluations = {e: None for e in evaluations}
    for result in stream_results:
        if isinstance(result, FunctionResult):
            for fact in result.get_certified():
                function_evaluations[evaluation_from_fact(fact)] = result
    new_domain, stream_result_from_name = add_stream_actions(
        domain, stream_results)
    combined_plan, _ = solve_finite(function_evaluations,
                                    goal_expression,
                                    new_domain,
                                    unit_costs=unit_costs,
                                    **kwargs)
    if combined_plan is None:
        return None, None, INF  # TODO: return plan cost
    stream_plan = []
    action_plan = []
    for name, args in combined_plan:
        if name in stream_result_from_name:
            stream_plan.append(stream_result_from_name[name])
        else:
            action_plan.append((name, args))

    action_cost = len(action_plan)
    function_plan = set()
    if not unit_costs:
        action_cost = 0
        results_from_head = get_results_from_head(function_evaluations)
        for name, args in action_plan:
            action = find(lambda a: a.name == name, domain.actions)
            pddl_args = tuple(map(pddl_from_object, args))
            function_plan.update(
                extract_function_results(results_from_head, action, pddl_args))
            action_cost += get_cost(domain, results_from_head, name, args)
    return (stream_plan + list(function_plan)), action_plan, action_cost
Example #30
def parse_problem(problem, stream_info={}):
    # TODO: just return the problem if already written programmatically
    domain_pddl, constant_map, stream_pddl, stream_map, init, goal = problem
    domain = parse_domain(domain_pddl)
    if len(domain.types) != 1:
        raise NotImplementedError('Types are not currently supported')
    obj_from_constant = parse_constants(domain, constant_map)
    streams = parse_stream_pddl(stream_pddl, stream_map, stream_info)
    evaluations = OrderedDict(
        (evaluation_from_fact(obj_from_value_expression(f)),
         INITIAL_EVALUATION) for f in init)
    goal_expression = obj_from_value_expression(goal)
    check_problem(domain, streams, obj_from_constant)
    parse_goal(goal_expression, domain)  # Just to check that it parses
    #normalize_domain_goal(domain, goal_expression)
    # TODO: refactor the following?
    compile_to_exogenous(evaluations, domain, streams)
    compile_fluent_streams(domain, streams)
    enforce_simultaneous(domain, streams)
    return evaluations, goal_expression, domain, streams