Example #1
def recover_stream_plan(evaluations, current_plan, opt_evaluations,
                        goal_expression, domain, node_from_atom, action_plan,
                        axiom_plans, negative, replan_step):
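    """Recover the stream results needed to support action_plan, deciding which
    results must be evaluated eagerly and which can be deferred to later steps.

    Returns (eager_plan, OptPlan(combined_plan, preimage_facts)), where
    combined_plan interleaves the deferred stream results with the ground actions.
    """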
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by taking the Cartesian product of types (slow)
    # Added effects cancel out removed effects
    # TODO: node_from_atom is a subset of opt_evaluations (only missing functions)
    real_task = task_from_domain_problem(
        domain, get_problem(evaluations, goal_expression, domain))
    opt_task = task_from_domain_problem(
        domain, get_problem(opt_evaluations, goal_expression, domain))
    negative_from_name = {
        external.blocked_predicate: external
        for external in negative if external.is_negated()
    }
    real_states, full_plan = recover_negative_axioms(real_task, opt_task,
                                                     axiom_plans, action_plan,
                                                     negative_from_name)
    function_plan = compute_function_plan(opt_evaluations, action_plan)

    full_preimage = plan_preimage(full_plan, [])  # Does not contain the stream preimage!
    negative_preimage = set(
        filter(lambda a: a.predicate in negative_from_name, full_preimage))
    negative_plan = convert_negative(negative_preimage, negative_from_name,
                                     full_preimage, real_states)
    function_plan.update(negative_plan)
    # TODO: OrderedDict for these plans

    # TODO: this assumes that actions do not negate preimage goals
    positive_preimage = {
        l
        for l in (set(full_preimage) - real_states[0] - negative_preimage)
        if not l.negated
    }
    steps_from_fact = {
        fact_from_fd(l): full_preimage[l]
        for l in positive_preimage
    }
    last_from_fact = {
        fact: min(steps)
        for fact, steps in steps_from_fact.items() if get_prefix(fact) != EQ
    }
    #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
    # visualize_constraints(map(fact_from_fd, target_facts))

    for result, step in function_plan.items():
        for fact in result.get_domain():
            last_from_fact[fact] = min(step, last_from_fact.get(fact, INF))

    # TODO: get_steps_from_stream
    stream_plan = []
    last_from_stream = dict(function_plan)
    for result in current_plan:  # + negative_plan?
        # TODO: actually compute when these are needed + dependencies
        last_from_stream[result] = 0
        if isinstance(result.external, Function) or (result.external
                                                     in negative):
            if len(action_plan) != replan_step:
                # TODO: deferring negated optimizers
                raise NotImplementedError()
            # Prevents these results from being pruned
            function_plan[result] = replan_step
        else:
            stream_plan.append(result)

    curr_evaluations = evaluations_from_stream_plan(evaluations,
                                                    stream_plan,
                                                    max_effort=None)
    extraction_facts = set(last_from_fact) - set(
        map(fact_from_evaluation, curr_evaluations))
    extract_stream_plan(node_from_atom, extraction_facts, stream_plan)

    # Recomputing due to postprocess_stream_plan
    stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan,
                                          last_from_fact)
    node_from_atom = get_achieving_streams(evaluations,
                                           stream_plan,
                                           max_effort=None)
    fact_sequence = [set(result.get_domain())
                     for result in stream_plan] + [extraction_facts]
    for facts in reversed(fact_sequence):  # Bellman-Ford-style relaxation over stream dependencies
        for fact in facts:  # could flatten instead
            result = node_from_atom[fact].result
            if result is None:
                continue
            step = last_from_fact[fact] if result.is_deferrable() else 0
            last_from_stream[result] = min(step,
                                           last_from_stream.get(result, INF))
            for domain_fact in result.instance.get_domain():
                last_from_fact[domain_fact] = min(
                    last_from_stream[result],
                    last_from_fact.get(domain_fact, INF))
    stream_plan.extend(function_plan)

    # Useful to recover the correct DAG
    partial_orders = set()
    for child in stream_plan:
        # TODO: account for fluent objects
        for fact in child.get_domain():
            parent = node_from_atom[fact].result
            if parent is not None:
                partial_orders.add((parent, child))
    #stream_plan = topological_sort(stream_plan, partial_orders)

    bound_objects = set()
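    # Results needed at step 0, or that cannot be deferred given the objects
    # bound so far, force themselves and all of their ancestors to be eager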
    for result in stream_plan:
        if (last_from_stream[result] == 0) or not result.is_deferrable(
                bound_objects=bound_objects):
            for ancestor in get_ancestors(result, partial_orders) | {result}:
                # TODO: this might change descendants of ancestor. Perform in a while loop.
                last_from_stream[ancestor] = 0
                if isinstance(ancestor, StreamResult):
                    bound_objects.update(out for out in ancestor.output_objects
                                         if out.is_unique())

    #local_plan = [] # TODO: not sure what this was for
    #for fact, step in sorted(last_from_fact.items(), key=lambda pair: pair[1]): # Earliest to latest
    #    print(step, fact)
    #    extract_stream_plan(node_from_atom, [fact], local_plan, last_from_fact, last_from_stream)

    # Each stream has an earliest evaluation time
    # When computing the latest, use 0 if something isn't deferred
    # Evaluate each stream as soon as possible
    # Option to defer streams after a point in time?
    # TODO: action costs for streams that encode uncertainty
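    # Forward-simulate the action plan to find the earliest step at which each
    # result's domain (plus its fluent domain) holds in the real state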
    state = set(real_task.init)
    remaining_results = list(stream_plan)
    first_from_stream = {}
    #assert 1 <= replan_step # Plan could be empty
    for step, instance in enumerate(action_plan):
        for result in list(remaining_results):
            # TODO: could do this more efficiently if need be
            domain = result.get_domain() + get_fluent_domain(result)
            if conditions_hold(state, map(fd_from_fact, domain)):
                remaining_results.remove(result)
                certified = {
                    fact
                    for fact in result.get_certified()
                    if get_prefix(fact) != EQ
                }
                state.update(map(fd_from_fact, certified))
                if step != 0:
                    first_from_stream[result] = step
        # TODO: assumes no fluent axiom domain conditions
        apply_action(state, instance)
    #assert not remaining_results # Not true if retrace
    if first_from_stream:
        replan_step = min(replan_step, *first_from_stream.values())

    eager_plan = []
    results_from_step = defaultdict(list)
    for result in stream_plan:
        earliest_step = first_from_stream.get(result, 0)
        latest_step = last_from_stream.get(result, 0)
        assert earliest_step <= latest_step
        defer = replan_step <= latest_step
        if not defer:
            eager_plan.append(result)
        # We only perform a deferred evaluation if it has all deferred dependencies
        # TODO: make a flag that also allows dependencies to be deferred
        future = (earliest_step != 0) or defer
        if future:
            future_step = latest_step if defer else earliest_step
            results_from_step[future_step].append(result)

    # TODO: some sort of obj side-effect bug that requires obj_from_pddl to be applied last (likely due to fluent streams)
    eager_plan = convert_fluent_streams(eager_plan, real_states, action_plan,
                                        steps_from_fact, node_from_atom)
    combined_plan = []
    for step, action in enumerate(action_plan):
        combined_plan.extend(result.get_action()
                             for result in results_from_step[step])
        combined_plan.append(
            transform_action_args(pddl_from_instance(action), obj_from_pddl))

    # TODO: the returned facts have the same side-effect bug as above
    # TODO: annotate when each preimage fact is used
    preimage_facts = {
        fact_from_fd(l)
        for l in full_preimage if (l.predicate != EQ) and not l.negated
    }
    for negative_result in negative_plan:  # TODO: function_plan
        preimage_facts.update(negative_result.get_certified())
    for result in eager_plan:
        preimage_facts.update(result.get_domain())
        # Might not be able to regenerate facts involving the outputs of streams
        preimage_facts.update(
            result.get_certified())  # Some facts might not be in the preimage
    # TODO: record streams and axioms
    return eager_plan, OptPlan(combined_plan, preimage_facts)
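A minimal, self-contained sketch of the eager/deferred split near the end of this function (not pddlstream code; split_eager_deferred, the stream ids, and the step values are hypothetical): a result is deferred when replan_step is no later than its latest required step, and deferred results are scheduled at that step.

from collections import defaultdict

def split_eager_deferred(stream_plan, first_from_stream, last_from_stream, replan_step):
    # Mirrors the loop above: defer a result when replan_step <= its latest step
    eager_plan, results_from_step = [], defaultdict(list)
    for result in stream_plan:
        earliest_step = first_from_stream.get(result, 0)
        latest_step = last_from_stream.get(result, 0)
        assert earliest_step <= latest_step
        defer = replan_step <= latest_step
        if not defer:
            eager_plan.append(result)
        if (earliest_step != 0) or defer:
            results_from_step[latest_step if defer else earliest_step].append(result)
    return eager_plan, results_from_step

# With replan_step=2: 's1' (latest step 0) is evaluated eagerly,
# while 's2' (latest step 3) is deferred to step 3
eager, deferred = split_eager_deferred(['s1', 's2'], {'s2': 1}, {'s1': 0, 's2': 3}, replan_step=2)
assert eager == ['s1'] and dict(deferred) == {3: ['s2']}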
Example #2
def add_plan_constraints(constraints,
                         domain,
                         evaluations,
                         goal_exp,
                         internal=False):
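    """Encode the plan skeletons in constraints as additional actions, facts,
    and goal conditions, and return the augmented goal expression.

    Each skeleton step becomes a copy of the corresponding domain action with
    extra order, binding, and group preconditions and effects; the new goal
    additionally requires the order facts of the steps preceding GOAL_INDEX.
    """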
    if (constraints is None) or (constraints.skeletons is None):
        return goal_exp
    import pddl
    # TODO: unify this with the constraint ordering
    # TODO: can constrain to use a plan prefix
    prefix = get_internal_prefix(internal)
    assigned_predicate = ASSIGNED_PREDICATE.format(prefix)
    bound_predicate = BOUND_PREDICATE.format(prefix)
    group_predicate = GROUP_PREDICATE.format(prefix)
    order_predicate = ORDER_PREDICATE.format(prefix)
    new_facts = []
    for group in constraints.groups:
        for value in constraints.groups[group]:
            # TODO: could make all constants groups (like an equality group)
            fact = (group_predicate, to_obj(group), to_obj(value))
            new_facts.append(fact)
    new_actions = []
    new_goals = []
    for num, skeleton in enumerate(constraints.skeletons):
        actions, orders = skeleton
        incoming_orders, _ = neighbors_from_orders(orders)
        order_facts = [(order_predicate, to_obj('n{}'.format(num)),
                        to_obj('t{}'.format(step)))
                       for step in range(len(actions))]
        for step, (name, args) in enumerate(actions):
            # TODO: could also just remove the free parameter from the action
            new_action = deepcopy(
                find_unique(lambda a: a.name == name, domain.actions))
            local_from_global = {
                a: p.name
                for a, p in safe_zip(args, new_action.parameters)
                if is_parameter(a)
            }

            ancestors = get_ancestors(step, orders)
            descendants = get_descendants(step, orders)
            parallel = set(range(len(actions))) - ancestors - descendants - {step}

            parameters = set(filter(is_parameter, args))
            ancestor_parameters = parameters & set(
                filter(is_parameter,
                       (p for idx in ancestors for p in actions[idx][1])))
            #descendant_parameters = parameters & set(filter(is_parameter, (p for idx in descendants for p in actions[idx][1])))
            parallel_parameters = parameters & set(
                filter(is_parameter,
                       (p for idx in parallel for p in actions[idx][1])))

            #bound_preconditions = [Imply(bound, assigned) for bound, assigned in safe_zip(bound_facts, assigned_facts)]
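            # Parameters shared with parallel (unordered) steps must either be
            # still unbound or already assigned the same local value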
            bound_condition = pddl.Conjunction([
                pddl.Disjunction(
                    map(fd_from_fact, [
                        Not((bound_predicate, to_constant(p))),
                        (assigned_predicate, to_constant(p),
                         local_from_global[p])
                    ])) for p in parallel_parameters
            ])
            existing_preconditions = [(assigned_predicate, to_constant(p),
                                       local_from_global[p])
                                      for p in ancestor_parameters]

            constant_pairs = [(a, p.name)
                              for a, p in safe_zip(args, new_action.parameters)
                              if is_constant(a)]
            group_preconditions = [
                (group_predicate if is_hashable(a) and
                 (a in constraints.groups) else EQ, to_obj(a), p)
                for a, p in constant_pairs
            ]
            order_preconditions = [
                order_facts[idx] for idx in incoming_orders[step]
            ]
            new_preconditions = existing_preconditions + group_preconditions + order_preconditions + [
                Not(order_facts[step])
            ]
            new_action.precondition = pddl.Conjunction([
                new_action.precondition, bound_condition,
                make_preconditions(new_preconditions)
            ]).simplified()

            new_parameters = parameters - ancestors
            bound_facts = [(bound_predicate, to_constant(p))
                           for p in new_parameters]
            assigned_facts = [(assigned_predicate, to_constant(p),
                               local_from_global[p]) for p in new_parameters]
            new_effects = bound_facts + assigned_facts + [order_facts[step]]
            new_action.effects.extend(make_effects(new_effects))
            # TODO: should also negate the effects of all other sequences here

            new_actions.append(new_action)
            #new_action.dump()
        new_goals.append(
            And(*[order_facts[idx] for idx in incoming_orders[GOAL_INDEX]]))

    add_predicate(domain, make_predicate(order_predicate, ['?num', '?step']))
    if constraints.exact:
        domain.actions[:] = []
    domain.actions.extend(new_actions)
    new_goal_exp = And(goal_exp, Or(*new_goals))
    for fact in new_facts:
        add_fact(evaluations, fact, result=INTERNAL_EVALUATION)
    return new_goal_exp
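A minimal, self-contained sketch of the ordering encoding used above (independent of pddlstream; order_constraints, the '_order' predicate name, and the toy skeleton are hypothetical): every skeleton step gets an order fact, requires the order facts of its predecessor steps as preconditions, and asserts its own order fact as an effect.

from collections import defaultdict

def order_constraints(num, num_steps, orders):
    # orders is a set of (earlier_step, later_step) pairs within one skeleton
    order_facts = [('_order', 'n{}'.format(num), 't{}'.format(step))
                   for step in range(num_steps)]
    incoming = defaultdict(set)
    for i, j in orders:
        incoming[j].add(i)
    constraints = {}
    for step in range(num_steps):
        preconditions = [order_facts[i] for i in sorted(incoming[step])]
        effects = [order_facts[step]]
        constraints[step] = (preconditions, effects)
    return constraints

# Skeleton 0 with 3 steps where step 0 must precede steps 1 and 2:
constraints = order_constraints(num=0, num_steps=3, orders={(0, 1), (0, 2)})
assert constraints[1][0] == [('_order', 'n0', 't0')]  # step 1 waits on step 0
assert constraints[0][1] == [('_order', 'n0', 't0')]  # step 0 asserts its order fact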