Example #1
def recover_stream_plan(evaluations, current_plan, opt_evaluations,
                        goal_expression, domain, node_from_atom, action_plan,
                        axiom_plans, negative):
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the Cartesian product of types (slow)
    # Added effects cancel out removed effects
    # TODO: node_from_atom is a subset of opt_evaluations (only missing functions)
    real_task = task_from_domain_problem(
        domain, get_problem(evaluations, goal_expression, domain))
    opt_task = task_from_domain_problem(
        domain, get_problem(opt_evaluations, goal_expression, domain))
    negative_from_name = get_negative_predicates(negative)

    real_states, combined_plan = recover_negative_axioms(
        real_task, opt_task, axiom_plans, action_plan, negative_from_name)
    function_plan = compute_function_plan(opt_evaluations, action_plan)

    full_preimage = plan_preimage(combined_plan, [])
    stream_preimage = set(full_preimage) - real_states[0]
    negative_preimage = set(
        filter(lambda a: a.predicate in negative_from_name, stream_preimage))
    function_plan.update(
        convert_negative(negative_preimage, negative_from_name, full_preimage,
                         real_states))
    positive_preimage = stream_preimage - negative_preimage

    step_from_fact = {
        fact_from_fd(l): full_preimage[l]
        for l in positive_preimage if not l.negated
    }
    target_facts = {
        fact
        for fact in step_from_fact.keys() if get_prefix(fact) != EQ
    }
    #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
    # visualize_constraints(map(fact_from_fd, target_facts))

    stream_plan = []
    for result in current_plan:
        if isinstance(result.external, Function) or (result.external
                                                     in negative):
            function_plan.add(
                result)  # Prevents these results from being pruned
        else:
            stream_plan.append(result)
    curr_evaluations = evaluations_from_stream_plan(evaluations,
                                                    stream_plan,
                                                    max_effort=None)
    extraction_facts = target_facts - set(
        map(fact_from_evaluation, curr_evaluations))
    extract_stream_plan(node_from_atom, extraction_facts, stream_plan)
    stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan,
                                          target_facts)
    stream_plan = convert_fluent_streams(stream_plan, real_states, action_plan,
                                         step_from_fact, node_from_atom)

    return stream_plan + list(function_plan)
Example #2
def solve_optimistic_temporal(domain, stream_domain, applied_results, all_results,
                              opt_evaluations, node_from_atom, goal_expression,
                              effort_weight, debug=False, **kwargs):
    assert domain is stream_domain
    #assert len(applied_results) == len(all_results)
    problem = get_problem(opt_evaluations, goal_expression, domain)
    with Verbose():
        instantiated = instantiate_task(task_from_domain_problem(domain, problem))
    if instantiated is None:
        return instantiated, None, None, INF
    problem = get_problem_pddl(opt_evaluations, goal_expression, domain.pddl)
    pddl_plan, makespan = solve_tfd(domain.pddl, problem, debug=debug)
    if pddl_plan is None:
        return instantiated, None, pddl_plan, makespan
    instance_from_action_args = defaultdict(list)
    for instance in instantiated.actions:
        tokens = instance.name.strip('()').split(' ')
        name, args = tokens[0], tuple(tokens[1:])
        instance_from_action_args[name, args].append(instance)
        #instance.action, instance.var_mapping
    action_instances = []
    for action in pddl_plan:
        instances = instance_from_action_args[action.name, action.args]
        assert len(instances) == 1 # TODO: support 2 <= case
        action_instances.append(instances[0])
    plan = obj_from_pddl_plan(pddl_plan)
    return instantiated, action_instances, plan, makespan
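The loop above recovers ground action instances by parsing each instance name of the form '(name arg1 arg2 ...)'. A standalone illustration of that parsing, using a made-up ground action name:

# Standalone illustration of the name parsing used above; the ground action
# name below is a hypothetical example, not taken from any particular domain.
name_string = '(move block1 room1 room2)'
tokens = name_string.strip('()').split(' ')
name, args = tokens[0], tuple(tokens[1:])
assert (name, args) == ('move', ('block1', 'room1', 'room2'))
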
Example #3
def locally_optimize(evaluations, store, goal_expression, domain, functions, negative,
                     dynamic_streams, visualize, sampling_time=0):
    action_plan = store.best_plan
    if action_plan is None:
        return None
    print('\nPostprocessing | Cost: {} | Total Time: {:.3f}'.format(store.best_cost, store.elapsed_time()))
    # TODO: postprocess current skeletons as well

    task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs=False))
    opt_stream_plan, opt_from_obj = recover_opt_stream_plan(evaluations, action_plan, task)
    opt_stream_plan += optimistic_process_streams(evaluations_from_stream_plan(evaluations, opt_stream_plan), functions)
    opt_action_plan = [(name, tuple(opt_from_obj.get(o, o) for o in args)) for name, args in action_plan]
    pddl_plan = [(name, tuple(map(pddl_from_object, args))) for name, args in opt_action_plan]
    stream_plan = recover_stream_plan(evaluations, goal_expression, domain,
                                      opt_stream_plan, pddl_plan, negative, unit_costs=False)
    stream_plan = get_synthetic_stream_plan(reorder_stream_plan(stream_plan), dynamic_streams)


    # TODO: need to make this just streams
    opt_evaluations = apply_streams(evaluations, stream_plan)
    opt_cost = get_plan_cost(opt_evaluations, opt_action_plan, domain, unit_costs=False)
    dump_plans(stream_plan, opt_action_plan, opt_cost)
    if visualize:
        log_plans(stream_plan, action_plan, None)
        create_visualizations(evaluations, stream_plan, None)

    store.start_time = time.time()
    store.max_cost = store.best_cost
    queue = SkeletonQueue(store, evaluations, goal_expression, domain)
    queue.new_skeleton(stream_plan, opt_action_plan, opt_cost)
    queue.greedily_process()
    queue.timed_process(sampling_time)
Example #4
def solve_finite(evaluations, goal_exp, domain, unit_costs=False, debug=False, **search_args):
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    sas_task = sas_from_pddl(task, debug=debug)
    pddl_plan, cost = abstrips_solve_from_task(sas_task, debug=debug, **search_args)
    plan = obj_from_pddl_plan(pddl_plan)
    return plan, cost
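A minimal usage sketch for the helper above, assuming evaluations, goal_exp, and domain were already produced by pddlstream's problem parsing (as in Example #10); the inputs and argument values are placeholders, not part of the original code.

# Hedged usage sketch: evaluations, goal_exp, and domain are assumed to come
# from parse_problem (see Example #10) and are not constructed here.
plan, cost = solve_finite(evaluations, goal_exp, domain, unit_costs=True, debug=True)
if plan is None:
    print('No plan found')
else:
    print('Plan with {} actions, cost {}'.format(len(plan), cost))
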
Example #5
def translate_and_write_pddl(domain_pddl, problem_pddl, temp_dir, verbose):
    domain = parse_sequential_domain(domain_pddl)
    problem = parse_problem(domain, problem_pddl)
    task = task_from_domain_problem(domain, problem)
    sas_task = sas_from_pddl(task)
    write_sas_task(sas_task, temp_dir)
    return task
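A hedged usage sketch for the translation helper above; the tiny STRIPS domain, problem, and output directory below are made-up placeholders. The helper writes the translated SAS+ task into the given directory and returns the parsed task.

# Hedged usage sketch with made-up PDDL strings; a real caller would supply
# its own domain/problem text and output directory.
DOMAIN_PDDL = """
(define (domain blocks-mini)
  (:predicates (on-table ?b) (holding ?b))
  (:action pick
    :parameters (?b)
    :precondition (on-table ?b)
    :effect (and (holding ?b) (not (on-table ?b)))))
"""
PROBLEM_PDDL = """
(define (problem blocks-mini-1)
  (:domain blocks-mini)
  (:objects b1)
  (:init (on-table b1))
  (:goal (holding b1)))
"""
task = translate_and_write_pddl(DOMAIN_PDDL, PROBLEM_PDDL, '/tmp/sas/', verbose=False)
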
Example #6
def solve_optimistic_sequential(domain, stream_domain, applied_results, all_results,
                                opt_evaluations, node_from_atom, goal_expression,
                                effort_weight, debug=False, **kwargs):
    if isinstance(stream_domain, SimplifiedDomain):
        return solve_optimistic_temporal(domain, stream_domain, applied_results, all_results,
                                         opt_evaluations, node_from_atom, goal_expression,
                                         effort_weight, debug=debug, **kwargs)

    problem = get_problem(opt_evaluations, goal_expression, stream_domain)  # begin_metric
    with Verbose():
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return instantiated, None, None, INF

    cost_from_action = add_stream_efforts(node_from_atom, instantiated, effort_weight)
    if using_optimizers(applied_results):
        add_optimizer_effects(instantiated, node_from_atom)
        # TODO: reachieve=False when using optimizers or should add applied facts
        instantiate_optimizer_axioms(instantiated, domain, all_results)
    action_from_name = rename_instantiated_actions(instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True

    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    renamed_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
    if renamed_plan is None:
        return instantiated, None, None, INF
    action_instances = [action_from_name[name] for name, _ in renamed_plan]
    cost = get_plan_cost(action_instances, cost_from_action)
    # plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)
    plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))
    return instantiated, action_instances, plan, cost
Example #7
def reschedule_stream_plan(evaluations,
                           target_facts,
                           domain,
                           stream_results,
                           unique_binding=False,
                           unit_efforts=True):
    # TODO: search in space of partially ordered plans
    # TODO: constrain selection order to be alphabetical?
    goal_expression = And(*target_facts)
    reschedule_problem = get_problem(evaluations,
                                     goal_expression,
                                     domain,
                                     unit_costs=unit_efforts)
    reschedule_task = task_from_domain_problem(domain, reschedule_problem)
    reschedule_task.actions, stream_result_from_name = get_stream_actions(
        stream_results,
        unique_binding=unique_binding,
        unit_efforts=unit_efforts)
    #reschedule_task.axioms = [] # TODO: ensure that the constants are added in the event that axioms are needed?
    sas_task = sas_from_pddl(reschedule_task)
    stream_names, effort = solve_from_task(sas_task,
                                           planner=RESCHEDULE_PLANNER,
                                           max_planner_time=10,
                                           debug=False)
    if stream_names is None:
        return None
    stream_plan = [stream_result_from_name[name] for name, _ in stream_names]
    return stream_plan
Example #8
def solve_optimistic_temporal(domain, stream_domain, applied_results, all_results,
                              opt_evaluations, node_from_atom, goal_expression,
                              effort_weight, debug=False, **kwargs):
    # TODO: assert that the unused parameters are off
    assert domain is stream_domain
    #assert len(applied_results) == len(all_results)
    problem = get_problem(opt_evaluations, goal_expression, domain)
    with Verbose():
        instantiated = instantiate_task(task_from_domain_problem(domain, problem))
    if instantiated is None:
        return instantiated, None, None, INF
    problem = get_problem_pddl(opt_evaluations, goal_expression, domain.pddl)
    pddl_plan, makespan = solve_tfd(domain.pddl, problem, debug=debug, **kwargs)
    if pddl_plan is None:
        return instantiated, None, pddl_plan, makespan
    instance_from_action_args = defaultdict(list)
    for instance in instantiated.actions:
        name, args = parse_action(instance)
        instance_from_action_args[name, args].append(instance)
        #instance.action, instance.var_mapping
    action_instances = []
    for action in pddl_plan:
        instances = instance_from_action_args[action.name, action.args]
        if len(instances) != 1:
            for action in instances:
                action.dump()
        #assert len(instances) == 1 # TODO: support 2 <= case
        action_instances.append(instances[0])
    temporal_plan = obj_from_pddl_plan(pddl_plan) # pddl_plan is sequential
    return instantiated, action_instances, temporal_plan, makespan
Example #9
def reschedule_stream_plan(evaluations,
                           target_facts,
                           domain,
                           stream_results,
                           unique_binding=False,
                           unsatisfiable=False,
                           max_effort=INF,
                           planner=RESCHEDULE_PLANNER,
                           debug=False):
    # TODO: search in space of partially ordered plans
    # TODO: constrain selection order to be alphabetical?
    domain.actions[:], stream_result_from_name = get_stream_actions(
        stream_results, unique_binding=unique_binding)
    goal_expression = And(*target_facts)
    if unsatisfiable:  # TODO: ensure that the copy hasn't harmed anything
        goal_expression = add_unsatisfiable_to_goal(domain, goal_expression)
    reschedule_problem = get_problem(evaluations,
                                     goal_expression,
                                     domain,
                                     unit_costs=False)
    reschedule_task = task_from_domain_problem(domain, reschedule_problem)
    #reschedule_task.axioms = [] # TODO: ensure that the constants are added in the event that axioms are needed?
    sas_task = sas_from_pddl(reschedule_task)
    stream_names, effort = solve_from_task(sas_task,
                                           planner=planner,
                                           max_planner_time=10,
                                           max_cost=max_effort,
                                           debug=debug)
    if stream_names is None:
        return None
    stream_plan = [stream_result_from_name[name] for name, _ in stream_names]
    return stream_plan
Example #10
def examine_instantiated(problem,
                         constraints=PlanConstraints(),
                         unit_costs=False,
                         max_time=INF,
                         verbose=False,
                         **search_args):
    domain_pddl, constant_map, stream_pddl, _, init, goal = problem
    stream_map = DEBUG
    problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map,
                          init, goal)

    evaluations, goal_exp, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    store = SolutionStore(evaluations,
                          max_time,
                          success_cost=INF,
                          verbose=verbose)
    #externals = compile_fluents_as_attachments(domain, externals) #
    instantiator = Instantiator(externals, evaluations)
    process_stream_queue(instantiator,
                         store,
                         complexity_limit=INF,
                         verbose=verbose)

    #plan, cost = solve_finite(evaluations, goal_exp, domain, max_cost=max_cost, **search_args)
    debug = False
    assert not isinstance(domain, SimplifiedDomain)
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    with Verbose(debug):
        instantiated = instantiate_task(task)
        instantiated = convert_instantiated(instantiated)
    return instantiated
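A hedged usage sketch for the helper above; my_problem is a placeholder for a caller-supplied PDDLProblem-style tuple (domain_pddl, constant_map, stream_pddl, stream_map, init, goal) whose stream map is ignored because the function substitutes the DEBUG stream map internally.

# Hedged sketch: my_problem is a placeholder for a caller-supplied problem tuple.
instantiated = examine_instantiated(my_problem, unit_costs=True, verbose=True)
print('Instantiated task:', instantiated)
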
Example #11
def instantiate_optimizer_axioms(instantiated, domain, results):
    # Needed for instantiating axioms before adding stream action effects
    # Otherwise, FastDownward will prune these unreachable axioms
    # TODO: compute this first and then apply the eager actions
    stream_init = {
        fd_from_fact(result.stream_fact)
        for result in results if isinstance(result, StreamResult)
    }
    evaluations = list(
        map(evaluation_from_fd, stream_init | instantiated.atoms))
    temp_domain = make_domain(
        predicates=[make_predicate(UNSATISFIABLE, [])],
        axioms=[ax for ax in domain.axioms if ax.name == UNSATISFIABLE])
    temp_problem = get_problem(evaluations, Not((UNSATISFIABLE, )),
                               temp_domain)
    # TODO: UNSATISFIABLE might be in atoms making the goal always infeasible
    with Verbose():
        # TODO: the FastDownward instantiation prunes static preconditions
        use_fd = False if using_optimizers(results) else FD_INSTANTIATE
        new_instantiated = instantiate_task(task_from_domain_problem(
            temp_domain, temp_problem),
                                            use_fd=use_fd,
                                            check_infeasible=False,
                                            prune_static=False)
        assert new_instantiated is not None
    instantiated.axioms.extend(new_instantiated.axioms)
    instantiated.atoms.update(new_instantiated.atoms)
Example #12
def examine_instantiated(problem, constraints=PlanConstraints(), unit_costs=False, unique=False, verbose=False, debug=False):
    # TODO: refactor to an analysis file
    domain_pddl, constant_map, stream_pddl, _, init, goal = problem
    stream_map = DEBUG if unique else SHARED_DEBUG # DEBUG_MODES
    problem = PDDLProblem(domain_pddl, constant_map, stream_pddl, stream_map, init, goal)

    evaluations, goal_exp, domain, externals = parse_problem(
        problem, constraints=constraints, unit_costs=unit_costs)
    assert not isinstance(domain, SimplifiedDomain)

    # store = SolutionStore(evaluations, max_time, success_cost=INF, verbose=verbose)
    # instantiator = Instantiator(externals, evaluations)
    # process_stream_queue(instantiator, store, complexity_limit=INF, verbose=verbose)
    # results = [] # TODO: extract from process_stream_queue

    #set_unique(externals)
    # domain.actions[:] = [] # TODO: only instantiate axioms
    # TODO: drop all fluents and instantiate
    # TODO: relaxed planning version of this
    results, exhausted = optimistic_process_streams(evaluations, externals, complexity_limit=INF, max_effort=None)

    evaluations = evaluations_from_stream_plan(evaluations, results, max_effort=None)
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    with Verbose(debug):
        instantiated = instantiate_task(task)
        if instantiated is None:
            return None
        # TODO: reinstantiate actions?
        instantiated.axioms[:] = [reinstantiate_axiom(axiom) for axiom in instantiated.axioms]
        instantiated = convert_instantiated(instantiated)
    return results, instantiated
Example #13
def solve_optimistic_sequential(domain, stream_domain, applied_results, all_results,
                                opt_evaluations, node_from_atom, goal_expression,
                                effort_weight, debug=False, **kwargs):
    #print(sorted(map(fact_from_evaluation, opt_evaluations)))
    temporal_plan = None
    problem = get_problem(opt_evaluations, goal_expression, stream_domain)  # begin_metric
    with Verbose(verbose=False):
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return instantiated, None, temporal_plan, INF

    cost_from_action = {action: action.cost for action in instantiated.actions}
    add_stream_efforts(node_from_atom, instantiated, effort_weight)
    if using_optimizers(applied_results):
        add_optimizer_effects(instantiated, node_from_atom)
        # TODO: reachieve=False when using optimizers or should add applied facts
        instantiate_optimizer_axioms(instantiated, domain, all_results)
    action_from_name = rename_instantiated_actions(instantiated, RENAME_ACTIONS)
    # TODO: the action unsatisfiable conditions are pruned
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True

    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    renamed_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
    if renamed_plan is None:
        return instantiated, None, temporal_plan, INF

    action_instances = [action_from_name[name if RENAME_ACTIONS else '({} {})'.format(name, ' '.join(args))]
                        for name, args in renamed_plan]
    cost = get_plan_cost(action_instances, cost_from_action)
    return instantiated, action_instances, temporal_plan, cost
Example #14
def solve_finite(evaluations,
                 goal_expression,
                 domain,
                 unit_costs=None,
                 **kwargs):
    if unit_costs is None:
        unit_costs = not has_costs(domain)
    problem = get_problem(evaluations, goal_expression, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    plan_pddl, cost = solve_from_task(task, **kwargs)
    return obj_from_pddl_plan(plan_pddl), cost
Example #15
def solve_finite(evaluations,
                 goal_expression,
                 domain,
                 unit_costs=None,
                 debug=False,
                 **kwargs):
    if unit_costs is None:
        unit_costs = not has_costs(domain)
    problem = get_problem(evaluations, goal_expression, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    sas_task = sas_from_pddl(task, debug=debug)
    plan_pddl, cost = abstrips_solve_from_task(sas_task, debug=debug, **kwargs)
    return obj_from_pddl_plan(plan_pddl), cost
Example #16
def sequential_stream_plan(evaluations, goal_expression, domain, stream_results,
                           negated, effort_weight, unit_costs=True, debug=False, **kwargs):
    # Intuitively, actions have infinitely more weight than streams
    if negated:
        raise NotImplementedError(negated)
    for result in stream_results:
        if isinstance(result.external, Stream) and result.external.is_fluent():
            raise NotImplementedError('Fluents are not supported')

    # TODO: compute preimage and make that the goal instead
    opt_evaluations = evaluations_from_stream_plan(evaluations, stream_results)
    opt_task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs))
    action_plan, action_cost = abstrips_solve_from_task(sas_from_pddl(opt_task, debug=debug), debug=debug, **kwargs)
    if action_plan is None:
        return None, action_cost

    actions = domain.actions[:]
    domain.actions[:] = []
    stream_domain, stream_result_from_name = add_stream_actions(domain, stream_results) # TODO: effort_weight
    domain.actions.extend(actions)
    stream_task = task_from_domain_problem(stream_domain, get_problem(evaluations, goal_expression, stream_domain, unit_costs))
    action_from_name, function_plan = simplify_actions(opt_evaluations, action_plan, stream_task, actions, unit_costs)

    # TODO: lmcut?
    combined_plan, _ = solve_from_task(sas_from_pddl(opt_task, debug=debug),
                                       planner=kwargs.get('planner', 'ff-astar'),
                                       debug=debug, **kwargs)
    if combined_plan is None:
        return None, INF

    stream_plan, action_plan = [], []
    for name, args in combined_plan:
        if name in stream_result_from_name:
            stream_plan.append(stream_result_from_name[name])
        else:
            action_plan.append(action_from_name[name])
    combined_plan = stream_plan + function_plan + action_plan
    return combined_plan, action_cost
Example #17
def solve_sequential(evaluations,
                     goal_exp,
                     domain,
                     unit_costs=False,
                     debug=False,
                     **search_args):
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    if has_attachments(domain):
        with Verbose(debug):
            instantiated = instantiate_task(task)
        return solve_pyplanners(instantiated, **search_args)
    sas_task = sas_from_pddl(task, debug=debug)
    return abstrips_solve_from_task(sas_task, debug=debug, **search_args)
Example #18
def solve_finite(evaluations, goal_exp, domain, unit_costs=False, debug=False, **search_args):
    if isinstance(domain, SimplifiedDomain):
        problem = get_problem_pddl(evaluations, goal_exp, domain.pddl)
        pddl_plan, cost = solve_tfd(domain.pddl, problem, debug=debug)
    else:
        problem = get_problem(evaluations, goal_exp, domain, unit_costs)
        task = task_from_domain_problem(domain, problem)
        if has_attachments(domain):
            with Verbose(debug):
                instantiated = instantiate_task(task)
            pddl_plan, cost = solve_pyplanners(instantiated, **search_args)
        else:
            sas_task = sas_from_pddl(task, debug=debug)
            pddl_plan, cost = abstrips_solve_from_task(sas_task, debug=debug, **search_args)
    plan = obj_from_pddl_plan(pddl_plan)
    return plan, cost
Example #19
def instantiate_optimizer_axioms(instantiated, evaluations, goal_expression,
                                 domain, results):
    # Needed for instantiating axioms before adding stream action effects
    # Otherwise, FastDownward will prune these unreachable axioms
    # TODO: compute this first and then apply the eager actions
    #stream_evaluations = set(map(evaluation_from_fact, get_stream_facts(applied_results)))
    stream_domain, result_from_name = add_stream_actions(domain, results)
    # Need unit_costs=True otherwise obtain an instantiation error
    problem = get_problem(evaluations,
                          goal_expression,
                          stream_domain,
                          unit_costs=True)
    with Verbose():
        new_instantiated = instantiate_task(
            task_from_domain_problem(stream_domain, problem))
    instantiated.axioms[:] = new_instantiated.axioms
    instantiated.atoms.update(new_instantiated.atoms)
Example #20
def solve_finite(evaluations,
                 goal_exp,
                 domain,
                 unit_costs=False,
                 debug=False,
                 **search_args):
    if isinstance(domain, SimplifiedDomain):
        problem = get_problem_pddl(evaluations, goal_exp, domain.pddl)
        pddl_plan, cost = solve_tfd(domain.pddl, problem, debug=debug)
    else:
        task = task_from_domain_problem(
            domain, get_problem(evaluations, goal_exp, domain, unit_costs))
        sas_task = sas_from_pddl(task, debug=debug)
        pddl_plan, cost = abstrips_solve_from_task(sas_task,
                                                   debug=debug,
                                                   **search_args)
    plan = obj_from_pddl_plan(pddl_plan)
    return plan, cost
Example #21
def relaxed_stream_plan(evaluations, goal_expression, domain, stream_results,
                        negative, unit_costs, **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    opt_evaluations = evaluations_from_stream_plan(evaluations, stream_results)
    problem = get_problem(opt_evaluations, goal_expression, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    action_plan, action_cost = solve_from_task(task, **kwargs)
    if action_plan is None:
        return None, action_cost
    # TODO: just use solve finite?

    stream_plan = recover_stream_plan(evaluations, goal_expression, domain,
                                      stream_results, action_plan, negative,
                                      unit_costs)
    action_plan = obj_from_pddl_plan(action_plan)
    combined_plan = stream_plan + action_plan

    return combined_plan, action_cost
Example #22
def is_solution(domain, evaluations, action_plan, goal_expression):
    task = task_from_domain_problem(
        domain,
        get_problem(evaluations, goal_expression, domain, unit_costs=True))
    action_instances = get_action_instances(
        task, action_plan) + [get_goal_instance(task.goal)]
    #original_init = task.init
    task.init = set(task.init)
    for instance in action_instances:
        axiom_plan = extract_axiom_plan(task,
                                        instance,
                                        negative_from_name={},
                                        static_state=task.init)
        if axiom_plan is None:
            return False
        #substitute_derived(axiom_plan, instance)
        #if not is_applicable(task.init, instance):
        #    return False
        apply_action(task.init, instance)
    return True
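A hedged usage sketch for the validity check above; domain, evaluations, candidate_plan, and goal_expression are placeholders for caller-supplied pddlstream objects and are not constructed here.

# Hedged sketch: all four inputs are assumed to already exist in pddlstream's
# internal formats; candidate_plan is a sequence of (name, args) action pairs.
if is_solution(domain, evaluations, candidate_plan, goal_expression):
    print('Candidate plan is certified as a solution')
else:
    print('Candidate plan could not be certified (missing axiom support)')
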
Example #23
def get_combined_orders(evaluations, stream_plan, action_plan, domain):
    if not is_plan(action_plan):
        return action_plan
    # TODO: could just do this within relaxed
    # TODO: do I want to strip the fluents and just do the partial ordering?

    stream_instances = get_stream_instances(stream_plan)
    negative_results = filter(
        lambda r: isinstance(r, PredicateResult) and (r.value == False),
        stream_plan)
    negative_init = set(
        fd_from_evaluation(evaluation_from_fact(f)) for r in negative_results
        for f in r.get_certified())
    #negated_from_name = {r.instance.external.name for r in negative_results}
    opt_evaluations = evaluations_from_stream_plan(evaluations, stream_plan)
    goal_expression = And()
    task = task_from_domain_problem(
        domain,
        get_problem(opt_evaluations, goal_expression, domain, unit_costs=True))

    action_instances = get_action_instances(task, action_plan)
    replace_derived(task, negative_init, action_instances)

    #combined_instances = stream_instances + action_instances
    orders = set()
    for i, a1 in enumerate(action_plan):
        for a2 in action_plan[i + 1:]:
            orders.add((a1, a2))
    # TODO: just store first achiever here
    for i, instance1 in enumerate(stream_instances):
        for j in range(i + 1, len(stream_instances)):
            effects = {e for _, e in instance1.add_effects}
            if effects & set(stream_instances[j].precondition):
                orders.add((stream_plan[i], stream_plan[j]))
    for i, instance1 in enumerate(stream_instances):
        for j, instance2 in enumerate(action_instances):
            effects = {e for _, e in  instance1.add_effects} | \
                      {e.negate() for _, e in  instance1.del_effects}
            if effects & set(instance2.precondition):
                orders.add((stream_plan[i], action_plan[j]))
    return orders
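The first double loop above records a total order over the action plan (each earlier action must precede every later one). A standalone illustration with made-up action labels:

# Standalone illustration of the action-ordering loop, using hypothetical labels.
action_plan = ['a1', 'a2', 'a3']
orders = set()
for i, a1 in enumerate(action_plan):
    for a2 in action_plan[i + 1:]:
        orders.add((a1, a2))
assert orders == {('a1', 'a2'), ('a1', 'a3'), ('a2', 'a3')}
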
Example #24
def reschedule_stream_plan(evaluations,
                           preimage_facts,
                           domain,
                           stream_results,
                           unique_binding=False,
                           unit_costs=True):
    # TODO: search in space of partially ordered plans
    # TODO: constrain selection order to be alphabetical?
    reschedule_problem = get_problem(evaluations,
                                     And(*preimage_facts),
                                     domain,
                                     unit_costs=unit_costs)
    reschedule_task = task_from_domain_problem(domain, reschedule_problem)
    reschedule_task.actions, stream_result_from_name = get_stream_actions(
        stream_results, unique_binding=unique_binding)
    #reschedule_task.axioms = [] # TODO: ensure that the constants are added in the event that axioms are needed?
    new_plan, _ = solve_from_task(reschedule_task,
                                  planner=RESCHEDULE_PLANNER,
                                  max_planner_time=10,
                                  debug=False)
    return [stream_result_from_name[name] for name, _ in new_plan]
Example #25
def apply_actions(domain, state, plan, unit_costs=False):
    # Goal serialization just assumes the tail of the plan includes an abstract action to achieve each condition
    static_state, _ = partition_facts(domain, state)
    print('Static:', static_state)
    # TODO: might need properties that involve an object that aren't useful yet
    evaluations = evaluations_from_init(state)
    #goal_exp = obj_from_value_expression(goal)
    goal_exp = None
    problem = get_problem(evaluations, goal_exp, domain, unit_costs)
    task = task_from_domain_problem(domain, problem)
    task.init = set(task.init)
    for instance in get_action_instances(
            task, transform_plan_args(plan, Object.from_value)):
        apply_action(task.init, instance)
    fluents = get_fluents(domain)
    fluent_state = [
        value_from_evaluation(evaluation_from_fd(atom)) for atom in task.init
        if isinstance(atom, pddl.Atom) and atom.predicate in fluents
    ]
    print('Fluent:', fluent_state)
    state = static_state + fluent_state
    return state
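A hedged usage sketch for the helper above, which simulates a plan forward from a state and returns the combined static and fluent facts; domain, initial_state, and plan are placeholders assumed to be supplied by the caller in pddlstream's formats (plan as a sequence of (action name, argument values) pairs).

# Hedged sketch with placeholder inputs; none of these objects are built here.
final_state = apply_actions(domain, initial_state, plan, unit_costs=False)
print('Resulting state has {} facts'.format(len(final_state)))
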
Example #26
def relaxed_stream_plan(evaluations, goal_expression, domain, stream_results, negative, unit_costs,
                        unit_efforts, effort_weight, debug=False, **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    applied_results, deferred_results = partition_results(evaluations, stream_results,
                                                          apply_now=lambda r: not r.external.info.simultaneous)
    stream_domain, result_from_name = add_stream_actions(domain, deferred_results)
    node_from_atom = get_achieving_streams(evaluations, applied_results)
    opt_evaluations = apply_streams(evaluations, applied_results) # if n.effort < INF
    if any(map(is_optimizer_result, stream_results)):
        goal_expression = augment_goal(stream_domain, goal_expression)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain, unit_costs) # begin_metric

    with Verbose(debug):
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF
    if (effort_weight is not None) or any(map(is_optimizer_result, applied_results)):
        add_stream_costs(node_from_atom, instantiated, unit_efforts, effort_weight)
    add_optimizer_axioms(stream_results, instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True
    #sas_task = sas_from_pddl(task)
    #action_plan, _ = serialized_solve_from_task(sas_task, debug=debug, **kwargs)
    action_plan, _ = abstrips_solve_from_task(sas_task, debug=debug, **kwargs)
    #action_plan, _ = abstrips_solve_from_task_sequential(sas_task, debug=debug, **kwargs)
    if action_plan is None:
        return None, INF

    applied_plan, function_plan = partition_external_plan(recover_stream_plan(
        evaluations, goal_expression, stream_domain, applied_results, action_plan, negative, unit_costs))
    deferred_plan, action_plan = partition_plan(action_plan, result_from_name)
    stream_plan = applied_plan + deferred_plan + function_plan
    action_plan = obj_from_pddl_plan(action_plan)
    cost = get_plan_cost(opt_evaluations, action_plan, domain, unit_costs)
    combined_plan = stream_plan + action_plan
    return combined_plan, cost
Example #27
def recover_stream_plan(evaluations,
                        goal_expression,
                        domain,
                        stream_results,
                        action_plan,
                        negative,
                        unit_costs,
                        optimize=True):
    import pddl_to_prolog
    import build_model
    import pddl
    import axiom_rules
    import instantiate
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the Cartesian product of types (slow)
    # Added effects cancel out removed effects

    opt_evaluations = evaluations_from_stream_plan(evaluations, stream_results)
    opt_task = task_from_domain_problem(
        domain,
        get_problem(opt_evaluations, goal_expression, domain, unit_costs))
    real_task = task_from_domain_problem(
        domain, get_problem(evaluations, goal_expression, domain, unit_costs))
    function_assignments = {
        fact.fluent: fact.expression
        for fact in opt_task.init  # init_facts
        if isinstance(fact, pddl.f_expression.FunctionAssignment)
    }
    type_to_objects = instantiate.get_objects_by_type(opt_task.objects,
                                                      opt_task.types)
    results_from_head = get_results_from_head(opt_evaluations)

    action_instances = []
    for name, args in action_plan:  # TODO: negative atoms in actions
        candidates = []
        for action in opt_task.actions:
            if action.name != name:
                continue
            if len(action.parameters) != len(args):
                raise NotImplementedError(
                    'Existential quantifiers are not currently '
                    'supported in preconditions: {}'.format(name))
            variable_mapping = {
                p.name: a
                for p, a in zip(action.parameters, args)
            }
            instance = action.instantiate(variable_mapping, set(), MockSet(),
                                          type_to_objects,
                                          opt_task.use_min_cost_metric,
                                          function_assignments)
            assert (instance is not None)
            candidates.append(((action, args), instance))
        if not candidates:
            raise RuntimeError(
                'Could not find an applicable action {}'.format(name))
        action_instances.append(candidates)
    action_instances.append([(None, get_goal_instance(opt_task.goal))])

    axioms_from_name = get_derived_predicates(opt_task.axioms)
    negative_from_name = {n.name: n for n in negative}
    opt_task.actions = []
    opt_state = set(opt_task.init)
    real_state = set(real_task.init)
    preimage_plan = []
    function_plan = set()
    for layer in action_instances:
        for pair, instance in layer:
            nonderived_preconditions = [
                l for l in instance.precondition
                if l.predicate not in axioms_from_name
            ]
            #nonderived_preconditions = instance.precondition
            if not conditions_hold(opt_state, nonderived_preconditions):
                continue
            opt_task.init = opt_state
            original_axioms = opt_task.axioms
            axiom_from_action = get_necessary_axioms(instance, original_axioms,
                                                     negative_from_name)
            opt_task.axioms = []
            opt_task.actions = axiom_from_action.keys()
            # TODO: maybe it would just be better to drop the negatives throughout this process until the end
            with Verbose(False):
                model = build_model.compute_model(
                    pddl_to_prolog.translate(
                        opt_task))  # Changes based on init
            opt_task.axioms = original_axioms

            opt_facts = instantiate.get_fluent_facts(
                opt_task, model) | (opt_state - real_state)
            mock_fluent = MockSet(lambda item: (
                item.predicate in negative_from_name) or (item in opt_facts))
            instantiated_axioms = instantiate_necessary_axioms(
                model, real_state, mock_fluent, axiom_from_action)
            with Verbose(False):
                helpful_axioms, axiom_init, _ = axiom_rules.handle_axioms(
                    [instance], instantiated_axioms, [])
            axiom_from_atom = get_achieving_axioms(opt_state, helpful_axioms,
                                                   axiom_init,
                                                   negative_from_name)
            axiom_plan = []  # Could always add all conditions
            extract_axioms(axiom_from_atom, instance.precondition, axiom_plan)
            # TODO: test if no derived solution

            # TODO: compute required stream facts in a forward way and allow opt facts that are already known required
            for effects in [instance.add_effects, instance.del_effects]:
                for i, (conditions, effect) in reversed(list(enumerate(effects))):
                    if any(c.predicate in axioms_from_name
                           for c in conditions):
                        raise NotImplementedError(
                            'Conditional effects cannot currently involve derived predicates'
                        )
                    if conditions_hold(real_state, conditions):
                        # Holds in real state
                        effects[i] = ([], effect)
                    elif not conditions_hold(opt_state, conditions):
                        # Does not hold in optimistic state
                        effects.pop(i)
                    else:
                        # TODO: handle more general case where can choose to achieve particular conditional effects
                        raise NotImplementedError(
                            'Conditional effects cannot currently involve certified predicates'
                        )
            #if any(conditions for conditions, _ in instance.add_effects + instance.del_effects):
            #    raise NotImplementedError('Conditional effects are not currently supported: {}'.format(instance.name))

            # TODO: add axiom init to reset state?
            apply_action(opt_state, instance)
            apply_action(real_state, instance)
            preimage_plan.extend(axiom_plan + [instance])
            if not unit_costs and (pair is not None):
                function_plan.update(
                    extract_function_results(results_from_head, *pair))
            break
        else:
            raise RuntimeError('No action instances are applicable')

    preimage = plan_preimage(preimage_plan, set())
    preimage -= set(real_task.init)
    negative_preimage = set(
        filter(lambda a: a.predicate in negative_from_name, preimage))
    preimage -= negative_preimage
    # visualize_constraints(map(fact_from_fd, preimage))
    # TODO: prune with rules
    # TODO: linearization that takes into account satisfied goals at each level
    # TODO: can optimize for all streams & axioms all at once

    for literal in negative_preimage:
        negative = negative_from_name[literal.predicate]
        instance = negative.get_instance(map(obj_from_pddl, literal.args))
        value = not literal.negated
        if instance.enumerated:
            assert (instance.value == value)
        else:
            function_plan.add(
                PredicateResult(instance, value, opt_index=instance.opt_index))

    node_from_atom = get_achieving_streams(evaluations, stream_results)
    preimage_facts = list(
        map(fact_from_fd, filter(lambda l: not l.negated, preimage)))
    stream_plan = []
    extract_stream_plan(node_from_atom, preimage_facts, stream_plan)
    if not optimize:  # TODO: detect this based on unique or not
        return stream_plan + list(function_plan)

    # TODO: search in space of partially ordered plans
    # TODO: local optimization - remove one and see if feasible

    reschedule_problem = get_problem(evaluations,
                                     And(*preimage_facts),
                                     domain,
                                     unit_costs=True)
    reschedule_task = task_from_domain_problem(domain, reschedule_problem)
    reschedule_task.actions, stream_result_from_name = get_stream_actions(
        stream_results)
    new_plan, _ = solve_from_task(reschedule_task,
                                  planner='max-astar',
                                  debug=False)
    # TODO: investigate admissible heuristics
    if new_plan is None:
        return stream_plan + list(function_plan)

    new_stream_plan = [stream_result_from_name[name] for name, _ in new_plan]
    return new_stream_plan + list(function_plan)
Example #28
def locally_optimize(evaluations, store, goal_expression, domain, functions,
                     negative, dynamic_streams):
    action_plan = store.best_plan
    if action_plan is None:
        return None
    print('\nPostprocessing')  # TODO: postprocess current skeleton as well

    task = task_from_domain_problem(
        domain,
        get_problem(evaluations, goal_expression, domain, unit_costs=False))
    plan_instances = get_action_instances(
        task, action_plan) + [get_goal_instance(task.goal)]
    replace_derived(task, set(), plan_instances)
    preimage = filter(lambda a: not a.negated,
                      plan_preimage(plan_instances, []))

    opt_stream_plan = []
    opt_from_obj = {}
    for stream_result in extract_order(evaluations, preimage):
        input_objects = tuple(
            opt_from_obj.get(o, o)
            for o in stream_result.instance.input_objects)
        instance = stream_result.instance.external.get_instance(input_objects)
        #assert(not instance.disabled)
        instance.disabled = False
        opt_results = instance.next_optimistic()
        if not opt_results:
            continue
        opt_result = opt_results[0]
        opt_stream_plan.append(opt_result)
        for obj, opt in zip(stream_result.output_objects,
                            opt_result.output_objects):
            opt_from_obj[obj] = opt

    opt_stream_plan += optimistic_process_streams(
        evaluations_from_stream_plan(evaluations, opt_stream_plan), functions)
    opt_action_plan = [(name, tuple(opt_from_obj.get(o, o) for o in args))
                       for name, args in action_plan]
    pddl_plan = [(name, tuple(map(pddl_from_object, args)))
                 for name, args in opt_action_plan]
    stream_plan = recover_stream_plan(evaluations,
                                      goal_expression,
                                      domain,
                                      opt_stream_plan,
                                      pddl_plan,
                                      negative,
                                      unit_costs=False)

    stream_plan = reorder_stream_plan(stream_plan)
    stream_plan = get_synthetic_stream_plan(stream_plan, dynamic_streams)
    print('Stream plan: {}\n'
          'Action plan: {}'.format(stream_plan, opt_action_plan))
    opt_cost = 0  # TODO: compute this cost for real

    store.start_time = time.time()
    store.max_cost = store.best_cost
    #sampling_time = 10
    sampling_time = 0
    queue = SkeletonQueue(evaluations, store)
    queue.new_skeleton(stream_plan, opt_action_plan, opt_cost)
    queue.greedily_process(sampling_time)
Example #29
def recover_stream_plan(evaluations, current_plan, opt_evaluations,
                        goal_expression, domain, node_from_atom, action_plan,
                        axiom_plans, negative, replan_step):
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the Cartesian product of types (slow)
    # Added effects cancel out removed effects
    # TODO: node_from_atom is a subset of opt_evaluations (only missing functions)
    real_task = task_from_domain_problem(
        domain, get_problem(evaluations, goal_expression, domain))
    opt_task = task_from_domain_problem(
        domain, get_problem(opt_evaluations, goal_expression, domain))
    negative_from_name = {
        external.blocked_predicate: external
        for external in negative if external.is_negated()
    }
    real_states, combined_plan = recover_negative_axioms(
        real_task, opt_task, axiom_plans, action_plan, negative_from_name)
    function_plan = compute_function_plan(opt_evaluations, action_plan)

    # TODO: record the supporting facts
    full_preimage = plan_preimage(combined_plan, [])
    stream_preimage = set(full_preimage) - real_states[0]
    negative_preimage = set(
        filter(lambda a: a.predicate in negative_from_name, stream_preimage))
    function_plan.update(
        convert_negative(negative_preimage, negative_from_name, full_preimage,
                         real_states))
    positive_preimage = stream_preimage - negative_preimage

    steps_from_fact = {
        fact_from_fd(l): full_preimage[l]
        for l in positive_preimage if not l.negated
    }
    target_facts = {
        fact
        for fact in steps_from_fact.keys() if get_prefix(fact) != EQ
    }
    #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
    # visualize_constraints(map(fact_from_fd, target_facts))

    # TODO: get_steps_from_stream
    stream_plan = []
    step_from_stream = {}
    for result in current_plan:
        # TODO: actually compute when these are needed + dependencies
        step_from_stream[result] = 0
        if isinstance(result.external, Function) or (result.external
                                                     in negative):
            function_plan.add(
                result)  # Prevents these results from being pruned
        else:
            stream_plan.append(result)

    curr_evaluations = evaluations_from_stream_plan(evaluations,
                                                    stream_plan,
                                                    max_effort=None)
    extraction_facts = target_facts - set(
        map(fact_from_evaluation, curr_evaluations))
    step_from_fact = {
        fact: min(steps_from_fact[fact])
        for fact in extraction_facts
    }
    extract_stream_plan(node_from_atom, extraction_facts, stream_plan,
                        step_from_fact, step_from_stream)
    stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan,
                                          target_facts)

    eager_plan = []
    actions_from_step = {}
    for result in (stream_plan + list(function_plan)):
        if (result.opt_index != 0) or (step_from_stream.get(result, 0) <
                                       replan_step):
            eager_plan.append(result)
        else:
            actions_from_step.setdefault(step_from_stream[result],
                                         []).append(result.get_action())
    eager_plan = convert_fluent_streams(eager_plan, real_states, action_plan,
                                        steps_from_fact, node_from_atom)

    # print(action_plan)
    # # TODO: propagate this forward in the future
    # start_from_stream = {}
    # for result in eager_plan:
    #     stuff = list(map(fd_from_fact, get_fluent_domain(result)))
    #     index = len(real_states)
    #     for i, state in enumerate(real_states):
    #         if conditions_hold(state, stuff):
    #             start_from_stream[result] = i
    #             index = i
    #             break
    #     #else:
    #     #start_from_stream[result] = len(real_states)
    #     print(index, result)

    # TODO: some sort of obj side-effect bug that requires obj_from_pddl to be applied last (likely due to fluent streams)
    #action_plan = transform_plan_args(map(pddl_from_instance, action_instances), obj_from_pddl)
    for step, action in enumerate(action_plan):
        actions_from_step.setdefault(step, []).append(
            transform_action_args(pddl_from_instance(action), obj_from_pddl))
    action_plan = list(
        flatten(actions_from_step[step] for step in sorted(actions_from_step)))
    return eager_plan, action_plan
Example #30
def recover_stream_plan(evaluations, current_plan, opt_evaluations,
                        goal_expression, domain, node_from_atom, action_plan,
                        axiom_plans, negative, replan_step):
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the Cartesian product of types (slow)
    # Added effects cancel out removed effects
    # TODO: node_from_atom is a subset of opt_evaluations (only missing functions)
    real_task = task_from_domain_problem(
        domain, get_problem(evaluations, goal_expression, domain))
    opt_task = task_from_domain_problem(
        domain, get_problem(opt_evaluations, goal_expression, domain))
    negative_from_name = {
        external.blocked_predicate: external
        for external in negative if external.is_negated()
    }
    real_states, full_plan = recover_negative_axioms(real_task, opt_task,
                                                     axiom_plans, action_plan,
                                                     negative_from_name)
    function_plan = compute_function_plan(opt_evaluations, action_plan)

    full_preimage = plan_preimage(full_plan,
                                  [])  # Does not contain the stream preimage!
    negative_preimage = set(
        filter(lambda a: a.predicate in negative_from_name, full_preimage))
    negative_plan = convert_negative(negative_preimage, negative_from_name,
                                     full_preimage, real_states)
    function_plan.update(negative_plan)
    # TODO: OrderedDict for these plans

    # TODO: this assumes that actions do not negate preimage goals
    positive_preimage = {
        l
        for l in (set(full_preimage) - real_states[0] - negative_preimage)
        if not l.negated
    }
    steps_from_fact = {
        fact_from_fd(l): full_preimage[l]
        for l in positive_preimage
    }
    last_from_fact = {
        fact: min(steps)
        for fact, steps in steps_from_fact.items() if get_prefix(fact) != EQ
    }
    #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
    # visualize_constraints(map(fact_from_fd, target_facts))

    for result, step in function_plan.items():
        for fact in result.get_domain():
            last_from_fact[fact] = min(step, last_from_fact.get(fact, INF))

    # TODO: get_steps_from_stream
    stream_plan = []
    last_from_stream = dict(function_plan)
    for result in current_plan:  # + negative_plan?
        # TODO: actually compute when these are needed + dependencies
        last_from_stream[result] = 0
        if isinstance(result.external, Function) or (result.external
                                                     in negative):
            if len(action_plan) != replan_step:
                raise NotImplementedError(
                )  # TODO: deferring negated optimizers
            # Prevents these results from being pruned
            function_plan[result] = replan_step
        else:
            stream_plan.append(result)

    curr_evaluations = evaluations_from_stream_plan(evaluations,
                                                    stream_plan,
                                                    max_effort=None)
    extraction_facts = set(last_from_fact) - set(
        map(fact_from_evaluation, curr_evaluations))
    extract_stream_plan(node_from_atom, extraction_facts, stream_plan)

    # Recomputing due to postprocess_stream_plan
    stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan,
                                          last_from_fact)
    node_from_atom = get_achieving_streams(evaluations,
                                           stream_plan,
                                           max_effort=None)
    fact_sequence = [set(result.get_domain())
                     for result in stream_plan] + [extraction_facts]
    for facts in reversed(fact_sequence):  # Bellman ford
        for fact in facts:  # could flatten instead
            result = node_from_atom[fact].result
            if result is None:
                continue
            step = last_from_fact[fact] if result.is_deferrable() else 0
            last_from_stream[result] = min(step,
                                           last_from_stream.get(result, INF))
            for domain_fact in result.instance.get_domain():
                last_from_fact[domain_fact] = min(
                    last_from_stream[result],
                    last_from_fact.get(domain_fact, INF))
    stream_plan.extend(function_plan)

    # Useful to recover the correct DAG
    partial_orders = set()
    for child in stream_plan:
        # TODO: account for fluent objects
        for fact in child.get_domain():
            parent = node_from_atom[fact].result
            if parent is not None:
                partial_orders.add((parent, child))
    #stream_plan = topological_sort(stream_plan, partial_orders)

    bound_objects = set()
    for result in stream_plan:
        if (last_from_stream[result]
                == 0) or not result.is_deferrable(bound_objects=bound_objects):
            for ancestor in get_ancestors(result, partial_orders) | {result}:
                # TODO: this might change descendants of ancestor. Perform in a while loop.
                last_from_stream[ancestor] = 0
                if isinstance(ancestor, StreamResult):
                    bound_objects.update(out for out in ancestor.output_objects
                                         if out.is_unique())

    #local_plan = [] # TODO: not sure what this was for
    #for fact, step in sorted(last_from_fact.items(), key=lambda pair: pair[1]): # Earliest to latest
    #    print(step, fact)
    #    extract_stream_plan(node_from_atom, [fact], local_plan, last_from_fact, last_from_stream)

    # Each stream has an earliest evaluation time
    # When computing the latest, use 0 if something isn't deferred
    # Evaluate each stream as soon as possible
    # Option to defer streams after a point in time?
    # TODO: action costs for streams that encode uncertainty
    state = set(real_task.init)
    remaining_results = list(stream_plan)
    first_from_stream = {}
    #assert 1 <= replan_step # Plan could be empty
    for step, instance in enumerate(action_plan):
        for result in list(remaining_results):
            # TODO: could do this more efficiently if need be
            domain = result.get_domain() + get_fluent_domain(result)
            if conditions_hold(state, map(fd_from_fact, domain)):
                remaining_results.remove(result)
                certified = {
                    fact
                    for fact in result.get_certified()
                    if get_prefix(fact) != EQ
                }
                state.update(map(fd_from_fact, certified))
                if step != 0:
                    first_from_stream[result] = step
        # TODO: assumes no fluent axiom domain conditions
        apply_action(state, instance)
    #assert not remaining_results # Not true if retrace
    if first_from_stream:
        replan_step = min(replan_step, *first_from_stream.values())

    eager_plan = []
    results_from_step = defaultdict(list)
    for result in stream_plan:
        earliest_step = first_from_stream.get(result, 0)
        latest_step = last_from_stream.get(result, 0)
        assert earliest_step <= latest_step
        defer = replan_step <= latest_step
        if not defer:
            eager_plan.append(result)
        # We only perform a deferred evaluation if it has all deferred dependencies
        # TODO: make a flag that also allows dependencies to be deferred
        future = (earliest_step != 0) or defer
        if future:
            future_step = latest_step if defer else earliest_step
            results_from_step[future_step].append(result)

    # TODO: some sort of obj side-effect bug that requires obj_from_pddl to be applied last (likely due to fluent streams)
    eager_plan = convert_fluent_streams(eager_plan, real_states, action_plan,
                                        steps_from_fact, node_from_atom)
    combined_plan = []
    for step, action in enumerate(action_plan):
        combined_plan.extend(result.get_action()
                             for result in results_from_step[step])
        combined_plan.append(
            transform_action_args(pddl_from_instance(action), obj_from_pddl))

    # TODO: the returned facts have the same side-effect bug as above
    # TODO: annotate when each preimage fact is used
    preimage_facts = {
        fact_from_fd(l)
        for l in full_preimage if (l.predicate != EQ) and not l.negated
    }
    for negative_result in negative_plan:  # TODO: function_plan
        preimage_facts.update(negative_result.get_certified())
    for result in eager_plan:
        preimage_facts.update(result.get_domain())
        # Might not be able to regenerate facts involving the outputs of streams
        preimage_facts.update(
            result.get_certified())  # Some facts might not be in the preimage
    # TODO: record streams and axioms
    return eager_plan, OptPlan(combined_plan, preimage_facts)