Example 1
def shorten_stream_plan(evaluations, stream_plan, target_facts):
    all_subgoals = set(target_facts) | set(
        flatten(r.instance.get_domain() for r in stream_plan))
    evaluation_subgoals = set(
        filter(evaluations.__contains__, map(evaluation_from_fact,
                                             all_subgoals)))
    open_subgoals = set(
        filter(lambda f: evaluation_from_fact(f) not in evaluations,
               all_subgoals))
    results_from_fact = {}
    for result in stream_plan:
        for fact in result.get_certified():
            results_from_fact.setdefault(fact, []).append(result)

    for removed_result in reversed(stream_plan):  # TODO: only do in order?
        certified_subgoals = open_subgoals & set(
            removed_result.get_certified())
        if not certified_subgoals:  # Could combine with following
            new_stream_plan = stream_plan[:]
            new_stream_plan.remove(removed_result)
            return new_stream_plan
        if all(2 <= len(results_from_fact[fact])
               for fact in certified_subgoals):
            node_from_atom = get_achieving_streams(
                evaluation_subgoals,
                set(stream_plan) - {removed_result})
            if all(fact in node_from_atom for fact in target_facts):
                new_stream_plan = []
                extract_stream_plan(node_from_atom, target_facts,
                                    new_stream_plan)
                return new_stream_plan
    return None
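Since shorten_stream_plan returns a strictly shorter plan when it succeeds and None once no redundant result remains, a caller could apply it to a fixed point. A minimal usage sketch under that assumption (the wrapper name is hypothetical):

def fully_shorten_stream_plan(evaluations, stream_plan, target_facts):
    # Repeatedly drop redundant results until no further shortening is reported.
    while True:
        shortened = shorten_stream_plan(evaluations, stream_plan, target_facts)
        if shortened is None:
            return stream_plan
        stream_plan = shortened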
Example 2
def evaluations_from_stream_plan(evaluations, stream_results, max_effort=INF):
    opt_evaluations = set(evaluations)
    for result in stream_results:
        assert(not result.instance.disabled)
        assert(not result.instance.enumerated)
        domain = set(map(evaluation_from_fact, result.instance.get_domain()))
        assert(domain <= opt_evaluations)
        opt_evaluations.update(map(evaluation_from_fact, result.get_certified()))
    node_from_atom = get_achieving_streams(evaluations, stream_results)
    result_from_evaluation = {evaluation_from_fact(f): n.result
                              for f, n in node_from_atom.items() if n.effort < max_effort}
    return result_from_evaluation
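The asserts above require stream_results to be causally ordered: each result's domain facts must already be certified by an earlier result or present in evaluations. A standalone sketch of the same invariant over plain fact sets (hypothetical helper, not part of this codebase):

def is_causally_ordered(initial_facts, plan):
    # plan: sequence of (domain_facts, certified_facts) pairs
    available = set(initial_facts)
    for domain_facts, certified_facts in plan:
        if not set(domain_facts) <= available:
            return False  # a result is applied before its preconditions are certified
        available.update(certified_facts)
    return True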
Example 3
def plan_streams(evaluations, goal_expression, domain, all_results, negative, effort_weight, max_effort,
                 simultaneous=False, reachieve=True, replan_actions=set(), **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    #reachieve = reachieve and not using_optimizers(all_results)
    #for i, result in enumerate(all_results):
    #    print(i, result, result.get_effort())
    applied_results, deferred_results = partition_results(
        evaluations, all_results, apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, deferred_from_name = add_stream_actions(domain, deferred_results)

    if reachieve and not using_optimizers(all_results):
        achieved_results = {n.result for n in evaluations.values() if isinstance(n.result, Result)}
        init_evaluations = {e for e, n in evaluations.items() if n.result not in achieved_results}
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations # For clarity

    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(evaluations, applied_results, # TODO: apply to all_results?
                                           max_effort=max_effort)
    opt_evaluations = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()}
    if UNIVERSAL_TO_CONDITIONAL or using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)

    temporal = isinstance(stream_domain, SimplifiedDomain)
    optimistic_fn = solve_optimistic_temporal if temporal else solve_optimistic_sequential
    instantiated, action_instances, temporal_plan, cost = optimistic_fn(
        domain, stream_domain, applied_results, all_results, opt_evaluations,
        node_from_atom, goal_expression, effort_weight, **kwargs)
    if action_instances is None:
        return FAILED, FAILED, cost

    action_instances, axiom_plans = recover_axioms_plans(instantiated, action_instances)
    # TODO: extract out the minimum set of conditional effects that are actually required
    #simplify_conditional_effects(instantiated.task, action_instances)
    stream_plan, action_instances = recover_simultaneous(
        applied_results, negative, deferred_from_name, action_instances)

    action_plan = transform_plan_args(map(pddl_from_instance, action_instances), obj_from_pddl)
    replan_step = min([step+1 for step, action in enumerate(action_plan)
                       if action.name in replan_actions] or [len(action_plan)+1]) # step after action application

    stream_plan, opt_plan = recover_stream_plan(evaluations, stream_plan, opt_evaluations, goal_expression, stream_domain,
        node_from_atom, action_instances, axiom_plans, negative, replan_step)
    if temporal_plan is not None:
        # TODO: handle deferred streams
        assert all(isinstance(action, Action) for action in opt_plan.action_plan)
        opt_plan.action_plan[:] = temporal_plan
    return stream_plan, opt_plan, cost
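The replan_step computation relies on Python's `or` supplying a fallback list when the comprehension is empty, so min never receives an empty argument. A standalone illustration of that idiom with made-up action names:

action_names = ['move', 'pick', 'place']
replan_actions = {'pick'}
replan_step = min([step + 1 for step, name in enumerate(action_names)
                   if name in replan_actions] or [len(action_names) + 1])
assert replan_step == 2  # step after the first matching action; 4 if none matched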
Example 4
def relaxed_stream_plan(evaluations, goal_expression, domain, stream_results, negative, unit_costs,
                        unit_efforts, effort_weight, debug=False, **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    applied_results, deferred_results = partition_results(evaluations, stream_results,
                                                          apply_now=lambda r: not r.external.info.simultaneous)
    stream_domain, result_from_name = add_stream_actions(domain, deferred_results)
    node_from_atom = get_achieving_streams(evaluations, applied_results)
    opt_evaluations = apply_streams(evaluations, applied_results) # if n.effort < INF
    if any(map(is_optimizer_result, stream_results)):
        goal_expression = augment_goal(stream_domain, goal_expression)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain, unit_costs) # begin_metric

    with Verbose(debug):
        instantiated = instantiate_task(task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF
    if (effort_weight is not None) or any(map(is_optimizer_result, applied_results)):
        add_stream_costs(node_from_atom, instantiated, unit_efforts, effort_weight)
    add_optimizer_axioms(stream_results, instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True
    #sas_task = sas_from_pddl(task)
    #action_plan, _ = serialized_solve_from_task(sas_task, debug=debug, **kwargs)
    action_plan, _ = abstrips_solve_from_task(sas_task, debug=debug, **kwargs)
    #action_plan, _ = abstrips_solve_from_task_sequential(sas_task, debug=debug, **kwargs)
    if action_plan is None:
        return None, INF

    applied_plan, function_plan = partition_external_plan(recover_stream_plan(
        evaluations, goal_expression, stream_domain, applied_results, action_plan, negative, unit_costs))
    deferred_plan, action_plan = partition_plan(action_plan, result_from_name)
    stream_plan = applied_plan + deferred_plan + function_plan
    action_plan = obj_from_pddl_plan(action_plan)
    cost = get_plan_cost(opt_evaluations, action_plan, domain, unit_costs)
    combined_plan = stream_plan + action_plan
    return combined_plan, cost
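partition_external_plan is used here to split the recovered external plan into stream results and pure function results. A plausible sketch of such a split, assuming results expose an external attribute and a Function class as referenced in Example 6 (an assumption, not the library's definition):

def partition_external_plan_sketch(external_plan):
    # Separate function evaluations (costs/tests) from object-producing streams.
    function_plan = [r for r in external_plan if isinstance(r.external, Function)]
    stream_plan = [r for r in external_plan if not isinstance(r.external, Function)]
    return stream_plan, function_plan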
Example 5
def plan_streams(evaluations, goal_expression, domain, all_results, negative, effort_weight, max_effort,
                 simultaneous=False, reachieve=True, **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    #reachieve = reachieve and not using_optimizers(all_results)
    applied_results, deferred_results = partition_results(
        evaluations, all_results, apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, deferred_from_name = add_stream_actions(domain, deferred_results)

    if reachieve and not using_optimizers(all_results):
        achieved_results = {n.result for n in evaluations.values() if isinstance(n.result, Result)}
        init_evaluations = {e for e, n in evaluations.items() if n.result not in achieved_results}
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations # For clarity
    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(evaluations, applied_results, # TODO: apply to all_results?
                                           max_effort=max_effort)
    opt_evaluations = {evaluation_from_fact(f): n.result for f, n in node_from_atom.items()}
    if UNIVERSAL_TO_CONDITIONAL or using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain, goal_expression)
    instantiated, action_instances, action_plan, cost = solve_optimistic_sequential(
        domain, stream_domain, applied_results, all_results, opt_evaluations, node_from_atom, goal_expression, effort_weight, **kwargs)
    if action_plan is None:
        return action_plan, cost

    axiom_plans = recover_axioms_plans(instantiated, action_instances)
    # TODO: extract out the minimum set of conditional effects that are actually required
    #simplify_conditional_effects(instantiated.task, action_instances)
    stream_plan, action_instances = recover_simultaneous(
        applied_results, negative, deferred_from_name, action_instances)

    stream_plan = recover_stream_plan(evaluations, stream_plan, opt_evaluations, goal_expression,
                                      stream_domain, node_from_atom, action_instances, axiom_plans, negative)

    combined_plan = stream_plan + action_plan
    return combined_plan, cost
Example 6
def recover_stream_plan(evaluations, current_plan, opt_evaluations,
                        goal_expression, domain, node_from_atom, action_plan,
                        axiom_plans, negative, replan_step):
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the Cartesian product of types (slow)
    # Added effects cancel out removed effects
    # TODO: node_from_atom is a subset of opt_evaluations (only missing functions)
    real_task = task_from_domain_problem(
        domain, get_problem(evaluations, goal_expression, domain))
    opt_task = task_from_domain_problem(
        domain, get_problem(opt_evaluations, goal_expression, domain))
    negative_from_name = {
        external.blocked_predicate: external
        for external in negative if external.is_negated()
    }
    real_states, full_plan = recover_negative_axioms(real_task, opt_task,
                                                     axiom_plans, action_plan,
                                                     negative_from_name)
    function_plan = compute_function_plan(opt_evaluations, action_plan)

    full_preimage = plan_preimage(full_plan,
                                  [])  # Does not contain the stream preimage!
    negative_preimage = set(
        filter(lambda a: a.predicate in negative_from_name, full_preimage))
    negative_plan = convert_negative(negative_preimage, negative_from_name,
                                     full_preimage, real_states)
    function_plan.update(negative_plan)
    # TODO: OrderedDict for these plans

    # TODO: this assumes that actions do not negate preimage goals
    positive_preimage = {
        l
        for l in (set(full_preimage) - real_states[0] - negative_preimage)
        if not l.negated
    }
    steps_from_fact = {
        fact_from_fd(l): full_preimage[l]
        for l in positive_preimage
    }
    last_from_fact = {
        fact: min(steps)
        for fact, steps in steps_from_fact.items() if get_prefix(fact) != EQ
    }
    #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
    # visualize_constraints(map(fact_from_fd, target_facts))

    for result, step in function_plan.items():
        for fact in result.get_domain():
            last_from_fact[fact] = min(step, last_from_fact.get(fact, INF))

    # TODO: get_steps_from_stream
    stream_plan = []
    last_from_stream = dict(function_plan)
    for result in current_plan:  # + negative_plan?
        # TODO: actually compute when these are needed + dependencies
        last_from_stream[result] = 0
        if isinstance(result.external, Function) or (result.external
                                                     in negative):
            if len(action_plan) != replan_step:
                raise NotImplementedError(
                )  # TODO: deferring negated optimizers
            # Prevents these results from being pruned
            function_plan[result] = replan_step
        else:
            stream_plan.append(result)

    curr_evaluations = evaluations_from_stream_plan(evaluations,
                                                    stream_plan,
                                                    max_effort=None)
    extraction_facts = set(last_from_fact) - set(
        map(fact_from_evaluation, curr_evaluations))
    extract_stream_plan(node_from_atom, extraction_facts, stream_plan)

    # Recomputing due to postprocess_stream_plan
    stream_plan = postprocess_stream_plan(evaluations, domain, stream_plan,
                                          last_from_fact)
    node_from_atom = get_achieving_streams(evaluations,
                                           stream_plan,
                                           max_effort=None)
    fact_sequence = [set(result.get_domain())
                     for result in stream_plan] + [extraction_facts]
    for facts in reversed(fact_sequence):  # Bellman-Ford
        for fact in facts:  # could flatten instead
            result = node_from_atom[fact].result
            if result is None:
                continue
            step = last_from_fact[fact] if result.is_deferrable() else 0
            last_from_stream[result] = min(step,
                                           last_from_stream.get(result, INF))
            for domain_fact in result.instance.get_domain():
                last_from_fact[domain_fact] = min(
                    last_from_stream[result],
                    last_from_fact.get(domain_fact, INF))
    stream_plan.extend(function_plan)

    # Useful to recover the correct DAG
    partial_orders = set()
    for child in stream_plan:
        # TODO: account for fluent objects
        for fact in child.get_domain():
            parent = node_from_atom[fact].result
            if parent is not None:
                partial_orders.add((parent, child))
    #stream_plan = topological_sort(stream_plan, partial_orders)

    bound_objects = set()
    for result in stream_plan:
        if (last_from_stream[result]
                == 0) or not result.is_deferrable(bound_objects=bound_objects):
            for ancestor in get_ancestors(result, partial_orders) | {result}:
                # TODO: this might change descendants of ancestor. Perform in a while loop.
                last_from_stream[ancestor] = 0
                if isinstance(ancestor, StreamResult):
                    bound_objects.update(out for out in ancestor.output_objects
                                         if out.is_unique())

    #local_plan = [] # TODO: not sure what this was for
    #for fact, step in sorted(last_from_fact.items(), key=lambda pair: pair[1]): # Earliest to latest
    #    print(step, fact)
    #    extract_stream_plan(node_from_atom, [fact], local_plan, last_from_fact, last_from_stream)

    # Each stream has an earliest evaluation time
    # When computing the latest, use 0 if something isn't deferred
    # Evaluate each stream as soon as possible
    # Option to defer streams after a point in time?
    # TODO: action costs for streams that encode uncertainty
    state = set(real_task.init)
    remaining_results = list(stream_plan)
    first_from_stream = {}
    #assert 1 <= replan_step # Plan could be empty
    for step, instance in enumerate(action_plan):
        for result in list(remaining_results):
            # TODO: could do this more efficiently if need be
            domain = result.get_domain() + get_fluent_domain(result)
            if conditions_hold(state, map(fd_from_fact, domain)):
                remaining_results.remove(result)
                certified = {
                    fact
                    for fact in result.get_certified()
                    if get_prefix(fact) != EQ
                }
                state.update(map(fd_from_fact, certified))
                if step != 0:
                    first_from_stream[result] = step
        # TODO: assumes no fluent axiom domain conditions
        apply_action(state, instance)
    #assert not remaining_results # Not true if retrace
    if first_from_stream:
        replan_step = min(replan_step, *first_from_stream.values())

    eager_plan = []
    results_from_step = defaultdict(list)
    for result in stream_plan:
        earliest_step = first_from_stream.get(result, 0)
        latest_step = last_from_stream.get(result, 0)
        assert earliest_step <= latest_step
        defer = replan_step <= latest_step
        if not defer:
            eager_plan.append(result)
        # We only perform a deferred evaluation if it has all deferred dependencies
        # TODO: make a flag that also allows dependencies to be deferred
        future = (earliest_step != 0) or defer
        if future:
            future_step = latest_step if defer else earliest_step
            results_from_step[future_step].append(result)

    # TODO: some sort of obj side-effect bug that requires obj_from_pddl to be applied last (likely due to fluent streams)
    eager_plan = convert_fluent_streams(eager_plan, real_states, action_plan,
                                        steps_from_fact, node_from_atom)
    combined_plan = []
    for step, action in enumerate(action_plan):
        combined_plan.extend(result.get_action()
                             for result in results_from_step[step])
        combined_plan.append(
            transform_action_args(pddl_from_instance(action), obj_from_pddl))

    # TODO: the returned facts have the same side-effect bug as above
    # TODO: annotate when each preimage fact is used
    preimage_facts = {
        fact_from_fd(l)
        for l in full_preimage if (l.predicate != EQ) and not l.negated
    }
    for negative_result in negative_plan:  # TODO: function_plan
        preimage_facts.update(negative_result.get_certified())
    for result in eager_plan:
        preimage_facts.update(result.get_domain())
        # Might not be able to regenerate facts involving the outputs of streams
        preimage_facts.update(
            result.get_certified())  # Some facts might not be in the preimage
    # TODO: record streams and axioms
    return eager_plan, OptPlan(combined_plan, preimage_facts)
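get_ancestors is applied to the (parent, child) edges collected in partial_orders to pull in everything a non-deferrable result depends on. A minimal sketch of such a helper (hypothetical, not the library's definition), walking parent edges transitively:

def get_ancestors_sketch(node, partial_orders):
    parents_from_child = {}
    for parent, child in partial_orders:
        parents_from_child.setdefault(child, set()).add(parent)
    ancestors, queue = set(), [node]
    while queue:
        current = queue.pop()
        for parent in parents_from_child.get(current, set()):
            if parent not in ancestors:
                ancestors.add(parent)
                queue.append(parent)
    return ancestors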
Example 7
def relaxed_stream_plan(evaluations,
                        goal_expression,
                        domain,
                        all_results,
                        negative,
                        unit_efforts,
                        effort_weight,
                        max_effort,
                        simultaneous=False,
                        reachieve=True,
                        unit_costs=False,
                        debug=False,
                        **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    applied_results, deferred_results = partition_results(
        evaluations,
        all_results,
        apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, result_from_name = add_stream_actions(
        domain, deferred_results)
    opt_evaluations = apply_streams(evaluations,
                                    applied_results)  # if n.effort < INF

    if reachieve:
        achieved_results = {
            r
            for r in evaluations.values() if isinstance(r, Result)
        }
        init_evaluations = {
            e
            for e, r in evaluations.items() if r not in achieved_results
        }
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations  # For clarity
    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(evaluations,
                                           applied_results,
                                           unit_efforts=unit_efforts,
                                           max_effort=max_effort)
    if using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain,
                                                    goal_expression)
    problem = get_problem(opt_evaluations, goal_expression, stream_domain,
                          unit_costs)  # begin_metric

    with Verbose(debug):
        instantiated = instantiate_task(
            task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF
    cost_from_action = {action: action.cost for action in instantiated.actions}
    if (effort_weight is not None) or using_optimizers(applied_results):
        add_stream_efforts(node_from_atom,
                           instantiated,
                           effort_weight,
                           unit_efforts=unit_efforts)
    add_optimizer_axioms(all_results, instantiated)
    action_from_name = rename_instantiated_actions(instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True

    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    action_plan, _ = solve_from_task(sas_task, debug=debug, **kwargs)
    if action_plan is None:
        return None, INF
    action_instances = [action_from_name[name] for name, _ in action_plan]
    cost = get_plan_cost(action_instances, cost_from_action, unit_costs)
    axiom_plans = recover_axioms_plans(instantiated, action_instances)

    applied_plan, function_plan = partition_external_plan(
        recover_stream_plan(evaluations, opt_evaluations, goal_expression,
                            stream_domain, node_from_atom, action_instances,
                            axiom_plans, negative, unit_costs))
    #action_plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)
    action_plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))

    deferred_plan, action_plan = partition_plan(action_plan, result_from_name)
    stream_plan = applied_plan + deferred_plan + function_plan
    combined_plan = stream_plan + action_plan
    return combined_plan, cost
Example 8
def recover_stream_plan(evaluations, goal_expression, domain, stream_results, action_plan, negative, unit_costs):
    import pddl
    import instantiate
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the Cartesian product of types (slow)
    # Added effects cancel out removed effects

    real_task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs))
    node_from_atom = get_achieving_streams(evaluations, stream_results)
    opt_evaluations = apply_streams(evaluations, stream_results)
    opt_task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs))
    function_assignments = {fact.fluent: fact.expression for fact in opt_task.init  # init_facts
                            if isinstance(fact, pddl.f_expression.FunctionAssignment)}
    type_to_objects = instantiate.get_objects_by_type(opt_task.objects, opt_task.types)
    results_from_head = get_results_from_head(opt_evaluations)
    action_instances = instantiate_actions(opt_task, type_to_objects, function_assignments, action_plan)
    negative_from_name = get_negative_predicates(negative)
    axioms_from_name = get_derived_predicates(opt_task.axioms)

    opt_task.init = set(opt_task.init)
    real_states = [set(real_task.init)] # TODO: had old way of doing this (~July 2018)
    preimage_plan = []
    function_plan = set()
    for layer in action_instances:
        for pair, action_instance in layer:
            axiom_plan = extract_axiom_plan(opt_task, action_instance, negative_from_name,
                                            static_state=real_states[-1])
            if axiom_plan is None:
                continue
            simplify_conditional_effects(real_states[-1], opt_task.init, action_instance, axioms_from_name)
            preimage_plan.extend(axiom_plan + [action_instance])
            apply_action(opt_task.init, action_instance)
            real_states.append(set(real_states[-1]))
            apply_action(real_states[-1], action_instance)
            if not unit_costs and (pair is not None):
                function_result = extract_function_results(results_from_head, *pair)
                if function_result is not None:
                    function_plan.add(function_result)
            break
        else:
            raise RuntimeError('No action instances are applicable')

    # TODO: could instead just accumulate difference between real and opt
    full_preimage = plan_preimage(preimage_plan, [])
    stream_preimage = set(full_preimage) - real_states[0]
    negative_preimage = set(filter(lambda a: a.predicate in negative_from_name, stream_preimage))
    positive_preimage = stream_preimage - negative_preimage
    function_plan.update(convert_negative(negative_preimage, negative_from_name, full_preimage, real_states))

    step_from_fact = {fact_from_fd(l): full_preimage[l] for l in positive_preimage if not l.negated}
    target_facts = list(step_from_fact.keys())
    #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
    stream_plan = []
    extract_stream_plan(node_from_atom, target_facts, stream_plan)
    stream_plan = prune_stream_plan(evaluations, stream_plan, target_facts)
    stream_plan = convert_fluent_streams(stream_plan, real_states, step_from_fact, node_from_atom)
    # visualize_constraints(map(fact_from_fd, stream_preimage))

    if DO_RESCHEDULE: # TODO: detect this based on unique or not
        # TODO: maybe test if partial order between two ways of achieving facts, if not prune
        new_stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_plan)
        if new_stream_plan is not None:
            stream_plan = new_stream_plan
    return stream_plan + list(function_plan)
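The layer loop above uses Python's for/else: the else branch runs only when the loop over a layer finishes without a break, i.e. when no action instance yielded an axiom plan. A standalone illustration of that control flow (the helper name is made up):

def first_applicable(candidates, is_applicable):
    for candidate in candidates:
        if not is_applicable(candidate):
            continue
        chosen = candidate
        break  # success: stop scanning this layer
    else:
        # Reached only if the loop completed without a break
        raise RuntimeError('No action instances are applicable')
    return chosen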
Example 9
def plan_streams(evaluations,
                 goal_expression,
                 domain,
                 all_results,
                 negative,
                 effort_weight,
                 max_effort,
                 simultaneous=False,
                 reachieve=True,
                 debug=False,
                 **kwargs):
    # TODO: alternatively could translate with stream actions on real opt_state and just discard them
    # TODO: only consider axioms that have stream conditions?
    #reachieve = reachieve and not using_optimizers(all_results)
    applied_results, deferred_results = partition_results(
        evaluations,
        all_results,
        apply_now=lambda r: not (simultaneous or r.external.info.simultaneous))
    stream_domain, deferred_from_name = add_stream_actions(
        domain, deferred_results)

    if reachieve and not using_optimizers(all_results):
        achieved_results = {
            n.result
            for n in evaluations.values() if isinstance(n.result, Result)
        }
        init_evaluations = {
            e
            for e, n in evaluations.items() if n.result not in achieved_results
        }
        applied_results = achieved_results | set(applied_results)
        evaluations = init_evaluations  # For clarity
    # TODO: could iteratively increase max_effort
    node_from_atom = get_achieving_streams(
        evaluations,
        applied_results,  # TODO: apply to all_results?
        max_effort=max_effort)
    opt_evaluations = {
        evaluation_from_fact(f): n.result
        for f, n in node_from_atom.items()
    }
    if using_optimizers(all_results):
        goal_expression = add_unsatisfiable_to_goal(stream_domain,
                                                    goal_expression)
    problem = get_problem(opt_evaluations, goal_expression,
                          stream_domain)  # begin_metric
    with Verbose(debug):
        instantiated = instantiate_task(
            task_from_domain_problem(stream_domain, problem))
    if instantiated is None:
        return None, INF

    if using_optimizers(all_results):
        # TODO: reachieve=False when using optimizers or should add applied facts
        instantiate_optimizer_axioms(instantiated, evaluations,
                                     goal_expression, domain, all_results)
    cost_from_action = {action: action.cost for action in instantiated.actions}
    add_stream_efforts(node_from_atom, instantiated, effort_weight)
    if using_optimizers(applied_results):
        add_optimizer_effects(instantiated, node_from_atom)
    action_from_name = rename_instantiated_actions(instantiated)
    with Verbose(debug):
        sas_task = sas_from_instantiated(instantiated)
        sas_task.metric = True

    # TODO: apply renaming to hierarchy as well
    # solve_from_task | serialized_solve_from_task | abstrips_solve_from_task | abstrips_solve_from_task_sequential
    action_plan, raw_cost = solve_from_task(sas_task, debug=debug, **kwargs)
    #print(raw_cost)
    if action_plan is None:
        return None, INF
    action_instances = [action_from_name[name] for name, _ in action_plan]
    simplify_conditional_effects(instantiated.task, action_instances)
    stream_plan, action_instances = recover_simultaneous(
        applied_results, negative, deferred_from_name, action_instances)
    cost = get_plan_cost(action_instances, cost_from_action)
    axiom_plans = recover_axioms_plans(instantiated, action_instances)

    stream_plan = recover_stream_plan(evaluations, stream_plan,
                                      opt_evaluations, goal_expression,
                                      stream_domain, node_from_atom,
                                      action_instances, axiom_plans, negative)
    #action_plan = obj_from_pddl_plan(parse_action(instance.name) for instance in action_instances)
    action_plan = obj_from_pddl_plan(map(pddl_from_instance, action_instances))

    combined_plan = stream_plan + action_plan
    return combined_plan, cost