Code Example #1
def replan_with_optimizers(evaluations, external_plan, domain, optimizers):
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return None
    optimizers = list(
        filter(lambda s: isinstance(s, ComponentStream), optimizers))
    if not optimizers:
        return None
    stream_plan, function_plan = partition_external_plan(external_plan)
    free_parameters = {o for r in stream_plan for o in r.output_objects}
    #free_parameters = {o for r in stream_plan for o in r.output_objects if isinstance(o, OptimisticObject)}
    initial_evaluations = {
        e: n
        for e, n in evaluations.items() if n.result == INIT_EVALUATION
    }
    #initial_evaluations = evaluations
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(
            filter(
                lambda f: evaluation_from_fact(f) not in initial_evaluations,
                result.get_certified()))

    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizers, initial_evaluations,
                              free_parameters, visited_facts, new_results)
    # TODO: ensure correct ordering
    new_results = list(
        filter(lambda r: isinstance(r, ComponentStream), new_results))

    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)

    optimizer_results = []
    for optimizer in {get_optimizer(r) for r in new_results}:  # None is like a unique optimizer
        relevant_results = [
            r for r in new_results if get_optimizer(r) == optimizer
        ]
        optimizer_results.extend(
            combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))

    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    optimizer_plan = reschedule_stream_plan(initial_evaluations,
                                            goal_facts,
                                            copy.copy(domain),
                                            (stream_plan + optimizer_results),
                                            unique_binding=True)
    if not is_plan(optimizer_plan):
        return None
    return optimizer_plan + function_plan
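The loop near the end of this function groups the retraced results by their optimizer (with None acting as its own group) and combines each group separately. Below is a standalone sketch of that grouping idiom using generic Python and made-up data; combine_by_key, key_fn, and combine_fn are illustrative names, not pddlstream code.

from collections import defaultdict

def combine_by_key(results, key_fn, combine_fn):
    # Group results under their key (None is just another key, i.e. its own group),
    # then combine each group independently and concatenate the outputs.
    groups = defaultdict(list)
    for r in results:
        groups[key_fn(r)].append(r)
    combined = []
    for group in groups.values():
        combined.extend(combine_fn(group))
    return combined

# Stand-in data: group integers by parity and replace each group with its sum.
print(combine_by_key([1, 2, 3, 4], key_fn=lambda x: x % 2,
                     combine_fn=lambda group: [sum(group)]))  # prints [4, 6]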
Code Example #2
def replan_with_optimizers(evaluations, external_plan, domain, externals):
    # TODO: return multiple plans?
    # TODO: can instead have multiple goal binding combinations
    # TODO: can replan using samplers as well
    if not is_plan(external_plan):
        return external_plan
    optimizer_streams = list(
        filter(lambda s: type(s) in [VariableStream, ConstraintStream],
               externals))
    if not optimizer_streams:
        return external_plan
    stream_plan, function_plan = partition_external_plan(external_plan)
    goal_facts = set()
    for result in stream_plan:
        goal_facts.update(
            filter(lambda f: evaluation_from_fact(f) not in evaluations,
                   result.get_certified()))

    visited_facts = set()
    new_results = []
    for fact in goal_facts:
        retrace_instantiation(fact, optimizer_streams, evaluations,
                              visited_facts, new_results)
    variable_results = list(
        filter(lambda r: isinstance(r.external, VariableStream), new_results))
    constraint_results = list(
        filter(lambda r: isinstance(r.external, ConstraintStream), new_results))
    new_results = variable_results + constraint_results  # TODO: ensure correct ordering

    #from pddlstream.algorithms.scheduling.recover_streams import get_achieving_streams, extract_stream_plan
    #node_from_atom = get_achieving_streams(evaluations, stream_results) # TODO: make these lower effort
    #extract_stream_plan(node_from_atom, target_facts, stream_plan)

    optimizer_results = []
    for optimizer in {get_optimizer(r) for r in new_results}:  # None is like a unique optimizer
        relevant_results = [
            r for r in new_results if get_optimizer(r) == optimizer
        ]
        optimizer_results.extend(
            combine_optimizer_plan(relevant_results, function_plan))
    #print(str_from_object(set(map(fact_from_evaluation, evaluations))))
    #print(str_from_object(set(goal_facts)))

    # TODO: can do the flexibly sized optimizers search
    from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
    combined_plan = reschedule_stream_plan(evaluations,
                                           goal_facts,
                                           copy.copy(domain),
                                           (stream_plan + optimizer_results),
                                           unique_binding=True,
                                           unit_efforts=True)
    if not is_plan(combined_plan):
        return external_plan
    return combined_plan + function_plan
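This version additionally orders the retraced results so that VariableStream results precede ConstraintStream results before regrouping them by optimizer. A standalone sketch of that stable partition-by-type idiom follows; partition_by_type and the sample data are illustrative, not pddlstream code.

def partition_by_type(items, first_type):
    # Stable partition: instances of first_type keep their relative order and come first.
    first = [x for x in items if isinstance(x, first_type)]
    rest = [x for x in items if not isinstance(x, first_type)]
    return first + rest

# Stand-in data: strings are moved ahead of integers.
print(partition_by_type([1, 'a', 2, 'b'], first_type=str))  # prints ['a', 'b', 1, 2]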
Code Example #3
File: satisfaction.py Project: OolongQian/Robotics
def constraint_satisfaction(stream_pddl, stream_map, init, terms, stream_info={},
                            costs=True, max_cost=INF, success_cost=INF, max_time=INF,
                            unit_efforts=False, max_effort=INF,
                            max_skeletons=INF, search_sample_ratio=1, verbose=True, **search_args):
    # Approaches
    # 1) Existential quantification of bindings in goal conditions
    # 2) Backtrack useful streams and then schedule. Create arbitrary outputs for not mentioned.
    # 3) Construct all useful streams and then associate outputs with bindings
    #    Useful stream must satisfy at least one fact. How should these assignments be propagated though?
    #    Make an action that maps each stream result to unbound values?
    # TODO: include functions again for cost-sensitive satisfaction
    # TODO: convert init into streams to bind certain facts
    # TODO: investigate constraint satisfaction techniques for binding instead
    # TODO: could also instantiate all possible free parameters even if not useful
    # TODO: effort that is a function of the number of output parameters (degrees of freedom)
    # TODO: use a CSP solver instead of a planner internally
    # TODO: max_iterations?
    if not terms:
        return {}, 0, init
    constraints, negated, functions = partition_facts(set(map(obj_from_existential_expression, terms)))
    if not costs:
        functions = []
    evaluations = evaluations_from_init(init)
    goal_facts = set(filter(lambda f: evaluation_from_fact(f) not in evaluations, constraints))
    free_parameters = sorted(get_parameters(goal_facts))
    print('Parameters:', free_parameters)

    externals = parse_stream_pddl(stream_pddl, stream_map, stream_info, unit_efforts=unit_efforts)
    stream_results = extract_streams(evaluations, externals, goal_facts)
    function_plan = plan_functions(negated + functions, externals)
    plan_skeleton = [Assignment(free_parameters)]
    cost = get_optimistic_cost(function_plan)
    if max_cost < cost:
        return None, INF, init
    # TODO: detect connected components
    # TODO: eagerly evaluate fully bound constraints

    # TODO: consider other results if this fails
    domain = create_domain(goal_facts)
    init_evaluations = evaluations.copy()
    store = SolutionStore(evaluations, max_time=max_time, success_cost=success_cost, verbose=verbose)
    queue = SkeletonQueue(store, domain, disable=False)
    num_iterations = search_time = sample_time = 0
    planner = 'ff-astar'  # TODO: toggle within reschedule_stream_plan
    #last_clusters = set()
    #last_success = True
    while not store.is_terminated():
        num_iterations += 1
        start_time = time.time()
        print('\nIteration: {} | Skeletons: {} | Skeleton Queue: {} | Evaluations: {} | '
              'Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
            num_iterations, len(queue.skeletons), len(queue),
            len(evaluations), store.best_cost, search_time, sample_time, store.elapsed_time()))
        external_plan = None
        if len(queue.skeletons) < max_skeletons:
            domain.axioms[:] = create_disabled_axioms(queue, use_parameters=False)
            #dominated = are_domainated(last_clusters, clusters)
            #last_clusters = clusters
            #if last_success or not dominated: # Could also keep a history of results
            stream_plan = reschedule_stream_plan(init_evaluations, goal_facts, domain, stream_results,
                                                 unique_binding=True, unsatisfiable=True,
                                                 max_effort=max_effort, planner=planner, **search_args)
            if stream_plan is not None:
                external_plan = reorder_stream_plan(combine_optimizers(
                    init_evaluations, stream_plan + list(function_plan)))
        print('Stream plan ({}, {:.3f}): {}'.format(
            get_length(external_plan), compute_plan_effort(external_plan), external_plan))
        last_success = (external_plan is not None)
        search_time += elapsed_time(start_time)

        # Once a constraint is added for a skeleton, it should only be relaxed
        start_time = time.time()
        if last_success: # Only works if create_disabled_axioms never changes
            allocated_sample_time = (search_sample_ratio * search_time) - sample_time
        else:
            allocated_sample_time = INF
        queue.process(external_plan, plan_skeleton, cost=cost,
                      complexity_limit=INF,  max_time=allocated_sample_time)
        sample_time += elapsed_time(start_time)
        if not last_success and not queue:
            break
        # TODO: exhaustively compute all plan skeletons and add to queue within the focused algorithm

    write_stream_statistics(externals, verbose)
    action_plan, cost, facts = revert_solution(store.best_plan, store.best_cost, evaluations)
    bindings = bindings_from_plan(plan_skeleton, action_plan)
    return bindings, cost, facts
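Inside the main loop, the time allotted to sampling is tied to the cumulative search time through search_sample_ratio. The snippet below is a standalone illustration of that budgeting arithmetic with made-up numbers; it is not pddlstream code.

search_sample_ratio = 2.0   # sampling may consume twice the cumulative search time
search_time = 1.5           # seconds spent searching so far (made-up value)
sample_time = 0.5           # seconds already spent sampling (made-up value)

# Same formula as in the loop above: top the sampling budget up to ratio * search_time.
allocated_sample_time = (search_sample_ratio * search_time) - sample_time
print(allocated_sample_time)  # prints 2.5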
Code Example #4
def recover_stream_plan(evaluations, goal_expression, domain, stream_results, action_plan, negative, unit_costs):
    import pddl
    import instantiate
    # Universally quantified conditions are converted into negative axioms
    # Existentially quantified conditions are made additional preconditions
    # Universally quantified effects are instantiated by doing the cartesian product of types (slow)
    # Added effects cancel out removed effects

    real_task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs))
    node_from_atom = get_achieving_streams(evaluations, stream_results)
    opt_evaluations = apply_streams(evaluations, stream_results)
    opt_task = task_from_domain_problem(domain, get_problem(opt_evaluations, goal_expression, domain, unit_costs))
    function_assignments = {fact.fluent: fact.expression for fact in opt_task.init  # init_facts
                            if isinstance(fact, pddl.f_expression.FunctionAssignment)}
    type_to_objects = instantiate.get_objects_by_type(opt_task.objects, opt_task.types)
    results_from_head = get_results_from_head(opt_evaluations)
    action_instances = instantiate_actions(opt_task, type_to_objects, function_assignments, action_plan)
    negative_from_name = get_negative_predicates(negative)
    axioms_from_name = get_derived_predicates(opt_task.axioms)

    opt_task.init = set(opt_task.init)
    real_states = [set(real_task.init)] # TODO: had old way of doing this (~July 2018)
    preimage_plan = []
    function_plan = set()
    for layer in action_instances:
        for pair, action_instance in layer:
            axiom_plan = extract_axiom_plan(opt_task, action_instance, negative_from_name,
                                            static_state=real_states[-1])
            if axiom_plan is None:
                continue
            simplify_conditional_effects(real_states[-1], opt_task.init, action_instance, axioms_from_name)
            preimage_plan.extend(axiom_plan + [action_instance])
            apply_action(opt_task.init, action_instance)
            real_states.append(set(real_states[-1]))
            apply_action(real_states[-1], action_instance)
            if not unit_costs and (pair is not None):
                function_result = extract_function_results(results_from_head, *pair)
                if function_result is not None:
                    function_plan.add(function_result)
            break
        else:
            raise RuntimeError('No action instances are applicable')

    # TODO: could instead just accumulate difference between real and opt
    full_preimage = plan_preimage(preimage_plan, [])
    stream_preimage = set(full_preimage) - real_states[0]
    negative_preimage = set(filter(lambda a: a.predicate in negative_from_name, stream_preimage))
    positive_preimage = stream_preimage - negative_preimage
    function_plan.update(convert_negative(negative_preimage, negative_from_name, full_preimage, real_states))

    step_from_fact = {fact_from_fd(l): full_preimage[l] for l in positive_preimage if not l.negated}
    target_facts = list(step_from_fact.keys())
    #stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_results)
    stream_plan = []
    extract_stream_plan(node_from_atom, target_facts, stream_plan)
    stream_plan = prune_stream_plan(evaluations, stream_plan, target_facts)
    stream_plan = convert_fluent_streams(stream_plan, real_states, step_from_fact, node_from_atom)
    # visualize_constraints(map(fact_from_fd, stream_preimage))

    if DO_RESCHEDULE: # TODO: detect this based on unique or not
        # TODO: maybe test if partial order between two ways of achieving facts, if not prune
        new_stream_plan = reschedule_stream_plan(evaluations, target_facts, domain, stream_plan)
        if new_stream_plan is not None:
            stream_plan = new_stream_plan
    return stream_plan + list(function_plan)
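The per-action loop above keeps a trace of states: each step snapshots the previous state set and applies the action's effects to the copy. Here is a standalone sketch of that copy-then-apply bookkeeping with toy effects; apply_effects and the facts used are illustrative, not pddlstream code.

def apply_effects(state, add_effects, delete_effects):
    # Mutate the state in place, mirroring how apply_action edits real_states[-1] above.
    state.difference_update(delete_effects)
    state.update(add_effects)

states = [{'at_home'}]                       # initial state (toy data)
plan = [({'at_work'}, {'at_home'}),          # (add effects, delete effects) per action
        ({'has_coffee'}, set())]
for add, delete in plan:
    states.append(set(states[-1]))           # snapshot the previous state
    apply_effects(states[-1], add, delete)
print(states[-1] == {'at_work', 'has_coffee'})  # prints True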