Example #1
def locally_optimize(evaluations, store, goal_expression, domain, functions, negative,
                     dynamic_streams, visualize, sampling_time=0):
    action_plan = store.best_plan
    if action_plan is None:
        return None
    print('\nPostprocessing | Cost: {} | Total Time: {:.3f}'.format(store.best_cost, store.elapsed_time()))
    # TODO: postprocess current skeletons as well

    task = task_from_domain_problem(domain, get_problem(evaluations, goal_expression, domain, unit_costs=False))
    opt_stream_plan, opt_from_obj = recover_opt_stream_plan(evaluations, action_plan, task)
    opt_stream_plan += optimistic_process_streams(evaluations_from_stream_plan(evaluations, opt_stream_plan), functions)
    opt_action_plan = [(name, tuple(opt_from_obj.get(o, o) for o in args)) for name, args in action_plan]
    pddl_plan = [(name, tuple(map(pddl_from_object, args))) for name, args in opt_action_plan]
    stream_plan = recover_stream_plan(evaluations, goal_expression, domain,
                                      opt_stream_plan, pddl_plan, negative, unit_costs=False)
    stream_plan = get_synthetic_stream_plan(reorder_stream_plan(stream_plan), dynamic_streams)

    # TODO: need to make this just streams
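    # Optimistically apply the recovered stream plan so the candidate plan's cost can be evaluated.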
    opt_evaluations = apply_streams(evaluations, stream_plan)
    opt_cost = get_plan_cost(opt_evaluations, opt_action_plan, domain, unit_costs=False)
    dump_plans(stream_plan, opt_action_plan, opt_cost)
    if visualize:
        log_plans(stream_plan, action_plan, None)
        create_visualizations(evaluations, stream_plan, None)

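    # Restart the clock, then seed a skeleton queue with the optimized plan and sample to bind its optimistic objects.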
    store.start_time = time.time()
    store.max_cost = store.best_cost
    queue = SkeletonQueue(store, evaluations, goal_expression, domain)
    queue.new_skeleton(stream_plan, opt_action_plan, opt_cost)
    queue.greedily_process()
    queue.timed_process(sampling_time)
Example #2
def locally_optimize(evaluations, store, goal_expression, domain, functions,
                     negative, dynamic_streams):
    action_plan = store.best_plan
    if action_plan is None:
        return None
    print('\nPostprocessing')  # TODO: postprocess current skeleton as well

    task = task_from_domain_problem(
        domain,
        get_problem(evaluations, goal_expression, domain, unit_costs=False))
    plan_instances = get_action_instances(
        task, action_plan) + [get_goal_instance(task.goal)]
    replace_derived(task, set(), plan_instances)
    preimage = filter(lambda a: not a.negated,
                      plan_preimage(plan_instances, []))

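    # Walk the preimage facts in dependency order, rebuilding an optimistic stream plan
    # and mapping each bound output object back to its optimistic placeholder.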
    opt_stream_plan = []
    opt_from_obj = {}
    for stream_result in extract_order(evaluations, preimage):
        input_objects = tuple(
            opt_from_obj.get(o, o)
            for o in stream_result.instance.input_objects)
        instance = stream_result.instance.external.get_instance(input_objects)
        #assert(not instance.disabled)
        instance.disabled = False
        opt_results = instance.next_optimistic()
        if not opt_results:
            continue
        opt_result = opt_results[0]
        opt_stream_plan.append(opt_result)
        for obj, opt in zip(stream_result.output_objects,
                            opt_result.output_objects):
            opt_from_obj[obj] = opt

    opt_stream_plan += optimistic_process_streams(
        evaluations_from_stream_plan(evaluations, opt_stream_plan), functions)
    opt_action_plan = [(name, tuple(opt_from_obj.get(o, o) for o in args))
                       for name, args in action_plan]
    pddl_plan = [(name, tuple(map(pddl_from_object, args)))
                 for name, args in opt_action_plan]
    stream_plan = recover_stream_plan(evaluations,
                                      goal_expression,
                                      domain,
                                      opt_stream_plan,
                                      pddl_plan,
                                      negative,
                                      unit_costs=False)

    stream_plan = reorder_stream_plan(stream_plan)
    stream_plan = get_synthetic_stream_plan(stream_plan, dynamic_streams)
    print('Stream plan: {}\n'
          'Action plan: {}'.format(stream_plan, opt_action_plan))
    opt_cost = 0  # TODO: compute this cost for real

    store.start_time = time.time()
    store.max_cost = store.best_cost
    #sampling_time = 10
    sampling_time = 0
    queue = SkeletonQueue(evaluations, store)
    queue.new_skeleton(stream_plan, opt_action_plan, opt_cost)
    queue.greedily_process(sampling_time)
Example #3
def solve_focused(problem,
                  stream_info={},
                  action_info={},
                  synthesizers=[],
                  max_time=INF,
                  max_cost=INF,
                  unit_costs=None,
                  effort_weight=None,
                  eager_layers=1,
                  visualize=False,
                  verbose=True,
                  postprocess=False,
                  **search_kwargs):
    """
    Solves a PDDLStream problem by first hypothesizing stream outputs and then determining whether they exist
    :param problem: a PDDLStream problem
    :param action_info: a dictionary from stream name to ActionInfo for planning and execution
    :param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
    :param max_time: the maximum amount of time to apply streams
    :param max_cost: a strict upper bound on plan cost
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param eager_layers: the number of eager stream application layers per iteration
    :param visualize: if True, it draws the constraint network and stream plan as a graphviz file
    :param verbose: if True, this prints the result of each stream application
    :param search_kwargs: keyword args for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init but expanded
        using stream applications
    """
    # TODO: return to just using the highest level samplers at the start
    search_sampling_ratio = 1
    solve_stream_plan_fn = relaxed_stream_plan if effort_weight is None else simultaneous_stream_plan
    # TODO: warning check if using simultaneous_stream_plan or sequential_stream_plan with non-eager functions
    num_iterations = 0
    search_time = sample_time = 0
    store = SolutionStore(max_time, max_cost,
                          verbose)  # TODO: include other info here?
    evaluations, goal_expression, domain, stream_name, externals = parse_problem(
        problem, stream_info)
    compile_to_exogenous(evaluations, domain, externals)
    if unit_costs is None:
        unit_costs = not has_costs(domain)
    full_action_info = get_action_info(action_info)
    load_stream_statistics(stream_name, externals + synthesizers)
    if visualize:
        clear_visualizations()
    # TODO: somehow Functions became no longer eager?
    eager_externals = list(
        filter(lambda e: e.info.eager or type(e) == Function, externals))
    streams, functions, negative = partition_externals(externals)
    queue = SkeletonQueue(store, evaluations)
    # TODO: decide max_sampling_time based on total search_time or likelihood estimates
    # TODO: switch to searching if believe chance of search better than sampling
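    # Main loop: alternately plan over optimistic stream outputs and sample streams to certify the resulting plan.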
    while not store.is_terminated():
        num_iterations += 1
        print(
            '\nIteration: {} | Queue: {} | Evaluations: {} | Cost: {} '
            '| Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.
            format(num_iterations, len(queue), len(evaluations),
                   store.best_cost, search_time, sample_time,
                   store.elapsed_time()))

        start_time = time.time()
        layered_process_stream_queue(
            Instantiator(evaluations, eager_externals), evaluations, store,
            eager_layers)
        solve_stream_plan = lambda sr: solve_stream_plan_fn(
            evaluations,
            goal_expression,
            domain,
            sr,
            negative,
            max_cost=store.best_cost,
            #max_cost=min(store.best_cost, max_cost),
            unit_costs=unit_costs,
            **search_kwargs)
        #combined_plan, cost = solve_stream_plan(populate_results(evaluations, streams + functions))
        combined_plan, cost = iterative_solve_stream_plan(
            evaluations, streams, functions, solve_stream_plan)
        if action_info:
            combined_plan = reorder_combined_plan(evaluations, combined_plan,
                                                  full_action_info, domain)
            print('Combined plan: {}'.format(combined_plan))
        stream_plan, action_plan = separate_plan(combined_plan,
                                                 full_action_info)
        stream_plan = reorder_stream_plan(
            stream_plan)  # TODO: is this strictly redundant?
        stream_plan = get_synthetic_stream_plan(stream_plan, synthesizers)
        print('Stream plan: {}\n'
              'Action plan: {}'.format(stream_plan, action_plan))
        search_time += elapsed_time(start_time)

        start_time = time.time()
        if stream_plan is None:
            if not queue:
                break
            queue.process_until_success()
            #queue.fairly_process()
        else:
            if visualize:
                create_visualizations(evaluations, stream_plan, num_iterations)
            queue.new_skeleton(stream_plan, action_plan, cost)
            queue.greedily_process()
        sample_time += elapsed_time(start_time)

        start_time = time.time()
        queue.timed_process(search_sampling_ratio * search_time - sample_time)
        sample_time += elapsed_time(start_time)

    if postprocess and (not unit_costs):
        locally_optimize(evaluations, store, goal_expression, domain,
                         functions, negative, synthesizers)
    write_stream_statistics(stream_name, externals + synthesizers, verbose)
    return revert_solution(store.best_plan, store.best_cost, evaluations)
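
Usage sketch (illustrative, not from the source): `problem` is assumed to be a PDDLStream problem constructed elsewhere, and the keyword values below are arbitrary examples. The unpacking follows the return value documented in the docstring above.

# Hypothetical usage: `problem` is a PDDLStream problem built elsewhere.
plan, cost, evaluations = solve_focused(problem, max_time=60, verbose=False)
if plan is None:
    print('No plan found')
else:
    for name, args in plan:  # each action is a (name, args) pair
        print(name, args)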