Example #1
def get_achieving_streams(evaluations, stream_results, op=sum):
    # Dijkstra-style relaxation: for each atom, find the cheapest chain of
    # stream results (by accumulated effort) that certifies it.
    # TODO: could do this with bound_stream_instances instead
    unprocessed_from_atom = defaultdict(list)
    none = (None,)  # dummy condition so streams with an empty domain still fire
    node_from_atom = {none: Node(0, None)}
    conditions_from_stream = {}
    remaining_from_stream = {}
    for stream_result in stream_results:
        conditions_from_stream[stream_result] = stream_result.instance.get_domain() + (none,)
        remaining_from_stream[stream_result] = len(conditions_from_stream[stream_result])
        for atom in conditions_from_stream[stream_result]:
            unprocessed_from_atom[atom].append(stream_result)
    for atom in evaluations:
        if is_atom(atom):
            node_from_atom[fact_from_evaluation(atom)] = Node(0, None)

    queue = [HeapElement(node.effort, atom) for atom, node in node_from_atom.items()]
    while queue:
        atom = heappop(queue).value
        if atom not in unprocessed_from_atom:
            continue
        for stream_result in unprocessed_from_atom[atom]:
            remaining_from_stream[stream_result] -= 1
            if remaining_from_stream[stream_result]:
                continue
            effort = 1  # unit effort per stream application
            total_effort = op(node_from_atom[cond].effort
                              for cond in conditions_from_stream[stream_result]) + effort
            for new_atom in stream_result.get_certified():
                if (new_atom not in node_from_atom) or (total_effort < node_from_atom[new_atom].effort):
                    node_from_atom[new_atom] = Node(total_effort, stream_result)
                    heappush(queue, HeapElement(total_effort, new_atom))
        del unprocessed_from_atom[atom]
    del node_from_atom[none]
    return node_from_atom
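These snippets lean on two small helper types. Below is a minimal sketch of what they might look like, assuming HeapElement pairs a comparable key with a payload for use with heapq, and Node records an atom's accumulated effort plus the result that achieves it (definitions inferred from usage above, not a confirmed API):

from collections import namedtuple
from heapq import heappush, heappop

# Assumed definitions, inferred from how the examples use these names.
# Tuples compare field-by-field, so the heap orders elements by key first.
HeapElement = namedtuple('HeapElement', ['key', 'value'])
Node = namedtuple('Node', ['effort', 'result'])

queue = []
heappush(queue, HeapElement(2, 'costly_atom'))
heappush(queue, HeapElement(0, 'free_atom'))
assert heappop(queue).value == 'free_atom'  # lowest effort pops first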
Example #2
def process_stream_plan(key, sampling_problem, queue, evaluations, store):
    instance, num_processed, bindings, stream_plan, action_plan, cost = sampling_problem
    if not stream_plan:
        store.add_plan(bind_plan(bindings, action_plan), cost)
        return
    if store.best_cost <= cost:
        instance.disabled = False  # TODO: only disable if not used elsewhere
        # TODO: could just hash instances
        return
    opt_result = stream_plan[0]  # TODO: could do several at once but no real point
    assert not any(evaluation_from_fact(f) not in evaluations
                   for f in instance.get_domain())
    # TODO: hash combinations to prevent repeats

    results = []
    for i in range(num_processed, len(instance.results_history)):
        results.extend(instance.results_history[i])
    if not results and not instance.enumerated:
        #print(key.attempts, key.length)
        results = instance.next_results(verbose=store.verbose)
    for result in results:
        add_certified(evaluations, result)
        if (type(result) is PredicateResult) and (opt_result.value != result.value):
            continue  # TODO: check if satisfies target certified
        new_bindings = bindings.copy()
        if isinstance(result, StreamResult):
            for opt, obj in zip(opt_result.output_objects, result.output_objects):
                assert opt not in new_bindings  # TODO: return failure if conflicting bindings
                new_bindings[opt] = obj
        new_stream_plan = stream_plan[1:]
        new_cost = cost
        if type(result) is FunctionResult:
            new_cost += (result.value - opt_result.value)
        new_key = SkeletonKey(0, len(new_stream_plan))
        new_skeleton = Skeleton(
            instantiate_first(new_bindings, new_stream_plan), 0, new_bindings,
            new_stream_plan, action_plan, new_cost)
        heappush(queue, HeapElement(new_key, new_skeleton))

    if (key.attempts == 0) and isinstance(opt_result, SynthStreamResult):
        # TODO: only add if failure?
        new_stream_plan = opt_result.decompose() + stream_plan[1:]
        new_key = SkeletonKey(0, len(new_stream_plan))
        new_skeleton = Skeleton(instantiate_first(bindings, new_stream_plan), 0,
                                bindings, new_stream_plan, action_plan, cost)
        heappush(queue, HeapElement(new_key, new_skeleton))
    if not instance.enumerated:
        # TODO: compute expected sampling effort required
        new_key = SkeletonKey(key.attempts + 1, len(stream_plan))
        new_skeleton = Skeleton(instance, len(instance.results_history),
                                bindings, stream_plan, action_plan, cost)
        heappush(queue, HeapElement(new_key, new_skeleton))
Example #3
def push_instance(self, instance):
    # TODO: flush stale priorities?
    complexity = self.compute_complexity(instance)
    priority = Priority(complexity, self.num_pushes)
    # Route function instances to their own queue; streams go to stream_queue.
    queue = self.function_queue if isinstance(
        instance, FunctionInstance) else self.stream_queue
    if isinstance(queue, deque):
        queue.append(HeapElement(priority, instance))  # FIFO queue
    else:
        heappush(queue, HeapElement(priority, instance))  # priority queue
    self.num_pushes += 1
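The branch above supports both queue types: a deque gives FIFO processing, while a heap pops by priority. A small illustration, using the assumed HeapElement namedtuple from the sketch after Example #1:

from collections import deque, namedtuple
from heapq import heappush, heappop

HeapElement = namedtuple('HeapElement', ['key', 'value'])  # assumed definition

fifo = deque()
fifo.append(HeapElement(2, 'first'))
fifo.append(HeapElement(1, 'second'))
assert fifo.popleft().value == 'first'  # insertion order; key ignored

heap = []
heappush(heap, HeapElement(2, 'first'))
heappush(heap, HeapElement(1, 'second'))
assert heappop(heap).value == 'second'  # lowest key wins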
Example #4
def topological_sort(vertices, orders, priority_fn=lambda v: 0):
    # Kahn's algorithm, using a priority queue to break ties among ready vertices
    # Can also do a DFS version
    incoming_edges, outgoing_edges = neighbors_from_orders(orders)
    ordering = []
    queue = []
    for v in vertices:
        if not incoming_edges[v]:
            heappush(queue, HeapElement(priority_fn(v), v))
    while queue:
        v1 = heappop(queue).value
        ordering.append(v1)
        for v2 in outgoing_edges[v1]:
            incoming_edges[v2].remove(v1)
            if not incoming_edges[v2]:
                heappush(queue, HeapElement(priority_fn(v2), v2))
    return ordering
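For context, here is a runnable sketch of the helper plus a sample call; neighbors_from_orders is assumed to build incoming/outgoing adjacency sets from (earlier, later) pairs, which is how the function above uses it:

from collections import defaultdict

def neighbors_from_orders(orders):
    # Assumed helper: adjacency sets from (earlier, later) ordering pairs.
    incoming_edges, outgoing_edges = defaultdict(set), defaultdict(set)
    for v1, v2 in orders:
        outgoing_edges[v1].add(v2)
        incoming_edges[v2].add(v1)
    return incoming_edges, outgoing_edges

# With topological_sort from Example #4 in scope:
# topological_sort('abcd', [('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd')])
# returns ['a', 'b', 'c', 'd']; ties among ready vertices are broken first by
# priority_fn and then by the heap's comparison of the vertices themselves.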
Example #5
def push_instance(self, instance):
    # TODO: flush stale priorities?
    complexity = self.compute_complexity(instance)
    priority = Priority(complexity, self.num_pushes)
    heappush(self.queue, HeapElement(priority, instance))
    self.num_pushes += 1
    if self.verbose:
        print(self.num_pushes, instance)
Example #6
def add_skeleton(self, stream_plan, plan_attempts, bindings, plan_index, cost):
    stream_plan = [result.remap_inputs(bindings) for result in stream_plan]
    attempted = sum(plan_attempts) != 0  # Bias towards unused
    effort = compute_effort(plan_attempts)
    key = SkeletonKey(attempted, effort)
    skeleton = Skeleton(stream_plan, plan_attempts, bindings, plan_index, cost)
    heappush(self.queue, HeapElement(key, skeleton))
Example #7
def get_achieving_streams(evaluations,
                          stream_results,
                          max_effort=INF,
                          **effort_args):
    unprocessed_from_atom = defaultdict(list)
    node_from_atom = {NULL_COND: Node(0, None)}
    conditions_from_stream = {}
    remaining_from_stream = {}
    for result in stream_results:
        conditions_from_stream[result] = result.instance.get_domain() + (NULL_COND,)
        remaining_from_stream[result] = len(conditions_from_stream[result])
        for atom in conditions_from_stream[result]:
            unprocessed_from_atom[atom].append(result)
    for atom in evaluations:
        if not is_negated_atom(atom):
            node_from_atom[fact_from_evaluation(atom)] = Node(0, None)

    queue = [
        HeapElement(node.effort, atom)
        for atom, node in node_from_atom.items()
    ]
    while queue:
        atom = heappop(queue).value
        if atom not in unprocessed_from_atom:
            continue
        for result in unprocessed_from_atom[atom]:
            remaining_from_stream[result] -= 1
            if remaining_from_stream[result]:
                continue
            effort = result.get_effort(**effort_args)
            total_effort = effort + EFFORT_OP(
                node_from_atom[cond].effort
                for cond in conditions_from_stream[result])
            if max_effort <= total_effort:
                continue
            for new_atom in result.get_certified():
                if (new_atom not in node_from_atom) or (
                        total_effort < node_from_atom[new_atom].effort):
                    node_from_atom[new_atom] = Node(total_effort, result)
                    heappush(queue, HeapElement(total_effort, new_atom))
        del unprocessed_from_atom[atom]
    del node_from_atom[NULL_COND]
    return node_from_atom
Example #8
def get_achieving_streams(evaluations,
                          stream_results,
                          unit_efforts=False):  #, max_effort=INF):
    # TODO: could do this with bound_stream_instances instead
    unprocessed_from_atom = defaultdict(list)
    node_from_atom = {NULL_COND: Node(0, None)}
    conditions_from_stream = {}
    remaining_from_stream = {}
    for result in stream_results:
        conditions_from_stream[result] = result.instance.get_domain() + (NULL_COND,)
        remaining_from_stream[result] = len(conditions_from_stream[result])
        for atom in conditions_from_stream[result]:
            unprocessed_from_atom[atom].append(result)
    for atom in evaluations:
        if is_atom(atom):
            node_from_atom[fact_from_evaluation(atom)] = Node(0, None)

    queue = [
        HeapElement(node.effort, atom)
        for atom, node in node_from_atom.items()
    ]
    while queue:
        atom = heappop(queue).value
        if atom not in unprocessed_from_atom:
            continue
        for result in unprocessed_from_atom[atom]:
            remaining_from_stream[result] -= 1
            if remaining_from_stream[result]:
                continue
            effort = get_instance_effort(result.instance, unit_efforts)
            total_effort = effort + COMBINE_OP(
                node_from_atom[cond].effort
                for cond in conditions_from_stream[result])
            #if max_effort <= total_effort:
            #    continue
            for new_atom in result.get_certified():
                if (new_atom not in node_from_atom) or (
                        total_effort < node_from_atom[new_atom].effort):
                    node_from_atom[new_atom] = Node(total_effort, result)
                    heappush(queue, HeapElement(total_effort, new_atom))
        del unprocessed_from_atom[atom]
    del node_from_atom[NULL_COND]
    return node_from_atom
Example #9
def add_skeleton(self, stream_plan, plan_attempts, bindings, plan_index, cost):
    stream_plan = instantiate_plan(bindings, stream_plan)
    #score = score_stream_plan(stream_plan)
    attempted = sum(plan_attempts) != 0
    effort = compute_effort(plan_attempts)
    #effort = compute_score(plan_attempts)
    #effort = compute_score2(plan_attempts)
    key = SkeletonKey(attempted, effort)
    skeleton = Skeleton(stream_plan, plan_attempts, bindings, plan_index, cost)
    heappush(self.queue, HeapElement(key, skeleton))
Example #10
def get_element(self):
    # TODO: instead of remaining, use the index in the queue to reprocess earlier ones
    # TODO: include the complexity here as well
    remaining = len(self.skeleton.stream_plan) - self.index
    priority = Priority(self.attempts, remaining, self.cost)
    return HeapElement(priority, self)
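Since Priority is presumably a namedtuple, the heap compares it lexicographically: fewest attempts first, then fewest remaining results, then lowest cost. A small illustration (the field names come from the call above; the definition itself is an assumption):

from collections import namedtuple

Priority = namedtuple('Priority', ['attempts', 'remaining', 'cost'])  # assumed

assert Priority(0, 5, 10.0) < Priority(1, 0, 0.0)  # attempts dominates
assert Priority(0, 2, 9.0) < Priority(0, 3, 1.0)   # then remaining, then cost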
Example #11
def get_element(self):
    return HeapElement(self.get_priority(), self)
Example #12
def solve_focused(problem,
                  stream_info={},
                  action_info={},
                  synthesizers=[],
                  max_time=INF,
                  max_cost=INF,
                  unit_costs=None,
                  sampling_time=0,
                  effort_weight=None,
                  eager_layers=1,
                  visualize=False,
                  verbose=True,
                  postprocess=False,
                  **search_kwargs):
    """
    Solves a PDDLStream problem by first hypothesizing stream outputs and then determining whether they exist
    :param problem: a PDDLStream problem
    :param stream_info: a dictionary from stream name to StreamInfo altering how individual streams are handled
    :param action_info: a dictionary from action name to ActionInfo for planning and execution
    :param max_time: the maximum amount of time to apply streams
    :param max_cost: a strict upper bound on plan cost
    :param effort_weight: a multiplier for stream effort compared to action costs
    :param eager_layers: the number of eager stream application layers per iteration
    :param visualize: if True, draw the constraint network and stream plan as graphviz files
    :param verbose: if True, print the result of each stream application
    :param search_kwargs: keyword arguments for the search subroutine
    :return: a tuple (plan, cost, evaluations) where plan is a sequence of actions
        (or None), cost is the cost of the plan, and evaluations is init expanded
        via stream applications
    """
    # TODO: return to just using the highest level samplers at the start
    solve_stream_plan_fn = relaxed_stream_plan if effort_weight is None else simultaneous_stream_plan
    # TODO: warning check if using simultaneous_stream_plan or sequential_stream_plan with non-eager functions
    num_iterations = 0
    store = SolutionStore(max_time, max_cost,
                          verbose)  # TODO: include other info here?
    evaluations, goal_expression, domain, stream_name, externals = parse_problem(
        problem, stream_info)
    compile_to_exogenous(evaluations, domain, externals)
    if unit_costs is None:
        unit_costs = not has_costs(domain)
    full_action_info = get_action_info(action_info)
    load_stream_statistics(stream_name, externals + synthesizers)
    if visualize:
        clear_visualizations()
    eager_externals = list(filter(lambda e: e.info.eager, externals))
    streams, functions, negative = partition_externals(externals)
    queue = []
    # TODO: switch to searching if believe chance of search better than sampling
    while not store.is_terminated():
        num_iterations += 1
        # TODO: decide max_sampling_time based on total search_time or likelihood estimates
        print(
            '\nIteration: {} | Queue: {} | Evaluations: {} | Cost: {} | Time: {:.3f}'
            .format(num_iterations, len(queue), len(evaluations),
                    store.best_cost, store.elapsed_time()))
        layered_process_stream_queue(
            Instantiator(evaluations, eager_externals), evaluations, store,
            eager_layers)
        solve_stream_plan = lambda sr: solve_stream_plan_fn(
            evaluations,
            goal_expression,
            domain,
            sr,
            negative,
            max_cost=store.best_cost,
            #max_cost=min(store.best_cost, max_cost),
            unit_costs=unit_costs,
            **search_kwargs)
        #combined_plan, cost = solve_stream_plan(populate_results(evaluations, streams + functions))
        combined_plan, cost = iterative_solve_stream_plan(
            evaluations, streams, functions, solve_stream_plan)
        if action_info:
            combined_plan = reorder_combined_plan(evaluations, combined_plan,
                                                  full_action_info, domain)
            print('Combined plan: {}'.format(combined_plan))
        stream_plan, action_plan = separate_plan(combined_plan,
                                                 full_action_info)
        stream_plan = reorder_stream_plan(
            stream_plan)  # TODO: is this strictly redundant?
        stream_plan = get_synthetic_stream_plan(stream_plan, synthesizers)
        print('Stream plan: {}\n'
              'Action plan: {}'.format(stream_plan, action_plan))

        if stream_plan is None:
            if queue:
                fairly_process_queue(queue, evaluations, store)
            else:
                break
        else:
            if visualize:
                create_visualizations(evaluations, stream_plan, num_iterations)
            heappush(
                queue,
                HeapElement(
                    SkeletonKey(0, len(stream_plan)),
                    Skeleton(instantiate_first({}, stream_plan), 0, {},
                             stream_plan, action_plan, cost)))
            greedily_process_queue(queue, evaluations, store, sampling_time)

    if postprocess and (not unit_costs):
        locally_optimize(evaluations, store, goal_expression, domain,
                         functions, negative, synthesizers)
    write_stream_statistics(stream_name, externals + synthesizers, verbose)
    return revert_solution(store.best_plan, store.best_cost, evaluations)
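A hypothetical invocation, exercising only parameters documented in the docstring; problem is assumed to be a PDDLStream problem constructed elsewhere:

def demo(problem):
    # `problem` is assumed to be built elsewhere by the caller.
    plan, cost, evaluations = solve_focused(
        problem,
        max_time=60,         # stop applying streams after 60 seconds
        effort_weight=None,  # None selects relaxed_stream_plan (see above)
        verbose=False)
    if plan is not None:
        print('Plan of cost {}: {}'.format(cost, plan))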
Example #13
def push_binding(self, binding):
    # TODO: add to standby if not active
    priority = binding.get_priority()
    element = HeapElement(priority, binding)
    heappush(self.queue, element)