Example #1
def solve_problem(problem, T, mode='dfs', gas=np.inf):
    """Searches for a program consistent with the problem's examples.

    Returns a tuple of (solution prefix or None, elapsed wall-clock seconds,
    number of search steps used).
    """
    examples = [util.decode_example(x) for x in problem['examples']]
    predictions = problem.get('prediction', np.zeros(len(impl.FUNCTIONS)))
    scores = dict(zip(impl.FUNCTIONS, predictions))
    ctx = context.Context(scores)
    start = time.time()
    if mode == 'dfs':
        search_func = search.dfs
    elif mode == 'sort-and-add':
        search_func = search.sort_and_add
    else:
        raise ValueError('invalid search mode {}'.format(mode))
    solution, steps_used = search_func(examples, T, ctx, gas)
    end = time.time()
    if solution:
        solution = solution.prefix
    return solution, end - start, steps_used
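A minimal usage sketch (the JSON file, its contents, and the gas budget below are assumptions for illustration, not part of the snippet above):

import json

# Hedged sketch: `my_problem` is a hypothetical problem dict whose 'examples'
# entries util.decode_example accepts; it may also carry a 'prediction' vector
# over impl.FUNCTIONS.
my_problem = json.load(open('problem.json'))
program, seconds, steps = solve_problem(my_problem, 2, mode='sort-and-add', gas=10000)
print(program, seconds, steps)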
Example #2
def generate_contexts(final_ctx):
    scores_map = {}

    def _get_nearest_partner(f):
        """Returns the partner function to f with the highest score.
        If f is lambda, partner must be a regular function.
        If f is a function and takes lambda as first argument,
        partner is a lambda.
        A function that doesn't take any lambdas is its own patner.

        There are no functions in dsl that take multiple lambdas or a lambda that is not the first argument.

        Arguments:
            f (Function): function to find partner for.

        Returns:
            Function or None. None is if there is no valid partner.
        """
        if f in final_ctx.functions:
            input_type = f.type.input_types[0]
            if not isinstance(input_type, types.FunctionType):
                return f
            if (input_type in final_ctx.typemap
                    and final_ctx.typemap[input_type]):
                return final_ctx.typemap[input_type][0]
        else:
            for partner in final_ctx.functions:
                if partner.type.input_types[0] == f.type:
                    return partner

    partners = [_get_nearest_partner(x) for x, _ in final_ctx.items]

    for i, (f, score) in enumerate(final_ctx.items):
        partner = partners[i]
        if not partner or f in scores_map:
            continue

        scores_map[f] = score

        if partner not in scores_map:
            partner_score = final_ctx.scores_map[partner]
            scores_map[partner] = partner_score

        yield context.Context(copy.copy(scores_map))
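One plausible way such progressively larger contexts could be consumed (a sketch only; `examples`, `T` and `gas` are assumed to be defined as in the previous snippet, and retrying search.dfs per context is not necessarily how sort_and_add is implemented):

# Hedged sketch: retry the enumerative search with each context yielded by
# generate_contexts, stopping at the first context that produces a solution.
solution, total_steps = None, 0
for ctx in generate_contexts(final_ctx):
    solution, steps_used = search.dfs(examples, T, ctx, gas)
    total_steps += steps_used
    if solution:
        break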
Example #3
def solve_problem(problem, T, mode='dfs', gas=np.inf):
    examples = [util.decode_example(x) for x in problem['examples']]
    predictions = problem.get('prediction', np.zeros(len(impl.FUNCTIONS)))
    if mode != 'beam':
        # The enumerative modes (dfs, sort-and-add) search with a Context built
        # from the per-function scores; beam search consumes the raw predictions.
        scores = dict(zip(impl.FUNCTIONS, predictions))
        ctx = context.Context(scores)
    start = time.time()
    if mode == 'dfs':
        solution, steps_used = search.dfs(examples, T, ctx, gas)
    elif mode == 'sort-and-add':
        solution, steps_used = search.sort_and_add(examples, T, ctx, gas)
    elif mode == 'beam':
        solution, steps_used = search.beam_search(examples, T, predictions, gas)
    else:
        raise ValueError('invalid search mode {}'.format(mode))
    end = time.time()
    if solution:
        solution = solution.prefix
    return solution, end - start, steps_used
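The only change from Example #1 is the 'beam' branch: the raw prediction vector is handed straight to search.beam_search, so no Context is built for that mode. A hedged call sketch, reusing the hypothetical my_problem from the first example:

# Hedged sketch: beam-search mode consumes the neural predictions directly.
program, seconds, steps = solve_problem(my_problem, 2, mode='beam')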
Example #4
def main():
    # Run all scenarios 30 times for a good average of execution times and to verify reproducibility
    for i in range(30):
        print("Test run " + str(i + 1))
        # Run the clustering algorithm
        scenarios = cluster_nodes(visualisation=False)
        #print("MAIN SCENARIOS:", scenarios)
        #print("TEST1", scenarios[0][0])
        # Extract the clusters in each scenario from a list of all scenarios
        scenarioRanks = []
        ranks = []
        clusterList = []
        for scenario in scenarios:
            for clusters in scenario:
                #print("CLUSTERSMAIN", clusters)
                scenarioRanks.append(rank(clusters))

            ranks.append(scenarioRanks)

        #print("Ranks:", ranks)
        #print("Length:", len(ranks))
        #print("Ranks for one scenario:", ranks[0])

        # Package the ranks of one cluster together with its leader to prepare for DeepCoder processing

        examples = []
        for scenario in ranks:
            for clusterRanks in scenario:
                # Strip away the node ID / index in sample, keeping only the rank values
                strippedRanks = [nodeRank[1] for nodeRank in clusterRanks]

                # Ground truth / oracle
                leader = monarchical_leader_election(strippedRanks)

                # Shuffle the list of ranks so the DeepCoder search cannot exploit the
                # sorted order produced by 'rank'; otherwise taking the last element
                # (tail) of the list would also look like a correct program.
                numpy.random.shuffle(strippedRanks)

                # Build the input-output tuple
                examples.append(([strippedRanks], leader))

        # Preprocessing
        decoded_examples = [decode_example(x) for x in examples]
        predictions = numpy.zeros(len(impl.FUNCTIONS))  # no neural predictions: every DSL function gets the same score
        scores = dict(zip(impl.FUNCTIONS, predictions))
        ctx = context.Context(scores)

        # Pass formatted rank and elected leader as input-output examples to DeepCoder

        # Depth-first search (DFS)
        dfs_start = time.perf_counter()
        dfs_wallclock_start = time.time()
        dfs_solution, dfs_steps_used = dfs(decoded_examples, 1, ctx, numpy.inf)
        dfs_end = time.perf_counter()
        dfs_wallclock_end = time.time()

        # Sort and add enumerative search
        saa_start = time.perf_counter()
        saa_wallclock_start = time.time()
        saa_solution, saa_steps_used = sort_and_add(decoded_examples, 1, ctx,
                                                    numpy.inf)
        saa_end = time.perf_counter()
        saa_wallclock_end = time.time()

        # Compare the elected leader from running the program inferred by DeepCoder to the ground truth from the oracle
        if dfs_solution:
            dfs_solution = dfs_solution.prefix
            print(
                "\nSynthesised program using DFS consistent with ground truth:",
                test_leader_election(dfs_solution, decoded_examples))
        else:
            print("\nNo solution found with DFS")

        if saa_solution:
            saa_solution = saa_solution.prefix
            print(
                "Synthesised program using sort and add consistent with ground truth:",
                test_leader_election(saa_solution, decoded_examples))
        else:
            print("No solution found with sort and add")

        # Print DFS results
        print("\nDFS result:", dfs_solution)
        print("Execution time:", dfs_end - dfs_start, "\nWall time:",
              dfs_wallclock_end - dfs_wallclock_start)
        print("Steps used:", dfs_steps_used)

        # Print Sort and add results
        print("\nSort and add result:", saa_solution)
        print("Execution time:", saa_end - saa_start, "\nWall time:",
              saa_wallclock_end - saa_wallclock_start)
        print("Steps used:", saa_steps_used)
        print("\n-------------------------------------------------\n")