Example #1
0
def get_constraints(current_spec):
    '''Extracts the example constraints from an existing specification file.

    Arguments:
        - current_spec (str): path to the specification file containing the
          existing constraints for the problem.

    Returns:
        - list: one 'input; output' string per constraint in the
          specification.

    '''
    file_sexp = parser.sexpFromFile(current_spec)
    benchmark_tuple = parser.extract_benchmark(file_sexp)
    (
            theories,
            syn_ctx,
            synth_instantiator,
            macro_instantiator,
            uf_instantiator,
            constraints,
            grammar_map,
            forall_vars_map,
            default_grammar_sfs
            ) = benchmark_tuple
    inputs = []
    vals = []
    for constraint in constraints:
        # Each constraint is of the form (= (f input) output); dig out the
        # raw literal values on both sides.
        inputs.append(constraint.children[0].children[0].value_object.value_object)
        vals.append(constraint.children[1].value_object.value_object)
    # Pair each input with its expected output as a single display string.
    return [inp + '; ' + val for inp, val in zip(inputs, vals)]
def make_solver(file_sexp):
    """Parse a benchmark s-expression and run the solver pipeline on it.

    Prints "FINAL_SOLUTION" followed by the solutions on success, or
    "(fail)" when the chosen solver reports no solution.
    """
    (theories, syn_ctx, synth_instantiator, macro_instantiator,
     uf_instantiator, constraints, grammar_map,
     forall_vars_map) = parser.extract_benchmark(file_sexp)

    assert len(theories) == 1
    theory = theories[0]

    solvers = [("STD Unification", std_unification_solver)]

    massaged = utils.timeout(
        massage_constraints,
        (syn_ctx, macro_instantiator, uf_instantiator, theory, constraints),
        {},
        timeout_duration=120,
        default=None)
    if massaged is None:
        # Constraint massaging timed out; fall back to the memoryless
        # enumerative solver, which can cope with the raw constraints.
        solvers = [("Memoryless Esolver", memoryless_esolver)]
    else:
        constraints = massaged

    synth_funs = list(synth_instantiator.get_functions().values())
    specification, verifier = make_specification(
        synth_funs, theory, syn_ctx, constraints)

    solver_args = (theory, syn_ctx, synth_funs, grammar_map, specification,
                   verifier)

    # Try each solver in order; stop at the first one that is suitable.
    for solver_name, solver in solvers:
        try:
            print("Trying solver:", solver_name)
            final_solutions = solver(*solver_args)
            if final_solutions == "NO SOLUTION":
                print("(fail)")
            else:
                print("FINAL_SOLUTION")
                print_solutions(synth_funs, final_solutions)
            break
        except UnsuitableSolverException as exception:
            print(exception)
Example #3
0
def evaluate(expr, input, synth_file='./euphony/benchmarks/string/test/phone-5-test.sl'):
    '''Evaluate an expression for a given input

    Arguments:
        - expr(FunctionExpression): The expression to evaluate
        - input(str): the input to the given expression.
        - synth_file(str): path to a synthesis file defining the available
          grammar. This file doesn't need to correspond to the same file that
          the expression was generated from; it just needs to have the same
          grammar definition. Constraints are not used in any way. It's
          required in order to load the proper logic into the expression
          evaluator. This is set to a file containing string logic by default.

    Returns:
        - (str): output from evaluating expression against given input
    '''
    eval_ctx = EvaluationContext()
    eval_ctx.set_valuation_map((Value(input, value_type=StringType()),))

    # NOTE(review): this is slow — the synthesis file is re-read on every
    # call just to recover the grammar. Correctness over performance for now.
    benchmark_tuple = parser.extract_benchmark(parser.sexpFromFile(synth_file))
    # grammar_map is the 7th element of the benchmark tuple; we expect a
    # single grammar, so its only key (a SynthFun) is the one we want.
    grammar_map = benchmark_tuple[6]
    synth_fun = next(iter(grammar_map))
    # Register the interpretation along with the expression we are about to
    # evaluate, or else we'd get an UnboundLetException.
    eval_ctx.set_interpretation(synth_fun, expr)
    return evaluate_expression_raw(expr, eval_ctx)
Example #4
0
def get_specification(file_sexp):
    """Build and return the specification object for a parsed benchmark."""
    (theories, syn_ctx, synth_instantiator, macro_instantiator,
     uf_instantiator, constraints, grammar_map, forall_vars_map,
     default_grammar_sfs) = parser.extract_benchmark(file_sexp)
    assert len(theories) == 1
    theory = theories[0]

    # XXX: constraint massaging is deliberately skipped here — it can get
    # stuck (time out) on some benchmarks, and the raw constraints suffice
    # for building the specification.
    synth_funs = list(synth_instantiator.get_functions().values())
    spec, _ = make_specification(synth_funs, theory, syn_ctx, constraints)
    return spec
Example #5
0
def print_stat(benchmark_files, phog_file):
    '''Print the (S)PHOG score of the reference solution of each benchmark.

    Arguments:
        - benchmark_files (iterable of str): paths to benchmark files; each
          is expected to contain a `define-fun` holding a solution.
        - phog_file (str): path to the learned PHOG model file.
    '''
    from os.path import basename
    for benchmark_file in benchmark_files:
        file_sexp = parser.sexpFromFile(benchmark_file)
        benchmark_tuple = parser.extract_benchmark(file_sexp)
        (theories, syn_ctx, synth_instantiator, macro_instantiator,
         uf_instantiator, constraints, grammar_map, forall_vars_map,
         default_grammar_sfs) = benchmark_tuple
        assert len(theories) == 1
        specification = get_specification(file_sexp)
        synth_funs = list(synth_instantiator.get_functions().values())
        grammar = grammar_map[synth_funs[0]]

        phog = SPhog(grammar, phog_file, synth_funs[0].range_type, specification) if options.use_sphog() else \
            Phog(grammar, phog_file, synth_funs[0].range_type)

        defs, _ = parser.filter_sexp_for('define-fun', file_sexp)
        if not defs:
            print('cannot find a solution!')
            exit(0)
        # The last define-fun in the file is taken as the reference solution.
        [name, args_data, ret_type_data, interpretation] = defs[-1]

        ((arg_vars, arg_types, arg_var_map),
         return_type) = parser._process_function_defintion(
             args_data, ret_type_data)
        expr = parser.sexp_to_expr(interpretation, syn_ctx, arg_var_map)
        # Substitute named formal arguments with positional parameter
        # expressions so the scorer sees a closed expression.
        subs_pairs = [
            (var_expr, exprs.FormalParameterExpression(None, ty, i))
            for i, (var_expr, ty) in enumerate(zip(arg_vars, arg_types))
        ]
        expr = exprs.substitute_all(expr, subs_pairs)

        score = phog.get_score(expr)
        print(basename(benchmark_file), ' \t', score)
Example #6
0
def make_solver(file_sexp, phog_file, rcfg_file):
    """Parse a benchmark and run the PHOG-guided solver pipeline on it.

    Prints the solutions on success, "(fail)" when the chosen solver finds
    no solution, or "Unable to solve!" when every solver is unsuitable.
    """
    (theories, syn_ctx, synth_instantiator, macro_instantiator,
     uf_instantiator, constraints, grammar_map, forall_vars_map,
     default_grammar_sfs) = parser.extract_benchmark(file_sexp)
    assert len(theories) == 1
    theory = theories[0]

    solvers = [("LIA Unification", lia_unification_solver),
               ("STD Unification", std_unification_solver),
               ("Classic Esolver", classic_esolver),
               ("Memoryless Esolver", memoryless_esolver)]

    massaged = utils.timeout(
        massage_constraints,
        (syn_ctx, macro_instantiator, uf_instantiator, theory, constraints),
        {},
        timeout_duration=120,
        default=None)
    if massaged is None:
        # Constraint massaging timed out; only the memoryless enumerative
        # solver can cope with the raw constraints.
        solvers = [("Memoryless Esolver", memoryless_esolver)]
    else:
        constraints = massaged

    # RCFG: augment each non-default grammar (used for PHOG guidance) and
    # precompute its rule-to-nonterminal-address map.
    for sf in grammar_map:
        if sf in default_grammar_sfs:
            continue
        if rcfg_file != '':
            grammar_map[sf] = augment_grammar(grammar_map[sf], rcfg_file)
        grammar_map[sf].compute_rule_to_nt_addrs()

    synth_funs = list(synth_instantiator.get_functions().values())
    specification, verifier = make_specification(
        synth_funs, theory, syn_ctx, constraints)

    solver_args = (theory, syn_ctx, synth_funs, grammar_map, specification,
                   verifier, phog_file)

    # Try each solver in order; stop at the first suitable one.
    for solver_name, solver in solvers:
        try:
            final_solutions = solver(*solver_args)
            if final_solutions == "NO SOLUTION":
                print("(fail)")
            else:
                print_solutions(synth_funs, final_solutions)
            break
        except UnsuitableSolverException:
            pass
    else:
        print("Unable to solve!")
Example #7
0
def generate_distinguishing_input(current_spec, candidate_programs, failure_threshold=100, args=None):
    '''Generates a new distinguishing input which is not a part of the existing
    specification

    Arguments:
        - current_spec (str): The specification for the problem including the
          existing constraints for the problem.
        - candidate_programs (iterable): candidate expressions to tell apart;
          each is evaluated via `evaluate`.
        - failure_threshold (int): maximum number of full mutation rounds to
          attempt before giving up.
        - args: optional parsed CLI options; `args.auto` accepts the first
          distinguishing input without prompting, `args.verbose` prints stats.

    Returns:
        - str: a string representing the distinguishing input not part of the
          current specification. Also can return None if no distinguishing
          input can be found.

    '''
    file_sexp = parser.sexpFromFile(current_spec)
    benchmark_tuple = parser.extract_benchmark(file_sexp)
    (
            theories,
            syn_ctx,
            synth_instantiator,
            macro_instantiator,
            uf_instantiator,
            constraints,
            grammar_map,
            forall_vars_map,
            default_grammar_sfs
            ) = benchmark_tuple
    # Raw example input strings from the existing constraints.
    inputs = [constraint.children[0].children[0].value_object.value_object
              for constraint in constraints]
    import string_builder
    inputs_r = [string_builder.RString(inp) for inp in inputs]

    def distances(input_list, distfunc):
        # Full pairwise distance matrix between all example inputs.
        return [[distfunc(x, y) for y in input_list] for x in input_list]

    # Find the metric which varies the least between all the examples.
    vals = {
        'char': distances(inputs_r, lambda x, y: x.orig_distance(y)),
        'class': distances(inputs_r, lambda x, y: x.class_distance(y)),
        'group': distances(inputs_r, lambda x, y: x.group_distance(y)),
    }
    stats = {}
    for key, val in vals.items():
        # Lower triangle only: the matrix is symmetric with a zero diagonal,
        # so this avoids double-counting pairs in the mean/std.
        tril = np.tril(np.array(val))
        stats[key] = string_builder.TextStats(tril.mean(), tril.std())
    stat_set = string_builder.EditStatSet(stats['char'], stats['class'], stats['group'])

    distinguishing_input = None
    loop_count = 0
    total_generated = 0
    while True:
        distinguished = None
        for constraint_input in inputs_r:
            mutated = constraint_input.generate_mutation(stat_set)
            total_generated += 1
            # Run the mutated input across all candidates; more than one
            # unique result means it distinguishes between them.
            results = set([evaluate(expression, mutated, current_spec)
                           for expression in candidate_programs])
            if len(results) > 1:
                if args is not None and args.auto:
                    distinguishing_input = mutated
                    distinguished = True
                    break
                selected = input('Found a distinguishing input ({}). Programs generated outputs: {}. Acceptable? (Y)es/(n)o/(f)inish: '.format(mutated, results)).lower()
                if selected == '' or selected == 'y' or selected == 'f':
                    distinguishing_input = mutated
                    distinguished = True
                    break
        if distinguished is not None:
            break
        loop_count += 1
        if loop_count > failure_threshold:
            break

    if args is not None and args.verbose:
        print("returning input {}. Total generated inputs: {}".format(distinguishing_input, total_generated))
    return distinguishing_input