Example #1
def get_constraints(current_spec):
    '''Extracts the input/output constraint pairs from an existing
    specification.

    Arguments:
        - current_spec (str): path to the specification file, including the
          existing constraints for the problem.

    Returns:
        - list: one 'input; output' string per constraint in the
          specification

    '''
    file_sexp = parser.sexpFromFile(current_spec)
    benchmark_tuple = parser.extract_benchmark(file_sexp)
    (
            theories,
            syn_ctx,
            synth_instantiator,
            macro_instantiator,
            uf_instantiator,
            constraints,
            grammar_map,
            forall_vars_map,
            default_grammar_sfs
            ) = benchmark_tuple
    inputs = []
    vals = []
    for constraint in constraints:
        inputs.append(constraint.children[0].children[0].value_object.value_object)
        vals.append(constraint.children[1].value_object.value_object)
    result = []
    for i in range(len(inputs)):
        result.append(inputs[i] + '; ' + vals[i])
    return result
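
A minimal usage sketch (the benchmark path below is borrowed from Example #3 and is an assumption, not part of the original snippet):

# Hypothetical usage of get_constraints: each returned entry is an
# "input; output" pair taken from the spec's constraints.
pairs = get_constraints('./euphony/benchmarks/string/test/phone-5-test.sl')
for pair in pairs:
    print(pair)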
Example #2
def test_make_solver(benchmark_files):
    for benchmark_file in benchmark_files:
        file_sexp = parser.sexpFromFile(benchmark_file)
        make_solver(file_sexp)
Example #3
def evaluate(expr, input, synth_file='./euphony/benchmarks/string/test/phone-5-test.sl'):
    '''Evaluate an expression for a given input

    Arguments:
        - expr(FunctionExpression): The expression to evaluate
        - input(str): the input to the given expression.
        - synth_file(str): path to a synthesis file defining the available
          grammar. This file doesn't need to be the same file that the
          expression was generated from; it just needs to have the same
          grammar definition. Constraints are not used in any way. It's
          required in order to load the proper logic into the expression
          evaluator. By default this points to a file containing string logic.

    Returns:
        - (str): output from evaluating expression against given input
    '''
    eval_ctx = EvaluationContext()
    val_map = (Value(input, value_type=StringType()),)
    eval_ctx.set_valuation_map(val_map)

    # This is pretty slow since it needs to re-read the synthesis file to know
    # the grammar that it needs to parse. Personally, I am not so concerned
    # about performance, but rather just getting it to work in the first place.
    file_sexp = parser.sexpFromFile(synth_file)
    benchmark_tuple = parser.extract_benchmark(file_sexp)
    (
            theories,
            syn_ctx,
            synth_instantiator,
            macro_instantiator,
            uf_instantiator,
            constraints,
            grammar_map,
            forall_vars_map,
            default_grammar_sfs
            ) = benchmark_tuple
    # We should only have one grammar, so just grab the only key to this map.
    # The type of the object is a SynthFun
    synth_fun = list(grammar_map.keys())[0]
    # Pass this into the interpretation along with the expression we're
    # attempting to evaluate, or else we'll get an UnboundLetException
    eval_ctx.set_interpretation(synth_fun, expr)
    return evaluate_expression_raw(expr, eval_ctx)
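
A hedged usage sketch: `expr` is assumed to be a FunctionExpression obtained from a synthesizer result (e.g. get_string_solutions in Example #5), and the default string-logic synth_file is used; the input string is hypothetical:

# Hypothetical usage: apply a synthesized expression to one input string.
# `expr` must come from a spec with the same grammar as the default synth_file.
output = evaluate(expr, '+1 769-858-438')
print(output)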
Example #4
def print_stat(benchmark_files, phog_file):
    from os.path import basename
    for benchmark_file in benchmark_files:
        file_sexp = parser.sexpFromFile(benchmark_file)
        benchmark_tuple = parser.extract_benchmark(file_sexp)
        (theories, syn_ctx, synth_instantiator, macro_instantiator,
         uf_instantiator, constraints, grammar_map, forall_vars_map,
         default_grammar_sfs) = benchmark_tuple
        assert len(theories) == 1
        theory = theories[0]
        specification = get_specification(file_sexp)
        synth_funs = list(synth_instantiator.get_functions().values())
        grammar = grammar_map[synth_funs[0]]

        phog = SPhog(grammar, phog_file, synth_funs[0].range_type, specification) if options.use_sphog() else \
            Phog(grammar, phog_file, synth_funs[0].range_type)

        defs, _ = parser.filter_sexp_for('define-fun', file_sexp)
        if defs is None or len(defs) == 0:
            print('cannot find a solution!')
            exit(0)
        [name, args_data, ret_type_data, interpretation] = defs[-1]

        ((arg_vars, arg_types, arg_var_map),
         return_type) = parser._process_function_defintion(
             args_data, ret_type_data)
        expr = parser.sexp_to_expr(interpretation, syn_ctx, arg_var_map)
        i = 0
        subs_pairs = []
        for (var_expr, ty) in zip(arg_vars, arg_types):
            param_expr = exprs.FormalParameterExpression(None, ty, i)
            subs_pairs.append((var_expr, param_expr))
            i += 1
        expr = exprs.substitute_all(expr, subs_pairs)

        score = phog.get_score(expr)
        print(basename(benchmark_file), ' \t', score)
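
A minimal invocation sketch, assuming a trained PHOG model at ./euphony/phog_str (the path used in Example #5) and a benchmark file whose last define-fun is the solution to score:

# Hypothetical usage: prints "<benchmark name> \t <phog score>" per file.
print_stat(['./euphony/benchmarks/string/test/phone-5-test.sl'],
           './euphony/phog_str')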
Example #5
def get_string_solutions(input_file, num_sols=1):
    '''Returns a set of FunctionExpressions representing solutions from the
    euphony synthesizer.

    The outputs of this function can be then evaluated and/or fed into a neural
    network for ranking

    Arguments:
        - input_file (str): The file containing the logic set and constraints
          for euphony to synthesize
        - num_sols (int): The number of solutions that the solver should
          generate

    Returns:
        - list: FunctionExpression objects (see euphony.exprs.FunctionExpression)
    '''
    if not os.path.exists(input_file):
        print("Input file does not exist: {}".format(input_file))
        sys.exit(1)
    sphog_file = "./euphony/phog_str"
    rcfg_file = ''
    options.noindis = True
    options.inc = False
    options.allex = False
    options.stat = False
    options.noheuristic = False
    options.numsols = num_sols
    options.rewrite = False
    if not os.path.exists(sphog_file):
        print("Can't find sphog file! {}".format(sphog_file))
        sys.exit(1)
    file_sexp = parser.sexpFromFile(input_file)
    # result is a list of lists
    sols = benchmarks.make_solver(file_sexp, sphog_file, rcfg_file, options=options)
    return [sol for innersols in sols for sol in innersols]
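
A usage sketch under the same assumptions the function itself makes (a string-logic spec file and the bundled ./euphony/phog_str model); exprs.expression_to_string is assumed to be in scope, as in the other examples:

# Hypothetical usage: synthesize three candidate programs from a spec file.
candidates = get_string_solutions('./euphony/benchmarks/string/test/phone-5-test.sl',
                                  num_sols=3)
for expr in candidates:
    print(exprs.expression_to_string(expr))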
Example #6
def get_func_exprs_grammars(benchmark_files):
    global eu

    # Grammars
    results = []
    # for eusolver
    ite_related_macros = []
    for benchmark_file in benchmark_files:
        fun_exprs = []
        print('Loading : ', benchmark_file)

        file_sexp = parser.sexpFromFile(benchmark_file)
        if file_sexp is None:
            continue

        core_instantiator = semantics_core.CoreInstantiator()
        theory_instantiators = [
            parser.get_theory_instantiator(theory)
            for theory in parser._known_theories
        ]

        macro_instantiator = semantics_core.MacroInstantiator()
        uf_instantiator = semantics_core.UninterpretedFunctionInstantiator()
        synth_instantiator = semantics_core.SynthFunctionInstantiator()

        syn_ctx = synthesis_context.SynthesisContext(core_instantiator,
                                                     *theory_instantiators,
                                                     macro_instantiator,
                                                     uf_instantiator,
                                                     synth_instantiator)
        syn_ctx.set_macro_instantiator(macro_instantiator)

        defs, _ = parser.filter_sexp_for('define-fun', file_sexp)
        if defs is None: defs = []

        for [name, args_data, ret_type_data, interpretation] in defs:
            for eusolver in ([True, False] if eu else [False]):
                ((arg_vars, arg_types, arg_var_map),
                 return_type) = parser._process_function_defintion(
                     args_data, ret_type_data)
                expr = parser.sexp_to_expr(interpretation, syn_ctx,
                                           arg_var_map)
                macro_func = semantics_types.MacroFunction(
                    name, len(arg_vars), tuple(arg_types), return_type, expr,
                    arg_vars)
                # for eusolver: record macro functions whose definitions include ite
                if eusolver:
                    app = exprs.find_application(expr, 'ite')
                    if app is not None: ite_related_macros.append(name)

                macro_instantiator.add_function(name, macro_func)
                i = 0
                subs_pairs = []
                for (var_expr, ty) in zip(arg_vars, arg_types):
                    param_expr = exprs.FormalParameterExpression(None, ty, i)
                    subs_pairs.append((var_expr, param_expr))
                    i += 1
                expr = exprs.substitute_all(expr, subs_pairs)
                # resolve macro functions involving ite (for enumerating predicate exprs in eusolver)
                if eusolver:
                    for fname in ite_related_macros:
                        app = exprs.find_application(expr, fname)
                        if app is None: continue
                        expr = macro_instantiator.instantiate_macro(
                            expr, fname)
                fun_exprs.append(expr)

        @static_var("cnt", 0)
        def rename(synth_funs_data):
            for synth_fun_data in synth_funs_data:
                # to avoid duplicated names
                synth_fun_data[0] = "__aux_name__" + benchmark_file + str(
                    rename.cnt)
                rename.cnt += 1

        # collect grammars
        synth_funs_data, _ = parser.filter_sexp_for('synth-fun', file_sexp)
        if len(synth_funs_data) == 0:
            synth_funs_data, _ = parser.filter_sexp_for('synth-inv', file_sexp)
            rename(synth_funs_data)
            synth_funs_grammar_data = parser.process_synth_invs(
                synth_funs_data, synth_instantiator, syn_ctx)
        else:
            rename(synth_funs_data)
            synth_funs_grammar_data = parser.process_synth_funcs(
                synth_funs_data, synth_instantiator, syn_ctx)

        grammar = None
        for synth_fun, arg_vars, grammar_data in synth_funs_grammar_data:
            if grammar_data != 'Default grammar':
                # we only consider single-function synthesis for now
                grammar = parser.sexp_to_grammar(arg_vars, grammar_data,
                                                 synth_fun, syn_ctx)
                break

        results.append((fun_exprs, grammar))

    return results
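
A minimal usage sketch (the file names are hypothetical): each result pairs the solution expressions parsed from a benchmark with that benchmark's (non-default) grammar:

# Hypothetical usage: inspect the parsed solutions and grammar per benchmark.
for fun_exprs, grammar in get_func_exprs_grammars(['bench1.sl', 'bench2.sl']):
    print(len(fun_exprs), 'solution expression(s), grammar:', grammar)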
Example #7
def test_make_solver(benchmark_files, phog_file, rcfg_file):
    for benchmark_file in benchmark_files:
        file_sexp = parser.sexpFromFile(benchmark_file)
        make_solver(file_sexp, phog_file, rcfg_file)
Example #8
def get_func_exprs_grammars(benchmark_files):
    # expected format:
    #   sygus format problem
    #   (check-synth)
    #   a single solution
    global eu

    @static_var("cnt", 0)
    def rename(synth_funs_data):
        for synth_fun_data in synth_funs_data:
            # to avoid duplicated names
            synth_fun_data[0] = "__aux_name__" + benchmark_file + str(
                rename.cnt)
            rename.cnt += 1

    exprs_per_category = {}
    # decision tree : label -> exprs
    ## label : (ret_type, eu, STD spec / PBE spec, spec information ... )

    # for eusolver
    ite_related_macros = []
    # all vocabs
    all_vocabs = set([])

    for benchmark_file in benchmark_files:
        print('Loading : ', benchmark_file)
        file_sexp = parser.sexpFromFile(benchmark_file)
        if file_sexp is None:
            continue

        ## specification
        specification = get_specification(file_sexp)
        all_vocabs.update(basic_vocabs_for_spec(specification))

        core_instantiator = semantics_core.CoreInstantiator()
        theory_instantiators = [
            parser.get_theory_instantiator(theory)
            for theory in parser._known_theories
        ]
        macro_instantiator = semantics_core.MacroInstantiator()
        uf_instantiator = semantics_core.UninterpretedFunctionInstantiator()
        synth_instantiator = semantics_core.SynthFunctionInstantiator()

        syn_ctx = synthesis_context.SynthesisContext(core_instantiator,
                                                     *theory_instantiators,
                                                     macro_instantiator,
                                                     uf_instantiator,
                                                     synth_instantiator)
        syn_ctx.set_macro_instantiator(macro_instantiator)

        # collect grammars
        synth_funs_data, _ = parser.filter_sexp_for('synth-fun', file_sexp)
        if len(synth_funs_data) == 0:
            synth_funs_data, _ = parser.filter_sexp_for('synth-inv', file_sexp)
            # rename(synth_funs_data)
            synth_funs_grammar_data = parser.process_synth_invs(
                synth_funs_data, synth_instantiator, syn_ctx)
        else:
            # rename(synth_funs_data)
            synth_funs_grammar_data = parser.process_synth_funcs(
                synth_funs_data, synth_instantiator, syn_ctx)

        # handling only single-function problems for now
        fetchop_func = fetchop
        spec_flag = ()
        synth_fun_name = ''
        for synth_fun, arg_vars, grammar_data in synth_funs_grammar_data:
            if grammar_data != 'Default grammar':
                synth_fun_name = synth_fun.function_name
                grammar = parser.sexp_to_grammar(arg_vars, grammar_data,
                                                 synth_fun, syn_ctx)
                # spec flag
                spec_flag = get_spec_flag(specification, grammar)
                # fetchop func
                fetchop_func = get_fetchop_func(specification, grammar)
                all_vocabs.update(
                    get_vocabs_from_grammar(grammar, fetchop_func))

        defs, _ = parser.filter_sexp_for('define-fun', file_sexp)
        if defs is None: defs = []
        if len(defs) > 0:
            for [name, args_data, ret_type_data, interpretation] in defs:
                print(name, ' ', synth_fun_name)
                if synth_fun_name in name:
                    for eusolver in ([True] if eu else [False]):
                        ((arg_vars, arg_types, arg_var_map),
                         return_type) = parser._process_function_defintion(
                             args_data, ret_type_data)
                        # category flag
                        flag = (return_type, eusolver, spec_flag)

                        expr = parser.sexp_to_expr(interpretation, syn_ctx,
                                                   arg_var_map)
                        macro_func = semantics_types.MacroFunction(
                            name, len(arg_vars), tuple(arg_types), return_type,
                            expr, arg_vars)
                        # for eusolver: record macro functions whose definitions include ite
                        if eusolver:
                            app = exprs.find_application(expr, 'ite')
                            if app is not None: ite_related_macros.append(name)

                        macro_instantiator.add_function(name, macro_func)
                        i = 0
                        subs_pairs = []
                        for (var_expr, ty) in zip(arg_vars, arg_types):
                            param_expr = exprs.FormalParameterExpression(
                                None, ty, i)
                            subs_pairs.append((var_expr, param_expr))
                            i += 1
                        expr = exprs.substitute_all(expr, subs_pairs)
                        # resolve macro functions involving ite (for enumerating predicate exprs in eusolver)
                        if eusolver:
                            for fname in ite_related_macros:
                                app = exprs.find_application(expr, fname)
                                if app is None: continue
                                expr = macro_instantiator.instantiate_macro(
                                    expr, fname)
                        if flag not in exprs_per_category:
                            exprs_per_category[flag] = set([])
                        exprs_per_category[flag].add((expr, fetchop_func))

    return exprs_per_category, all_vocabs
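
A usage sketch for this variant (file names hypothetical): the result maps a category flag (return type, eusolver mode, spec flag) to a set of (expr, fetchop_func) pairs, alongside the collected vocabulary:

# Hypothetical usage: count collected expressions per category.
per_category, vocabs = get_func_exprs_grammars(['bench1.sl', 'bench2.sl'])
for flag, entries in per_category.items():
    print(flag, len(entries))
print(len(vocabs), 'vocab entries')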
Example #9
def print_solution_size(benchmark_file):  # hypothetical signature; the enclosing def was cut from this snippet
    core_instantiator = semantics_core.CoreInstantiator()
    theory_instantiators = [
        parser.get_theory_instantiator(theory)
        for theory in parser._known_theories
    ]

    macro_instantiator = semantics_core.MacroInstantiator()
    uf_instantiator = semantics_core.UninterpretedFunctionInstantiator()
    synth_instantiator = semantics_core.SynthFunctionInstantiator()

    syn_ctx = synthesis_context.SynthesisContext(core_instantiator,
                                                 *theory_instantiators,
                                                 macro_instantiator,
                                                 uf_instantiator,
                                                 synth_instantiator)

    file_sexp = parser.sexpFromFile(benchmark_file)
    defs, _ = parser.filter_sexp_for('define-fun', file_sexp)
    if defs is None or len(defs) == 0:
        print('No function can be found!')
        exit(0)

    [name, args_data, ret_type_data, interpretation] = defs[-1]
    ((arg_vars, arg_types, arg_var_map),
     return_type) = parser._process_function_defintion(args_data,
                                                       ret_type_data)
    expr = parser.sexp_to_expr(interpretation, syn_ctx, arg_var_map)
    print(exprs.get_expression_size(expr))
Example #10
def generate_distinguishing_input(current_spec, candidate_programs, failure_threshold=100, args=None):
    '''Generates a new distinguishing input which is not a part of the existing
    specification

    Arguments:
        - current_spec (str): The specification for the problem including the
          existing constraints for the problem.
        - candidate_programs (list): candidate program expressions whose
          outputs are compared in order to tell them apart.
        - failure_threshold (int): number of rounds of mutations to attempt
          before giving up.
        - args: optional parsed command-line arguments; `args.auto` accepts
          the first distinguishing input found and `args.verbose` prints
          generation statistics.

    Returns:
        - str: a string representing the distinguishing input not part of the
          current specification, or None if no distinguishing input can be
          found.

    '''
    file_sexp = parser.sexpFromFile(current_spec)
    benchmark_tuple = parser.extract_benchmark(file_sexp)
    (
            theories,
            syn_ctx,
            synth_instantiator,
            macro_instantiator,
            uf_instantiator,
            constraints,
            grammar_map,
            forall_vars_map,
            default_grammar_sfs
            ) = benchmark_tuple
    inputs = []
    for constraint in constraints:
        inputs.append(constraint.children[0].children[0].value_object.value_object)
    import string_builder
    import numpy as np  # used below for the distance statistics
    inputs_r = [string_builder.RString(inp) for inp in inputs]
    def distances(input_list, distfunc):
        dists = []
        for x in input_list:
            curr_dists = []
            for y in input_list:
                curr_dists.append(distfunc(x, y))
            dists.append(curr_dists)
        return dists

    # find the metric which varies the least between all the examples
    char_dist  = distances(inputs_r, lambda x, y: x.orig_distance(y))
    class_dist = distances(inputs_r, lambda x, y: x.class_distance(y))
    group_dist = distances(inputs_r, lambda x, y: x.group_distance(y))
    vals = {
        'char': char_dist,
        'class': class_dist,
        'group': group_dist
    }
    stats = {}
    for key, val in vals.items():
        tril = np.tril(np.array(val))
        stats[key] = string_builder.TextStats(tril.mean(), tril.std())
    stat_set = string_builder.EditStatSet(stats['char'], stats['class'], stats['group'])
    distinguishing_input = None
    loop_count = 0
    total_generated = 0
    while True:
        distinguished = None
        for constraint_input in inputs_r:
            mutated = constraint_input.generate_mutation(stat_set)
            total_generated += 1
            # run input across all outputs to see if there is an input which returns more
            # than one unique result.
            results = set([evaluate(expression, mutated, current_spec) for expression in candidate_programs])
            if len(results) > 1:
                if args is not None and args.auto:
                    distinguishing_input = mutated
                    distinguished = True
                    break
                selected = input('Found a distinguishing input ({}). Programs generated outputs: {}. Acceptable? (Y)es/(n)o/(f)inish: '.format(mutated, results)).lower()
                if selected == '' or selected == 'y' or selected == 'f':
                    distinguishing_input = mutated
                    distinguished = True
                    break
        if distinguished is not None:
            break
        loop_count += 1
        if loop_count > failure_threshold:
            break

    if args is not None and args.verbose:
        print("returning input {}. Total generated inputs: {}".format(distinguishing_input, total_generated))
    return distinguishing_input
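
A sketch tying Examples #5 and #10 together (the spec path is an assumption): candidates come from the synthesizer, and the returned input, if any, can be turned into a new constraint for the next synthesis round:

# Hypothetical end-to-end usage: synthesize candidates, then search for an
# input on which at least two of them disagree.
spec = './euphony/benchmarks/string/test/phone-5-test.sl'
candidates = get_string_solutions(spec, num_sols=3)
new_input = generate_distinguishing_input(spec, candidates)
print('distinguishing input:', new_input)  # None if the search gave up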
Example #11

def get_programs(current_spec):
    '''Builds a synthesis context for the given specification file.

    Arguments:
        - current_spec (str): path to the specification file
    '''
    file_sexp = parser.sexpFromFile(current_spec)

    core_instantiator = semantics_core.CoreInstantiator()
    theory_instantiators = [parser.get_theory_instantiator(theory) for theory in parser._known_theories]

    macro_instantiator = semantics_core.MacroInstantiator()
    uf_instantiator = semantics_core.UninterpretedFunctionInstantiator()
    synth_instantiator = semantics_core.SynthFunctionInstantiator()

    syn_ctx = synthesis_context.SynthesisContext(
            core_instantiator,
            *theory_instantiators,
            macro_instantiator,
            uf_instantiator,
            synth_instantiator)