Example #1
0
def generate():
    """Walk `seed_path` for Kotlin sources, synthesize a fuzzed variant of
    each valid one, and print the syntactic pass rate.

    Relies on module-level names: `seed_path`, `pp` (preprocess/verify
    helpers), and `synthesis()`.
    """
    total_count = 0
    syntax_valid_count = 0
    files = []
    for root, _dirs, f_names in os.walk(seed_path):
        for f in f_names:
            files.append(os.path.join(root, f))
    for file in files:
        # Skip previously generated artifacts and non-Kotlin sources.
        if 'deepfuzz' in file or not file.endswith('.kt'):
            continue
        # Context manager closes the handle deterministically
        # (the original leaked it via open(...).read()).
        with open(file, 'r') as fh:
            text = fh.read()
        text = pp.remove_comment(text)
        is_valid = pp.verify_correctness(text, file, 'deepfuzz_original')
        if not is_valid:
            continue
        total_count += 1
        text = synthesis(text, 'g3', 'sample')
        # NOTE(review): the tag says 'deepfuzz_g1_nosample' but synthesis
        # ran with 'g3'/'sample' — confirm which label is intended.
        is_valid = pp.verify_correctness(text, file, 'deepfuzz_g1_nosample')
        if is_valid:
            syntax_valid_count += 1
    # Guard against ZeroDivisionError when no valid seed file was found.
    pass_rate = syntax_valid_count / total_count if total_count else 0.0
    print('syntax_valid_count: %d' % syntax_valid_count)
    print('total_count: %d' % total_count)
    print('pass rate: %f' % pass_rate)
Example #2
0
        print(sentences[i] + "\t" + next_chars[i])


# Collect every file under the test-data tree, then feed each source that
# survives preprocessing/verification into the training-data generator.
path = './testData1'
files = []
valid_count = 0
for root, _dirs, f_names in os.walk(path):
    # os.walk and list.append do not raise here; the original wrapped this
    # loop in a pointless try/except that has been removed.
    for f in f_names:
        files.append(os.path.join(root, f))
for file in files:
    try:
        print('--------------------------------------------------')
        print(file)
        # Context manager closes the handle even if a later step raises
        # (the original leaked it via open(...).read()).
        with open(file, 'r') as fh:
            text = fh.read()
        text = pp.remove_comment(text)
        text = pp.remove_space(text)
        is_valid = pp.verify_correctness(text, file, 'nospace')
        if is_valid:
            valid_count += 1
            generate_training_data(text)
    except Exception:
        # Best-effort batch processing: skip files that fail to read,
        # decode, or preprocess, and keep going.
        continue
print(valid_count)
        add_asm('%s_START:' %inst_id)
        traverse_instruction(block)

        cmp_op = traverse_condition(pred)
        add_asm('%s %s_END' %(op_to_cmd[cmp_op], inst_id))
        add_asm('jmp %s_START' %inst_id)
        add_asm('%s_END:' %inst_id)

    leave_block()

def traverse_condition(pred):
    """Emit asm for both operands of a comparison predicate, add the
    compare instruction, and return the comparison operator."""
    op, lhs, rhs = pred
    # Left operand first, then right — evaluation order matters for the
    # emitted instruction stream.
    for operand in (lhs, rhs):
        traverse_expression(operand)
    add_cmp_asm('cmpq')
    return op

if __name__ == '__main__':
    parser = cparse.parser
    # Strip comments and blank lines so the grammar sees clean input.
    s = remove_blank(remove_comment(sys.stdin.read()))
    asts = parser.parse(s)

    enter_block('global')
    # A plain loop replaces map(traverse_ast, asts): under Python 3 map()
    # is lazy, so the original would never traverse anything.
    for ast in asts:
        traverse_ast(ast)
    if_call_cat()
    if_exist_string()
    add_str_literal_asm()
    leave_block()

    # print() call replaces the Python-2 print statement, matching the
    # print() usage elsewhere in this file.
    print('\n'.join(instructions))
def p_argument_expression_list_2(t):
    '''argument_expression_list : argument_expression_list COMMA expression'''
    # Build a fresh list: the accumulated arguments plus the newly
    # parsed expression (docstring above is the ply grammar rule).
    args = list(t[1])
    args.append(t[3])
    t[0] = args

# constant:
def p_constant(t):
    '''constant : ICONST'''
    # Tag the integer literal with its token kind (ply grammar rule above).
    t[0] = ('ICONST', t[1])

def p_constant_1(t):
    '''constant : FCONST'''
    # Tag the float literal with its token kind (ply grammar rule above).
    t[0] = ('FCONST', t[1])

def p_error(t):
    # ply error handler: report the bad token, discard it, and tell the
    # parser to resume; a missing token means we hit end of input.
    if not t:
        print("Syntax error at EOF")
        return
    print("Syntax error at token", t)
    # Just discard the token and tell the parser it's okay.
    parser.errok()



# Build the LALR parser table once at module load.
parser = yacc.yacc(method='LALR')

if __name__ == '__main__':
    s = sys.stdin.read()
    s = remove_blank(remove_comment(s))
    # print() call replaces the Python-2 `print s` statement; it echoes the
    # preprocessed source before dumping the parse tree.
    print(s)
    pp.pprint(parser.parse(s))
    # NOTE(review): removed stray paste-orphan lines that followed here
    # ('''constant : ICONST''' / t[0] = 'ICONST', t[1]); they referenced an
    # undefined name `t` and would raise NameError when run as a script.


def p_constant_1(t):
    '''constant : FCONST'''
    # Pair the float literal with its token kind (ply grammar rule above).
    kind = 'FCONST'
    t[0] = (kind, t[1])


# def p_empty(t):
#     'empty : '
#     t[0] = None


def p_error(t):
    # ply error handler: report the offending token and recover.
    if t:
        # Print the token itself, not t[0]: a ply LexToken is not
        # subscriptable, so the original t[0] raised TypeError instead of
        # reporting the error. This also matches the other p_error copy.
        print("Syntax error at token", t)
        # Just discard the token and tell the parser it's okay.
        parser.errok()
    else:
        print("Syntax error at EOF")


# Build the LALR parser table; when run as a script, parse stdin and
# pretty-print the resulting tree.
parser = yacc.yacc(method='LALR')

if __name__ == '__main__':
    source = remove_blank(remove_comment(sys.stdin.read()))
    pp.pprint(parser.parse(source))