import json
import os

# Project-local helpers; the module names here are assumptions based on usage:
# F provides LimitFuzzer, and util.do wraps subprocess execution.
import fuzz as F
import util


def main(args):
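    """Fuzz inputs from the grammar JSON given in args[0] and run `command`
    on each one; save the first `count` distinct accepted inputs under
    `directory` as <key>.input.<n>."""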
    errors = []
    with open(args[0]) as f:
        s = json.load(f)
    grammar = s['[grammar]']
    start = s['[start]']
    key = args[1]
    command = args[2]
    directory = args[3]
    count = int(args[4])
    os.makedirs(directory, exist_ok=True)
    fuzzer = F.LimitFuzzer(grammar)
    i = 0
    seen = set()
    while True:
        try:
            v = fuzzer.fuzz(start)
            if not v.strip(): continue
            if v in seen: continue
            seen.add(v)
            print(repr(v))
            fn = '%s/%s.input.x' % (directory, key)
            with open(fn, 'w+') as f:
                print(v, end='', file=f)
            o = util.do([command, fn])
            if o.returncode != 0:
                continue
            else:
                i += 1
                with open('%s/%s.input.%d' % (directory, key, i), 'w+') as f:
                    print(v, end='', file=f)
        except RecursionError:
            pass
        if i >= count: break
    return errors
Example #2
import json
import os
import subprocess

# Assumed project-local modules, as above; usage() is defined elsewhere in
# the original module.
import fuzz as F
import util


def main(args):
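    """Measure the precision of a mined grammar: fuzz inputs from the
    golden grammar, keep those the target `command` accepts, and count how
    many of them the mined grammar's parser parses (60s timeout per input).
    Writes the tally to <directory>/<key>.precision_."""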
    if not args or args[0] == '-h': usage()
    errors = []
    with open(args[0]) as f:
        golden = json.load(f)
    ggrammar = golden['[grammar]']
    gstart = golden['[start]']

    with open(args[1]) as f:
        mined = json.load(f)
    mgrammar = mined['[grammar]']
    mstart = mined['[start]']

    command = args[2]
    directory = args[3]
    count = int(args[4])
    key = args[5]
    os.makedirs(directory, exist_ok=True)
    fuzzer = F.LimitFuzzer(ggrammar)
    i = 0
    correct = 0
    seen = set()
    timeout = 0
    while True:
        try:
            v = fuzzer.fuzz(gstart)
            if not v.strip(): continue
            if v in seen: continue
            seen.add(v)
            fn = '%s/%s.input.x' % (directory, key)
            with open(fn, 'w+') as f:
                print(v, end='', file=f)
            o = util.do([command, fn])
            if o.returncode != 0:
                continue
            else:
                #print(repr(v))
                i += 1
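                # Check whether the mined grammar's parser also accepts
                # this (command-accepted) input.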
                o = util.do(["python3", "./src/parser.py", args[1], fn],
                            timeout=60)
                if o.returncode == 0:
                    correct += 1
                    #print('parsed. %d/%d (timeout: %d)' % (correct, i, timeout))
                else:
                    print(
                        'not parsed %d/%d (timeout: %d)' %
                        (i - correct, i, timeout), o.stdout, o.stderr)
        except RecursionError:
            print('recursion.')
        except subprocess.TimeoutExpired:
            timeout += 1
            print('timed out.')
        if i >= count: break
    with open("%s/%s.precision_" % (directory, key), 'w+') as f:
        print('%s result: %d/%d (timeout: %d)' % (key, correct, i, timeout),
              file=f)
    return errors
Example #3
def consider_merging(a, b, key, cfg, start):
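    """Tentatively merge keys a and b under `key` in grammar cfg; accept
    the merge only if config.P3Check fuzzed samples all pass check.check()."""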
    g = gen_new_grammar(a, b, key, cfg)
    fzz = fuzz.LimitFuzzer(g)
    for i in range(config.P3Check):
        v = fzz.fuzz(start)
        r = check.check(v)
        if not r:
            return None
    return g
Example #4
def generalize_single_token(grammar, start, k, q, r, command, blacklist):
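    """Generalize the single token grammar[k][q][r] to the widest character
    class that the target `command` still accepts. GK, ASCII_MAP, and
    MAX_CHECKS are module-level constants from the surrounding code; tokens
    that cannot be validated within MAX_CHECKS tries are blacklisted."""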
    # first we replace the token with a temporary key
    gk = GK
    # Was there a previous widened char? And if there was,
    # do we belong to it?
    char = grammar[k][q][r]
    if r > 0 and grammar[k][q][r - 1][-1] == '+':
        # remove the +
        last_char = grammar[k][q][r - 1][0:-1]
        if last_char in ASCII_MAP and char in ASCII_MAP[last_char]:
            # We are part of the previous widened token.
            grammar[k][q][r] = last_char + '+'
            return grammar

    g_ = copy.deepcopy(grammar)
    g_[k][q][r] = gk
    g_[gk] = [[char]]
    #reachable_keys = grammartools.reachable_dict(g_)
    # now, we need a path to reach this.
    fg = grammartools.get_focused_grammar(g_, (gk, []))
    fuzzer = F.LimitFuzzer(fg)
    #skel_tree = find_path_key(g_, start, gk, reachable_keys, fuzzer)
    tree = None
    check = 0
    while tree is None:
        #tree = flush_tree(skel_tree, fuzzer, gk, char)
        #tree = fuzzer.gen_key(grammartools.focused_key(start), depth=0, max_depth=1)
        tree = fuzzer.iter_gen_key(grammartools.focused_key(start),
                                   max_depth=1)
        val = util.check(char, char,
                         '<__CHECK__(%d/%d)>' % (check, MAX_CHECKS), tree,
                         command, char, char)
        check += 1
        if not val:
            tree = None
        if check > MAX_CHECKS:
            print("Exhausted limit for key:%s, rule:%d, token:%d, char:%s" %
                  (k, q, r, char),
                  file=sys.stderr)
            blacklist.append((k, q, r, char))
            #raise "Exhausted limit for key:%s, rule:%d, token:%d, char:%s" % (k, q, r, char)
            return grammar
        # now we need to make sure that this works.

    gen_token = find_max_generalized(tree, char, gk, command)
    if gen_token != char:
        # try widening
        gen_token = find_max_widened(tree, gen_token, gk, command)
    del g_[gk]
    g_[k][q][r] = gen_token
    # preserve the order
    grammar[k][q][r] = gen_token
    return grammar
Example #5
def check_key(g, gk, start, command):
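    """Validate that key gk is usable in grammar g: repeatedly generate a
    derivation tree through a grammar focused on gk (G and F are the
    project's grammartools and fuzzer modules) and check it against the
    target command, giving up after MAX_CHECKS attempts."""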
    fg = G.get_focused_grammar(g, (gk, []))
    fuzzer = F.LimitFuzzer(fg)
    tree = None
    check = 0
    while tree is None:
        tree = fuzzer.iter_gen_key(G.focused_key(start), max_depth=0)
        val = util.check('', '',
                         '<__MINE_CHECK__(%d/%d)>' % (check, MAX_CHECKS), tree,
                         command, '', '')
        check += 1
        if not val:
            tree = None
        if check > MAX_CHECKS:
            print("Exhausted limit for key:%s" % gk, file=sys.stderr)
            return
    # Assumed completion: without this, the function would fall through and
    # return None even after a tree is successfully validated.
    return tree