Example #1
def shorten_tweet(tweet):
    if len(tweet) > 140:
        print_ok("Status too long, will be shortened to 140 chars!")
        short_tweet = tweet[:137] + "..."
    else:
        short_tweet = tweet
    return short_tweet
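
A quick usage sketch of the helper above; print_ok is afl-utils' console-output helper and is stubbed out here so the snippet runs standalone:

print_ok = print  # stand-in for afl-utils' print_ok (assumption)
assert shorten_tweet("A" * 200) == "A" * 137 + "..."
assert shorten_tweet("short status") == "short status"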
Example #2
    def pull(self):
        fuzzers = self.__get_fuzzers()

        local_path = self.fuzzer_config['sync_dir']
        remote_path = self.server_config['remote_path']

        options = list(_rsync_default_options)
        excludes = []

        # exclude our previously pushed fuzzer states from being pulled again
        # and avoid to overwrite our local fuzz data
        for fuzzer in fuzzers:
            excludes += ['{}.sync'.format(fuzzer), fuzzer]

        # restrict to certain session, if requested
        if self.fuzzer_config['session'] is not None:
            options += [
                '--include=\"/{}*/\"'.format(self.fuzzer_config['session'])
            ]
            excludes += ['*']

        print_ok('Pulling {}/* <- {}/'.format(local_path, remote_path))
        self.rsync_get(remote_path,
                       local_path,
                       rsync_options=options,
                       rsync_excludes=excludes)
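
rsync_get is defined elsewhere in afl-sync; a minimal sketch of what such a helper could look like, assuming it simply shells out to rsync (not the project's actual implementation):

import subprocess

def rsync_get(remote_path, local_path, rsync_options=(), rsync_excludes=()):
    # build 'rsync <options> <--exclude=...> remote/ local/' and run it
    cmd = ["rsync"] + list(rsync_options)
    cmd += ["--exclude={}".format(e) for e in rsync_excludes]
    cmd += [remote_path + "/", local_path + "/"]
    subprocess.check_call(cmd)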
Example #3
def afl_reseed(sync_dir, coll_dir):
    fuzzer_queues = afl_collect.get_fuzzer_instances(sync_dir, crash_dirs=False)

    for fuzzer in fuzzer_queues:
        # move original fuzzer queues out of the way
        date_time = time.strftime("%Y-%m-%d-%H:%M:%S")
        queue_dir = os.path.join(sync_dir, fuzzer[0], "queue")
        queue_bak = "%s.%s" % (queue_dir, date_time)
        os.makedirs(queue_bak, exist_ok=True)

        queue_ls = os.listdir(queue_dir)

        for item in queue_ls:
            abs_item = os.path.join(queue_dir, item)
            if os.path.isfile(abs_item):
                shutil.move(abs_item, queue_bak)

        # copy newly generated corpus into queues
        print_ok("Reseeding %s into queue %s" % (os.path.basename(coll_dir), queue_dir))
        coll_ls = os.listdir(coll_dir)

        for item in coll_ls:
            abs_item = os.path.join(coll_dir, item)
            if os.path.isfile(abs_item):
                shutil.copy2(abs_item, queue_dir)

    return fuzzer_queues
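
A hypothetical invocation (both paths are assumptions): re-seed every fuzzer queue under ./sync with the minimized corpus from ./collection.cmin:

reseeded = afl_reseed("./sync", "./collection.cmin")
print_ok("Reseeded %d fuzzer instance(s)." % len(reseeded))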
Example #4
    def run_job(self, job):
        job_module = self.get_module(job['module'])
        job_func = self.get_member(job_module, job['function'])
        job_args = [job['module'].rsplit('.', 1)[1]] + job['params'].split()
        if not self.quiet:
            print_ok('Executing \'{}\' ({}.{})'.format(job['name'], job['module'], job['function']))
        job_func(job_args)
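
A job entry as consumed by run_job might look like the following (all values are illustrative assumptions). With this entry, run_job ends up calling afl_utils.afl_collect.main(['afl_collect', './sync', './collection', '--', './target', '@@']):

job = {
    'name': 'collect crashes',
    'module': 'afl_utils.afl_collect',
    'function': 'main',
    'params': './sync ./collection -- ./target @@',
}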
Example #5
    def pull(self):
        fuzzers = self.__get_fuzzers()

        local_path = self.fuzzer_config['sync_dir']
        remote_path = self.server_config['remote_path']

        options = list(_rsync_default_options)
        excludes = self.__excludes

        # exclude our previously pushed fuzzer states from being pulled again
        # and avoid to overwrite our local fuzz data
        for fuzzer in fuzzers:
            excludes += ['{}.sync'.format(fuzzer), fuzzer]

        # restrict to certain session, if requested
        if self.fuzzer_config['session'] is not None:
            # make sure defaults are excluded from explicitly included locations
            for exclude_rule in self.__excludes:
                options += ['--exclude=\"/{}*/{}\"'.format(self.fuzzer_config['session'], exclude_rule)]
            # recursively include everything that does match the session name
            options += ['--include=\"/{}*/\"'.format(self.fuzzer_config['session'])]
            options += ['--include=\"/{}*/*\"'.format(self.fuzzer_config['session'])]
            # exclude everything else
            excludes += ['*']

        print_ok('Pulling {}/* <- {}/'.format(local_path, remote_path))
        self.rsync_get(remote_path, local_path, rsync_options=options, rsync_excludes=excludes)
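
For illustration, with session 'SESSION', an assumed default exclude list ['crashes*/'] and a single local fuzzer 'fuzzer000', the filters built above come out as follows (default rsync options omitted); since rsync applies the first matching rule, the trailing '*' exclude only drops paths the session includes did not match:

options = [
    '--exclude="/SESSION*/crashes*/"',  # defaults still apply inside session dirs
    '--include="/SESSION*/"',           # the session directories themselves
    '--include="/SESSION*/*"',          # everything inside them
]
excludes = ['crashes*/', 'fuzzer000.sync', 'fuzzer000', '*']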
Example #6
def invoke_cmin(input_dir,
                output_dir,
                target_cmd,
                mem_limit=None,
                timeout=None,
                qemu=False):
    success = True
    cmin_cmd = "afl-cmin "

    if mem_limit is not None:
        cmin_cmd += "-m %s " % convert_mem_limit(mem_limit)

    if timeout is not None:
        cmin_cmd += "-t %d " % int(timeout)

    if qemu:
        cmin_cmd += "-Q "

    cmd = "%s-i %s -o %s -- %s" % (cmin_cmd, input_dir, output_dir,
                                   target_cmd)
    print_ok("Executing: %s" % cmd)
    try:
        subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as e:
        print_warn("afl-cmin failed with exit code %d!" % e.returncode)
        success = False
    return success
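
A hypothetical call (paths and limits are assumptions); '@@' marks where afl-cmin substitutes the input file into the target command line:

if invoke_cmin("./collection", "./collection.cmin", "./target @@",
               mem_limit=200, timeout=1000):
    print_ok("Corpus minimized into ./collection.cmin")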
Example #7
def generate_gdb_exploitable_script(script_filename,
                                    sample_index,
                                    target_cmd,
                                    script_id=0,
                                    intermediate=False):
    target_cmd = target_cmd.split()
    gdb_target_binary = target_cmd[0]
    gdb_run_cmd = " ".join(target_cmd[1:])

    if not intermediate:
        script_filename = os.path.abspath(os.path.expanduser(script_filename))
        print_ok(
            "Generating final gdb+exploitable script '%s' for %d samples..." %
            (script_filename, len(sample_index.outputs())))
    else:
        script_filename = os.path.abspath(
            os.path.expanduser("%s.%d" % (script_filename, script_id)))
        print_ok(
            "Generating intermediate gdb+exploitable script '%s' for %d samples..."
            % (script_filename, len(sample_index.outputs())))

    try:
        fd = open(script_filename, "w")

        # <script header>
        # source exploitable.py if necessary
        if gdb_exploitable_path:
            fd.writelines("source %s\n" % gdb_exploitable_path)

        # load executable
        fd.writelines("file %s\n" % gdb_target_binary)
        # </script_header>

        # fill script with content
        for f in sample_index.index:
            fd.writelines("echo Crash\ sample:\ '%s'\\n\n" % f['output'])

            if not stdin_mode(target_cmd):
                run_cmd = "run " + gdb_run_cmd + "\n"
            else:
                run_cmd = "run " + gdb_run_cmd + "< @@" + "\n"

            if intermediate:
                run_cmd = run_cmd.replace("@@", f['input'])
            else:
                run_cmd = run_cmd.replace(
                    "@@", os.path.join(sample_index.output_dir, f['output']))

            fd.writelines(run_cmd)
            fd.writelines("exploitable\n")

        # <script_footer>
        fd.writelines("quit")
        # </script_footer>

        fd.close()
    except (FileExistsError, PermissionError):
        print_err("Could not open script file '%s' for writing!" %
                  script_filename)
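
Assuming gdb_exploitable_path is set and the target is run as './target @@', the script emitted above would look roughly like this (the exploitable.py path and sample name are illustrative):

source /usr/share/exploitable/exploitable.py
file ./target
echo Crash\ sample:\ 'fuzzer000:id:000000'\n
run ./collection/fuzzer000:id:000000
exploitable
quit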
Example #8
    def run_job(self, job):
        job_module = self.get_module(job['module'])
        job_func = self.get_member(job_module, job['function'])
        job_args = [job['module'].rsplit('.', 1)[1]] + job['params'].split()
        if not self.quiet:
            print_ok('Executing \'{}\' ({}.{})'.format(job['name'],
                                                       job['module'],
                                                       job['function']))
        job_func(job_args)
Example #9
def fetch_stats(config_settings, twitter_inst):
    doExit = False
    # { 'fuzzer_dir': (stat, old_stat) }
    stat_dict = dict()
    while not doExit:
        try:
            for fuzzer in config_settings['fuzz_dirs']:
                stats = load_stats(fuzzer)

                if not stats:
                    continue

                sum_stats = summarize_stats(stats)

                try:
                    # stat_dict has already been initialized for fuzzer
                    #  old_stat <- last_stat
                    old_stats = stat_dict[fuzzer][0].copy()
                except KeyError:
                    # stat_dict has not yet been initialized for fuzzer
                    #  old_stat <- cur_stat
                    old_stats = sum_stats.copy()

                # initialize/update stat_dict
                stat_dict[fuzzer] = (sum_stats, old_stats)

                stat_change = diff_stats(sum_stats, old_stats)

                if not stat_change:
                    continue

                print(prettify_stat(sum_stats, stat_change, True))

                tweet = prettify_stat(sum_stats, stat_change, False)

                l = len(tweet)
                c = clr.LRD if l > 140 else clr.LGN
                print_ok("Tweeting status (%s%d" % (c, l) + clr.RST +
                         " chars)...")

                try:
                    twitter_inst.statuses.update(status=shorten_tweet(tweet))
                except (twitter.TwitterHTTPError, URLError):
                    print_warn(
                        "Problem connecting to Twitter! Tweet not sent!")
                except Exception as e:
                    print_err("Sending tweet failed (Reason: " + clr.GRA +
                              "%s" % e.__cause__ + clr.RST + ")")

            if float(config_settings['interval']) < 0:
                doExit = True
            else:
                time.sleep(float(config_settings['interval']) * 60)
        except KeyboardInterrupt:
            print("\b\b")
            print_ok("Aborted by user. Good bye!")
            doExit = True
Example #10
def invoke_tmin(input_files,
                output_dir,
                target_cmd,
                num_threads=1,
                mem_limit=None,
                timeout=None):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=True)

    in_queue_lock = threading.Lock()
    out_queue_lock = threading.Lock()
    in_queue = queue.Queue(len(input_files))
    out_queue = queue.Queue(len(input_files))

    # fill input queue with input files
    in_queue_lock.acquire()
    for f in input_files:
        in_queue.put(f)
    in_queue_lock.release()

    thread_list = []

    tmin_cmd = "afl-tmin "

    if mem_limit is not None:
        tmin_cmd += "-m %d " % int(mem_limit)

    if timeout is not None:
        tmin_cmd += "-t %d " % int(timeout)

    for i in range(0, num_threads, 1):
        t = AflThread.AflTminThread(i, tmin_cmd, target_cmd, output_dir,
                                    in_queue, out_queue, in_queue_lock,
                                    out_queue_lock)
        thread_list.append(t)
        print_ok("Starting afl-tmin worker %d." % i)
        t.daemon = True
        t.start()

    print_ok(
        "Be patient, afl-tmin is running. This can take hours, if not days...")

    for t in thread_list:
        t.join()

    files_processed = []

    # read processed files from output queue
    out_queue_lock.acquire()
    while not out_queue.empty():
        files_processed.append(out_queue.get())
    out_queue_lock.release()

    return len(files_processed)
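
A hypothetical call (all arguments are assumptions): minimize two crash samples with four parallel afl-tmin workers:

minimized = invoke_tmin(["crashes/id:000000", "crashes/id:000001"],
                        "crashes.tmin", "./target @@",
                        num_threads=4, mem_limit=200, timeout=1000)
print_ok("%d file(s) processed." % minimized)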
Example #11
def write_pgid_file(conf_settings):
    if "interactive" in conf_settings and not conf_settings["interactive"]:
        # write/append PGID to file /tmp/afl-multicore.PGID.<SESSION>
        f = open("/tmp/afl_multicore.PGID.%s" % conf_settings["session"], "a")
        if f.writable():
            f.write("%d\n" % os.getpgid(0))
        f.close()
        print_ok("For progress info check: %s/%sxxx/fuzzer_stats!" %
                 (conf_settings["output"], conf_settings["session"]))
    else:
        print_ok("Check the newly created screen windows!")
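
write_pgid_file is the producer half of session handling: kill_session (Example #16) later reads the same file back. A minimal sketch of that consumer side, assuming a session named 'fuzz01':

# one process-group ID per line ('fuzz01' is an assumed session name)
with open("/tmp/afl_multicore.PGID.fuzz01") as f:
    pgids = [int(line) for line in f]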
Example #12
def fetch_stats(config_settings, twitter_inst):
    doExit = False
    # { 'fuzzer_dir': (stat, old_stat) }
    stat_dict = dict()
    while not doExit:
        try:
            for fuzzer in config_settings["fuzz_dirs"]:
                stats = load_stats(fuzzer)

                if not stats:
                    continue

                sum_stats = summarize_stats(stats)

                try:
                    # stat_dict has already been initialized for fuzzer
                    #  old_stat <- last_stat
                    old_stats = stat_dict[fuzzer][0].copy()
                except KeyError:
                    # stat_dict has not yet been initialized for fuzzer
                    #  old_stat <- cur_stat
                    old_stats = sum_stats.copy()

                # initialize/update stat_dict
                stat_dict[fuzzer] = (sum_stats, old_stats)

                stat_change = diff_stats(sum_stats, old_stats)

                if not stat_change:
                    continue

                print(prettify_stat(sum_stats, stat_change, True))

                tweet = prettify_stat(sum_stats, stat_change, False)

                l = len(tweet)
                c = clr.LRD if l > 140 else clr.LGN
                print_ok("Tweeting status (%s%d" % (c, l) + clr.RST + " chars)...")

                try:
                    twitter_inst.statuses.update(status=shorten_tweet(tweet))
                except (twitter.TwitterHTTPError, URLError):
                    print_warn("Problem connecting to Twitter! Tweet not sent!")
                except Exception as e:
                    print_err("Sending tweet failed (Reason: " + clr.GRA + "%s" % e.__cause__ + clr.RST + ")")

            if float(config_settings["interval"]) < 0:
                doExit = True
            else:
                time.sleep(float(config_settings["interval"]) * 60)
        except KeyboardInterrupt:
            print("\b\b")
            print_ok("Aborted by user. Good bye!")
            doExit = True
Example #13
def generate_gdb_exploitable_script(script_filename, sample_index, target_cmd, script_id=0, intermediate=False):
    target_cmd = target_cmd.split()
    gdb_target_binary = target_cmd[0]
    gdb_run_cmd = " ".join(target_cmd[1:])

    if not intermediate:
        script_filename = os.path.abspath(os.path.expanduser(script_filename))
        print_ok(
            "Generating final gdb+exploitable script '%s' for %d samples..."
            % (script_filename, len(sample_index.outputs()))
        )
    else:
        script_filename = os.path.abspath(os.path.expanduser("%s.%d" % (script_filename, script_id)))
        print_ok(
            "Generating intermediate gdb+exploitable script '%s' for %d samples..."
            % (script_filename, len(sample_index.outputs()))
        )

    try:
        fd = open(script_filename, "w")

        # <script header>
        # source exploitable.py if necessary
        if gdb_exploitable_path:
            fd.writelines("source %s\n" % gdb_exploitable_path)

        # load executable
        fd.writelines("file %s\n" % gdb_target_binary)
        # </script_header>

        # fill script with content
        for f in sample_index.index:
            fd.writelines("echo Crash\ sample:\ '%s'\\n\n" % f["output"])

            if not stdin_mode(target_cmd):
                run_cmd = "run " + gdb_run_cmd + "\n"
            else:
                run_cmd = "run " + gdb_run_cmd + "< @@" + "\n"

            if intermediate:
                run_cmd = run_cmd.replace("@@", f["input"])
            else:
                run_cmd = run_cmd.replace("@@", os.path.join(sample_index.output_dir, f["output"]))

            fd.writelines(run_cmd)
            fd.writelines("exploitable\n")

        # <script_footer>
        fd.writelines("quit")
        # </script_footer>

        fd.close()
    except (FileExistsError, PermissionError):
        print_err("Could not open script file '%s' for writing!" % script_filename)
Example #14
def write_pgid_file(conf_settings):
    print("")
    if not conf_settings["interactive"]:
        # write/append PGID to file /tmp/afl-multicore.PGID.<SESSION>
        f = open("/tmp/afl_multicore.PGID.%s" % conf_settings["session"], "a")
        if f.writable():
            f.write("%d\n" % os.getpgid(0))
        f.close()
        print_ok("For progress info check: %s/%sxxx/fuzzer_stats!" % (conf_settings["output"],
                                                                      conf_settings["session"]))
    else:
        print_ok("Check the newly created screen windows!")
Example #15
    def run(self):
        doExit = False
        while not doExit:
            try:
                for job in self.config['jobs']:
                    self.run_job(job)

                if float(self.config['interval']) < 0:
                    doExit = True
                else:
                    time.sleep(float(self.config['interval']) * 60)
            except KeyboardInterrupt:
                print('\b\b')
                print_ok('Aborted by user. Good bye!')
                doExit = True
Example #16
def kill_session(session):
    if os.path.isfile("/tmp/afl_multicore.PGID.%s" % session):
        f = open("/tmp/afl_multicore.PGID.%s" % session)
        pgids = f.readlines()

        for pgid in pgids:
            try:
                print_ok("Killing jobs with PGID %s" % pgid.strip("\r\n"))
                os.killpg(int(pgid), signal.SIGTERM)
            except ProcessLookupError:
                print_warn("No processes with PGID %s found!" % (pgid.strip("\r\n")))

        f.close()
        os.remove("/tmp/afl_multicore.PGID.%s" % session)
    else:
        print_err("PGID file '/tmp/afl_multicore.PGID.%s' not found! Aborting!" % session)
Example #17
def invoke_tmin(input_files, output_dir, target_cmd, num_threads=1, mem_limit=None, timeout=None):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=True)

    in_queue_lock = threading.Lock()
    out_queue_lock = threading.Lock()
    in_queue = queue.Queue(len(input_files))
    out_queue = queue.Queue(len(input_files))

    # fill input queue with input files
    in_queue_lock.acquire()
    for f in input_files:
        in_queue.put(f)
    in_queue_lock.release()

    thread_list = []

    tmin_cmd = "afl-tmin "

    if mem_limit is not None:
        tmin_cmd += "-m %d " % int(mem_limit)

    if timeout is not None:
        tmin_cmd += "-t %d " % int(timeout)

    for i in range(0, num_threads, 1):
        t = AflThread.AflTminThread(i, tmin_cmd, target_cmd, output_dir, in_queue, out_queue, in_queue_lock, out_queue_lock)
        thread_list.append(t)
        print_ok("Starting afl-tmin worker %d." % i)
        t.daemon = True
        t.start()

    print_ok("Be patient, afl-tmin is running. This can take hours, if not days...")

    for t in thread_list:
        t.join()

    files_processed = []

    # read processed files from output queue
    out_queue_lock.acquire()
    while not out_queue.empty():
        files_processed.append(out_queue.get())
    out_queue_lock.release()

    return len(files_processed)
Example #18
    def run(self):
        doExit = False
        while not doExit:
            try:
                time_start = datetime.datetime.now()
                for job in self.config['jobs']:
                    self.run_job(job)

                print_ok('All jobs done [{}]'.format(datetime.datetime.now() -
                                                     time_start))

                if float(self.config['interval']) < 0:
                    doExit = True
                else:
                    time.sleep(float(self.config['interval']) * 60)
            except KeyboardInterrupt:
                print('\b\b')
                print_ok('Aborted by user. Good bye!')
                doExit = True
Example #19
    def push(self):
        fuzzers = self.__get_fuzzers()

        # restrict to certain session, if requested
        if self.fuzzer_config['session'] is not None:
            fuzzers = (fuzzer for fuzzer in fuzzers if fuzzer.startswith(self.fuzzer_config['session']))

        excludes = []

        if self.fuzzer_config['exclude_crashes']:
            excludes += ['crashes*/']

        if self.fuzzer_config['exclude_hangs']:
            excludes += ['hangs*/']

        for f in fuzzers:
            local_path = os.path.join(self.fuzzer_config['sync_dir'], f)
            remote_path = os.path.join(self.server_config['remote_path'], f)
            print_ok('Pushing {} -> {}.sync'.format(local_path, remote_path))
            self.rsync_put(local_path, remote_path, rsync_excludes=excludes)
Example #20
def kill_session(session):
    if os.path.isfile("/tmp/afl_multicore.PGID.%s" % session):
        f = open("/tmp/afl_multicore.PGID.%s" % session)
        pgids = f.readlines()

        for pgid in pgids:
            try:
                print_ok("Killing jobs with PGID %s" % pgid.strip('\r\n'))
                os.killpg(int(pgid), signal.SIGTERM)
            except ProcessLookupError:
                print_warn("No processes with PGID %s found!" %
                           (pgid.strip('\r\n')))

        f.close()
        os.remove("/tmp/afl_multicore.PGID.%s" % session)
    else:
        print_err(
            "PGID file '/tmp/afl_multicore.PGID.%s' not found! Aborting!" %
            session)
        sys.exit(1)
Example #21
    def push(self):
        fuzzers = self.__get_fuzzers()

        # restrict to certain session, if requested
        if self.fuzzer_config['session'] is not None:
            fuzzers = (fuzzer for fuzzer in fuzzers if fuzzer.startswith(self.fuzzer_config['session']))

        excludes = self.__excludes

        if self.fuzzer_config['exclude_crashes']:
            excludes += ['crashes*/']

        if self.fuzzer_config['exclude_hangs']:
            excludes += ['hangs*/']

        for f in fuzzers:
            local_path = os.path.join(self.fuzzer_config['sync_dir'], f)
            remote_path = os.path.join(self.server_config['remote_path'], f)
            print_ok('Pushing {} -> {}.sync'.format(local_path, remote_path))
            self.rsync_put(local_path, remote_path, rsync_excludes=excludes)
Example #22
def fetch_stats(config_settings, twitter_inst):
    stat_dict = dict()
    for fuzzer in config_settings['fuzz_dirs']:
        stats = load_stats(fuzzer)

        if not stats:
            continue

        sum_stats = summarize_stats(stats)

        try:
            with open('.afl_stats.{}'.format(os.path.basename(fuzzer)),
                      'r') as f:
                old_stats = json.load(f)
        except FileNotFoundError:
            old_stats = sum_stats.copy()

        # initialize/update stat_dict
        stat_dict[fuzzer] = (sum_stats, old_stats)

        stat_change = diff_stats(sum_stats, old_stats)

        with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'w') as f:
            json.dump(sum_stats, f)
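        # The dot-file name derives from the fuzzer directory, e.g.
        # '/fuzz/sync/fuzzer01' -> '.afl_stats.fuzzer01' (path illustrative),
        # so the previous stats survive between separate afl-stats runs.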

        print(prettify_stat(sum_stats, stat_change, True))

        tweet = prettify_stat(sum_stats, stat_change, False)

        l = len(tweet)
        c = clr.LRD if l > 140 else clr.LGN

        if twitter_inst:
            print_ok("Tweeting status (%s%d" % (c, l) + clr.RST + " chars)...")
            try:
                twitter_inst.statuses.update(status=shorten_tweet(tweet))
            except (twitter.TwitterHTTPError, URLError):
                print_warn("Problem connecting to Twitter! Tweet not sent!")
            except Exception as e:
                print_err("Sending tweet failed (Reason: " + clr.GRA +
                          "%s" % e.__cause__ + clr.RST + ")")
Example #23
    def pull(self):
        fuzzers = self.__get_fuzzers()

        local_path = self.fuzzer_config['sync_dir']
        remote_path = self.server_config['remote_path']

        options = list(_rsync_default_options)
        excludes = []

        # exclude our previously pushed fuzzer states from being pulled again
        # and avoid to overwrite our local fuzz data
        for fuzzer in fuzzers:
            excludes += ['{}.sync'.format(fuzzer), fuzzer]

        # restrict to certain session, if requested
        if self.fuzzer_config['session'] is not None:
            options += ['--include=\"/{}*/\"'.format(self.fuzzer_config['session'])]
            excludes += ['*']

        print_ok('Pulling {}/* <- {}/'.format(local_path, remote_path))
        self.rsync_get(remote_path, local_path, rsync_options=options, rsync_excludes=excludes)
Example #24
def fetch_stats(config_settings, twitter_inst):
    stat_dict = dict()
    for fuzzer in config_settings['fuzz_dirs']:
        stats = load_stats(fuzzer)

        if not stats:
            continue

        sum_stats = summarize_stats(stats)

        try:
            with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'r') as f:
                old_stats = json.load(f)
        except FileNotFoundError:
            old_stats = sum_stats.copy()

        # initialize/update stat_dict
        stat_dict[fuzzer] = (sum_stats, old_stats)

        stat_change = diff_stats(sum_stats, old_stats)

        with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'w') as f:
            json.dump(sum_stats, f)

        print(prettify_stat(sum_stats, stat_change, True))

        tweet = prettify_stat(sum_stats, stat_change, False)

        l = len(tweet)
        c = clr.LRD if l > 140 else clr.LGN
        print_ok("Tweeting status (%s%d" % (c, l) + clr.RST + " chars)...")

        try:
            twitter_inst.statuses.update(status=shorten_tweet(tweet))
        except (twitter.TwitterHTTPError, URLError):
            print_warn("Problem connecting to Twitter! Tweet not sent!")
        except Exception as e:
            print_err("Sending tweet failed (Reason: " + clr.GRA + "%s" % e.__cause__ + clr.RST + ")")
Example #25
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(
        description=
        "afl-multicore starts several parallel fuzzing jobs, that are run \
in the background. For fuzzer stats see 'out_dir/SESSION###/fuzzer_stats'!",
        usage="afl-multicore [-c config] [-h] [-t] [-v] <cmd> <jobs>")

    parser.add_argument(
        "-c",
        "--config",
        dest="config_file",
        help="afl-multicore config file (Default: afl-multicore.conf)!",
        default="afl-multicore.conf")
    parser.add_argument("-t",
                        "--test",
                        dest="test_run",
                        action="store_const",
                        const=True,
                        default=False,
                        help="Perform \
a test run by starting a single afl instance in interactive mode using a test output directory."
                        )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_const",
        const=True,
        default=False,
        help=
        "For debugging purposes do not redirect stderr/stdout of the created \
subprocesses to /dev/null (Default: off). Check 'nohup.out' for further outputs."
    )
    parser.add_argument(
        "cmd", help="afl-multicore command to execute: start, resume, add.")
    parser.add_argument("jobs",
                        help="Number of instances to start/resume/add.")

    args = parser.parse_args(argv[1:])

    conf_settings, environment = read_config(
        os.path.abspath(os.path.expanduser(args.config_file)))

    if args.test_run:
        signal.signal(signal.SIGINT, sigint_handler)
        conf_settings["output"] += "_test"
        conf_settings["interactive"] = False
        args.jobs = 1
        args.cmd = "start"

    if args.cmd != "resume":
        conf_settings["input"] = os.path.abspath(
            os.path.expanduser(conf_settings["input"]))
        if not os.path.exists(conf_settings["input"]):
            print_err("No valid directory provided for <INPUT_DIR>!")
            sys.exit(1)
    else:
        conf_settings["input"] = "-"

    conf_settings["output"] = os.path.abspath(
        os.path.expanduser(conf_settings["output"]))

    slave_off, slave_start = get_slave_count(args.cmd, conf_settings)

    if conf_settings["interactive"]:
        if not check_screen():
            print_err(
                "When using screen mode, please run afl-multicore from inside a screen session!"
            )
            sys.exit(1)

        setup_screen(int(args.jobs), environment)

    target_cmd = build_target_cmd(conf_settings)
    master_cmd = build_master_cmd(conf_settings, target_cmd)

    if args.test_run:
        with subprocess.Popen(master_cmd.split()) as test_proc:
            print_ok("Test instance started (PID: %d)" % test_proc.pid)

    if not conf_settings["slave_only"]:
        print_ok("Starting master instance...")

        if not conf_settings["interactive"]:
            if not args.verbose:
                master = subprocess.Popen(" ".join(['nohup',
                                                    master_cmd]).split(),
                                          stdout=subprocess.DEVNULL,
                                          stderr=subprocess.DEVNULL)
            else:
                master = subprocess.Popen(" ".join(['nohup',
                                                    master_cmd]).split())
            print(" Master 000 started (PID: %d)" % master.pid)
        else:
            subprocess.Popen("screen -X select 1".split())
            screen_cmd = [
                "screen", "-X", "eval",
                "exec %s" % master_cmd, "next"
            ]
            subprocess.Popen(screen_cmd)
            print(" Master 000 started inside new screen window")

    print_ok("Starting slave instances...")
    for i in range(slave_start, int(args.jobs) + slave_start - slave_off, 1):
        slave_cmd = build_slave_cmd(conf_settings, i, target_cmd)

        if not conf_settings["interactive"]:
            if not args.verbose:
                slave = subprocess.Popen(" ".join(['nohup',
                                                   slave_cmd]).split(),
                                         stdout=subprocess.DEVNULL,
                                         stderr=subprocess.DEVNULL)
            else:
                slave = subprocess.Popen(" ".join(['nohup',
                                                   slave_cmd]).split())
            print(" Slave %03d started (PID: %d)" % (i, slave.pid))
        else:
            subprocess.Popen(["screen", "-X", "select", "%d" % (i + 1)])
            screen_cmd = [
                "screen", "-X", "eval",
                "exec %s" % slave_cmd, "next"
            ]
            subprocess.Popen(screen_cmd)
            print(" Slave %03d started inside new screen window" % i)

    write_pgid_file(conf_settings)
Example #26
def execute_gdb_script(out_dir, script_filename, num_samples, num_threads):
    classification_data = []

    out_dir = os.path.expanduser(out_dir) + "/"

    grep_for = [
        "Crash sample: '",
        "Exploitability Classification: ",
        "Short description: ",
        "Hash: ",
    ]

    queue_list = []

    thread_list = []

    for n in range(0, num_threads, 1):
        script_args = [
            str(gdb_binary),
            "-x",
            str(os.path.join(out_dir, "%s.%d" % (script_filename, n))),
        ]

        out_queue = queue.Queue()
        out_queue_lock = threading.Lock()
        queue_list.append((out_queue, out_queue_lock))

        t = AflThread.GdbThread(n, script_args, out_dir, grep_for, out_queue,
                                out_queue_lock)
        thread_list.append(t)
        print_ok("Executing gdb+exploitable script '%s.%d'..." %
                 (script_filename, n))
        t.daemon = True
        t.start()

    for t in thread_list:
        t.join()

    grepped_output = []

    for q in queue_list:
        q[1].acquire()
        while not q[0].empty():
            grepped_output.append(q[0].get())
        q[1].release()

    i = 1
    print("*** GDB+EXPLOITABLE SCRIPT OUTPUT ***")
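    # grepped_output holds one fixed-size record per crash sample, in gdb
    # output order: sample, short description, hash, classification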
    for g in range(0, len(grepped_output) - len(grep_for) + 1, len(grep_for)):
        if grepped_output[g + 3] == "EXPLOITABLE":
            cex = clr.RED
            ccl = clr.BRI
        elif grepped_output[g + 3] == "PROBABLY_EXPLOITABLE":
            cex = clr.YEL
            ccl = clr.BRI
        elif grepped_output[g + 3] == "PROBABLY_NOT_EXPLOITABLE":
            cex = clr.BRN
            ccl = clr.RST
        elif grepped_output[g + 3] == "NOT_EXPLOITABLE":
            cex = clr.GRN
            ccl = clr.GRA
        elif grepped_output[g + 3] == "UNKNOWN":
            cex = clr.BLU
            ccl = clr.GRA
        else:
            cex = clr.GRA
            ccl = clr.GRA

        if len(grepped_output[g]) < 24:
            # Assume simplified sample file names,
            # so save some output space.
            ljust_width = 24
        else:
            ljust_width = 64
        print("%s[%05d]%s %s: %s%s%s %s[%s]%s" %
              (clr.GRA, i, clr.RST, grepped_output[g].ljust(
                  ljust_width, '.'), cex, grepped_output[g + 3], clr.RST, ccl,
               grepped_output[g + 1], clr.RST))
        classification_data.append({
            'Sample':
            grepped_output[g],
            'Classification':
            grepped_output[g + 3],
            'Classification_Description':
            grepped_output[g + 1],
            'Hash':
            grepped_output[g + 2],
            'User_Comment':
            ''
        })
        i += 1

    if i > 1 and i < num_samples:
        print("%s[%05d]%s %s: %sINVALID SAMPLE (Aborting!)%s" %
              (clr.GRA, i, clr.RST, grepped_output[-1].ljust(
                  ljust_width, '.'), clr.LRD, clr.RST))
        print(clr.LRD + "Returned data may be incomplete!" + clr.RST)
    print("*** ***************************** ***")

    # remove intermediate gdb scripts...
    for n in range(0, num_threads, 1):
        os.remove(os.path.join(out_dir, "%s.%d" % (script_filename, n)))

    return classification_data
Example #27
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(
        description=
        "afl-multicore starts several parallel fuzzing jobs, that are run \
in the background. For fuzzer stats see 'out_dir/SESSION###/fuzzer_stats'!",
        usage=
        "afl-multicore [-c config] [-h] [-s secs] [-t] [-v] <cmd> <jobs[,offset]>"
    )

    parser.add_argument(
        "-c",
        "--config",
        dest="config_file",
        help="afl-multicore config file (Default: afl-multicore.conf)!",
        default="afl-multicore.conf")
    parser.add_argument("-s",
                        "--startup-delay",
                        dest="startup_delay",
                        default=None,
                        help="Wait a configurable amount \
of time after starting/resuming each afl instance to avoid interference during fuzzer startup. Provide wait time in \
seconds.")
    parser.add_argument("-t",
                        "--test",
                        dest="test_run",
                        action="store_const",
                        const=True,
                        default=False,
                        help="Perform \
a test run by starting a single afl instance in interactive mode using a test output directory."
                        )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_const",
        const=True,
        default=False,
        help=
        "For debugging purposes do not redirect stderr/stdout of the created \
subprocesses to /dev/null (Default: off). Check 'nohup.out' for further outputs."
    )
    parser.add_argument(
        "cmd", help="afl-multicore command to execute: start, resume, add.")
    parser.add_argument(
        "jobs",
        help=
        "Number of instances to start/resume/add. For resumes you may specify an optional \
job offset that allows to resume specific (ranges of) afl-instances.")

    args = parser.parse_args(argv[1:])

    conf_settings = read_config(
        os.path.abspath(os.path.expanduser(args.config_file)))

    if args.test_run:
        signal.signal(signal.SIGINT, sigint_handler)
        conf_settings["output"] += "_test"
        conf_settings["interactive"] = False
        args.jobs = 1
        args.cmd = "start"

    jobs_count, jobs_offset = get_job_counts(args.jobs)

    if args.cmd != "resume":
        conf_settings["input"] = os.path.abspath(
            os.path.expanduser(conf_settings["input"]))
        jobs_offset = 0
        if not os.path.exists(conf_settings["input"]):
            print_err("No valid directory provided for <INPUT_DIR>!")
            sys.exit(1)
    else:
        conf_settings["input"] = "-"

    conf_settings["output"] = os.path.abspath(
        os.path.expanduser(conf_settings["output"]))

    instances_started = get_started_instance_count(args.cmd, conf_settings)
    master_count = get_master_count(conf_settings)

    if "interactive" in conf_settings and conf_settings["interactive"]:
        if not check_screen():
            print_err(
                "When using screen mode, please run afl-multicore from inside a screen session!"
            )
            sys.exit(1)

        if "environment" in conf_settings:
            setup_screen(jobs_count, conf_settings["environment"])
        else:
            setup_screen(jobs_count, [])

    target_cmd = build_target_cmd(conf_settings)

    if args.test_run:
        cmd = build_master_cmd(conf_settings, 0, target_cmd)
        with subprocess.Popen(cmd.split()) as test_proc:
            print_ok("Test instance started (PID: %d)" % test_proc.pid)

    print_ok("Starting fuzzer instance(s)...")
    jobs_offset += instances_started
    jobs_count += jobs_offset
    for i in range(jobs_offset, jobs_count, 1):
        is_master = has_master(conf_settings, i)

        if is_master:
            cmd = build_master_cmd(conf_settings, i, target_cmd)
        else:
            cmd = build_slave_cmd(conf_settings, i, target_cmd)

        if "interactive" in conf_settings and conf_settings["interactive"]:
            subprocess.Popen(["screen", "-X", "select", "%d" % (i + 1)])
            screen_cmd = ["screen", "-X", "eval", "exec %s" % cmd, "next"]
            subprocess.Popen(screen_cmd)
            if is_master:
                if master_count == 1:
                    print(" Master %03d started inside new screen window" % i)
                else:
                    print(
                        " Master %03d/%03d started inside new screen window" %
                        (i, master_count - 1))
            else:
                print(" Slave %03d started inside new screen window" % i)
        else:
            if not args.verbose:
                fuzzer_inst = subprocess.Popen(" ".join(['nohup',
                                                         cmd]).split(),
                                               stdout=subprocess.DEVNULL,
                                               stderr=subprocess.DEVNULL)
            else:
                fuzzer_inst = subprocess.Popen(" ".join(['nohup',
                                                         cmd]).split())
            if is_master:
                if master_count == 1:
                    print(" Master %03d started inside new screen window" % i)
                else:
                    print(" Master %03d/%03d started (PID: %d)" %
                          (i, master_count - 1, fuzzer_inst.pid))
            else:
                print(" Slave %03d started (PID: %d)" % (i, fuzzer_inst.pid))

        if i < (jobs_count - 1):
            startup_delay(conf_settings, i, args.cmd, args.startup_delay)

    write_pgid_file(conf_settings)
Example #28
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(description="Post selected contents of fuzzer_stats to Twitter.",
                                     usage="afl-stats [-h] [-c config]\n")

    parser.add_argument("-c", "--config", dest="config_file",
                        help="afl-stats config file (Default: afl-stats.conf)!", default="afl-stats.conf")

    args = parser.parse_args(argv[1:])

    config_settings = read_config(args.config_file)

    twitter_inst = twitter_init()

    doExit = False

    # { 'fuzzer_dir': (stat, old_stat) }
    stat_dict = dict()

    while not doExit:
        try:
            for fuzzer in config_settings['fuzz_dirs']:
                stats = load_stats(fuzzer)

                if not stats:
                    continue

                sum_stats = summarize_stats(stats)

                try:
                    # stat_dict has already been initialized for fuzzer
                    #  old_stat <- last_stat
                    old_stats = stat_dict[fuzzer][0].copy()
                except KeyError:
                    # stat_dict has not yet been initialized for fuzzer
                    #  old_stat <- cur_stat
                    old_stats = sum_stats.copy()

                # initialize/update stat_dict
                stat_dict[fuzzer] = (sum_stats, old_stats)

                stat_change = diff_stats(sum_stats, old_stats)

                if not stat_change:
                    continue

                print(prettify_stat(sum_stats, stat_change, True))

                tweet = prettify_stat(sum_stats, stat_change, False)

                l = len(tweet)
                c = clr.LRD if l > 140 else clr.LGN
                print_ok("Tweeting status (%s%d" % (c, l) + clr.RST + " chars)...")

                try:
                    twitter_inst.statuses.update(status=shorten_tweet(tweet))
                except (twitter.TwitterHTTPError, URLError):
                    print_warn("Problem connecting to Twitter! Tweet not sent!")
                except Exception as e:
                    print_err("Sending tweet failed (Reason: " + clr.GRA + "%s" % e.__cause__ + clr.RST + ")")

            if float(config_settings['interval']) < 0:
                doExit = True
            else:
                time.sleep(float(config_settings['interval']) * 60)
        except KeyboardInterrupt:
            print("\b\b")
            print_ok("Aborted by user. Good bye!")
            doExit = True
Example #29
def sigint_handler(signal, frame):
    print()
    print_ok("Test run aborted by user!")
    sys.exit(0)
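
The handler is registered for test runs just as in afl-multicore's main() (see Examples #25 and #27):

import signal

signal.signal(signal.SIGINT, sigint_handler)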
Example #30
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(description="afl-collect copies all crash sample files from an afl sync dir used \
by multiple fuzzers when fuzzing in parallel into a single location providing easy access for further crash analysis.",
                                     usage="afl-collect [-d DATABASE] [-e|-g GDB_EXPL_SCRIPT_FILE] [-f LIST_FILENAME]\n \
[-h] [-j THREADS] [-m] [-r] [-rr] sync_dir collection_dir -- target_cmd")

    parser.add_argument("sync_dir", help="afl synchronisation directory crash samples will be collected from.")
    parser.add_argument("collection_dir",
                        help="Output directory that will hold a copy of all crash samples and other generated files. \
Existing files in the collection directory will be overwritten!")
    parser.add_argument("-d", "--database", dest="database_file", help="Submit sample data into an sqlite3 database (\
only when used together with '-e'). afl-collect skips processing of samples already found in existing database.",
                        default=None)
    parser.add_argument("-e", "--execute-gdb-script", dest="gdb_expl_script_file",
                        help="Generate and execute a gdb+exploitable script after crash sample collection for crash \
classification. (Like option '-g', plus script execution.)",
                        default=None)
    parser.add_argument("-f", "--filelist", dest="list_filename", default=None,
                        help="Writes all collected crash sample filenames into a file in the collection directory.")
    parser.add_argument("-g", "--generate-gdb-script", dest="gdb_script_file",
                        help="Generate gdb script to run 'exploitable.py' on all collected crash samples. Generated \
script will be placed into collection directory.", default=None)
    parser.add_argument("-j", "--threads", dest="num_threads", default=1,
                        help="Enable parallel analysis by specifying the number of threads afl-collect will utilize.")
    parser.add_argument("-m", "--minimize-filenames", dest="min_filename", action="store_const", const=True,
                        default=False, help="Minimize crash sample file names by only keeping fuzzer name and ID.")
    parser.add_argument("-r", "--remove-invalid", dest="remove_invalid", action="store_const", const=True,
                        default=False, help="Verify collected crash samples and remove samples that do not lead to \
crashes (runs 'afl-vcrash.py -r' on collection directory). This step is done prior to any script file \
or file list generation/execution.")
    parser.add_argument("-rr", "--remove-unexploitable", dest="remove_unexploitable", action="store_const", const=True,
                        default=False, help="Remove crash samples that have an exploitable classification of \
'NOT_EXPLOITABLE' or 'PROBABLY_NOT_EXPLOITABLE'. Sample file removal will take place after gdb+exploitable \
script execution. Has no effect without '-e'.")
    parser.add_argument("target_cmd", nargs="+", help="Path to the target binary and its command line arguments. \
Use '@@' to specify crash sample input file position (see afl-fuzz usage).")

    args = parser.parse_args(argv[1:])

    sync_dir = os.path.abspath(os.path.expanduser(args.sync_dir))
    if not os.path.exists(sync_dir):
        print_err("No valid directory provided for <SYNC_DIR>!")
        return

    if args.collection_dir:
        out_dir = os.path.abspath(os.path.expanduser(args.collection_dir))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir, exist_ok=True)
    else:
        print_err("No valid directory provided for <OUT_DIR>!")
        return

    args.target_cmd = " ".join(args.target_cmd).split()
    args.target_cmd[0] = os.path.abspath(os.path.expanduser(args.target_cmd[0]))
    if not os.path.exists(args.target_cmd[0]):
        print_err("Target binary not found!")
        return
    args.target_cmd = " ".join(args.target_cmd)

    if args.database_file:
        db_file = os.path.abspath(os.path.expanduser(args.database_file))
    else:
        db_file = None

    print_ok("Going to collect crash samples from '%s'." % sync_dir)

    # initialize database
    if db_file:
        lite_db = con_sqlite.sqliteConnector(db_file)
        lite_db.init_database()
    else:
        lite_db = None

    fuzzers = get_fuzzer_instances(sync_dir)
    print_ok("Found %d fuzzers, collecting crash samples." % len(fuzzers))

    sample_index = build_sample_index(sync_dir, out_dir, fuzzers, lite_db, args.min_filename)

    if len(sample_index.index) > 0:
        print_ok("Successfully indexed %d crash samples." % len(sample_index.index))
    elif db_file:
        print_warn("No unseen samples found. Check your database for results!")
        return
    else:
        print_warn("No samples found. Check directory settings!")
        return

    if args.remove_invalid:
        from afl_utils import afl_vcrash
        invalid_samples = afl_vcrash.verify_samples(int(args.num_threads), sample_index.inputs(), args.target_cmd)

        # store invalid samples in db
        print_ok("Saving invalid sample info to database.")
        if args.gdb_expl_script_file and db_file:
            for sample in invalid_samples:
                sample_name = sample_index.outputs(input_file=sample)
                dataset = {'sample': sample_name[0]['output'], 'classification': 'INVALID',
                           'description': 'Sample does not cause a crash in the target.', 'hash': ''}
                if not lite_db.dataset_exists(dataset):
                    lite_db.insert_dataset(dataset)

        # remove invalid samples from sample index
        sample_index.remove_inputs(invalid_samples)
        print_warn("Removed %d invalid crash samples from index." % len(invalid_samples))

    # generate gdb+exploitable script
    if args.gdb_expl_script_file:
        divided_index = sample_index.divide(int(args.num_threads))

        for i in range(0, int(args.num_threads), 1):
            generate_gdb_exploitable_script(os.path.join(out_dir, args.gdb_expl_script_file), divided_index[i],
                                            args.target_cmd, i, intermediate=True)

        # execute gdb+exploitable script
        classification_data = execute_gdb_script(out_dir, args.gdb_expl_script_file, len(sample_index.inputs()),
                                                 int(args.num_threads))

        # Submit crash classification data into database
        print_ok("Saving sample classification info to database.")
        if db_file:
            for dataset in classification_data:
                if not lite_db.dataset_exists(dataset):
                    lite_db.insert_dataset(dataset)

        # de-dupe by exploitable hash
        seen = set()
        seen_add = seen.add
        classification_data_dedupe = [x for x in classification_data
                                      if x['hash'] not in seen and not seen_add(x['hash'])]

        # remove dupe samples identified by exploitable hash
        uninteresting_samples = [x['sample'] for x in classification_data
                                 if x not in classification_data_dedupe]

        sample_index.remove_outputs(uninteresting_samples)

        print_warn("Removed %d duplicate samples from index. Will continue with %d remaining samples." %
                   (len(uninteresting_samples), len(sample_index.index)))

        # remove crash samples that are classified uninteresting
        if args.remove_unexploitable:
            classification_unexploitable = [
                'NOT_EXPLOITABLE',
                'PROBABLY_NOT_EXPLOITABLE',
            ]

            uninteresting_samples = []

            for c in classification_data_dedupe:
                if c['classification'] in classification_unexploitable:
                    uninteresting_samples.append(c['sample'])

            sample_index.remove_outputs(uninteresting_samples)
            print_warn("Removed %d uninteresting crash samples from index." % len(uninteresting_samples))

        # generate output gdb script
        generate_gdb_exploitable_script(os.path.join(out_dir, args.gdb_expl_script_file), sample_index,
                                        args.target_cmd, 0)
    elif args.gdb_script_file:
        generate_gdb_exploitable_script(os.path.join(out_dir, args.gdb_script_file), sample_index, args.target_cmd)

    print_ok("Copying %d samples into output directory..." % len(sample_index.index))
    files_collected = copy_samples(sample_index)

    # generate filelist of collected crash samples
    if args.list_filename:
        generate_sample_list(os.path.abspath(os.path.expanduser(args.list_filename)), files_collected)
        print_ok("Generated crash sample list '%s'." % os.path.abspath(os.path.expanduser(args.list_filename)))
Example #31
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(description="afl-multicore starts several parallel fuzzing jobs, that are run \
in the background. For fuzzer stats see 'out_dir/SESSION###/fuzzer_stats'!",
                                     usage="afl-multicore [-c config] [-h] [-t] [-v] <cmd> <jobs>")

    parser.add_argument("-c", "--config", dest="config_file",
                        help="afl-multicore config file (Default: afl-multicore.conf)!", default="afl-multicore.conf")
    parser.add_argument("-t", "--test", dest="test_run", action="store_const", const=True, default=False, help="Perform \
a test run by starting a single afl instance in interactive mode using a test output directory.")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_const", const=True,
                        default=False, help="For debugging purposes do not redirect stderr/stdout of the created \
subprocesses to /dev/null (Default: off). Check 'nohup.out' for further outputs.")
    parser.add_argument("cmd", help="afl-multicore command to execute: start, resume, add.")
    parser.add_argument("jobs", help="Number of instances to start/resume/add.")

    args = parser.parse_args(argv[1:])

    conf_settings, environment = read_config(os.path.abspath(os.path.expanduser(args.config_file)))

    if args.test_run:
        signal.signal(signal.SIGINT, sigint_handler)
        conf_settings["output"] += "_test"
        conf_settings["interactive"] = False
        args.jobs = 1
        args.cmd = "start"

    if args.cmd != "resume":
        conf_settings["input"] = os.path.abspath(os.path.expanduser(conf_settings["input"]))
        if not os.path.exists(conf_settings["input"]):
            print_err("No valid directory provided for <INPUT_DIR>!")
            return
    else:
        conf_settings["input"] = "-"

    conf_settings["output"] = os.path.abspath(os.path.expanduser(conf_settings["output"]))

    if args.cmd == "add":
        slave_start = 0
        slave_off = 0
        dirs = os.listdir(conf_settings["output"])
        for d in dirs:
            if os.path.isdir(os.path.abspath(os.path.join(conf_settings["output"], d))) \
                    and conf_settings["session"] in d:
                slave_start += 1
        conf_settings["slave_only"] = True
    else:
        slave_start = 1
        slave_off = 1

    target_cmd = [conf_settings["target"], conf_settings["cmdline"]]
    target_cmd = " ".join(target_cmd).split()
    target_cmd[0] = os.path.abspath(os.path.expanduser(target_cmd[0]))
    if not os.path.exists(target_cmd[0]):
        print_err("Target binary not found!")
        return
    target_cmd = " ".join(target_cmd)

    if conf_settings["interactive"]:
        if not check_screen():
            print_err("When using screen mode, please run afl-multicore from inside a screen session!")
            return

        setup_screen(int(args.jobs), environment)

    # compile command-line for master
    # $ afl-fuzz -i <input_dir> -o <output_dir> -M <session_name>.000 <afl_args> \
    #   </path/to/target.bin> <target_args>
    master_cmd = [afl_path] + afl_cmdline_from_config(conf_settings)
    master_cmd += ["-M", "%s000" % conf_settings["session"], "--", target_cmd]
    master_cmd = " ".join(master_cmd)

    if args.test_run:
        with subprocess.Popen(master_cmd.split()) as test_proc:
            print_ok("Test instance started (PID: %d)" % test_proc.pid)

    if not conf_settings["slave_only"]:
        print_ok("Starting master instance...")

        if not conf_settings["interactive"]:
            if not args.verbose:
                master = subprocess.Popen(" ".join(['nohup', master_cmd]).split(), stdout=subprocess.DEVNULL,
                                          stderr=subprocess.DEVNULL)
            else:
                master = subprocess.Popen(" ".join(['nohup', master_cmd]).split())
            print(" Master 000 started (PID: %d)" % master.pid)
        else:
            subprocess.Popen("screen -X select 1".split())
            screen_cmd = ["screen", "-X", "eval", "exec %s" % master_cmd, "next"]
            subprocess.Popen(screen_cmd)
            print(" Master 000 started inside new screen window")

    # compile command-line for slaves
    print_ok("Starting slave instances...")
    for i in range(slave_start, int(args.jobs)+slave_start-slave_off, 1):
        # $ afl-fuzz -i <input_dir> -o <output_dir> -S <session_name>.NNN <afl_args> \
        #   </path/to/target.bin> <target_args>
        slave_cmd = [afl_path] + afl_cmdline_from_config(conf_settings)
        slave_cmd += ["-S", "%s%03d" % (conf_settings["session"], i), "--", target_cmd]
        slave_cmd = " ".join(slave_cmd)

        if not conf_settings["interactive"]:
            if not args.verbose:
                slave = subprocess.Popen(" ".join(['nohup', slave_cmd]).split(), stdout=subprocess.DEVNULL,
                                         stderr=subprocess.DEVNULL)
            else:
                slave = subprocess.Popen(" ".join(['nohup', slave_cmd]).split())
            print(" Slave %03d started (PID: %d)" % (i, slave.pid))
        else:
            subprocess.Popen(["screen", "-X", "select", "%d" % (i+1)])
            screen_cmd = ["screen", "-X", "eval", "exec %s" % slave_cmd, "next"]
            subprocess.Popen(screen_cmd)
            print(" Slave %03d started inside new screen window" % i)

    print("")
    if not conf_settings["interactive"]:
        # write/append PGID to file /tmp/afl-multicore.PGID.<SESSION>
        f = open("/tmp/afl_multicore.PGID.%s" % conf_settings["session"], "a")
        if f.writable():
            f.write("%d\n" % os.getpgid(0))
        f.close()
        print_ok("For progress info check: %s/%sxxx/fuzzer_stats!" % (conf_settings["output"],
                                                                      conf_settings["session"]))
    else:
        print_ok("Check the newly created screen windows!")
Example #35
0
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(description="afl-minimize performs several optimization steps to reduce the size\n \
of an afl-fuzz corpus.",
                                     usage="afl-minimize [-c COLLECTION_DIR [--cmin [opts]] [--tmin [opts]]] [-d] [-h]\n \
                   [-j] sync_dir -- target_cmd\n")

    parser.add_argument("-c", "--collect", dest="collection_dir",
                        help="Collect all samples from the synchronisation dir and store them in the collection dir. \
Existing files in the collection directory will be overwritten!", default=None)
    parser.add_argument("--cmin", dest="invoke_cmin", action="store_const", const=True,
                        default=False, help="Run afl-cmin on collection dir. Has no effect without '-c'.")
    parser.add_argument("--cmin-mem-limit", dest="cmin_mem_limit", default=None, help="Set memory limit for afl-cmin.")
    parser.add_argument("--cmin-timeout", dest="cmin_timeout", default=None, help="Set timeout for afl-cmin.")
    parser.add_argument("--tmin", dest="invoke_tmin", action="store_const", const=True,
                        default=False, help="Run afl-tmin on minimized collection dir if used together with '--cmin'\
or on unoptimized collection dir otherwise. Has no effect without '-c'.")
    parser.add_argument("--tmin-mem-limit", dest="tmin_mem_limit", default=None, help="Set memory limit for afl-tmin.")
    parser.add_argument("--tmin-timeout", dest="tmin_timeout", default=None, help="Set timeout for afl-tmin.")
    parser.add_argument("-d", "--dry-run", dest="dry_run", action="store_const", const=True,
                        default=False, help="Perform dry-run on collection dir, if '-c' is provided or on \
synchronisation dir otherwise. Dry-run will move intermittent crashes out of the corpus.")
    parser.add_argument("-j", "--threads", dest="num_threads", default=1,
                        help="Enable parallel dry-run and t-minimization step by specifying the number of threads \
afl-minimize will utilize.")
    parser.add_argument("sync_dir", help="afl synchronisation directory containing multiple fuzzers and their queues.")
    parser.add_argument("target_cmd", nargs="+", help="Path to the target binary and its command line arguments. \
Use '@@' to specify crash sample input file position (see afl-fuzz usage).")

    args = parser.parse_args(argv[1:])

    if not args.collection_dir and not args.dry_run:
        print_err("No operation requested. You should at least provide '-c'")
        print_err("for sample collection or '-d' for a dry-run. Use '--help' for")
        print_err("usage instructions or checkout README.md for details.")
        return

    sync_dir = os.path.abspath(os.path.expanduser(args.sync_dir))
    if not os.path.exists(sync_dir):
        print_err("No valid directory provided for <SYNC_DIR>!")
        return

    args.target_cmd = " ".join(args.target_cmd).split()
    args.target_cmd[0] = os.path.abspath(os.path.expanduser(args.target_cmd[0]))
    if not os.path.exists(args.target_cmd[0]):
        print_err("Target binary not found!")
        return
    args.target_cmd = " ".join(args.target_cmd)

    if not args.num_threads:
        threads = 1
    else:
        threads = int(args.num_threads)

    if args.collection_dir:
        out_dir = os.path.abspath(os.path.expanduser(args.collection_dir))
        if not os.path.exists(out_dir) or len(os.listdir(out_dir)) == 0:
            os.makedirs(out_dir, exist_ok=True)

            print_ok("Looking for fuzzing queues in '%s'." % sync_dir)
            fuzzers = afl_collect.get_fuzzer_instances(sync_dir, crash_dirs=False)

            # collect samples from fuzzer queues
            print_ok("Found %d fuzzers, collecting samples." % len(fuzzers))
            sample_index = afl_collect.build_sample_index(sync_dir, out_dir, fuzzers)

            print_ok("Successfully indexed %d samples." % len(sample_index.index))
            print_ok("Copying %d samples into collection directory..." % len(sample_index.index))
            afl_collect.copy_samples(sample_index)
        else:
            print_warn("Collection directory exists and is not empty!")
            print_warn("Skipping collection step...")

        if args.invoke_cmin:
            # invoke cmin on collection
            print_ok("Executing: afl-cmin -i %s -o %s.cmin -- %s" % (out_dir, out_dir, args.target_cmd))
            invoke_cmin(out_dir, "%s.cmin" % out_dir, args.target_cmd, mem_limit=args.cmin_mem_limit,
                        timeout=args.cmin_timeout)
            if args.invoke_tmin:
                # invoke tmin on minimized collection
                print_ok("Executing: afl-tmin -i %s.cmin/* -o %s.cmin.tmin/* -- %s" % (out_dir, out_dir,
                                                                                       args.target_cmd))
                tmin_num_samples, tmin_samples = afl_collect.get_samples_from_dir("%s.cmin" % out_dir, abs_path=True)
                invoke_tmin(tmin_samples, "%s.cmin.tmin" % out_dir, args.target_cmd, num_threads=threads,
                            mem_limit=args.tmin_mem_limit, timeout=args.tmin_timeout)
        elif args.invoke_tmin:
            # invoke tmin on collection
            print_ok("Executing: afl-tmin -i %s/* -o %s.tmin/* -- %s" % (out_dir, out_dir, args.target_cmd))
            tmin_num_samples, tmin_samples = afl_collect.get_samples_from_dir(out_dir, abs_path=True)
            invoke_tmin(tmin_samples, "%s.tmin" % out_dir, args.target_cmd, num_threads=threads,
                        mem_limit=args.tmin_mem_limit, timeout=args.tmin_timeout)
        if args.dry_run:
            # invoke dry-run on collected/minimized corpus
            if args.invoke_cmin and args.invoke_tmin:
                print_ok("Performing dry-run in %s.cmin.tmin..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir("%s.cmin.tmin" % out_dir,
                                                                                      abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.cmin.tmin.crashes" % out_dir, "%s.cmin.tmin.hangs" % out_dir,
                              args.target_cmd, num_threads=threads)
            elif args.invoke_cmin:
                print_ok("Performing dry-run in %s.cmin..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir("%s.cmin" % out_dir,
                                                                                      abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.cmin.crashes" % out_dir, "%s.cmin.hangs" % out_dir, args.target_cmd,
                              num_threads=threads)
            elif args.invoke_tmin:
                print_ok("Performing dry-run in %s.tmin..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir("%s.tmin" % out_dir,
                                                                                      abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.tmin.crashes" % out_dir, "%s.tmin.hangs" % out_dir, args.target_cmd,
                              num_threads=threads)
            else:
                print_ok("Performing dry-run in %s..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir(out_dir, abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.crashes" % out_dir, "%s.hangs" % out_dir, args.target_cmd,
                              num_threads=threads)
    else:
        if args.dry_run:
            print_ok("Looking for fuzzing queues in '%s'." % sync_dir)
            fuzzers = afl_collect.get_fuzzer_instances(sync_dir, crash_dirs=False)
            print_ok("Found %d fuzzers, performing dry run." % len(fuzzers))
            print_warn("Be patient! Depending on the corpus size this step can take hours...")
            # invoke dry-run on original corpus
            for f in fuzzers:
                for q_dir in f[1]:
                    q_dir_complete = os.path.join(sync_dir, f[0], q_dir)
                    print_ok("Processing %s..." % q_dir_complete)

                    dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir(q_dir_complete, abs_path=True)
                    invoke_dryrun(dryrun_samples, os.path.join(sync_dir, f[0], "crashes"),
                                  os.path.join(sync_dir, f[0], "hangs"), args.target_cmd, num_threads=threads)
Example #36
0
def generate_gdb_exploitable_script(script_filename,
                                    sample_index,
                                    target_cmd,
                                    script_id=0,
                                    intermediate=False,
                                    asan_mode=False):
    target_cmd = target_cmd.split()
    gdb_target_binary = target_cmd[0]
    gdb_run_cmd = " ".join(target_cmd[1:])

    if not intermediate:
        script_filename = os.path.abspath(os.path.expanduser(script_filename))
        print_ok(
            "Generating final gdb+exploitable script '%s' for %d samples..." %
            (script_filename, len(sample_index.outputs())))
    else:
        script_filename = os.path.abspath(
            os.path.expanduser("%s.%d" % (script_filename, script_id)))
        print_ok(
            "Generating intermediate gdb+exploitable script '%s' for %d samples..."
            % (script_filename, len(sample_index.outputs())))

    # source exploitable.py explicitly unless ~/.gdbinit already does so
    gdb_exploitable_path = None
    gdbinit = os.path.expanduser("~/.gdbinit")
    gdbinit_has_exploitable = False
    if os.path.exists(gdbinit):
        with open(gdbinit, "rb") as init_file:
            gdbinit_has_exploitable = b"exploitable.py" in init_file.read()
    if not gdbinit_has_exploitable:
        gdb_exploitable_path = os.path.join(exploitable.__path__[0],
                                            "exploitable.py")

    try:
        with open(script_filename, "w") as fd:
            # <script header>
            # source exploitable.py if necessary
            if gdb_exploitable_path:
                fd.write("source %s\n" % gdb_exploitable_path)

            if asan_mode:
                fd.write("set pagination off\n")
                fd.write("handle SIGSEGV nostop\n")
                asan_logname = '/tmp/{}.{}'.format(
                    asan_log_tmpstring, ''.join(
                        random.choice(string.ascii_lowercase + string.digits)
                        for _ in range(10)))

            # load executable
            fd.write("file %s\n" % gdb_target_binary)
            # </script_header>

            # fill script with content
            for f in sample_index.index:
                fd.write("echo Crash\\ sample:\\ '%s'\\n\n" % f['output'])

                if not asan_mode:
                    if not stdin_mode(target_cmd):
                        run_cmd = "run " + gdb_run_cmd + "\n"
                    else:
                        run_cmd = "run " + gdb_run_cmd + " < @@\n"
                else:
                    if not stdin_mode(target_cmd):
                        run_cmd = "run " + gdb_run_cmd + " 2> {}\n".format(
                            asan_logname)
                    else:
                        run_cmd = "run " + gdb_run_cmd + " < @@ 2> {}\n".format(
                            asan_logname)

                if intermediate:
                    run_cmd = run_cmd.replace("@@", "'{}'".format(f['input']))
                else:
                    run_cmd = run_cmd.replace(
                        "@@",
                        os.path.join(sample_index.output_dir,
                                     "'{}'".format(f['output'])))

                fd.write(run_cmd)
                if not asan_mode:
                    fd.write("exploitable\n")
                else:
                    fd.write("exploitable -a {}\n".format(asan_logname))

            # <script_footer>
            fd.write("quit")
            # </script_footer>
    except OSError:
        print_err("Could not open script file '%s' for writing!" %
                  script_filename)
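
For reference, a final (non-ASan) script produced by this function looks roughly like the following, assuming a target invoked as './target --opt @@' and one collected sample; all paths and names are placeholders:

source /path/to/exploitable/exploitable.py
file /path/to/target
echo Crash\ sample:\ 'sample0'\n
run --opt '/path/to/collection/sample0'
exploitable
quit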
Example #37
0
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(
        description=
        "afl-collect copies all crash sample files from an afl sync dir used \
by multiple fuzzers when fuzzing in parallel into a single location providing easy access for further crash analysis.",
        usage=
        "afl-collect [-d DATABASE] [-e|-g GDB_EXPL_SCRIPT_FILE] [-f LIST_FILENAME]\n \
[-h] [-j THREADS] [-m] [-r] [-rr] sync_dir collection_dir -- target_cmd")

    parser.add_argument(
        "sync_dir",
        help=
        "afl synchronisation directory crash samples will be collected from.")
    parser.add_argument(
        "collection_dir",
        help=
        "Output directory that will hold a copy of all crash samples and other generated files. \
Existing files in the collection directory will be overwritten!")
    parser.add_argument("-d",
                        "--database",
                        dest="database_file",
                        help="Submit sample data into an sqlite3 database (\
only when used together with '-e'). afl-collect skips processing of samples already found in an existing database.",
                        default=None)
    parser.add_argument(
        "-e",
        "--execute-gdb-script",
        dest="gdb_expl_script_file",
        help=
        "Generate and execute a gdb+exploitable script after crash sample collection for crash \
classification. (Like option '-g', plus script execution.)",
        default=None)
    parser.add_argument(
        "-f",
        "--filelist",
        dest="list_filename",
        default=None,
        help=
        "Writes all collected crash sample filenames into a file in the collection directory."
    )
    parser.add_argument(
        "-g",
        "--generate-gdb-script",
        dest="gdb_script_file",
        help=
        "Generate gdb script to run 'exploitable.py' on all collected crash samples. Generated \
script will be placed into collection directory.",
        default=None)
    parser.add_argument(
        "-j",
        "--threads",
        dest="num_threads",
        default=1,
        help=
        "Enable parallel analysis by specifying the number of threads afl-collect will utilize."
    )
    parser.add_argument(
        "-m",
        "--minimize-filenames",
        dest="min_filename",
        action="store_const",
        const=True,
        default=False,
        help=
        "Minimize crash sample file names by only keeping fuzzer name and ID.")
    parser.add_argument(
        "-r",
        "--remove-invalid",
        dest="remove_invalid",
        action="store_const",
        const=True,
        default=False,
        help=
        "Verify collected crash samples and remove samples that do not lead to \
crashes or cause timeouts (runs 'afl-vcrash.py -r' on collection directory). This step is done prior to any script \
file execution or file list generation.")
    parser.add_argument(
        "-rr",
        "--remove-unexploitable",
        dest="remove_unexploitable",
        action="store_const",
        const=True,
        default=False,
        help="Remove crash samples that have an exploitable classification of \
'NOT_EXPLOITABLE' or 'PROBABLY_NOT_EXPLOITABLE'. Sample file removal will take place after gdb+exploitable \
script execution. Has no effect without '-e'.")
    parser.add_argument(
        "target_cmd",
        nargs="+",
        help="Path to the target binary and its command line arguments. \
Use '@@' to specify crash sample input file position (see afl-fuzz usage).")

    args = parser.parse_args(argv[1:])

    sync_dir = os.path.abspath(os.path.expanduser(args.sync_dir))
    if not os.path.exists(sync_dir):
        print_err("No valid directory provided for <SYNC_DIR>!")
        return

    out_dir = os.path.abspath(os.path.expanduser(args.collection_dir))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir, exist_ok=True)

    args.target_cmd = " ".join(args.target_cmd).split()
    args.target_cmd[0] = os.path.abspath(os.path.expanduser(
        args.target_cmd[0]))
    if not os.path.exists(args.target_cmd[0]):
        print_err("Target binary not found!")
        return
    args.target_cmd = " ".join(args.target_cmd)

    if args.database_file:
        db_file = os.path.abspath(os.path.expanduser(args.database_file))
    else:
        db_file = None

    print_ok("Going to collect crash samples from '%s'." % sync_dir)

    # initialize database
    if db_file:
        lite_db = con_sqlite.sqliteConnector(db_file)
        lite_db.init_database('Data', db_table_spec)
    else:
        lite_db = None

    fuzzers = get_fuzzer_instances(sync_dir)
    print_ok("Found %d fuzzers, collecting crash samples." % len(fuzzers))

    sample_index = build_sample_index(sync_dir, out_dir, fuzzers, lite_db,
                                      args.min_filename)

    if len(sample_index.index) > 0:
        print_ok("Successfully indexed %d crash samples." %
                 len(sample_index.index))
    elif db_file:
        print_warn("No unseen samples found. Check your database for results!")
        return
    else:
        print_warn("No samples found. Check directory settings!")
        return

    if args.remove_invalid:
        from afl_utils import afl_vcrash
        invalid_samples, timeout_samples = afl_vcrash.verify_samples(
            int(args.num_threads),
            sample_index.inputs(),
            args.target_cmd,
            timeout_secs=10)

        # store invalid samples in db
        if args.gdb_expl_script_file and db_file:
            print_ok("Saving invalid sample info to database.")
            for sample in invalid_samples:
                sample_name = sample_index.outputs(input_file=sample)
                dataset = {
                    'Sample': sample_name[0],
                    'Classification': 'INVALID',
                    'Classification_Description':
                    'Sample does not cause a crash in the target.',
                    'Hash': '',
                    'User_Comment': ''
                }
                if not lite_db.dataset_exists('Data', dataset, ['Sample']):
                    lite_db.insert_dataset('Data', dataset)

            for sample in timeout_samples:
                sample_name = sample_index.outputs(input_file=sample)
                dataset = {
                    'Sample': sample_name[0],
                    'Classification': 'TIMEOUT',
                    'Classification_Description':
                    'Sample caused a target execution timeout.',
                    'Hash': '',
                    'User_Comment': ''
                }
                if not lite_db.dataset_exists('Data', dataset, ['Sample']):
                    lite_db.insert_dataset('Data', dataset)

        # remove invalid samples from sample index
        sample_index.remove_inputs(invalid_samples + timeout_samples)
        print_warn("Removed %d invalid crash samples from index." %
                   len(invalid_samples))
        print_warn("Removed %d timed out samples from index." %
                   len(timeout_samples))

    # generate gdb+exploitable script
    if args.gdb_expl_script_file:
        divided_index = sample_index.divide(int(args.num_threads))

        for i in range(0, int(args.num_threads), 1):
            generate_gdb_exploitable_script(os.path.join(
                out_dir, args.gdb_expl_script_file),
                                            divided_index[i],
                                            args.target_cmd,
                                            i,
                                            intermediate=True)

        # execute gdb+exploitable script
        classification_data = execute_gdb_script(out_dir,
                                                 args.gdb_expl_script_file,
                                                 len(sample_index.inputs()),
                                                 int(args.num_threads))

        # Submit crash classification data into database
        if db_file:
            print_ok("Saving sample classification info to database.")
            for dataset in classification_data:
                if not lite_db.dataset_exists('Data', dataset, ['Sample']):
                    lite_db.insert_dataset('Data', dataset)

        # de-dupe by exploitable hash
        seen = set()
        seen_add = seen.add
        classification_data_dedupe = [
            x for x in classification_data
            if x['Hash'] not in seen and not seen_add(x['Hash'])
        ]

        # remove dupe samples identified by exploitable hash
        uninteresting_samples = [
            x['Sample'] for x in classification_data
            if x not in classification_data_dedupe
        ]

        sample_index.remove_outputs(uninteresting_samples)

        print_warn(
            "Removed %d duplicate samples from index. Will continue with %d remaining samples."
            % (len(uninteresting_samples), len(sample_index.index)))

        # remove crash samples that are classified uninteresting
        if args.remove_unexploitable:
            classification_unexploitable = [
                'NOT_EXPLOITABLE',
                'PROBABLY_NOT_EXPLOITABLE',
            ]

            uninteresting_samples = []

            for c in classification_data_dedupe:
                if c['Classification'] in classification_unexploitable:
                    uninteresting_samples.append(c['Sample'])

            sample_index.remove_outputs(uninteresting_samples)
            print_warn("Removed %d uninteresting crash samples from index." %
                       len(uninteresting_samples))

        # generate output gdb script
        generate_gdb_exploitable_script(
            os.path.join(out_dir, args.gdb_expl_script_file), sample_index,
            args.target_cmd, 0)
    elif args.gdb_script_file:
        generate_gdb_exploitable_script(
            os.path.join(out_dir, args.gdb_script_file), sample_index,
            args.target_cmd)

    print_ok("Copying %d samples into output directory..." %
             len(sample_index.index))
    files_collected = copy_samples(sample_index)

    # generate filelist of collected crash samples
    if args.list_filename:
        generate_sample_list(
            os.path.abspath(os.path.expanduser(args.list_filename)),
            files_collected)
        print_ok("Generated crash sample list '%s'." %
                 os.path.abspath(os.path.expanduser(args.list_filename)))

    # write db contents to file and close db connection
    if db_file:
        lite_db.commit_close()
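
The de-duplication above is the classic order-preserving idiom: set.add() returns None, so 'not seen_add(h)' is always true and merely records the hash, while the 'not in seen' test keeps only the first sample per exploitable hash; binding seen.add to a local name avoids re-resolving the attribute on every element. A generalized sketch (the helper name is hypothetical):

def dedupe_by(items, key):
    # keep the first item per key, preserving input order
    seen = set()
    seen_add = seen.add
    return [x for x in items if key(x) not in seen and not seen_add(key(x))]

In the example this corresponds to dedupe_by(classification_data, key=lambda x: x['Hash']).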
Example #38
0
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(description="afl-minimize performs several optimization steps to reduce the size\n \
of an afl-fuzz corpus.",
                                     usage="afl-minimize [-c COLLECTION_DIR [--cmin [opts]] [--tmin [opts]]] [--reseed]\n \
                   [-d] [-h] [-j] sync_dir -- target_cmd\n")

    parser.add_argument("-c", "--collect", dest="collection_dir",
                        help="Collect all samples from the synchronisation dir and store them in the collection dir.",
                        default=None)
    parser.add_argument("--cmin", dest="invoke_cmin", action="store_const", const=True,
                        default=False, help="Run afl-cmin on collection dir. Has no effect without '-c'.")
    parser.add_argument("--cmin-mem-limit", dest="cmin_mem_limit", default=None, help="Set memory limit for afl-cmin.")
    parser.add_argument("--cmin-timeout", dest="cmin_timeout", default=None, help="Set timeout for afl-cmin.")
    parser.add_argument("--cmin-qemu", dest="cmin_qemu", default=False, action="store_const", const=True,
                        help="Enable qemu mode afl-cmin.")
    parser.add_argument("--reseed", dest="reseed", default=False, action="store_const", const=True, help="Reseed afl-fuzz with the \
collected (and optimized) corpus. This replaces all sync_dir queues with the newly generated corpus.")
    parser.add_argument("--tmin", dest="invoke_tmin", action="store_const", const=True,
                        default=False, help="Run afl-tmin on minimized collection dir if used together with '--cmin'\
or on unoptimized collection dir otherwise. Has no effect without '-c'.")
    parser.add_argument("--tmin-mem-limit", dest="tmin_mem_limit", default=None, help="Set memory limit for afl-tmin.")
    parser.add_argument("--tmin-timeout", dest="tmin_timeout", default=None, help="Set timeout for afl-tmin.")
    parser.add_argument("--tmin-qemu", dest="tmin_qemu", default=False, action="store_const", const=True,
                        help="Enable qemu mode afl-tmin.")
    parser.add_argument("-d", "--dry-run", dest="dry_run", action="store_const", const=True,
                        default=False, help="Perform dry-run on collection dir, if '-c' is provided or on \
synchronisation dir otherwise. Dry-run will move intermittent crashes out of the corpus.")
    parser.add_argument("-j", "--threads", dest="num_threads", default=1,
                        help="Enable parallel dry-run and t-minimization step by specifying the number of threads \
afl-minimize will utilize.")
    parser.add_argument("sync_dir", help="afl synchronisation directory containing multiple fuzzers and their queues.")
    parser.add_argument("target_cmd", nargs="+", help="Path to the target binary and its command line arguments. \
Use '@@' to specify crash sample input file position (see afl-fuzz usage).")

    args = parser.parse_args(argv[1:])

    if not args.collection_dir and not args.dry_run:
        print_err("No operation requested. You should at least provide '-c'")
        print_err("for sample collection or '-d' for a dry-run. Use '--help' for")
        print_err("usage instructions or checkout README.md for details.")
        return

    sync_dir = os.path.abspath(os.path.expanduser(args.sync_dir))
    if not os.path.exists(sync_dir):
        print_err("No valid directory provided for <SYNC_DIR>!")
        return

    args.target_cmd = " ".join(args.target_cmd).split()
    args.target_cmd[0] = os.path.abspath(os.path.expanduser(args.target_cmd[0]))
    if not os.path.exists(args.target_cmd[0]):
        print_err("Target binary not found!")
        return
    args.target_cmd = " ".join(args.target_cmd)

    threads = int(args.num_threads)

    if args.collection_dir:
        out_dir = os.path.abspath(os.path.expanduser(args.collection_dir))
        if not os.path.exists(out_dir) or len(os.listdir(out_dir)) == 0:
            os.makedirs(out_dir, exist_ok=True)

            print_ok("Looking for fuzzing queues in '%s'." % sync_dir)
            fuzzers = afl_collect.get_fuzzer_instances(sync_dir, crash_dirs=False)

            # collect samples from fuzzer queues
            print_ok("Found %d fuzzers, collecting samples." % len(fuzzers))
            sample_index = afl_collect.build_sample_index(sync_dir, out_dir, fuzzers, omit_fuzzer_name=True)

            print_ok("Successfully indexed %d samples." % len(sample_index.index))
            print_ok("Copying %d samples into collection directory..." % len(sample_index.index))
            afl_collect.copy_samples(sample_index)
        else:
            print_warn("Collection directory exists and is not empty!")
            print_warn("Skipping collection step...")

        if args.invoke_cmin:
            # invoke cmin on collection
            invoke_cmin(out_dir, "%s.cmin" % out_dir, args.target_cmd, mem_limit=args.cmin_mem_limit,
                        timeout=args.cmin_timeout, qemu=args.cmin_qemu)
            if args.invoke_tmin:
                # invoke tmin on minimized collection
                tmin_num_samples, tmin_samples = afl_collect.get_samples_from_dir("%s.cmin" % out_dir, abs_path=True)
                invoke_tmin(tmin_samples, "%s.cmin.tmin" % out_dir, args.target_cmd, num_threads=threads,
                            mem_limit=args.tmin_mem_limit, timeout=args.tmin_timeout, qemu=args.tmin_qemu)
        elif args.invoke_tmin:
            # invoke tmin on collection
            tmin_num_samples, tmin_samples = afl_collect.get_samples_from_dir(out_dir, abs_path=True)
            invoke_tmin(tmin_samples, "%s.tmin" % out_dir, args.target_cmd, num_threads=threads,
                        mem_limit=args.tmin_mem_limit, timeout=args.tmin_timeout, qemu=args.tmin_qemu)
        if args.dry_run:
            # invoke dry-run on collected/minimized corpus
            if args.invoke_cmin and args.invoke_tmin:
                print_ok("Performing dry-run in %s.cmin.tmin..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir("%s.cmin.tmin" % out_dir,
                                                                                      abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.cmin.tmin.crashes" % out_dir, "%s.cmin.tmin.hangs" % out_dir,
                              args.target_cmd, num_threads=threads)
            elif args.invoke_cmin:
                print_ok("Performing dry-run in %s.cmin..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir("%s.cmin" % out_dir,
                                                                                      abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.cmin.crashes" % out_dir, "%s.cmin.hangs" % out_dir, args.target_cmd,
                              num_threads=threads)
            elif args.invoke_tmin:
                print_ok("Performing dry-run in %s.tmin..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir("%s.tmin" % out_dir,
                                                                                      abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.tmin.crashes" % out_dir, "%s.tmin.hangs" % out_dir, args.target_cmd,
                              num_threads=threads)
            else:
                print_ok("Performing dry-run in %s..." % out_dir)
                print_warn("Be patient! Depending on the corpus size this step can take hours...")
                dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir(out_dir, abs_path=True)
                invoke_dryrun(dryrun_samples, "%s.crashes" % out_dir, "%s.hangs" % out_dir, args.target_cmd,
                              num_threads=threads)
        elif args.reseed:
            optimized_corpus = out_dir

            if args.invoke_cmin:
                optimized_corpus = optimized_corpus + ".cmin"

            if args.invoke_tmin:
                optimized_corpus = optimized_corpus + ".tmin"

            afl_reseed(sync_dir, optimized_corpus)
    else:
        if args.dry_run:
            print_ok("Looking for fuzzing queues in '%s'." % sync_dir)
            fuzzers = afl_collect.get_fuzzer_instances(sync_dir, crash_dirs=False)
            print_ok("Found %d fuzzers, performing dry run." % len(fuzzers))
            print_warn("Be patient! Depending on the corpus size this step can take hours...")
            # invoke dry-run on original corpus
            for f in fuzzers:
                for q_dir in f[1]:
                    q_dir_complete = os.path.join(sync_dir, f[0], q_dir)
                    print_ok("Processing %s..." % q_dir_complete)

                    dryrun_num_samples, dryrun_samples = afl_collect.get_samples_from_dir(q_dir_complete, abs_path=True)
                    invoke_dryrun(dryrun_samples, os.path.join(sync_dir, f[0], "crashes"),
                                  os.path.join(sync_dir, f[0], "hangs"), args.target_cmd, num_threads=threads)
Example #39
0
def execute_gdb_script(out_dir, script_filename, num_samples, num_threads):
    classification_data = []

    out_dir = os.path.expanduser(out_dir) + "/"

    grep_for = [
        "Crash sample: '",
        "Exploitability Classification: ",
        "Short description: ",
        "Hash: ",
        ]

    queue_list = []

    thread_list = []

    for n in range(0, num_threads, 1):
        script_args = [
            str(gdb_binary),
            "-x",
            str(os.path.join(out_dir, "%s.%d" % (script_filename, n))),
        ]

        out_queue = queue.Queue()
        out_queue_lock = threading.Lock()
        queue_list.append((out_queue, out_queue_lock))

        t = AflThread.GdbThread(n, script_args, out_dir, grep_for, out_queue, out_queue_lock)
        thread_list.append(t)
        print_ok("Executing gdb+exploitable script '%s.%d'..." % (script_filename, n))
        t.daemon = True
        t.start()

    for t in thread_list:
        t.join()

    grepped_output = []

    for q in queue_list:
        q[1].acquire()
        while not q[0].empty():
            grepped_output.append(q[0].get())
        q[1].release()

    i = 1
    ljust_width = 24  # fallback in case no complete record was parsed
    print("*** GDB+EXPLOITABLE SCRIPT OUTPUT ***")
    for g in range(0, len(grepped_output)-len(grep_for)+1, len(grep_for)):
        if grepped_output[g+3] == "EXPLOITABLE":
            cex = clr.RED
            ccl = clr.BRI
        elif grepped_output[g+3] == "PROBABLY_EXPLOITABLE":
            cex = clr.YEL
            ccl = clr.BRI
        elif grepped_output[g+3] == "PROBABLY_NOT_EXPLOITABLE":
            cex = clr.BRN
            ccl = clr.RST
        elif grepped_output[g+3] == "NOT_EXPLOITABLE":
            cex = clr.GRN
            ccl = clr.GRA
        elif grepped_output[g+3] == "UNKNOWN":
            cex = clr.BLU
            ccl = clr.GRA
        else:
            cex = clr.GRA
            ccl = clr.GRA

        if len(grepped_output[g]) < 24:
            # Assume simplified sample file names,
            # so save some output space.
            ljust_width = 24
        else:
            ljust_width = 64
        print("%s[%05d]%s %s: %s%s%s %s[%s]%s" % (clr.GRA, i, clr.RST, grepped_output[g].ljust(ljust_width, '.'), cex,
                                                  grepped_output[g+3], clr.RST, ccl, grepped_output[g+1], clr.RST))
        classification_data.append({'sample': grepped_output[g], 'classification': grepped_output[g+3],
                                    'description': grepped_output[g+1], 'hash': grepped_output[g+2]})
        i += 1

    if i <= num_samples and grepped_output:
        print("%s[%05d]%s %s: %sINVALID SAMPLE (Aborting!)%s" % (clr.GRA, i, clr.RST,
                                                                 grepped_output[-1].ljust(ljust_width, '.'),
                                                                 clr.LRD, clr.RST))
        print(clr.LRD + "Returned data may be incomplete!" + clr.RST)
    print("*** ***************************** ***")

    # remove intermediate gdb scripts...
    for n in range(0, num_threads, 1):
        os.remove(os.path.join(out_dir, "%s.%d" % (script_filename, n)))

    return classification_data
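
The stride loop above consumes the grep'ed gdb output as flat records of len(grep_for) == 4 lines per sample, emitted in the order sample path, short description, hash, classification. A sketch of an equivalent, arguably clearer formulation:

for g in range(0, len(grepped_output) - 3, 4):
    sample, description, sample_hash, classification = grepped_output[g:g + 4]
    # ... colorize and collect as above ...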
Example #40
0
def main(argv):
    show_info()

    parser = argparse.ArgumentParser(description="afl-multicore starts several parallel fuzzing jobs, that are run \
in the background. For fuzzer stats see 'out_dir/SESSION###/fuzzer_stats'!",
                                     usage="afl-multicore [-c config] [-h] [-s secs] [-t] [-v] <cmd> <jobs>")

    parser.add_argument("-c", "--config", dest="config_file",
                        help="afl-multicore config file (Default: afl-multicore.conf)!", default="afl-multicore.conf")
    parser.add_argument("-s", "--startup-delay", dest="startup_delay", default=None, help="Wait a configurable  amount \
of time after starting/resuming each afl instance to avoid interference during fuzzer startup. Provide wait time in \
seconds.")
    parser.add_argument("-t", "--test", dest="test_run", action="store_const", const=True, default=False, help="Perform \
a test run by starting a single afl instance in interactive mode using a test output directory.")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_const", const=True,
                        default=False, help="For debugging purposes do not redirect stderr/stdout of the created \
subprocesses to /dev/null (Default: off). Check 'nohup.out' for further outputs.")
    parser.add_argument("cmd", help="afl-multicore command to execute: start, resume, add.")
    parser.add_argument("jobs", help="Number of instances to start/resume/add.")

    args = parser.parse_args(argv[1:])

    conf_settings = read_config(os.path.abspath(os.path.expanduser(args.config_file)))

    if args.test_run:
        signal.signal(signal.SIGINT, sigint_handler)
        conf_settings["output"] += "_test"
        conf_settings["interactive"] = False
        args.jobs = 1
        args.cmd = "start"

    if args.cmd != "resume":
        conf_settings["input"] = os.path.abspath(os.path.expanduser(conf_settings["input"]))
        if not os.path.exists(conf_settings["input"]):
            print_err("No valid directory provided for <INPUT_DIR>!")
            sys.exit(1)
    else:
        conf_settings["input"] = "-"

    conf_settings["output"] = os.path.abspath(os.path.expanduser(conf_settings["output"]))

    slave_off, slave_start = get_slave_count(args.cmd, conf_settings)

    if "interactive" in conf_settings and conf_settings["interactive"]:
        if not check_screen():
            print_err("When using screen mode, please run afl-multicore from inside a screen session!")
            sys.exit(1)

        if "environment" in conf_settings:
            setup_screen(int(args.jobs), conf_settings["environment"])
        else:
            setup_screen(int(args.jobs), [])

    target_cmd = build_target_cmd(conf_settings)
    master_cmd = build_master_cmd(conf_settings, target_cmd)

    if args.test_run:
        with subprocess.Popen(master_cmd.split()) as test_proc:
            print_ok("Test instance started (PID: %d)" % test_proc.pid)

    if "slave_only" not in conf_settings or ("slave_only" in conf_settings and not conf_settings["slave_only"]):
        print_ok("Starting master instance...")

        if "interactive" in conf_settings and conf_settings["interactive"]:
            subprocess.Popen("screen -X select 1".split())
            screen_cmd = ["screen", "-X", "eval", "exec %s" % master_cmd, "next"]
            subprocess.Popen(screen_cmd)
            print(" Master 000 started inside new screen window")
        else:
            if not args.verbose:
                master = subprocess.Popen(" ".join(['nohup', master_cmd]).split(), stdout=subprocess.DEVNULL,
                                          stderr=subprocess.DEVNULL)
            else:
                master = subprocess.Popen(" ".join(['nohup', master_cmd]).split())
            print(" Master 000 started (PID: %d)" % master.pid)

        if args.startup_delay is not None:
            time.sleep(int(args.startup_delay))

    print_ok("Starting slave instances...")
    num_slaves = int(args.jobs)+slave_start-slave_off
    for i in range(slave_start, num_slaves, 1):
        slave_cmd = build_slave_cmd(conf_settings, i, target_cmd)

        if "interactive" in conf_settings and conf_settings["interactive"]:
            subprocess.Popen(["screen", "-X", "select", "%d" % (i + 1)])
            screen_cmd = ["screen", "-X", "eval", "exec %s" % slave_cmd, "next"]
            subprocess.Popen(screen_cmd)
            print(" Slave %03d started inside new screen window" % i)
        else:
            if not args.verbose:
                slave = subprocess.Popen(" ".join(['nohup', slave_cmd]).split(), stdout=subprocess.DEVNULL,
                                         stderr=subprocess.DEVNULL)
            else:
                slave = subprocess.Popen(" ".join(['nohup', slave_cmd]).split())
            print(" Slave %03d started (PID: %d)" % (i, slave.pid))

        if args.startup_delay is not None and i < (num_slaves-1):
            time.sleep(int(args.startup_delay))

    write_pgid_file(conf_settings)
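
read_config(), build_target_cmd(), build_master_cmd() and build_slave_cmd() hide the details here, but together with the earlier afl-multicore example the settings dictionary evidently carries at least the following keys. A sketch with placeholder values (the on-disk syntax is whatever read_config() parses):

conf_settings = {
    "input": "./testcases",    # seed directory; replaced by "-" on resume
    "output": "./sync_dir",    # afl-fuzz synchronisation directory
    "session": "SESSION",      # instance name prefix: SESSION000, SESSION001, ...
    "target": "~/bin/target",  # path to the instrumented target binary
    "cmdline": "--opt @@",     # target command line
    "interactive": False,      # True: one screen window per instance
    "slave_only": False,       # True: start slave instances only
    "environment": [],         # environment entries exported into screen windows
}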
Example #41
0
def sigint_handler(sig, frame):
    print()
    print_ok("Test run aborted by user!")
    sys.exit(0)