Example #1
class Test(unittest.TestCase):
    def setUp(self):
        self.w = WatchDog("/tmp/foo", 1234567890)

    def tearDown(self):
        pass

    def test_get_watchdog_timeout_cmdline(self):
        expected = 'sudo sh -c "echo file=/tmp/foo > /etc/watchdog.conf && echo change=1234567890 >> /etc/watchdog.conf && /etc/init.d/watchdog restart"'
        self.w._set_cmdline()
        self.assertEqual(self.w.cmdline, expected)

    def test_go(self):
        # cannot test directly, see test_get_watchdog_timeout_cmdline()
        pass
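The test above pins down the exact command line that WatchDog is expected to build. A minimal sketch of a class that would satisfy it is shown below; only the command string comes from the test, while the constructor attributes, the subprocess call, and the go() body are assumptions about what the real certfuzz implementation does.

import subprocess


class WatchDog(object):
    """Minimal sketch consistent with the test above (illustration only)."""

    def __init__(self, watchdog_file, timeout):
        self.file = watchdog_file
        self.timeout = timeout
        self.cmdline = None

    def _set_cmdline(self):
        # Build the shell command that rewrites /etc/watchdog.conf and
        # restarts the watchdog daemon; the string matches the test's
        # expected value exactly.
        self.cmdline = (
            'sudo sh -c "echo file=%s > /etc/watchdog.conf'
            ' && echo change=%d >> /etc/watchdog.conf'
            ' && /etc/init.d/watchdog restart"' % (self.file, self.timeout)
        )

    def go(self):
        # Run the command. As the test's comment notes, this step is not
        # unit tested directly.
        self._set_cmdline()
        subprocess.call(self.cmdline, shell=True)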
Example #3
    def _setup_watchdog(self):
        # short circuit if we're not using the watchdog
        if not TWDF.use_watchdog:
            logger.debug('skipping watchdog setup')
            return

        logger.debug('setup watchdog')
        # setup our watchdog file toucher
        touch_watchdog_file()

        # set up the watchdog timeout within the VM and restart the daemon
        with WatchDog(
                TWDF.wdf,
                self.config['runoptions']['watchdogtimeout']) as watchdog:
            watchdog()
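In the example above, WatchDog is used as a context manager and the object bound by "with ... as watchdog" is then called. Assuming __enter__ returns a callable (an inference from this usage, not confirmed elsewhere on this page), the sketch from Example #1 could be extended with:

    # Hypothetical continuation of the WatchDog sketch above.
    def __enter__(self):
        # Return something callable so that
        #     with WatchDog(...) as watchdog:
        #         watchdog()
        # triggers the watchdog setup, as in the example above.
        return self.go

    def __exit__(self, etype, value, traceback):
        # Do not suppress exceptions raised inside the with-block.
        return False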
Example #5
    def setUp(self):
        self.w = WatchDog("/tmp/foo", 1234567890)
Example #6
File: bff.py Project: wflk/certfuzz
def main():
    global START_SEED
    hashes = []

    # give up if we don't have a debugger
    debuggers.verify_supported_platform()

    setup_logging_to_console(logger, logging.INFO)
    logger.info("Welcome to BFF!")

    scriptpath = os.path.dirname(sys.argv[0])
    logger.info('Scriptpath is %s', scriptpath)

    # parse command line options
    logger.info('Parsing command line options')
    parser = OptionParser()
    parser.add_option('',
                      '--debug',
                      dest='debug',
                      help='Turn on debugging output',
                      action='store_true')
    parser.add_option('-c',
                      '--config',
                      dest='cfg',
                      help='Config file location')
    (options, args) = parser.parse_args()  #@UnusedVariable

    # Get the cfg file name
    if options.cfg:
        remote_cfg_file = options.cfg
    else:
        remote_cfg_file = get_config_file(scriptpath)

    # die unless the remote config is present
    assert os.path.exists(
        remote_cfg_file
    ), 'Cannot find remote config file: %s, Please create it or use --config option to specify a different location.' % remote_cfg_file

    # copy remote config to local:
    local_cfg_file = os.path.expanduser('~/bff.cfg')
    filetools.copy_file(remote_cfg_file, local_cfg_file)

    # Read the cfg file
    logger.info('Reading config from %s', local_cfg_file)
    cfg = cfg_helper.read_config_options(local_cfg_file)

    # set up local logging
    setup_logfile(cfg.local_dir,
                  log_basename='bff.log',
                  level=logging.DEBUG,
                  max_bytes=1e8,
                  backup_count=3)

    # set up remote logging
    setup_logfile(cfg.output_dir,
                  log_basename='bff.log',
                  level=logging.INFO,
                  max_bytes=1e7,
                  backup_count=5)

    try:
        check_for_script(cfg)
    except:
        logger.warning("Please configure BFF to fuzz a binary.  Exiting...")
        sys.exit()

    z.setup_dirs_and_files(local_cfg_file, cfg)

    # make sure we cache it for the next run
    #    cache_state(cfg.campaign_id, 'cfg', cfg, cfg.cached_config_file)

    sr = get_cached_state('seedrange', cfg.campaign_id,
                          cfg.cached_seedrange_file)
    if not sr:
        sr = SeedRange(cfg.start_seed, cfg.seed_interval, cfg.max_seed)

    # set START_SEED global for timestamping
    START_SEED = sr.s1

    start_process_killer(scriptpath, cfg)

    z.set_unbuffered_stdout()

    # set up the seedfile set so we can pick seedfiles for everything else...
    seedfile_set = get_cached_state('seedfile_set', cfg.campaign_id,
                                    cfg.cached_seedfile_set)
    if not seedfile_set:
        logger.info('Building seedfile set')
        sfs_logfile = os.path.join(cfg.seedfile_output_dir, 'seedfile_set.log')
        with SeedfileSet(
                campaign_id=cfg.campaign_id,
                originpath=cfg.seedfile_origin_dir,
                localpath=cfg.seedfile_local_dir,
                outputpath=cfg.seedfile_output_dir,
                logfile=sfs_logfile,
        ) as sfset:
            seedfile_set = sfset

    # set up the watchdog timeout within the VM and restart the daemon
    if cfg.watchdogtimeout:
        watchdog = WatchDog(cfg.watchdogfile, cfg.watchdogtimeout)
        touch_watchdog_file(cfg)
        watchdog.go()

    cache_state(cfg.campaign_id, 'seedfile_set', seedfile_set,
                cfg.cached_seedfile_set)

    sf = seedfile_set.next_item()

    # Run the program once to cache it into memory
    z.cache_program_once(cfg, sf.path)

    # Give target time to die
    time.sleep(1)

    # flag to indicate whether this is a fresh script start up or not
    first_chunk = True

    # remember our parent process id so we can tell if it changes later
    _last_ppid = os.getppid()

    # campaign.go
    while sr.in_max_range():

        # wipe the tmp dir clean to try to avoid filling the VM disk
        TmpReaper().clean_tmp()

        sf = seedfile_set.next_item()

        r = sf.rangefinder.next_item()
        sr.set_s2()

        while sr.in_range():
            # interval.go
            logger.debug('Starting interval %d-%d', sr.s1, sr.s2)

            # Prevent watchdog from rebooting VM.  If /tmp/fuzzing exists and is stale, the machine will reboot
            touch_watchdog_file(cfg)

            # check parent process id
            _ppid_now = os.getppid()
            if not _ppid_now == _last_ppid:
                logger.warning('Parent process ID changed from %d to %d',
                               _last_ppid, _ppid_now)
                _last_ppid = _ppid_now

            # do the fuzz
            cmdline = cfg.get_command(sf.path)

            if first_chunk:
                # disable the --quiet option in zzuf
                # on the first chunk only
                quiet_flag = False
                first_chunk = False
            else:
                quiet_flag = True

            zzuf = Zzuf(
                cfg.local_dir,
                sr.s1,
                sr.s2,
                cmdline,
                sf.path,
                cfg.zzuf_log_file,
                cfg.copymode,
                r.min,
                r.max,
                cfg.progtimeout,
                quiet_flag,
            )
            saw_crash = zzuf.go()

            if not saw_crash:
                # we must have made it through this chunk without a crash
                # so go to next chunk
                try_count = sr.s1_s2_delta()
                sf.record_tries(tries=try_count)
                r.record_tries(tries=try_count)

                # emit a log entry
                crashcount = z.get_crashcount(cfg.crashers_dir)
                rate = get_rate(sr.s1)
                seed_str = "seeds=%d-%d" % (sr.s1, sr.s2)
                range_str = "range=%.6f-%.6f" % (r.min, r.max)
                rate_str = "Rate=(%.2f/s %.1f/m %.0f/h %.0f/d)" % (
                    rate, rate * 60, rate * 3600, rate * 86400)
                expected_density = seedfile_set.expected_crash_density
                xd_str = "expected=%.9f" % expected_density
                xr_str = 'expected_rate=%.6f uniq/day' % (expected_density *
                                                          rate * 86400)
                logger.info('Fuzzing %s %s %s %s %s %s crash_count=%d',
                            sf.path, seed_str, range_str, rate_str, xd_str,
                            xr_str, crashcount)

                # set s1 to s2 so that as soon as we continue we'll break out of the sr.in_range() loop
                sr.set_s1_to_s2()
                continue

            # we must have seen a crash

            # get the results
            zzuf_log = ZzufLog(cfg.zzuf_log_file,
                               cfg.zzuf_log_out(sf.output_dir))

            # Don't generate cases for killed process or out-of-memory
            # In the default mode, zzuf will report a signal. In copy (and exit code) mode, zzuf will
            # report the exit code in its output log.  The exit code is 128 + the signal number.
            crash_status = zzuf_log.crash_logged(cfg.copymode)
            sr.bookmark_s1()
            sr.s1 = zzuf_log.seed

            # record the fact that we've made it this far
            try_count = sr.s1_delta()
            sf.record_tries(tries=try_count)
            r.record_tries(tries=try_count)

            new_uniq_crash = False
            if crash_status:
                logger.info('Generating testcase for %s', zzuf_log.line)
                # a true crash
                zzuf_range = zzuf_log.range
                # create the temp dir for the results
                cfg.create_tmpdir()
                outfile = cfg.get_testcase_outfile(sf.path, sr.s1)
                logger.debug('Output file is %s', outfile)
                testcase = zzuf.generate_test_case(sf.path, sr.s1, zzuf_range,
                                                   outfile)

                # Do internal verification using GDB / Valgrind / Stderr
                fuzzedfile = file_handlers.basicfile.BasicFile(outfile)

                with BffCrash(cfg, sf, fuzzedfile, cfg.program,
                              cfg.debugger_timeout, cfg.killprocname,
                              cfg.backtracelevels, cfg.crashers_dir, sr.s1,
                              r) as c:
                    if c.is_crash:
                        new_uniq_crash = verify_crasher(
                            c, hashes, cfg, seedfile_set)

                    # record the zzuf log line for this crash
                    if not c.logger:
                        c.get_logger()
                    c.logger.debug("zzuflog: %s", zzuf_log.line)
                    c.logger.info('Command: %s', testcase.cmdline)

                cfg.clean_tmpdir()

            sr.increment_seed()

            # cache objects in case of reboot
            cache_state(cfg.campaign_id, 'seedrange', sr,
                        cfg.cached_seedrange_file)
            pickled_seedfile_file = os.path.join(cfg.cached_objects_dir,
                                                 sf.pkl_file())
            cache_state(cfg.campaign_id, sf.cache_key(), sf,
                        pickled_seedfile_file)
            cache_state(cfg.campaign_id, 'seedfile_set', seedfile_set,
                        cfg.cached_seedfile_set)

            if new_uniq_crash:
                # we had a hit, so break the inner while() loop
                # so we can pick a new range. This is to avoid
                # having a crash-rich range run away with the
                # probability before other ranges have been tried
                break
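The cache_state()/get_cached_state() calls above are what let a campaign resume after the VM reboots that the watchdog can trigger; their implementation is not shown on this page. A minimal pickle-based sketch matching the call signatures used in main() (an assumption, not certfuzz's actual helpers) could look like this:

import os
import pickle


def cache_state(campaign_id, key, obj, filename):
    # Persist `obj` so a rebooted campaign can pick up where it left off.
    # `campaign_id` and `key` are unused here; they are kept only to match
    # the call sites in main() above.
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)


def get_cached_state(key, campaign_id, filename):
    # Return the previously cached object, or None on a fresh start.
    if not os.path.exists(filename):
        return None
    with open(filename, 'rb') as f:
        return pickle.load(f)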