def _crash_builder(self):
    self.logger.debug('Building new testcase object.')

    # copy our original testcase as the basis for the new testcase
    new_testcase = copy.deepcopy(self.testcase)

    # get a new dir for the next crasher
    newcrash_tmpdir = tempfile.mkdtemp(prefix='minimizer_crash_builder_',
                                       dir=self.tempdir)

    # get a new filename for the next crasher
    sfx = self.testcase.fuzzedfile.ext
    if self.testcase.seedfile:
        pfx = '%s-' % self.testcase.seedfile.root
    else:
        pfx = 'string-'
    (fd, f) = tempfile.mkstemp(suffix=sfx, prefix=pfx, dir=newcrash_tmpdir)
    os.close(fd)
    delete_files(f)

    outfile = f
    if os.path.exists(outfile):
        self._raise('Outfile should not already exist: %s' % outfile)

    self.logger.debug('\tCopying %s to %s', self.tempfile, outfile)
    filetools.copy_file(self.tempfile, outfile)

    if 'copyfuzzedto' in self.cfg['target']:
        copyfuzzedto = str(self.cfg['target'].get('copyfuzzedto', ''))
        self.logger.debug('Copying fuzzed file to %s', copyfuzzedto)
        filetools.copy_file(self.tempfile, copyfuzzedto)

    if 'postprocessfuzzed' in self.cfg['target']:
        postprocessfuzzed = str(self.cfg['target']['postprocessfuzzed'])
        self.logger.debug('Executing postprocess %s', postprocessfuzzed)
        os.system(postprocessfuzzed)

    new_testcase.fuzzedfile = BasicFile(outfile)
    self.logger.debug('\tNew fuzzed_content file: %s %s',
                      new_testcase.fuzzedfile.path,
                      new_testcase.fuzzedfile.md5)

    # clear out the copied testcase signature so that it will be
    # regenerated
    new_testcase.signature = None

    # replace old testcase details with new info specific to this testcase
    self.logger.debug('\tUpdating testcase details')
    new_testcase.update_crash_details()

    # the tempdir we created is no longer needed because
    # update_crash_details creates a fresh one
    shutil.rmtree(newcrash_tmpdir)
    if os.path.exists(newcrash_tmpdir):
        self.logger.warning('Failed to remove temp dir %s', newcrash_tmpdir)

    return new_testcase
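# A hypothetical sketch (not from the source) of the config entries that
# _crash_builder consults. The key names 'copyfuzzedto' and
# 'postprocessfuzzed' appear in the code above, but this layout of the
# config and the example values are assumptions for illustration only.
example_target_cfg = {
    'target': {
        # destination the fuzzed file is copied to before postprocessing
        'copyfuzzedto': '/tmp/fuzzed.bin',  # hypothetical path
        # shell command run via os.system() after the copy completes
        'postprocessfuzzed': '/usr/local/bin/repack.sh',  # hypothetical
    },
}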
def is_same_crash(self):
    # get debugger output filename
    (fd, f) = tempfile.mkstemp(dir=self.tempdir,
                               prefix='minimizer_is_same_crash_')
    os.close(fd)
    if os.path.exists(f):
        delete_files(f)
    if os.path.exists(f):
        raise MinimizerError('Unable to get temporary debug file')

    # create debugger output
    dbg = self.run_debugger(self.tempfile, f)

    if dbg.is_crash:
        newfuzzed_hash = self.get_signature(dbg, self.backtracelevels)
    else:
        newfuzzed_hash = None

    # initialize or increment the counter for this hash
    if newfuzzed_hash in self.crash_sigs_found:
        self.crash_sigs_found[newfuzzed_hash] += 1
    elif not newfuzzed_hash:
        # don't do anything with non-crashes
        pass
    else:
        # the testcase is new to this minimization run
        self.crash_sigs_found[newfuzzed_hash] = 1
        self.logger.info('testcase=%s signal=%s', newfuzzed_hash, dbg.signal)

        if self.save_others and newfuzzed_hash not in self.crash_hashes:
            # the testcase is not one of the crashes we're looking for,
            # so add it to the other_crashes dict in case our
            # caller wants to do something with it
            newcrash = self._crash_builder()
            if newcrash.is_crash:
                # note that since we're doing this every time we see a
                # testcase that's not in self.crash_hashes, we're also
                # effectively keeping only the smallest hamming distance
                # version of newfuzzed_hash as we progress through the
                # minimization process
                self.other_crashes[newfuzzed_hash] = newcrash

    # ditch the temp file
    delete_files(dbg.file)
    if os.path.exists(dbg.file):
        raise MinimizerError('Unable to remove temporary debug file')

    return newfuzzed_hash in self.crash_hashes
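# A self-contained sketch of the init-or-increment bookkeeping used in
# is_same_crash above. Nothing here is from the source:
# collections.Counter collapses the two counting branches, and None
# entries (non-crashes) are filtered out before counting.
from collections import Counter

def tally_signatures(hashes):
    '''Count crash signatures, ignoring None entries for non-crashes.'''
    return Counter(h for h in hashes if h is not None)

# tally_signatures(['abc', None, 'abc', 'def']) == Counter({'abc': 2, 'def': 1})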
def test_delete_files(self):
    (f1_fd, f1) = tempfile.mkstemp(dir=self.tempdir, text=True)
    (f2_fd, f2) = tempfile.mkstemp(dir=self.tempdir, text=True)
    # no need to keep the tmpfiles open
    for fd in (f1_fd, f2_fd):
        os.close(fd)
    for f in (f1, f2):
        self.assertTrue(os.path.exists(f))
    delete_files(f1, f2)
    for f in (f1, f2):
        self.assertFalse(os.path.exists(f))
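# The test above pins down delete_files' contract: every path passed in
# is gone afterwards. A minimal implementation consistent with that
# contract -- an assumption, since the real filetools helper may differ,
# e.g. in how it reports failures:
import os

def delete_files(*paths):
    '''Remove each given file, silently skipping paths that do not exist.'''
    for path in paths:
        if os.path.exists(path):
            os.remove(path)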
def _pre_analyze(self, testcase):
    testcase.set_debugger_template('complete')
    logger.info('Getting complete debugger output for crash: %s',
                testcase.fuzzedfile.path)
    testcase.get_debug_output(testcase.fuzzedfile.path)

    # We now have full debugger output, including exploitability.
    # Update the crash object with this info.
    testcase.update_crash_details()

    if self.dbg_out_file_orig != testcase.dbg.file:
        # we have a new debugger output; remove the old one
        filetools.delete_files(self.dbg_out_file_orig)
        if os.path.exists(self.dbg_out_file_orig):
            logger.warning('Failed to remove old debugger file %s',
                           self.dbg_out_file_orig)
        else:
            logger.debug('Removed old debug file %s', self.dbg_out_file_orig)
def _set_crash_hashes(self):
    if self.crash_hashes:
        # shortcut if it's already set
        return self.crash_hashes

    miss_count = 0
    # we want to keep going until we are 0.95 confident that
    # if there are any other crashers they have a probability
    # less than 0.5
    max_misses = probability.misses_until_quit(0.95, 0.5)
    sigs_set = set()
    times = []

    # loop until we've found ALL the testcase signatures
    # (sometimes testcase sigs change for the same input file)
    while miss_count < max_misses:
        target_hash_count = len(sigs_set)
        if target_hash_count > self.max_target_hashes:
            self._raise('Too many crash hashes seen to minimize. '
                        'Is memory randomization disabled?')

        (fd, f) = tempfile.mkstemp(prefix='minimizer_set_crash_hashes_',
                                   text=True, dir=self.tempdir)
        os.close(fd)
        delete_files(f)

        # run the debugger, remembering the elapsed time for later
        start = time.time()
        dbg = self.run_debugger(self.tempfile, f)
        end = time.time()
        delta = end - start

        if dbg.is_crash:
            times.append(delta)
            current_sig = self.get_signature(dbg, self.backtracelevels)
        else:
            current_sig = None

        # ditch the temp file
        if os.path.exists(f):
            delete_files(f)

        if current_sig:
            if current_sig in sigs_set:
                miss_count += 1
            else:
                sigs_set.add(current_sig)
                miss_count = 0
        else:
            # this testcase had no signature, so skip it
            miss_count += 1

    self.crash_hashes = list(sigs_set)

    # calculate the average and standard deviation of the debugger run
    # times, then set the debugger timeout at the 0.99 confidence level
    # TODO: What if the VM becomes slower?
    # We may give up on crashes before they happen.
    avg_time = numpy.average(times)
    stdev_time = numpy.std(times)
    zscore = 2.58  # ~99% normal quantile
    self.measured_dbg_time = avg_time + (zscore * stdev_time)

    return self.crash_hashes
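# A worked sketch of the stopping rule above -- an assumption about what
# probability.misses_until_quit computes, based on its comment. If an
# undiscovered signature occurs with probability p >= 0.5 per run, the
# chance of missing it n runs in a row is (1 - p)**n <= 0.5**n. Driving
# that below 1 - confidence = 0.05 requires
#     n >= log(1 - confidence) / log(1 - p) = log(0.05) / log(0.5) ~= 4.32
# so five consecutive misses suffice.
import math

def misses_until_quit_sketch(confidence, p):
    '''Consecutive misses needed before quitting with the given confidence.'''
    return int(math.ceil(math.log(1.0 - confidence) / math.log(1.0 - p)))

assert misses_until_quit_sketch(0.95, 0.5) == 5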
touch_watchdog_file(cfg)

# get one last debugger output for the newly minimized file
if crash.pc_in_function:
    # change the debugger template
    crash.set_debugger_template('complete')
else:
    # use a debugger template that specifies fixed offsets
    # from $pc for disassembly
    crash.set_debugger_template('complete_nofunction')

logger.info('Getting complete debugger output for crash: %s',
            crash.fuzzedfile.path)
crash.get_debug_output(crash.fuzzedfile.path)

if dbg_out_file_orig != crash.dbg.file:
    # we have a new debugger output; remove the old one
    filetools.delete_files(dbg_out_file_orig)
    if os.path.exists(dbg_out_file_orig):
        logger.warning('Failed to remove old debugger file %s',
                       dbg_out_file_orig)
    else:
        logger.debug('Removed old debug file %s', dbg_out_file_orig)

# use the minimized file for the rest of the analyses
analyzers = [
    stderr.StdErr,
    cw_gmalloc.CrashWranglerGmalloc,
]
if cfg.use_valgrind:
    analyzers.extend([
        valgrind.Valgrind,
        callgrind.Callgrind,
    ])
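# A hypothetical sketch of how the analyzers list built above might be
# consumed. The (cfg, crash) constructor signature and the go() entry
# point are illustrative guesses, not confirmed API; the real driver may
# differ.
def run_analyzers(analyzers, cfg, crash):
    for analyzer_cls in analyzers:
        try:
            analyzer_cls(cfg, crash).go()
        except Exception:
            logger.exception('Analyzer %s failed', analyzer_cls.__name__)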