def sanity_check(cls, bin_file):
    """Verify that FuzzManager is usable and its configuration files exist.

    Args:
        bin_file (str): Binary file being tested.

    Raises:
        _fm_import_error: Re-raised if the FuzzManager import failed earlier.
        IOError: If a required configuration file is missing.
    """
    if _fm_import_error is not None:
        raise _fm_import_error  # pylint: disable=raising-bad-type
    if not os.path.isfile(cls.FM_CONFIG):
        raise IOError("Missing: %s" % cls.FM_CONFIG)
    # Compute the per-binary config path once instead of joining it twice.
    bin_cfg = "".join([bin_file, ".fuzzmanagerconf"])
    if not os.path.isfile(bin_cfg):
        raise IOError("Missing: %s" % bin_cfg)
    # Parsing the binary's config validates its contents; raises on bad data.
    ProgramConfiguration.fromBinary(bin_file)
def sanity_check(bin_file):
    """Perform FuzzManager sanity check.

    Args:
        bin_file (str): Binary file being tested.

    Returns:
        None
    """
    fm_config = FuzzManagerReporter.FM_CONFIG
    if not fm_config.is_file():
        raise IOError("Missing: %s" % (fm_config,))
    binary_config = Path(bin_file + ".fuzzmanagerconf")
    if not binary_config.is_file():
        raise IOError("Missing: %s.fuzzmanagerconf" % (bin_file,))
    # Loading the configuration validates its contents.
    ProgramConfiguration.fromBinary(bin_file)
def OnFault(self, run, test, variationCount, monitorData, actionValues):
    """Peach fault callback: package crash data into a ZIP and submit it to FuzzManager.

    Args:
        run: Peach run object (unused here, part of the callback signature).
        test: Peach test object (unused here, part of the callback signature).
        variationCount: Variation counter (unused here, part of the callback signature).
        monitorData (dict): Monitor name -> captured log data.
        actionValues (list): Peach action tuples; indices [0]/[1]/[2]/[3] are
            read below — appears to be (name, kind, data[, extra]); verify
            against the Peach monitor producing them.
    """
    # Setup FuzzManager with information about target and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.target_binary)
    # Prepare FuzzManager with target and crash information.
    stdout = self._get_value_by_key(monitorData, "stdout.txt", "N/A")
    stderr = self._get_value_by_key(monitorData, "stderr.txt", "N/A")
    auxdat = self._get_value_by_key(monitorData, "auxdat.txt", "N/A")
    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)
    collector = Collector(tool="peach")
    # Write testcase content and any additional meta information to an
    # in-memory ZIP archive (renamed from "buffer" to avoid shadowing the builtin).
    zip_data = StringIO.StringIO()
    zip_buffer = zipfile.ZipFile(zip_data, 'w')
    # Collect |actionValues| crash information from Peach.
    for i, action_value in enumerate(actionValues):
        if len(action_value) > 2:
            data = action_value[2]
            fileName = "data_%d_%s_%s.txt" % (i, action_value[1], action_value[0])
            zip_buffer.writestr(fileName, data)
        if len(action_value) > 3 and action_value[1] != 'output':
            data = repr(action_value[3])
            fileName = "data_%d_%s_%s_action.txt" % (i, action_value[1], action_value[0])
            zip_buffer.writestr(fileName, data)
        if len(action_value) > 3 and action_value[1] == 'output':
            fileName = "data_%d_%s_%s_fileName.txt" % (i, action_value[1], action_value[0])
            data = action_value[3]
            zip_buffer.writestr(fileName, data)
    # Collect |monitorData| crash information from Peach.
    for name, contents in monitorData.items():
        zip_buffer.writestr(name, contents)
    zip_buffer.close()
    # delete=False: the file must survive the handle so Collector can read it.
    # getvalue() returns the whole buffer regardless of position, so no seek
    # is needed; the with-statement closes the handle.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as testcase:
        testcase.write(zip_data.getvalue())
    # Submit crash report with testcase to FuzzManager.
    collector.submit(crash_info, testcase.name, metaData=None)
def __init__(self, args, collector):
    """Parse command-line arguments and set up browser/fuzzing state.

    Args:
        args: Command-line argument list to parse (also reused as the
            positional-argument list after parsing).
        collector: FuzzManager Collector used for submitting results.
    """
    parser = OptionParser(usage="%prog [options] browserDir [testcaseURL]")
    parser.add_option("--valgrind", action="store_true", dest="valgrind", default=False, help="use valgrind with a reasonable set of options")
    parser.add_option("-m", "--minlevel", type="int", dest="minimumInterestingLevel", default=DOM_FINE + 1, help="minimum domfuzz level for lithium to consider the testcase interesting")
    parser.add_option("--submit", action="store_true", dest="submit", default=False, help="submit to FuzzManager (if interesting)")
    parser.add_option("--background", action="store_true", dest="background", default=False, help="Run the browser in the background on Mac (e.g. for local reduction)")
    options, args = parser.parse_args(args)
    if len(args) < 1:
        usage("Missing browserDir argument")
    browserDir = args[0]
    # Standalone domInteresting: Optional. Load this URL or file (rather than the Bugzilla front page)
    # loopdomfuzz: Optional. Test (and possibly splice/reduce) only this URL, rather than looping (but note the prefs file isn't maintained)
    # Lithium: Required. Reduce this file.
    options.argURL = args[1] if len(args) > 1 else ""
    options.browserDir = browserDir  # used by loopdomfuzz
    self.dirs = FigureOutDirs(getFullPath(browserDir))
    self.options = options
    self.env = self.initEnv()
    self.knownPath = "mozilla-central"
    self.collector = collector
    self.runBrowserOptions = self.initRunBrowserOptions()
    # ProgramConfiguration comes from the firefox binary's .fuzzmanagerconf
    # ("firefox.exe" on Windows, "firefox" elsewhere).
    self.pc = ProgramConfiguration.fromBinary(os.path.join(browserDir, "firefox.exe" if sps.isWin else "firefox"))
def add_fault(self):
    """Collect crash details from the current bucket and submit them to FuzzManager."""
    # Setup FuzzManager with target information and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.binary)
    # Prepare FuzzManager with crash information.
    stdout = "N/A"  # Todo: There is no plain stdout logger yet.
    stderr = "N/A"  # Todo: There is no plain stderr logger yet.
    # Bug fix: the fallback must be a dict — the previous "N/A" default made
    # the chained .get() raise AttributeError when "crashlog" was absent.
    auxdat = self.bucket.get("crashlog", {}).get("data", "N/A")
    metaData = None
    testcase = self.save_bucket_as_zip(self.bucket)
    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)
    # Submit crash report with testcase to FuzzManager.
    collector = Collector(tool="dharma")
    collector.submit(crash_info, testcase, metaData)
def create_crash_info(report, target_binary):
    """Build a CrashInfo object from a report's log files.

    Args:
        report: Report object exposing ``path`` plus ``log_aux``, ``log_err``
            and ``log_out`` file names.
        target_binary: Binary the logs were produced by; its
            .fuzzmanagerconf is used for the ProgramConfiguration.

    Returns:
        CrashInfo: Parsed crash information.
    """
    def _read_log_lines(log_name):
        # Logs are read as bytes and decoded leniently; FuzzManager wants
        # a list of lines rather than a single blob.
        with open(os.path.join(report.path, log_name), "rb") as log_fp:
            return log_fp.read().decode("utf-8", errors="ignore").splitlines()

    aux_data = _read_log_lines(report.log_aux) if report.log_aux is not None else None
    return CrashInfo.fromRawCrashData(
        _read_log_lines(report.log_out),
        _read_log_lines(report.log_err),
        ProgramConfiguration.fromBinary(target_binary),
        auxCrashData=aux_data)
def OnFault(self, run, test, variationCount, monitorData, actionValues):
    """Peach fault handler: package crash data into a ZIP and submit it to FuzzManager.

    Args:
        run, test, variationCount: Peach callback parameters (not used here).
        monitorData (dict): Monitor file name -> captured log data.
        actionValues (list): Peach action tuples; indices [0]/[1]/[2]/[3] are
            read below — presumably (name, kind, data[, extra]); confirm
            against the Peach agent producing them.
    """
    # Setup FuzzManager with information about target and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.target_binary)
    # Prepare FuzzManager with target and crash information.
    stdout = self._get_value_by_key(monitorData, "stdout.txt", "N/A")
    stderr = self._get_value_by_key(monitorData, "stderr.txt", "N/A")
    auxdat = self._get_value_by_key(monitorData, "auxdat.txt", "N/A")
    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)
    collector = Collector(tool="peach")
    # Write testcase content and any additional meta information to a temporary ZIP archive.
    buffer = StringIO.StringIO()
    zip_buffer = zipfile.ZipFile(buffer, 'w')
    # Collect |actionValues| crash information from Peach.
    for i in range(len(actionValues)):
        if len(actionValues[i]) > 2:
            data = actionValues[i][2]
            fileName = "data_%d_%s_%s.txt" % (i, actionValues[i][1], actionValues[i][0])
            zip_buffer.writestr(fileName, data)
        # Non-'output' extras are repr()'d; 'output' extras hold a file name.
        if len(actionValues[i]) > 3 and actionValues[i][1] != 'output':
            data = repr(actionValues[i][3])
            fileName = "data_%d_%s_%s_action.txt" % (i, actionValues[i][1], actionValues[i][0])
            zip_buffer.writestr(fileName, data)
        if len(actionValues[i]) > 3 and actionValues[i][1] == 'output':
            fileName = "data_%d_%s_%s_fileName.txt" % (i, actionValues[i][1], actionValues[i][0])
            data = actionValues[i][3]
            zip_buffer.writestr(fileName, data)
    # Collect |monitorData| crash information from Peach.
    for k, v in monitorData.items():
        zip_buffer.writestr(k, v)
    zip_buffer.close()
    # delete=False: the on-disk file must outlive the handle so the
    # collector can read it by name after this block.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as testcase:
        buffer.seek(0)
        testcase.write(buffer.getvalue())
        testcase.close()
    # Submit crash report with testcase to FuzzManager.
    collector.submit(crash_info, testcase.name, metaData=None)
def add_fault(self):
    """Collect crash details from the current bucket and submit them to FuzzManager."""
    # Setup FuzzManager with target information and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.binary)
    # Prepare FuzzManager with crash information.
    stdout = "N/A"  # Todo: There is no plain stdout logger yet.
    stderr = "N/A"  # Todo: There is no plain stderr logger yet.
    # Bug fix: use a dict fallback — with the previous "N/A" default the
    # chained .get() raised AttributeError when "crashlog" was missing.
    auxdat = self.bucket.get("crashlog", {}).get("data", "N/A")
    metaData = None
    testcase = self.save_bucket_as_zip(self.bucket)
    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)
    # Submit crash report with testcase to FuzzManager.
    collector = Collector(tool="dharma")
    collector.submit(crash_info, testcase, metaData)
def crash_info(self):
    """Create CrashInfo object from logs.

    Args:
        None

    Returns:
        CrashInfo: CrashInfo based on log data.
    """
    # Lazily build and cache the CrashInfo on first access.
    if self._crash_info is not None:
        return self._crash_info
    assert self.path is not None
    # Read the optional aux (crash) log as lenient UTF-8 lines.
    aux_data = None
    if self._logs.aux is not None:
        with open(self._logs.aux, "rb") as log_fp:
            aux_data = log_fp.read().decode("utf-8", errors="ignore").splitlines()
    # Prefer the per-binary "<target_binary>.fuzzmanagerconf" when available.
    fm_cfg = None
    if isfile("%s.fuzzmanagerconf" % (self._target_binary,)):
        fm_cfg = ProgramConfiguration.fromBinary(self._target_binary)
    else:
        LOG.debug("'%s.fuzzmanagerconf' does not exist", self._target_binary)
    if fm_cfg is None:
        # Fall back to a ProgramConfiguration built from local platform data.
        LOG.debug("creating ProgramConfiguration")
        cpu = machine().lower()
        fm_cfg = ProgramConfiguration(
            basename(self._target_binary),
            "x86_64" if cpu == "amd64" else cpu,
            system(),
        )
    with open(self._logs.stderr, "rb") as err_fp:
        err_lines = err_fp.read().decode("utf-8", errors="ignore").splitlines()
    with open(self._logs.stdout, "rb") as out_fp:
        out_lines = out_fp.read().decode("utf-8", errors="ignore").splitlines()
    self._crash_info = CrashInfo.fromRawCrashData(
        out_lines,
        err_lines,
        fm_cfg,
        auxCrashData=aux_data,
    )
    return self._crash_info
def crash_info(self, target_binary):
    """Create CrashInfo object from logs.

    Args:
        target_binary (str): Binary file being tested.

    Returns:
        CrashInfo: CrashInfo based on Result log data.
    """
    # Lazily build and cache the CrashInfo on first access.
    if self._crash_info is None:
        # read in the log files and create a CrashInfo object
        aux_data = None
        if self.log_aux is not None:
            with open(os.path.join(self.path, self.log_aux), "rb") as log_fp:
                aux_data = log_fp.read().decode(
                    "utf-8", errors="ignore").splitlines()
        stderr_file = os.path.join(self.path, self.log_err)
        stdout_file = os.path.join(self.path, self.log_out)
        # create ProgramConfiguration that can be reported to a FM server
        if os.path.isfile("%s.fuzzmanagerconf" % (target_binary, )):
            # attempt to use "<target_binary>.fuzzmanagerconf"
            fm_cfg = ProgramConfiguration.fromBinary(target_binary)
        else:
            log.debug("'%s.fuzzmanagerconf' does not exist", target_binary)
            fm_cfg = None
        if fm_cfg is None:
            log.debug("creating ProgramConfiguration")
            # normalize "amd64" (reported on some systems) to FM's "x86_64"
            cpu = platform.machine().lower()
            fm_cfg = ProgramConfiguration(
                os.path.basename(target_binary),
                "x86_64" if cpu == "amd64" else cpu,
                platform.system())
        with open(stderr_file, "rb") as err_fp, open(stdout_file,
                                                     "rb") as out_fp:
            self._crash_info = CrashInfo.fromRawCrashData(
                out_fp.read().decode("utf-8", errors="ignore").splitlines(),
                err_fp.read().decode("utf-8", errors="ignore").splitlines(),
                fm_cfg,
                auxCrashData=aux_data)
    return self._crash_info
def main(argv=None):
    '''Command line options.

    Parses arguments, performs exactly one requested action (refresh, submit,
    search, generate, autosubmit or download) against a FuzzManager server and
    returns a process exit code (0 success, 2 usage error, 3 no match).
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version', version=program_version_string)

    # Crash information
    parser.add_argument("--stdout", dest="stdout", help="File containing STDOUT data", metavar="FILE")
    parser.add_argument("--stderr", dest="stderr", help="File containing STDERR data", metavar="FILE")
    parser.add_argument("--crashdata", dest="crashdata", help="File containing external crash data", metavar="FILE")

    # Actions
    parser.add_argument("--refresh", dest="refresh", action='store_true', help="Perform a signature refresh")
    parser.add_argument("--submit", dest="submit", action='store_true', help="Submit a signature to the server")
    parser.add_argument("--search", dest="search", action='store_true', help="Search cached signatures for the given crash")
    parser.add_argument("--generate", dest="generate", action='store_true',
                        help="Create a (temporary) local signature in the cache directory")
    parser.add_argument("--autosubmit", dest="autosubmit", action='store_true',
                        help="Go into auto-submit mode. In this mode, all remaining arguments are interpreted as "
                             "the crashing command. This tool will automatically obtain GDB crash information "
                             "and submit it.")
    parser.add_argument("--download", dest="download", type=int,
                        help="Download the testcase for the specified crash entry", metavar="ID")

    # Settings
    parser.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR")
    parser.add_argument("--serverhost", dest="serverhost", help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", dest="serverproto", help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                        help="File containing the server authentication token", metavar="FILE")
    parser.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID")
    parser.add_argument("--platform", dest="platform", help="Platform this crash appeared on",
                        metavar="(x86|x86-64|arm)")
    parser.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT")
    parser.add_argument("--productversion", dest="product_version", help="Product version this crash appeared on",
                        metavar="VERSION")
    parser.add_argument("--os", dest="os", help="OS this crash appeared on",
                        metavar="(windows|linux|macosx|b2g|android)")
    parser.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME")
    parser.add_argument('--args', dest='args', nargs='+', type=str,
                        help="List of program arguments. Backslashes can be used for escaping and are stripped.")
    parser.add_argument('--env', dest='env', nargs='+', type=str,
                        help="List of environment variables in the form 'KEY=VALUE'")
    parser.add_argument('--metadata', dest='metadata', nargs='+', type=str,
                        help="List of metadata variables in the form 'KEY=VALUE'")
    parser.add_argument("--binary", dest="binary", help="Binary that has a configuration file for reading",
                        metavar="BINARY")
    parser.add_argument("--testcase", dest="testcase", help="File containing testcase", metavar="FILE")
    parser.add_argument("--testcasequality", dest="testcasequality", default="0",
                        help="Integer indicating test case quality (0 is best and default)", metavar="VAL")

    # Options that affect how signatures are generated
    parser.add_argument("--forcecrashaddr", dest="forcecrashaddr", action='store_true',
                        help="Force including the crash address into the signature")
    parser.add_argument("--forcecrashinst", dest="forcecrashinst", action='store_true',
                        help="Force including the crash instruction into the signature (GDB only)")
    parser.add_argument("--numframes", dest="numframes", default=8, type=int,
                        help="How many frames to include into the signature (default is 8)")

    parser.add_argument('rargs', nargs=argparse.REMAINDER)

    if len(argv) == 0:
        parser.print_help()
        return 2

    # process options
    opts = parser.parse_args(argv)

    # Check that exactly one action is specified
    actions = ["refresh", "submit", "search", "generate", "autosubmit", "download"]
    haveAction = False
    for action in actions:
        if getattr(opts, action):
            if haveAction:
                print("Error: Cannot specify multiple actions at the same time", file=sys.stderr)
                return 2
            haveAction = True
    if not haveAction:
        print("Error: Must specify an action", file=sys.stderr)
        return 2

    # In autosubmit mode, we try to open a configuration file for the binary specified
    # on the command line. It should contain the binary-specific settings for submitting.
    if opts.autosubmit:
        if not opts.rargs:
            print("Error: Action --autosubmit requires test arguments to be specified", file=sys.stderr)
            return 2

        # Store the binary candidate only if --binary wasn't also specified
        if not opts.binary:
            opts.binary = opts.rargs[0]

        # We also need to check that (apart from the binary), there is only one file on the command line
        # (the testcase), if it hasn't been explicitely specified.
        testcase = opts.testcase
        testcaseidx = None
        if testcase is None:
            for idx, arg in enumerate(opts.rargs[1:]):
                if os.path.exists(arg):
                    if testcase:
                        print("Error: Multiple potential testcases specified on command line. "
                              "Must explicitely specify test using --testcase.")
                        return 2
                    testcase = arg
                    testcaseidx = idx

    # Either --autosubmit was specified, or someone specified --binary manually
    # Check that the binary actually exists
    if opts.binary and not os.path.exists(opts.binary):
        print("Error: Specified binary does not exist: %s" % opts.binary)
        return 2

    stdout = None
    stderr = None
    crashdata = None
    crashInfo = None
    args = None
    env = None
    metadata = {}

    if opts.search or opts.generate or opts.submit or opts.autosubmit:
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))

        if opts.autosubmit:
            # Try to automatically get arguments from the command line
            # If the testcase is not the last argument, leave it in the
            # command line arguments and replace it with a generic placeholder.
            if testcaseidx == len(opts.rargs[1:]) - 1:
                args = opts.rargs[1:-1]
            else:
                args = opts.rargs[1:]
                if testcaseidx is not None:
                    args[testcaseidx] = "TESTFILE"
        else:
            if opts.args:
                args = [arg.replace('\\', '') for arg in opts.args]

        if opts.env:
            env = dict(kv.split('=', 1) for kv in opts.env)

        # Start without any ProgramConfiguration
        configuration = None

        # If we have a binary, try using that to create our ProgramConfiguration
        if opts.binary:
            configuration = ProgramConfiguration.fromBinary(opts.binary)
            if configuration:
                if env:
                    configuration.addEnvironmentVariables(env)
                if args:
                    configuration.addProgramArguments(args)
                if metadata:
                    configuration.addMetadata(metadata)

        # If configuring through binary failed, try to manually create ProgramConfiguration from command line arguments
        if configuration is None:
            if opts.platform is None or opts.product is None or opts.os is None:
                print("Error: Must specify/configure at least --platform, --product and --os", file=sys.stderr)
                return 2
            configuration = ProgramConfiguration(opts.product, opts.platform, opts.os, opts.product_version,
                                                 env, args, metadata)

        if not opts.autosubmit:
            if opts.stderr is None and opts.crashdata is None:
                print("Error: Must specify at least either --stderr or --crashdata file", file=sys.stderr)
                return 2

            if opts.stdout:
                with open(opts.stdout) as f:
                    stdout = f.read()
            if opts.stderr:
                with open(opts.stderr) as f:
                    stderr = f.read()
            if opts.crashdata:
                with open(opts.crashdata) as f:
                    crashdata = f.read()

            crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration, auxCrashData=crashdata)
            if opts.testcase:
                (testCaseData, isBinary) = Collector.read_testcase(opts.testcase)
                if not isBinary:
                    crashInfo.testcase = testCaseData

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto, serverauthtoken,
                          opts.clientid, opts.tool)

    if opts.refresh:
        collector.refresh()
        return 0

    if opts.submit:
        testcase = opts.testcase
        collector.submit(crashInfo, testcase, opts.testcasequality, metadata)
        return 0

    if opts.search:
        (sig, metadata) = collector.search(crashInfo)
        if sig is None:
            print("No match found")
            return 3
        print(sig)
        if metadata:
            print(json.dumps(metadata, indent=4))
        return 0

    if opts.generate:
        sigFile = collector.generate(crashInfo, opts.forcecrashaddr, opts.forcecrashinst, opts.numframes)
        if not sigFile:
            print("Failed to generate a signature for the given crash information.", file=sys.stderr)
            return 2
        print(sigFile)
        return 0

    if opts.autosubmit:
        runner = AutoRunner.fromBinaryArgs(opts.rargs[0], opts.rargs[1:])
        if runner.run():
            crashInfo = runner.getCrashInfo(configuration)
            collector.submit(crashInfo, testcase, opts.testcasequality, metadata)
        else:
            print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)
            return 2

    if opts.download:
        (retFile, retJSON) = collector.download(opts.download)
        if not retFile:
            print("Specified crash entry does not have a testcase", file=sys.stderr)
            return 2

        if "args" in retJSON and retJSON["args"]:
            args = json.loads(retJSON["args"])
            print("Command line arguments: %s" % " ".join(args))
            print("")

        if "env" in retJSON and retJSON["env"]:
            env = json.loads(retJSON["env"])
            # Bug fix: use %-formatting — the comma previously passed the joined
            # string as a second print() argument, printing a literal "%s".
            print("Environment variables: %s" % " ".join(["%s = %s" % (k, v) for (k, v) in env.items()]))
            print("")

        if "metadata" in retJSON and retJSON["metadata"]:
            metadata = json.loads(retJSON["metadata"])
            print("== Metadata ==")
            for k, v in metadata.items():
                print("%s = %s" % (k, v))
            print("")

        print(retFile)
        return 0
def __init__(self, options, runthis, logPrefix, in_compare_jit, env=None):  # pylint: disable=too-complex
    # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
    """Run a JS shell invocation, classify the outcome and collect crash data.

    Args:
        options: Parsed fuzzer options (timeout, jsengine, collector, ...).
        runthis: Command list; runthis[0] is the shell binary path.
        logPrefix: Prefix used to locate/derive the -out/-err/-crash log files.
        in_compare_jit: True when invoked from compare_jit (changes triage rules).
        env: Optional environment mapping; defaults to a copy of os.environ.
    """
    # If Lithium uses this as an interestingness test, logPrefix is likely not a Path object, so make it one.
    logPrefix = Path(logPrefix)
    pathToBinary = runthis[0].expanduser().resolve()  # pylint: disable=invalid-name
    # This relies on the shell being a local one from compile_shell:
    # Ignore trailing ".exe" in Win, also abspath makes it work w/relative paths like "./js"
    # pylint: disable=invalid-name
    assert pathToBinary.with_suffix(".fuzzmanagerconf").is_file()
    pc = ProgramConfiguration.fromBinary(
        str(pathToBinary.parent / pathToBinary.stem))
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        # NOTE(review): this comprehension iterates the characters of a string
        # literal and the suppressions value looks garbled/redacted — verify
        # against the original source before relying on it.
        runthis = (inspect_shell.constructVgCmdList(
            errorCode=VALGRIND_ERROR_EXIT_CODE) + [
                f"--suppressions=(unknown)"
                for filename in "valgrind_suppressions.txt"
            ] + runthis)

    timed_run_kw = {"env": (env or deepcopy(os.environ))}
    # Enable LSan which is enabled with non-ARM64 simulator ASan, only on Linux
    if platform.system() == "Linux" and inspect_shell.queryBuildConfiguration(
            options.jsengine, "asan"):
        env_asan_options = "detect_leaks=1,"
        env_lsan_options = "max_leaks=1,"
        if inspect_shell.queryBuildConfiguration(options.jsengine,
                                                 "arm64-simulator"):
            env_asan_options = ""
            env_lsan_options = ""
        timed_run_kw["env"].update({"ASAN_OPTIONS": env_asan_options})
        timed_run_kw["env"].update({"LSAN_OPTIONS": env_lsan_options})
    elif not platform.system() == "Windows":
        timed_run_kw["preexec_fn"] = set_ulimit

    pc.addEnvironmentVariables(dict(timed_run_kw["env"]))

    lithium_logPrefix = str(logPrefix).encode("utf-8")
    if isinstance(lithium_logPrefix, b"".__class__):
        # logPrefix should be a string for timed_run in Lithium version 0.2.1 to work properly, apparently
        lithium_logPrefix = lithium_logPrefix.decode("utf-8", errors="replace")

    runinfo = timedrun.timed_run(
        [str(x) for x in runthis],  # Convert all Paths/bytes to strings for Lithium
        options.timeout,
        lithium_logPrefix,
        **timed_run_kw)

    lev = JS_FINE
    is_oom = False
    issues = []
    auxCrashData = []  # pylint: disable=invalid-name

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and "readlines" everything into memory.
    # Collector adds newlines later, see https://git.io/fjoMB
    out_log = (logPrefix.parent / f"{logPrefix.stem}-out").with_suffix(".txt")
    with io.open(str(out_log), "r", encoding="utf-8", errors="replace") as f:
        out = [line.rstrip() for line in f]
    err_log = (logPrefix.parent / f"{logPrefix.stem}-err").with_suffix(".txt")
    with io.open(str(err_log), "r", encoding="utf-8", errors="replace") as f:
        err = [line.rstrip() for line in f]

    # Scan stderr (newest lines first) for unhandlable OOM markers.
    for line in reversed(err):
        if "[unhandlable oom]" in line:
            print("Ignoring unhandlable oom...")
            is_oom = True
            break

    if is_oom:
        # OOM is not interesting: drop the level and remove any crash artifacts.
        lev = JS_FINE
        crash_log = (logPrefix.parent / f"{logPrefix.stem}-crash").with_suffix(".txt")
        core_file = logPrefix.parent / f"{logPrefix.stem}-core"
        if crash_log.is_file():
            crash_log.unlink()
        if core_file.is_file():
            core_file.unlink()
        dbggr_cmd = os_ops.make_dbg_cmd(runthis[0], runinfo.pid)
        if dbggr_cmd:
            core_file = Path(dbggr_cmd[-1])
            if core_file.is_file():
                core_file.unlink()
    elif options.valgrind and runinfo.return_code == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        valgrindErrorPrefix = f"=={runinfo.pid}=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timedrun.CRASHED:
        # Grab the platform crash log (or core-derived data) if available.
        if os_ops.grab_crash_log(runthis[0], runinfo.pid, logPrefix, True):
            crash_log = (logPrefix.parent / f"{logPrefix.stem}-crash").with_suffix(".txt")
            with io.open(str(crash_log), "r", encoding="utf-8", errors="replace") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif file_manipulation.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.return_code == 0 and not in_compare_jit:
        # We might have(??) run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append(f"[Non-crash bug] {issue}")

    activated = False  # Turn on when trying to report *reliable* testcases that do not have a coredump
    # On Linux, fall back to run testcase via gdb using --args if core file data is unavailable
    # Note that this second round of running uses a different fuzzSeed as the initial if default jsfunfuzz is run
    # We should separate this out, i.e. running jsfunfuzz within a debugger, only if core dumps cannot be generated
    if (activated and platform.system() == "Linux" and shutil.which("gdb")
            and not auxCrashData and not in_compare_jit):
        print("Note: No core file found on Linux - falling back to run via gdb")
        extracted_gdb_cmds = ["-ex", "run"]
        with io.open(str(Path(__file__).parent.parent / "util" / "gdb_cmds.txt"),
                     "r",
                     encoding="utf-8",
                     errors="replace") as f:
            # Skip blank lines, comments and echo lines from the gdb script.
            for line in f:
                if line.rstrip() and not line.startswith("#") and not line.startswith("echo"):
                    extracted_gdb_cmds.append("-ex")
                    extracted_gdb_cmds.append(f"{line.rstrip()}")
        no_main_log_gdb_log = subprocess.run(
            (["gdb", "-n", "-batch"] + extracted_gdb_cmds + ["--args"] + [str(x) for x in runthis]),
            check=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        auxCrashData = no_main_log_gdb_log.stdout

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = Crash_Info.CrashInfo.fromRawCrashData(out, err, pc, auxCrashData=auxCrashData)

    create_collector.printCrashInfo(crashInfo)
    # We only care about crashes and assertion failures on shells with no symbols
    # Note that looking out for the Assertion failure message is highly SpiderMonkey-specific
    if not is_oom and (not isinstance(crashInfo, Crash_Info.NoCrashInfo)
                       or "Assertion failure: " in str(crashInfo.rawStderr)
                       or "Segmentation fault" in str(crashInfo.rawStderr)
                       or "Bus error" in str(crashInfo.rawStderr)):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

    try:
        match = options.collector.search(crashInfo)
        if match[0] is not None:
            create_collector.printMatchingSignature(match)
            if match[1].get("frequent"):
                print("Ignoring frequent bucket")
                lev = JS_FINE
    except UnicodeDecodeError:  # Sometimes FM throws due to unicode issues
        print("Note: FuzzManager is throwing a UnicodeDecodeError, signature matching skipped")
        match = False

    print(f"{logPrefix} | {summaryString(issues, lev, runinfo.elapsedtime)}")

    if lev != JS_FINE:
        summary_log = (logPrefix.parent / f"{logPrefix.stem}-summary").with_suffix(".txt")
        with io.open(str(summary_log), "w", encoding="utf-8", errors="replace") as f:
            f.writelines([
                f"Number: {logPrefix}\n",
                f'Command: {" ".join(quote(str(x)) for x in runthis)}\n'
            ] + [f"Status: {i}\n" for i in issues])

    # Expose triage results to callers.
    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo  # pylint: disable=invalid-name
    self.match = match
    self.runinfo = runinfo
    self.return_code = runinfo.return_code
def scan_crashes(base_dir, cmdline_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        with open(cmdline_path, 'r') as cmdline_file:
            # The line containing the AFL "@@" placeholder (if any) marks
            # where the testcase path is substituted into the command line.
            for idx, line in enumerate(cmdline_file):
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        # (Removed a stray debug print of the parsed command line.)
        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print("Error: Creating program configuration from binary failed. "
                  "Check your binary configuration file.", file=sys.stderr)
            return 2

        collector = Collector()

        for crash_file in crash_files:
            stdin = None

            # With no "@@" placeholder, AFL feeds the testcase via stdin.
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # Marker file so this crash is skipped on the next scan.
                open(crash_file + ".submitted", 'a').close()
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)
def main(argv=None):
    '''
    Command line entry point.

    Parses arguments and dispatches into one of two modes: LibFuzzer
    monitoring (--libfuzzer) or AFL management (--aflfuzz, the default),
    the latter covering S3 queue/corpus/build actions, corpus refresh via
    afl-cmin, and a FuzzManager crash-scanning loop.

    @rtype: int or None
    @return: 0 on success of a one-shot action, 2 on usage/setup errors;
             the monitoring loops only return when they terminate.
    '''

    program_name = os.path.basename(sys.argv[0])

    if argv is None:
        argv = sys.argv[1:]

    # --- argument parser setup -------------------------------------------
    parser = argparse.ArgumentParser(usage='%s --libfuzzer or --aflfuzz [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main Options", description=None)
    aflGroup = parser.add_argument_group(title="AFL Options", description="Use these arguments in AFL mode")
    libfGroup = parser.add_argument_group(title="Libfuzzer Options", description="Use these arguments in Libfuzzer mode")
    fmGroup = parser.add_argument_group(title="FuzzManager Options", description="Use these to specify FuzzManager parameters")

    mainGroup.add_argument("--libfuzzer", dest="libfuzzer", action='store_true', help="Enable LibFuzzer mode")
    mainGroup.add_argument("--aflfuzz", dest="aflfuzz", action='store_true', help="Enable AFL mode")
    mainGroup.add_argument("--fuzzmanager", dest="fuzzmanager", action='store_true', help="Use FuzzManager to submit crash results")

    libfGroup.add_argument('--env', dest='env', nargs='+', type=str, help="List of environment variables in the form 'KEY=VALUE'")
    libfGroup.add_argument('--cmd', dest='cmd', action='store_true', help="Command with parameters to run")
    libfGroup.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR")

    fmGroup.add_argument("--fuzzmanager-toolname", dest="fuzzmanager_toolname", help="Override FuzzManager tool name (for submitting crash results)")
    fmGroup.add_argument("--custom-cmdline-file", dest="custom_cmdline_file", help="Path to custom cmdline file", metavar="FILE")
    fmGroup.add_argument("--env-file", dest="env_file", help="Path to a file with additional environment variables", metavar="FILE")
    fmGroup.add_argument("--serverhost", help="Server hostname for remote signature management.", metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto", help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile", help="File containing the server authentication token", metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version", help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str, help="List of metadata variables in the form 'KEY=VALUE'")

    aflGroup.add_argument("--s3-queue-upload", dest="s3_queue_upload", action='store_true', help="Use S3 to synchronize queues")
    aflGroup.add_argument("--s3-queue-cleanup", dest="s3_queue_cleanup", action='store_true', help="Cleanup S3 queue entries older than specified refresh interval")
    aflGroup.add_argument("--s3-queue-status", dest="s3_queue_status", action='store_true', help="Display S3 queue status")
    aflGroup.add_argument("--s3-build-download", dest="s3_build_download", help="Use S3 to download the build for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-build-upload", dest="s3_build_upload", help="Use S3 to upload a new build for the specified project", metavar="FILE")
    aflGroup.add_argument("--s3-corpus-download", dest="s3_corpus_download", help="Use S3 to download the test corpus for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-download-size", dest="s3_corpus_download_size", help="When downloading the corpus, select only SIZE files randomly", metavar="SIZE")
    aflGroup.add_argument("--s3-corpus-upload", dest="s3_corpus_upload", help="Use S3 to upload a test corpus for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-replace", dest="s3_corpus_replace", action='store_true', help="In conjunction with --s3-corpus-upload, deletes all other remote test files")
    aflGroup.add_argument("--s3-corpus-refresh", dest="s3_corpus_refresh", help="Download queues and corpus from S3, combine and minimize, then re-upload.", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-status", dest="s3_corpus_status", action='store_true', help="Display S3 corpus status")
    aflGroup.add_argument("--test-file", dest="test_file", help="Optional path to copy the test file to before reproducing", metavar="FILE")
    aflGroup.add_argument("--afl-timeout", dest="afl_timeout", type=int, default=1000, help="Timeout per test to pass to AFL for corpus refreshing", metavar="MSECS")
    aflGroup.add_argument("--firefox", dest="firefox", action='store_true', help="Test Program is Firefox (requires FFPuppet installed)")
    aflGroup.add_argument("--firefox-prefs", dest="firefox_prefs", help="Path to prefs.js file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-extensions", nargs='+', type=str, dest="firefox_extensions", help="Path extension file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-testpath", dest="firefox_testpath", help="Path to file to open with Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-start-afl", dest="firefox_start_afl", metavar="FILE", help="Start AFL with the given Firefox binary, remaining arguments being passed to AFL")
    aflGroup.add_argument("--s3-refresh-interval", dest="s3_refresh_interval", type=int, default=86400, help="How often the s3 corpus is refreshed (affects queue cleaning)", metavar="SECS")
    aflGroup.add_argument("--afl-output-dir", dest="afloutdir", help="Path to the AFL output directory to manage", metavar="DIR")
    aflGroup.add_argument("--afl-binary-dir", dest="aflbindir", help="Path to the AFL binary directory to use", metavar="DIR")
    aflGroup.add_argument("--afl-stats", dest="aflstats", help="Collect aggregated statistics while scanning output directories", metavar="FILE")
    aflGroup.add_argument("--s3-bucket", dest="s3_bucket", help="Name of the S3 bucket to use", metavar="NAME")
    aflGroup.add_argument("--project", dest="project", help="Name of the subfolder/project inside the S3 bucket", metavar="NAME")

    # Everything after the recognized options is the target command line.
    aflGroup.add_argument('rargs', nargs=argparse.REMAINDER)

    if not argv:
        parser.print_help()
        return 2

    opts = parser.parse_args(argv)

    # AFL mode is the default when neither mode flag is given.
    if not opts.libfuzzer and not opts.aflfuzz:
        opts.aflfuzz = True

    if opts.cmd and opts.aflfuzz:
        if not opts.firefox:
            print("Error: Use --cmd either with libfuzzer or with afl in firefox mode", file=sys.stderr)
            return 2

    # --- LibFuzzer mode ---------------------------------------------------
    if opts.libfuzzer:
        if not opts.rargs:
            print("Error: No arguments specified", file=sys.stderr)
            return 2

        binary = opts.rargs[0]
        if not os.path.exists(binary):
            print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
            return 2

        configuration = ProgramConfiguration.fromBinary(binary)
        if configuration is None:
            print("Error: Failed to load program configuration based on binary", file=sys.stderr)
            return 2

        env = {}
        if opts.env:
            env = dict(kv.split('=', 1) for kv in opts.env)
            configuration.addEnvironmentVariables(env)

        # Copy the system environment variables by default and overwrite them
        # if they are specified through env.
        # NOTE(review): when opts.env is set, the --env variables are parsed
        # twice and addEnvironmentVariables is called twice (once above, once
        # below) — presumably redundant; verify against the FuzzManager API.
        env = dict(os.environ)
        if opts.env:
            oenv = dict(kv.split('=', 1) for kv in opts.env)
            configuration.addEnvironmentVariables(oenv)
            for envkey in oenv:
                env[envkey] = oenv[envkey]

        args = opts.rargs[1:]
        if args:
            configuration.addProgramArguments(args)

        metadata = {}
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
            configuration.addMetadata(metadata)

        # Set LD_LIBRARY_PATH for convenience
        if not 'LD_LIBRARY_PATH' in env:
            env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

        collector = Collector(opts.sigdir, opts.fuzzmanager_toolname)

        signature_repeat_count = 0
        last_signature = None

        # Restart the target forever, harvesting one crash per run, until the
        # same signature repeats too many times.
        while True:
            process = subprocess.Popen(
                opts.rargs,
                # stdout=None,
                stderr=subprocess.PIPE,
                env=env,
                universal_newlines=True
            )

            # LibFuzzerMonitor consumes stderr on a separate thread.
            monitor = LibFuzzerMonitor(process.stderr)
            monitor.start()
            monitor.join()

            print("Process terminated, processing results...", file=sys.stderr)

            trace = monitor.getASanTrace()
            testcase = monitor.getTestcase()

            crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

            # NOTE(review): this rebinds 'metadata' (previously the --metadata
            # dict) to the signature match metadata returned by the collector.
            (sigfile, metadata) = collector.search(crashInfo)

            if sigfile is not None:
                # Known signature: count consecutive repeats instead of
                # re-submitting a duplicate.
                if last_signature == sigfile:
                    signature_repeat_count += 1
                else:
                    last_signature = sigfile
                    signature_repeat_count = 0

                print("Crash matches signature %s, not submitting..." % sigfile, file=sys.stderr)
            else:
                # New crash: generate a local signature to suppress duplicates,
                # then submit with the testcase produced by LibFuzzer.
                collector.generate(crashInfo, forceCrashAddress=True, forceCrashInstruction=False, numFrames=8)
                collector.submit(crashInfo, testcase)
                print("Successfully submitted crash.", file=sys.stderr)

            if signature_repeat_count >= 10:
                print("Too many crashes with the same signature, exiting...", file=sys.stderr)
                break

    # --- AFL mode ---------------------------------------------------------
    if opts.aflfuzz:
        if opts.firefox or opts.firefox_start_afl:
            if not haveFFPuppet:
                print("Error: --firefox and --firefox-start-afl require FFPuppet to be installed", file=sys.stderr)
                return 2

            if opts.custom_cmdline_file:
                print("Error: --custom-cmdline-file is incompatible with firefox options", file=sys.stderr)
                return 2

            if not opts.firefox_prefs or not opts.firefox_testpath:
                print("Error: --firefox and --firefox-start-afl require --firefox-prefs and --firefox-testpath to be specified", file=sys.stderr)
                return 2

        if opts.firefox_start_afl:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for starting AFL with firefox", file=sys.stderr)
                return 2

            (ffp, cmd, env) = setup_firefox(opts.firefox_start_afl, opts.firefox_prefs, opts.firefox_extensions, opts.firefox_testpath)

            afl_cmd = [ os.path.join(opts.aflbindir, "afl-fuzz") ]

            # Drop the '--' separator so remaining args pass through to AFL.
            opts.rargs.remove("--")

            afl_cmd.extend(opts.rargs)
            afl_cmd.extend(cmd)

            try:
                subprocess.call(afl_cmd, env=env)
            except:
                # Deliberate catch-all: we must clean up the FFPuppet instance
                # below regardless of how afl-fuzz failed.
                traceback.print_exc()

            ffp.clean_up()
            return 0

        afl_out_dirs = []
        if opts.afloutdir:
            if not os.path.exists(os.path.join(opts.afloutdir, "crashes")):
                # The specified directory doesn't have a "crashes" sub directory.
                # Either the wrong directory was specified, or this is an AFL multi-process
                # sychronization directory. Try to figure this out here.
                sync_dirs = os.listdir(opts.afloutdir)

                for sync_dir in sync_dirs:
                    if os.path.exists(os.path.join(opts.afloutdir, sync_dir, "crashes")):
                        afl_out_dirs.append(os.path.join(opts.afloutdir, sync_dir))

                if not afl_out_dirs:
                    print("Error: Directory %s does not appear to be a valid AFL output/sync directory" % opts.afloutdir, file=sys.stderr)
                    return 2
            else:
                afl_out_dirs.append(opts.afloutdir)

        # Upload and FuzzManager modes require specifying the AFL directory
        if opts.s3_queue_upload or opts.fuzzmanager:
            if not opts.afloutdir:
                print("Error: Must specify AFL output directory using --afl-output-dir", file=sys.stderr)
                return 2

        # All S3 actions need a bucket and a project name.
        if (opts.s3_queue_upload or opts.s3_corpus_refresh or opts.s3_build_download or opts.s3_build_upload
                or opts.s3_corpus_download or opts.s3_corpus_upload or opts.s3_queue_status):
            if not opts.s3_bucket or not opts.project:
                print("Error: Must specify both --s3-bucket and --project for S3 actions", file=sys.stderr)
                return 2

        # --- one-shot S3 actions; each returns immediately ---------------
        if opts.s3_queue_status:
            status_data = get_queue_status(opts.s3_bucket, opts.project)
            total_queue_files = 0

            for queue_name in status_data:
                print("Queue %s: %s" % (queue_name, status_data[queue_name]))
                total_queue_files += status_data[queue_name]
            print("Total queue files: %s" % total_queue_files)

            return 0

        if opts.s3_corpus_status:
            status_data = get_corpus_status(opts.s3_bucket, opts.project)
            total_corpus_files = 0

            for (status_dt, status_cnt) in sorted(status_data.items()):
                print("Added %s: %s" % (status_dt, status_cnt))
                total_corpus_files += status_cnt
            print("Total corpus files: %s" % total_corpus_files)

            return 0

        if opts.s3_queue_cleanup:
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project, opts.s3_refresh_interval)
            return 0

        if opts.s3_build_download:
            download_build(opts.s3_build_download, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_build_upload:
            upload_build(opts.s3_build_upload, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_corpus_download:
            if opts.s3_corpus_download_size is not None:
                opts.s3_corpus_download_size = int(opts.s3_corpus_download_size)

            download_corpus(opts.s3_corpus_download, opts.s3_bucket, opts.project, opts.s3_corpus_download_size)
            return 0

        if opts.s3_corpus_upload:
            upload_corpus(opts.s3_corpus_upload, opts.s3_bucket, opts.project, opts.s3_corpus_replace)
            return 0

        # --- corpus refresh: download queues + corpus, minimize, re-upload
        if opts.s3_corpus_refresh:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for refreshing the test corpus", file=sys.stderr)
                return 2

            if not os.path.exists(opts.s3_corpus_refresh):
                os.makedirs(opts.s3_corpus_refresh)

            queues_dir = os.path.join(opts.s3_corpus_refresh, "queues")

            print("Cleaning old AFL queues from s3://%s/%s/queues/" % (opts.s3_bucket, opts.project))
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project, opts.s3_refresh_interval)

            print("Downloading AFL queues from s3://%s/%s/queues/ to %s" % (opts.s3_bucket, opts.project, queues_dir))
            download_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project)

            cmdline_file = os.path.join(opts.s3_corpus_refresh, "cmdline")
            if not os.path.exists(cmdline_file):
                print("Error: Failed to download a cmdline file from queue directories.", file=sys.stderr)
                return 2

            print("Downloading build")
            download_build(os.path.join(opts.s3_corpus_refresh, "build"), opts.s3_bucket, opts.project)

            with open(os.path.join(opts.s3_corpus_refresh, "cmdline"), 'r') as cmdline_file:
                cmdline = cmdline_file.read().splitlines()

            # Assume cmdline[0] is the name of the binary
            binary_name = os.path.basename(cmdline[0])

            # Try locating our binary in the build we just unpacked
            # (must match by name AND be owner-executable).
            binary_search_result = [os.path.join(dirpath, filename)
                                    for dirpath, dirnames, filenames in os.walk(os.path.join(opts.s3_corpus_refresh, "build"))
                                    for filename in filenames
                                    if (filename == binary_name and (stat.S_IXUSR & os.stat(os.path.join(dirpath, filename))[stat.ST_MODE]))]

            if not binary_search_result:
                print("Error: Failed to locate binary %s in unpacked build." % binary_name, file=sys.stderr)
                return 2

            if len(binary_search_result) > 1:
                print("Error: Binary name %s is ambiguous in unpacked build." % binary_name, file=sys.stderr)
                return 2

            cmdline[0] = binary_search_result[0]

            # Download our current corpus into the queues directory as well
            print("Downloading corpus from s3://%s/%s/corpus/ to %s" % (opts.s3_bucket, opts.project, queues_dir))
            download_corpus(queues_dir, opts.s3_bucket, opts.project)

            # Ensure the directory for our new tests is empty
            updated_tests_dir = os.path.join(opts.s3_corpus_refresh, "tests")
            if os.path.exists(updated_tests_dir):
                shutil.rmtree(updated_tests_dir)
            os.mkdir(updated_tests_dir)

            # Run afl-cmin
            afl_cmin = os.path.join(opts.aflbindir, "afl-cmin")
            if not os.path.exists(afl_cmin):
                print("Error: Unable to locate afl-cmin binary.", file=sys.stderr)
                return 2

            if opts.firefox:
                (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], opts.firefox_prefs, opts.firefox_extensions, opts.firefox_testpath)
                cmdline = ffCmd

            afl_cmdline = [afl_cmin, '-e', '-i', queues_dir, '-o', updated_tests_dir, '-t', str(opts.afl_timeout), '-m', 'none']

            if opts.test_file:
                afl_cmdline.extend(['-f', opts.test_file])

            afl_cmdline.extend(cmdline)

            print("Running afl-cmin")
            with open(os.devnull, 'w') as devnull:
                env = os.environ.copy()
                env['LD_LIBRARY_PATH'] = os.path.dirname(cmdline[0])

                if opts.firefox:
                    env.update(ffEnv)

                subprocess.check_call(afl_cmdline, stdout=devnull, env=env)

            if opts.firefox:
                ffpInst.clean_up()

            # replace existing corpus with reduced corpus
            print("Uploading reduced corpus to s3://%s/%s/corpus/" % (opts.s3_bucket, opts.project))
            upload_corpus(updated_tests_dir, opts.s3_bucket, opts.project, corpus_delete=True)

            # Prune the queues directory once we successfully uploaded the new
            # test corpus, but leave everything that's part of our new corpus
            # so we don't have to download those files again.
            test_files = [file for file in os.listdir(updated_tests_dir) if os.path.isfile(os.path.join(updated_tests_dir, file))]
            obsolete_queue_files = [file for file in os.listdir(queues_dir) if os.path.isfile(os.path.join(queues_dir, file)) and file not in test_files]

            for file in obsolete_queue_files:
                os.remove(os.path.join(queues_dir, file))

        # --- long-running monitor loop ------------------------------------
        if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats:
            last_queue_upload = 0
            while True:
                if opts.fuzzmanager:
                    for afl_out_dir in afl_out_dirs:
                        # NOTE(review): arguments are positional — the fifth
                        # (opts.test_file) binds to the fifth parameter of the
                        # scan_crashes definition in effect at runtime; verify
                        # it lines up with that signature.
                        scan_crashes(afl_out_dir, opts.custom_cmdline_file, opts.env_file, opts.fuzzmanager_toolname, opts.test_file)

                # Only upload queue files every 20 minutes
                if opts.s3_queue_upload and last_queue_upload < int(time.time()) - 1200:
                    for afl_out_dir in afl_out_dirs:
                        upload_queue_dir(afl_out_dir, opts.s3_bucket, opts.project, new_cov_only=True)
                    last_queue_upload = int(time.time())

                if opts.aflstats:
                    write_aggregated_stats(afl_out_dirs, opts.aflstats, cmdline_path=opts.custom_cmdline_file)

                time.sleep(10)
def write_aggregated_stats(base_dirs, outfile, cmdline_path=None):
    '''
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.

    Reads each base directory's AFL ``fuzzer_stats`` file, aggregates the
    selected fields (sum, mean, per-instance list, or max), appends any
    warnings (missing/invalid fuzzmanagerconf, unreported crashes), and
    writes the result to ``outfile`` under an inter-process file lock.

    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.
    '''

    # Which fields to add
    wanted_fields_total = [
        'execs_done',
        'execs_per_sec',
        'pending_favs',
        'pending_total',
        'variable_paths',
        'unique_crashes',
        'unique_hangs']

    # Which fields to aggregate by mean
    wanted_fields_mean = ['exec_timeout']

    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ['cycles_done', 'bitmap_cvg']

    # Which fields should be aggregated by max
    wanted_fields_max = ['last_path']

    # Warnings to include
    warnings = list()

    aggregated_stats = {}

    for field in wanted_fields_total:
        aggregated_stats[field] = 0

    for field in wanted_fields_mean:
        # (running sum, sample count) pair; converted to a mean later.
        aggregated_stats[field] = (0, 0)

    for field in wanted_fields_all:
        aggregated_stats[field] = []

    # Parse a fuzzer_stats value as float or int depending on its format.
    def convert_num(num):
        if '.' in num:
            return float(num)
        return int(num)

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        # If no explicit cmdline file was given, latch onto the first base
        # directory's cmdline file (intentionally kept for later iterations).
        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        if os.path.exists(stats_path):
            with open(stats_path, 'r') as stats_file:
                stats = stats_file.read()

            for line in stats.splitlines():
                (field_name, field_val) = line.split(':', 1)
                field_name = field_name.strip()
                field_val = field_val.strip()

                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (val + convert_num(field_val), cnt + 1)
                elif field_name in wanted_fields_all:
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    # Max-aggregated fields are only created on first sight.
                    num_val = convert_num(field_val)
                    if (not field_name in aggregated_stats) or aggregated_stats[field_name] < num_val:
                        aggregated_stats[field_name] = num_val

    # If we don't have any data here, then the fuzzers haven't written any statistics yet
    # NOTE(review): aggregated_stats is pre-populated above, so this dict is
    # never empty and this guard never fires — confirm intended condition.
    if not aggregated_stats:
        return

    # Mean conversion (sum/count pair -> mean; keep the raw sum if count is 0)
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            aggregated_stats[field_name] = val

    # Verify fuzzmanagerconf exists and can be parsed
    _, cmdline = command_file_to_list(cmdline_path)
    target_binary = cmdline[0] if cmdline else None

    if target_binary is not None:
        if not os.path.isfile("%s.fuzzmanagerconf" % target_binary):
            warnings.append("WARNING: Missing %s.fuzzmanagerconf\n" % target_binary)
        elif ProgramConfiguration.fromBinary(target_binary) is None:
            warnings.append("WARNING: Invalid %s.fuzzmanagerconf\n" % target_binary)

    # Look for unreported crashes (".failed" markers left by scan_crashes)
    failed_reports = 0
    for base_dir in base_dirs:
        crashes_dir = os.path.join(base_dir, "crashes")
        if not os.path.isdir(crashes_dir):
            continue
        for crash_file in os.listdir(crashes_dir):
            if crash_file.endswith(".failed"):
                failed_reports += 1
    if failed_reports:
        warnings.append("WARNING: Unreported crashes detected (%d)\n" % failed_reports)

    # Write out data, aligned on the longest field name, under a lock so
    # concurrent writers don't interleave.
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)

    max_keylen = max([len(x) for x in fields])

    with InterProcessLock(outfile + ".lock"), open(outfile, 'w') as f:
        for field in fields:
            # Max-aggregated fields may be absent if never seen in any stats.
            if not field in aggregated_stats:
                continue

            val = aggregated_stats[field]

            if isinstance(val, list):
                val = " ".join(val)

            f.write("%s%s: %s\n" % (field, " " * (max_keylen + 1 - len(field)), val))

        for warning in warnings:
            f.write(warning)

    return
def compareLevel(jsEngine, flags, infilename, logPrefix, options, showDetailedDiffs, quickMode):  # pylint: disable=invalid-name,missing-docstring,missing-return-doc,missing-return-type-doc,too-complex
    # pylint: disable=too-many-branches,too-many-arguments,too-many-locals,too-many-statements
    """Run the testcase under several JS shell flag combinations and compare outputs.

    The first run's output is the baseline; each subsequent run is diffed
    against it. Usage errors, serious crashes, strange exits, and OOM are
    handled specially before any comparison takes place.

    options dict must be one we can pass to js_interesting.ShellResult;
    we also use it directly for knownPath, timeout, and collector.
    Return: (lev, crashInfo) or (js_interesting.JS_FINE, None)
    """
    assert isinstance(infilename, Path)

    combos = shell_flags.basic_flag_sets(jsEngine)

    if quickMode:
        # Only used during initial fuzzing. Allowed to have false negatives.
        combos = [combos[0]]

    # Remove any of the following flags from being used in compare_jit
    flags = list(
        set(flags) - {
            "--more-compartments",
            "--no-wasm",
            "--no-wasm-ion",
            "--no-wasm-baseline",
        })

    if flags:
        # The caller-supplied flags become the baseline (first) run.
        combos.insert(0, flags)

    commands = [[jsEngine] + combo + [str(infilename)] for combo in combos]

    r0 = None
    prefix0 = None

    for i, command in enumerate(commands):
        prefix = logPrefix.parent / f"{logPrefix.stem}-r{i}"
        command = commands[i]
        r = js_interesting.ShellResult(options, command, prefix, True)  # pylint: disable=invalid-name

        # Check for OOM before stderr is filtered below.
        oom = js_interesting.oomed(r.err)
        r.err = ignore_some_stderr(r.err)

        if (r.return_code == 1 or r.return_code == 2) and (
                anyLineContains(r.out, "[[script] scriptArgs*]") or (
                anyLineContains(r.err, "[scriptfile] [scriptarg...]"))):
            # The shell printed its usage text: a flag combo was rejected.
            # This must not happen on the baseline run (hence the assert).
            print("Got usage error from:")
            print(f' {" ".join(quote(str(x)) for x in command)}')
            assert i
            file_system_helpers.delete_logs(prefix)
        elif r.lev > js_interesting.JS_OVERALL_MISMATCH:
            # would be more efficient to run lithium on one or the other, but meh
            summary_more_serious = js_interesting.summaryString(
                r.issues + ["compare_jit found a more serious bug"],
                r.lev,
                r.runinfo.elapsedtime)
            print(f"{infilename} | {summary_more_serious}")
            summary_log = (logPrefix.parent / f"{logPrefix.stem}-summary").with_suffix(".txt")
            with io.open(str(summary_log), "w", encoding="utf-8", errors="replace") as f:
                f.write("\n".join(r.issues + [
                    " ".join(quote(str(x)) for x in command),
                    "compare_jit found a more serious bug"
                ]) + "\n")
            print(f' {" ".join(quote(str(x)) for x in command)}')
            return r.lev, r.crashInfo
        elif r.lev != js_interesting.JS_FINE or r.return_code != 0:
            # Strange exit: skip the comparison. If it happened on the
            # baseline run there is nothing to compare against — bail out.
            summary_other = js_interesting.summaryString(
                r.issues + [
                    "compare_jit is not comparing output, because the shell exited strangely"
                ],
                r.lev,
                r.runinfo.elapsedtime)
            print(f"{infilename} | {summary_other}")
            print(f' {" ".join(quote(str(x)) for x in command)}')
            file_system_helpers.delete_logs(prefix)
            if not i:
                return js_interesting.JS_FINE, None
        elif oom:
            # If the shell or python hit a memory limit, we consider the rest of the computation
            # "tainted" for the purpose of correctness comparison.
            message = "compare_jit is not comparing output: OOM"
            summary_oom = js_interesting.summaryString(r.issues + [message],
                                                       r.lev,
                                                       r.runinfo.elapsedtime)
            print(f"{infilename} | {summary_oom}")
            file_system_helpers.delete_logs(prefix)
            if not i:
                return js_interesting.JS_FINE, None
        elif not i:
            # Stash output from this run (the first one), so for subsequent runs, we can compare against it.
            (r0, prefix0) = (r, prefix)  # pylint: disable=invalid-name
        else:
            # Compare the output of this run (r.out) to the output of the first run (r0.out), etc.

            def optionDisabledAsmOnOneSide():  # pylint: disable=invalid-name
                # True when an asm.js-disabled diagnostic appears on either
                # side AND only one of the two runs carries --no-asmjs: that
                # stderr difference is expected, not a mismatch.
                asmMsg = "asm.js type error: Disabled by javascript.options.asmjs"  # pylint: disable=invalid-name
                # pylint: disable=invalid-name
                # pylint: disable=cell-var-from-loop
                optionDisabledAsm = anyLineContains(
                    r0.err, asmMsg) or anyLineContains(r.err, asmMsg)
                # pylint: disable=invalid-name
                optionDiffers = (("--no-asmjs" in commands[0]) != ("--no-asmjs" in command))
                return optionDisabledAsm and optionDiffers

            mismatchErr = (r.err != r0.err and not optionDisabledAsmOnOneSide())  # pylint: disable=invalid-name
            mismatchOut = (r.out != r0.out)  # pylint: disable=invalid-name

            if mismatchErr or mismatchOut:  # pylint: disable=no-else-return
                # Generate a short summary for stdout and a long summary for a "*-summary.txt" file.
                # pylint: disable=invalid-name
                rerunCommand = " ".join(
                    quote(str(x)) for x in [
                        "python3 -m funfuzz.js.compare_jit",
                        f'--flags={" ".join(flags)}',
                        f"--timeout={options.timeout}",
                        str(options.knownPath),
                        str(jsEngine),
                        str(infilename.name)
                    ])
                (summary, issues) = summarizeMismatch(mismatchErr, mismatchOut, prefix0, prefix)
                summary = (
                    f' {" ".join(quote(str(x)) for x in commands[0])}\n'
                    f' {" ".join(quote(str(x)) for x in command)}\n'
                    f"\n"
                    f"{summary}")
                summary_log = (logPrefix.parent / f"{logPrefix.stem}-summary").with_suffix(".txt")
                with io.open(str(summary_log), "w", encoding="utf-8",
                             errors="replace") as f:
                    f.write(f"{rerunCommand}\n\n{summary}")
                summary_overall_mismatch = js_interesting.summaryString(
                    issues, js_interesting.JS_OVERALL_MISMATCH, r.runinfo.elapsedtime)
                print(f"{infilename} | {summary_overall_mismatch}")
                if quickMode:
                    print(rerunCommand)
                if showDetailedDiffs:
                    print(summary)
                    print()
                # Create a crashInfo object with empty stdout, and stderr showing diffs
                pc = ProgramConfiguration.fromBinary(str(jsEngine))  # pylint: disable=invalid-name
                pc.addProgramArguments(flags)
                crashInfo = Crash_Info.CrashInfo.fromRawCrashData([], summary, pc)  # pylint: disable=invalid-name
                return js_interesting.JS_OVERALL_MISMATCH, crashInfo
            else:
                # print "compare_jit: match"
                file_system_helpers.delete_logs(prefix)

    # All matched :)
    file_system_helpers.delete_logs(prefix0)
    return js_interesting.JS_FINE, None
def compareLevel(jsEngine, flags, infilename, logPrefix, options, showDetailedDiffs, quickMode):
    """Legacy Python 2 variant of compareLevel (uses print statements).

    Runs the testcase under several shell flag combinations and compares
    each run's output against the first run's.

    NOTE(review): a second compareLevel definition exists earlier in this
    file; whichever is defined later shadows the other at runtime.

    # options dict must be one we can pass to jsInteresting.ShellResult
    # we also use it directly for knownPath, timeout, and collector
    # Return: (lev, crashInfo) or (jsInteresting.JS_FINE, None)
    """

    combos = shellFlags.basicFlagSets(jsEngine)

    if quickMode:
        # Only used during initial fuzzing. Allowed to have false negatives.
        combos = [combos[0]]

    if len(flags):
        combos.append(flags)

    commands = [[jsEngine] + combo + [infilename] for combo in combos]

    for i in range(0, len(commands)):
        prefix = logPrefix + "-r" + str(i)
        command = commands[i]
        r = jsInteresting.ShellResult(options, command, prefix, True)

        # Check for OOM before stderr gets filtered below.
        oom = jsInteresting.oomed(r.err)
        r.err = ignoreSomeOfStderr(r.err)

        if (r.rc == 1 or r.rc == 2) and (anyLineContains(r.out, '[[script] scriptArgs*]') or anyLineContains(r.err, '[scriptfile] [scriptarg...]')):
            # Shell printed its usage text: a flag combo was rejected.
            # Must not happen on the baseline (first) run.
            print "Got usage error from:"
            print " " + sps.shellify(command)
            assert i > 0
            jsInteresting.deleteLogs(prefix)
        elif r.lev > jsInteresting.JS_OVERALL_MISMATCH:
            # would be more efficient to run lithium on one or the other, but meh
            print infilename + " | " + jsInteresting.summaryString(r.issues + ["compareJIT found a more serious bug"], r.lev, r.runinfo.elapsedtime)
            with open(logPrefix + "-summary.txt", 'wb') as f:
                f.write('\n'.join(r.issues + [sps.shellify(command), "compareJIT found a more serious bug"]) + '\n')
            print " " + sps.shellify(command)
            return (r.lev, r.crashInfo)
        elif r.lev != jsInteresting.JS_FINE or r.rc != 0:
            # Strange exit: skip comparison; bail out entirely if it was the
            # baseline run (nothing to compare against).
            print infilename + " | " + jsInteresting.summaryString(r.issues + ["compareJIT is not comparing output, because the shell exited strangely"], r.lev, r.runinfo.elapsedtime)
            print " " + sps.shellify(command)
            jsInteresting.deleteLogs(prefix)
            if i == 0:
                return (jsInteresting.JS_FINE, None)
        elif oom:
            # If the shell or python hit a memory limit, we consider the rest of the computation
            # "tainted" for the purpose of correctness comparison.
            message = "compareJIT is not comparing output: OOM"
            print infilename + " | " + jsInteresting.summaryString(r.issues + [message], r.lev, r.runinfo.elapsedtime)
            jsInteresting.deleteLogs(prefix)
            if i == 0:
                return (jsInteresting.JS_FINE, None)
        elif i == 0:
            # Stash output from this run (the first one), so for subsequent runs, we can compare against it.
            (r0, prefix0) = (r, prefix)
        else:
            # Compare the output of this run (r.out) to the output of the first run (r0.out), etc.

            def fpuOptionDisabledAsmOnOneSide(fpuAsmMsg):
                # Expected stderr difference when exactly one of the two runs
                # carries --no-fpu and an FPU-related diagnostic appears.
                fpuOptionDisabledAsm = fpuAsmMsg in r0.err or fpuAsmMsg in r.err
                fpuOptionDiffers = (("--no-fpu" in commands[0]) != ("--no-fpu" in command))
                return fpuOptionDisabledAsm and fpuOptionDiffers

            def optionDisabledAsmOnOneSide():
                # Expected stderr difference when exactly one of the two runs
                # carries --no-asmjs and the asm.js-disabled diagnostic appears.
                asmMsg = "asm.js type error: Disabled by javascript.options.asmjs"
                optionDisabledAsm = anyLineContains(r0.err, asmMsg) or anyLineContains(r.err, asmMsg)
                optionDiffers = (("--no-asmjs" in commands[0]) != ("--no-asmjs" in command))
                return optionDisabledAsm and optionDiffers

            mismatchErr = (r.err != r0.err and
                           # --no-fpu (on debug x86_32 only) turns off asm.js compilation, among other things.
                           # This should only affect asm.js diagnostics on stderr.
                           not fpuOptionDisabledAsmOnOneSide("asm.js type error: Disabled by lack of floating point support") and
                           # And also wasm stuff. See bug 1243031.
                           not fpuOptionDisabledAsmOnOneSide("WebAssembly is not supported on the current device") and
                           not optionDisabledAsmOnOneSide())
            mismatchOut = (r.out != r0.out)

            if mismatchErr or mismatchOut:
                # Generate a short summary for stdout and a long summary for a "*-summary.txt" file.
                rerunCommand = sps.shellify(['~/funfuzz/js/compareJIT.py', "--flags="+' '.join(flags),
                                             "--timeout="+str(options.timeout), options.knownPath, jsEngine,
                                             os.path.basename(infilename)])
                (summary, issues) = summarizeMismatch(mismatchErr, mismatchOut, prefix0, prefix)
                summary = " " + sps.shellify(commands[0]) + "\n " + sps.shellify(command) + "\n\n" + summary
                with open(logPrefix + "-summary.txt", 'wb') as f:
                    f.write(rerunCommand + "\n\n" + summary)
                print infilename + " | " + jsInteresting.summaryString(issues, jsInteresting.JS_OVERALL_MISMATCH, r.runinfo.elapsedtime)
                if quickMode:
                    print rerunCommand
                if showDetailedDiffs:
                    print summary
                    print ""
                # Create a crashInfo object with empty stdout, and stderr showing diffs
                pc = ProgramConfiguration.fromBinary(jsEngine)
                pc.addProgramArguments(flags)
                crashInfo = CrashInfo.CrashInfo.fromRawCrashData([], summary, pc)
                return (jsInteresting.JS_OVERALL_MISMATCH, crashInfo)
            else:
                # print "compareJIT: match"
                jsInteresting.deleteLogs(prefix)

    # All matched :)
    jsInteresting.deleteLogs(prefix0)
    return (jsInteresting.JS_FINE, None)
def scan_crashes(base_dir, cmdline_path=None, env_path=None, tool_name=None,
                 firefox=None, firefox_prefs=None, firefox_extensions=None,
                 firefox_testpath=None, test_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @type env_path: String
    @param env_path: Optional file containing environment variables.

    @type tool_name: String
    @param tool_name: Optional tool name to report to FuzzManager.

    @param firefox: If set, reproduce crashes through a Firefox harness
                    (firefox_prefs/firefox_extensions/firefox_testpath are
                    forwarded to setup_firefox).

    @type test_path: String
    @param test_path: Optional filename where to copy the test before
                      attempting to reproduce a crash.
                      (Fix: this parameter was documented and used in the body
                      but missing from the signature, causing a NameError.)

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith((".submitted", ".failed")):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes.
        cmdline = []
        test_idx = None  # index of the '@@' placeholder inside cmdline, if any
        base_env = {}
        test_in_env = None  # name of the env var carrying the '@@' placeholder, if any

        if env_path:
            with open(env_path, 'r') as env_file:
                for line in env_file:
                    (name, val) = line.rstrip('\n').split("=", 1)
                    base_env[name] = val
                    if '@@' in val:
                        test_in_env = name

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        with open(cmdline_path, 'r') as cmdline_file:
            for idx, line in enumerate(cmdline_file):
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print("Error: Creating program configuration from binary failed. "
                  "Check your binary configuration file.", file=sys.stderr)
            return 2

        collector = Collector(tool=tool_name)

        if firefox:
            (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], firefox_prefs,
                                                    firefox_extensions, firefox_testpath)
            cmdline = ffCmd
            base_env.update(ffEnv)

        for crash_file in crash_files:
            stdin = None
            env = None

            if base_env:
                env = dict(base_env)

            # Deliver the testcase via the first available channel:
            # command line placeholder, env var placeholder, copy target, or stdin.
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            elif test_in_env is not None:
                env[test_in_env] = env[test_in_env].replace('@@', crash_file)
            elif test_path is not None:
                shutil.copy(crash_file, test_path)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            print("Processing crash file %s" % crash_file, file=sys.stderr)

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], env=env, stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # Marker file so this crash is skipped on the next scan
                open(crash_file + ".submitted", 'a').close()
                print("Success: Submitted crash to server.", file=sys.stderr)
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.",
                      file=sys.stderr)

        if firefox:
            ffpInst.clean_up()
def __init__(self, options, runthis, logPrefix, inCompareJIT):
    """Run a JS shell command, capture its output/crash data, and classify the result.

    Populates self.lev/out/err/issues/crashInfo/match/runinfo/rc for callers
    (e.g. compareJIT) to inspect.  Python 2 code (print statements).

    Args:
        options: Parsed option object; reads timeout, valgrind, knownPath,
                 shellIsDeterministic and collector from it.
        runthis: Command list; runthis[0] is the shell binary.
        logPrefix (str): Prefix for the -out.txt/-err.txt/-crash.txt/-summary.txt logs.
        inCompareJIT (bool): True when invoked from compareJIT; suppresses the
                             jsfunfuzz-specific exit checks.
    """
    pathToBinary = runthis[0]
    # This relies on the shell being a local one from compileShell.py:
    # Ignore trailing ".exe" in Win
    pc = ProgramConfiguration.fromBinary(pathToBinary.split('.')[0])
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        # Wrap the command in valgrind with a distinctive exit code and suppressions.
        runthis = (
            inspectShell.constructVgCmdList(errorCode=VALGRIND_ERROR_EXIT_CODE) +
            valgrindSuppressions(options.knownPath) +
            runthis)

    # Raise resource limits (e.g. core dumps) on POSIX before exec.
    preexec_fn = ulimitSet if os.name == 'posix' else None
    runinfo = timedRun.timed_run(runthis, options.timeout, logPrefix, preexec_fn=preexec_fn)

    lev = JS_FINE
    issues = []
    auxCrashData = []

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and 'readlines' everything into memory.
    with open(logPrefix + "-out.txt") as f:
        out = f.readlines()
    with open(logPrefix + "-err.txt") as f:
        err = f.readlines()

    if options.valgrind and runinfo.rc == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        # Valgrind prefixes its own diagnostics with "==<pid>=="
        valgrindErrorPrefix = "==" + str(runinfo.pid) + "=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timedRun.CRASHED:
        # Grab a platform-specific crash log (core dump / crash reporter output).
        if sps.grabCrashLog(runthis[0], runinfo.pid, logPrefix, True):
            with open(logPrefix + "-crash.txt") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif detect_malloc_errors.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.rc == 0 and not inCompareJIT:
        # We might have(??) run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper.py" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append("[Non-crash bug] " + issue)

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = CrashInfo.CrashInfo.fromRawCrashData(out, err, pc, auxCrashData=auxCrashData)

    createCollector.printCrashInfo(crashInfo)
    if not isinstance(crashInfo, CrashInfo.NoCrashInfo):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

    # A match against a known FuzzManager signature demotes the result back to JS_FINE.
    match = options.collector.search(crashInfo)
    if match[0] is not None:
        createCollector.printMatchingSignature(match)
        lev = JS_FINE

    print logPrefix + " | " + summaryString(issues, lev, runinfo.elapsedtime)

    if lev != JS_FINE:
        fileManipulation.writeLinesToFile(
            ['Number: ' + logPrefix + '\n',
             'Command: ' + sps.shellify(runthis) + '\n'] +
            ['Status: ' + i + "\n" for i in issues],
            logPrefix + '-summary.txt')

    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo
    self.match = match
    self.runinfo = runinfo
    self.rc = runinfo.rc
def __init__(self, options, runthis, logPrefix, in_compare_jit, env=None):  # pylint: disable=too-complex
    # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
    """Run a JS shell under Lithium's timed_run, collect logs, and classify the result.

    Python 3 / pathlib counterpart of the legacy constructor above.  Populates
    self.lev/out/err/issues/crashInfo/match/runinfo/return_code.

    Args:
        options: Parsed options; reads timeout, valgrind, shellIsDeterministic,
                 collector from it.
        runthis: Command list; runthis[0] is a Path to the shell binary.
        logPrefix: Path (or string) prefix for the -out/-err/-crash/-summary logs.
        in_compare_jit (bool): True when called from compare_jit; skips
                               jsfunfuzz-specific checks and the gdb fallback.
        env: Optional environment mapping; defaults to os.environ.
    """
    # If Lithium uses this as an interestingness test, logPrefix is likely not a Path object, so make it one.
    logPrefix = Path(logPrefix)
    pathToBinary = runthis[0].expanduser().resolve()  # pylint: disable=invalid-name
    # This relies on the shell being a local one from compile_shell:
    # Ignore trailing ".exe" in Win, also abspath makes it work w/relative paths like "./js"
    # pylint: disable=invalid-name
    assert pathToBinary.with_suffix(".fuzzmanagerconf").is_file()
    pc = ProgramConfiguration.fromBinary(
        str(pathToBinary.parent / pathToBinary.stem))
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        runthis = (inspect_shell.constructVgCmdList(
            errorCode=VALGRIND_ERROR_EXIT_CODE) +
            valgrindSuppressions() +
            runthis)

    timed_run_kw = {}
    timed_run_kw["env"] = (env or os.environ)
    if not platform.system() == "Windows":
        timed_run_kw["preexec_fn"] = set_ulimit

    lithium_logPrefix = str(logPrefix).encode("utf-8")
    # Total hack to make Python 2/3 work with Lithium
    if sys.version_info.major == 3 and isinstance(lithium_logPrefix, b"".__class__):
        # pylint: disable=redefined-variable-type
        lithium_logPrefix = lithium_logPrefix.decode("utf-8", errors="replace")

    # logPrefix should be a string for timed_run in Lithium version 0.2.1 to work properly, apparently
    runinfo = timed_run.timed_run(
        [str(x) for x in runthis],  # Convert all Paths/bytes to strings for Lithium
        options.timeout,
        lithium_logPrefix,
        **timed_run_kw)

    lev = JS_FINE
    issues = []
    auxCrashData = []  # pylint: disable=invalid-name

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and "readlines" everything into memory.
    out_log = (logPrefix.parent / (logPrefix.stem + "-out")).with_suffix(".txt")
    with io.open(str(out_log), "r", encoding="utf-8", errors="replace") as f:
        out = f.readlines()
    err_log = (logPrefix.parent / (logPrefix.stem + "-err")).with_suffix(".txt")
    with io.open(str(err_log), "r", encoding="utf-8", errors="replace") as f:
        err = f.readlines()

    if options.valgrind and runinfo.return_code == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        # Valgrind prefixes its diagnostics with "==<pid>=="
        valgrindErrorPrefix = "==" + str(runinfo.pid) + "=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timed_run.CRASHED:
        if os_ops.grab_crash_log(runthis[0], runinfo.pid, logPrefix, True):
            crash_log = (logPrefix.parent / (logPrefix.stem + "-crash")).with_suffix(".txt")
            with io.open(str(crash_log), "r", encoding="utf-8", errors="replace") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif file_manipulation.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.return_code == 0 and not in_compare_jit:
        # We might have(??) run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(
                out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append("[Non-crash bug] " + issue)

    activated = False  # Turn on when trying to report *reliable* testcases that do not have a coredump
    # On Linux, fall back to run testcase via gdb using --args if core file data is unavailable
    # Note that this second round of running uses a different fuzzSeed as the initial if default jsfunfuzz is run
    # We should separate this out, i.e. running jsfunfuzz within a debugger, only if core dumps cannot be generated
    if activated and platform.system() == "Linux" and which("gdb") and not auxCrashData and not in_compare_jit:
        print("Note: No core file found on Linux - falling back to run via gdb")
        extracted_gdb_cmds = ["-ex", "run"]
        # Translate gdb_cmds.txt into "-ex <cmd>" pairs, skipping comments and echo lines.
        with io.open(str(Path(__file__).parent.parent / "util" / "gdb_cmds.txt"), "r",
                     encoding="utf-8", errors="replace") as f:
            for line in f:
                if line.rstrip() and not line.startswith("#") and not line.startswith("echo"):
                    extracted_gdb_cmds.append("-ex")
                    extracted_gdb_cmds.append("%s" % line.rstrip())
        no_main_log_gdb_log = subprocess.run(
            (["gdb", "-n", "-batch"] + extracted_gdb_cmds + ["--args"] +
             [str(x) for x in runthis]),
            check=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE)
        auxCrashData = no_main_log_gdb_log.stdout

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = CrashInfo.CrashInfo.fromRawCrashData(
        out, err, pc, auxCrashData=auxCrashData)

    create_collector.printCrashInfo(crashInfo)
    # We only care about crashes and assertion failures on shells with no symbols
    # Note that looking out for the Assertion failure message is highly SpiderMonkey-specific
    if not isinstance(crashInfo, CrashInfo.NoCrashInfo) or \
            "Assertion failure: " in str(crashInfo.rawStderr) or \
            "Segmentation fault" in str(crashInfo.rawStderr) or \
            "Bus error" in str(crashInfo.rawStderr):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

    try:
        match = options.collector.search(crashInfo)
        if match[0] is not None:
            create_collector.printMatchingSignature(match)
            lev = JS_FINE
    except UnicodeDecodeError:  # Sometimes FM throws due to unicode issues
        print("Note: FuzzManager is throwing a UnicodeDecodeError, signature matching skipped")
        match = False

    print("%s | %s" % (logPrefix, summaryString(issues, lev, runinfo.elapsedtime)))

    if lev != JS_FINE:
        summary_log = (logPrefix.parent / (logPrefix.stem + "-summary")).with_suffix(".txt")
        with io.open(str(summary_log), "w", encoding="utf-8", errors="replace") as f:
            f.writelines([
                "Number: " + str(logPrefix) + "\n",
                "Command: " + " ".join(quote(str(x)) for x in runthis) + "\n"
            ] + ["Status: " + i + "\n" for i in issues])

    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo  # pylint: disable=invalid-name
    self.match = match
    self.runinfo = runinfo
    self.return_code = runinfo.return_code
def main(argv=None):
    '''Command line options.

    libFuzzer driver loop: builds a ProgramConfiguration, repeatedly runs the
    target under a LibFuzzerMonitor, matches crashes against known FuzzManager
    signatures, and submits new ones.  Returns a non-zero int on usage errors.
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(usage='%s [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main arguments", description=None)
    fmGroup = parser.add_argument_group(title="FuzzManager specific options",
                                        description="""Values for the options listed here are typically provided through FuzzManager configuration files, but can be overwritten using these options:""")

    mainGroup.add_argument('--version', action='version', version=program_version_string)
    mainGroup.add_argument('--cmd', dest='cmd', action='store_true', help="Command with parameters to run")
    mainGroup.add_argument('--env', dest='env', nargs='+', type=str,
                           help="List of environment variables in the form 'KEY=VALUE'")

    # Settings
    fmGroup.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR")
    fmGroup.add_argument("--serverhost", dest="serverhost",
                         help="Server hostname for remote signature management", metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto",
                         help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                         help="File containing the server authentication token", metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform", help="Platform this crash appeared on",
                         metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version",
                         help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os", help="OS this crash appeared on",
                         metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str,
                         help="List of metadata variables in the form 'KEY=VALUE'")

    parser.add_argument('rargs', nargs=argparse.REMAINDER, help=argparse.SUPPRESS)

    if len(argv) == 0:
        parser.print_help()
        return 2

    # process options
    opts = parser.parse_args(argv)

    if not opts.rargs:
        print("Error: No arguments specified", file=sys.stderr)
        return 2

    binary = opts.rargs[0]
    if not os.path.exists(binary):
        print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
        return 2

    configuration = ProgramConfiguration.fromBinary(binary)
    if configuration == None:
        print("Error: Failed to load program configuration based on binary", file=sys.stderr)
        return 2

    # NOTE(review): the following block unconditionally requires --platform/--product/--os
    # and then overwrites the configuration derived from the binary above — looks like it
    # was intended to be the fallback for a missing .fuzzmanagerconf; confirm intent.
    if opts.platform == None or opts.product == None or opts.os == None:
        print("Error: Must use binary configuration file or specify/configure at least --platform, --product and --os",
              file=sys.stderr)
        return 2

    configuration = ProgramConfiguration(opts.product, opts.platform, opts.os, opts.product_version)

    env = {}
    if opts.env:
        env = dict(kv.split('=', 1) for kv in opts.env)
        configuration.addEnvironmentVariables(env)

    # Copy the system environment variables by default and overwrite them
    # if they are specified through env.
    env = dict(os.environ)
    if opts.env:
        oenv = dict(kv.split('=', 1) for kv in opts.env)
        configuration.addEnvironmentVariables(oenv)
        for envkey in oenv:
            env[envkey] = oenv[envkey]

    args = opts.rargs[1:]
    if args:
        configuration.addProgramArguments(args)

    metadata = {}
    if opts.metadata:
        metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
        configuration.addMetadata(metadata)

    # Set LD_LIBRARY_PATH for convenience
    if not 'LD_LIBRARY_PATH' in env:
        env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto,
                          serverauthtoken, opts.clientid, opts.tool)

    signature_repeat_count = 0
    last_signature = None

    while(True):
        # Run the target once; LibFuzzerMonitor scrapes stderr for the ASan trace/testcase.
        process = subprocess.Popen(
            opts.rargs,
            # stdout=None,
            stderr=subprocess.PIPE,
            env=env,
            universal_newlines=True
        )

        monitor = LibFuzzerMonitor(process.stderr)
        monitor.start()
        monitor.join()

        print("Process terminated, processing results...", file=sys.stderr)

        trace = monitor.getASanTrace()
        testcase = monitor.getTestcase()

        crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

        (sigfile, metadata) = collector.search(crashInfo)

        if sigfile != None:
            # Track how often the same signature repeats back-to-back.
            if last_signature == sigfile:
                signature_repeat_count += 1
            else:
                last_signature = sigfile
                signature_repeat_count = 0

            print("Crash matches signature %s, not submitting..." % sigfile, file=sys.stderr)
        else:
            collector.generate(crashInfo, forceCrashAddress=True, forceCrashInstruction=False, numFrames=8)
            collector.submit(crashInfo, testcase)
            print("Successfully submitted crash.", file=sys.stderr)

        if signature_repeat_count >= 10:
            print("Too many crashes with the same signature, exiting...", file=sys.stderr)
            break
        else:
            # NOTE(review): flattened source made this else's pairing ambiguous;
            # it reads as the "known signature, keep looping" message — confirm.
            print("Move along, nothing to see...")


### Program Configurations

# Notebook-style demo cells below (each guarded so imports don't run on library import).
if __name__ == '__main__':
    print('\n### Program Configurations')

if __name__ == '__main__':
    sys.path.append('FuzzManager')

if __name__ == '__main__':
    from FTB.ProgramConfiguration import ProgramConfiguration  # type: ignore

if __name__ == '__main__':
    configuration = ProgramConfiguration.fromBinary('simply-buggy/simple-crash')
    # Bare expression (notebook cell output); has no effect as a script.
    (configuration.product, configuration.platform)

### Crash Info

if __name__ == '__main__':
    print('\n### Crash Info')

if __name__ == '__main__':
    from FTB.Signatures.CrashInfo import CrashInfo  # type: ignore

if __name__ == '__main__':
    cmd = ["simply-buggy/simple-crash"]
    result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
def write_aggregated_stats(base_dirs, outfile, cmdline_path=None):
    '''
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.

    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.
    '''
    # Which fields to add
    wanted_fields_total = [
        'execs_done',
        'execs_per_sec',
        'pending_favs',
        'pending_total',
        'variable_paths',
        'unique_crashes',
        'unique_hangs']

    # Which fields to aggregate by mean
    wanted_fields_mean = ['exec_timeout']

    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ['cycles_done', 'bitmap_cvg']

    # Which fields should be aggregated by max
    wanted_fields_max = ['last_path']

    # Warnings to include
    warnings = list()

    aggregated_stats = {}

    for field in wanted_fields_total:
        aggregated_stats[field] = 0

    for field in wanted_fields_mean:
        # (running sum, count) pair for later mean conversion
        aggregated_stats[field] = (0, 0)

    for field in wanted_fields_all:
        aggregated_stats[field] = []

    def convert_num(num):
        # Parse an AFL stats value: float if it has a decimal point, else int.
        if '.' in num:
            return float(num)
        return int(num)

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        if os.path.exists(stats_path):
            with open(stats_path, 'r') as stats_file:
                stats = stats_file.read()

            for line in stats.splitlines():
                # fuzzer_stats lines are "name : value"
                (field_name, field_val) = line.split(':', 1)
                field_name = field_name.strip()
                field_val = field_val.strip()

                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (val + convert_num(field_val), cnt + 1)
                elif field_name in wanted_fields_all:
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    num_val = convert_num(field_val)
                    if (field_name not in aggregated_stats) or aggregated_stats[field_name] < num_val:
                        aggregated_stats[field_name] = num_val

    # If we don't have any data here, then the fuzzers haven't written any statistics yet
    if not aggregated_stats:
        return

    # Mean conversion
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            aggregated_stats[field_name] = val

    # Verify fuzzmanagerconf exists and can be parsed
    _, cmdline = command_file_to_list(cmdline_path)
    target_binary = cmdline[0] if cmdline else None

    if target_binary is not None:
        if not os.path.isfile("%s.fuzzmanagerconf" % target_binary):
            warnings.append("WARNING: Missing %s.fuzzmanagerconf\n" % target_binary)
        elif ProgramConfiguration.fromBinary(target_binary) is None:
            warnings.append("WARNING: Invalid %s.fuzzmanagerconf\n" % target_binary)

    # Look for unreported crashes
    failed_reports = 0
    for base_dir in base_dirs:
        crashes_dir = os.path.join(base_dir, "crashes")
        if not os.path.isdir(crashes_dir):
            continue
        for crash_file in os.listdir(crashes_dir):
            if crash_file.endswith(".failed"):
                failed_reports += 1
    if failed_reports:
        warnings.append("WARNING: Unreported crashes detected (%d)\n" % failed_reports)

    # Write out data
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)

    # Column width for aligned "name : value" output
    max_keylen = max([len(x) for x in fields])

    # Lock file guards concurrent writers (multiple fuzzer instances).
    with InterProcessLock(outfile + ".lock"), open(outfile, 'w') as f:
        for field in fields:
            if field not in aggregated_stats:
                continue
            val = aggregated_stats[field]
            if isinstance(val, list):
                val = " ".join(val)
            f.write("%s%s: %s\n" % (field, " " * (max_keylen + 1 - len(field)), val))
        for warning in warnings:
            f.write(warning)

    return
def main(args=None):
    '''Command line options.

    FuzzManager Collector CLI: parses one of several mutually exclusive actions
    (--refresh/--submit/--search/--generate/--autosubmit/--download/
    --download-all/--get-clientid), builds a ProgramConfiguration and CrashInfo
    where needed, and dispatches to a Collector.  Returns an int exit code.
    '''

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version',
                        version='%s v%s (%s)' % (__file__, __version__, __updated__))

    # Crash information
    parser.add_argument("--stdout", help="File containing STDOUT data", metavar="FILE")
    parser.add_argument("--stderr", help="File containing STDERR data", metavar="FILE")
    parser.add_argument("--crashdata", help="File containing external crash data", metavar="FILE")

    # Actions
    action_group = parser.add_argument_group(
        "Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--refresh", action='store_true', help="Perform a signature refresh")
    actions.add_argument("--submit", action='store_true', help="Submit a signature to the server")
    actions.add_argument("--search", action='store_true', help="Search cached signatures for the given crash")
    actions.add_argument(
        "--generate",
        action='store_true',
        help="Create a (temporary) local signature in the cache directory")
    actions.add_argument(
        "--autosubmit",
        action='store_true',
        help=("Go into auto-submit mode. In this mode, all remaining arguments are interpreted "
              "as the crashing command. This tool will automatically obtain GDB crash information "
              "and submit it."))
    actions.add_argument(
        "--download",
        type=int,
        help="Download the testcase for the specified crash entry",
        metavar="ID")
    actions.add_argument(
        "--download-all",
        type=int,
        help="Download all testcases for the specified signature entry",
        metavar="ID")
    actions.add_argument(
        "--get-clientid",
        action='store_true',
        help="Print the client ID used when submitting issues")

    # Settings
    parser.add_argument("--sigdir", help="Signature cache directory", metavar="DIR")
    parser.add_argument("--serverhost", help="Server hostname for remote signature management", metavar="HOST")
    parser.add_argument("--serverport", type=int, help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", help="Server protocol to use (default is https)", metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid", help="Client ID to use when submitting issues", metavar="ID")
    parser.add_argument("--platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    parser.add_argument("--product", help="Product this crash appeared on", metavar="PRODUCT")
    parser.add_argument("--productversion", dest="product_version",
                        help="Product version this crash appeared on", metavar="VERSION")
    parser.add_argument("--os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    parser.add_argument("--tool", help="Name of the tool that found this issue", metavar="NAME")
    parser.add_argument(
        '--args',
        nargs='+',
        type=str,
        help="List of program arguments. Backslashes can be used for escaping and are stripped.")
    parser.add_argument(
        '--env',
        nargs='+',
        type=str,
        help="List of environment variables in the form 'KEY=VALUE'")
    parser.add_argument(
        '--metadata',
        nargs='+',
        type=str,
        help="List of metadata variables in the form 'KEY=VALUE'")
    parser.add_argument(
        "--binary",
        help="Binary that has a configuration file for reading",
        metavar="BINARY")
    parser.add_argument("--testcase", help="File containing testcase", metavar="FILE")
    parser.add_argument(
        "--testcasequality",
        default=0,
        type=int,
        help="Integer indicating test case quality (%(default)s is best and default)",
        metavar="VAL")
    parser.add_argument(
        "--testcasesize",
        type=int,
        help="Integer indicating test case size (default is size of testcase data)",
        metavar="SIZE")

    # Options that affect how signatures are generated
    parser.add_argument(
        "--forcecrashaddr",
        action='store_true',
        help="Force including the crash address into the signature")
    parser.add_argument(
        "--forcecrashinst",
        action='store_true',
        help="Force including the crash instruction into the signature (GDB only)")
    parser.add_argument(
        "--numframes",
        default=8,
        type=int,
        help="How many frames to include into the signature (default: %(default)s)")

    parser.add_argument('rargs', nargs=argparse.REMAINDER)

    # process options
    opts = parser.parse_args(args=args)

    # In autosubmit mode, we try to open a configuration file for the binary specified
    # on the command line. It should contain the binary-specific settings for submitting.
    if opts.autosubmit:
        if not opts.rargs:
            parser.error("Action --autosubmit requires test arguments to be specified")

        # Store the binary candidate only if --binary wasn't also specified
        if not opts.binary:
            opts.binary = opts.rargs[0]

        # We also need to check that (apart from the binary), there is only one file on the command line
        # (the testcase), if it hasn't been explicitely specified.
        testcase = opts.testcase
        testcaseidx = None
        if testcase is None:
            for idx, arg in enumerate(opts.rargs[1:]):
                if os.path.exists(arg):
                    if testcase:
                        parser.error("Multiple potential testcases specified on command line. "
                                     "Must explicitly specify test using --testcase.")
                    testcase = arg
                    testcaseidx = idx

    # Either --autosubmit was specified, or someone specified --binary manually
    # Check that the binary actually exists
    if opts.binary and not os.path.exists(opts.binary):
        parser.error("Error: Specified binary does not exist: %s" % opts.binary)

    stdout = None
    stderr = None
    crashdata = None
    crashInfo = None
    args = None
    env = None
    metadata = {}

    if opts.search or opts.generate or opts.submit or opts.autosubmit:
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))

        if opts.autosubmit:
            # Try to automatically get arguments from the command line
            # If the testcase is not the last argument, leave it in the
            # command line arguments and replace it with a generic placeholder.
            if testcaseidx == len(opts.rargs[1:]) - 1:
                args = opts.rargs[1:-1]
            else:
                args = opts.rargs[1:]
                if testcaseidx is not None:
                    args[testcaseidx] = "TESTFILE"
        else:
            if opts.args:
                args = [arg.replace('\\', '') for arg in opts.args]

        if opts.env:
            env = dict(kv.split('=', 1) for kv in opts.env)

        # Start without any ProgramConfiguration
        configuration = None

        # If we have a binary, try using that to create our ProgramConfiguration
        if opts.binary:
            configuration = ProgramConfiguration.fromBinary(opts.binary)
            if configuration:
                if env:
                    configuration.addEnvironmentVariables(env)
                if args:
                    configuration.addProgramArguments(args)
                if metadata:
                    configuration.addMetadata(metadata)

        # If configuring through binary failed, try to manually create ProgramConfiguration from command line arguments
        if configuration is None:
            if opts.platform is None or opts.product is None or opts.os is None:
                parser.error("Must specify/configure at least --platform, --product and --os")
            configuration = ProgramConfiguration(opts.product, opts.platform, opts.os,
                                                 opts.product_version, env, args, metadata)

        if not opts.autosubmit:
            if opts.stderr is None and opts.crashdata is None:
                parser.error("Must specify at least either --stderr or --crashdata file")

            if opts.stdout:
                with open(opts.stdout) as f:
                    stdout = f.read()

            if opts.stderr:
                with open(opts.stderr) as f:
                    stderr = f.read()

            if opts.crashdata:
                with open(opts.crashdata) as f:
                    crashdata = f.read()

            crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration,
                                                   auxCrashData=crashdata)
            if opts.testcase:
                (testCaseData, isBinary) = Collector.read_testcase(opts.testcase)
                if not isBinary:
                    crashInfo.testcase = testCaseData

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto,
                          serverauthtoken, opts.clientid, opts.tool)

    if opts.refresh:
        collector.refresh()
        return 0

    if opts.submit:
        testcase = opts.testcase
        collector.submit(crashInfo, testcase, opts.testcasequality, opts.testcasesize, metadata)
        return 0

    if opts.search:
        (sig, metadata) = collector.search(crashInfo)
        if sig is None:
            print("No match found", file=sys.stderr)
            return 3
        print(sig)
        if metadata:
            print(json.dumps(metadata, indent=4))
        return 0

    if opts.generate:
        sigFile = collector.generate(crashInfo, opts.forcecrashaddr, opts.forcecrashinst,
                                     opts.numframes)
        if not sigFile:
            print("Failed to generate a signature for the given crash information.",
                  file=sys.stderr)
            return 1
        print(sigFile)
        return 0

    if opts.autosubmit:
        runner = AutoRunner.fromBinaryArgs(opts.rargs[0], opts.rargs[1:])
        if runner.run():
            crashInfo = runner.getCrashInfo(configuration)
            collector.submit(crashInfo, testcase, opts.testcasequality, opts.testcasesize, metadata)
        else:
            print("Error: Failed to reproduce the given crash, cannot submit.",
                  file=sys.stderr)
            return 1

    if opts.download:
        (retFile, retJSON) = collector.download(opts.download)
        if not retFile:
            print("Specified crash entry does not have a testcase", file=sys.stderr)
            return 1

        if "args" in retJSON and retJSON["args"]:
            args = json.loads(retJSON["args"])
            print("Command line arguments: %s" % " ".join(args))
            print("")

        if "env" in retJSON and retJSON["env"]:
            env = json.loads(retJSON["env"])
            # NOTE(review): comma here passes the joined string as a second print()
            # argument instead of %-formatting it — looks like a latent bug; confirm.
            print("Environment variables: %s",
                  " ".join("%s = %s" % (k, v) for (k, v) in env.items()))
            print("")

        if "metadata" in retJSON and retJSON["metadata"]:
            metadata = json.loads(retJSON["metadata"])
            print("== Metadata ==")
            for k, v in metadata.items():
                print("%s = %s" % (k, v))
            print("")

        print(retFile)
        return 0

    if opts.download_all:
        downloaded = False

        for result in collector.download_all(opts.download_all):
            downloaded = True
            print(result)

        if not downloaded:
            print("Specified signature does not have any testcases", file=sys.stderr)
            return 1

        return 0

    if opts.get_clientid:
        print(collector.clientId)
        return 0
def main(argv=None):
    """Command line entry point for the AFL/libFuzzer management daemon.

    Parses arguments, then dispatches to the requested action: S3 queue/corpus
    management, FuzzManager crash submission, the libFuzzer monitor loop, or
    the AFL monitoring loop.

    Args:
        argv (list of str): Arguments to parse; defaults to ``sys.argv[1:]``.

    Returns:
        int: 0 on success, 2 on usage/configuration error. The libFuzzer and
        AFL monitoring loops do not return normally (they loop until killed or,
        for libFuzzer, until too many identical crash signatures are seen).
    """
    program_name = os.path.basename(sys.argv[0])

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(
        usage='%s --libfuzzer or --aflfuzz [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main Options", description=None)
    aflGroup = parser.add_argument_group(
        title="AFL Options", description="Use these arguments in AFL mode")
    libfGroup = parser.add_argument_group(
        title="Libfuzzer Options", description="Use these arguments in Libfuzzer mode")
    fmGroup = parser.add_argument_group(
        title="FuzzManager Options", description="Use these to specify FuzzManager parameters")

    mainGroup.add_argument("--libfuzzer", dest="libfuzzer", action='store_true',
                           help="Enable LibFuzzer mode")
    mainGroup.add_argument("--aflfuzz", dest="aflfuzz", action='store_true',
                           help="Enable AFL mode")
    mainGroup.add_argument("--fuzzmanager", dest="fuzzmanager", action='store_true',
                           help="Use FuzzManager to submit crash results")

    libfGroup.add_argument('--env', dest='env', nargs='+', type=str,
                           help="List of environment variables in the form 'KEY=VALUE'")
    libfGroup.add_argument('--cmd', dest='cmd', action='store_true',
                           help="Command with parameters to run")
    libfGroup.add_argument("--sigdir", dest="sigdir",
                           help="Signature cache directory", metavar="DIR")

    fmGroup.add_argument("--fuzzmanager-toolname", dest="fuzzmanager_toolname",
                         help="Override FuzzManager tool name (for submitting crash results)")
    fmGroup.add_argument("--custom-cmdline-file", dest="custom_cmdline_file",
                         help="Path to custom cmdline file", metavar="FILE")
    fmGroup.add_argument("--env-file", dest="env_file",
                         help="Path to a file with additional environment variables", metavar="FILE")
    fmGroup.add_argument("--serverhost",
                         help="Server hostname for remote signature management.", metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int,
                         help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto",
                         help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                         help="File containing the server authentication token", metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid",
                         help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform",
                         help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product",
                         help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version",
                         help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os",
                         help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool",
                         help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str,
                         help="List of metadata variables in the form 'KEY=VALUE'")

    aflGroup.add_argument("--s3-queue-upload", dest="s3_queue_upload", action='store_true',
                          help="Use S3 to synchronize queues")
    aflGroup.add_argument("--s3-queue-cleanup", dest="s3_queue_cleanup", action='store_true',
                          help="Cleanup S3 queue entries older than specified refresh interval")
    aflGroup.add_argument("--s3-queue-status", dest="s3_queue_status", action='store_true',
                          help="Display S3 queue status")
    aflGroup.add_argument("--s3-build-download", dest="s3_build_download",
                          help="Use S3 to download the build for the specified project",
                          metavar="DIR")
    aflGroup.add_argument("--s3-build-upload", dest="s3_build_upload",
                          help="Use S3 to upload a new build for the specified project",
                          metavar="FILE")
    aflGroup.add_argument("--s3-corpus-download", dest="s3_corpus_download",
                          help="Use S3 to download the test corpus for the specified project",
                          metavar="DIR")
    aflGroup.add_argument("--s3-corpus-download-size", dest="s3_corpus_download_size",
                          help="When downloading the corpus, select only SIZE files randomly",
                          metavar="SIZE")
    aflGroup.add_argument("--s3-corpus-upload", dest="s3_corpus_upload",
                          help="Use S3 to upload a test corpus for the specified project",
                          metavar="DIR")
    aflGroup.add_argument("--s3-corpus-replace", dest="s3_corpus_replace", action='store_true',
                          help="In conjunction with --s3-corpus-upload, deletes all other remote test files")
    aflGroup.add_argument("--s3-corpus-refresh", dest="s3_corpus_refresh",
                          help="Download queues and corpus from S3, combine and minimize, then re-upload.",
                          metavar="DIR")
    aflGroup.add_argument("--s3-corpus-status", dest="s3_corpus_status", action='store_true',
                          help="Display S3 corpus status")
    aflGroup.add_argument("--test-file", dest="test_file",
                          help="Optional path to copy the test file to before reproducing",
                          metavar="FILE")
    aflGroup.add_argument("--afl-timeout", dest="afl_timeout", type=int, default=1000,
                          help="Timeout per test to pass to AFL for corpus refreshing",
                          metavar="MSECS")
    aflGroup.add_argument("--firefox", dest="firefox", action='store_true',
                          help="Test Program is Firefox (requires FFPuppet installed)")
    aflGroup.add_argument("--firefox-prefs", dest="firefox_prefs",
                          help="Path to prefs.js file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-extensions", nargs='+', type=str, dest="firefox_extensions",
                          help="Path extension file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-testpath", dest="firefox_testpath",
                          help="Path to file to open with Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-start-afl", dest="firefox_start_afl", metavar="FILE",
                          help="Start AFL with the given Firefox binary, remaining arguments being passed to AFL")
    aflGroup.add_argument("--s3-refresh-interval", dest="s3_refresh_interval", type=int,
                          default=86400,
                          help="How often the s3 corpus is refreshed (affects queue cleaning)",
                          metavar="SECS")
    aflGroup.add_argument("--afl-output-dir", dest="afloutdir",
                          help="Path to the AFL output directory to manage", metavar="DIR")
    aflGroup.add_argument("--afl-binary-dir", dest="aflbindir",
                          help="Path to the AFL binary directory to use", metavar="DIR")
    aflGroup.add_argument("--afl-stats", dest="aflstats",
                          help="Collect aggregated statistics while scanning output directories",
                          metavar="FILE")
    aflGroup.add_argument("--s3-bucket", dest="s3_bucket",
                          help="Name of the S3 bucket to use", metavar="NAME")
    aflGroup.add_argument("--project", dest="project",
                          help="Name of the subfolder/project inside the S3 bucket", metavar="NAME")
    aflGroup.add_argument('rargs', nargs=argparse.REMAINDER)

    if len(argv) == 0:
        parser.print_help()
        return 2

    opts = parser.parse_args(argv)

    # AFL mode is the default when no mode was requested explicitly.
    if not opts.libfuzzer and not opts.aflfuzz:
        opts.aflfuzz = True

    if opts.cmd and opts.aflfuzz:
        if not opts.firefox:
            # BUGFIX: was file=sys.sdderr, which raised AttributeError instead
            # of printing the usage error.
            print("Error: Use --cmd either with libfuzzer or with afl in firefox mode",
                  file=sys.stderr)
            return 2

    if opts.libfuzzer:
        if not opts.rargs:
            print("Error: No arguments specified", file=sys.stderr)
            return 2

        binary = opts.rargs[0]
        if not os.path.exists(binary):
            print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
            return 2

        configuration = ProgramConfiguration.fromBinary(binary)
        if configuration is None:
            print("Error: Failed to load program configuration based on binary",
                  file=sys.stderr)
            return 2

        # Copy the system environment variables by default and overwrite them
        # if they are specified through --env. Only the explicit overrides are
        # recorded on the FuzzManager configuration. (The original code added
        # the same overrides to the configuration twice; deduplicated here.)
        env = dict(os.environ)
        if opts.env:
            oenv = dict(kv.split('=', 1) for kv in opts.env)
            configuration.addEnvironmentVariables(oenv)
            env.update(oenv)

        args = opts.rargs[1:]
        if args:
            configuration.addProgramArguments(args)

        metadata = {}
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
            configuration.addMetadata(metadata)

        # Set LD_LIBRARY_PATH for convenience
        if 'LD_LIBRARY_PATH' not in env:
            env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

        collector = Collector(opts.sigdir, opts.fuzzmanager_toolname)

        # Stop once the same signature is hit repeatedly: the target is likely
        # stuck in a single crash.
        signature_repeat_count = 0
        last_signature = None

        while True:
            process = subprocess.Popen(
                opts.rargs,
                # stdout=None,
                stderr=subprocess.PIPE,
                env=env,
                universal_newlines=True)

            monitor = LibFuzzerMonitor(process.stderr)
            monitor.start()
            monitor.join()

            print("Process terminated, processing results...", file=sys.stderr)

            trace = monitor.getASanTrace()
            testcase = monitor.getTestcase()

            crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

            (sigfile, metadata) = collector.search(crashInfo)

            if sigfile is not None:
                if last_signature == sigfile:
                    signature_repeat_count += 1
                else:
                    last_signature = sigfile
                    signature_repeat_count = 0
                print("Crash matches signature %s, not submitting..." % sigfile,
                      file=sys.stderr)
            else:
                collector.generate(crashInfo, forceCrashAddress=True,
                                   forceCrashInstruction=False, numFrames=8)
                collector.submit(crashInfo, testcase)
                print("Successfully submitted crash.", file=sys.stderr)

            if signature_repeat_count >= 10:
                print("Too many crashes with the same signature, exiting...",
                      file=sys.stderr)
                break

    if opts.aflfuzz:
        if opts.firefox or opts.firefox_start_afl:
            if not haveFFPuppet:
                print("Error: --firefox and --firefox-start-afl require FFPuppet to be installed",
                      file=sys.stderr)
                return 2
            if opts.custom_cmdline_file:
                print("Error: --custom-cmdline-file is incompatible with firefox options",
                      file=sys.stderr)
                return 2
            if not opts.firefox_prefs or not opts.firefox_testpath:
                print("Error: --firefox and --firefox-start-afl require --firefox-prefs and --firefox-testpath to be specified",
                      file=sys.stderr)
                return 2

        if opts.firefox_start_afl:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for starting AFL with firefox",
                      file=sys.stderr)
                return 2

            (ffp, cmd, env) = setup_firefox(opts.firefox_start_afl, opts.firefox_prefs,
                                            opts.firefox_extensions, opts.firefox_testpath)

            afl_cmd = [os.path.join(opts.aflbindir, "afl-fuzz")]

            opts.rargs.remove("--")
            afl_cmd.extend(opts.rargs)
            afl_cmd.extend(cmd)

            try:
                subprocess.call(afl_cmd, env=env)
            except:  # noqa: E722 -- deliberately catch everything (incl. Ctrl-C) so clean_up runs
                traceback.print_exc()

            ffp.clean_up()
            return 0

        afl_out_dirs = []
        if opts.afloutdir:
            if not os.path.exists(os.path.join(opts.afloutdir, "crashes")):
                # The specified directory doesn't have a "crashes" sub directory.
                # Either the wrong directory was specified, or this is an AFL
                # multi-process synchronization directory. Try to figure this out here.
                sync_dirs = os.listdir(opts.afloutdir)
                for sync_dir in sync_dirs:
                    if os.path.exists(os.path.join(opts.afloutdir, sync_dir, "crashes")):
                        afl_out_dirs.append(os.path.join(opts.afloutdir, sync_dir))
                if not afl_out_dirs:
                    print("Error: Directory %s does not appear to be a valid AFL output/sync directory" % opts.afloutdir,
                          file=sys.stderr)
                    return 2
            else:
                afl_out_dirs.append(opts.afloutdir)

        # Upload and FuzzManager modes require specifying the AFL directory
        if opts.s3_queue_upload or opts.fuzzmanager:
            if not opts.afloutdir:
                print("Error: Must specify AFL output directory using --afl-output-dir",
                      file=sys.stderr)
                return 2

        if (opts.s3_queue_upload or opts.s3_corpus_refresh or opts.s3_build_download or
                opts.s3_build_upload or opts.s3_corpus_download or opts.s3_corpus_upload or
                opts.s3_queue_status):
            if not opts.s3_bucket or not opts.project:
                print("Error: Must specify both --s3-bucket and --project for S3 actions",
                      file=sys.stderr)
                return 2

        if opts.s3_queue_status:
            status_data = get_queue_status(opts.s3_bucket, opts.project)
            total_queue_files = 0
            for queue_name in status_data:
                print("Queue %s: %s" % (queue_name, status_data[queue_name]))
                total_queue_files += status_data[queue_name]
            print("Total queue files: %s" % total_queue_files)
            return 0

        if opts.s3_corpus_status:
            status_data = get_corpus_status(opts.s3_bucket, opts.project)
            total_corpus_files = 0
            for (status_dt, status_cnt) in sorted(status_data.items()):
                print("Added %s: %s" % (status_dt, status_cnt))
                total_corpus_files += status_cnt
            print("Total corpus files: %s" % total_corpus_files)
            return 0

        if opts.s3_queue_cleanup:
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project,
                             opts.s3_refresh_interval)
            return 0

        if opts.s3_build_download:
            download_build(opts.s3_build_download, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_build_upload:
            upload_build(opts.s3_build_upload, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_corpus_download:
            if opts.s3_corpus_download_size is not None:
                opts.s3_corpus_download_size = int(opts.s3_corpus_download_size)
            download_corpus(opts.s3_corpus_download, opts.s3_bucket, opts.project,
                            opts.s3_corpus_download_size)
            return 0

        if opts.s3_corpus_upload:
            upload_corpus(opts.s3_corpus_upload, opts.s3_bucket, opts.project,
                          opts.s3_corpus_replace)
            return 0

        if opts.s3_corpus_refresh:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for refreshing the test corpus",
                      file=sys.stderr)
                return 2

            if not os.path.exists(opts.s3_corpus_refresh):
                os.makedirs(opts.s3_corpus_refresh)

            queues_dir = os.path.join(opts.s3_corpus_refresh, "queues")

            print("Cleaning old AFL queues from s3://%s/%s/queues/" % (opts.s3_bucket, opts.project))
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project,
                             opts.s3_refresh_interval)

            print("Downloading AFL queues from s3://%s/%s/queues/ to %s" %
                  (opts.s3_bucket, opts.project, queues_dir))
            download_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project)

            cmdline_file = os.path.join(opts.s3_corpus_refresh, "cmdline")
            if not os.path.exists(cmdline_file):
                print("Error: Failed to download a cmdline file from queue directories.",
                      file=sys.stderr)
                return 2

            print("Downloading build")
            download_build(os.path.join(opts.s3_corpus_refresh, "build"),
                           opts.s3_bucket, opts.project)

            # Read the target command line; the original code shadowed the
            # cmdline_file path variable with the file handle here.
            with open(cmdline_file, 'r') as cmdline_fd:
                cmdline = cmdline_fd.read().splitlines()

            # Assume cmdline[0] is the name of the binary
            binary_name = os.path.basename(cmdline[0])

            # Try locating our binary in the build we just unpacked
            binary_search_result = [
                os.path.join(dirpath, filename)
                for dirpath, dirnames, filenames in
                os.walk(os.path.join(opts.s3_corpus_refresh, "build"))
                for filename in filenames
                if (filename == binary_name and
                    (stat.S_IXUSR & os.stat(os.path.join(dirpath, filename))[stat.ST_MODE]))
            ]

            if not binary_search_result:
                print("Error: Failed to locate binary %s in unpacked build." % binary_name,
                      file=sys.stderr)
                return 2

            if len(binary_search_result) > 1:
                print("Error: Binary name %s is ambiguous in unpacked build." % binary_name,
                      file=sys.stderr)
                return 2

            cmdline[0] = binary_search_result[0]

            # Download our current corpus into the queues directory as well
            print("Downloading corpus from s3://%s/%s/corpus/ to %s" %
                  (opts.s3_bucket, opts.project, queues_dir))
            download_corpus(queues_dir, opts.s3_bucket, opts.project)

            # Ensure the directory for our new tests is empty
            updated_tests_dir = os.path.join(opts.s3_corpus_refresh, "tests")
            if os.path.exists(updated_tests_dir):
                shutil.rmtree(updated_tests_dir)
            os.mkdir(updated_tests_dir)

            # Run afl-cmin
            afl_cmin = os.path.join(opts.aflbindir, "afl-cmin")
            if not os.path.exists(afl_cmin):
                print("Error: Unable to locate afl-cmin binary.", file=sys.stderr)
                return 2

            if opts.firefox:
                (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], opts.firefox_prefs,
                                                        opts.firefox_extensions,
                                                        opts.firefox_testpath)
                cmdline = ffCmd

            afl_cmdline = [afl_cmin, '-e', '-i', queues_dir, '-o', updated_tests_dir,
                           '-t', str(opts.afl_timeout), '-m', 'none']

            if opts.test_file:
                afl_cmdline.extend(['-f', opts.test_file])

            afl_cmdline.extend(cmdline)

            print("Running afl-cmin")
            with open(os.devnull, 'w') as devnull:
                env = os.environ.copy()
                env['LD_LIBRARY_PATH'] = os.path.dirname(cmdline[0])
                if opts.firefox:
                    env.update(ffEnv)
                subprocess.check_call(afl_cmdline, stdout=devnull, env=env)

            if opts.firefox:
                ffpInst.clean_up()

            # replace existing corpus with reduced corpus
            print("Uploading reduced corpus to s3://%s/%s/corpus/" % (opts.s3_bucket, opts.project))
            upload_corpus(updated_tests_dir, opts.s3_bucket, opts.project, corpus_delete=True)

            # Prune the queues directory once we successfully uploaded the new
            # test corpus, but leave everything that's part of our new corpus
            # so we don't have to download those files again.
            test_files = [
                file_name for file_name in os.listdir(updated_tests_dir)
                if os.path.isfile(os.path.join(updated_tests_dir, file_name))
            ]
            obsolete_queue_files = [
                file_name for file_name in os.listdir(queues_dir)
                if os.path.isfile(os.path.join(queues_dir, file_name)) and
                file_name not in test_files
            ]

            for file_name in obsolete_queue_files:
                os.remove(os.path.join(queues_dir, file_name))

        if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats:
            last_queue_upload = 0
            while True:
                if opts.fuzzmanager:
                    for afl_out_dir in afl_out_dirs:
                        scan_crashes(afl_out_dir, opts.custom_cmdline_file,
                                     opts.env_file, opts.fuzzmanager_toolname,
                                     opts.test_file)

                # Only upload queue files every 20 minutes
                if opts.s3_queue_upload and last_queue_upload < int(time.time()) - 1200:
                    for afl_out_dir in afl_out_dirs:
                        upload_queue_dir(afl_out_dir, opts.s3_bucket, opts.project,
                                         new_cov_only=True)
                    last_queue_upload = int(time.time())

                if opts.aflstats:
                    write_aggregated_stats(afl_out_dirs, opts.aflstats)

                time.sleep(10)
def scan_crashes(base_dir, cmdline_path=None, env_path=None, tool_name=None,
                 test_path=None, firefox=None, firefox_prefs=None,
                 firefox_extensions=None, firefox_testpath=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @type env_path: String
    @param env_path: Optional file containing environment variables.

    @type tool_name: String
    @param tool_name: Optional tool name to report to FuzzManager.

    @type test_path: String
    @param test_path: Optional filename where to copy the test before
                      attempting to reproduce a crash.

    @type firefox: bool
    @param firefox: If set, wrap the target through FFPuppet (requires the
                    firefox_* arguments below).

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        base_env = {}
        test_in_env = None
        if env_path:
            with open(env_path, 'r') as env_file:
                for line in env_file:
                    (name, val) = line.rstrip('\n').split("=", 1)
                    base_env[name] = val
                    # '@@' marks the variable that should receive the test file path.
                    if '@@' in val:
                        test_in_env = name

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        test_idx, cmdline = command_file_to_list(cmdline_path)
        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            # BUGFIX: this message was a broken multi-line string literal.
            print("Error: Creating program configuration from binary failed. "
                  "Check your binary configuration file.", file=sys.stderr)
            return 2

        collector = Collector(tool=tool_name)

        if firefox:
            (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], firefox_prefs,
                                                    firefox_extensions, firefox_testpath)
            cmdline = ffCmd
            base_env.update(ffEnv)

        for crash_file in crash_files:
            stdin = None
            env = None

            if base_env:
                env = dict(base_env)

            # Deliver the testcase via argv, env var, a fixed file path, or stdin
            # (in that order of preference).
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            elif test_in_env is not None:
                env[test_in_env] = env[test_in_env].replace('@@', crash_file)
            elif test_path is not None:
                shutil.copy(crash_file, test_path)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            print("Processing crash file %s" % crash_file, file=sys.stderr)

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], env=env, stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # Marker file so this crash is skipped on the next scan.
                open(crash_file + ".submitted", 'a').close()
                print("Success: Submitted crash to server.", file=sys.stderr)
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.",
                      file=sys.stderr)

        if firefox:
            ffpInst.clean_up()
def compareLevel(jsEngine, flags, infilename, logPrefix, options, showDetailedDiffs, quickMode):  # pylint: disable=invalid-name,missing-docstring,missing-return-doc,missing-return-type-doc,too-complex
    # pylint: disable=too-many-branches,too-many-arguments,too-many-locals

    # Run |infilename| in |jsEngine| under several JIT flag combinations and
    # compare stdout/stderr across runs to find correctness (differential) bugs.
    #
    # options dict must be one we can pass to js_interesting.ShellResult
    # we also use it directly for knownPath, timeout, and collector
    # Return: (lev, crashInfo) or (js_interesting.JS_FINE, None)

    combos = shell_flags.basicFlagSets(jsEngine)

    if quickMode:
        # Only used during initial fuzzing. Allowed to have false negatives.
        combos = [combos[0]]

    if flags:
        combos.append(flags)

    # One shell invocation per flag combination; run 0 serves as the baseline.
    commands = [[jsEngine] + combo + [infilename] for combo in combos]

    for i in range(0, len(commands)):
        prefix = logPrefix + "-r" + str(i)
        command = commands[i]
        r = js_interesting.ShellResult(options, command, prefix, True)  # pylint: disable=invalid-name

        # Detect OOM before the stderr filtering below, which may drop the evidence.
        oom = js_interesting.oomed(r.err)
        r.err = ignoreSomeOfStderr(r.err)

        if (r.return_code == 1 or r.return_code == 2) and (anyLineContains(r.out, '[[script] scriptArgs*]') or (anyLineContains(r.err, '[scriptfile] [scriptarg...]'))):
            print("Got usage error from:")
            print("  %s" % sps.shellify(command))
            # A usage error should only ever come from a non-baseline flag combo.
            assert i
            js_interesting.deleteLogs(prefix)
        elif r.lev > js_interesting.JS_OVERALL_MISMATCH:
            # would be more efficient to run lithium on one or the other, but meh
            print("%s | %s" % (infilename, js_interesting.summaryString(r.issues + ["compare_jit found a more serious bug"], r.lev, r.runinfo.elapsedtime)))
            with open(logPrefix + "-summary.txt", 'wb') as f:
                # NOTE(review): file opened in binary mode but written a str —
                # presumably Python 2 heritage; verify behavior under Python 3.
                f.write('\n'.join(r.issues + [sps.shellify(command), "compare_jit found a more serious bug"]) + '\n')
            print("  %s" % sps.shellify(command))
            return (r.lev, r.crashInfo)
        elif r.lev != js_interesting.JS_FINE or r.return_code != 0:
            print("%s | %s" % (infilename, js_interesting.summaryString(r.issues + ["compare_jit is not comparing output, because the shell exited strangely"], r.lev, r.runinfo.elapsedtime)))
            print("  %s" % sps.shellify(command))
            js_interesting.deleteLogs(prefix)
            # Abort the whole comparison only if the baseline run misbehaved.
            if not i:
                return (js_interesting.JS_FINE, None)
        elif oom:
            # If the shell or python hit a memory limit, we consider the rest of the computation
            # "tainted" for the purpose of correctness comparison.
            message = "compare_jit is not comparing output: OOM"
            print("%s | %s" % (infilename, js_interesting.summaryString(r.issues + [message], r.lev, r.runinfo.elapsedtime)))
            js_interesting.deleteLogs(prefix)
            if not i:
                return (js_interesting.JS_FINE, None)
        elif not i:
            # Stash output from this run (the first one), so for subsequent runs, we can compare against it.
            (r0, prefix0) = (r, prefix)  # pylint: disable=invalid-name
        else:
            # Compare the output of this run (r.out) to the output of the first run (r0.out), etc.

            def fpuOptionDisabledAsmOnOneSide(fpuAsmMsg):
                # True when an asm.js/wasm "disabled" diagnostic appears on the side
                # whose --no-fpu setting differs from the baseline: an expected,
                # benign stderr difference rather than a JIT bug.
                # pylint: disable=invalid-name,missing-docstring
                # pylint: disable=missing-return-doc,missing-return-type-doc
                fpuOptionDisabledAsm = fpuAsmMsg in r0.err or fpuAsmMsg in r.err  # pylint: disable=cell-var-from-loop,invalid-name
                fpuOptionDiffers = (("--no-fpu" in commands[0]) != ("--no-fpu" in command))  # pylint: disable=cell-var-from-loop,invalid-name
                return fpuOptionDisabledAsm and fpuOptionDiffers

            def optionDisabledAsmOnOneSide():
                # Same idea for --no-asmjs and its diagnostic message.
                # pylint: disable=invalid-name,missing-docstring,missing-return-doc
                # pylint: disable=missing-return-type-doc
                asmMsg = "asm.js type error: Disabled by javascript.options.asmjs"  # pylint: disable=invalid-name
                optionDisabledAsm = anyLineContains(r0.err, asmMsg) or anyLineContains(r.err, asmMsg)  # pylint: disable=cell-var-from-loop,invalid-name
                optionDiffers = (("--no-asmjs" in commands[0]) != ("--no-asmjs" in command))  # pylint: disable=cell-var-from-loop,invalid-name
                return optionDisabledAsm and optionDiffers

            mismatchErr = (r.err != r0.err and  # pylint: disable=invalid-name
                           # --no-fpu (on debug x86_32 only) turns off asm.js compilation, among other things.
                           # This should only affect asm.js diagnostics on stderr.
                           not fpuOptionDisabledAsmOnOneSide("asm.js type error: "
                                                             "Disabled by lack of floating point support") and
                           # And also wasm stuff. See bug 1243031.
                           not fpuOptionDisabledAsmOnOneSide("WebAssembly is not supported on the current device") and
                           not optionDisabledAsmOnOneSide())
            mismatchOut = (r.out != r0.out)  # pylint: disable=invalid-name

            if mismatchErr or mismatchOut:
                # Generate a short summary for stdout and a long summary for a "*-summary.txt" file.
                rerunCommand = sps.shellify(["python -m funfuzz.js.compare_jit",  # pylint: disable=invalid-name
                                             "--flags=" + " ".join(flags),
                                             "--timeout=" + str(options.timeout),
                                             options.knownPath, jsEngine,
                                             os.path.basename(infilename)])
                (summary, issues) = summarizeMismatch(mismatchErr, mismatchOut, prefix0, prefix)
                summary = "  " + sps.shellify(commands[0]) + "\n  " + sps.shellify(command) + "\n\n" + summary
                with open(logPrefix + "-summary.txt", 'wb') as f:
                    # NOTE(review): same binary-mode-vs-str concern as above.
                    f.write(rerunCommand + "\n\n" + summary)
                print("%s | %s" % (infilename, js_interesting.summaryString(issues, js_interesting.JS_OVERALL_MISMATCH, r.runinfo.elapsedtime)))
                if quickMode:
                    print(rerunCommand)
                if showDetailedDiffs:
                    print(summary)
                    print()
                # Create a crashInfo object with empty stdout, and stderr showing diffs
                pc = ProgramConfiguration.fromBinary(jsEngine)  # pylint: disable=invalid-name
                pc.addProgramArguments(flags)
                crashInfo = CrashInfo.CrashInfo.fromRawCrashData([], summary, pc)  # pylint: disable=invalid-name
                return (js_interesting.JS_OVERALL_MISMATCH, crashInfo)
            else:
                # print "compare_jit: match"
                js_interesting.deleteLogs(prefix)

    # All matched :)
    js_interesting.deleteLogs(prefix0)
    return (js_interesting.JS_FINE, None)
def __init__(self, options, runthis, logPrefix, inCompareJIT):
    """Run a JS shell command under timed_run and classify the outcome.

    Sets self.lev/out/err/issues/crashInfo/match/runinfo/return_code for the
    caller to inspect.

    Args:
        options: Parsed options object; this code reads valgrind, knownPath,
            timeout, shellIsDeterministic and collector from it.
        runthis (list of str): Shell command line; runthis[0] is the binary.
        logPrefix (str): Prefix for the "-out.txt"/"-err.txt"/"-crash.txt"/
            "-summary.txt" log files produced by this run.
        inCompareJIT (bool): Suppresses the jsfunfuzz-specific exit checks.
    """
    pathToBinary = runthis[0]
    # This relies on the shell being a local one from compileShell.py:
    # Ignore trailing ".exe" in Win, also abspath makes it work w/relative paths like './js'
    pc = ProgramConfiguration.fromBinary(os.path.abspath(pathToBinary).split('.')[0])
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        runthis = (inspectShell.constructVgCmdList(errorCode=VALGRIND_ERROR_EXIT_CODE) +
                   valgrindSuppressions(options.knownPath) +
                   runthis)

    preexec_fn = ulimitSet if os.name == 'posix' else None
    runinfo = timed_run.timed_run(runthis, options.timeout, logPrefix, preexec_fn=preexec_fn)

    lev = JS_FINE
    issues = []
    auxCrashData = []

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and 'readlines' everything into memory.
    with open(logPrefix + "-out.txt") as f:
        out = f.readlines()
    with open(logPrefix + "-err.txt") as f:
        err = f.readlines()

    if options.valgrind and runinfo.return_code == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        valgrindErrorPrefix = "==" + str(runinfo.pid) + "=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timed_run.CRASHED:
        if sps.grabCrashLog(runthis[0], runinfo.pid, logPrefix, True):
            with open(logPrefix + "-crash.txt") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif detect_malloc_errors.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.return_code == 0 and not inCompareJIT:
        # We might have(??) run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper.py" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append("[Non-crash bug] " + issue)

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = CrashInfo.CrashInfo.fromRawCrashData(out, err, pc, auxCrashData=auxCrashData)

    createCollector.printCrashInfo(crashInfo)

    # BUGFIX: 'match' was previously only assigned inside the branch below, so
    # every clean (non-crash) run raised NameError at 'self.match = match'.
    match = None

    # We only care about crashes and assertion failures on shells with no symbols
    # Note that looking out for the Assertion failure message is highly SpiderMonkey-specific
    if not isinstance(crashInfo, CrashInfo.NoCrashInfo) or \
            'Assertion failure: ' in str(crashInfo.rawStderr) or \
            'Segmentation fault' in str(crashInfo.rawStderr) or \
            'Bus error' in str(crashInfo.rawStderr):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

        match = options.collector.search(crashInfo)
        if match[0] is not None:
            # Known (already-signatured) crash: downgrade to JS_FINE.
            createCollector.printMatchingSignature(match)
            lev = JS_FINE

    print("%s | %s" % (logPrefix, summaryString(issues, lev, runinfo.elapsedtime)))

    if lev != JS_FINE:
        fileManipulation.writeLinesToFile(
            ['Number: ' + logPrefix + '\n',
             'Command: ' + sps.shellify(runthis) + '\n'] +
            ['Status: ' + i + "\n" for i in issues],
            logPrefix + '-summary.txt')

    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo
    # None when no crash/assert was detected; otherwise the collector.search result.
    self.match = match
    self.runinfo = runinfo
    self.return_code = runinfo.return_code
def scan_crashes(base_dir, cmdline_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        # One argument per line; '@@' marks the argument that receives the
        # path of the testcase to reproduce.
        with open(cmdline_path, 'r') as cmdline_file:
            for idx, line in enumerate(cmdline_file):
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            # BUGFIX: this message was a broken multi-line string literal; also
            # removed a stray debug print(cmdline) left over before this check.
            print("Error: Creating program configuration from binary failed. "
                  "Check your binary configuration file.", file=sys.stderr)
            return 2

        collector = Collector()

        for crash_file in crash_files:
            stdin = None

            # Deliver the testcase either through argv (when '@@' was present)
            # or through stdin.
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # Marker file so this crash is skipped on the next scan.
                open(crash_file + ".submitted", 'a').close()
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.",
                      file=sys.stderr)