def add_fault(self):
    """Package the current crash bucket and submit it to FuzzManager.

    Builds a ProgramConfiguration from the target binary, assembles the
    available crash data (stdout/stderr are not captured yet), zips the
    bucket as the testcase and submits everything via a Collector.
    """
    # Setup FuzzManager with target information and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.binary)

    # Prepare FuzzManager with crash information.
    stdout = "N/A"  # TODO: There is no plain stdout logger yet.
    stderr = "N/A"  # TODO: There is no plain stderr logger yet.
    # Fall back to an empty dict for the outer lookup: the previous code used
    # "N/A" here, which made the chained .get() raise AttributeError whenever
    # the bucket had no "crashlog" entry.
    auxdat = self.bucket.get("crashlog", {}).get("data", "N/A")
    metaData = None
    testcase = self.save_bucket_as_zip(self.bucket)
    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)

    # Submit crash report with testcase to FuzzManager.
    collector = Collector(tool="dharma")
    collector.submit(crash_info, testcase, metaData)
def OnFault(self, run, test, variationCount, monitorData, actionValues):
    """Peach fault callback: package crash data and submit it to FuzzManager.

    @param run: Peach run object (unused here, required by the callback API)
    @param test: Peach test object (unused here, required by the callback API)
    @param variationCount: iteration counter (unused here, required by the API)
    @param monitorData: dict of monitor output files (stdout/stderr/auxdat)
    @param actionValues: list of Peach action tuples describing the testcase
    """
    # Setup FuzzManager with information about target and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.target_binary)

    # Prepare FuzzManager with target and crash information.
    stdout = self._get_value_by_key(monitorData, "stdout.txt", "N/A")
    stderr = self._get_value_by_key(monitorData, "stderr.txt", "N/A")
    auxdat = self._get_value_by_key(monitorData, "auxdat.txt", "N/A")

    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)

    collector = Collector(tool="peach")

    # Write testcase content and any additional meta information to a
    # temporary ZIP archive.  (Renamed from `buffer`, which shadowed the
    # builtin of the same name.)
    zip_data = StringIO.StringIO()
    zip_buffer = zipfile.ZipFile(zip_data, 'w')

    # Collect |actionValues| crash information from Peach.
    for i, action in enumerate(actionValues):
        if len(action) > 2:
            data = action[2]
            fileName = "data_%d_%s_%s.txt" % (i, action[1], action[0])
            zip_buffer.writestr(fileName, data)
        if len(action) > 3 and action[1] != 'output':
            data = repr(action[3])
            fileName = "data_%d_%s_%s_action.txt" % (i, action[1], action[0])
            zip_buffer.writestr(fileName, data)
        if len(action) > 3 and action[1] == 'output':
            fileName = "data_%d_%s_%s_fileName.txt" % (i, action[1], action[0])
            data = action[3]
            zip_buffer.writestr(fileName, data)

    # Collect |monitorData| crash information from Peach.
    for k, v in monitorData.items():
        zip_buffer.writestr(k, v)

    zip_buffer.close()

    # The `with` statement closes the file on exit; the previous explicit
    # close() (and the seek(0) before getvalue(), which ignores the stream
    # position anyway) were redundant.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as testcase:
        testcase.write(zip_data.getvalue())

    # Submit crash report with testcase to FuzzManager.
    collector.submit(crash_info, testcase.name, metaData=None)
def runTest(self):
    """Instruction symptoms must match on register names and mnemonics in GDB traces."""
    config = ProgramConfiguration("test", "x86-64", "linux")

    crashSignature1 = '{ "symptoms" : [ { "type" : "instruction", "registerNames" : ["r14"] } ] }'
    crashSignature1Neg = '{ "symptoms" : [ { "type" : "instruction", "registerNames" : ["r14", "rax"] } ] }'
    crashSignature2 = '{ "symptoms" : [ { "type" : "instruction", "instructionName" : "mov" } ] }'
    crashSignature2Neg = '{ "symptoms" : [ { "type" : "instruction", "instructionName" : "cmp" } ] }'
    crashSignature3 = '{ "symptoms" : [ { "type" : "instruction", "instructionName" : "mov", "registerNames" : ["r14", "rbx"] } ] }'
    crashSignature3Neg = '{ "symptoms" : [ { "type" : "instruction", "instructionName" : "mov", "registerNames" : ["r14", "rax"] } ] }'

    instructionSig1 = CrashSignature(crashSignature1)
    instructionSig1Neg = CrashSignature(crashSignature1Neg)
    instructionSig2 = CrashSignature(crashSignature2)
    instructionSig2Neg = CrashSignature(crashSignature2Neg)
    instructionSig3 = CrashSignature(crashSignature3)
    instructionSig3Neg = CrashSignature(crashSignature3Neg)

    crashInfo2 = CrashInfo.fromRawCrashData([], [], config, auxCrashData=gdbSampleTrace2.splitlines())
    crashInfo3 = CrashInfo.fromRawCrashData([], [], config, auxCrashData=gdbSampleTrace3.splitlines())

    self.assertIsInstance(crashInfo2, GDBCrashInfo)
    self.assertIsInstance(crashInfo3, GDBCrashInfo)

    # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12).
    self.assertTrue(instructionSig1.matches(crashInfo2))
    self.assertFalse(instructionSig1Neg.matches(crashInfo2))
    self.assertTrue(instructionSig2.matches(crashInfo2))
    self.assertFalse(instructionSig2Neg.matches(crashInfo2))
    self.assertTrue(instructionSig3.matches(crashInfo2))
    self.assertFalse(instructionSig3Neg.matches(crashInfo2))

    # Crash info3 doesn't have register information, ensure we don't match any
    self.assertFalse(instructionSig1.matches(crashInfo3))
    self.assertFalse(instructionSig2.matches(crashInfo3))
    self.assertFalse(instructionSig3.matches(crashInfo3))
def runTest(self):
    """Output symptoms must honor the src field and support PCRE matching."""
    config = ProgramConfiguration("test", "x86-64", "linux")

    crashSignature1 = '{ "symptoms" : [ { "type" : "output", "value" : "test" } ] }'
    crashSignature1Neg = '{ "symptoms" : [ { "type" : "output", "src" : "stderr", "value" : "test" } ] }'
    crashSignature2 = '{ "symptoms" : [ { "type" : "output", "src" : "stderr", "value" : { "value" : "^fest$", "matchType" : "pcre" } } ] }'

    outputSignature1 = CrashSignature(crashSignature1)
    outputSignature1Neg = CrashSignature(crashSignature1Neg)
    outputSignature2 = CrashSignature(crashSignature2)

    gdbOutput = []
    # Build the fixtures directly instead of the previous append chains.
    stdout = ["Foo", "Bartester", "Baz"]
    stderr = ["hackfest"]

    crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, config, auxCrashData=gdbOutput)

    self.assertIsInstance(crashInfo, NoCrashInfo)

    # Ensure we match on stdout/err if nothing is specified
    # (assertTrue replaces the deprecated assert_ alias, removed in Python 3.12)
    self.assertTrue(outputSignature1.matches(crashInfo))

    # Don't match stdout if stderr is specified
    self.assertFalse(outputSignature1Neg.matches(crashInfo))

    # Check that we're really using PCRE
    self.assertFalse(outputSignature2.matches(crashInfo))

    # Add something the PCRE should match, then retry
    stderr.append("fest")
    crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, config, auxCrashData=gdbOutput)
    self.assertTrue(outputSignature2.matches(crashInfo))
def test_SignatureGenerationTSanRaceTestAtomic():
    """Generated TSan atomic-race signatures must contain the expected crashdata output symptoms."""
    config = ProgramConfiguration("test", "x86-64", "linux")
    for fn in ['tsan-report-atomic.txt', 'tsan-report-atomic-swapped.txt']:
        with open(os.path.join(CWD, 'resources', fn), 'r') as f:
            crashInfo = CrashInfo.fromRawCrashData(
                [], [], config, auxCrashData=f.read().splitlines())

        assert crashInfo.backtrace[0] == "pthread_mutex_destroy"
        assert (
            crashInfo.createShortSignature() ==
            "ThreadSanitizer: data race [@ pthread_mutex_destroy] vs. [@ pthread_mutex_unlock]"
        )

        testSignature = crashInfo.createCrashSignature()
        assert testSignature.matches(crashInfo)

        # Collect the OutputSymptoms via a comprehension (was a manual append loop).
        outputSymptoms = [symptom for symptom in testSignature.symptoms
                          if isinstance(symptom, OutputSymptom)]
        for symptom in outputSymptoms:
            assert symptom.src == "crashdata"
        assert len(outputSymptoms) == 3

        for stringMatchVal in [
                "WARNING: ThreadSanitizer: data race",
                "(Previous )?[Aa]tomic [Rr]ead of size 1 at 0x[0-9a-fA-F]+ by thread T[0-9]+( .+mutexes: .+)?:",
                "(Previous )?[Ww]rite of size 1 at 0x[0-9a-fA-F]+ by main thread( .+mutexes: .+)?:"]:
            # any() replaces the manual found-flag loop.
            found = any(symptom.output.value == stringMatchVal for symptom in outputSymptoms)
            assert found, "Couldn't find OutputSymptom with value '%s'" % stringMatchVal
def test_collector_generate_search(tmpdir):
    '''Test sigcache generation and search'''
    # The signature cache lives in its own subdirectory of the test tmpdir.
    cache_dir = tmpdir.mkdir('sigcache').strpath
    collector = Collector(sigCacheDir=cache_dir)

    # Derive a signature from known ASan crash data.
    config = ProgramConfiguration('mozilla-central', 'x86-64', 'linux', version='ba0bc4f26681')
    crash = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(), config)
    sig_path = collector.generate(crash, False, False, 8)
    cache_contents = {entry.strpath for entry in tmpdir.join('sigcache').listdir()}
    assert cache_contents == {sig_path}

    # Searching the cache with the same crash finds that signature; no metadata yet.
    match_path, match_meta = collector.search(crash)
    assert match_path == sig_path
    assert match_meta is None

    # Once a metadata file exists next to the signature, search returns its parsed contents.
    base_path = os.path.splitext(sig_path)[0]
    with open(base_path + '.metadata', 'w') as meta_file:
        meta_file.write('{}')
    match_path, match_meta = collector.search(crash)
    assert match_path == sig_path
    assert match_meta == {}

    # A different (empty) crash must not match anything in the cache.
    other_crash = CrashInfo.fromRawCrashData([], [], config)
    match_path, match_meta = collector.search(other_crash)
    assert match_path is None
    assert match_meta is None

    # generate() returns None when signature generation fails.
    assert collector.generate(other_crash, True, True, 8) is None
def runTest(self):
    """A program abort message must suppress ASan symptoms in generated signatures."""
    config = ProgramConfiguration("test", "x86-64", "linux")

    infoWithAux = CrashInfo.fromRawCrashData(
        [], [], config, auxCrashData=testTraceWithAuxMessage.splitlines())
    infoWithAbort = CrashInfo.fromRawCrashData(
        [], [], config, auxCrashData=testTraceWithAuxAndAbortMessage.splitlines())

    signatureWithAux = infoWithAux.createCrashSignature()
    signatureWithAbort = infoWithAbort.createCrashSignature()

    # The first signature should carry ASan symptoms; the second must not,
    # since its trace contains a program abort message.
    for fragment in ("/ERROR: AddressSanitizer", "/READ of size"):
        self.assertIn(fragment, str(signatureWithAux))
        self.assertNotIn(fragment, str(signatureWithAbort))

    # Check matches appropriately
    self.assertTrue(signatureWithAux.matches(infoWithAux))
    self.assertTrue(signatureWithAux.matches(infoWithAbort))
    self.assertFalse(signatureWithAbort.matches(infoWithAux))
    self.assertTrue(signatureWithAbort.matches(infoWithAbort))
def runTest(self):
    """stackSize symptoms must support exact sizes and relational expressions."""
    config = ProgramConfiguration("test", "x86-64", "linux")

    crashSignature1 = '{ "symptoms" : [ { "type" : "stackSize", "size" : 8 } ] }'
    crashSignature1Neg = '{ "symptoms" : [ { "type" : "stackSize", "size" : 9 } ] }'
    crashSignature2 = '{ "symptoms" : [ { "type" : "stackSize", "size" : "< 10" } ] }'
    crashSignature2Neg = '{ "symptoms" : [ { "type" : "stackSize", "size" : "> 10" } ] }'

    stackSizeSig1 = CrashSignature(crashSignature1)
    stackSizeSig1Neg = CrashSignature(crashSignature1Neg)
    stackSizeSig2 = CrashSignature(crashSignature2)
    stackSizeSig2Neg = CrashSignature(crashSignature2Neg)

    crashInfo1 = CrashInfo.fromRawCrashData([], [], config, auxCrashData=gdbSampleTrace1.splitlines())

    self.assertIsInstance(crashInfo1, GDBCrashInfo)

    # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12).
    self.assertTrue(stackSizeSig1.matches(crashInfo1))
    self.assertFalse(stackSizeSig1Neg.matches(crashInfo1))
    self.assertTrue(stackSizeSig2.matches(crashInfo1))
    self.assertFalse(stackSizeSig2Neg.matches(crashInfo1))
def test_SignatureStackFramesAuxMessagesTest():
    """A program abort message must suppress ASan symptoms in generated signatures."""
    config = ProgramConfiguration("test", "x86-64", "linux")

    infoAux = CrashInfo.fromRawCrashData(
        [], [], config, auxCrashData=testTraceWithAuxMessage.splitlines())
    infoAbort = CrashInfo.fromRawCrashData(
        [], [], config, auxCrashData=testTraceWithAuxAndAbortMessage.splitlines())

    sigAux = infoAux.createCrashSignature()
    sigAbort = infoAbort.createCrashSignature()

    # The first signature carries ASan symptoms; the second must not,
    # because its trace contains a program abort message.
    for fragment in ("/ERROR: AddressSanitizer", "/READ of size"):
        assert fragment in str(sigAux)
        assert fragment not in str(sigAbort)

    # Check matches appropriately
    assert sigAux.matches(infoAux)
    assert sigAux.matches(infoAbort)
    assert not sigAbort.matches(infoAux)
    assert sigAbort.matches(infoAbort)
def runTest(self):
    """stackFrame symptoms must match function names and frame-number expressions."""
    config = ProgramConfiguration("test", "x86-64", "linux")

    crashSignature1 = '{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "internalAppend" } ] }'
    crashSignature1Neg = '{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "foobar" } ] }'
    crashSignature2 = '{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "js::ion::MBasicBlock::setBackedge", "frameNumber" : "<= 4" } ] }'
    crashSignature2Neg = '{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "js::ion::MBasicBlock::setBackedge", "frameNumber" : "> 4" } ] }'

    stackFrameSig1 = CrashSignature(crashSignature1)
    stackFrameSig1Neg = CrashSignature(crashSignature1Neg)
    stackFrameSig2 = CrashSignature(crashSignature2)
    stackFrameSig2Neg = CrashSignature(crashSignature2Neg)

    crashInfo1 = CrashInfo.fromRawCrashData([], [], config, auxCrashData=gdbSampleTrace1.splitlines())

    self.assertIsInstance(crashInfo1, GDBCrashInfo)

    # assertTrue replaces the deprecated assert_ alias (removed in Python 3.12).
    self.assertTrue(stackFrameSig1.matches(crashInfo1))
    self.assertFalse(stackFrameSig1Neg.matches(crashInfo1))
    self.assertTrue(stackFrameSig2.matches(crashInfo1))
    self.assertFalse(stackFrameSig2Neg.matches(crashInfo1))
def scan_crashes(base_dir, cmdline_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        with open(cmdline_path, 'r') as cmdline_file:
            # Remember which argument is the testcase placeholder ('@@').
            for idx, line in enumerate(cmdline_file):
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            # NOTE: this message was previously broken across a line inside
            # the string literal; it is now one properly joined message.
            print("Error: Creating program configuration from binary failed. "
                  "Check your binary configuration file.", file=sys.stderr)
            return 2

        collector = Collector()

        for crash_file in crash_files:
            stdin = None

            # Either substitute the testcase into the command line, or feed
            # it via stdin when no '@@' placeholder exists.
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                open(crash_file + ".submitted", 'a').close()
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.",
                      file=sys.stderr)
def __init__(self, options, runthis, logPrefix, inCompareJIT):
    """Run a JS shell command once and triage the outcome.

    Runs |runthis| (optionally under Valgrind) with a timeout, reads the
    captured stdout/stderr logs, builds a FuzzManager CrashInfo from them,
    matches it against known signatures and records a severity level plus
    any issues on this object.

    :param options: parsed fuzzer options (timeout, valgrind, collector, ...)
    :param runthis: command list; runthis[0] is the shell binary
    :param logPrefix: path prefix for the -out/-err/-crash/-summary files
    :param inCompareJIT: True when invoked from compareJIT mode, which
                         suppresses the jsfunfuzz-specific output checks
    """
    pathToBinary = runthis[0]
    # This relies on the shell being a local one from compileShell.py:
    # Ignore trailing ".exe" in Win, also abspath makes it work w/relative paths like './js'
    pc = ProgramConfiguration.fromBinary(os.path.abspath(pathToBinary).split('.')[0])
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        # Prepend the Valgrind invocation (with suppressions) to the command.
        runthis = (inspectShell.constructVgCmdList(errorCode=VALGRIND_ERROR_EXIT_CODE) +
                   valgrindSuppressions(options.knownPath) +
                   runthis)

    # Apply resource limits on POSIX before exec'ing the child.
    preexec_fn = ulimitSet if os.name == 'posix' else None
    runinfo = timed_run.timed_run(runthis, options.timeout, logPrefix, preexec_fn=preexec_fn)

    lev = JS_FINE          # severity level, raised as evidence accumulates
    issues = []            # human-readable problem descriptions
    auxCrashData = []      # crash-log lines for FuzzManager, if any

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and 'readlines' everything into memory.
    with open(logPrefix + "-out.txt") as f:
        out = f.readlines()
    with open(logPrefix + "-err.txt") as f:
        err = f.readlines()

    if options.valgrind and runinfo.return_code == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        valgrindErrorPrefix = "==" + str(runinfo.pid) + "=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timed_run.CRASHED:
        # Try to capture a platform crash log for FuzzManager's parsers.
        if sps.grabCrashLog(runthis[0], runinfo.pid, logPrefix, True):
            with open(logPrefix + "-crash.txt") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif detect_malloc_errors.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.return_code == 0 and not inCompareJIT:
        # We might have run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper.py" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append("[Non-crash bug] " + issue)

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = CrashInfo.CrashInfo.fromRawCrashData(out, err, pc, auxCrashData=auxCrashData)

    createCollector.printCrashInfo(crashInfo)
    # We only care about crashes and assertion failures on shells with no symbols
    # Note that looking out for the Assertion failure message is highly SpiderMonkey-specific
    if not isinstance(crashInfo, CrashInfo.NoCrashInfo) or \
            'Assertion failure: ' in str(crashInfo.rawStderr) or \
            'Segmentation fault' in str(crashInfo.rawStderr) or \
            'Bus error' in str(crashInfo.rawStderr):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

    # A match against a known signature downgrades the result to "fine".
    match = options.collector.search(crashInfo)
    if match[0] is not None:
        createCollector.printMatchingSignature(match)
        lev = JS_FINE

    print("%s | %s" % (logPrefix, summaryString(issues, lev, runinfo.elapsedtime)))

    if lev != JS_FINE:
        fileManipulation.writeLinesToFile(
            ['Number: ' + logPrefix + '\n',
             'Command: ' + sps.shellify(runthis) + '\n'] +
            ['Status: ' + i + "\n" for i in issues],
            logPrefix + '-summary.txt')

    # Expose the triage results to callers.
    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo
    self.match = match
    self.runinfo = runinfo
    self.return_code = runinfo.return_code
def test_SignatureMatchWithUnicode():
    """Signature matching must not choke on non-ASCII stdout content."""
    config = ProgramConfiguration('test', 'x86-64', 'linux')
    stdout_lines = ["(«f => (generator.throw(f))», «undefined»)"]
    crash = CrashInfo.fromRawCrashData(stdout_lines, [], config)
    signature = CrashSignature('{"symptoms": [{"src": "stdout", "type": "output", "value": "x"}]}')
    assert not signature.matches(crash)
def main(argv=None):
    '''Command line options.'''

    program_name = os.path.basename(sys.argv[0])

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(
        usage='%s --libfuzzer or --aflfuzz [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main Options", description=None)
    aflGroup = parser.add_argument_group(title="AFL Options",
                                         description="Use these arguments in AFL mode")
    libfGroup = parser.add_argument_group(title="Libfuzzer Options",
                                          description="Use these arguments in Libfuzzer mode")
    fmGroup = parser.add_argument_group(title="FuzzManager Options",
                                        description="Use these to specify FuzzManager parameters")

    mainGroup.add_argument("--libfuzzer", dest="libfuzzer", action='store_true',
                           help="Enable LibFuzzer mode")
    mainGroup.add_argument("--aflfuzz", dest="aflfuzz", action='store_true',
                           help="Enable AFL mode")
    mainGroup.add_argument("--fuzzmanager", dest="fuzzmanager", action='store_true',
                           help="Use FuzzManager to submit crash results")

    libfGroup.add_argument('--env', dest='env', nargs='+', type=str,
                           help="List of environment variables in the form 'KEY=VALUE'")
    libfGroup.add_argument('--cmd', dest='cmd', action='store_true',
                           help="Command with parameters to run")
    libfGroup.add_argument("--sigdir", dest="sigdir",
                           help="Signature cache directory", metavar="DIR")

    fmGroup.add_argument("--fuzzmanager-toolname", dest="fuzzmanager_toolname",
                         help="Override FuzzManager tool name (for submitting crash results)")
    fmGroup.add_argument("--custom-cmdline-file", dest="custom_cmdline_file",
                         help="Path to custom cmdline file", metavar="FILE")
    fmGroup.add_argument("--env-file", dest="env_file",
                         help="Path to a file with additional environment variables",
                         metavar="FILE")
    fmGroup.add_argument("--serverhost",
                         help="Server hostname for remote signature management.",
                         metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int,
                         help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto",
                         help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                         help="File containing the server authentication token",
                         metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid",
                         help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform",
                         help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product",
                         help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version",
                         help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os",
                         help="OS this crash appeared on",
                         metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool",
                         help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str,
                         help="List of metadata variables in the form 'KEY=VALUE'")

    aflGroup.add_argument("--s3-queue-upload", dest="s3_queue_upload", action='store_true',
                          help="Use S3 to synchronize queues")
    aflGroup.add_argument("--s3-queue-cleanup", dest="s3_queue_cleanup", action='store_true',
                          help="Cleanup S3 queue entries older than specified refresh interval")
    aflGroup.add_argument("--s3-queue-status", dest="s3_queue_status", action='store_true',
                          help="Display S3 queue status")
    aflGroup.add_argument("--s3-build-download", dest="s3_build_download",
                          help="Use S3 to download the build for the specified project",
                          metavar="DIR")
    aflGroup.add_argument("--s3-build-upload", dest="s3_build_upload",
                          help="Use S3 to upload a new build for the specified project",
                          metavar="FILE")
    aflGroup.add_argument("--s3-corpus-download", dest="s3_corpus_download",
                          help="Use S3 to download the test corpus for the specified project",
                          metavar="DIR")
    aflGroup.add_argument("--s3-corpus-download-size", dest="s3_corpus_download_size",
                          help="When downloading the corpus, select only SIZE files randomly",
                          metavar="SIZE")
    aflGroup.add_argument("--s3-corpus-upload", dest="s3_corpus_upload",
                          help="Use S3 to upload a test corpus for the specified project",
                          metavar="DIR")
    aflGroup.add_argument("--s3-corpus-replace", dest="s3_corpus_replace", action='store_true',
                          help="In conjunction with --s3-corpus-upload, deletes all other remote test files")
    aflGroup.add_argument("--s3-corpus-refresh", dest="s3_corpus_refresh",
                          help="Download queues and corpus from S3, combine and minimize, then re-upload.",
                          metavar="DIR")
    aflGroup.add_argument("--s3-corpus-status", dest="s3_corpus_status", action='store_true',
                          help="Display S3 corpus status")
    aflGroup.add_argument("--test-file", dest="test_file",
                          help="Optional path to copy the test file to before reproducing",
                          metavar="FILE")
    aflGroup.add_argument("--afl-timeout", dest="afl_timeout", type=int, default=1000,
                          help="Timeout per test to pass to AFL for corpus refreshing",
                          metavar="MSECS")
    aflGroup.add_argument("--firefox", dest="firefox", action='store_true',
                          help="Test Program is Firefox (requires FFPuppet installed)")
    aflGroup.add_argument("--firefox-prefs", dest="firefox_prefs",
                          help="Path to prefs.js file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-extensions", nargs='+', type=str, dest="firefox_extensions",
                          help="Path extension file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-testpath", dest="firefox_testpath",
                          help="Path to file to open with Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-start-afl", dest="firefox_start_afl", metavar="FILE",
                          help="Start AFL with the given Firefox binary, remaining arguments being passed to AFL")
    aflGroup.add_argument("--s3-refresh-interval", dest="s3_refresh_interval", type=int,
                          default=86400,
                          help="How often the s3 corpus is refreshed (affects queue cleaning)",
                          metavar="SECS")
    aflGroup.add_argument("--afl-output-dir", dest="afloutdir",
                          help="Path to the AFL output directory to manage", metavar="DIR")
    aflGroup.add_argument("--afl-binary-dir", dest="aflbindir",
                          help="Path to the AFL binary directory to use", metavar="DIR")
    aflGroup.add_argument("--afl-stats", dest="aflstats",
                          help="Collect aggregated statistics while scanning output directories",
                          metavar="FILE")
    aflGroup.add_argument("--s3-bucket", dest="s3_bucket",
                          help="Name of the S3 bucket to use", metavar="NAME")
    aflGroup.add_argument("--project", dest="project",
                          help="Name of the subfolder/project inside the S3 bucket",
                          metavar="NAME")
    aflGroup.add_argument('rargs', nargs=argparse.REMAINDER)

    if len(argv) == 0:
        parser.print_help()
        return 2

    opts = parser.parse_args(argv)

    # AFL mode is the default when neither fuzzer is selected explicitly.
    if not opts.libfuzzer and not opts.aflfuzz:
        opts.aflfuzz = True

    if opts.cmd and opts.aflfuzz:
        if not opts.firefox:
            # Bug fix: this previously wrote to "sys.sdderr" (typo), which
            # raised AttributeError instead of printing the error.
            print("Error: Use --cmd either with libfuzzer or with afl in firefox mode",
                  file=sys.stderr)
            return 2

    if opts.libfuzzer:
        if not opts.rargs:
            print("Error: No arguments specified", file=sys.stderr)
            return 2

        binary = opts.rargs[0]
        if not os.path.exists(binary):
            print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
            return 2

        configuration = ProgramConfiguration.fromBinary(binary)
        if configuration is None:
            print("Error: Failed to load program configuration based on binary",
                  file=sys.stderr)
            return 2

        # Copy the system environment variables by default and overwrite them
        # if they are specified through env.  (The previous code built the
        # --env dict and registered it on the configuration twice; once is
        # enough.)
        env = dict(os.environ)
        if opts.env:
            oenv = dict(kv.split('=', 1) for kv in opts.env)
            configuration.addEnvironmentVariables(oenv)
            for envkey in oenv:
                env[envkey] = oenv[envkey]

        args = opts.rargs[1:]
        if args:
            configuration.addProgramArguments(args)

        metadata = {}
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
            configuration.addMetadata(metadata)

        # Set LD_LIBRARY_PATH for convenience
        if 'LD_LIBRARY_PATH' not in env:
            env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

        collector = Collector(opts.sigdir, opts.fuzzmanager_toolname)

        signature_repeat_count = 0
        last_signature = None

        while True:
            process = subprocess.Popen(
                opts.rargs,
                # stdout=None,
                stderr=subprocess.PIPE,
                env=env,
                universal_newlines=True)

            monitor = LibFuzzerMonitor(process.stderr)
            monitor.start()
            monitor.join()

            print("Process terminated, processing results...", file=sys.stderr)

            trace = monitor.getASanTrace()
            testcase = monitor.getTestcase()

            crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

            (sigfile, metadata) = collector.search(crashInfo)

            if sigfile is not None:
                if last_signature == sigfile:
                    signature_repeat_count += 1
                else:
                    last_signature = sigfile
                    signature_repeat_count = 0

                print("Crash matches signature %s, not submitting..." % sigfile,
                      file=sys.stderr)
            else:
                collector.generate(crashInfo, forceCrashAddress=True,
                                   forceCrashInstruction=False, numFrames=8)
                collector.submit(crashInfo, testcase)
                print("Successfully submitted crash.", file=sys.stderr)

            if signature_repeat_count >= 10:
                print("Too many crashes with the same signature, exiting...",
                      file=sys.stderr)
                break

    if opts.aflfuzz:
        if opts.firefox or opts.firefox_start_afl:
            if not haveFFPuppet:
                print("Error: --firefox and --firefox-start-afl require FFPuppet to be installed",
                      file=sys.stderr)
                return 2

            if opts.custom_cmdline_file:
                print("Error: --custom-cmdline-file is incompatible with firefox options",
                      file=sys.stderr)
                return 2

            if not opts.firefox_prefs or not opts.firefox_testpath:
                print("Error: --firefox and --firefox-start-afl require --firefox-prefs "
                      "and --firefox-testpath to be specified", file=sys.stderr)
                return 2

        if opts.firefox_start_afl:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for starting AFL with firefox",
                      file=sys.stderr)
                return 2

            (ffp, cmd, env) = setup_firefox(opts.firefox_start_afl, opts.firefox_prefs,
                                            opts.firefox_extensions, opts.firefox_testpath)

            afl_cmd = [os.path.join(opts.aflbindir, "afl-fuzz")]

            opts.rargs.remove("--")

            afl_cmd.extend(opts.rargs)
            afl_cmd.extend(cmd)

            try:
                subprocess.call(afl_cmd, env=env)
            except:  # noqa - deliberately bare: always fall through to clean_up, even on Ctrl+C
                traceback.print_exc()

            ffp.clean_up()
            return 0

        afl_out_dirs = []
        if opts.afloutdir:
            if not os.path.exists(os.path.join(opts.afloutdir, "crashes")):
                # The specified directory doesn't have a "crashes" sub directory.
                # Either the wrong directory was specified, or this is an AFL
                # multi-process synchronization directory. Try to figure this out here.
                sync_dirs = os.listdir(opts.afloutdir)

                for sync_dir in sync_dirs:
                    if os.path.exists(os.path.join(opts.afloutdir, sync_dir, "crashes")):
                        afl_out_dirs.append(os.path.join(opts.afloutdir, sync_dir))

                if not afl_out_dirs:
                    print("Error: Directory %s does not appear to be a valid AFL output/sync directory"
                          % opts.afloutdir, file=sys.stderr)
                    return 2
            else:
                afl_out_dirs.append(opts.afloutdir)

        # Upload and FuzzManager modes require specifying the AFL directory
        if opts.s3_queue_upload or opts.fuzzmanager:
            if not opts.afloutdir:
                print("Error: Must specify AFL output directory using --afl-output-dir",
                      file=sys.stderr)
                return 2

        if (opts.s3_queue_upload or opts.s3_corpus_refresh or opts.s3_build_download
                or opts.s3_build_upload or opts.s3_corpus_download or opts.s3_corpus_upload
                or opts.s3_queue_status):
            if not opts.s3_bucket or not opts.project:
                print("Error: Must specify both --s3-bucket and --project for S3 actions",
                      file=sys.stderr)
                return 2

        if opts.s3_queue_status:
            status_data = get_queue_status(opts.s3_bucket, opts.project)
            total_queue_files = 0

            for queue_name in status_data:
                print("Queue %s: %s" % (queue_name, status_data[queue_name]))
                total_queue_files += status_data[queue_name]
            print("Total queue files: %s" % total_queue_files)
            return 0

        if opts.s3_corpus_status:
            status_data = get_corpus_status(opts.s3_bucket, opts.project)
            total_corpus_files = 0

            for (status_dt, status_cnt) in sorted(status_data.items()):
                print("Added %s: %s" % (status_dt, status_cnt))
                total_corpus_files += status_cnt
            print("Total corpus files: %s" % total_corpus_files)
            return 0

        if opts.s3_queue_cleanup:
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project,
                             opts.s3_refresh_interval)
            return 0

        if opts.s3_build_download:
            download_build(opts.s3_build_download, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_build_upload:
            upload_build(opts.s3_build_upload, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_corpus_download:
            if opts.s3_corpus_download_size is not None:
                opts.s3_corpus_download_size = int(opts.s3_corpus_download_size)

            download_corpus(opts.s3_corpus_download, opts.s3_bucket, opts.project,
                            opts.s3_corpus_download_size)
            return 0

        if opts.s3_corpus_upload:
            upload_corpus(opts.s3_corpus_upload, opts.s3_bucket, opts.project,
                          opts.s3_corpus_replace)
            return 0

        if opts.s3_corpus_refresh:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for refreshing the test corpus",
                      file=sys.stderr)
                return 2

            if not os.path.exists(opts.s3_corpus_refresh):
                os.makedirs(opts.s3_corpus_refresh)

            queues_dir = os.path.join(opts.s3_corpus_refresh, "queues")

            print("Cleaning old AFL queues from s3://%s/%s/queues/"
                  % (opts.s3_bucket, opts.project))
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project,
                             opts.s3_refresh_interval)

            print("Downloading AFL queues from s3://%s/%s/queues/ to %s"
                  % (opts.s3_bucket, opts.project, queues_dir))
            download_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project)

            cmdline_file = os.path.join(opts.s3_corpus_refresh, "cmdline")
            if not os.path.exists(cmdline_file):
                print("Error: Failed to download a cmdline file from queue directories.",
                      file=sys.stderr)
                return 2

            print("Downloading build")
            download_build(os.path.join(opts.s3_corpus_refresh, "build"),
                           opts.s3_bucket, opts.project)

            with open(os.path.join(opts.s3_corpus_refresh, "cmdline"), 'r') as cmdline_file:
                cmdline = cmdline_file.read().splitlines()

            # Assume cmdline[0] is the name of the binary
            binary_name = os.path.basename(cmdline[0])

            # Try locating our binary in the build we just unpacked
            binary_search_result = [
                os.path.join(dirpath, filename)
                for dirpath, dirnames, filenames in
                os.walk(os.path.join(opts.s3_corpus_refresh, "build"))
                for filename in filenames
                if (filename == binary_name
                    and (stat.S_IXUSR
                         & os.stat(os.path.join(dirpath, filename))[stat.ST_MODE]))
            ]

            if not binary_search_result:
                print("Error: Failed to locate binary %s in unpacked build." % binary_name,
                      file=sys.stderr)
                return 2

            if len(binary_search_result) > 1:
                print("Error: Binary name %s is ambiguous in unpacked build." % binary_name,
                      file=sys.stderr)
                return 2

            cmdline[0] = binary_search_result[0]

            # Download our current corpus into the queues directory as well
            print("Downloading corpus from s3://%s/%s/corpus/ to %s"
                  % (opts.s3_bucket, opts.project, queues_dir))
            download_corpus(queues_dir, opts.s3_bucket, opts.project)

            # Ensure the directory for our new tests is empty
            updated_tests_dir = os.path.join(opts.s3_corpus_refresh, "tests")
            if os.path.exists(updated_tests_dir):
                shutil.rmtree(updated_tests_dir)
            os.mkdir(updated_tests_dir)

            # Run afl-cmin
            afl_cmin = os.path.join(opts.aflbindir, "afl-cmin")
            if not os.path.exists(afl_cmin):
                print("Error: Unable to locate afl-cmin binary.", file=sys.stderr)
                return 2

            if opts.firefox:
                (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], opts.firefox_prefs,
                                                        opts.firefox_extensions,
                                                        opts.firefox_testpath)
                cmdline = ffCmd

            afl_cmdline = [afl_cmin, '-e', '-i', queues_dir, '-o', updated_tests_dir,
                           '-t', str(opts.afl_timeout), '-m', 'none']

            if opts.test_file:
                afl_cmdline.extend(['-f', opts.test_file])

            afl_cmdline.extend(cmdline)

            print("Running afl-cmin")
            with open(os.devnull, 'w') as devnull:
                env = os.environ.copy()
                env['LD_LIBRARY_PATH'] = os.path.dirname(cmdline[0])

                if opts.firefox:
                    env.update(ffEnv)

                subprocess.check_call(afl_cmdline, stdout=devnull, env=env)

            if opts.firefox:
                ffpInst.clean_up()

            # replace existing corpus with reduced corpus
            print("Uploading reduced corpus to s3://%s/%s/corpus/"
                  % (opts.s3_bucket, opts.project))
            upload_corpus(updated_tests_dir, opts.s3_bucket, opts.project, corpus_delete=True)

            # Prune the queues directory once we successfully uploaded the new
            # test corpus, but leave everything that's part of our new corpus
            # so we don't have to download those files again.
            test_files = [file for file in os.listdir(updated_tests_dir)
                          if os.path.isfile(os.path.join(updated_tests_dir, file))]
            obsolete_queue_files = [file for file in os.listdir(queues_dir)
                                    if os.path.isfile(os.path.join(queues_dir, file))
                                    and file not in test_files]

            for file in obsolete_queue_files:
                os.remove(os.path.join(queues_dir, file))

        if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats:
            last_queue_upload = 0
            while True:
                if opts.fuzzmanager:
                    for afl_out_dir in afl_out_dirs:
                        scan_crashes(afl_out_dir, opts.custom_cmdline_file,
                                     opts.env_file, opts.fuzzmanager_toolname,
                                     opts.test_file)

                # Only upload queue files every 20 minutes
                if opts.s3_queue_upload and last_queue_upload < int(time.time()) - 1200:
                    for afl_out_dir in afl_out_dirs:
                        upload_queue_dir(afl_out_dir, opts.s3_bucket, opts.project,
                                         new_cov_only=True)
                    last_queue_upload = int(time.time())

                if opts.aflstats:
                    write_aggregated_stats(afl_out_dirs, opts.aflstats)

                time.sleep(10)
# Entry point for the AFL / libFuzzer daemon: parses command-line options and
# dispatches either to libFuzzer monitoring (spawn target, watch stderr,
# submit unknown crash signatures to FuzzManager) or to the AFL/S3 management
# actions (queue upload/cleanup/status, build and corpus transfer, corpus
# refresh via afl-cmin, aggregated stats).
# NOTE(review): physical line breaks and indentation were lost in this copy of
# the file; the code below is kept byte-identical rather than re-indented.
def main(argv=None): '''Command line options.''' program_name = os.path.basename(sys.argv[0]) if argv is None: argv = sys.argv[1:] # setup argparser parser = argparse.ArgumentParser(usage='%s --libfuzzer or --aflfuzz [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name) mainGroup = parser.add_argument_group(title="Main Options", description=None) aflGroup = parser.add_argument_group(title="AFL Options", description="Use these arguments in AFL mode") libfGroup = parser.add_argument_group(title="Libfuzzer Options", description="Use these arguments in Libfuzzer mode" ) fmGroup = parser.add_argument_group(title="FuzzManager Options", description="Use these to specify FuzzManager parameters" ) mainGroup.add_argument("--libfuzzer", dest="libfuzzer", action='store_true', help="Enable LibFuzzer mode") mainGroup.add_argument("--aflfuzz", dest="aflfuzz", action='store_true', help="Enable AFL mode") mainGroup.add_argument("--fuzzmanager", dest="fuzzmanager", action='store_true', help="Use FuzzManager to submit crash results") libfGroup.add_argument('--env', dest='env', nargs='+', type=str, help="List of environment variables in the form 'KEY=VALUE'") libfGroup.add_argument('--cmd', dest='cmd', action='store_true', help="Command with parameters to run") libfGroup.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR") fmGroup.add_argument("--fuzzmanager-toolname", dest="fuzzmanager_toolname", help="Override FuzzManager tool name (for submitting crash results)") fmGroup.add_argument("--custom-cmdline-file", dest="custom_cmdline_file", help="Path to custom cmdline file", metavar="FILE") fmGroup.add_argument("--env-file", dest="env_file", help="Path to a file with additional environment variables", metavar="FILE") fmGroup.add_argument("--serverhost", help="Server hostname for remote signature management.", metavar="HOST") fmGroup.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
# Remaining FuzzManager reporting options (server protocol/auth, client id,
# platform/product/os/tool metadata) followed by the S3 queue/build/corpus
# transfer options.
fmGroup.add_argument("--serverproto", dest="serverproto", help="Server protocol to use (default is https)", metavar="PROTO") fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile", help="File containing the server authentication token", metavar="FILE") fmGroup.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID") fmGroup.add_argument("--platform", dest="platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)") fmGroup.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT") fmGroup.add_argument("--productversion", dest="product_version", help="Product version this crash appeared on", metavar="VERSION") fmGroup.add_argument("--os", dest="os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)") fmGroup.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME") fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str, help="List of metadata variables in the form 'KEY=VALUE'") aflGroup.add_argument("--s3-queue-upload", dest="s3_queue_upload", action='store_true', help="Use S3 to synchronize queues") aflGroup.add_argument("--s3-queue-cleanup", dest="s3_queue_cleanup", action='store_true', help="Cleanup S3 queue entries older than specified refresh interval") aflGroup.add_argument("--s3-queue-status", dest="s3_queue_status", action='store_true', help="Display S3 queue status") aflGroup.add_argument("--s3-build-download", dest="s3_build_download", help="Use S3 to download the build for the specified project", metavar="DIR") aflGroup.add_argument("--s3-build-upload", dest="s3_build_upload", help="Use S3 to upload a new build for the specified project", metavar="FILE") aflGroup.add_argument("--s3-corpus-download", dest="s3_corpus_download", help="Use S3 to download the test corpus for the specified project", metavar="DIR")
# Corpus management size/replace/refresh options, AFL timing, and the
# Firefox/FFPuppet integration options.
aflGroup.add_argument("--s3-corpus-download-size", dest="s3_corpus_download_size", help="When downloading the corpus, select only SIZE files randomly", metavar="SIZE") aflGroup.add_argument("--s3-corpus-upload", dest="s3_corpus_upload", help="Use S3 to upload a test corpus for the specified project", metavar="DIR") aflGroup.add_argument("--s3-corpus-replace", dest="s3_corpus_replace", action='store_true', help="In conjunction with --s3-corpus-upload, deletes all other remote test files") aflGroup.add_argument("--s3-corpus-refresh", dest="s3_corpus_refresh", help="Download queues and corpus from S3, combine and minimize, then re-upload.", metavar="DIR") aflGroup.add_argument("--s3-corpus-status", dest="s3_corpus_status", action='store_true', help="Display S3 corpus status") aflGroup.add_argument("--test-file", dest="test_file", help="Optional path to copy the test file to before reproducing", metavar="FILE") aflGroup.add_argument("--afl-timeout", dest="afl_timeout", type=int, default=1000, help="Timeout per test to pass to AFL for corpus refreshing", metavar="MSECS") aflGroup.add_argument("--firefox", dest="firefox", action='store_true', help="Test Program is Firefox (requires FFPuppet installed)") aflGroup.add_argument("--firefox-prefs", dest="firefox_prefs", help="Path to prefs.js file for Firefox", metavar="FILE") aflGroup.add_argument("--firefox-extensions", nargs='+', type=str, dest="firefox_extensions", help="Path extension file for Firefox", metavar="FILE") aflGroup.add_argument("--firefox-testpath", dest="firefox_testpath", help="Path to file to open with Firefox", metavar="FILE") aflGroup.add_argument("--firefox-start-afl", dest="firefox_start_afl", metavar="FILE", help="Start AFL with the given Firefox binary, remaining arguments being passed to AFL") aflGroup.add_argument("--s3-refresh-interval", dest="s3_refresh_interval", type=int, default=86400, help="How often the s3 corpus is refreshed (affects queue cleaning)", metavar="SECS")
# AFL directory/binary/stats options and the positional remainder (target
# command line). Validation follows: AFL mode is the default when neither
# --libfuzzer nor --aflfuzz is given, and --cmd in AFL mode requires --firefox.
aflGroup.add_argument("--afl-output-dir", dest="afloutdir", help="Path to the AFL output directory to manage", metavar="DIR") aflGroup.add_argument("--afl-binary-dir", dest="aflbindir", help="Path to the AFL binary directory to use", metavar="DIR") aflGroup.add_argument("--afl-stats", dest="aflstats", help="Collect aggregated statistics while scanning output directories", metavar="FILE") aflGroup.add_argument("--s3-bucket", dest="s3_bucket", help="Name of the S3 bucket to use", metavar="NAME") aflGroup.add_argument("--project", dest="project", help="Name of the subfolder/project inside the S3 bucket", metavar="NAME") aflGroup.add_argument('rargs', nargs=argparse.REMAINDER) if not argv: parser.print_help() return 2 opts = parser.parse_args(argv) if not opts.libfuzzer and not opts.aflfuzz: opts.aflfuzz = True if opts.cmd and opts.aflfuzz: if not opts.firefox: print("Error: Use --cmd either with libfuzzer or with afl in firefox mode", file=sys.stderr) return 2 if opts.libfuzzer: if not opts.rargs: print("Error: No arguments specified", file=sys.stderr) return 2 binary = opts.rargs[0] if not os.path.exists(binary): print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr) return 2 configuration = ProgramConfiguration.fromBinary(binary) if configuration is None: print("Error: Failed to load program configuration based on binary", file=sys.stderr) return 2 env = {} if opts.env: env = dict(kv.split('=', 1) for kv in opts.env) configuration.addEnvironmentVariables(env) # Copy the system environment variables by default and overwrite them # if they are specified through env.
# libFuzzer run loop: merge env/args/metadata into the configuration, then
# repeatedly spawn the target, watch its stderr via LibFuzzerMonitor, and
# compare each crash against known FuzzManager signatures before submitting.
env = dict(os.environ) if opts.env: oenv = dict(kv.split('=', 1) for kv in opts.env) configuration.addEnvironmentVariables(oenv) for envkey in oenv: env[envkey] = oenv[envkey] args = opts.rargs[1:] if args: configuration.addProgramArguments(args) metadata = {} if opts.metadata: metadata.update(dict(kv.split('=', 1) for kv in opts.metadata)) configuration.addMetadata(metadata) # Set LD_LIBRARY_PATH for convenience if not 'LD_LIBRARY_PATH' in env: env['LD_LIBRARY_PATH'] = os.path.dirname(binary) collector = Collector(opts.sigdir, opts.fuzzmanager_toolname) signature_repeat_count = 0 last_signature = None while True: process = subprocess.Popen( opts.rargs, # stdout=None, stderr=subprocess.PIPE, env=env, universal_newlines=True ) monitor = LibFuzzerMonitor(process.stderr) monitor.start() monitor.join() print("Process terminated, processing results...", file=sys.stderr) trace = monitor.getASanTrace() testcase = monitor.getTestcase() crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace) (sigfile, metadata) = collector.search(crashInfo) if sigfile is not None: if last_signature == sigfile: signature_repeat_count += 1 else: last_signature = sigfile signature_repeat_count = 0 print("Crash matches signature %s, not submitting..."
% sigfile, file=sys.stderr) else: collector.generate(crashInfo, forceCrashAddress=True, forceCrashInstruction=False, numFrames=8) collector.submit(crashInfo, testcase) print("Successfully submitted crash.", file=sys.stderr) if signature_repeat_count >= 10: print("Too many crashes with the same signature, exiting...", file=sys.stderr) break if opts.aflfuzz: if opts.firefox or opts.firefox_start_afl: if not haveFFPuppet: print("Error: --firefox and --firefox-start-afl require FFPuppet to be installed", file=sys.stderr) return 2 if opts.custom_cmdline_file: print("Error: --custom-cmdline-file is incompatible with firefox options", file=sys.stderr) return 2 if not opts.firefox_prefs or not opts.firefox_testpath: print("Error: --firefox and --firefox-start-afl require --firefox-prefs and --firefox-testpath to be specified", file=sys.stderr) return 2 if opts.firefox_start_afl: if not opts.aflbindir: print("Error: Must specify --afl-binary-dir for starting AFL with firefox", file=sys.stderr) return 2 (ffp, cmd, env) = setup_firefox(opts.firefox_start_afl, opts.firefox_prefs, opts.firefox_extensions, opts.firefox_testpath) afl_cmd = [ os.path.join(opts.aflbindir, "afl-fuzz") ] opts.rargs.remove("--") afl_cmd.extend(opts.rargs) afl_cmd.extend(cmd) try: subprocess.call(afl_cmd, env=env) except: traceback.print_exc() ffp.clean_up() return 0 afl_out_dirs = [] if opts.afloutdir: if not os.path.exists(os.path.join(opts.afloutdir, "crashes")): # The specified directory doesn't have a "crashes" sub directory. # Either the wrong directory was specified, or this is an AFL multi-process # sychronization directory. Try to figure this out here.
# AFL output-directory discovery, then the one-shot S3 actions (status,
# cleanup, build/corpus download and upload), each returning immediately.
sync_dirs = os.listdir(opts.afloutdir) for sync_dir in sync_dirs: if os.path.exists(os.path.join(opts.afloutdir, sync_dir, "crashes")): afl_out_dirs.append(os.path.join(opts.afloutdir, sync_dir)) if not afl_out_dirs: print("Error: Directory %s does not appear to be a valid AFL output/sync directory" % opts.afloutdir, file=sys.stderr) return 2 else: afl_out_dirs.append(opts.afloutdir) # Upload and FuzzManager modes require specifying the AFL directory if opts.s3_queue_upload or opts.fuzzmanager: if not opts.afloutdir: print("Error: Must specify AFL output directory using --afl-output-dir", file=sys.stderr) return 2 if (opts.s3_queue_upload or opts.s3_corpus_refresh or opts.s3_build_download or opts.s3_build_upload or opts.s3_corpus_download or opts.s3_corpus_upload or opts.s3_queue_status): if not opts.s3_bucket or not opts.project: print("Error: Must specify both --s3-bucket and --project for S3 actions", file=sys.stderr) return 2 if opts.s3_queue_status: status_data = get_queue_status(opts.s3_bucket, opts.project) total_queue_files = 0 for queue_name in status_data: print("Queue %s: %s" % (queue_name, status_data[queue_name])) total_queue_files += status_data[queue_name] print("Total queue files: %s" % total_queue_files) return 0 if opts.s3_corpus_status: status_data = get_corpus_status(opts.s3_bucket, opts.project) total_corpus_files = 0 for (status_dt, status_cnt) in sorted(status_data.items()): print("Added %s: %s" % (status_dt, status_cnt)) total_corpus_files += status_cnt print("Total corpus files: %s" % total_corpus_files) return 0 if opts.s3_queue_cleanup: clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project, opts.s3_refresh_interval) return 0 if opts.s3_build_download: download_build(opts.s3_build_download, opts.s3_bucket, opts.project) return 0 if opts.s3_build_upload: upload_build(opts.s3_build_upload, opts.s3_bucket, opts.project) return 0 if opts.s3_corpus_download: if opts.s3_corpus_download_size is not None:
opts.s3_corpus_download_size = int(opts.s3_corpus_download_size) download_corpus(opts.s3_corpus_download, opts.s3_bucket, opts.project, opts.s3_corpus_download_size) return 0 if opts.s3_corpus_upload: upload_corpus(opts.s3_corpus_upload, opts.s3_bucket, opts.project, opts.s3_corpus_replace) return 0 if opts.s3_corpus_refresh: if not opts.aflbindir: print("Error: Must specify --afl-binary-dir for refreshing the test corpus", file=sys.stderr) return 2 if not os.path.exists(opts.s3_corpus_refresh): os.makedirs(opts.s3_corpus_refresh) queues_dir = os.path.join(opts.s3_corpus_refresh, "queues") print("Cleaning old AFL queues from s3://%s/%s/queues/" % (opts.s3_bucket, opts.project)) clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project, opts.s3_refresh_interval) print("Downloading AFL queues from s3://%s/%s/queues/ to %s" % (opts.s3_bucket, opts.project, queues_dir)) download_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project) cmdline_file = os.path.join(opts.s3_corpus_refresh, "cmdline") if not os.path.exists(cmdline_file): print("Error: Failed to download a cmdline file from queue directories.", file=sys.stderr) return 2 print("Downloading build") download_build(os.path.join(opts.s3_corpus_refresh, "build"), opts.s3_bucket, opts.project) with open(os.path.join(opts.s3_corpus_refresh, "cmdline"), 'r') as cmdline_file: cmdline = cmdline_file.read().splitlines() # Assume cmdline[0] is the name of the binary binary_name = os.path.basename(cmdline[0]) # Try locating our binary in the build we just unpacked binary_search_result = [os.path.join(dirpath, filename) for dirpath, dirnames, filenames in os.walk(os.path.join(opts.s3_corpus_refresh, "build")) for filename in filenames if (filename == binary_name and (stat.S_IXUSR & os.stat(os.path.join(dirpath, filename))[stat.ST_MODE]))] if not binary_search_result: print("Error: Failed to locate binary %s in unpacked build."
% binary_name, file=sys.stderr) return 2 if len(binary_search_result) > 1: print("Error: Binary name %s is ambiguous in unpacked build." % binary_name, file=sys.stderr) return 2 cmdline[0] = binary_search_result[0] # Download our current corpus into the queues directory as well print("Downloading corpus from s3://%s/%s/corpus/ to %s" % (opts.s3_bucket, opts.project, queues_dir)) download_corpus(queues_dir, opts.s3_bucket, opts.project) # Ensure the directory for our new tests is empty updated_tests_dir = os.path.join(opts.s3_corpus_refresh, "tests") if os.path.exists(updated_tests_dir): shutil.rmtree(updated_tests_dir) os.mkdir(updated_tests_dir) # Run afl-cmin afl_cmin = os.path.join(opts.aflbindir, "afl-cmin") if not os.path.exists(afl_cmin): print("Error: Unable to locate afl-cmin binary.", file=sys.stderr) return 2 if opts.firefox: (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], opts.firefox_prefs, opts.firefox_extensions, opts.firefox_testpath) cmdline = ffCmd afl_cmdline = [afl_cmin, '-e', '-i', queues_dir, '-o', updated_tests_dir, '-t', str(opts.afl_timeout), '-m', 'none'] if opts.test_file: afl_cmdline.extend(['-f', opts.test_file]) afl_cmdline.extend(cmdline) print("Running afl-cmin") with open(os.devnull, 'w') as devnull: env = os.environ.copy() env['LD_LIBRARY_PATH'] = os.path.dirname(cmdline[0]) if opts.firefox: env.update(ffEnv) subprocess.check_call(afl_cmdline, stdout=devnull, env=env) if opts.firefox: ffpInst.clean_up() # replace existing corpus with reduced corpus print("Uploading reduced corpus to s3://%s/%s/corpus/" % (opts.s3_bucket, opts.project)) upload_corpus(updated_tests_dir, opts.s3_bucket, opts.project, corpus_delete=True) # Prune the queues directory once we successfully uploaded the new # test corpus, but leave everything that's part of our new corpus # so we don't have to download those files again.
# Drop queue files superseded by the minimized corpus, then enter the
# long-running monitor loop: scan crashes for FuzzManager, upload queues to
# S3 at most every 20 minutes, and write aggregated stats every 10 seconds.
test_files = [file for file in os.listdir(updated_tests_dir) if os.path.isfile(os.path.join(updated_tests_dir, file))] obsolete_queue_files = [file for file in os.listdir(queues_dir) if os.path.isfile(os.path.join(queues_dir, file)) and file not in test_files] for file in obsolete_queue_files: os.remove(os.path.join(queues_dir, file)) if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats: last_queue_upload = 0 while True: if opts.fuzzmanager: for afl_out_dir in afl_out_dirs: scan_crashes(afl_out_dir, opts.custom_cmdline_file, opts.env_file, opts.fuzzmanager_toolname, opts.test_file) # Only upload queue files every 20 minutes if opts.s3_queue_upload and last_queue_upload < int(time.time()) - 1200: for afl_out_dir in afl_out_dirs: upload_queue_dir(afl_out_dir, opts.s3_bucket, opts.project, new_cov_only=True) last_queue_upload = int(time.time()) if opts.aflstats: write_aggregated_stats(afl_out_dirs, opts.aflstats, cmdline_path=opts.custom_cmdline_file) time.sleep(10)
def write_aggregated_stats(base_dirs, outfile, cmdline_path=None):
    '''
    Aggregate fuzzer statistics across several AFL base directories and
    write the combined report to the given output file.

    @type base_dirs: list
    @param base_dirs: AFL base directories to scan for fuzzer_stats files

    @type outfile: str
    @param outfile: Destination file for the aggregated report

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.
    '''
    # Fields summed across all instances
    sum_fields = ['execs_done', 'execs_per_sec', 'pending_favs', 'pending_total',
                  'variable_paths', 'unique_crashes', 'unique_hangs']

    # Fields averaged across all instances
    mean_fields = ['exec_timeout']

    # Fields listed individually, one entry per fuzzer instance
    per_instance_fields = ['cycles_done', 'bitmap_cvg']

    # Fields aggregated by taking the maximum seen
    max_fields = ['last_path']

    # Warnings appended verbatim to the end of the report
    warnings = []

    # Seed the accumulators; mean fields carry a running (sum, count) pair.
    aggregated_stats = {field: 0 for field in sum_fields}
    aggregated_stats.update({field: (0, 0) for field in mean_fields})
    aggregated_stats.update({field: [] for field in per_instance_fields})

    def convert_num(num):
        # fuzzer_stats values are plain decimal strings
        return float(num) if '.' in num else int(num)

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        if not cmdline_path:
            # Fall back to the cmdline file inside the (first) base directory;
            # once set, this path is reused for all remaining directories.
            cmdline_path = os.path.join(base_dir, "cmdline")

        if not os.path.exists(stats_path):
            continue

        with open(stats_path, 'r') as stats_file:
            stats = stats_file.read()

        for line in stats.splitlines():
            (field_name, field_val) = line.split(':', 1)
            field_name = field_name.strip()
            field_val = field_val.strip()

            if field_name in sum_fields:
                aggregated_stats[field_name] += convert_num(field_val)
            elif field_name in mean_fields:
                (val, cnt) = aggregated_stats[field_name]
                aggregated_stats[field_name] = (val + convert_num(field_val), cnt + 1)
            elif field_name in per_instance_fields:
                aggregated_stats[field_name].append(field_val)
            elif field_name in max_fields:
                num_val = convert_num(field_val)
                if field_name not in aggregated_stats or aggregated_stats[field_name] < num_val:
                    aggregated_stats[field_name] = num_val

    # If we don't have any data here, then the fuzzers haven't written any
    # statistics yet.
    if not aggregated_stats:
        return

    # Collapse the (sum, count) pairs into plain means.
    for field_name in mean_fields:
        (val, cnt) = aggregated_stats[field_name]
        aggregated_stats[field_name] = float(val) / float(cnt) if cnt else val

    # Verify fuzzmanagerconf exists and can be parsed
    _, cmdline = command_file_to_list(cmdline_path)
    target_binary = cmdline[0] if cmdline else None

    if target_binary is not None:
        if not os.path.isfile("%s.fuzzmanagerconf" % target_binary):
            warnings.append("WARNING: Missing %s.fuzzmanagerconf\n" % target_binary)
        elif ProgramConfiguration.fromBinary(target_binary) is None:
            warnings.append("WARNING: Invalid %s.fuzzmanagerconf\n" % target_binary)

    # Count crashes whose earlier FuzzManager submission failed.
    failed_reports = 0
    for base_dir in base_dirs:
        crashes_dir = os.path.join(base_dir, "crashes")
        if not os.path.isdir(crashes_dir):
            continue
        failed_reports += sum(1 for crash_file in os.listdir(crashes_dir)
                              if crash_file.endswith(".failed"))

    if failed_reports:
        warnings.append("WARNING: Unreported crashes detected (%d)\n" % failed_reports)

    # Emit the aligned report under a lock so concurrent writers can't
    # interleave their output.
    fields = sum_fields + mean_fields + per_instance_fields + max_fields
    max_keylen = max(len(x) for x in fields)

    with InterProcessLock(outfile + ".lock"), open(outfile, 'w') as f:
        for field in fields:
            if field not in aggregated_stats:
                continue

            val = aggregated_stats[field]
            if isinstance(val, list):
                val = " ".join(val)

            f.write("%s%s: %s\n" % (field, " " * (max_keylen + 1 - len(field)), val))

        for warning in warnings:
            f.write(warning)
# compare_jit driver: runs the same testcase under several JS shell flag
# combinations and compares stdout/stderr across runs to detect JIT
# correctness bugs. Returns (lev, crashInfo) or (js_interesting.JS_FINE, None).
# NOTE(review): physical line breaks/indentation were lost in this copy of the
# file; the code below is kept byte-identical rather than re-indented.
# NOTE(review): "Crash_Info" in the mismatch branch presumably aliases the FTB
# CrashInfo module in this file's imports -- confirm.
def compareLevel(jsEngine, flags, infilename, logPrefix, options, showDetailedDiffs, quickMode): # pylint: disable=invalid-name,missing-docstring,missing-return-doc,missing-return-type-doc,too-complex # pylint: disable=too-many-branches,too-many-arguments,too-many-locals,too-many-statements # options dict must be one we can pass to js_interesting.ShellResult # we also use it directly for knownPath, timeout, and collector # Return: (lev, crashInfo) or (js_interesting.JS_FINE, None) assert isinstance(infilename, Path) combos = shell_flags.basic_flag_sets(jsEngine) if quickMode: # Only used during initial fuzzing. Allowed to have false negatives. combos = [combos[0]] # Remove any of the following flags from being used in compare_jit flags = list( set(flags) - { "--more-compartments", "--no-wasm", "--no-wasm-ion", "--no-wasm-baseline", }) if flags: combos.insert(0, flags) commands = [[jsEngine] + combo + [str(infilename)] for combo in combos] r0 = None prefix0 = None for i, command in enumerate(commands): prefix = logPrefix.parent / f"{logPrefix.stem}-r{i}" command = commands[i] r = js_interesting.ShellResult(options, command, prefix, True) # pylint: disable=invalid-name oom = js_interesting.oomed(r.err) r.err = ignore_some_stderr(r.err) if (r.return_code == 1 or r.return_code == 2) and ( anyLineContains(r.out, "[[script] scriptArgs*]") or (anyLineContains(r.err, "[scriptfile] [scriptarg...]"))): print("Got usage error from:") print(f' {" ".join(quote(str(x)) for x in command)}') assert i file_system_helpers.delete_logs(prefix) elif r.lev > js_interesting.JS_OVERALL_MISMATCH: # would be more efficient to run lithium on one or the other, but meh summary_more_serious = js_interesting.summaryString( r.issues + ["compare_jit found a more serious bug"], r.lev, r.runinfo.elapsedtime) print(f"{infilename} | {summary_more_serious}") summary_log = (logPrefix.parent / f"{logPrefix.stem}-summary").with_suffix(".txt") with io.open(str(summary_log), "w", encoding="utf-8",
errors="replace") as f: f.write("\n".join(r.issues + [ " ".join(quote(str(x)) for x in command), "compare_jit found a more serious bug" ]) + "\n") print(f' {" ".join(quote(str(x)) for x in command)}') return r.lev, r.crashInfo elif r.lev != js_interesting.JS_FINE or r.return_code != 0: summary_other = js_interesting.summaryString( r.issues + [ "compare_jit is not comparing output, because the shell exited strangely" ], r.lev, r.runinfo.elapsedtime) print(f"{infilename} | {summary_other}") print(f' {" ".join(quote(str(x)) for x in command)}') file_system_helpers.delete_logs(prefix) if not i: return js_interesting.JS_FINE, None elif oom: # If the shell or python hit a memory limit, we consider the rest of the computation # "tainted" for the purpose of correctness comparison. message = "compare_jit is not comparing output: OOM" summary_oom = js_interesting.summaryString(r.issues + [message], r.lev, r.runinfo.elapsedtime) print(f"{infilename} | {summary_oom}") file_system_helpers.delete_logs(prefix) if not i: return js_interesting.JS_FINE, None elif not i: # Stash output from this run (the first one), so for subsequent runs, we can compare against it. (r0, prefix0) = (r, prefix) # pylint: disable=invalid-name else: # Compare the output of this run (r.out) to the output of the first run (r0.out), etc.
# Helper below: a stderr difference consisting only of the asm.js-disabled
# warning is expected when exactly one side runs with --no-asmjs, so such a
# run pair is not treated as a mismatch.
def optionDisabledAsmOnOneSide(): # pylint: disable=invalid-name asmMsg = "asm.js type error: Disabled by javascript.options.asmjs" # pylint: disable=invalid-name # pylint: disable=invalid-name # pylint: disable=cell-var-from-loop optionDisabledAsm = anyLineContains( r0.err, asmMsg) or anyLineContains(r.err, asmMsg) # pylint: disable=invalid-name optionDiffers = (("--no-asmjs" in commands[0]) != ("--no-asmjs" in command)) return optionDisabledAsm and optionDiffers mismatchErr = (r.err != r0.err and not optionDisabledAsmOnOneSide()) # pylint: disable=invalid-name mismatchOut = (r.out != r0.out) # pylint: disable=invalid-name if mismatchErr or mismatchOut: # pylint: disable=no-else-return # Generate a short summary for stdout and a long summary for a "*-summary.txt" file. # pylint: disable=invalid-name rerunCommand = " ".join( quote(str(x)) for x in [ "python3 -m funfuzz.js.compare_jit", f'--flags={" ".join(flags)}', f"--timeout={options.timeout}", str(options.knownPath), str(jsEngine), str(infilename.name) ]) (summary, issues) = summarizeMismatch(mismatchErr, mismatchOut, prefix0, prefix) summary = ( f' {" ".join(quote(str(x)) for x in commands[0])}\n' f' {" ".join(quote(str(x)) for x in command)}\n' f"\n" f"{summary}") summary_log = (logPrefix.parent / f"{logPrefix.stem}-summary").with_suffix(".txt") with io.open(str(summary_log), "w", encoding="utf-8", errors="replace") as f: f.write(f"{rerunCommand}\n\n{summary}") summary_overall_mismatch = js_interesting.summaryString( issues, js_interesting.JS_OVERALL_MISMATCH, r.runinfo.elapsedtime) print(f"{infilename} | {summary_overall_mismatch}") if quickMode: print(rerunCommand) if showDetailedDiffs: print(summary) print() # Create a crashInfo object with empty stdout, and stderr showing diffs pc = ProgramConfiguration.fromBinary(str(jsEngine)) # pylint: disable=invalid-name pc.addProgramArguments(flags) crashInfo = Crash_Info.CrashInfo.fromRawCrashData([], summary, pc) # pylint: disable=invalid-name return
js_interesting.JS_OVERALL_MISMATCH, crashInfo else: # print "compare_jit: match" file_system_helpers.delete_logs(prefix) # All matched :) file_system_helpers.delete_logs(prefix0) return js_interesting.JS_FINE, None
# Entry point for the libFuzzer FuzzManager reporting tool: parses options,
# builds a ProgramConfiguration for the target binary, then repeatedly runs
# the target under LibFuzzerMonitor and submits crashes whose signatures are
# not already known to FuzzManager.
# NOTE(review): physical line breaks and indentation were lost in this copy of
# the file; the code below is kept byte-identical rather than re-indented. In
# particular, the `opts.platform == None ...` check and the manual
# `ProgramConfiguration(...)` fallback read as if they belong INSIDE the
# `configuration == None` branch (the error text only makes sense there) --
# confirm the original nesting against upstream before relying on this flow.
def main(argv=None): '''Command line options.''' program_name = os.path.basename(sys.argv[0]) program_version = "v%s" % __version__ program_build_date = "%s" % __updated__ program_version_string = '%%prog %s (%s)' % (program_version, program_build_date) if argv is None: argv = sys.argv[1:] # setup argparser parser = argparse.ArgumentParser(usage='%s [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name) mainGroup = parser.add_argument_group(title="Main arguments", description=None) fmGroup = parser.add_argument_group(title="FuzzManager specific options", description="""Values for the options listed here are typically provided through FuzzManager configuration files, but can be overwritten using these options:""") mainGroup.add_argument('--version', action='version', version=program_version_string) mainGroup.add_argument('--cmd', dest='cmd', action='store_true', help="Command with parameters to run") mainGroup.add_argument('--env', dest='env', nargs='+', type=str, help="List of environment variables in the form 'KEY=VALUE'") # Settings fmGroup.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR") fmGroup.add_argument("--serverhost", dest="serverhost", help="Server hostname for remote signature management", metavar="HOST") fmGroup.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT") fmGroup.add_argument("--serverproto", dest="serverproto", help="Server protocol to use (default is https)", metavar="PROTO") fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile", help="File containing the server authentication token", metavar="FILE") fmGroup.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID") fmGroup.add_argument("--platform", dest="platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)") fmGroup.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT")
# Remaining metadata options and the positional target command; then option
# validation and ProgramConfiguration setup for the target binary.
fmGroup.add_argument("--productversion", dest="product_version", help="Product version this crash appeared on", metavar="VERSION") fmGroup.add_argument("--os", dest="os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)") fmGroup.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME") fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str, help="List of metadata variables in the form 'KEY=VALUE'") parser.add_argument('rargs', nargs=argparse.REMAINDER, help=argparse.SUPPRESS) if len(argv) == 0: parser.print_help() return 2 # process options opts = parser.parse_args(argv) if not opts.rargs: print("Error: No arguments specified", file=sys.stderr) return 2 binary = opts.rargs[0] if not os.path.exists(binary): print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr) return 2 configuration = ProgramConfiguration.fromBinary(binary) if configuration == None: print("Error: Failed to load program configuration based on binary", file=sys.stderr) return 2 if opts.platform == None or opts.product == None or opts.os == None: print("Error: Must use binary configuration file or specify/configure at least --platform, --product and --os", file=sys.stderr) return 2 configuration = ProgramConfiguration(opts.product, opts.platform, opts.os, opts.product_version) env = {} if opts.env: env = dict(kv.split('=', 1) for kv in opts.env) configuration.addEnvironmentVariables(env) # Copy the system environment variables by default and overwrite them # if they are specified through env.
# Run loop: merge env/args/metadata, build the Collector (with optional
# server auth token), then repeatedly spawn the target, watch stderr via
# LibFuzzerMonitor, and search/submit the crash signature. Exits after ten
# consecutive crashes with the same signature.
env = dict(os.environ) if opts.env: oenv = dict(kv.split('=', 1) for kv in opts.env) configuration.addEnvironmentVariables(oenv) for envkey in oenv: env[envkey] = oenv[envkey] args = opts.rargs[1:] if args: configuration.addProgramArguments(args) metadata = {} if opts.metadata: metadata.update(dict(kv.split('=', 1) for kv in opts.metadata)) configuration.addMetadata(metadata) # Set LD_LIBRARY_PATH for convenience if not 'LD_LIBRARY_PATH' in env: env['LD_LIBRARY_PATH'] = os.path.dirname(binary) serverauthtoken = None if opts.serverauthtokenfile: with open(opts.serverauthtokenfile) as f: serverauthtoken = f.read().rstrip() collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto, serverauthtoken, opts.clientid, opts.tool) signature_repeat_count = 0 last_signature = None while(True): process = subprocess.Popen( opts.rargs, # stdout=None, stderr=subprocess.PIPE, env=env, universal_newlines=True ) monitor = LibFuzzerMonitor(process.stderr) monitor.start() monitor.join() print("Process terminated, processing results...", file=sys.stderr) trace = monitor.getASanTrace() testcase = monitor.getTestcase() crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace) (sigfile, metadata) = collector.search(crashInfo) if sigfile != None: if last_signature == sigfile: signature_repeat_count += 1 else: last_signature = sigfile signature_repeat_count = 0 print("Crash matches signature %s, not submitting..." % sigfile, file=sys.stderr) else: collector.generate(crashInfo, forceCrashAddress=True, forceCrashInstruction=False, numFrames=8) collector.submit(crashInfo, testcase) print("Successfully submitted crash.", file=sys.stderr) if signature_repeat_count >= 10: print("Too many crashes with the same signature, exiting...", file=sys.stderr) break
def create(self, attrs):
    '''
    Create a CrashEntry instance based on the given dictionary of values
    received. We need to unflatten foreign relationships like product,
    platform, os and client and create the foreign objects on the fly
    if they don't exist in our database yet.
    '''
    # Reject submissions missing any of the mandatory raw-data fields up front.
    missing_keys = {'rawStdout', 'rawStderr', 'rawCrashData'} - set(attrs.keys())
    if missing_keys:
        raise InvalidArgumentException({key: ["This field is required."] for key in missing_keys})

    # If mozilla-central is not the only product name, duplicate Product rows can
    # exist; fall back to the oldest (lowest id) matching row in that case.
    try:
        attrs['product'] = Product.objects.get_or_create(**attrs['product'])[0]
    except Product.MultipleObjectsReturned:
        attrs['product'] = Product.objects.filter(**attrs['product']).order_by('id').first()
    # Replace the flat dicts for the remaining foreign relations with model
    # instances, creating them on demand.
    attrs['platform'] = Platform.objects.get_or_create(**attrs['platform'])[0]
    attrs['os'] = OS.objects.get_or_create(**attrs['os'])[0]
    attrs['client'] = Client.objects.get_or_create(**attrs['client'])[0]
    attrs['tool'] = Tool.objects.get_or_create(**attrs['tool'])[0]

    # Parse the incoming data using the crash signature package from FTB
    configuration = ProgramConfiguration(attrs['product'].name, attrs['platform'].name, attrs['os'].name, attrs['product'].version)
    crashInfo = CrashInfo.fromRawCrashData(attrs['rawStdout'], attrs['rawStderr'], configuration, attrs['rawCrashData'])

    # Populate certain fields here from the CrashInfo object we just got
    if crashInfo.crashAddress is not None:
        attrs['crashAddress'] = '0x%x' % crashInfo.crashAddress
    attrs['shortSignature'] = crashInfo.createShortSignature()

    # If a testcase is supplied, create a testcase object and store it.
    # NOTE(review): assumes attrs always contains a 'testcase' dict — confirm
    # the serializer guarantees this key.
    if 'test' in attrs['testcase']:
        testcase = attrs['testcase']
        testcase_ext = attrs.pop('testcase_ext', None)
        testcase_quality = testcase.get('quality', 0)
        testcase_isbinary = testcase.get('isBinary', False)
        testcase = testcase['test']
        if testcase_ext is None:
            raise RuntimeError("Must provide testcase extension when providing testcase")
        # Name the stored file after the SHA-1 of its content.
        h = hashlib.new('sha1')
        if testcase_isbinary:
            # Binary testcases arrive base64-encoded over the wire.
            testcase = base64.b64decode(testcase)
            h.update(testcase)
        else:
            h.update(repr(testcase).encode("utf-8"))
        dbobj = TestCase(quality=testcase_quality, isBinary=testcase_isbinary, size=len(testcase))
        dbobj.test.save("%s.%s" % (h.hexdigest(), testcase_ext), ContentFile(testcase))
        dbobj.save()
        attrs['testcase'] = dbobj
    else:
        attrs['testcase'] = None

    # Create our CrashEntry instance
    return super(CrashEntrySerializer, self).create(attrs)
def __init__(self, options, runthis, logPrefix, in_compare_jit, env=None):  # pylint: disable=too-complex
    # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
    """Run the shell command, harvest its logs, and classify the outcome.

    Executes *runthis* under Lithium's timed_run, reads the -out/-err/-crash
    log files next to *logPrefix*, assigns a JS_* severity level, builds a
    FuzzManager CrashInfo, matches it against known signatures, and stores
    all results as attributes on self.

    NOTE(review): presumably the ShellResult constructor from funfuzz's
    js_interesting module — the enclosing class is not visible in this chunk.
    """
    # If Lithium uses this as an interestingness test, logPrefix is likely not a Path object, so make it one.
    logPrefix = Path(logPrefix)
    pathToBinary = runthis[0].expanduser().resolve()  # pylint: disable=invalid-name
    # This relies on the shell being a local one from compile_shell:
    # Ignore trailing ".exe" in Win, also abspath makes it work w/relative paths like "./js"
    # pylint: disable=invalid-name
    assert pathToBinary.with_suffix(".fuzzmanagerconf").is_file()
    pc = ProgramConfiguration.fromBinary(
        str(pathToBinary.parent / pathToBinary.stem))
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        # NOTE(review): this comprehension iterates the *characters* of
        # "valgrind_suppressions.txt" and never uses `filename`; the
        # "(unknown)" placeholder suggests a redacted path expression —
        # confirm the intended suppressions-file list against upstream.
        runthis = (inspect_shell.constructVgCmdList(
            errorCode=VALGRIND_ERROR_EXIT_CODE) + [
                f"--suppressions=(unknown)"
                for filename in "valgrind_suppressions.txt"
            ] + runthis)

    # Start from a private copy of the environment so updates below don't
    # leak into os.environ.
    timed_run_kw = {"env": (env or deepcopy(os.environ))}
    # Enable LSan which is enabled with non-ARM64 simulator ASan, only on Linux
    if platform.system(
    ) == "Linux" and inspect_shell.queryBuildConfiguration(
            options.jsengine, "asan"):
        env_asan_options = "detect_leaks=1,"
        env_lsan_options = "max_leaks=1,"
        if inspect_shell.queryBuildConfiguration(options.jsengine,
                                                 "arm64-simulator"):
            # ARM64 simulator builds get leak checking disabled entirely.
            env_asan_options = ""
            env_lsan_options = ""
        timed_run_kw["env"].update({"ASAN_OPTIONS": env_asan_options})
        timed_run_kw["env"].update({"LSAN_OPTIONS": env_lsan_options})
    elif not platform.system() == "Windows":
        # On non-Linux POSIX systems, raise resource limits (e.g. core size)
        # in the child before exec.
        timed_run_kw["preexec_fn"] = set_ulimit
    pc.addEnvironmentVariables(dict(timed_run_kw["env"]))

    lithium_logPrefix = str(logPrefix).encode("utf-8")
    if isinstance(lithium_logPrefix, b"".__class__):
        lithium_logPrefix = lithium_logPrefix.decode("utf-8", errors="replace")

    # logPrefix should be a string for timed_run in Lithium version 0.2.1 to work properly, apparently
    runinfo = timedrun.timed_run(
        [str(x) for x in runthis],  # Convert all Paths/bytes to strings for Lithium
        options.timeout,
        lithium_logPrefix,
        **timed_run_kw)

    lev = JS_FINE
    is_oom = False
    issues = []
    auxCrashData = []  # pylint: disable=invalid-name

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and "readlines" everything into memory.
    # Collector adds newlines later, see https://git.io/fjoMB
    out_log = (logPrefix.parent / f"{logPrefix.stem}-out").with_suffix(".txt")
    with io.open(str(out_log), "r", encoding="utf-8", errors="replace") as f:
        out = [line.rstrip() for line in f]
    err_log = (logPrefix.parent / f"{logPrefix.stem}-err").with_suffix(".txt")
    with io.open(str(err_log), "r", encoding="utf-8", errors="replace") as f:
        err = [line.rstrip() for line in f]

    # Scan stderr (most recent lines first) for an unhandlable-OOM marker.
    for line in reversed(err):
        if "[unhandlable oom]" in line:
            print("Ignoring unhandlable oom...")
            is_oom = True
            break

    if is_oom:
        # OOM is not an interesting result: keep JS_FINE and discard any
        # crash logs / core files produced by the run.
        lev = JS_FINE
        crash_log = (logPrefix.parent / f"{logPrefix.stem}-crash").with_suffix(".txt")
        core_file = logPrefix.parent / f"{logPrefix.stem}-core"
        if crash_log.is_file():
            crash_log.unlink()
        if core_file.is_file():
            core_file.unlink()
        dbggr_cmd = os_ops.make_dbg_cmd(runthis[0], runinfo.pid)
        if dbggr_cmd:
            # The debugger command's last argument names the core file location.
            core_file = Path(dbggr_cmd[-1])
            if core_file.is_file():
                core_file.unlink()
    elif options.valgrind and runinfo.return_code == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        valgrindErrorPrefix = f"=={runinfo.pid}=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(
                    valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timedrun.CRASHED:
        if os_ops.grab_crash_log(runthis[0], runinfo.pid, logPrefix, True):
            crash_log = (logPrefix.parent / f"{logPrefix.stem}-crash").with_suffix(".txt")
            with io.open(str(crash_log), "r", encoding="utf-8",
                         errors="replace") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif file_manipulation.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.return_code == 0 and not in_compare_jit:
        # We might have(??) run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(
                out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append(f"[Non-crash bug] {issue}")

    activated = False  # Turn on when trying to report *reliable* testcases that do not have a coredump
    # On Linux, fall back to run testcase via gdb using --args if core file data is unavailable
    # Note that this second round of running uses a different fuzzSeed as the initial if default jsfunfuzz is run
    # We should separate this out, i.e. running jsfunfuzz within a debugger, only if core dumps cannot be generated
    if (activated and platform.system() == "Linux" and shutil.which("gdb")
            and not auxCrashData and not in_compare_jit):
        print(
            "Note: No core file found on Linux - falling back to run via gdb"
        )
        extracted_gdb_cmds = ["-ex", "run"]
        with io.open(str(
                Path(__file__).parent.parent / "util" / "gdb_cmds.txt"),
                     "r",
                     encoding="utf-8",
                     errors="replace") as f:
            # Convert each non-comment, non-echo line of gdb_cmds.txt into a
            # "-ex <cmd>" pair for batch gdb.
            for line in f:
                if line.rstrip() and not line.startswith(
                        "#") and not line.startswith("echo"):
                    extracted_gdb_cmds.append("-ex")
                    extracted_gdb_cmds.append(f"{line.rstrip()}")
        no_main_log_gdb_log = subprocess.run(
            (["gdb", "-n", "-batch"] + extracted_gdb_cmds + ["--args"] +
             [str(x) for x in runthis]),
            check=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        auxCrashData = no_main_log_gdb_log.stdout

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = Crash_Info.CrashInfo.fromRawCrashData(
        out, err, pc, auxCrashData=auxCrashData)
    create_collector.printCrashInfo(crashInfo)
    # We only care about crashes and assertion failures on shells with no symbols
    # Note that looking out for the Assertion failure message is highly SpiderMonkey-specific
    if not is_oom and (not isinstance(crashInfo, Crash_Info.NoCrashInfo)
                       or "Assertion failure: " in str(crashInfo.rawStderr)
                       or "Segmentation fault" in str(crashInfo.rawStderr)
                       or "Bus error" in str(crashInfo.rawStderr)):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

    try:
        match = options.collector.search(crashInfo)
        if match[0] is not None:
            create_collector.printMatchingSignature(match)
            if match[1].get("frequent"):
                # Known, frequently-hit bucket — treat the run as uninteresting.
                print("Ignoring frequent bucket")
                lev = JS_FINE
    except UnicodeDecodeError:  # Sometimes FM throws due to unicode issues
        print(
            "Note: FuzzManager is throwing a UnicodeDecodeError, signature matching skipped"
        )
        match = False

    print(
        f"{logPrefix} | {summaryString(issues, lev, runinfo.elapsedtime)}")

    if lev != JS_FINE:
        # Persist a human-readable summary next to the other logs.
        summary_log = (logPrefix.parent / f"{logPrefix.stem}-summary").with_suffix(".txt")
        with io.open(str(summary_log), "w", encoding="utf-8",
                     errors="replace") as f:
            f.writelines([
                f"Number: {logPrefix}\n",
                f'Command: {" ".join(quote(str(x)) for x in runthis)}\n'
            ] + [f"Status: {i}\n" for i in issues])

    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo  # pylint: disable=invalid-name
    self.match = match
    self.runinfo = runinfo
    self.return_code = runinfo.return_code
def scan_crashes(base_dir, cmdline_path=None, env_path=None, tool_name=None,
                 firefox=None, firefox_prefs=None, firefox_extensions=None,
                 firefox_testpath=None, test_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @type env_path: String
    @param env_path: Optional file containing environment variables.

    @type test_path: String
    @param test_path: Optional filename where to copy the test before
                      attempting to reproduce a crash.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None
        base_env = {}
        test_in_env = None

        if env_path:
            with open(env_path, 'r') as env_file:
                for line in env_file:
                    (name, val) = line.rstrip('\n').split("=", 1)
                    base_env[name] = val
                    # An '@@' placeholder in an env value marks where the
                    # testcase path must be substituted.
                    if '@@' in val:
                        test_in_env = name

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        with open(cmdline_path, 'r') as cmdline_file:
            idx = 0
            for line in cmdline_file:
                # An '@@' placeholder in an argument marks the testcase slot.
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))
                idx += 1

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print("Error: Creating program configuration from binary failed. Check your binary configuration file.",
                  file=sys.stderr)
            return 2

        collector = Collector(tool=tool_name)

        if firefox:
            (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], firefox_prefs,
                                                    firefox_extensions, firefox_testpath)
            cmdline = ffCmd
            base_env.update(ffEnv)

        for crash_file in crash_files:
            stdin = None
            env = None

            if base_env:
                env = dict(base_env)

            # Hand the testcase to the target via (in priority order): an
            # '@@' command-line slot, an '@@' env-var slot, a fixed file copy
            # (test_path), or stdin.
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            elif test_in_env is not None:
                env[test_in_env] = env[test_in_env].replace('@@', crash_file)
            elif test_path is not None:
                shutil.copy(crash_file, test_path)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            print("Processing crash file %s" % crash_file, file=sys.stderr)

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], env=env, stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                open(crash_file + ".submitted", 'a').close()
                print("Success: Submitted crash to server.", file=sys.stderr)
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.",
                      file=sys.stderr)

        if firefox:
            ffpInst.clean_up()
def scan_crashes(base_dir, cmdline_path=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        cmdline = []
        test_idx = None

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        with open(cmdline_path, 'r') as cmdline_file:
            idx = 0
            for line in cmdline_file:
                # An '@@' placeholder in an argument marks the testcase slot.
                if '@@' in line:
                    test_idx = idx
                cmdline.append(line.rstrip('\n'))
                idx += 1

        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        # Removed leftover debug output: print(cmdline) wrote the raw command
        # line to stdout on every scan.
        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print("Error: Creating program configuration from binary failed. Check your binary configuration file.",
                  file=sys.stderr)
            return 2

        collector = Collector()

        for crash_file in crash_files:
            stdin = None

            # Hand the testcase to the target either through the '@@'
            # command-line slot or via stdin.
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                open(crash_file + ".submitted", 'a').close()
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.",
                      file=sys.stderr)
def main(argv=None):
    '''Command line options.

    Parses the FuzzManager Collector CLI arguments, builds a
    ProgramConfiguration / CrashInfo from the supplied files or from an
    auto-reproduced run, and dispatches to exactly one of the actions
    (--refresh, --submit, --search, --generate, --autosubmit, --download).

    @return: 0 on success, 2 on usage/reproduction errors, 3 when --search
             finds no match.
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version', version=program_version_string)

    # Crash information
    parser.add_argument("--stdout", dest="stdout", help="File containing STDOUT data", metavar="FILE")
    parser.add_argument("--stderr", dest="stderr", help="File containing STDERR data", metavar="FILE")
    parser.add_argument("--crashdata", dest="crashdata", help="File containing external crash data", metavar="FILE")

    # Actions
    parser.add_argument("--refresh", dest="refresh", action='store_true', help="Perform a signature refresh")
    parser.add_argument("--submit", dest="submit", action='store_true', help="Submit a signature to the server")
    parser.add_argument("--search", dest="search", action='store_true', help="Search cached signatures for the given crash")
    parser.add_argument("--generate", dest="generate", action='store_true', help="Create a (temporary) local signature in the cache directory")
    parser.add_argument("--autosubmit", dest="autosubmit", action='store_true', help="Go into auto-submit mode. In this mode, all remaining arguments are interpreted as the crashing command. This tool will automatically obtain GDB crash information and submit it.")
    parser.add_argument("--download", dest="download", type=int, help="Download the testcase for the specified crash entry", metavar="ID")

    # Settings
    parser.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR")
    parser.add_argument("--serverhost", dest="serverhost", help="Server hostname for remote signature management", metavar="HOST")
    parser.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", dest="serverproto", help="Server protocol to use (default is https)", metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", dest="serverauthtokenfile", help="File containing the server authentication token", metavar="FILE")
    parser.add_argument("--clientid", dest="clientid", help="Client ID to use when submitting issues", metavar="ID")
    parser.add_argument("--platform", dest="platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    parser.add_argument("--product", dest="product", help="Product this crash appeared on", metavar="PRODUCT")
    parser.add_argument("--productversion", dest="product_version", help="Product version this crash appeared on", metavar="VERSION")
    parser.add_argument("--os", dest="os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    parser.add_argument("--tool", dest="tool", help="Name of the tool that found this issue", metavar="NAME")
    parser.add_argument('--args', dest='args', nargs='+', type=str, help="List of program arguments. Backslashes can be used for escaping and are stripped.")
    parser.add_argument('--env', dest='env', nargs='+', type=str, help="List of environment variables in the form 'KEY=VALUE'")
    parser.add_argument('--metadata', dest='metadata', nargs='+', type=str, help="List of metadata variables in the form 'KEY=VALUE'")
    parser.add_argument("--binary", dest="binary", help="Binary that has a configuration file for reading", metavar="BINARY")
    parser.add_argument("--testcase", dest="testcase", help="File containing testcase", metavar="FILE")
    parser.add_argument("--testcasequality", dest="testcasequality", default="0", help="Integer indicating test case quality (0 is best and default)", metavar="VAL")

    # Options that affect how signatures are generated
    parser.add_argument("--forcecrashaddr", dest="forcecrashaddr", action='store_true', help="Force including the crash address into the signature")
    parser.add_argument("--forcecrashinst", dest="forcecrashinst", action='store_true', help="Force including the crash instruction into the signature (GDB only)")
    parser.add_argument("--numframes", dest="numframes", default=8, type=int, help="How many frames to include into the signature (default is 8)")

    parser.add_argument('rargs', nargs=argparse.REMAINDER)

    if len(argv) == 0:
        parser.print_help()
        return 2

    # process options
    opts = parser.parse_args(argv)

    # Check that one action is specified
    actions = ["refresh", "submit", "search", "generate", "autosubmit", "download"]

    haveAction = False
    for action in actions:
        if getattr(opts, action):
            if haveAction:
                print("Error: Cannot specify multiple actions at the same time", file=sys.stderr)
                return 2
            haveAction = True

    if not haveAction:
        print("Error: Must specify an action", file=sys.stderr)
        return 2

    # In autosubmit mode, we try to open a configuration file for the binary specified
    # on the command line. It should contain the binary-specific settings for submitting.
    if opts.autosubmit:
        if not opts.rargs:
            print("Error: Action --autosubmit requires test arguments to be specified", file=sys.stderr)
            return 2

        # Store the binary candidate only if --binary wasn't also specified
        if not opts.binary:
            opts.binary = opts.rargs[0]

        # We also need to check that (apart from the binary), there is only one file on
        # the command line (the testcase), if it hasn't been explicitely specified.
        testcase = opts.testcase
        testcaseidx = None
        if testcase is None:
            for idx, arg in enumerate(opts.rargs[1:]):
                if os.path.exists(arg):
                    if testcase:
                        print("Error: Multiple potential testcases specified on command line. Must explicitely specify test using --testcase.")
                        return 2
                    testcase = arg
                    testcaseidx = idx

    # Either --autosubmit was specified, or someone specified --binary manually
    # Check that the binary actually exists
    if opts.binary and not os.path.exists(opts.binary):
        print("Error: Specified binary does not exist: %s" % opts.binary)
        return 2

    stdout = None
    stderr = None
    crashdata = None
    crashInfo = None
    args = None
    env = None
    metadata = {}

    if opts.search or opts.generate or opts.submit or opts.autosubmit:
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))

        if opts.autosubmit:
            # Try to automatically get arguments from the command line
            # If the testcase is not the last argument, leave it in the
            # command line arguments and replace it with a generic placeholder.
            if testcaseidx == len(opts.rargs[1:]) - 1:
                args = opts.rargs[1:-1]
            else:
                args = opts.rargs[1:]
                if testcaseidx is not None:
                    args[testcaseidx] = "TESTFILE"
        else:
            if opts.args:
                args = [arg.replace('\\', '') for arg in opts.args]

        if opts.env:
            env = dict(kv.split('=', 1) for kv in opts.env)

        # Start without any ProgramConfiguration
        configuration = None

        # If we have a binary, try using that to create our ProgramConfiguration
        if opts.binary:
            configuration = ProgramConfiguration.fromBinary(opts.binary)
            if configuration:
                if env:
                    configuration.addEnvironmentVariables(env)
                if args:
                    configuration.addProgramArguments(args)
                if metadata:
                    configuration.addMetadata(metadata)

        # If configuring through binary failed, try to manually create
        # ProgramConfiguration from command line arguments
        if configuration is None:
            if opts.platform is None or opts.product is None or opts.os is None:
                print("Error: Must specify/configure at least --platform, --product and --os", file=sys.stderr)
                return 2
            configuration = ProgramConfiguration(opts.product, opts.platform, opts.os, opts.product_version, env, args, metadata)

        if not opts.autosubmit:
            if opts.stderr is None and opts.crashdata is None:
                print("Error: Must specify at least either --stderr or --crashdata file", file=sys.stderr)
                return 2

            if opts.stdout:
                with open(opts.stdout) as f:
                    stdout = f.read()
            if opts.stderr:
                with open(opts.stderr) as f:
                    stderr = f.read()
            if opts.crashdata:
                with open(opts.crashdata) as f:
                    crashdata = f.read()

            crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration, auxCrashData=crashdata)
            if opts.testcase:
                (testCaseData, isBinary) = Collector.read_testcase(opts.testcase)
                if not isBinary:
                    crashInfo.testcase = testCaseData

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto, serverauthtoken, opts.clientid, opts.tool)

    if opts.refresh:
        collector.refresh()
        return 0

    if opts.submit:
        testcase = opts.testcase
        collector.submit(crashInfo, testcase, opts.testcasequality, metadata)
        return 0

    if opts.search:
        (sig, metadata) = collector.search(crashInfo)
        if sig is None:
            print("No match found")
            return 3
        print(sig)
        if metadata:
            print(json.dumps(metadata, indent=4))
        return 0

    if opts.generate:
        sigFile = collector.generate(crashInfo, opts.forcecrashaddr, opts.forcecrashinst, opts.numframes)
        if not sigFile:
            print("Failed to generate a signature for the given crash information.", file=sys.stderr)
            return 2
        print(sigFile)
        return 0

    if opts.autosubmit:
        runner = AutoRunner.fromBinaryArgs(opts.rargs[0], opts.rargs[1:])
        if runner.run():
            crashInfo = runner.getCrashInfo(configuration)
            collector.submit(crashInfo, testcase, opts.testcasequality, metadata)
        else:
            print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)
            return 2

    if opts.download:
        (retFile, retJSON) = collector.download(opts.download)
        if not retFile:
            print("Specified crash entry does not have a testcase", file=sys.stderr)
            return 2

        if "args" in retJSON and retJSON["args"]:
            args = json.loads(retJSON["args"])
            print("Command line arguments: %s" % " ".join(args))
            print("")

        if "env" in retJSON and retJSON["env"]:
            env = json.loads(retJSON["env"])
            # Bug fix: was print("Environment variables: %s", ...) — the joined
            # string was passed as a second positional argument instead of being
            # %-formatted, so the literal "%s" was printed.
            print("Environment variables: %s" % " ".join(["%s = %s" % (k, v) for (k, v) in env.items()]))
            print("")

        if "metadata" in retJSON and retJSON["metadata"]:
            metadata = json.loads(retJSON["metadata"])
            print("== Metadata ==")
            for k, v in metadata.items():
                print("%s = %s" % (k, v))
            print("")

        print(retFile)
        return 0
def __init__(self, options, runthis, logPrefix, in_compare_jit, env=None):  # pylint: disable=too-complex
    # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements
    """Run the shell command, harvest its logs, and classify the outcome.

    Python 2/3-compatible variant: executes *runthis* under Lithium's
    timed_run, reads the -out/-err/-crash logs next to *logPrefix*, assigns
    a JS_* severity level, builds a FuzzManager CrashInfo, matches against
    known signatures, and stores all results as attributes on self.

    NOTE(review): presumably the ShellResult constructor from funfuzz's
    js_interesting module — the enclosing class is not visible in this chunk.
    """
    # If Lithium uses this as an interestingness test, logPrefix is likely not a Path object, so make it one.
    logPrefix = Path(logPrefix)
    pathToBinary = runthis[0].expanduser().resolve()  # pylint: disable=invalid-name
    # This relies on the shell being a local one from compile_shell:
    # Ignore trailing ".exe" in Win, also abspath makes it work w/relative paths like "./js"
    # pylint: disable=invalid-name
    assert pathToBinary.with_suffix(".fuzzmanagerconf").is_file()
    pc = ProgramConfiguration.fromBinary(
        str(pathToBinary.parent / pathToBinary.stem))
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        # Prepend the valgrind invocation (plus suppressions) to the command.
        runthis = (inspect_shell.constructVgCmdList(
            errorCode=VALGRIND_ERROR_EXIT_CODE) + valgrindSuppressions() +
                   runthis)

    timed_run_kw = {}
    # NOTE(review): unlike the newer variant, this shares os.environ directly
    # (no deepcopy) when env is not given — confirm no mutation downstream.
    timed_run_kw["env"] = (env or os.environ)
    if not platform.system() == "Windows":
        # Raise resource limits (e.g. core size) in the child before exec.
        timed_run_kw["preexec_fn"] = set_ulimit

    lithium_logPrefix = str(logPrefix).encode("utf-8")
    # Total hack to make Python 2/3 work with Lithium
    if sys.version_info.major == 3 and isinstance(lithium_logPrefix, b"".__class__):
        # pylint: disable=redefined-variable-type
        lithium_logPrefix = lithium_logPrefix.decode("utf-8", errors="replace")

    # logPrefix should be a string for timed_run in Lithium version 0.2.1 to work properly, apparently
    runinfo = timed_run.timed_run(
        [str(x) for x in runthis],  # Convert all Paths/bytes to strings for Lithium
        options.timeout,
        lithium_logPrefix,
        **timed_run_kw)

    lev = JS_FINE
    issues = []
    auxCrashData = []  # pylint: disable=invalid-name

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and "readlines" everything into memory.
    out_log = (logPrefix.parent / (logPrefix.stem + "-out")).with_suffix(".txt")
    with io.open(str(out_log), "r", encoding="utf-8", errors="replace") as f:
        out = f.readlines()
    err_log = (logPrefix.parent / (logPrefix.stem + "-err")).with_suffix(".txt")
    with io.open(str(err_log), "r", encoding="utf-8", errors="replace") as f:
        err = f.readlines()

    if options.valgrind and runinfo.return_code == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        valgrindErrorPrefix = "==" + str(runinfo.pid) + "=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(
                    valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timed_run.CRASHED:
        if os_ops.grab_crash_log(runthis[0], runinfo.pid, logPrefix, True):
            crash_log = (logPrefix.parent / (logPrefix.stem + "-crash")).with_suffix(".txt")
            with io.open(str(crash_log), "r", encoding="utf-8",
                         errors="replace") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif file_manipulation.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.return_code == 0 and not in_compare_jit:
        # We might have(??) run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(
                out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append("[Non-crash bug] " + issue)

    activated = False  # Turn on when trying to report *reliable* testcases that do not have a coredump
    # On Linux, fall back to run testcase via gdb using --args if core file data is unavailable
    # Note that this second round of running uses a different fuzzSeed as the initial if default jsfunfuzz is run
    # We should separate this out, i.e. running jsfunfuzz within a debugger, only if core dumps cannot be generated
    if activated and platform.system() == "Linux" and which(
            "gdb") and not auxCrashData and not in_compare_jit:
        print(
            "Note: No core file found on Linux - falling back to run via gdb"
        )
        extracted_gdb_cmds = ["-ex", "run"]
        with io.open(str(
                Path(__file__).parent.parent / "util" / "gdb_cmds.txt"),
                     "r",
                     encoding="utf-8",
                     errors="replace") as f:
            # Convert each non-comment, non-echo line of gdb_cmds.txt into a
            # "-ex <cmd>" pair for batch gdb.
            for line in f:
                if line.rstrip() and not line.startswith(
                        "#") and not line.startswith("echo"):
                    extracted_gdb_cmds.append("-ex")
                    extracted_gdb_cmds.append("%s" % line.rstrip())
        no_main_log_gdb_log = subprocess.run(
            (["gdb", "-n", "-batch"] + extracted_gdb_cmds + ["--args"] +
             [str(x) for x in runthis]),
            check=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE)
        auxCrashData = no_main_log_gdb_log.stdout

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = CrashInfo.CrashInfo.fromRawCrashData(
        out, err, pc, auxCrashData=auxCrashData)
    create_collector.printCrashInfo(crashInfo)
    # We only care about crashes and assertion failures on shells with no symbols
    # Note that looking out for the Assertion failure message is highly SpiderMonkey-specific
    if not isinstance(crashInfo, CrashInfo.NoCrashInfo) or \
            "Assertion failure: " in str(crashInfo.rawStderr) or \
            "Segmentation fault" in str(crashInfo.rawStderr) or \
            "Bus error" in str(crashInfo.rawStderr):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

    try:
        match = options.collector.search(crashInfo)
        if match[0] is not None:
            # Known signature — downgrade to uninteresting.
            create_collector.printMatchingSignature(match)
            lev = JS_FINE
    except UnicodeDecodeError:  # Sometimes FM throws due to unicode issues
        print(
            "Note: FuzzManager is throwing a UnicodeDecodeError, signature matching skipped"
        )
        match = False

    print("%s | %s" % (logPrefix,
                       summaryString(issues, lev, runinfo.elapsedtime)))

    if lev != JS_FINE:
        # Persist a human-readable summary next to the other logs.
        summary_log = (logPrefix.parent / (logPrefix.stem + "-summary")).with_suffix(".txt")
        with io.open(str(summary_log), "w", encoding="utf-8",
                     errors="replace") as f:
            f.writelines([
                "Number: " + str(logPrefix) + "\n",
                "Command: " + " ".join(quote(str(x)) for x in runthis) + "\n"
            ] + ["Status: " + i + "\n" for i in issues])

    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo  # pylint: disable=invalid-name
    self.match = match
    self.runinfo = runinfo
    self.return_code = runinfo.return_code
def restore_object(self, attrs, instance=None):
    '''
    Create a CrashEntry instance based on the given dictionary of values
    received. We need to unflatten foreign relationships like product,
    platform, os and client and create the foreign objects on the fly
    if they don't exist in our database yet.
    '''
    if instance:
        # Not allowed to update existing instances
        return instance

    # Pop the flat foreign-relation values out of attrs; they are replaced
    # by model instances further down.
    product = attrs.pop('product', None)
    product_version = attrs.pop('product_version', None)
    platform = attrs.pop('platform', None)
    # NOTE(review): this local deliberately(?) shadows the `os` module for the
    # rest of this method — confirm nothing below needs the module.
    os = attrs.pop('os', None)
    client = attrs.pop('client', None)
    tool = attrs.pop('tool', None)
    testcase = attrs.pop('testcase', None)
    testcase_ext = attrs.pop('testcase_ext', None)
    testcase_quality = attrs.pop('testcase_quality', 0)
    testcase_isbinary = attrs.pop('testcase_isbinary', False)

    # Parse the incoming data using the crash signature package from FTB
    configuration = ProgramConfiguration(product, platform, os, product_version)
    crashInfo = CrashInfo.fromRawCrashData(attrs['rawStdout'], attrs['rawStderr'], configuration, attrs['rawCrashData'])

    # Populate certain fields here from the CrashInfo object we just got
    if crashInfo.crashAddress != None:
        attrs['crashAddress'] = hex(crashInfo.crashAddress)
    attrs['shortSignature'] = crashInfo.createShortSignature()

    def createOrGetModelByName(model, attrs):
        '''
        Generically determine if the given model with the given attributes
        already exists in our database. If so, return that object, otherwise
        create it on the fly.

        @type model: Class
        @param model: The model to use for filtering and instantiating

        @type attrs: dict
        @param attrs: Dictionary of attributes to use for filtering/instantiating

        @rtype: model
        @return The model instance
        '''
        objs = model.objects.filter(**attrs)

        if len(objs) > 1:
            raise MultipleObjectsReturned("Multiple objects with same keyword combination in database!")

        if len(objs) == 0:
            dbobj = model(**attrs)
            dbobj.save()
            return dbobj
        else:
            return objs.first()

    # Get or instantiate objects for product, platform, os, client and tool
    attrs['product'] = createOrGetModelByName(Product, { 'name' : product, 'version' : product_version })
    attrs['platform'] = createOrGetModelByName(Platform, { 'name' : platform })
    attrs['os'] = createOrGetModelByName(OS, { 'name' : os })
    attrs['client'] = createOrGetModelByName(Client, { 'name' : client })
    attrs['tool'] = createOrGetModelByName(Tool, { 'name' : tool })

    # If a testcase is supplied, create a testcase object and store it
    if testcase:
        if testcase_ext == None:
            raise RuntimeError("Must provide testcase extension when providing testcase")

        if testcase_isbinary:
            # Binary testcases arrive base64-encoded over the wire.
            testcase = base64.b64decode(testcase)

        # Name the stored file after the SHA-1 of its content.
        # NOTE(review): str()/repr() without .encode() — this path appears to
        # be Python-2-only; confirm before running on Python 3.
        h = hashlib.new('sha1')
        if testcase_isbinary:
            h.update(str(testcase))
        else:
            h.update(repr(testcase))

        dbobj = TestCase(quality=testcase_quality, isBinary=testcase_isbinary, size=len(testcase))
        dbobj.test.save("%s.%s" % (h.hexdigest(), testcase_ext), ContentFile(testcase))
        dbobj.save()
        attrs['testcase'] = dbobj
    else:
        attrs['testcase'] = None

    # Create our CrashEntry instance
    return super(CrashEntrySerializer, self).restore_object(attrs, instance)
def runTest(self):
    """Check that the crash address is parsed correctly from GDB regression trace 2."""
    config = ProgramConfiguration("test", "x86", "linux")
    crashInfo2 = GDBCrashInfo([], gdbRegressionTrace2.splitlines(), config)
    # The Python 2 long suffix ('L') is a SyntaxError on Python 3; the plain
    # literal compares equal on both versions.
    self.assertEqual(crashInfo2.crashAddress, 0xfffd579c)
def newSignature(request):
    """
    Django view for creating a new bucket signature.

    POST creates the bucket from the submitted form fields. GET optionally
    pre-fills the form with a signature proposed from an existing crash entry
    (via the 'crashid' query parameter). Any other method is rejected.
    """
    if request.method == 'POST':
        # TODO: FIXME: Update bug here as well
        bucket = Bucket(
            signature=request.POST['signature'],
            shortDescription=request.POST['shortDescription'],
            frequent="frequent" in request.POST
        )
        return __handleSignaturePost(request, bucket)
    elif request.method == 'GET':
        if 'crashid' in request.GET:
            crashEntry = get_object_or_404(CrashEntry, pk=request.GET['crashid'])

            # Rebuild the FTB crash info from the stored raw data so we can
            # derive a signature proposal from it.
            configuration = ProgramConfiguration(crashEntry.product.name,
                                                 crashEntry.platform.name,
                                                 crashEntry.os.name,
                                                 crashEntry.product.version)
            crashInfo = CrashInfo.fromRawCrashData(crashEntry.rawStdout,
                                                   crashEntry.rawStderr,
                                                   configuration,
                                                   crashEntry.rawCrashData)

            # Defaults, overridable via query parameters below.
            maxStackFrames = 8
            forceCrashInstruction = False
            forceCrashAddress = True
            errorMsg = None

            if 'stackframes' in request.GET:
                maxStackFrames = int(request.GET['stackframes'])
            if 'forcecrashaddress' in request.GET:
                forceCrashAddress = bool(int(request.GET['forcecrashaddress']))
            if 'forcecrashinstruction' in request.GET:
                forceCrashInstruction = bool(int(request.GET['forcecrashinstruction']))

            # First try to create the signature with the crash address included.
            # However, if that fails, try without forcing the crash signature.
            proposedSignature = crashInfo.createCrashSignature(
                forceCrashAddress=forceCrashAddress,
                forceCrashInstruction=forceCrashInstruction,
                maxFrames=maxStackFrames
            )
            if proposedSignature is None:
                errorMsg = crashInfo.failureReason
                proposedSignature = crashInfo.createCrashSignature(maxFrames=maxStackFrames)

            proposedSignature = str(proposedSignature)
            proposedShortDesc = crashInfo.createShortSignature()

            data = {
                'new': True,
                'bucket': {
                    'pk': None,
                    'bug': None,
                    'signature': proposedSignature,
                    'shortDescription': proposedShortDesc
                },
                'error_message': errorMsg
            }
        else:
            data = {'new': True}
    else:
        raise SuspiciousOperation

    return render(request, 'signatures/edit.html', data)
def runTest(self):
    """Check that the crash address is parsed correctly from GDB regression trace 3."""
    config = ProgramConfiguration("test", "x86-64", "linux")
    crashInfo3 = GDBCrashInfo([], gdbRegressionTrace3.splitlines(), config)
    # The Python 2 long suffix ('L') is a SyntaxError on Python 3; the plain
    # literal compares equal on both versions.
    self.assertEqual(crashInfo3.crashAddress, 0x7fffffffffff)
# NOTE(review): the repeated `if __name__ == '__main__':` guard per statement
# group suggests this file was exported from a notebook; each guard corresponds
# to one cell. The leading `else:` belongs to an `if` above this chunk.
else:
    print("Move along, nothing to see...")

### Program Configurations

if __name__ == '__main__':
    print('\n### Program Configurations')

if __name__ == '__main__':
    # Make the vendored FuzzManager checkout importable.
    sys.path.append('FuzzManager')

if __name__ == '__main__':
    from FTB.ProgramConfiguration import ProgramConfiguration  # type: ignore

if __name__ == '__main__':
    # Reads the binary's .fuzzmanagerconf sitting next to the executable.
    configuration = ProgramConfiguration.fromBinary(
        'simply-buggy/simple-crash')
    # Bare expression: displayed as cell output in the original notebook.
    (configuration.product, configuration.platform)

### Crash Info

if __name__ == '__main__':
    print('\n### Crash Info')

if __name__ == '__main__':
    from FTB.Signatures.CrashInfo import CrashInfo  # type: ignore

if __name__ == '__main__':
    # Run the crashing target, capturing both output streams for CrashInfo.
    cmd = ["simply-buggy/simple-crash"]
    result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
def __init__(self, options, runthis, logPrefix, inCompareJIT):
    # Run a JS shell command under timedRun, classify the outcome into a
    # "level" (lev), collect issues, and build a FuzzManager CrashInfo.
    # NOTE: Python 2 code (uses the print statement below).
    pathToBinary = runthis[0]
    # This relies on the shell being a local one from compileShell.py:
    pc = ProgramConfiguration.fromBinary(pathToBinary.split('.')[0])  # Ignore trailing ".exe" in Win
    pc.addProgramArguments(runthis[1:-1])

    if options.valgrind:
        # Prepend the valgrind invocation and suppression flags to the command.
        runthis = (
            inspectShell.constructVgCmdList(errorCode=VALGRIND_ERROR_EXIT_CODE) +
            valgrindSuppressions(options.knownPath) +
            runthis)

    preexec_fn = ulimitSet if os.name == 'posix' else None
    runinfo = timedRun.timed_run(runthis, options.timeout, logPrefix, preexec_fn=preexec_fn)

    lev = JS_FINE
    issues = []
    auxCrashData = []

    # FuzzManager expects a list of strings rather than an iterable, so bite the
    # bullet and 'readlines' everything into memory.
    with open(logPrefix + "-out.txt") as f:
        out = f.readlines()
    with open(logPrefix + "-err.txt") as f:
        err = f.readlines()

    if options.valgrind and runinfo.rc == VALGRIND_ERROR_EXIT_CODE:
        issues.append("valgrind reported an error")
        lev = max(lev, JS_VG_AMISS)
        # Valgrind prefixes its own diagnostics with "==<pid>==".
        valgrindErrorPrefix = "==" + str(runinfo.pid) + "=="
        for line in err:
            if valgrindErrorPrefix and line.startswith(valgrindErrorPrefix):
                issues.append(line.rstrip())
    elif runinfo.sta == timedRun.CRASHED:
        # Try to grab an OS-level crash log to use as auxiliary crash data.
        if sps.grabCrashLog(runthis[0], runinfo.pid, logPrefix, True):
            with open(logPrefix + "-crash.txt") as f:
                auxCrashData = [line.strip() for line in f.readlines()]
    elif detect_malloc_errors.amiss(logPrefix):
        issues.append("malloc error")
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)
    elif runinfo.rc == 0 and not inCompareJIT:
        # We might have run jsfunfuzz directly, so check for special kinds of bugs
        for line in out:
            if line.startswith("Found a bug: ") and not ("NestTest" in line and oomed(err)):
                lev = JS_DECIDED_TO_EXIT
                issues.append(line.rstrip())
        if options.shellIsDeterministic and not understoodJsfunfuzzExit(out, err) and not oomed(err):
            issues.append("jsfunfuzz didn't finish")
            lev = JS_DID_NOT_FINISH

    # Copy non-crash issues to where FuzzManager's "AssertionHelper.py" can see it.
    if lev != JS_FINE:
        for issue in issues:
            err.append("[Non-crash bug] " + issue)

    # Finally, make a CrashInfo object and parse stack traces for asan/crash/assertion bugs
    crashInfo = CrashInfo.CrashInfo.fromRawCrashData(out, err, pc, auxCrashData=auxCrashData)

    createCollector.printCrashInfo(crashInfo)
    # Anything FuzzManager recognized as a real crash/assertion bumps the level.
    if not isinstance(crashInfo, CrashInfo.NoCrashInfo):
        lev = max(lev, JS_NEW_ASSERT_OR_CRASH)

    # A match against a known signature means this is a known issue: demote to fine.
    match = options.collector.search(crashInfo)
    if match[0] is not None:
        createCollector.printMatchingSignature(match)
        lev = JS_FINE

    print logPrefix + " | " + summaryString(issues, lev, runinfo.elapsedtime)

    if lev != JS_FINE:
        fileManipulation.writeLinesToFile(
            ['Number: ' + logPrefix + '\n',
             'Command: ' + sps.shellify(runthis) + '\n'] +
            ['Status: ' + i + "\n" for i in issues],
            logPrefix + '-summary.txt')

    # Expose the results on the instance for callers (e.g. compareJIT).
    self.lev = lev
    self.out = out
    self.err = err
    self.issues = issues
    self.crashInfo = crashInfo
    self.match = match
    self.runinfo = runinfo
    self.rc = runinfo.rc
def main(args=None):
    '''
    Command line entry point for the FuzzManager Collector tool.

    Parses the command line, builds a ProgramConfiguration / CrashInfo from
    the supplied files and options, then performs exactly one of the mutually
    exclusive actions (refresh, submit, search, generate, autosubmit,
    download, download-all, get-clientid).

    @param args: Optional argument list for testing; defaults to sys.argv.
    @return: Process exit code (0 on success, non-zero on failure).
    '''
    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version',
                        version='%s v%s (%s)' % (__file__, __version__, __updated__))

    # Crash information
    parser.add_argument("--stdout", help="File containing STDOUT data", metavar="FILE")
    parser.add_argument("--stderr", help="File containing STDERR data", metavar="FILE")
    parser.add_argument("--crashdata", help="File containing external crash data", metavar="FILE")

    # Actions
    action_group = parser.add_argument_group("Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--refresh", action='store_true', help="Perform a signature refresh")
    actions.add_argument("--submit", action='store_true', help="Submit a signature to the server")
    actions.add_argument("--search", action='store_true',
                         help="Search cached signatures for the given crash")
    actions.add_argument("--generate", action='store_true',
                         help="Create a (temporary) local signature in the cache directory")
    actions.add_argument("--autosubmit", action='store_true',
                         help=("Go into auto-submit mode. In this mode, all remaining arguments are interpreted "
                               "as the crashing command. This tool will automatically obtain GDB crash information "
                               "and submit it."))
    actions.add_argument("--download", type=int,
                         help="Download the testcase for the specified crash entry", metavar="ID")
    actions.add_argument("--download-all", type=int,
                         help="Download all testcases for the specified signature entry", metavar="ID")
    actions.add_argument("--get-clientid", action='store_true',
                         help="Print the client ID used when submitting issues")

    # Settings
    parser.add_argument("--sigdir", help="Signature cache directory", metavar="DIR")
    parser.add_argument("--serverhost", help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport", type=int, help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid", help="Client ID to use when submitting issues", metavar="ID")
    parser.add_argument("--platform", help="Platform this crash appeared on",
                        metavar="(x86|x86-64|arm)")
    parser.add_argument("--product", help="Product this crash appeared on", metavar="PRODUCT")
    parser.add_argument("--productversion", dest="product_version",
                        help="Product version this crash appeared on", metavar="VERSION")
    parser.add_argument("--os", help="OS this crash appeared on",
                        metavar="(windows|linux|macosx|b2g|android)")
    parser.add_argument("--tool", help="Name of the tool that found this issue", metavar="NAME")
    parser.add_argument('--args', nargs='+', type=str,
                        help="List of program arguments. Backslashes can be used for escaping and are stripped.")
    parser.add_argument('--env', nargs='+', type=str,
                        help="List of environment variables in the form 'KEY=VALUE'")
    parser.add_argument('--metadata', nargs='+', type=str,
                        help="List of metadata variables in the form 'KEY=VALUE'")
    parser.add_argument("--binary", help="Binary that has a configuration file for reading",
                        metavar="BINARY")
    parser.add_argument("--testcase", help="File containing testcase", metavar="FILE")
    parser.add_argument("--testcasequality", default=0, type=int,
                        help="Integer indicating test case quality (%(default)s is best and default)",
                        metavar="VAL")
    parser.add_argument("--testcasesize", type=int,
                        help="Integer indicating test case size (default is size of testcase data)",
                        metavar="SIZE")

    # Options that affect how signatures are generated
    parser.add_argument("--forcecrashaddr", action='store_true',
                        help="Force including the crash address into the signature")
    parser.add_argument("--forcecrashinst", action='store_true',
                        help="Force including the crash instruction into the signature (GDB only)")
    parser.add_argument("--numframes", default=8, type=int,
                        help="How many frames to include into the signature (default: %(default)s)")

    parser.add_argument('rargs', nargs=argparse.REMAINDER)

    # process options
    opts = parser.parse_args(args=args)

    # In autosubmit mode, we try to open a configuration file for the binary specified
    # on the command line. It should contain the binary-specific settings for submitting.
    if opts.autosubmit:
        if not opts.rargs:
            parser.error("Action --autosubmit requires test arguments to be specified")

        # Store the binary candidate only if --binary wasn't also specified
        if not opts.binary:
            opts.binary = opts.rargs[0]

        # We also need to check that (apart from the binary), there is only one file on the command line
        # (the testcase), if it hasn't been explicitely specified.
        testcase = opts.testcase
        testcaseidx = None
        if testcase is None:
            for idx, arg in enumerate(opts.rargs[1:]):
                if os.path.exists(arg):
                    if testcase:
                        parser.error("Multiple potential testcases specified on command line. "
                                     "Must explicitly specify test using --testcase.")
                    testcase = arg
                    testcaseidx = idx

    # Either --autosubmit was specified, or someone specified --binary manually
    # Check that the binary actually exists
    if opts.binary and not os.path.exists(opts.binary):
        parser.error("Error: Specified binary does not exist: %s" % opts.binary)

    stdout = None
    stderr = None
    crashdata = None
    crashInfo = None
    # NOTE: intentionally reuses the `args` parameter name as the program
    # argument list from here on; the original parameter is no longer needed.
    args = None
    env = None
    metadata = {}

    if opts.search or opts.generate or opts.submit or opts.autosubmit:
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))

        if opts.autosubmit:
            # Try to automatically get arguments from the command line
            # If the testcase is not the last argument, leave it in the
            # command line arguments and replace it with a generic placeholder.
            if testcaseidx == len(opts.rargs[1:]) - 1:
                args = opts.rargs[1:-1]
            else:
                args = opts.rargs[1:]
                if testcaseidx is not None:
                    args[testcaseidx] = "TESTFILE"
        else:
            if opts.args:
                args = [arg.replace('\\', '') for arg in opts.args]

        if opts.env:
            env = dict(kv.split('=', 1) for kv in opts.env)

        # Start without any ProgramConfiguration
        configuration = None

        # If we have a binary, try using that to create our ProgramConfiguration
        if opts.binary:
            configuration = ProgramConfiguration.fromBinary(opts.binary)
            if configuration:
                if env:
                    configuration.addEnvironmentVariables(env)
                if args:
                    configuration.addProgramArguments(args)
                if metadata:
                    configuration.addMetadata(metadata)

        # If configuring through binary failed, try to manually create ProgramConfiguration
        # from command line arguments
        if configuration is None:
            if opts.platform is None or opts.product is None or opts.os is None:
                parser.error("Must specify/configure at least --platform, --product and --os")
            configuration = ProgramConfiguration(opts.product, opts.platform, opts.os,
                                                 opts.product_version, env, args, metadata)

        if not opts.autosubmit:
            if opts.stderr is None and opts.crashdata is None:
                parser.error("Must specify at least either --stderr or --crashdata file")

            if opts.stdout:
                with open(opts.stdout) as f:
                    stdout = f.read()
            if opts.stderr:
                with open(opts.stderr) as f:
                    stderr = f.read()
            if opts.crashdata:
                with open(opts.crashdata) as f:
                    crashdata = f.read()

            crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration,
                                                   auxCrashData=crashdata)
            if opts.testcase:
                (testCaseData, isBinary) = Collector.read_testcase(opts.testcase)
                if not isBinary:
                    crashInfo.testcase = testCaseData

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto,
                          serverauthtoken, opts.clientid, opts.tool)

    if opts.refresh:
        collector.refresh()
        return 0

    if opts.submit:
        testcase = opts.testcase
        collector.submit(crashInfo, testcase, opts.testcasequality, opts.testcasesize, metadata)
        return 0

    if opts.search:
        (sig, metadata) = collector.search(crashInfo)
        if sig is None:
            print("No match found", file=sys.stderr)
            return 3
        print(sig)
        if metadata:
            print(json.dumps(metadata, indent=4))
        return 0

    if opts.generate:
        sigFile = collector.generate(crashInfo, opts.forcecrashaddr, opts.forcecrashinst,
                                     opts.numframes)
        if not sigFile:
            print("Failed to generate a signature for the given crash information.",
                  file=sys.stderr)
            return 1
        print(sigFile)
        return 0

    if opts.autosubmit:
        runner = AutoRunner.fromBinaryArgs(opts.rargs[0], opts.rargs[1:])
        if runner.run():
            crashInfo = runner.getCrashInfo(configuration)
            collector.submit(crashInfo, testcase, opts.testcasequality, opts.testcasesize, metadata)
        else:
            print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)
            return 1

    if opts.download:
        (retFile, retJSON) = collector.download(opts.download)
        if not retFile:
            print("Specified crash entry does not have a testcase", file=sys.stderr)
            return 1

        if "args" in retJSON and retJSON["args"]:
            args = json.loads(retJSON["args"])
            print("Command line arguments: %s" % " ".join(args))
            print("")

        if "env" in retJSON and retJSON["env"]:
            env = json.loads(retJSON["env"])
            # BUGFIX: previously the joined string was passed as a second
            # argument to print() instead of filling the %s placeholder, so
            # the literal "%s" was printed. Use %-formatting as intended.
            print("Environment variables: %s" % " ".join("%s = %s" % (k, v)
                                                         for (k, v) in env.items()))
            print("")

        if "metadata" in retJSON and retJSON["metadata"]:
            metadata = json.loads(retJSON["metadata"])
            print("== Metadata ==")
            for k, v in metadata.items():
                print("%s = %s" % (k, v))
            print("")

        print(retFile)
        return 0

    if opts.download_all:
        downloaded = False
        for result in collector.download_all(opts.download_all):
            downloaded = True
            print(result)
        if not downloaded:
            print("Specified signature does not have any testcases", file=sys.stderr)
            return 1
        return 0

    if opts.get_clientid:
        print(collector.clientId)
        return 0
def compareLevel(jsEngine, flags, infilename, logPrefix, options, showDetailedDiffs, quickMode):
    # Run the same testcase under several flag combinations and compare
    # outputs to find JIT correctness bugs. NOTE: Python 2 code (print statements).
    # options dict must be one we can pass to jsInteresting.ShellResult
    # we also use it directly for knownPath, timeout, and collector
    # Return: (lev, crashInfo) or (jsInteresting.JS_FINE, None)

    combos = shellFlags.basicFlagSets(jsEngine)

    if quickMode:
        # Only used during initial fuzzing. Allowed to have false negatives.
        combos = [combos[0]]

    if len(flags):
        combos.append(flags)

    commands = [[jsEngine] + combo + [infilename] for combo in combos]

    for i in range(0, len(commands)):
        prefix = logPrefix + "-r" + str(i)
        command = commands[i]
        r = jsInteresting.ShellResult(options, command, prefix, True)

        oom = jsInteresting.oomed(r.err)
        r.err = ignoreSomeOfStderr(r.err)

        if (r.rc == 1 or r.rc == 2) and (anyLineContains(r.out, '[[script] scriptArgs*]') or anyLineContains(r.err, '[scriptfile] [scriptarg...]')):
            # The shell rejected a flag combination with its usage text.
            print "Got usage error from:"
            print " " + sps.shellify(command)
            # The first combo is flagless and must always be accepted.
            assert i > 0
            jsInteresting.deleteLogs(prefix)
        elif r.lev > jsInteresting.JS_OVERALL_MISMATCH:
            # would be more efficient to run lithium on one or the other, but meh
            print infilename + " | " + jsInteresting.summaryString(r.issues + ["compareJIT found a more serious bug"], r.lev, r.runinfo.elapsedtime)
            with open(logPrefix + "-summary.txt", 'wb') as f:
                f.write('\n'.join(r.issues + [sps.shellify(command), "compareJIT found a more serious bug"]) + '\n')
            print " " + sps.shellify(command)
            return (r.lev, r.crashInfo)
        elif r.lev != jsInteresting.JS_FINE or r.rc != 0:
            print infilename + " | " + jsInteresting.summaryString(r.issues + ["compareJIT is not comparing output, because the shell exited strangely"], r.lev, r.runinfo.elapsedtime)
            print " " + sps.shellify(command)
            jsInteresting.deleteLogs(prefix)
            if i == 0:
                return (jsInteresting.JS_FINE, None)
        elif oom:
            # If the shell or python hit a memory limit, we consider the rest of the computation
            # "tainted" for the purpose of correctness comparison.
            message = "compareJIT is not comparing output: OOM"
            print infilename + " | " + jsInteresting.summaryString(r.issues + [message], r.lev, r.runinfo.elapsedtime)
            jsInteresting.deleteLogs(prefix)
            if i == 0:
                return (jsInteresting.JS_FINE, None)
        elif i == 0:
            # Stash output from this run (the first one), so for subsequent runs, we can compare against it.
            (r0, prefix0) = (r, prefix)
        else:
            # Compare the output of this run (r.out) to the output of the first run (r0.out), etc.

            def fpuOptionDisabledAsmOnOneSide(fpuAsmMsg):
                # True when the asm/fpu diagnostic differs only because one
                # side ran with --no-fpu; such stderr diffs are expected.
                fpuOptionDisabledAsm = fpuAsmMsg in r0.err or fpuAsmMsg in r.err
                fpuOptionDiffers = (("--no-fpu" in commands[0]) != ("--no-fpu" in command))
                return fpuOptionDisabledAsm and fpuOptionDiffers

            def optionDisabledAsmOnOneSide():
                # Same idea for --no-asmjs: ignore expected asm.js diagnostics.
                asmMsg = "asm.js type error: Disabled by javascript.options.asmjs"
                optionDisabledAsm = anyLineContains(r0.err, asmMsg) or anyLineContains(r.err, asmMsg)
                optionDiffers = (("--no-asmjs" in commands[0]) != ("--no-asmjs" in command))
                return optionDisabledAsm and optionDiffers

            mismatchErr = (r.err != r0.err and
                           # --no-fpu (on debug x86_32 only) turns off asm.js compilation, among other things.
                           # This should only affect asm.js diagnostics on stderr.
                           not fpuOptionDisabledAsmOnOneSide("asm.js type error: Disabled by lack of floating point support") and
                           # And also wasm stuff. See bug 1243031.
                           not fpuOptionDisabledAsmOnOneSide("WebAssembly is not supported on the current device") and
                           not optionDisabledAsmOnOneSide())
            mismatchOut = (r.out != r0.out)

            if mismatchErr or mismatchOut:
                # Generate a short summary for stdout and a long summary for a "*-summary.txt" file.
                rerunCommand = sps.shellify(['~/funfuzz/js/compareJIT.py', "--flags="+' '.join(flags),
                                             "--timeout="+str(options.timeout), options.knownPath,
                                             jsEngine, os.path.basename(infilename)])
                (summary, issues) = summarizeMismatch(mismatchErr, mismatchOut, prefix0, prefix)
                summary = " " + sps.shellify(commands[0]) + "\n " + sps.shellify(command) + "\n\n" + summary
                with open(logPrefix + "-summary.txt", 'wb') as f:
                    f.write(rerunCommand + "\n\n" + summary)
                print infilename + " | " + jsInteresting.summaryString(issues, jsInteresting.JS_OVERALL_MISMATCH, r.runinfo.elapsedtime)
                if quickMode:
                    print rerunCommand
                if showDetailedDiffs:
                    print summary
                    print ""
                # Create a crashInfo object with empty stdout, and stderr showing diffs
                pc = ProgramConfiguration.fromBinary(jsEngine)
                pc.addProgramArguments(flags)
                crashInfo = CrashInfo.CrashInfo.fromRawCrashData([], summary, pc)
                return (jsInteresting.JS_OVERALL_MISMATCH, crashInfo)
            else:
                # print "compareJIT: match"
                jsInteresting.deleteLogs(prefix)

    # All matched :)
    jsInteresting.deleteLogs(prefix0)
    return (jsInteresting.JS_FINE, None)
def scan_crashes(base_dir, cmdline_path=None, env_path=None, tool_name=None, test_path=None,
                 firefox=None, firefox_prefs=None, firefox_extensions=None, firefox_testpath=None):
    '''
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @type env_path: String
    @param env_path: Optional file containing environment variables.

    @type test_path: String
    @param test_path: Optional filename where to copy the test before
                      attempting to reproduce a crash.

    @rtype: int
    @return: Non-zero return code on failure
    '''
    crash_dir = os.path.join(base_dir, "crashes")
    crash_files = []

    for crash_file in os.listdir(crash_dir):
        # Ignore all files that aren't crash results
        if not crash_file.startswith("id:"):
            continue

        crash_file = os.path.join(crash_dir, crash_file)

        # Ignore our own status files
        if crash_file.endswith(".submitted") or crash_file.endswith(".failed"):
            continue

        # Ignore files we already processed
        if os.path.exists(crash_file + ".submitted") or os.path.exists(crash_file + ".failed"):
            continue

        crash_files.append(crash_file)

    if crash_files:
        # First try to read necessary information for reproducing crashes
        base_env = {}
        test_in_env = None
        if env_path:
            with open(env_path, 'r') as env_file:
                for line in env_file:
                    (name, val) = line.rstrip('\n').split("=", 1)
                    base_env[name] = val

                    # '@@' is the AFL testcase placeholder; remember which
                    # variable carries it so we can substitute per crash file.
                    if '@@' in val:
                        test_in_env = name

        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        test_idx, cmdline = command_file_to_list(cmdline_path)
        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]

        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            print("Error: Creating program configuration from binary failed. Check your binary configuration file.", file=sys.stderr)
            return 2

        collector = Collector(tool=tool_name)

        if firefox:
            # Wrap the target in a Firefox harness; its command/env replace ours.
            (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], firefox_prefs,
                                                    firefox_extensions, firefox_testpath)
            cmdline = ffCmd
            base_env.update(ffEnv)

        for crash_file in crash_files:
            stdin = None
            env = None

            if base_env:
                env = dict(base_env)

            # Deliver the testcase the same way AFL did: via a command-line
            # placeholder, an environment placeholder, a fixed path, or stdin.
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace('@@', crash_file)
            elif test_in_env is not None:
                env[test_in_env] = env[test_in_env].replace('@@', crash_file)
            elif test_path is not None:
                shutil.copy(crash_file, test_path)
            else:
                with open(crash_file, 'r') as crash_fd:
                    stdin = crash_fd.read()

            print("Processing crash file %s" % crash_file, file=sys.stderr)

            runner = AutoRunner.fromBinaryArgs(cmdline[0], cmdline[1:], env=env, stdin=stdin)
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, crash_file)
                # Marker file so this crash is skipped on the next scan.
                open(crash_file + ".submitted", 'a').close()
                print("Success: Submitted crash to server.", file=sys.stderr)
            else:
                open(crash_file + ".failed", 'a').close()
                print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)

        if firefox:
            ffpInst.clean_up()
def compareLevel(jsEngine, flags, infilename, logPrefix, options, showDetailedDiffs, quickMode):
    # pylint: disable=invalid-name,missing-docstring,missing-return-doc,missing-return-type-doc,too-complex
    # pylint: disable=too-many-branches,too-many-arguments,too-many-locals
    # Run the same testcase under several flag combinations and compare
    # outputs to find JIT correctness bugs (Python 3 port of compareJIT).
    # options dict must be one we can pass to js_interesting.ShellResult
    # we also use it directly for knownPath, timeout, and collector
    # Return: (lev, crashInfo) or (js_interesting.JS_FINE, None)
    combos = shell_flags.basicFlagSets(jsEngine)

    if quickMode:
        # Only used during initial fuzzing. Allowed to have false negatives.
        combos = [combos[0]]

    if flags:
        combos.append(flags)

    commands = [[jsEngine] + combo + [infilename] for combo in combos]

    for i in range(0, len(commands)):
        prefix = logPrefix + "-r" + str(i)
        command = commands[i]
        r = js_interesting.ShellResult(options, command, prefix, True)  # pylint: disable=invalid-name

        oom = js_interesting.oomed(r.err)
        r.err = ignoreSomeOfStderr(r.err)

        if (r.return_code == 1 or r.return_code == 2) and (
                anyLineContains(r.out, '[[script] scriptArgs*]') or (
                    anyLineContains(r.err, '[scriptfile] [scriptarg...]'))):
            # The shell rejected a flag combination with its usage text; the
            # flagless first combo must always be accepted.
            print("Got usage error from:")
            print(" %s" % sps.shellify(command))
            assert i
            js_interesting.deleteLogs(prefix)
        elif r.lev > js_interesting.JS_OVERALL_MISMATCH:
            # would be more efficient to run lithium on one or the other, but meh
            print("%s | %s" % (infilename,
                               js_interesting.summaryString(
                                   r.issues + ["compare_jit found a more serious bug"],
                                   r.lev, r.runinfo.elapsedtime)))
            with open(logPrefix + "-summary.txt", 'wb') as f:
                f.write('\n'.join(r.issues + [
                    sps.shellify(command),
                    "compare_jit found a more serious bug"
                ]) + '\n')
            print(" %s" % sps.shellify(command))
            return (r.lev, r.crashInfo)
        elif r.lev != js_interesting.JS_FINE or r.return_code != 0:
            print("%s | %s" % (
                infilename,
                js_interesting.summaryString(
                    r.issues + [
                        "compare_jit is not comparing output, because the shell exited strangely"
                    ], r.lev, r.runinfo.elapsedtime)))
            print(" %s" % sps.shellify(command))
            js_interesting.deleteLogs(prefix)
            if not i:
                return (js_interesting.JS_FINE, None)
        elif oom:
            # If the shell or python hit a memory limit, we consider the rest of the computation
            # "tainted" for the purpose of correctness comparison.
            message = "compare_jit is not comparing output: OOM"
            print("%s | %s" % (infilename,
                               js_interesting.summaryString(r.issues + [message],
                                                            r.lev, r.runinfo.elapsedtime)))
            js_interesting.deleteLogs(prefix)
            if not i:
                return (js_interesting.JS_FINE, None)
        elif not i:
            # Stash output from this run (the first one), so for subsequent runs, we can compare against it.
            (r0, prefix0) = (r, prefix)  # pylint: disable=invalid-name
        else:
            # Compare the output of this run (r.out) to the output of the first run (r0.out), etc.
            def fpuOptionDisabledAsmOnOneSide(fpuAsmMsg):
                # pylint: disable=invalid-name,missing-docstring
                # pylint: disable=missing-return-doc,missing-return-type-doc
                # True when the asm/fpu diagnostic differs only because one
                # side ran with --no-fpu; such stderr diffs are expected.
                fpuOptionDisabledAsm = fpuAsmMsg in r0.err or fpuAsmMsg in r.err  # pylint: disable=cell-var-from-loop,invalid-name
                fpuOptionDiffers = (("--no-fpu" in commands[0]) != ("--no-fpu" in command))  # pylint: disable=invalid-name,cell-var-from-loop
                return fpuOptionDisabledAsm and fpuOptionDiffers

            def optionDisabledAsmOnOneSide():
                # pylint: disable=invalid-name,missing-docstring,missing-return-doc
                # pylint: disable=missing-return-type-doc
                # Same idea for --no-asmjs: ignore expected asm.js diagnostics.
                asmMsg = "asm.js type error: Disabled by javascript.options.asmjs"  # pylint: disable=invalid-name
                optionDisabledAsm = anyLineContains(  # pylint: disable=invalid-name,cell-var-from-loop
                    r0.err, asmMsg) or anyLineContains(r.err, asmMsg)
                optionDiffers = (("--no-asmjs" in commands[0]) != ("--no-asmjs" in command))  # pylint: disable=invalid-name,cell-var-from-loop
                return optionDisabledAsm and optionDiffers

            mismatchErr = (
                r.err != r0.err and  # pylint: disable=invalid-name
                # --no-fpu (on debug x86_32 only) turns off asm.js compilation, among other things.
                # This should only affect asm.js diagnostics on stderr.
                not fpuOptionDisabledAsmOnOneSide(
                    "asm.js type error: "
                    "Disabled by lack of floating point support") and
                # And also wasm stuff. See bug 1243031.
                not fpuOptionDisabledAsmOnOneSide(
                    "WebAssembly is not supported on the current device") and
                not optionDisabledAsmOnOneSide())
            mismatchOut = (r.out != r0.out)  # pylint: disable=invalid-name

            if mismatchErr or mismatchOut:
                # Generate a short summary for stdout and a long summary for a "*-summary.txt" file.
                # pylint: disable=invalid-name
                rerunCommand = sps.shellify([
                    "python -m funfuzz.js.compare_jit",
                    "--flags=" + " ".join(flags),
                    "--timeout=" + str(options.timeout), options.knownPath,
                    jsEngine,
                    os.path.basename(infilename)
                ])
                (summary, issues) = summarizeMismatch(mismatchErr, mismatchOut, prefix0, prefix)
                summary = " " + sps.shellify(
                    commands[0]) + "\n " + sps.shellify(
                        command) + "\n\n" + summary
                with open(logPrefix + "-summary.txt", 'wb') as f:
                    f.write(rerunCommand + "\n\n" + summary)
                print("%s | %s" % (infilename,
                                   js_interesting.summaryString(
                                       issues, js_interesting.JS_OVERALL_MISMATCH,
                                       r.runinfo.elapsedtime)))
                if quickMode:
                    print(rerunCommand)
                if showDetailedDiffs:
                    print(summary)
                    print()
                # Create a crashInfo object with empty stdout, and stderr showing diffs
                pc = ProgramConfiguration.fromBinary(jsEngine)  # pylint: disable=invalid-name
                pc.addProgramArguments(flags)  # pylint: disable=invalid-name
                crashInfo = CrashInfo.CrashInfo.fromRawCrashData([], summary, pc)  # pylint: disable=invalid-name
                return (js_interesting.JS_OVERALL_MISMATCH, crashInfo)
            else:
                # print "compare_jit: match"
                js_interesting.deleteLogs(prefix)

    # All matched :)
    js_interesting.deleteLogs(prefix0)
    return (js_interesting.JS_FINE, None)
def test_collector_submit(live_server, tmpdir, fm_user, monkeypatch):
    '''Test crash submission.

    End-to-end test of FuzzManager crash submission, in three stages:
    1. submit a crash via the Collector API and verify the created CrashEntry,
    2. submit a binary testcase through the command-line entry point (``main``)
       driven by a generated ``.fuzzmanagerconf`` file,
    3. verify that a server-side 5xx error surfaces as a RuntimeError.

    NOTE(review): ``live_server``, ``fm_user`` and ``exampleTestCase`` /
    ``asanTraceCrash`` are project-provided fixtures/globals — assumed to be a
    running test server, an authenticated user and sample crash data.
    '''
    # Redirect expanduser into tmpdir so a real ~/.fuzzmanagerconf on the
    # host cannot leak into the test; disable retry sleeps for speed.
    monkeypatch.setattr(
        os.path, 'expanduser', lambda path: tmpdir.strpath)  # ensure fuzzmanager config is not used
    monkeypatch.setattr(time, 'sleep', lambda t: None)

    # create a collector pointed at the live test server
    url = urlsplit(live_server.url)
    collector = Collector(sigCacheDir=tmpdir.mkdir('sigcache').strpath,
                          serverHost=url.hostname,
                          serverPort=url.port,
                          serverProtocol=url.scheme,
                          serverAuthToken=fm_user.token,
                          clientId='test-fuzzer1',
                          tool='test-tool')

    # Write a plain-text testcase and build crash info from a sample ASan trace.
    testcase_path = tmpdir.mkdir('testcase').join('testcase.js').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(exampleTestCase)
    config = ProgramConfiguration('mozilla-central', 'x86-64', 'linux', version='ba0bc4f26681')
    crashInfo = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(), config)

    # submit a crash to test server using collector
    result = collector.submit(crashInfo, testcase_path)

    # see that the issue was created in the server
    entry = CrashEntry.objects.get(pk=result['id'])
    assert entry.rawStdout == ''
    assert entry.rawStderr == asanTraceCrash
    assert entry.rawCrashData == ''
    assert entry.tool.name == 'test-tool'
    assert entry.client.name == 'test-fuzzer1'
    assert entry.product.name == config.product
    assert entry.product.version == config.version
    assert entry.platform.name == config.platform
    assert entry.os.name == config.os
    # Default quality is 0 and a .js testcase is stored as non-binary text.
    assert entry.testcase.quality == 0
    assert not entry.testcase.isBinary
    assert entry.testcase.size == len(exampleTestCase)
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == exampleTestCase
    assert entry.metadata == ''
    assert entry.env == ''
    assert entry.args == ''

    # create a test config so the command-line path below can locate the server
    # (expanduser is patched above, so this file is what main() will read)
    with open(tmpdir.join('.fuzzmanagerconf').strpath, 'w') as fp:
        fp.write('[Main]\n')
        fp.write('serverhost = %s\n' % url.hostname)
        fp.write('serverport = %d\n' % url.port)
        fp.write('serverproto = %s\n' % url.scheme)
        fp.write('serverauthtoken = %s\n' % fm_user.token)

    # try a binary testcase via cmd line
    testcase_path = tmpdir.join('testcase.bin').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(b'\0')
    stdout = tmpdir.join('stdout.txt').strpath
    with open(stdout, 'w') as fp:
        fp.write('stdout data')
    stderr = tmpdir.join('stderr.txt').strpath
    with open(stderr, 'w') as fp:
        fp.write('stderr data')
    crashdata = tmpdir.join('crashdata.txt').strpath
    with open(crashdata, 'w') as fp:
        fp.write(asanTraceCrash)
    # Drive the CLI entry point with explicit product/platform metadata;
    # a zero exit status means the submission succeeded.
    result = main([
        '--submit',
        '--tool', 'tool2',
        '--product', 'mozilla-inbound',
        '--productversion', '12345',
        '--os', 'minix',
        '--platform', 'pdp11',
        '--env', 'PATH=/home/ken', 'LD_PRELOAD=hack.so',
        '--metadata', 'var1=val1', 'var2=val2',
        '--args', './myprog',
        '--testcase', testcase_path,
        '--testcasequality', '5',
        '--stdout', stdout,
        '--stderr', stderr,
        '--crashdata', crashdata,
    ])
    assert result == 0
    entry = CrashEntry.objects.get(
        pk__gt=entry.id
    )  # newer than the last result, will fail if the test db is active
    assert entry.rawStdout == 'stdout data'
    assert entry.rawStderr == 'stderr data'
    assert entry.rawCrashData == asanTraceCrash
    assert entry.tool.name == 'tool2'
    # CLI submissions default the client id to the local hostname.
    assert entry.client.name == platform.node()
    assert entry.product.name == 'mozilla-inbound'
    assert entry.product.version == '12345'
    assert entry.platform.name == 'pdp11'
    assert entry.os.name == 'minix'
    assert entry.testcase.quality == 5
    assert entry.testcase.isBinary
    assert entry.testcase.size == 1
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == b'\0'
    # env/metadata/args are stored JSON-encoded on the entry.
    assert json.loads(entry.metadata) == {'var1': 'val1', 'var2': 'val2'}
    assert json.loads(entry.env) == {
        'PATH': '/home/ken',
        'LD_PRELOAD': 'hack.so'
    }
    assert json.loads(entry.args) == ['./myprog']

    # Stub out HTTP POST to always return a server error (500) so the
    # retry loop eventually gives up and raises.
    class response_t(object):
        status_code = 500
        text = "Error"

    def mypost(_session, _url, _data, headers=None):
        return response_t()

    monkeypatch.setattr(time, 'sleep', lambda t: None)
    monkeypatch.setattr(requests.Session, 'post', mypost)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.submit(crashInfo, testcase_path)
def write_aggregated_stats(base_dirs, outfile, cmdline_path=None):
    '''
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.

    If none of the base directories contains a "fuzzer_stats" file yet,
    the function returns without touching the output file.

    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.
    '''
    # Which fields to add
    wanted_fields_total = [
        'execs_done',
        'execs_per_sec',
        'pending_favs',
        'pending_total',
        'variable_paths',
        'unique_crashes',
        'unique_hangs']

    # Which fields to aggregate by mean
    wanted_fields_mean = ['exec_timeout']

    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ['cycles_done', 'bitmap_cvg']

    # Which fields should be aggregated by max
    wanted_fields_max = ['last_path']

    # Warnings to include
    warnings = []

    aggregated_stats = {}

    for field in wanted_fields_total:
        aggregated_stats[field] = 0
    for field in wanted_fields_mean:
        # (running sum, sample count) pairs, converted to a mean later
        aggregated_stats[field] = (0, 0)
    for field in wanted_fields_all:
        aggregated_stats[field] = []

    def convert_num(num):
        # AFL stats values are plain integers or decimals (e.g. "0.00").
        if '.' in num:
            return float(num)
        return int(num)

    # BUG FIX: the original guard was `if not aggregated_stats: return`,
    # which could never trigger because aggregated_stats is pre-populated
    # above and is therefore always truthy. Track explicitly whether at
    # least one fuzzer_stats file was found and parsed.
    have_stats = False

    for base_dir in base_dirs:
        stats_path = os.path.join(base_dir, "fuzzer_stats")

        # Default to the cmdline file of the first base directory; once set,
        # the same path is used for all subsequent iterations (preserves
        # original behavior).
        if not cmdline_path:
            cmdline_path = os.path.join(base_dir, "cmdline")

        if os.path.exists(stats_path):
            have_stats = True
            with open(stats_path, 'r') as stats_file:
                stats = stats_file.read()

            for line in stats.splitlines():
                # fuzzer_stats lines look like "field_name : value"
                (field_name, field_val) = line.split(':', 1)
                field_name = field_name.strip()
                field_val = field_val.strip()

                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (val + convert_num(field_val), cnt + 1)
                elif field_name in wanted_fields_all:
                    # Kept as raw strings; joined with spaces when written out.
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    num_val = convert_num(field_val)
                    if (field_name not in aggregated_stats) or aggregated_stats[field_name] < num_val:
                        aggregated_stats[field_name] = num_val

    # If we don't have any data here, then the fuzzers haven't written any statistics yet
    if not have_stats:
        return

    # Mean conversion: replace the (sum, count) pairs with their mean.
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            # No samples seen; keep the raw (zero) sum rather than divide by 0.
            aggregated_stats[field_name] = val

    # Verify fuzzmanagerconf exists and can be parsed
    _, cmdline = command_file_to_list(cmdline_path)
    target_binary = cmdline[0] if cmdline else None

    if target_binary is not None:
        if not os.path.isfile("%s.fuzzmanagerconf" % target_binary):
            warnings.append("WARNING: Missing %s.fuzzmanagerconf\n" % target_binary)
        elif ProgramConfiguration.fromBinary(target_binary) is None:
            warnings.append("WARNING: Invalid %s.fuzzmanagerconf\n" % target_binary)

    # Look for unreported crashes (crash files renamed to *.failed after a
    # failed submission attempt).
    failed_reports = 0
    for base_dir in base_dirs:
        crashes_dir = os.path.join(base_dir, "crashes")
        if not os.path.isdir(crashes_dir):
            continue
        for crash_file in os.listdir(crashes_dir):
            if crash_file.endswith(".failed"):
                failed_reports += 1
    if failed_reports:
        warnings.append("WARNING: Unreported crashes detected (%d)\n" % failed_reports)

    # Write out data in a fixed field order, aligned on the longest key.
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)

    max_keylen = max(len(x) for x in fields)

    # Hold the lock while rewriting so concurrent readers never observe a
    # partially written file.
    with InterProcessLock(outfile + ".lock"), open(outfile, 'w') as f:
        for field in fields:
            # wanted_fields_max entries may be absent if never seen in input.
            if field not in aggregated_stats:
                continue

            val = aggregated_stats[field]

            if isinstance(val, list):
                val = " ".join(val)

            f.write("%s%s: %s\n" % (field, " " * (max_keylen + 1 - len(field)), val))

        for warning in warnings:
            f.write(warning)

    return