def runTest(self):
    """Verify that a forward-slash assertion signature matches a backslash
    crash path, but only on Windows."""
    linux_cfg = ProgramConfiguration('test', 'x86-64', 'linux')
    windows_cfg = ProgramConfiguration('test', 'x86-64', 'windows')

    fwd_lines = testAssertionPathFwSlashes.splitlines()
    back_lines = testAssertionPathBwSlashes.splitlines()

    # Forward slashes are the native separator on Linux.
    fwd_on_linux = CrashInfo.fromRawCrashData([], [], linux_cfg, auxCrashData=fwd_lines)
    # A backslash path on Linux is invalid and should never happen.
    back_on_linux = CrashInfo.fromRawCrashData([], [], linux_cfg, auxCrashData=back_lines)
    # Forward slashes on Windows are valid and do occur in practice.
    fwd_on_windows = CrashInfo.fromRawCrashData([], [], windows_cfg, auxCrashData=fwd_lines)
    # Backslash is the native separator on Windows.
    back_on_windows = CrashInfo.fromRawCrashData([], [], windows_cfg, auxCrashData=back_lines)

    # A signature generated from the Linux assertion matches everything
    # except the invalid backslash-on-Linux combination.
    sig_from_linux = fwd_on_linux.createCrashSignature()
    assert sig_from_linux.matches(fwd_on_linux)
    assert not sig_from_linux.matches(back_on_linux)  # invalid, must not match
    assert sig_from_linux.matches(fwd_on_windows)
    assert sig_from_linux.matches(back_on_windows)

    # Likewise for a signature generated from the Windows assertion.
    sig_from_windows = back_on_windows.createCrashSignature()
    assert sig_from_windows.matches(fwd_on_linux)
    assert not sig_from_windows.matches(back_on_linux)  # invalid, must not match
    assert sig_from_windows.matches(fwd_on_windows)
    assert sig_from_windows.matches(back_on_windows)
def runTest(self):
    """ASan traces in either aux crash data or stderr yield an ASanCrashInfo."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    fromAux = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=asanTraceCrash.splitlines())
    fromStderr = CrashInfo.fromRawCrashData([], asanTraceUAF.splitlines(), cfg)
    self.assertIsInstance(fromAux, ASanCrashInfo)
    self.assertIsInstance(fromStderr, ASanCrashInfo)
def runTest(self):
    """An empty crashAddress symptom matches only crashes without an address."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    withAddress = CrashInfo.fromRawCrashData(
        [], [], cfg, auxCrashData=testTraceHeapWithCrashAddress.splitlines())
    withoutAddress = CrashInfo.fromRawCrashData(
        [], [], cfg, auxCrashData=testTraceHeapWithoutCrashAddress.splitlines())
    emptyAddressSig = CrashSignature(testSignatureEmptyCrashAddress)
    self.assertTrue(emptyAddressSig.matches(withoutAddress))
    self.assertFalse(emptyAddressSig.matches(withAddress))
def runTest(self):
    """Signatures with testcase symptoms require an attached testcase to match."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testTrace1.splitlines())

    sig3 = CrashSignature(testSignature3)
    sig4 = CrashSignature(testSignature4)
    sig5 = CrashSignature(testSignature5)
    sig6 = CrashSignature(testSignature6)

    self.assertFalse(sig3.matchRequiresTest())
    self.assertTrue(sig4.matchRequiresTest())
    self.assertTrue(sig5.matchRequiresTest())

    # Without a testcase attached, testcase-dependent signatures cannot match.
    self.assertFalse(sig4.matches(info))
    self.assertFalse(sig5.matches(info))
    self.assertFalse(sig6.matches(info))

    # Attach the testcase; sig4/sig5 now match, sig6 still does not match at all.
    info.testcase = testCase1
    self.assertTrue(sig4.matches(info))
    self.assertTrue(sig5.matches(info))
    self.assertFalse(sig6.matches(info))
def getCrashInfo(self, attachTestcase=False, requiredOutputSources=("stdout", "stderr", "crashdata")):
    """Build a CrashInfo for this entry, optionally attaching its testcase.

    Raw output fields are loaded from the database only when they are
    required for matching or when no cached crash data is available.
    """
    # TODO: This should be cached at some level
    # TODO: Need to include environment and program arguments here
    configuration = ProgramConfiguration(self.product.name, self.platform.name,
                                         self.os.name, self.product.version)

    cachedCrashInfo = json.loads(self.cachedCrashInfo) if self.cachedCrashInfo else None

    # A raw output field can be skipped iff
    # 1) it is not needed for matching, *and*
    # 2) the crash data is already cached.
    rawStdout = rawStderr = rawCrashData = None
    if cachedCrashInfo is None or "stdout" in requiredOutputSources:
        rawStdout = self.rawStdout
    if cachedCrashInfo is None or "stderr" in requiredOutputSources:
        rawStderr = self.rawStderr
    if cachedCrashInfo is None or "crashdata" in requiredOutputSources:
        rawCrashData = self.rawCrashData

    crashInfo = CrashInfo.fromRawCrashData(rawStdout, rawStderr, configuration,
                                           rawCrashData, cacheObject=cachedCrashInfo)

    if attachTestcase and self.testcase is not None and not self.testcase.isBinary:
        self.testcase.loadTest()
        crashInfo.testcase = self.testcase.content

    return crashInfo
def runTest(self):
    """A TSan data-race report yields a signature with three crashdata OutputSymptoms."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=tsanSimpleRaceReport.splitlines())

    sig = info.createCrashSignature()
    self.assertTrue(sig.matches(info))

    collected = []
    for symptom in sig.symptoms:
        if isinstance(symptom, OutputSymptom):
            self.assertEqual(symptom.src, "crashdata")
            collected.append(symptom)
    self.assertEqual(len(collected), 3)

    expectedValues = [
        "WARNING: ThreadSanitizer: data race",
        "Write of size 4 at 0x[0-9a-fA-F]+ by thread T1:",
        "Previous read of size 4 at 0x[0-9a-fA-F]+ by main thread:"
    ]
    for expected in expectedValues:
        found = any(symptom.output.value == expected for symptom in collected)
        self.assertTrue(found, msg="Couldn't find OutputSymptom with value '%s'" % expected)
def runTest(self):
    """An Apple crash report yields the expected crash address."""
    cfg = ProgramConfiguration("test", "x86-64", "macosx")
    with open('apple-crash-report-example.txt', 'r') as report:
        lines = report.read().splitlines()
    info = CrashInfo.fromRawCrashData([], [], cfg, lines)
    self.assertEqual(info.crashAddress, long(0x00007fff5f3fff98))
def runTest(self):
    """A minidump yields the expected crash address."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    with open('minidump-example.txt', 'r') as dump:
        lines = dump.read().splitlines()
    info = CrashInfo.fromRawCrashData([], [], cfg, lines)
    self.assertEqual(info.crashAddress, long(0x3e800006acb))
def runTest(self):
    """An ASan failed-allocation report generates a matching signature with a stack symptom."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testAsanFailedAlloc.splitlines())
    sig = info.createCrashSignature()
    self.assertIn("/AddressSanitizer failed to allocate", str(sig))
    self.assertTrue(sig.matches(info))
    self.assertTrue(isinstance(sig.symptoms[1], StackFramesSymptom))
def runTest(self):
    """A stackSize of '> 15' is parsed as decimal and matches a 16-frame trace."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testAsanLongTrace.splitlines())
    # The signature uses "> 15", which was previously misinterpreted as 0x15,
    # while the crash data contains 16 frames.
    sizeSig = CrashSignature(testSignatureStackSize)
    self.assertTrue(sizeSig.matches(info))
def runTest(self):
    """A signature generated from an ASan stack overflow matches its own crash."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testAsanStackOverflow.splitlines())
    sig = info.createCrashSignature()
    # Check matches appropriately
    self.assertTrue(sig.matches(info))
def runTest(self):
    """A UBSan trace is parsed into the expected backtrace entries."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, ubsanSampleTrace1.splitlines())
    expectedFrames = [(0, "WelsDec::BsGetUe"), (9, "_start"), (11, "Lex< >")]
    for index, name in expectedFrames:
        self.assertEqual(info.backtrace[index], name)
def runTest(self):
    """An ASan access violation on Windows yields the expected signature symptoms."""
    cfg = ProgramConfiguration("test", "x86-64", "windows")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testAsanAccessViolation.splitlines())
    sig = info.createCrashSignature()
    sigText = str(sig)
    self.assertIn("/ERROR: AddressSanitizer", sigText)
    self.assertIn("access-violation", sigText)
    self.assertTrue(isinstance(sig.symptoms[1], StackFramesSymptom))
def runTest(self):
    """An ASan negative-size-param report yields the expected signature symptoms."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testTraceNegativeSizeParam.splitlines())
    sig = info.createCrashSignature()
    sigText = str(sig)
    self.assertIn("/ERROR: AddressSanitizer", sigText)
    self.assertIn("negative-size-param", sigText)
    self.assertTrue(isinstance(sig.symptoms[1], StackFramesSymptom))
def create(self, attrs):
    '''
    Create a CrashEntry instance based on the given dictionary of values
    received. We need to unflatten foreign relationships like product,
    platform, os and client and create the foreign objects on the fly
    if they don't exist in our database yet.
    '''
    # The three raw output fields are mandatory; report all missing ones at once.
    missing_keys = {'rawStdout', 'rawStderr', 'rawCrashData'} - set(attrs.keys())
    if missing_keys:
        raise InvalidArgumentException({key: ["This field is required."] for key in missing_keys})

    # Unflatten the foreign relationships, creating rows on the fly as needed.
    attrs['product'] = Product.objects.get_or_create(**attrs['product'])[0]
    attrs['platform'] = Platform.objects.get_or_create(**attrs['platform'])[0]
    attrs['os'] = OS.objects.get_or_create(**attrs['os'])[0]
    attrs['client'] = Client.objects.get_or_create(**attrs['client'])[0]
    attrs['tool'] = Tool.objects.get_or_create(**attrs['tool'])[0]

    # Parse the incoming data using the crash signature package from FTB
    configuration = ProgramConfiguration(attrs['product'].name, attrs['platform'].name,
                                         attrs['os'].name, attrs['product'].version)
    crashInfo = CrashInfo.fromRawCrashData(attrs['rawStdout'], attrs['rawStderr'],
                                           configuration, attrs['rawCrashData'])

    # Populate certain fields here from the CrashInfo object we just got
    if crashInfo.crashAddress is not None:
        attrs['crashAddress'] = '0x%x' % crashInfo.crashAddress
    attrs['shortSignature'] = crashInfo.createShortSignature()

    # If a testcase is supplied, create a testcase object and store it
    if 'test' in attrs['testcase']:
        testcase = attrs['testcase']
        testcase_ext = attrs.pop('testcase_ext', None)
        testcase_quality = testcase.get('quality', 0)
        testcase_isbinary = testcase.get('isBinary', False)
        testcase = testcase['test']

        if testcase_ext is None:
            raise RuntimeError("Must provide testcase extension when providing testcase")

        # Hash the testcase content to derive a stable, unique file name.
        h = hashlib.new('sha1')
        if testcase_isbinary:
            # Binary testcases arrive base64-encoded; hash the decoded bytes.
            testcase = base64.b64decode(testcase)
            h.update(testcase)
        else:
            h.update(repr(testcase).encode("utf-8"))

        dbobj = TestCase(quality=testcase_quality, isBinary=testcase_isbinary,
                         size=len(testcase))
        dbobj.test.save("%s.%s" % (h.hexdigest(), testcase_ext), ContentFile(testcase))
        dbobj.save()
        attrs['testcase'] = dbobj
    else:
        attrs['testcase'] = None

    # Create our CrashEntry instance
    return super(CrashEntrySerializer, self).create(attrs)
def runTest(self):
    """crashAddress symptoms compare numerically and never match an addressless crash."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    belowSig = CrashSignature('{ "symptoms" : [ { "type" : "crashAddress", "address" : "< 0x1000" } ] }')
    exactSig = CrashSignature('{ "symptoms" : [ { "type" : "crashAddress", "address" : "0x1000" } ] }')

    info1 = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=gdbSampleTrace1.splitlines())
    info3 = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=gdbSampleTrace3.splitlines())
    self.assertIsInstance(info1, GDBCrashInfo)

    self.assert_(belowSig.matches(info1))
    self.assertFalse(exactSig.matches(info1))

    # info3 has no crash address, so neither signature may match.
    self.assertFalse(belowSig.matches(info3))
    self.assertFalse(exactSig.matches(info3))
def runTest(self):
    """PCRE-based signatures match (or reject) the sample trace as expected."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testTrace1.splitlines())
    matchingSig = CrashSignature(testSignaturePCREShort1)
    nonMatchingSig = CrashSignature(testSignaturePCREShort2)
    self.assertTrue(matchingSig.matches(info))
    self.assertFalse(nonMatchingSig.matches(info))
def runTest(self):
    """A crash with a program abort message must not get ASan symptoms in its signature."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    auxInfo = CrashInfo.fromRawCrashData(
        [], [], cfg, auxCrashData=testTraceWithAuxMessage.splitlines())
    abortInfo = CrashInfo.fromRawCrashData(
        [], [], cfg, auxCrashData=testTraceWithAuxAndAbortMessage.splitlines())

    auxSig = auxInfo.createCrashSignature()
    abortSig = abortInfo.createCrashSignature()

    # The first signature carries ASan symptoms; the second must not,
    # because its crash has a program abort message.
    self.assertIn("/ERROR: AddressSanitizer", str(auxSig))
    self.assertIn("/READ of size", str(auxSig))
    self.assertNotIn("/ERROR: AddressSanitizer", str(abortSig))
    self.assertNotIn("/READ of size", str(abortSig))

    # Cross-check the matching behavior of both signatures.
    self.assertTrue(auxSig.matches(auxInfo))
    self.assertTrue(auxSig.matches(abortInfo))
    self.assertFalse(abortSig.matches(auxInfo))
    self.assertTrue(abortSig.matches(abortInfo))
def getCrashInfo(self, attachTestcase=False):
    """Construct a CrashInfo object from this entry's raw output fields.

    Args:
        attachTestcase (bool): If True and a non-binary testcase exists,
            load it and attach its content to the returned CrashInfo.

    Returns:
        CrashInfo: Parsed crash information for this entry.
    """
    # TODO: This should be cached at some level
    # TODO: Need to include environment and program arguments here
    configuration = ProgramConfiguration(self.product.name, self.platform.name,
                                         self.os.name, self.product.version)

    crashInfo = CrashInfo.fromRawCrashData(self.rawStdout, self.rawStderr,
                                           configuration, self.rawCrashData)

    # Fixed: compare against None with "is not" (PEP 8), not "!=" which can
    # invoke a custom __ne__ on model instances.
    if attachTestcase and self.testcase is not None and not self.testcase.isBinary:
        self.testcase.loadTest()
        crashInfo.testcase = self.testcase.content

    return crashInfo
def runTest(self):
    cfg = ProgramConfiguration("test", "x86", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testTrace2.splitlines())
    sig = info.createCrashSignature()
    # The last frame with a symbol must sit at the right position and the
    # frame list must end there -- especially no wildcard following it.
    stackSymptom = sig.symptoms[0]
    self.assertTrue(isinstance(stackSymptom, StackFramesSymptom))
    self.assertEqual(str(stackSymptom.functionNames[6]), "js::jit::CheckOverRecursedWithExtra")
    self.assertEqual(len(stackSymptom.functionNames), 7)
def runTest(self):
    """Instruction symptoms match on register names, instruction name, or both."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")

    regSig = CrashSignature('{ "symptoms" : [ { "type" : "instruction", "registerNames" : ["r14"] } ] }')
    regSigNeg = CrashSignature('{ "symptoms" : [ { "type" : "instruction", "registerNames" : ["r14", "rax"] } ] }')
    insnSig = CrashSignature('{ "symptoms" : [ { "type" : "instruction", "instructionName" : "mov" } ] }')
    insnSigNeg = CrashSignature('{ "symptoms" : [ { "type" : "instruction", "instructionName" : "cmp" } ] }')
    comboSig = CrashSignature('{ "symptoms" : [ { "type" : "instruction", "instructionName" : "mov", "registerNames" : ["r14", "rbx"] } ] }')
    comboSigNeg = CrashSignature('{ "symptoms" : [ { "type" : "instruction", "instructionName" : "mov", "registerNames" : ["r14", "rax"] } ] }')

    info2 = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=gdbSampleTrace2.splitlines())
    info3 = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=gdbSampleTrace3.splitlines())
    self.assertIsInstance(info2, GDBCrashInfo)
    self.assertIsInstance(info3, GDBCrashInfo)

    self.assert_(regSig.matches(info2))
    self.assertFalse(regSigNeg.matches(info2))
    self.assert_(insnSig.matches(info2))
    self.assertFalse(insnSigNeg.matches(info2))
    self.assert_(comboSig.matches(info2))
    self.assertFalse(comboSigNeg.matches(info2))

    # info3 carries no register information, so no instruction symptom may match.
    self.assertFalse(regSig.matches(info3))
    self.assertFalse(insnSig.matches(info3))
    self.assertFalse(comboSig.matches(info3))
def runTest(self):
    """A TSan thread-leak report yields a signature with the expected OutputSymptom."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=tsanSimpleLeakReport.splitlines())
    sig = info.createCrashSignature()
    self.assertTrue(sig.matches(info))

    seen = False
    for symptom in sig.symptoms:
        if isinstance(symptom, OutputSymptom):
            self.assertEqual(symptom.src, "crashdata")
            self.assertEqual(symptom.output.value, "WARNING: ThreadSanitizer: thread leak")
            seen = True
    self.assertTrue(seen, msg="Expected correct OutputSymptom in signature")
def runTest(self):
    """Output symptoms default to both stdout/stderr and honor PCRE values."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")

    anySrcSig = CrashSignature('{ "symptoms" : [ { "type" : "output", "value" : "test" } ] }')
    stderrOnlySig = CrashSignature('{ "symptoms" : [ { "type" : "output", "src" : "stderr", "value" : "test" } ] }')
    pcreSig = CrashSignature('{ "symptoms" : [ { "type" : "output", "src" : "stderr", "value" : { "value" : "^fest$", "matchType" : "pcre" } } ] }')

    gdbOutput = []
    stdout = ["Foo", "Bartester", "Baz"]
    stderr = ["hackfest"]

    info = CrashInfo.fromRawCrashData(stdout, stderr, cfg, auxCrashData=gdbOutput)
    self.assertIsInstance(info, NoCrashInfo)

    # With no src given, both stdout and stderr are searched.
    self.assert_(anySrcSig.matches(info))
    # With src "stderr", the stdout hit must be ignored.
    self.assertFalse(stderrOnlySig.matches(info))
    # "^fest$" is a real PCRE, so "hackfest" must not match.
    self.assertFalse(pcreSig.matches(info))

    # Append a line the PCRE can match, then retry.
    stderr.append("fest")
    info = CrashInfo.fromRawCrashData(stdout, stderr, cfg, auxCrashData=gdbOutput)
    self.assert_(pcreSig.matches(info))
def test_collector_generate_search(tmpdir):
    '''Test sigcache generation and search'''
    # Set up a collector backed by a fresh signature cache directory.
    cache_path = tmpdir.mkdir('sigcache').strpath
    collector = Collector(sigCacheDir=cache_path)

    # Generate a signature from the crash data; it must be the only cache entry.
    cfg = ProgramConfiguration('mozilla-central', 'x86-64', 'linux', version='ba0bc4f26681')
    info = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(), cfg)
    sig = collector.generate(info, False, False, 8)
    assert {entry.strpath for entry in tmpdir.join('sigcache').listdir()} == {sig}

    # Searching the sigcache must find the signature we just generated.
    match, meta = collector.search(info)
    assert match == sig
    assert meta is None

    # Once metadata exists alongside the signature, it is returned too.
    base, _ = os.path.splitext(sig)
    with open(base + '.metadata', 'w') as meta_fp:
        meta_fp.write('{}')
    match, meta = collector.search(info)
    assert match == sig
    assert meta == {}

    # A different (empty) crash must not match anything in the cache.
    info = CrashInfo.fromRawCrashData([], [], cfg)
    match, meta = collector.search(info)
    assert match is None
    assert meta is None

    # generate() returns None when signature generation fails.
    assert collector.generate(info, True, True, 8) is None
def add_fault(self):
    """Report the current bucket's crash to FuzzManager with its testcase."""
    # Setup FuzzManager with target information and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.binary)

    # Prepare FuzzManager with crash information.
    stdout = "N/A"  # Todo: There is no plain stdout logger yet.
    stderr = "N/A"  # Todo: There is no plain stderr logger yet.
    auxdat = self.bucket.get("crashlog", "N/A").get("data", "N/A")
    metaData = None
    testcase = self.save_bucket_as_zip(self.bucket)
    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)

    # Submit crash report with testcase to FuzzManager.
    collector = Collector(tool="dharma")
    collector.submit(crash_info, testcase, metaData)
def runTest(self):
    """The first four stack-frame signatures match the trace; the fifth does not."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testTrace1.splitlines())
    signatures = [CrashSignature(raw) for raw in (testSignatureStackFrames1,
                                                  testSignatureStackFrames2,
                                                  testSignatureStackFrames3,
                                                  testSignatureStackFrames4,
                                                  testSignatureStackFrames5)]
    for matching in signatures[:4]:
        self.assertTrue(matching.matches(info))
    self.assertFalse(signatures[4].matches(info))
def test_SignatureGenerationTSanLeakTest():
    """A TSan thread-leak report yields a signature with the expected OutputSymptom."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    with open(os.path.join(CWD, 'resources', 'tsan-simple-leak-report.txt'), 'r') as report:
        info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=report.read().splitlines())

    sig = info.createCrashSignature()
    assert sig.matches(info)

    seen = False
    for symptom in sig.symptoms:
        if isinstance(symptom, OutputSymptom):
            assert symptom.src == "crashdata"
            assert symptom.output.value == "WARNING: ThreadSanitizer: thread leak"
            seen = True
    assert seen, "Expected correct OutputSymptom in signature"
def runTest(self):
    """TSan leak reports produce a signature containing the thread-leak OutputSymptom."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    info = CrashInfo.fromRawCrashData(
        [], [], cfg, auxCrashData=tsanSimpleLeakReport.splitlines())
    sig = info.createCrashSignature()
    self.assertTrue(sig.matches(info))

    outputSymptoms = [s for s in sig.symptoms if isinstance(s, OutputSymptom)]
    for symptom in outputSymptoms:
        self.assertEqual(symptom.src, "crashdata")
        self.assertEqual(symptom.output.value,
                         "WARNING: ThreadSanitizer: thread leak")
    found = bool(outputSymptoms)
    self.assertTrue(found, msg="Expected correct OutputSymptom in signature")
def runTest(self):
    """Submitting a crash increases the remote crash entry count by one."""
    collector = Collector(self.tmpCacheDir,
                          serverHost='127.0.0.1',
                          serverPort='8000',
                          serverProtocol='http',
                          serverUser=testAuthCreds[0],
                          serverPass=testAuthCreds[1],
                          clientId='test-fuzzer1')
    cfg = ProgramConfiguration("mozilla-central", "x86-64", "linux", version="ba0bc4f26681")
    info = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(), cfg)

    # TODO: This is only a rudimentary check to see if we submitted *something*.
    # We should check more precisely that the information submitted is correct.
    countBefore = self.getRemoteCrashEntryCount()
    collector.submit(info, exampleTestCase)
    self.assertEqual(self.getRemoteCrashEntryCount(), countBefore + 1)
def runTest(self):
    """Stack-frame signatures 1-4 match the trace; signature 5 must not."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    info = CrashInfo.fromRawCrashData(
        [], [], cfg, auxCrashData=testTrace1.splitlines())

    expectations = [(testSignatureStackFrames1, True),
                    (testSignatureStackFrames2, True),
                    (testSignatureStackFrames3, True),
                    (testSignatureStackFrames4, True),
                    (testSignatureStackFrames5, False)]
    for raw, shouldMatch in expectations:
        if shouldMatch:
            self.assertTrue(CrashSignature(raw).matches(info))
        else:
            self.assertFalse(CrashSignature(raw).matches(info))
def OnFault(self, run, test, variationCount, monitorData, actionValues):
    """Peach fault callback: package the crash and submit it to FuzzManager."""
    # Setup FuzzManager with information about target and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.target_binary)

    # Prepare FuzzManager with target and crash information.
    stdout = self._get_value_by_key(monitorData, "stdout.txt", "N/A")
    stderr = self._get_value_by_key(monitorData, "stderr.txt", "N/A")
    auxdat = self._get_value_by_key(monitorData, "auxdat.txt", "N/A")

    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)

    collector = Collector(tool="peach")

    # Write testcase content and any additional meta information to a temporary ZIP archive.
    buffer = StringIO.StringIO()
    zip_buffer = zipfile.ZipFile(buffer, 'w')

    # Collect |actionValues| crash information from Peach.
    # NOTE(review): each actionValue appears to be a tuple of at least
    # (name, kind, data[, extra]) -- confirm against the Peach API.
    for i in range(len(actionValues)):
        if len(actionValues[i]) > 2:
            data = actionValues[i][2]
            fileName = "data_%d_%s_%s.txt" % (i, actionValues[i][1], actionValues[i][0])
            zip_buffer.writestr(fileName, data)
        if len(actionValues[i]) > 3 and actionValues[i][1] != 'output':
            # Non-output actions store a repr() of the extra element.
            data = repr(actionValues[i][3])
            fileName = "data_%d_%s_%s_action.txt" % (i, actionValues[i][1], actionValues[i][0])
            zip_buffer.writestr(fileName, data)
        if len(actionValues[i]) > 3 and actionValues[i][1] == 'output':
            # Output actions store the extra element verbatim.
            fileName = "data_%d_%s_%s_fileName.txt" % (i, actionValues[i][1], actionValues[i][0])
            data = actionValues[i][3]
            zip_buffer.writestr(fileName, data)

    # Collect |monitorData| crash information from Peach.
    for k, v in monitorData.items():
        zip_buffer.writestr(k, v)

    zip_buffer.close()

    # Persist the archive to disk; delete=False so the file survives for submission.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as testcase:
        buffer.seek(0)
        testcase.write(buffer.getvalue())
        testcase.close()

    # Submit crash report with testcase to FuzzManager.
    collector.submit(crash_info, testcase.name, metaData=None)
def add_fault(self):
    """Package the current bucket and submit it to FuzzManager as a crash report."""
    # Setup FuzzManager with target information and platform data.
    program_configuration = ProgramConfiguration.fromBinary(self.binary)

    # Prepare FuzzManager with crash information.
    stdout = "N/A"  # Todo: There is no plain stdout logger yet.
    stderr = "N/A"  # Todo: There is no plain stderr logger yet.
    auxdat = self.bucket.get("crashlog", "N/A").get("data", "N/A")

    crash_info = CrashInfo.fromRawCrashData(stdout, stderr, program_configuration, auxdat)
    metaData = None
    testcase = self.save_bucket_as_zip(self.bucket)

    # Submit crash report with testcase to FuzzManager.
    collector = Collector(tool="dharma")
    collector.submit(crash_info, testcase, metaData)
def crash_info(self):
    """Create CrashInfo object from logs.

    Args:
        None

    Returns:
        CrashInfo: CrashInfo based on log data.
    """
    if self._crash_info is None:
        assert self.path is not None

        def _read_log(file_name):
            # Logs may contain invalid UTF-8; decode permissively.
            with open(file_name, "rb") as log_fp:
                return log_fp.read().decode("utf-8", errors="ignore").splitlines()

        aux_data = None if self._logs.aux is None else _read_log(self._logs.aux)

        # Create a ProgramConfiguration that can be reported to a FM server,
        # preferring "<target_binary>.fuzzmanagerconf" when it exists.
        fm_cfg = None
        if isfile("%s.fuzzmanagerconf" % (self._target_binary,)):
            fm_cfg = ProgramConfiguration.fromBinary(self._target_binary)
        else:
            LOG.debug("'%s.fuzzmanagerconf' does not exist", self._target_binary)
        if fm_cfg is None:
            LOG.debug("creating ProgramConfiguration")
            cpu = machine().lower()
            fm_cfg = ProgramConfiguration(
                basename(self._target_binary),
                "x86_64" if cpu == "amd64" else cpu,
                system(),
            )

        self._crash_info = CrashInfo.fromRawCrashData(
            _read_log(self._logs.stdout),
            _read_log(self._logs.stderr),
            fm_cfg,
            auxCrashData=aux_data,
        )
    return self._crash_info
def crash_info(self, target_binary):
    """Create CrashInfo object from logs.

    Args:
        target_binary (str): Binary file being tested.

    Returns:
        CrashInfo: CrashInfo based on Result log data.
    """
    if self._crash_info is None:
        def _load(log_name):
            # Logs may contain invalid UTF-8; decode permissively.
            with open(os.path.join(self.path, log_name), "rb") as log_fp:
                return log_fp.read().decode("utf-8", errors="ignore").splitlines()

        aux_data = None if self.log_aux is None else _load(self.log_aux)

        # Create a ProgramConfiguration that can be reported to a FM server,
        # preferring "<target_binary>.fuzzmanagerconf" when it exists.
        fm_cfg = None
        if os.path.isfile("%s.fuzzmanagerconf" % (target_binary, )):
            fm_cfg = ProgramConfiguration.fromBinary(target_binary)
        else:
            log.debug("'%s.fuzzmanagerconf' does not exist", target_binary)
        if fm_cfg is None:
            log.debug("creating ProgramConfiguration")
            cpu = platform.machine().lower()
            fm_cfg = ProgramConfiguration(
                os.path.basename(target_binary),
                "x86_64" if cpu == "amd64" else cpu,
                platform.system())

        self._crash_info = CrashInfo.fromRawCrashData(
            _load(self.log_out),
            _load(self.log_err),
            fm_cfg,
            auxCrashData=aux_data)
    return self._crash_info
def runTest(self):
    """Generated signatures match their crash and equal the expected JSON forms."""
    cfg = ProgramConfiguration("test", "x86", "linux")
    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=testTrace1.splitlines())

    sigWithAddress = info.createCrashSignature(forceCrashAddress=True,
                                               maxFrames=4, minimumSupportedVersion=10)
    sigNoAddress = info.createCrashSignature(forceCrashAddress=False,
                                             maxFrames=3, minimumSupportedVersion=10)
    sigWithInstruction = info.createCrashSignature(forceCrashInstruction=True,
                                                   maxFrames=2, minimumSupportedVersion=10)

    # Every generated signature must match its originating crashInfo.
    self.assert_(sigWithAddress.matches(info))
    self.assert_(sigNoAddress.matches(info))
    self.assert_(sigWithInstruction.matches(info))

    # Compare generated signatures against the expected JSON forms.
    self.assertEqual(json.loads(str(sigWithAddress)), json.loads(testSignature1))
    self.assertEqual(json.loads(str(sigNoAddress)), json.loads(testSignature2))
    # The third signature misses 2 of the top 4 frames, so it also includes
    # the crash address even though we did not request it.
    self.assertEqual(json.loads(str(sigWithInstruction)), json.loads(testSignature3))
def getCrashInfo(self, attachTestcase=False):
    """Construct a CrashInfo object for this entry, using cached crash data
    when available.

    Args:
        attachTestcase (bool): If True and a non-binary testcase exists,
            load it and attach its content to the returned CrashInfo.

    Returns:
        CrashInfo: Parsed crash information for this entry.
    """
    # TODO: This should be cached at some level
    # TODO: Need to include environment and program arguments here
    configuration = ProgramConfiguration(self.product.name, self.platform.name,
                                         self.os.name, self.product.version)

    cachedCrashInfo = None
    if self.cachedCrashInfo:
        cachedCrashInfo = json.loads(self.cachedCrashInfo)

    crashInfo = CrashInfo.fromRawCrashData(self.rawStdout, self.rawStderr, configuration,
                                           self.rawCrashData, cacheObject=cachedCrashInfo)

    # Fixed: compare against None with "is not" (PEP 8), not "!=" which can
    # invoke a custom __ne__ on model instances.
    if attachTestcase and self.testcase is not None and not self.testcase.isBinary:
        self.testcase.loadTest()
        crashInfo.testcase = self.testcase.content

    return crashInfo
def runTest(self):
    """stackFrame symptoms match on function name and optional frame number."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    nameSig = CrashSignature('{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "internalAppend" } ] }')
    nameSigNeg = CrashSignature('{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "foobar" } ] }')
    frameNoSig = CrashSignature('{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "js::ion::MBasicBlock::setBackedge", "frameNumber" : "<= 4" } ] }')
    frameNoSigNeg = CrashSignature('{ "symptoms" : [ { "type" : "stackFrame", "functionName" : "js::ion::MBasicBlock::setBackedge", "frameNumber" : "> 4" } ] }')

    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=gdbSampleTrace1.splitlines())
    self.assertIsInstance(info, GDBCrashInfo)

    self.assert_(nameSig.matches(info))
    self.assertFalse(nameSigNeg.matches(info))
    self.assert_(frameNoSig.matches(info))
    self.assertFalse(frameNoSigNeg.matches(info))
def runTest(self):
    """stackSize symptoms match exact counts and relational expressions."""
    cfg = ProgramConfiguration("test", "x86-64", "linux")
    exactSig = CrashSignature('{ "symptoms" : [ { "type" : "stackSize", "size" : 8 } ] }')
    exactSigNeg = CrashSignature('{ "symptoms" : [ { "type" : "stackSize", "size" : 9 } ] }')
    rangeSig = CrashSignature('{ "symptoms" : [ { "type" : "stackSize", "size" : "< 10" } ] }')
    rangeSigNeg = CrashSignature('{ "symptoms" : [ { "type" : "stackSize", "size" : "> 10" } ] }')

    info = CrashInfo.fromRawCrashData([], [], cfg, auxCrashData=gdbSampleTrace1.splitlines())
    self.assertIsInstance(info, GDBCrashInfo)

    self.assert_(exactSig.matches(info))
    self.assertFalse(exactSigNeg.matches(info))
    self.assert_(rangeSig.matches(info))
    self.assertFalse(rangeSigNeg.matches(info))
def test_SignatureGenerationTSanRaceTestAtomic():
    """Signatures generated from TSan atomic-race reports must carry the expected crashdata output symptoms."""
    config = ProgramConfiguration("test", "x86-64", "linux")
    for report_name in ['tsan-report-atomic.txt', 'tsan-report-atomic-swapped.txt']:
        with open(os.path.join(CWD, 'resources', report_name), 'r') as report_file:
            crashInfo = CrashInfo.fromRawCrashData(
                [], [], config, auxCrashData=report_file.read().splitlines())

        assert crashInfo.backtrace[0] == "pthread_mutex_destroy"
        assert (
            crashInfo.createShortSignature() ==
            "ThreadSanitizer: data race [@ pthread_mutex_destroy] vs. [@ pthread_mutex_unlock]"
        )

        generatedSignature = crashInfo.createCrashSignature()
        assert generatedSignature.matches(crashInfo)

        # Collect the OutputSymptom entries; each must read from the crash data.
        crashdataSymptoms = [
            candidate for candidate in generatedSignature.symptoms
            if isinstance(candidate, OutputSymptom)
        ]
        for candidate in crashdataSymptoms:
            assert candidate.src == "crashdata"
        assert len(crashdataSymptoms) == 3

        # Each expected match string must appear as the value of some symptom.
        for stringMatchVal in [
            "WARNING: ThreadSanitizer: data race",
            "(Previous )?[Aa]tomic [Rr]ead of size 1 at 0x[0-9a-fA-F]+ by thread T[0-9]+( .+mutexes: .+)?:",
            "(Previous )?[Ww]rite of size 1 at 0x[0-9a-fA-F]+ by main thread( .+mutexes: .+)?:"
        ]:
            assert any(
                candidate.output.value == stringMatchVal
                for candidate in crashdataSymptoms
            ), "Couldn't find OutputSymptom with value '%s'" % stringMatchVal
# NOTE(review): the repeated `if __name__ == '__main__':` guards and the bare
# expressions below look like cells exported from a Jupyter notebook
# (e.g. via nbconvert) — confirm against the original notebook before cleanup.

if __name__ == '__main__':
    # Run the deliberately-crashing demo target, capturing both streams.
    cmd = ["simply-buggy/simple-crash"]
    result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)

if __name__ == '__main__':
    stderr = result.stderr.decode().splitlines()
    stderr[0:3]  # bare expression: displayed output in the notebook, a no-op as a script

if __name__ == '__main__':
    stdout = result.stdout.decode().splitlines()
    stdout  # bare expression: displayed output in the notebook, a no-op as a script

if __name__ == '__main__':
    # `configuration` is presumably defined in an earlier cell outside this
    # chunk — TODO confirm.
    crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
    print(crashInfo)

### Collector

if __name__ == '__main__':
    print('\n### Collector')

if __name__ == '__main__':
    # Imported lazily, notebook-style; submitting requires a configured server.
    from Collector.Collector import Collector  # type: ignore

if __name__ == '__main__':
    collector = Collector()

if __name__ == '__main__':
    collector.submit(crashInfo)
def main(argv=None):
    '''Command line options.

    Runs the target binary in a loop under LibFuzzerMonitor and submits new
    crashes to a FuzzManager server.

    Fixes applied:
      * The manual ``ProgramConfiguration`` fallback built from --platform /
        --product / --os either never ran or clobbered the configuration
        loaded from the binary; it now applies only when
        ``ProgramConfiguration.fromBinary`` fails.
      * Environment variables were registered on the configuration twice
        (two separate ``addEnvironmentVariables`` blocks); merged into one.
      * ``== None`` / ``!= None`` replaced with identity comparisons.

    :param argv: argument list to parse (defaults to ``sys.argv[1:]``)
    :return: process exit code (0 never returned from the monitor loop,
             2 on usage/configuration errors)
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(usage='%s [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main arguments", description=None)
    fmGroup = parser.add_argument_group(
        title="FuzzManager specific options",
        description="""Values for the options listed here are typically provided through FuzzManager configuration files, but can be overwritten using these options:""")

    mainGroup.add_argument('--version', action='version', version=program_version_string)
    mainGroup.add_argument('--cmd', dest='cmd', action='store_true', help="Command with parameters to run")
    mainGroup.add_argument('--env', dest='env', nargs='+', type=str,
                           help="List of environment variables in the form 'KEY=VALUE'")

    # Settings
    fmGroup.add_argument("--sigdir", dest="sigdir", help="Signature cache directory", metavar="DIR")
    fmGroup.add_argument("--serverhost", dest="serverhost",
                         help="Server hostname for remote signature management", metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int, help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto",
                         help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                         help="File containing the server authentication token", metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid",
                         help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform",
                         help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product",
                         help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version",
                         help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os",
                         help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool",
                         help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str,
                         help="List of metadata variables in the form 'KEY=VALUE'")

    parser.add_argument('rargs', nargs=argparse.REMAINDER, help=argparse.SUPPRESS)

    if len(argv) == 0:
        parser.print_help()
        return 2

    # process options
    opts = parser.parse_args(argv)

    if not opts.rargs:
        print("Error: No arguments specified", file=sys.stderr)
        return 2

    binary = opts.rargs[0]
    if not os.path.exists(binary):
        print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
        return 2

    configuration = ProgramConfiguration.fromBinary(binary)
    if configuration is None:
        # No .fuzzmanagerconf next to the binary: fall back to the explicit
        # command-line configuration, which then must be complete.
        if opts.platform is None or opts.product is None or opts.os is None:
            print("Error: Failed to load program configuration based on binary", file=sys.stderr)
            print("Error: Must use binary configuration file or specify/configure at least "
                  "--platform, --product and --os", file=sys.stderr)
            return 2
        configuration = ProgramConfiguration(opts.product, opts.platform, opts.os, opts.product_version)

    # Copy the system environment variables by default and overwrite them
    # if they are specified through --env. The overrides are also recorded
    # on the configuration so they are reported with any crash.
    env = dict(os.environ)
    if opts.env:
        oenv = dict(kv.split('=', 1) for kv in opts.env)
        configuration.addEnvironmentVariables(oenv)
        env.update(oenv)

    args = opts.rargs[1:]
    if args:
        configuration.addProgramArguments(args)

    metadata = {}
    if opts.metadata:
        metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
        configuration.addMetadata(metadata)

    # Set LD_LIBRARY_PATH for convenience
    if 'LD_LIBRARY_PATH' not in env:
        env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto,
                          serverauthtoken, opts.clientid, opts.tool)

    signature_repeat_count = 0
    last_signature = None

    while True:
        # Restart the target for every iteration; the monitor thread parses
        # its stderr for the (A)San trace and testcase path.
        process = subprocess.Popen(
            opts.rargs,
            stderr=subprocess.PIPE,
            env=env,
            universal_newlines=True
        )

        monitor = LibFuzzerMonitor(process.stderr)
        monitor.start()
        monitor.join()

        print("Process terminated, processing results...", file=sys.stderr)

        trace = monitor.getASanTrace()
        testcase = monitor.getTestcase()

        crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

        (sigfile, metadata) = collector.search(crashInfo)

        if sigfile is not None:
            # Known signature: count consecutive repeats so we can bail out
            # when the fuzzer is stuck on a single crash.
            if last_signature == sigfile:
                signature_repeat_count += 1
            else:
                last_signature = sigfile
                signature_repeat_count = 0
            print("Crash matches signature %s, not submitting..." % sigfile, file=sys.stderr)
        else:
            collector.generate(crashInfo, forceCrashAddress=True, forceCrashInstruction=False, numFrames=8)
            collector.submit(crashInfo, testcase)
            print("Successfully submitted crash.", file=sys.stderr)

        if signature_repeat_count >= 10:
            print("Too many crashes with the same signature, exiting...", file=sys.stderr)
            break
def newSignature(request):
    """Django view: create a new signature bucket (POST) or prefill the edit form (GET).

    On GET with a ``crashid``, a signature proposal is generated from the
    referenced crash entry; on any other method a SuspiciousOperation is raised.

    Fix: ``proposedSignature == None`` replaced with the identity comparison
    ``is None`` (PEP 8; ``==`` can be overridden by the operand's class).
    """
    if request.method == 'POST':
        # TODO: FIXME: Update bug here as well
        bucket = Bucket(
            signature=request.POST['signature'],
            shortDescription=request.POST['shortDescription'],
            frequent="frequent" in request.POST
        )
        return __handleSignaturePost(request, bucket)
    elif request.method == 'GET':
        if 'crashid' in request.GET:
            crashEntry = get_object_or_404(CrashEntry, pk=request.GET['crashid'])

            configuration = ProgramConfiguration(crashEntry.product.name,
                                                 crashEntry.platform.name,
                                                 crashEntry.os.name,
                                                 crashEntry.product.version)

            crashInfo = CrashInfo.fromRawCrashData(crashEntry.rawStdout,
                                                   crashEntry.rawStderr,
                                                   configuration,
                                                   crashEntry.rawCrashData)

            # Signature-generation knobs, overridable via query parameters.
            maxStackFrames = 8
            forceCrashInstruction = False
            forceCrashAddress = True
            errorMsg = None

            if 'stackframes' in request.GET:
                maxStackFrames = int(request.GET['stackframes'])

            if 'forcecrashaddress' in request.GET:
                forceCrashAddress = bool(int(request.GET['forcecrashaddress']))

            if 'forcecrashinstruction' in request.GET:
                forceCrashInstruction = bool(int(request.GET['forcecrashinstruction']))

            # First try to create the signature with the crash address included.
            # However, if that fails, try without forcing the crash signature.
            proposedSignature = crashInfo.createCrashSignature(
                forceCrashAddress=forceCrashAddress,
                forceCrashInstruction=forceCrashInstruction,
                maxFrames=maxStackFrames
            )
            if proposedSignature is None:
                errorMsg = crashInfo.failureReason
                proposedSignature = crashInfo.createCrashSignature(maxFrames=maxStackFrames)

            proposedSignature = str(proposedSignature)
            proposedShortDesc = crashInfo.createShortSignature()

            data = {
                'new': True,
                'bucket': {
                    'pk': None,
                    'bug': None,
                    'signature': proposedSignature,
                    'shortDescription': proposedShortDesc
                },
                'error_message': errorMsg
            }
        else:
            data = {'new': True}
    else:
        raise SuspiciousOperation

    return render(request, 'signatures/edit.html', data)
def main(argv=None):
    '''Command line options.

    Entry point for the combined LibFuzzer/AFL management daemon: runs the
    target under LibFuzzerMonitor (libfuzzer mode) or manages AFL output
    directories, S3 corpus synchronization and FuzzManager submission
    (aflfuzz mode).

    Fixes applied:
      * ``file=sys.sdderr`` typo on the --cmd/--aflfuzz error path raised
        AttributeError instead of printing the error; corrected to
        ``sys.stderr``.
      * ``== None`` / ``!= None`` replaced with identity comparisons.
      * Environment variables were registered on the configuration twice;
        merged into a single block.
      * Bare ``except:`` around the AFL subprocess call narrowed to
        ``except Exception:`` so KeyboardInterrupt/SystemExit still propagate.
      * Loop variable ``file`` renamed to avoid shadowing the builtin.

    :param argv: argument list to parse (defaults to ``sys.argv[1:]``)
    :return: process exit code (0 on success, 2 on usage errors)
    '''
    program_name = os.path.basename(sys.argv[0])

    if argv is None:
        argv = sys.argv[1:]

    # setup argparser
    parser = argparse.ArgumentParser(
        usage='%s --libfuzzer or --aflfuzz [OPTIONS] --cmd <COMMAND AND ARGUMENTS>' % program_name)

    mainGroup = parser.add_argument_group(title="Main Options", description=None)
    aflGroup = parser.add_argument_group(
        title="AFL Options", description="Use these arguments in AFL mode")
    libfGroup = parser.add_argument_group(
        title="Libfuzzer Options", description="Use these arguments in Libfuzzer mode")
    fmGroup = parser.add_argument_group(
        title="FuzzManager Options", description="Use these to specify FuzzManager parameters")

    mainGroup.add_argument("--libfuzzer", dest="libfuzzer", action='store_true',
                           help="Enable LibFuzzer mode")
    mainGroup.add_argument("--aflfuzz", dest="aflfuzz", action='store_true',
                           help="Enable AFL mode")
    mainGroup.add_argument("--fuzzmanager", dest="fuzzmanager", action='store_true',
                           help="Use FuzzManager to submit crash results")

    libfGroup.add_argument('--env', dest='env', nargs='+', type=str,
                           help="List of environment variables in the form 'KEY=VALUE'")
    libfGroup.add_argument('--cmd', dest='cmd', action='store_true',
                           help="Command with parameters to run")
    libfGroup.add_argument("--sigdir", dest="sigdir",
                           help="Signature cache directory", metavar="DIR")

    fmGroup.add_argument("--fuzzmanager-toolname", dest="fuzzmanager_toolname",
                         help="Override FuzzManager tool name (for submitting crash results)")
    fmGroup.add_argument("--custom-cmdline-file", dest="custom_cmdline_file",
                         help="Path to custom cmdline file", metavar="FILE")
    fmGroup.add_argument("--env-file", dest="env_file",
                         help="Path to a file with additional environment variables", metavar="FILE")
    fmGroup.add_argument("--serverhost",
                         help="Server hostname for remote signature management.", metavar="HOST")
    fmGroup.add_argument("--serverport", dest="serverport", type=int,
                         help="Server port to use", metavar="PORT")
    fmGroup.add_argument("--serverproto", dest="serverproto",
                         help="Server protocol to use (default is https)", metavar="PROTO")
    fmGroup.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                         help="File containing the server authentication token", metavar="FILE")
    fmGroup.add_argument("--clientid", dest="clientid",
                         help="Client ID to use when submitting issues", metavar="ID")
    fmGroup.add_argument("--platform", dest="platform",
                         help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    fmGroup.add_argument("--product", dest="product",
                         help="Product this crash appeared on", metavar="PRODUCT")
    fmGroup.add_argument("--productversion", dest="product_version",
                         help="Product version this crash appeared on", metavar="VERSION")
    fmGroup.add_argument("--os", dest="os",
                         help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    fmGroup.add_argument("--tool", dest="tool",
                         help="Name of the tool that found this issue", metavar="NAME")
    fmGroup.add_argument('--metadata', dest='metadata', nargs='+', type=str,
                         help="List of metadata variables in the form 'KEY=VALUE'")

    aflGroup.add_argument("--s3-queue-upload", dest="s3_queue_upload", action='store_true',
                          help="Use S3 to synchronize queues")
    aflGroup.add_argument("--s3-queue-cleanup", dest="s3_queue_cleanup", action='store_true',
                          help="Cleanup S3 queue entries older than specified refresh interval")
    aflGroup.add_argument("--s3-queue-status", dest="s3_queue_status", action='store_true',
                          help="Display S3 queue status")
    aflGroup.add_argument("--s3-build-download", dest="s3_build_download",
                          help="Use S3 to download the build for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-build-upload", dest="s3_build_upload",
                          help="Use S3 to upload a new build for the specified project", metavar="FILE")
    aflGroup.add_argument("--s3-corpus-download", dest="s3_corpus_download",
                          help="Use S3 to download the test corpus for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-download-size", dest="s3_corpus_download_size",
                          help="When downloading the corpus, select only SIZE files randomly", metavar="SIZE")
    aflGroup.add_argument("--s3-corpus-upload", dest="s3_corpus_upload",
                          help="Use S3 to upload a test corpus for the specified project", metavar="DIR")
    aflGroup.add_argument("--s3-corpus-replace", dest="s3_corpus_replace", action='store_true',
                          help="In conjunction with --s3-corpus-upload, deletes all other remote test files")
    aflGroup.add_argument("--s3-corpus-refresh", dest="s3_corpus_refresh",
                          help="Download queues and corpus from S3, combine and minimize, then re-upload.",
                          metavar="DIR")
    aflGroup.add_argument("--s3-corpus-status", dest="s3_corpus_status", action='store_true',
                          help="Display S3 corpus status")
    aflGroup.add_argument("--test-file", dest="test_file",
                          help="Optional path to copy the test file to before reproducing", metavar="FILE")
    aflGroup.add_argument("--afl-timeout", dest="afl_timeout", type=int, default=1000,
                          help="Timeout per test to pass to AFL for corpus refreshing", metavar="MSECS")
    aflGroup.add_argument("--firefox", dest="firefox", action='store_true',
                          help="Test Program is Firefox (requires FFPuppet installed)")
    aflGroup.add_argument("--firefox-prefs", dest="firefox_prefs",
                          help="Path to prefs.js file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-extensions", nargs='+', type=str, dest="firefox_extensions",
                          help="Path extension file for Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-testpath", dest="firefox_testpath",
                          help="Path to file to open with Firefox", metavar="FILE")
    aflGroup.add_argument("--firefox-start-afl", dest="firefox_start_afl", metavar="FILE",
                          help="Start AFL with the given Firefox binary, remaining arguments being passed to AFL")
    aflGroup.add_argument("--s3-refresh-interval", dest="s3_refresh_interval", type=int, default=86400,
                          help="How often the s3 corpus is refreshed (affects queue cleaning)", metavar="SECS")
    aflGroup.add_argument("--afl-output-dir", dest="afloutdir",
                          help="Path to the AFL output directory to manage", metavar="DIR")
    aflGroup.add_argument("--afl-binary-dir", dest="aflbindir",
                          help="Path to the AFL binary directory to use", metavar="DIR")
    aflGroup.add_argument("--afl-stats", dest="aflstats",
                          help="Collect aggregated statistics while scanning output directories", metavar="FILE")
    aflGroup.add_argument("--s3-bucket", dest="s3_bucket",
                          help="Name of the S3 bucket to use", metavar="NAME")
    aflGroup.add_argument("--project", dest="project",
                          help="Name of the subfolder/project inside the S3 bucket", metavar="NAME")
    aflGroup.add_argument('rargs', nargs=argparse.REMAINDER)

    if len(argv) == 0:
        parser.print_help()
        return 2

    opts = parser.parse_args(argv)

    # AFL mode is the default when neither mode is selected explicitly.
    if not opts.libfuzzer and not opts.aflfuzz:
        opts.aflfuzz = True

    if opts.cmd and opts.aflfuzz:
        if not opts.firefox:
            # Fix: was `file=sys.sdderr`, which raised AttributeError here.
            print("Error: Use --cmd either with libfuzzer or with afl in firefox mode",
                  file=sys.stderr)
            return 2

    if opts.libfuzzer:
        if not opts.rargs:
            print("Error: No arguments specified", file=sys.stderr)
            return 2

        binary = opts.rargs[0]
        if not os.path.exists(binary):
            print("Error: Specified binary does not exist: %s" % binary, file=sys.stderr)
            return 2

        configuration = ProgramConfiguration.fromBinary(binary)
        if configuration is None:
            print("Error: Failed to load program configuration based on binary", file=sys.stderr)
            return 2

        # Copy the system environment variables by default and overwrite them
        # if they are specified through --env. The overrides are also recorded
        # on the configuration so they are reported with any crash.
        env = dict(os.environ)
        if opts.env:
            oenv = dict(kv.split('=', 1) for kv in opts.env)
            configuration.addEnvironmentVariables(oenv)
            env.update(oenv)

        args = opts.rargs[1:]
        if args:
            configuration.addProgramArguments(args)

        metadata = {}
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))
            configuration.addMetadata(metadata)

        # Set LD_LIBRARY_PATH for convenience
        if 'LD_LIBRARY_PATH' not in env:
            env['LD_LIBRARY_PATH'] = os.path.dirname(binary)

        collector = Collector(opts.sigdir, opts.fuzzmanager_toolname)

        signature_repeat_count = 0
        last_signature = None

        while True:
            process = subprocess.Popen(
                opts.rargs,
                stderr=subprocess.PIPE,
                env=env,
                universal_newlines=True)

            monitor = LibFuzzerMonitor(process.stderr)
            monitor.start()
            monitor.join()

            print("Process terminated, processing results...", file=sys.stderr)

            trace = monitor.getASanTrace()
            testcase = monitor.getTestcase()

            crashInfo = CrashInfo.fromRawCrashData([], [], configuration, auxCrashData=trace)

            (sigfile, metadata) = collector.search(crashInfo)

            if sigfile is not None:
                # Count consecutive repeats of the same signature so we can
                # bail out when the fuzzer is stuck on one crash.
                if last_signature == sigfile:
                    signature_repeat_count += 1
                else:
                    last_signature = sigfile
                    signature_repeat_count = 0
                print("Crash matches signature %s, not submitting..." % sigfile, file=sys.stderr)
            else:
                collector.generate(crashInfo, forceCrashAddress=True,
                                   forceCrashInstruction=False, numFrames=8)
                collector.submit(crashInfo, testcase)
                print("Successfully submitted crash.", file=sys.stderr)

            if signature_repeat_count >= 10:
                print("Too many crashes with the same signature, exiting...", file=sys.stderr)
                break

    if opts.aflfuzz:
        if opts.firefox or opts.firefox_start_afl:
            if not haveFFPuppet:
                print("Error: --firefox and --firefox-start-afl require FFPuppet to be installed",
                      file=sys.stderr)
                return 2
            if opts.custom_cmdline_file:
                print("Error: --custom-cmdline-file is incompatible with firefox options",
                      file=sys.stderr)
                return 2
            if not opts.firefox_prefs or not opts.firefox_testpath:
                print("Error: --firefox and --firefox-start-afl require --firefox-prefs "
                      "and --firefox-testpath to be specified", file=sys.stderr)
                return 2

        if opts.firefox_start_afl:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for starting AFL with firefox",
                      file=sys.stderr)
                return 2

            (ffp, cmd, env) = setup_firefox(opts.firefox_start_afl, opts.firefox_prefs,
                                            opts.firefox_extensions, opts.firefox_testpath)

            afl_cmd = [os.path.join(opts.aflbindir, "afl-fuzz")]
            # Everything before "--" goes to afl-fuzz, the rest is the target command.
            opts.rargs.remove("--")
            afl_cmd.extend(opts.rargs)
            afl_cmd.extend(cmd)

            try:
                subprocess.call(afl_cmd, env=env)
            except Exception:
                traceback.print_exc()

            ffp.clean_up()
            return 0

        afl_out_dirs = []
        if opts.afloutdir:
            if not os.path.exists(os.path.join(opts.afloutdir, "crashes")):
                # The specified directory doesn't have a "crashes" sub directory.
                # Either the wrong directory was specified, or this is an AFL
                # multi-process synchronization directory. Try to figure this out here.
                sync_dirs = os.listdir(opts.afloutdir)
                for sync_dir in sync_dirs:
                    if os.path.exists(os.path.join(opts.afloutdir, sync_dir, "crashes")):
                        afl_out_dirs.append(os.path.join(opts.afloutdir, sync_dir))
                if not afl_out_dirs:
                    print("Error: Directory %s does not appear to be a valid AFL "
                          "output/sync directory" % opts.afloutdir, file=sys.stderr)
                    return 2
            else:
                afl_out_dirs.append(opts.afloutdir)

        # Upload and FuzzManager modes require specifying the AFL directory
        if opts.s3_queue_upload or opts.fuzzmanager:
            if not opts.afloutdir:
                print("Error: Must specify AFL output directory using --afl-output-dir",
                      file=sys.stderr)
                return 2

        if (opts.s3_queue_upload or opts.s3_corpus_refresh or opts.s3_build_download
                or opts.s3_build_upload or opts.s3_corpus_download or opts.s3_corpus_upload
                or opts.s3_queue_status):
            if not opts.s3_bucket or not opts.project:
                print("Error: Must specify both --s3-bucket and --project for S3 actions",
                      file=sys.stderr)
                return 2

        if opts.s3_queue_status:
            status_data = get_queue_status(opts.s3_bucket, opts.project)
            total_queue_files = 0
            for queue_name in status_data:
                print("Queue %s: %s" % (queue_name, status_data[queue_name]))
                total_queue_files += status_data[queue_name]
            print("Total queue files: %s" % total_queue_files)
            return 0

        if opts.s3_corpus_status:
            status_data = get_corpus_status(opts.s3_bucket, opts.project)
            total_corpus_files = 0
            for (status_dt, status_cnt) in sorted(status_data.items()):
                print("Added %s: %s" % (status_dt, status_cnt))
                total_corpus_files += status_cnt
            print("Total corpus files: %s" % total_corpus_files)
            return 0

        if opts.s3_queue_cleanup:
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project,
                             opts.s3_refresh_interval)
            return 0

        if opts.s3_build_download:
            download_build(opts.s3_build_download, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_build_upload:
            upload_build(opts.s3_build_upload, opts.s3_bucket, opts.project)
            return 0

        if opts.s3_corpus_download:
            if opts.s3_corpus_download_size is not None:
                opts.s3_corpus_download_size = int(opts.s3_corpus_download_size)
            download_corpus(opts.s3_corpus_download, opts.s3_bucket, opts.project,
                            opts.s3_corpus_download_size)
            return 0

        if opts.s3_corpus_upload:
            upload_corpus(opts.s3_corpus_upload, opts.s3_bucket, opts.project,
                          opts.s3_corpus_replace)
            return 0

        if opts.s3_corpus_refresh:
            if not opts.aflbindir:
                print("Error: Must specify --afl-binary-dir for refreshing the test corpus",
                      file=sys.stderr)
                return 2

            if not os.path.exists(opts.s3_corpus_refresh):
                os.makedirs(opts.s3_corpus_refresh)

            queues_dir = os.path.join(opts.s3_corpus_refresh, "queues")

            print("Cleaning old AFL queues from s3://%s/%s/queues/"
                  % (opts.s3_bucket, opts.project))
            clean_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project,
                             opts.s3_refresh_interval)

            print("Downloading AFL queues from s3://%s/%s/queues/ to %s"
                  % (opts.s3_bucket, opts.project, queues_dir))
            download_queue_dirs(opts.s3_corpus_refresh, opts.s3_bucket, opts.project)

            cmdline_file = os.path.join(opts.s3_corpus_refresh, "cmdline")
            if not os.path.exists(cmdline_file):
                print("Error: Failed to download a cmdline file from queue directories.",
                      file=sys.stderr)
                return 2

            print("Downloading build")
            download_build(os.path.join(opts.s3_corpus_refresh, "build"),
                           opts.s3_bucket, opts.project)

            with open(cmdline_file, 'r') as cmdline_fd:
                cmdline = cmdline_fd.read().splitlines()

            # Assume cmdline[0] is the name of the binary
            binary_name = os.path.basename(cmdline[0])

            # Try locating our binary in the build we just unpacked
            binary_search_result = [
                os.path.join(dirpath, filename)
                for dirpath, dirnames, filenames in os.walk(
                    os.path.join(opts.s3_corpus_refresh, "build"))
                for filename in filenames
                if (filename == binary_name and (
                    stat.S_IXUSR & os.stat(os.path.join(dirpath, filename))[stat.ST_MODE]))
            ]

            if not binary_search_result:
                print("Error: Failed to locate binary %s in unpacked build." % binary_name,
                      file=sys.stderr)
                return 2

            if len(binary_search_result) > 1:
                print("Error: Binary name %s is ambiguous in unpacked build." % binary_name,
                      file=sys.stderr)
                return 2

            cmdline[0] = binary_search_result[0]

            # Download our current corpus into the queues directory as well
            print("Downloading corpus from s3://%s/%s/corpus/ to %s"
                  % (opts.s3_bucket, opts.project, queues_dir))
            download_corpus(queues_dir, opts.s3_bucket, opts.project)

            # Ensure the directory for our new tests is empty
            updated_tests_dir = os.path.join(opts.s3_corpus_refresh, "tests")
            if os.path.exists(updated_tests_dir):
                shutil.rmtree(updated_tests_dir)
            os.mkdir(updated_tests_dir)

            # Run afl-cmin
            afl_cmin = os.path.join(opts.aflbindir, "afl-cmin")
            if not os.path.exists(afl_cmin):
                print("Error: Unable to locate afl-cmin binary.", file=sys.stderr)
                return 2

            if opts.firefox:
                (ffpInst, ffCmd, ffEnv) = setup_firefox(cmdline[0], opts.firefox_prefs,
                                                        opts.firefox_extensions,
                                                        opts.firefox_testpath)
                cmdline = ffCmd

            afl_cmdline = [
                afl_cmin, '-e', '-i', queues_dir, '-o', updated_tests_dir,
                '-t', str(opts.afl_timeout), '-m', 'none'
            ]

            if opts.test_file:
                afl_cmdline.extend(['-f', opts.test_file])

            afl_cmdline.extend(cmdline)

            print("Running afl-cmin")
            with open(os.devnull, 'w') as devnull:
                env = os.environ.copy()
                env['LD_LIBRARY_PATH'] = os.path.dirname(cmdline[0])
                if opts.firefox:
                    env.update(ffEnv)
                subprocess.check_call(afl_cmdline, stdout=devnull, env=env)

            if opts.firefox:
                ffpInst.clean_up()

            # replace existing corpus with reduced corpus
            print("Uploading reduced corpus to s3://%s/%s/corpus/"
                  % (opts.s3_bucket, opts.project))
            upload_corpus(updated_tests_dir, opts.s3_bucket, opts.project, corpus_delete=True)

            # Prune the queues directory once we successfully uploaded the new
            # test corpus, but leave everything that's part of our new corpus
            # so we don't have to download those files again.
            test_files = [
                entry for entry in os.listdir(updated_tests_dir)
                if os.path.isfile(os.path.join(updated_tests_dir, entry))
            ]
            obsolete_queue_files = [
                entry for entry in os.listdir(queues_dir)
                if os.path.isfile(os.path.join(queues_dir, entry)) and entry not in test_files
            ]

            for queue_file in obsolete_queue_files:
                os.remove(os.path.join(queues_dir, queue_file))

        if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats:
            last_queue_upload = 0
            while True:
                if opts.fuzzmanager:
                    for afl_out_dir in afl_out_dirs:
                        scan_crashes(afl_out_dir, opts.custom_cmdline_file,
                                     opts.env_file, opts.fuzzmanager_toolname,
                                     opts.test_file)

                # Only upload queue files every 20 minutes
                if opts.s3_queue_upload and last_queue_upload < int(time.time()) - 1200:
                    for afl_out_dir in afl_out_dirs:
                        upload_queue_dir(afl_out_dir, opts.s3_bucket, opts.project,
                                         new_cov_only=True)
                    last_queue_upload = int(time.time())

                if opts.aflstats:
                    write_aggregated_stats(afl_out_dirs, opts.aflstats)

                time.sleep(10)
def create(self, attrs):
    '''
    Create a CrashEntry instance based on the given dictionary of values
    received. We need to unflatten foreign relationships like product,
    platform, os and client and create the foreign objects on the fly
    if they don't exist in our database yet.

    :param attrs: validated serializer data; must contain 'rawStdout',
                  'rawStderr' and 'rawCrashData', plus nested dicts for
                  product/platform/os/client/tool and 'testcase'.
    :return: the created CrashEntry instance.
    :raises InvalidArgumentException: if a required raw field is missing.
    :raises RuntimeError: if a testcase is supplied without an extension.
    '''
    missing_keys = {'rawStdout', 'rawStderr', 'rawCrashData'} - set(attrs.keys())
    if missing_keys:
        raise InvalidArgumentException({key: ["This field is required."] for key in missing_keys})

    # NOTE(review): presumably duplicate Product rows can exist for the same
    # lookup values; in that case fall back to the oldest row — TODO confirm
    # against the Product model's uniqueness constraints.
    try:
        attrs['product'] = Product.objects.get_or_create(**attrs['product'])[0]
    except Product.MultipleObjectsReturned:
        attrs['product'] = Product.objects.filter(**attrs['product']).order_by('id').first()
    attrs['platform'] = Platform.objects.get_or_create(**attrs['platform'])[0]
    attrs['os'] = OS.objects.get_or_create(**attrs['os'])[0]
    attrs['client'] = Client.objects.get_or_create(**attrs['client'])[0]
    attrs['tool'] = Tool.objects.get_or_create(**attrs['tool'])[0]

    # Parse the incoming data using the crash signature package from FTB
    configuration = ProgramConfiguration(attrs['product'].name, attrs['platform'].name,
                                         attrs['os'].name, attrs['product'].version)
    crashInfo = CrashInfo.fromRawCrashData(attrs['rawStdout'], attrs['rawStderr'],
                                           configuration, attrs['rawCrashData'])

    # Populate certain fields here from the CrashInfo object we just got
    if crashInfo.crashAddress is not None:
        attrs['crashAddress'] = '0x%x' % crashInfo.crashAddress
    attrs['shortSignature'] = crashInfo.createShortSignature()

    # If a testcase is supplied, create a testcase object and store it
    if 'test' in attrs['testcase']:
        testcase = attrs['testcase']
        testcase_ext = attrs.pop('testcase_ext', None)
        testcase_quality = testcase.get('quality', 0)
        testcase_isbinary = testcase.get('isBinary', False)
        testcase = testcase['test']

        if testcase_ext is None:
            raise RuntimeError("Must provide testcase extension when providing testcase")

        # The SHA1 digest of the (decoded) test content becomes the stored
        # file name, so identical content maps to the same file name.
        h = hashlib.new('sha1')
        if testcase_isbinary:
            # Binary testcases arrive base64-encoded; hash the decoded bytes.
            testcase = base64.b64decode(testcase)
            h.update(testcase)
        else:
            # NOTE(review): hashing repr() rather than the raw text looks
            # intentional (normalizes str/unicode) — confirm before changing.
            h.update(repr(testcase).encode("utf-8"))

        dbobj = TestCase(quality=testcase_quality, isBinary=testcase_isbinary,
                         size=len(testcase))
        dbobj.test.save("%s.%s" % (h.hexdigest(), testcase_ext), ContentFile(testcase))
        dbobj.save()
        attrs['testcase'] = dbobj
    else:
        attrs['testcase'] = None

    # Create our CrashEntry instance
    return super(CrashEntrySerializer, self).create(attrs)
def test_SignatureMatchWithUnicode():
    """A stdout output symptom must not spuriously match unrelated unicode stdout data."""
    cfg = ProgramConfiguration('test', 'x86-64', 'linux')
    # Stdout contains non-ASCII guillemet characters; the signature looks for "x".
    info = CrashInfo.fromRawCrashData(["(«f => (generator.throw(f))», «undefined»)"], [], cfg)
    sig = CrashSignature('{"symptoms": [{"src": "stdout", "type": "output", "value": "x"}]}')
    assert not sig.matches(info)
def getCrashInfo(self, configuration):
    """Build a CrashInfo from this object's captured stdout, stderr and auxiliary crash data.

    :param configuration: ProgramConfiguration describing the target program.
    :return: the CrashInfo produced by CrashInfo.fromRawCrashData.
    """
    return CrashInfo.fromRawCrashData(self.stdout, self.stderr, configuration, self.auxCrashData)
def main(args=None):
    '''Command line options.

    Parses arguments, builds a ProgramConfiguration / CrashInfo as needed and
    dispatches to exactly one Collector action (refresh, submit, search,
    generate, autosubmit, download, download-all, get-clientid).

    @param args: Optional argument list (defaults to sys.argv via argparse).
    @return: Process exit code (0 on success, non-zero on failure).
    '''

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version', version='%s v%s (%s)' % (__file__, __version__, __updated__))

    # Crash information
    parser.add_argument("--stdout", help="File containing STDOUT data", metavar="FILE")
    parser.add_argument("--stderr", help="File containing STDERR data", metavar="FILE")
    parser.add_argument("--crashdata", help="File containing external crash data", metavar="FILE")

    # Actions — mutually exclusive, exactly one required
    action_group = parser.add_argument_group("Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--refresh", action='store_true', help="Perform a signature refresh")
    actions.add_argument("--submit", action='store_true', help="Submit a signature to the server")
    actions.add_argument("--search", action='store_true', help="Search cached signatures for the given crash")
    actions.add_argument("--generate", action='store_true',
                         help="Create a (temporary) local signature in the cache directory")
    actions.add_argument("--autosubmit", action='store_true',
                         help=("Go into auto-submit mode. In this mode, all remaining arguments are interpreted "
                               "as the crashing command. This tool will automatically obtain GDB crash information "
                               "and submit it."))
    actions.add_argument("--download", type=int,
                         help="Download the testcase for the specified crash entry", metavar="ID")
    actions.add_argument("--download-all", type=int,
                         help="Download all testcases for the specified signature entry", metavar="ID")
    actions.add_argument("--get-clientid", action='store_true',
                         help="Print the client ID used when submitting issues")

    # Settings
    parser.add_argument("--sigdir", help="Signature cache directory", metavar="DIR")
    parser.add_argument("--serverhost", help="Server hostname for remote signature management", metavar="HOST")
    parser.add_argument("--serverport", type=int, help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", help="Server protocol to use (default is https)", metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid", help="Client ID to use when submitting issues", metavar="ID")
    parser.add_argument("--platform", help="Platform this crash appeared on", metavar="(x86|x86-64|arm)")
    parser.add_argument("--product", help="Product this crash appeared on", metavar="PRODUCT")
    parser.add_argument("--productversion", dest="product_version",
                        help="Product version this crash appeared on", metavar="VERSION")
    parser.add_argument("--os", help="OS this crash appeared on", metavar="(windows|linux|macosx|b2g|android)")
    parser.add_argument("--tool", help="Name of the tool that found this issue", metavar="NAME")
    parser.add_argument('--args', nargs='+', type=str,
                        help="List of program arguments. Backslashes can be used for escaping and are stripped.")
    parser.add_argument('--env', nargs='+', type=str,
                        help="List of environment variables in the form 'KEY=VALUE'")
    parser.add_argument('--metadata', nargs='+', type=str,
                        help="List of metadata variables in the form 'KEY=VALUE'")
    parser.add_argument("--binary", help="Binary that has a configuration file for reading", metavar="BINARY")
    parser.add_argument("--testcase", help="File containing testcase", metavar="FILE")
    parser.add_argument("--testcasequality", default=0, type=int,
                        help="Integer indicating test case quality (%(default)s is best and default)", metavar="VAL")
    parser.add_argument("--testcasesize", type=int,
                        help="Integer indicating test case size (default is size of testcase data)", metavar="SIZE")

    # Options that affect how signatures are generated
    parser.add_argument("--forcecrashaddr", action='store_true',
                        help="Force including the crash address into the signature")
    parser.add_argument("--forcecrashinst", action='store_true',
                        help="Force including the crash instruction into the signature (GDB only)")
    parser.add_argument("--numframes", default=8, type=int,
                        help="How many frames to include into the signature (default: %(default)s)")

    parser.add_argument('rargs', nargs=argparse.REMAINDER)

    # process options
    opts = parser.parse_args(args=args)

    # In autosubmit mode, we try to open a configuration file for the binary specified
    # on the command line. It should contain the binary-specific settings for submitting.
    if opts.autosubmit:
        if not opts.rargs:
            parser.error("Action --autosubmit requires test arguments to be specified")

        # Store the binary candidate only if --binary wasn't also specified
        if not opts.binary:
            opts.binary = opts.rargs[0]

        # We also need to check that (apart from the binary), there is only one file on the command line
        # (the testcase), if it hasn't been explicitely specified.
        testcase = opts.testcase
        testcaseidx = None
        if testcase is None:
            for idx, arg in enumerate(opts.rargs[1:]):
                if os.path.exists(arg):
                    if testcase:
                        parser.error("Multiple potential testcases specified on command line. "
                                     "Must explicitly specify test using --testcase.")
                    testcase = arg
                    testcaseidx = idx

    # Either --autosubmit was specified, or someone specified --binary manually
    # Check that the binary actually exists
    if opts.binary and not os.path.exists(opts.binary):
        parser.error("Error: Specified binary does not exist: %s" % opts.binary)

    stdout = None
    stderr = None
    crashdata = None
    crashInfo = None
    args = None
    env = None
    metadata = {}

    if opts.search or opts.generate or opts.submit or opts.autosubmit:
        if opts.metadata:
            metadata.update(dict(kv.split('=', 1) for kv in opts.metadata))

        if opts.autosubmit:
            # Try to automatically get arguments from the command line
            # If the testcase is not the last argument, leave it in the
            # command line arguments and replace it with a generic placeholder.
            if testcaseidx == len(opts.rargs[1:]) - 1:
                args = opts.rargs[1:-1]
            else:
                args = opts.rargs[1:]
                if testcaseidx is not None:
                    args[testcaseidx] = "TESTFILE"
        else:
            if opts.args:
                # Backslashes on the command line are escaping only; strip them.
                args = [arg.replace('\\', '') for arg in opts.args]

        if opts.env:
            env = dict(kv.split('=', 1) for kv in opts.env)

        # Start without any ProgramConfiguration
        configuration = None

        # If we have a binary, try using that to create our ProgramConfiguration
        if opts.binary:
            configuration = ProgramConfiguration.fromBinary(opts.binary)
            if configuration:
                if env:
                    configuration.addEnvironmentVariables(env)
                if args:
                    configuration.addProgramArguments(args)
                if metadata:
                    configuration.addMetadata(metadata)

        # If configuring through binary failed, try to manually create ProgramConfiguration
        # from command line arguments
        if configuration is None:
            if opts.platform is None or opts.product is None or opts.os is None:
                parser.error("Must specify/configure at least --platform, --product and --os")
            configuration = ProgramConfiguration(opts.product, opts.platform, opts.os, opts.product_version,
                                                 env, args, metadata)

        if not opts.autosubmit:
            if opts.stderr is None and opts.crashdata is None:
                parser.error("Must specify at least either --stderr or --crashdata file")

            if opts.stdout:
                with open(opts.stdout) as f:
                    stdout = f.read()

            if opts.stderr:
                with open(opts.stderr) as f:
                    stderr = f.read()

            if opts.crashdata:
                with open(opts.crashdata) as f:
                    crashdata = f.read()

            crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration, auxCrashData=crashdata)
            if opts.testcase:
                (testCaseData, isBinary) = Collector.read_testcase(opts.testcase)
                if not isBinary:
                    crashInfo.testcase = testCaseData

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    collector = Collector(opts.sigdir, opts.serverhost, opts.serverport, opts.serverproto, serverauthtoken,
                          opts.clientid, opts.tool)

    if opts.refresh:
        collector.refresh()
        return 0

    if opts.submit:
        testcase = opts.testcase
        collector.submit(crashInfo, testcase, opts.testcasequality, opts.testcasesize, metadata)
        return 0

    if opts.search:
        (sig, metadata) = collector.search(crashInfo)
        if sig is None:
            print("No match found", file=sys.stderr)
            return 3
        print(sig)
        if metadata:
            print(json.dumps(metadata, indent=4))
        return 0

    if opts.generate:
        sigFile = collector.generate(crashInfo, opts.forcecrashaddr, opts.forcecrashinst, opts.numframes)
        if not sigFile:
            print("Failed to generate a signature for the given crash information.", file=sys.stderr)
            return 1
        print(sigFile)
        return 0

    if opts.autosubmit:
        runner = AutoRunner.fromBinaryArgs(opts.rargs[0], opts.rargs[1:])
        if runner.run():
            crashInfo = runner.getCrashInfo(configuration)
            collector.submit(crashInfo, testcase, opts.testcasequality, opts.testcasesize, metadata)
        else:
            print("Error: Failed to reproduce the given crash, cannot submit.", file=sys.stderr)
            return 1

    if opts.download:
        (retFile, retJSON) = collector.download(opts.download)
        if not retFile:
            print("Specified crash entry does not have a testcase", file=sys.stderr)
            return 1

        if "args" in retJSON and retJSON["args"]:
            args = json.loads(retJSON["args"])
            print("Command line arguments: %s" % " ".join(args))
            print("")

        if "env" in retJSON and retJSON["env"]:
            env = json.loads(retJSON["env"])
            # Bugfix: the joined string was previously passed as a second
            # positional argument to print() (comma instead of %), printing
            # the literal "%s". Use %-interpolation like the args branch above.
            print("Environment variables: %s" % " ".join("%s = %s" % (k, v) for (k, v) in env.items()))
            print("")

        if "metadata" in retJSON and retJSON["metadata"]:
            metadata = json.loads(retJSON["metadata"])
            print("== Metadata ==")
            for k, v in metadata.items():
                print("%s = %s" % (k, v))
            print("")

        print(retFile)
        return 0

    if opts.download_all:
        downloaded = False

        for result in collector.download_all(opts.download_all):
            downloaded = True
            print(result)

        if not downloaded:
            print("Specified signature does not have any testcases", file=sys.stderr)
            return 1

        return 0

    if opts.get_clientid:
        print(collector.clientId)
        return 0
def test_collector_submit(live_server, tmpdir, fm_user, monkeypatch):
    '''Test crash submission

    Exercises three submission paths against a live test server:
    1. Collector.submit() with a text testcase,
    2. the main() command line with a binary testcase and full metadata,
    3. a server-side 500 error, which must raise RuntimeError.
    '''
    monkeypatch.setattr(os.path, 'expanduser', lambda path: tmpdir.strpath)  # ensure fuzzmanager config is not used
    monkeypatch.setattr(time, 'sleep', lambda t: None)  # no retry delays during the test

    # create a collector
    url = urlsplit(live_server.url)
    collector = Collector(sigCacheDir=tmpdir.mkdir('sigcache').strpath,
                          serverHost=url.hostname,
                          serverPort=url.port,
                          serverProtocol=url.scheme,
                          serverAuthToken=fm_user.token,
                          clientId='test-fuzzer1',
                          tool='test-tool')
    testcase_path = tmpdir.mkdir('testcase').join('testcase.js').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(exampleTestCase)
    config = ProgramConfiguration('mozilla-central', 'x86-64', 'linux', version='ba0bc4f26681')
    crashInfo = CrashInfo.fromRawCrashData([], asanTraceCrash.splitlines(), config)

    # submit a crash to test server using collector
    result = collector.submit(crashInfo, testcase_path)

    # see that the issue was created in the server
    entry = CrashEntry.objects.get(pk=result['id'])
    assert entry.rawStdout == ''
    assert entry.rawStderr == asanTraceCrash
    assert entry.rawCrashData == ''
    assert entry.tool.name == 'test-tool'
    assert entry.client.name == 'test-fuzzer1'
    assert entry.product.name == config.product
    assert entry.product.version == config.version
    assert entry.platform.name == config.platform
    assert entry.os.name == config.os
    assert entry.testcase.quality == 0
    assert not entry.testcase.isBinary
    assert entry.testcase.size == len(exampleTestCase)
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == exampleTestCase
    assert entry.metadata == ''
    assert entry.env == ''
    assert entry.args == ''

    # create a test config (expanduser is patched above, so main() will pick this up)
    with open(tmpdir.join('.fuzzmanagerconf').strpath, 'w') as fp:
        fp.write('[Main]\n')
        fp.write('serverhost = %s\n' % url.hostname)
        fp.write('serverport = %d\n' % url.port)
        fp.write('serverproto = %s\n' % url.scheme)
        fp.write('serverauthtoken = %s\n' % fm_user.token)

    # try a binary testcase via cmd line
    testcase_path = tmpdir.join('testcase.bin').strpath
    with open(testcase_path, 'wb') as testcase_fp:
        testcase_fp.write(b'\0')
    stdout = tmpdir.join('stdout.txt').strpath
    with open(stdout, 'w') as fp:
        fp.write('stdout data')
    stderr = tmpdir.join('stderr.txt').strpath
    with open(stderr, 'w') as fp:
        fp.write('stderr data')
    crashdata = tmpdir.join('crashdata.txt').strpath
    with open(crashdata, 'w') as fp:
        fp.write(asanTraceCrash)
    result = main([
        '--submit',
        '--tool', 'tool2',
        '--product', 'mozilla-inbound',
        '--productversion', '12345',
        '--os', 'minix',
        '--platform', 'pdp11',
        '--env', 'PATH=/home/ken', 'LD_PRELOAD=hack.so',
        '--metadata', 'var1=val1', 'var2=val2',
        '--args', './myprog',
        '--testcase', testcase_path,
        '--testcasequality', '5',
        '--stdout', stdout,
        '--stderr', stderr,
        '--crashdata', crashdata,
    ])
    assert result == 0
    entry = CrashEntry.objects.get(pk__gt=entry.id)  # newer than the last result, will fail if the test db is active
    assert entry.rawStdout == 'stdout data'
    assert entry.rawStderr == 'stderr data'
    assert entry.rawCrashData == asanTraceCrash
    assert entry.tool.name == 'tool2'
    assert entry.client.name == platform.node()
    assert entry.product.name == 'mozilla-inbound'
    assert entry.product.version == '12345'
    assert entry.platform.name == 'pdp11'
    assert entry.os.name == 'minix'
    assert entry.testcase.quality == 5
    assert entry.testcase.isBinary
    assert entry.testcase.size == 1
    with open(entry.testcase.test.path, 'rb') as testcase_fp:
        assert testcase_fp.read() == b'\0'
    assert json.loads(entry.metadata) == {'var1': 'val1', 'var2': 'val2'}
    assert json.loads(entry.env) == {'PATH': '/home/ken', 'LD_PRELOAD': 'hack.so'}
    assert json.loads(entry.args) == ['./myprog']

    # Finally, simulate a permanent server-side failure: every POST returns a
    # 500, so submit() must eventually give up and raise.
    class response_t(object):
        status_code = 500
        text = "Error"

    def mypost(_session, _url, _data, headers=None):
        return response_t()
    monkeypatch.setattr(time, 'sleep', lambda t: None)
    monkeypatch.setattr(requests.Session, 'post', mypost)
    with pytest.raises(RuntimeError, match='Server unexpectedly responded'):
        collector.submit(crashInfo, testcase_path)
def restore_object(self, attrs, instance=None):
    '''
    Create a CrashEntry instance based on the given dictionary of values
    received. We need to unflatten foreign relationships like product,
    platform, os and client and create the foreign objects on the fly
    if they don't exist in our database yet.

    @type attrs: dict
    @param attrs: Deserialized field values, including flattened foreign keys.
    @param instance: An existing instance; updates are not allowed, so it is
                     returned unchanged when present.
    @return: The restored CrashEntry instance.
    @raises RuntimeError: if a testcase is provided without an extension.
    '''
    if instance:
        # Not allowed to update existing instances
        return instance

    product = attrs.pop('product', None)
    product_version = attrs.pop('product_version', None)
    platform = attrs.pop('platform', None)
    os = attrs.pop('os', None)
    client = attrs.pop('client', None)
    tool = attrs.pop('tool', None)
    testcase = attrs.pop('testcase', None)
    testcase_ext = attrs.pop('testcase_ext', None)
    testcase_quality = attrs.pop('testcase_quality', 0)
    testcase_isbinary = attrs.pop('testcase_isbinary', False)

    # Parse the incoming data using the crash signature package from FTB
    configuration = ProgramConfiguration(product, platform, os, product_version)
    crashInfo = CrashInfo.fromRawCrashData(attrs['rawStdout'], attrs['rawStderr'], configuration,
                                           attrs['rawCrashData'])

    # Populate certain fields here from the CrashInfo object we just got.
    # Fix: use "is not None" (PEP 8) instead of "!= None", and format the
    # address with '0x%x' for consistency with create() (hex() can append
    # an 'L' suffix for Python 2 longs).
    if crashInfo.crashAddress is not None:
        attrs['crashAddress'] = '0x%x' % crashInfo.crashAddress
    attrs['shortSignature'] = crashInfo.createShortSignature()

    def createOrGetModelByName(model, attrs):
        '''
        Generically determine if the given model with the given attributes
        already exists in our database. If so, return that object, otherwise
        create it on the fly.

        @type model: Class
        @param model: The model to use for filtering and instantiating

        @type attrs: dict
        @param attrs: Dictionary of attributes to use for filtering/instantiating

        @rtype: model
        @return The model instance
        '''
        objs = model.objects.filter(**attrs)

        if len(objs) > 1:
            raise MultipleObjectsReturned("Multiple objects with same keyword combination in database!")

        if len(objs) == 0:
            dbobj = model(**attrs)
            dbobj.save()
            return dbobj
        else:
            return objs.first()

    # Get or instantiate objects for product, platform, os, client and tool
    attrs['product'] = createOrGetModelByName(Product, {'name': product, 'version': product_version})
    attrs['platform'] = createOrGetModelByName(Platform, {'name': platform})
    attrs['os'] = createOrGetModelByName(OS, {'name': os})
    attrs['client'] = createOrGetModelByName(Client, {'name': client})
    attrs['tool'] = createOrGetModelByName(Tool, {'name': tool})

    # If a testcase is supplied, create a testcase object and store it
    if testcase:
        if testcase_ext is None:
            raise RuntimeError("Must provide testcase extension when providing testcase")

        if testcase_isbinary:
            testcase = base64.b64decode(testcase)

        h = hashlib.new('sha1')
        if testcase_isbinary:
            # NOTE(review): str(testcase) on already-decoded bytes is a
            # Python 2 idiom; under Python 3 hashlib requires bytes and this
            # would raise TypeError (the newer create() path encodes
            # explicitly). Left unchanged to preserve legacy behavior.
            h.update(str(testcase))
        else:
            h.update(repr(testcase))

        dbobj = TestCase(quality=testcase_quality, isBinary=testcase_isbinary, size=len(testcase))
        dbobj.test.save("%s.%s" % (h.hexdigest(), testcase_ext), ContentFile(testcase))
        dbobj.save()
        attrs['testcase'] = dbobj
    else:
        attrs['testcase'] = None

    # Create our CrashEntry instance
    return super(CrashEntrySerializer, self).restore_object(attrs, instance)