def checkForCrashes(self, directory, symbolsPath):
    dumpDir = tempfile.mkdtemp()
    self._devicemanager.getDirectory(self._remoteProfile + '/minidumps/', dumpDir)
    automationutils.checkForCrashes(dumpDir, symbolsPath, self.lastTestSeen)
    try:
        shutil.rmtree(dumpDir)
    except:
        print "WARNING: unable to remove directory: %s" % dumpDir

def checkForCrashes(self, directory, symbolsPath):
    # XXX: This will have to be updated after crash reporting on b2g
    # is in place.
    dumpDir = tempfile.mkdtemp()
    self._devicemanager.getDirectory(self._remoteProfile + '/minidumps/', dumpDir)
    automationutils.checkForCrashes(dumpDir, symbolsPath, self.lastTestSeen)
    try:
        shutil.rmtree(dumpDir)
    except:
        print "WARNING: unable to remove directory: %s" % dumpDir

def checkForCrashes(self, directory, symbolsPath):
    remoteCrashDir = self._remoteProfile + "/minidumps/"
    if self._devicemanager.dirExists(remoteCrashDir):
        dumpDir = tempfile.mkdtemp()
        self._devicemanager.getDirectory(remoteCrashDir, dumpDir)
        automationutils.checkForCrashes(dumpDir, symbolsPath, self.lastTestSeen)
        try:
            shutil.rmtree(dumpDir)
        except:
            print "WARNING: unable to remove directory: %s" % dumpDir
    else:
        # As of this writing, the minidumps directory is automatically
        # created when fennec (first) starts, so its lack of presence
        # is a hint that something went wrong.
        print "WARNING: No crash directory (%s) on remote " \
              "device" % remoteCrashDir

def checkForCrashes(self, directory, symbolsPath):
    logcat = self._devicemanager.getLogcat(filterOutRegexps=fennecLogcatFilters)
    javaException = self.checkForJavaException(logcat)
    if javaException:
        return True

    remoteCrashDir = self._remoteProfile + '/minidumps/'
    if not self._devicemanager.dirExists(remoteCrashDir):
        # As of this writing, the minidumps directory is automatically
        # created when fennec (first) starts, so its lack of presence
        # is a hint that something went wrong.
        print "Automation Error: No crash directory (%s) found on remote device" % remoteCrashDir
        # Whilst no crash was found, the run should still display as a failure
        return True

    dumpDir = tempfile.mkdtemp()
    try:
        self._devicemanager.getDirectory(remoteCrashDir, dumpDir)
        crashed = automationutils.checkForCrashes(dumpDir, symbolsPath, self.lastTestSeen)
    finally:
        try:
            shutil.rmtree(dumpDir)
        except:
            print "WARNING: unable to remove directory: %s" % dumpDir
    return crashed

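# Illustrative companion sketch, not the actual remoteautomation.py helper: one
# way the checkForJavaException(logcat) method used above could be written,
# assuming getLogcat() returns a list of log lines and that an uncaught Java
# exception is flagged by a marker string like the ones below (both the markers
# and the "next line carries the message" layout are assumptions).
def checkForJavaException(self, logcat):
    # Scan the captured logcat for an uncaught-exception marker and report the
    # first hit as a test failure.
    for i, line in enumerate(logcat):
        if "REPORTING UNCAUGHT EXCEPTION" in line or "FATAL EXCEPTION" in line:
            # The following line, when present, usually carries the exception text.
            message = logcat[i + 1] if i + 1 < len(logcat) else "<unknown>"
            print "PROCESS-CRASH | java-exception | %s" % message.strip()
            return True
    return False
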
def checkForCrashes(self, directory, symbolsPath):
    remoteCrashDir = self._remoteProfile + '/minidumps/'
    if self._devicemanager.dirExists(remoteCrashDir):
        dumpDir = tempfile.mkdtemp()
        self._devicemanager.getDirectory(remoteCrashDir, dumpDir)
        automationutils.checkForCrashes(dumpDir, symbolsPath, self.lastTestSeen)
        try:
            shutil.rmtree(dumpDir)
        except:
            print "WARNING: unable to remove directory: %s" % dumpDir
    else:
        # As of this writing, the minidumps directory is automatically
        # created when fennec (first) starts, so its lack of presence
        # is a hint that something went wrong.
        print "WARNING: No crash directory (%s) on remote " \
              "device" % remoteCrashDir

def checkForCrashes(self, directory, symbolsPath):
    remoteCrashDir = self._remoteProfile + '/minidumps/'
    if not self._devicemanager.dirExists(remoteCrashDir):
        # As of this writing, the minidumps directory is automatically
        # created when fennec (first) starts, so its lack of presence
        # is a hint that something went wrong.
        print "Automation Error: No crash directory (%s) found on remote device" % remoteCrashDir
        # Whilst no crash was found, the run should still display as a failure
        return True
    dumpDir = tempfile.mkdtemp()
    self._devicemanager.getDirectory(remoteCrashDir, dumpDir)
    crashed = automationutils.checkForCrashes(dumpDir, symbolsPath, self.lastTestSeen)
    try:
        shutil.rmtree(dumpDir)
    except:
        print "WARNING: unable to remove directory: %s" % dumpDir
    return crashed

def checkCrashesAtExit():
    if checkForCrashes(os.path.join(PROFILE_DIR, 'minidumps'), SYMBOLS_PATH, TEST_NAME):
        print >> sys.stderr, 'TinderboxPrint: ' + TEST_NAME + '<br/><em class="testfail">CRASH</em>'
        sys.exit(1)

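# Hypothetical call site for the helper above (run_all_tests is a placeholder,
# not part of the original harness): after the suite finishes, sweep the
# profile's minidumps directory one last time so a late crash still fails the
# run via checkCrashesAtExit().
status = run_all_tests()
checkCrashesAtExit()
sys.exit(status)
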
binary = cmd['bin']
args = cmd['args']

print >> sys.stderr, 'INFO | runtest.py | Running ' + cmd['name'] + ' in ' + CWD + ' : '
print >> sys.stderr, 'INFO | runtest.py | ', binary, args

envkeys = mailnewsEnv.keys()
envkeys.sort()
for envkey in envkeys:
    print >> sys.stderr, "%s=%s" % (envkey, mailnewsEnv[envkey])

# The try case handles MOZILLA_1_9_1_BRANCH, the except case handles trunk.
try:
    proc = automation.Process(binary, args, env=mailnewsEnv)
except TypeError:
    proc = automation.Process([binary] + args, env=mailnewsEnv)
status = proc.wait()

if status != 0:
    print >> sys.stderr, "TEST-UNEXPECTED-FAIL | runtest.py | Exited with code %d during test run" % status

if checkForCrashes(os.path.join(PROFILE, "minidumps"), options.symbols, cmd['name']):
    print >> sys.stderr, 'TinderboxPrint: ' + cmd['name'] + '<br/><em class="testfail">CRASH</em>'
    status = -1

if status != 0:
    sys.exit(status)

print >> sys.stderr, 'INFO | runtest.py | ' + cmd['name'] + ' executed successfully.'

print >> sys.stderr, 'INFO | runtest.py | All tests executed successfully.'

args.append(options.extraArg)

# Different binary implies no default args
if 'bin' in cmd:
    binary = cmd['bin']
    args = cmd['args']

print >> sys.stderr, 'INFO | runtest.py | Running ' + cmd['name'] + ' in ' + CWD + ' : '
print >> sys.stderr, 'INFO | runtest.py | ', binary, args

envkeys = mailnewsEnv.keys()
envkeys.sort()
for envkey in envkeys:
    print >> sys.stderr, "%s=%s" % (envkey, mailnewsEnv[envkey])

proc = automation.Process([binary] + args, env=mailnewsEnv)
status = proc.wait()

if status != 0:
    print >> sys.stderr, "TEST-UNEXPECTED-FAIL | runtest.py | Exited with code %d during test run" % status

if checkForCrashes(os.path.join(PROFILE, "minidumps"), options.symbols, cmd['name']):
    print >> sys.stderr, 'TinderboxPrint: ' + cmd['name'] + '<br/><em class="testfail">CRASH</em>'
    status = 1

if status != 0:
    sys.exit(status)

print >> sys.stderr, 'INFO | runtest.py | ' + cmd['name'] + ' executed successfully.'

print >> sys.stderr, 'INFO | runtest.py | All tests executed successfully.'

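# Rough sketch of how an environment dict like mailnewsEnv used above could be
# assembled (the exact variables the real runtest.py sets may differ): start
# from the caller's environment and keep the crash reporter from showing a UI
# while still writing the minidumps that checkForCrashes() looks for.
import os

mailnewsEnv = dict(os.environ)
mailnewsEnv["MOZ_CRASHREPORTER_NO_REPORT"] = "1"  # write dumps, skip the reporter client
mailnewsEnv["XPCOM_DEBUG_BREAK"] = "stack"        # print a stack trace on assertions
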
def runTests(xpcshell, xrePath=None, symbolsPath=None, manifest=None,
             testdirs=[], testPath=None, interactive=False, logfiles=True):
    """Run xpcshell tests.

    |xpcshell| is the xpcshell executable to use to run the tests.
    |xrePath|, if provided, is the path to the XRE to use.
    |symbolsPath|, if provided, is the path to a directory containing
      breakpad symbols for processing crashes in tests.
    |manifest|, if provided, is a file containing a list of
      test directories to run.
    |testdirs|, if provided, is a list of absolute paths of test directories.
      No-manifest only option.
    |testPath|, if provided, indicates a single path and/or test to run.
    |interactive|, if set to True, indicates to provide an xpcshell prompt
      instead of automatically executing the test.
    |logfiles|, if set to False, indicates not to save output to log files.
      Non-interactive only option.
    """
    if not testdirs and not manifest:
        # nothing to test!
        print >>sys.stderr, "Error: No test dirs or test manifest specified!"
        return False

    passCount = 0
    failCount = 0

    testharnessdir = os.path.dirname(os.path.abspath(__file__))
    xpcshell = os.path.abspath(xpcshell)
    # we assume that httpd.js lives in components/ relative to xpcshell
    httpdJSPath = os.path.join(os.path.dirname(xpcshell), "components", "httpd.js").replace("\\", "/")

    env = dict(os.environ)
    # Make assertions fatal
    env["XPCOM_DEBUG_BREAK"] = "stack-and-abort"
    # Don't launch the crash reporter client
    env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"

    if xrePath is None:
        xrePath = os.path.dirname(xpcshell)
    else:
        xrePath = os.path.abspath(xrePath)
    if sys.platform == 'win32':
        env["PATH"] = env["PATH"] + ";" + xrePath
    elif sys.platform in ('os2emx', 'os2knix'):
        os.environ["BEGINLIBPATH"] = xrePath + ";" + env["BEGINLIBPATH"]
        os.environ["LIBPATHSTRICT"] = "T"
    elif sys.platform == 'darwin':
        env["DYLD_LIBRARY_PATH"] = xrePath
    else: # unix or linux?
        env["LD_LIBRARY_PATH"] = xrePath

    # xpcsRunArgs: <head.js> function to call to run the test.
    # pStdout, pStderr: Parameter values for later |Popen()| call.
    if interactive:
        xpcsRunArgs = [
            '-e', 'print("To start the test, type |_execute_test();|.");',
            '-i']
        pStdout = None
        pStderr = None
    else:
        xpcsRunArgs = ['-e', '_execute_test();']
        if sys.platform == 'os2emx':
            pStdout = None
        else:
            pStdout = PIPE
        pStderr = STDOUT

    # <head.js> has to be loaded by xpcshell: it can't load itself.
    xpcsCmd = [xpcshell, '-g', xrePath, '-j', '-s'] + \
              ['-e', 'const _HTTPD_JS_PATH = "%s";' % httpdJSPath,
               '-f', os.path.join(testharnessdir, 'head.js')]

    # |testPath| will be the optional path only, or |None|.
    # |singleFile| will be the optional test only, or |None|.
    singleFile = None
    if testPath:
        if testPath.endswith('.js'):
            # Split into path and file.
            if testPath.find('/') == -1:
                # Test only.
                singleFile = testPath
                testPath = None
            else:
                # Both path and test.
                # Reuse |testPath| temporarily.
                testPath = testPath.rsplit('/', 1)
                singleFile = testPath[1]
                testPath = testPath[0]
        else:
            # Path only.
            # Simply remove optional ending separator.
            testPath = testPath.rstrip("/")

    # Override testdirs.
    if manifest is not None:
        testdirs = readManifest(os.path.abspath(manifest))

    # Process each test directory individually.
    for testdir in testdirs:
        if testPath and not testdir.endswith(testPath):
            continue

        testdir = os.path.abspath(testdir)

        # get the list of head and tail files from the directory
        testHeadFiles = []
        for f in sorted(glob(os.path.join(testdir, "head_*.js"))):
            if os.path.isfile(f):
                testHeadFiles += [f]
        testTailFiles = []
        # Tails are executed in the reverse order, to "match" heads order,
        # as in "h1-h2-h3 then t3-t2-t1".
        for f in reversed(sorted(glob(os.path.join(testdir, "tail_*.js")))):
            if os.path.isfile(f):
                testTailFiles += [f]

        # if a single test file was specified, we only want to execute that test
        testfiles = sorted(glob(os.path.join(testdir, "test_*.js")))
        if singleFile:
            if singleFile in [os.path.basename(x) for x in testfiles]:
                testfiles = [os.path.join(testdir, singleFile)]
            else:
                # not in this dir? skip it
                continue

        cmdH = ", ".join(['"' + f.replace('\\', '/') + '"' for f in testHeadFiles])
        cmdT = ", ".join(['"' + f.replace('\\', '/') + '"' for f in testTailFiles])
        cmdH = xpcsCmd + \
               ['-e', 'const _HEAD_FILES = [%s];' % cmdH] + \
               ['-e', 'const _TAIL_FILES = [%s];' % cmdT]

        # Now execute each test individually.
        for test in testfiles:
            # The test file will have to be loaded after the head files.
            cmdT = ['-e', 'const _TEST_FILE = ["%s"];' %
                    os.path.join(testdir, test).replace('\\', '/')]

            # create a temp dir that the JS harness can stick a profile in
            profileDir = None
            try:
                profileDir = mkdtemp()
                env["XPCSHELL_TEST_PROFILE_DIR"] = profileDir

                # Enable leaks (only) detection to its own log file.
                leakLogFile = os.path.join(profileDir, "runxpcshelltests_leaks.log")
                env["XPCOM_MEM_LEAK_LOG"] = leakLogFile

                proc = Popen(cmdH + cmdT + xpcsRunArgs,
                             stdout=pStdout, stderr=pStderr, env=env, cwd=testdir)

                # allow user to kill hung subprocess with SIGINT w/o killing this script
                # - don't move this line above Popen, or child will inherit the SIG_IGN
                signal.signal(signal.SIGINT, signal.SIG_IGN)

                # |stderr == None| as |pStderr| was either |None| or redirected to |stdout|.
                stdout, stderr = proc.communicate()
                signal.signal(signal.SIGINT, signal.SIG_DFL)

                if interactive:
                    # Not sure what else to do here...
                    return True

                if proc.returncode != 0 or (stdout and re.search("^TEST-UNEXPECTED-FAIL", stdout, re.MULTILINE)):
                    print """TEST-UNEXPECTED-FAIL | %s | test failed (with xpcshell return code: %d), see following log:
  >>>>>>>
  %s
  <<<<<<<""" % (test, proc.returncode, stdout)
                    checkForCrashes(testdir, symbolsPath, testName=test)
                    failCount += 1
                else:
                    print "TEST-PASS | %s | test passed" % test
                    passCount += 1

                dumpLeakLog(leakLogFile, True)

                if logfiles and stdout:
                    try:
                        f = open(test + ".log", "w")
                        f.write(stdout)

                        if os.path.exists(leakLogFile):
                            leaks = open(leakLogFile, "r")
                            f.write(leaks.read())
                            leaks.close()
                    finally:
                        if f:
                            f.close()
            finally:
                if profileDir:
                    shutil.rmtree(profileDir)

    if passCount == 0 and failCount == 0:
        print "TEST-UNEXPECTED-FAIL | runxpcshelltests.py | No tests run. Did you pass an invalid --test-path?"
        failCount = 1

    print """INFO | Result summary:
INFO | Passed: %d
INFO | Failed: %d""" % (passCount, failCount)

    return failCount == 0

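# Minimal, hypothetical driver for runTests() above; the option names and
# positional arguments are illustrative, not necessarily the harness's real
# command line. Point it at an xpcshell binary and one or more test
# directories, and exit non-zero when any test fails.
if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--xre-path", dest="xrePath", default=None,
                      help="directory containing the XRE xpcshell should run against")
    parser.add_option("--symbols-path", dest="symbolsPath", default=None,
                      help="directory of breakpad symbols used when processing crashes")
    parser.add_option("--test-path", dest="testPath", default=None,
                      help="single test dir and/or test_*.js file to run")
    options, args = parser.parse_args()
    if len(args) < 2:
        print >>sys.stderr, "Usage: runxpcshelltests.py <path to xpcshell> <test dirs...>"
        sys.exit(1)
    ok = runTests(args[0],
                  xrePath=options.xrePath,
                  symbolsPath=options.symbolsPath,
                  testdirs=args[1:],
                  testPath=options.testPath)
    sys.exit(0 if ok else 1)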