def cramtest(self, cram_args=None, test_paths=None, test_objects=None):
    """Run cram tests under the tree's virtualenv.

    :param cram_args: optional list of extra CLI arguments for `python -m cram`.
    :param test_paths: optional list of paths used to resolve which tests run.
    :param test_objects: pre-resolved test objects; when None they are
        resolved via TestResolver.
    :returns: the cram process exit code, or 1 if no tests were collected.
    """
    self._activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver
        resolver = self._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths, flavor='cram')
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor='cram')

    if not test_objects:
        message = 'No tests were collected, check spelling of the test paths.'
        self.log(logging.WARN, 'cramtest', {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = self.virtualenv_manager.python_path
    # BUG FIX: cram_args defaults to None, and `list + None` raised
    # TypeError whenever the caller omitted it. Treat a missing value as
    # "no extra arguments".
    cmd = [python, '-m', 'cram'] + (cram_args or []) + [t['relpath'] for t in tests]
    return subprocess.call(cmd, cwd=self.topsrcdir)
def buildTestPath(self, options):
    """
    Build the url path to the specific test harness and test file or directory
    Build a manifest of tests to run and write out a json file for the
    harness to read
    """
    if options.manifestFile and os.path.isfile(options.manifestFile):
        manifest = TestManifest(strict=False)
        manifest.read(options.manifestFile)
        # Bug 883858 - return all tests including disabled tests
        # NOTE(review): disabled=False actually excludes disabled tests,
        # which contradicts the comment above — confirm intent.
        tests = manifest.active_tests(disabled=False, **mozinfo.info)
        paths = []
        for test in tests:
            # path relative to the suite's test root, without leading slashes
            tp = test['path'].split(self.getTestRoot(options), 1)[1].strip('/')
            # Filter out tests if we are using --test-path
            if options.testPath and not tp.startswith(options.testPath):
                continue
            paths.append({'path': tp})
        # Bug 883865 - add this functionality into manifestDestiny
        with open('tests.json', 'w') as manifestFile:
            manifestFile.write(json.dumps({'tests': paths}))
        options.manifestFile = 'tests.json'

    testHost = "http://mochi.test:8888"
    testURL = ("/").join([testHost, self.TEST_PATH, options.testPath])
    # A single test file combined with --repeat runs through the plain loop page.
    if os.path.isfile(
            os.path.join(self.oldcwd, os.path.dirname(__file__),
                         self.TEST_PATH, options.testPath)) and options.repeat > 0:
        testURL = ("/").join([testHost, self.PLAIN_LOOP_PATH])
    # Chrome/a11y, browser-chrome and ipcplugins suites override the URL.
    if options.chrome or options.a11y:
        testURL = ("/").join([testHost, self.CHROME_PATH])
    elif options.browserChrome:
        testURL = "about:blank"
    elif options.ipcplugins:
        testURL = ("/").join([testHost, self.TEST_PATH, "dom/plugins/test"])
    return testURL
def test_all_js(tests, options):
    """Run each harness JS test in its own mozmill process and collect results.

    Python 2 code (print statement, `file` builtin).
    """
    print "Running JS Tests"
    # We run each test in its own instance since these are harness tests.
    # That just seems safer, no opportunity for cross-talk since
    # we are sorta using the framework to test itself
    results = JSResults()

    for t in tests:
        # write a temporary manifest
        manifest = TestManifest()
        manifest.tests = [t]
        fd, filename = tempfile.mkstemp(suffix=".ini")
        os.close(fd)
        fp = file(filename, "w")  # Python 2 builtin `file`
        manifest.write(fp=fp)
        fp.close()

        # get CLI arguments to mozmill
        args = ["-b", options.binary]
        args.append("--console-level=DEBUG")
        args.append("-m")
        args.append(filename)

        # run the test
        proc = ProcessHandler("mozmill", args=args)
        proc.run()
        status = proc.waitForFinish(timeout=300)
        command = proc.commandline
        results.acquire(t["name"], proc.output, status, command)

        # remove the temporary manifest
        os.remove(filename)

    return results
def add_test(self, test, expected='pass', test_container=None):
    """Add a test file, directory, or .ini manifest to self.tests.

    Directories are walked for test_*.py / test_*.js files (contained
    manifests are ignored with a warning); .ini manifests are expanded via
    manifestparser filters; everything else is appended directly.

    :param test: path to a test file, directory, or manifest.
    :param expected: expected result, defaults to 'pass'.
    :param test_container: opaque container flag stored with the test entry.
    :raises IOError: if a manifest entry's test file does not exist.
    """
    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if filename.endswith('.ini'):
                    msg_tmpl = ("Ignoring manifest '{0}'; running all tests in '{1}'."
                                " See --help for details.")
                    relpath = os.path.relpath(os.path.join(root, filename), filepath)
                    self.logger.warning(msg_tmpl.format(relpath, filepath))
                elif (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                    test_file = os.path.join(root, filename)
                    self.add_test(test_file)
        return

    file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

    if file_ext == '.ini':
        manifest = TestManifest()
        manifest.read(filepath)

        filters = []
        if self.test_tags:
            filters.append(tags(self.test_tags))
        update_mozinfo(filepath)
        # BUG FIX: previously logged the literal `None` instead of the
        # collected mozinfo data.
        self.logger.info("mozinfo updated with the following: {}".format(mozinfo.info))
        # disabled=True keeps skipped entries so they can be reported below
        manifest_tests = manifest.active_tests(exists=False,
                                               disabled=True,
                                               filters=filters,
                                               app=self.appName,
                                               e10s=self.e10s,
                                               **mozinfo.info)
        if len(manifest_tests) == 0:
            self.logger.error("no tests to run using specified "
                              "combination of filters: {}".format(
                                  manifest.fmt_filters()))

        # Split active entries from disabled ones.
        target_tests = []
        for test in manifest_tests:
            if test.get('disabled'):
                self.manifest_skipped_tests.append(test)
            else:
                target_tests.append(test)

        for i in target_tests:
            if not os.path.exists(i["path"]):
                raise IOError("test file: %s does not exist" % i["path"])
            # Recurse with the manifest entry's own expectation.
            test_container = None
            self.add_test(i["path"], i["expected"], test_container)
        return

    self.tests.append({'filepath': filepath,
                       'expected': expected,
                       'test_container': test_container})
def __init__(self, options, **kwargs):
    """Set up the PEP runner: profile, optional file server, test manifest,
    instrumentation environment and the managed browser runner.

    :param options: parsed command-line options (binary, testPath, profilePath,
        serverPath, browserArgs, tracer thresholds...).
    """
    self.options = options
    self.server = None
    self.logger = mozlog.getLogger('PEP')

    # create the profile
    self.profile = self.profile_class(profile=self.options.profilePath,
                                      addons=[os.path.join(here, 'extension')])

    # fork a server to serve the test related files
    if self.options.serverPath:
        self.runServer()

    tests = []
    # TODO is there a better way of doing this?
    if self.options.testPath.endswith('.js'):
        # a single test file was passed in
        testObj = {}
        testObj['path'] = os.path.realpath(self.options.testPath)
        testObj['name'] = os.path.basename(self.options.testPath)
        tests.append(testObj)
    else:
        # a test manifest was passed in
        # open and convert the manifest to json
        manifest = TestManifest()
        manifest.read(self.options.testPath)
        tests = manifest.get()

    # create a manifest object to be read by the JS side
    manifestObj = {}
    manifestObj['tests'] = tests

    # write manifest to a JSON file
    # BUG FIX: str(dict).replace("'", '"') produced invalid JSON whenever a
    # value contained a quote/apostrophe or a non-string type; serialize with
    # the json module instead, and close the file deterministically.
    import json  # local import in case the module header does not import it
    manifest_json_path = os.path.join(here, 'manifest.json')
    with open(manifest_json_path, 'w') as jsonManifest:
        jsonManifest.write(json.dumps(manifestObj))

    # setup environment
    env = os.environ.copy()
    env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
    env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = str(options.tracerThreshold)
    env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = str(options.tracerInterval)
    env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'

    # construct the browser arguments
    cmdargs = []
    # TODO Make browserArgs a list
    cmdargs.extend(self.options.browserArgs)
    cmdargs.extend(['-pep-start', os.path.realpath(manifest_json_path)])

    # run with managed process handler
    self.runner = self.runner_class(profile=self.profile,
                                    binary=self.options.binary,
                                    cmdargs=cmdargs,
                                    env=env,
                                    process_class=PepProcess)
def add_test(self, test, expected="pass", test_container=None):
    """Add a test file, directory, or .ini manifest to self.tests.

    Directories are walked for test_*.py / test_*.js files; .ini manifests
    are expanded via manifestparser filters; everything else is appended
    directly.

    :param test: path to a test file, directory, or manifest.
    :param expected: expected result, defaults to "pass".
    :param test_container: opaque container flag stored with the test entry.
    :raises IOError: if a manifest entry's test file does not exist.
    """
    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if filename.startswith("test_") and (
                        filename.endswith(".py") or filename.endswith(".js")):
                    filepath = os.path.join(root, filename)
                    self.add_test(filepath)
        return

    file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

    if file_ext == ".ini":
        manifest = TestManifest()
        manifest.read(filepath)

        filters = []
        if self.test_tags:
            filters.append(tags(self.test_tags))
        update_mozinfo(filepath)
        # BUG FIX: previously logged the literal `None` instead of the
        # collected mozinfo data.
        self.logger.info("mozinfo updated with the following: {}".format(mozinfo.info))
        # disabled=True keeps skipped entries so they can be reported below
        manifest_tests = manifest.active_tests(
            exists=False,
            disabled=True,
            filters=filters,
            device=self.device,
            app=self.appName,
            e10s=self.e10s,
            **mozinfo.info
        )
        if len(manifest_tests) == 0:
            self.logger.error(
                "no tests to run using specified "
                "combination of filters: {}".format(manifest.fmt_filters())
            )

        # Split active entries from disabled ones.
        target_tests = []
        for test in manifest_tests:
            if test.get("disabled"):
                self.manifest_skipped_tests.append(test)
            else:
                target_tests.append(test)

        for i in target_tests:
            if not os.path.exists(i["path"]):
                raise IOError("test file: %s does not exist" % i["path"])
            # Recurse with the manifest entry's own expectation.
            test_container = None
            self.add_test(i["path"], i["expected"], test_container)
        return

    self.tests.append(
        {"filepath": filepath, "expected": expected, "test_container": test_container}
    )
def test_unknown_keywords(self):
    """Querying with an incomplete keyword set must raise ParseError."""
    manifest = TestManifest(
        manifests=(os.path.join(here, 'filter-example.ini'),))

    # toolkit missing
    with self.assertRaises(ParseError):
        manifest.active_tests(os='win', disabled=False, exists=False)

    # os missing
    with self.assertRaises(ParseError):
        manifest.active_tests(toolkit='windows', disabled=False, exists=False)
def get_test_list(self, manifest):
    """Read *manifest* and return the paths of all enabled, active tests."""
    self.logger.info("Reading test manifest: %s" % manifest)
    parsed = TestManifest()
    parsed.read(manifest)
    # In the future if we want to add in more processing to the manifest
    # here is where you'd do that. Right now, we just return a list of
    # tests
    return [entry["path"]
            for entry in parsed.active_tests(exists=False, disabled=False)]
def __call__(self, manifests, buildconfig):
    """Split the tests of *manifests* into (active, skipped) path lists,
    filtering active tests by *buildconfig*."""
    global logger
    logger = logger or structuredlog.get_default_logger()

    # Accept either a single manifest or an iterable of them.
    if not isinstance(manifests, Iterable):
        manifests = [manifests]

    parsed = TestManifest(manifests)
    active = []
    for entry in parsed.active_tests(exists=False, disabled=False,
                                     **buildconfig):
        active.append(entry['path'])
    skipped = [entry['path'] for entry in parsed.tests
               if entry['path'] not in active]
    return active, skipped
def add_test(self, test, expected='pass', test_container=None):
    """Add a test file, directory, or .ini manifest to self.tests.

    :param test: path to a test file, directory, or manifest.
    :param expected: expected result, defaults to 'pass'.
    :param test_container: opaque container flag stored with the test entry.
    :raises IOError: if a manifest entry's test file does not exist.
    """
    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        # Recurse into every test_*.py / test_*.js file under the directory.
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                    filepath = os.path.join(root, filename)
                    self.add_test(filepath)
        return

    file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

    if file_ext == '.ini':
        manifest = TestManifest()
        manifest.read(filepath)

        filters = []
        if self.test_tags:
            filters.append(tags(self.test_tags))
        # disabled=True keeps skipped entries so they can be reported below
        manifest_tests = manifest.active_tests(exists=False,
                                               disabled=True,
                                               filters=filters,
                                               e10s=self.e10s,
                                               **mozinfo.info)
        if len(manifest_tests) == 0:
            self.logger.error("no tests to run using specified "
                              "combination of filters: {}".format(
                                  manifest.fmt_filters()))

        # Split active entries from disabled ones.
        target_tests = []
        for test in manifest_tests:
            if test.get('disabled'):
                self.manifest_skipped_tests.append(test)
            else:
                target_tests.append(test)

        for i in target_tests:
            if not os.path.exists(i["path"]):
                raise IOError("test file: %s does not exist" % i["path"])
            # NOTE(review): file_ext recomputed here but never used — dead code.
            file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
            test_container = None
            self.add_test(i["path"], i["expected"], test_container)
        return

    self.tests.append({'filepath': filepath,
                       'expected': expected,
                       'test_container': test_container})
def convert_ini_manifest_to_json(cls, manifest_path):
    """Convert an .ini test manifest into a temporary JSON whitelist manifest
    and return the path of the generated JSON file."""
    manifest = TestManifest([manifest_path])

    # enabled tests -> whitelist; everything else in the manifest -> blacklist
    whitelist = [t['path'] for t in manifest.active_tests(disabled=False)]
    blacklist = [t for t in manifest.paths() if t not in whitelist]

    # always run the shared integration setup script first
    whitelist.insert(0, os.path.join(gaia_dir, 'shared', 'test',
                                     'integration', 'setup.js'))

    # NOTE(review): the result of this map() is discarded, so the intended
    # conversion to gaia-relative paths never happens (and `blacklist` is
    # otherwise unused) — confirm whether absolute paths are acceptable here.
    map(lambda l: [os.path.relpath(p, gaia_dir) for p in l],
        (whitelist, blacklist))

    contents = {
        'whitelist': whitelist
    }

    # NOTE(review): the temp file is created then reopened by name, which is
    # race-prone (and fails on Windows while the handle is open) — presumably
    # acceptable in this harness.
    manifest_path = tempfile.NamedTemporaryFile(suffix='.json').name
    with open(manifest_path, 'w') as f:
        f.writelines(json.dumps(contents, indent=2))

    return manifest_path
def runTests(self):
    """Run all active robocop tests and return the worst result code
    (0 on success). Python 2 code (print statements)."""
    self.startup()

    # A TestManifest may be passed in directly (e.g. from mach); otherwise
    # parse the robocop .ini manifest from disk.
    if isinstance(self.options.manifestFile, TestManifest):
        mp = self.options.manifestFile
    else:
        mp = TestManifest(strict=False)
        mp.read(self.options.robocopIni)

    filters = []
    if self.options.totalChunks:
        # restrict this run to the slice of tests belonging to this chunk
        filters.append(
            chunk_by_slice(self.options.thisChunk, self.options.totalChunks))

    robocop_tests = mp.active_tests(
        exists=False, filters=filters, **mozinfo.info)

    if not self.options.autorun:
        # Force a single loop iteration. The iteration will start Fennec and
        # the httpd server, but not actually run a test.
        self.options.test_paths = [robocop_tests[0]['name']]

    active_tests = []
    for test in robocop_tests:
        # honour an explicit TEST_PATH selection
        if self.options.test_paths and test['name'] not in self.options.test_paths:
            continue
        if 'disabled' in test:
            self.log.info('TEST-INFO | skipping %s | %s' %
                          (test['name'], test['disabled']))
            continue
        active_tests.append(test)

    # group test names by their manifest for structured logging
    tests_by_manifest = defaultdict(list)
    for test in active_tests:
        tests_by_manifest[test['manifest']].append(test['name'])
    self.log.suite_start(tests_by_manifest)

    worstTestResult = None
    for test in active_tests:
        result = self.runSingleTest(test)
        # keep the first non-zero result; a zero may still be overwritten
        if worstTestResult is None or worstTestResult == 0:
            worstTestResult = result
    if worstTestResult is None:
        self.log.warning(
            "No tests run. Did you pass an invalid TEST_PATH?")
        worstTestResult = 1
    else:
        print "INFO | runtests.py | Test summary: start."
        logResult = self.logTestSummary()
        print "INFO | runtests.py | Test summary: end."
        if worstTestResult == 0:
            worstTestResult = logResult
    return worstTestResult
def _load_manifestparser_manifest(self, mpath):
    """Parse the manifest at *mpath* and return an iterator over its tests."""
    manifest = TestManifest(manifests=[mpath], strict=True,
                            rootdir=self.topsrcdir, finder=self.finder,
                            handle_defaults=True)
    return iter(manifest.tests)
def run_suite(self, **kwargs):
    """Run the whole xpcshell suite from the objdir's xpcshell.ini manifest."""
    from manifestparser import TestManifest
    manifest_path = os.path.join(self.topobjdir, '_tests', 'xpcshell',
                                 'xpcshell.ini')
    suite_manifest = TestManifest(manifests=[manifest_path])
    return self._run_xpcshell_harness(manifest=suite_manifest, **kwargs)
def run_robocop_test(self, context, tests, suite=None, **kwargs):
    """Load the robocop harness manually and run the given robocop tests."""
    host_ret = verify_host_bin()
    if host_ret != 0:
        return host_ret

    # runrobocop.py is not on sys.path, so load it by file path.
    import imp
    path = os.path.join(self.mochitest_dir, 'runrobocop.py')
    with open(path, 'r') as fh:
        imp.load_module('runrobocop', fh, path, ('.py', 'r', imp.PY_SOURCE))
    import runrobocop

    options = Namespace(**kwargs)

    from manifestparser import TestManifest
    if tests and not options.manifestFile:
        # Wrap pre-resolved test objects in an in-memory manifest.
        manifest = TestManifest()
        manifest.tests.extend(tests)
        options.manifestFile = manifest

    # robocop only used for Firefox for Android - non-e10s
    options.e10s = False
    print("using e10s=False for robocop")
    return runrobocop.run_test_harness(parser, options)
def run_desktop_test(self, context, tests=None, suite=None, **kwargs):
    """Runs a mochitest.

    suite is the type of mochitest to run. It can be one of ('plain',
    'chrome', 'browser', 'a11y', 'jetpack-package', 'jetpack-addon',
    'webapprt-chrome', 'webapprt-content').
    """
    # runtests.py is ambiguous, so we load the file/module manually.
    if 'mochitest' not in sys.modules:
        import imp
        path = os.path.join(self.mochitest_dir, 'runtests.py')
        with open(path, 'r') as fh:
            imp.load_module('mochitest', fh, path, ('.py', 'r', imp.PY_SOURCE))

    import mochitest

    # This is required to make other components happy. Sad, isn't it?
    os.chdir(self.topobjdir)

    # Automation installs its own stream handler to stdout. Since we want
    # all logging to go through us, we just remove their handler.
    remove_handlers = [l for l in logging.getLogger().handlers
                       if isinstance(l, logging.StreamHandler)]
    for handler in remove_handlers:
        logging.getLogger().removeHandler(handler)

    options = Namespace(**kwargs)

    # webapprt suites run against the webapp runtime binary/XRE, not Firefox.
    if suite == 'webapprt-content':
        if not options.app or options.app == self.get_binary_path():
            options.app = self.get_webapp_runtime_path()
        options.xrePath = self.get_webapp_runtime_xre_path()
    elif suite == 'webapprt-chrome':
        options.browserArgs.append("-test-mode")
        if not options.app or options.app == self.get_binary_path():
            options.app = self.get_webapp_runtime_path()
        options.xrePath = self.get_webapp_runtime_xre_path()

    from manifestparser import TestManifest
    if tests:
        # Wrap pre-resolved test objects in an in-memory manifest.
        manifest = TestManifest()
        manifest.tests.extend(tests)
        options.manifestFile = manifest

    # When developing mochitest-plain tests, it's often useful to be able to
    # refresh the page to pick up modifications. Therefore leave the browser
    # open if only running a single mochitest-plain test. This behaviour can
    # be overridden by passing in --keep-open=false.
    if len(tests) == 1 and options.keep_open is None and suite == 'plain':
        options.keep_open = True

    # We need this to enable colorization of output.
    self.log_manager.enable_unstructured()
    result = mochitest.run_test_harness(options)
    self.log_manager.disable_unstructured()
    return result
def run_xpcshell_test(self, test_objects=None, **params):
    """Run xpcshell tests via mach, locally or on Android.

    :param test_objects: optional pre-resolved test objects; wrapped into an
        in-memory manifest when given.
    :returns: the harness result, or 1 on an invalid test path.
    """
    from mozbuild.controller.building import BuildDriver

    if test_objects is not None:
        from manifestparser import TestManifest
        m = TestManifest()
        m.tests.extend(test_objects)
        params['manifest'] = m

    driver = self._spawn(BuildDriver)
    driver.install_tests(test_objects)

    # We should probably have a utility function to ensure the tree is
    # ready to run tests. Until then, we just create the state dir (in
    # case the tree wasn't built with mach).
    self._ensure_state_subdir_exists('.')

    params['log'] = structured.commandline.setup_logging(
        "XPCShellTests", params, {"mach": sys.stdout}, {"verbose": True})

    # Android devices need verification before spawning the remote runner.
    if conditions.is_android(self):
        from mozrunner.devices.android_device import verify_android_device
        verify_android_device(self)
        xpcshell = self._spawn(AndroidXPCShellRunner)
    else:
        xpcshell = self._spawn(XPCShellRunner)
    xpcshell.cwd = self._mach_context.cwd

    try:
        return xpcshell.run_test(**params)
    except InvalidTestPathError as e:
        # BUG FIX: BaseException.message was removed in Python 3 (deprecated
        # since 2.6); str(e) is the portable spelling and matches the sibling
        # implementation of this command.
        print(str(e))
        return 1
def run_android_test(self, context, tests, **kwargs):
    """Load the remote mochitest harness and run the given tests on Android."""
    host_ret = verify_host_bin()
    if host_ret != 0:
        return host_ret

    # runtestsremote.py is not on sys.path, so load it by file path.
    import imp
    path = os.path.join(self.mochitest_dir, "runtestsremote.py")
    with open(path, "r") as fh:
        imp.load_module("runtestsremote", fh, path, (".py", "r", imp.PY_SOURCE))
    import runtestsremote

    from mozrunner.devices.android_device import get_adb_path
    if not kwargs["adbPath"]:
        kwargs["adbPath"] = get_adb_path(self)

    options = Namespace(**kwargs)

    from manifestparser import TestManifest
    if tests and not options.manifestFile:
        # Wrap pre-resolved test objects in an in-memory manifest.
        manifest = TestManifest()
        manifest.tests.extend(tests)
        options.manifestFile = manifest

    # Firefox for Android doesn't use e10s
    if options.app is not None and "geckoview" not in options.app:
        options.e10s = False
        print("using e10s=False for non-geckoview app")

    return runtestsremote.run_test_harness(parser, options)
def run_android_test(self, context, tests, suite=None, **kwargs):
    """Load the remote mochitest harness and run the given tests on Android."""
    host_ret = verify_host_bin()
    if host_ret != 0:
        return host_ret

    # Load the harness module by file path; it is not importable directly.
    import imp
    harness_path = os.path.join(self.mochitest_dir, 'runtestsremote.py')
    with open(harness_path, 'r') as source:
        imp.load_module('runtestsremote', source, harness_path,
                        ('.py', 'r', imp.PY_SOURCE))
    import runtestsremote

    from mozrunner.devices.android_device import get_adb_path
    if not kwargs['adbPath']:
        kwargs['adbPath'] = get_adb_path(self)

    options = Namespace(**kwargs)

    from manifestparser import TestManifest
    if tests and not options.manifestFile:
        # Wrap pre-resolved test objects in an in-memory manifest.
        in_memory_manifest = TestManifest()
        in_memory_manifest.tests.extend(tests)
        options.manifestFile = in_memory_manifest

    return runtestsremote.run_test_harness(parser, options)
def inner(string, name="manifest.ini"):
    """Write *string* to a manifest file under tmpdir and parse it."""
    manifest_file = tmpdir.join(name)
    manifest_file.write(string, ensure=True)
    # pylint --py3k: W1612
    manifest_location = six.text_type(manifest_file)
    return TestManifest(manifests=(manifest_location,), strict=False,
                        rootdir=tmpdir.strpath)
def runTests(self):
    """Run all active robocop tests and return the worst result code
    (0 on success). Python 2 code (print statements)."""
    self.startup()

    # A TestManifest may be passed in directly (e.g. from mach); otherwise
    # parse the robocop .ini manifest from disk.
    if isinstance(self.options.manifestFile, TestManifest):
        mp = self.options.manifestFile
    else:
        mp = TestManifest(strict=False)
        mp.read(self.options.robocopIni)

    filters = []
    if self.options.totalChunks:
        # restrict this run to the slice of tests belonging to this chunk
        filters.append(
            chunk_by_slice(self.options.thisChunk, self.options.totalChunks))

    robocop_tests = mp.active_tests(exists=False, filters=filters,
                                    **mozinfo.info)

    if not self.options.autorun:
        # Force a single loop iteration. The iteration will start Fennec and
        # the httpd server, but not actually run a test.
        self.options.test_paths = [robocop_tests[0]['name']]

    active_tests = []
    for test in robocop_tests:
        # honour an explicit TEST_PATH selection
        if self.options.test_paths and test[
                'name'] not in self.options.test_paths:
            continue
        if 'disabled' in test:
            self.log.info('TEST-INFO | skipping %s | %s' %
                          (test['name'], test['disabled']))
            continue
        active_tests.append(test)

    self.log.suite_start([t['name'] for t in active_tests])

    worstTestResult = None
    for test in active_tests:
        result = self.runSingleTest(test)
        # keep the first non-zero result; a zero may still be overwritten
        if worstTestResult is None or worstTestResult == 0:
            worstTestResult = result
    if worstTestResult is None:
        self.log.warning(
            "No tests run. Did you pass an invalid TEST_PATH?")
        worstTestResult = 1
    else:
        print "INFO | runtests.py | Test summary: start."
        logResult = self.logTestSummary()
        print "INFO | runtests.py | Test summary: end."
        if worstTestResult == 0:
            worstTestResult = logResult
    return worstTestResult
def run_b2g_test(self, context, tests=None, suite='mochitest', **kwargs):
    """Runs a b2g mochitest."""
    if kwargs.get('desktop'):
        # Desktop B2G requires a prebuilt, non-debug Gaia profile.
        kwargs['profile'] = kwargs.get('profile') or os.environ.get(
            'GAIA_PROFILE')
        if not kwargs['profile'] or not os.path.isdir(kwargs['profile']):
            print(GAIA_PROFILE_NOT_FOUND)
            sys.exit(1)

        if os.path.isfile(
                os.path.join(kwargs['profile'], 'extensions',
                             '*****@*****.**')):
            print(GAIA_PROFILE_IS_DEBUG.format(kwargs['profile']))
            sys.exit(1)
    elif context.target_out:
        # Device build: require an eng build containing the test container app.
        host_webapps_dir = os.path.join(context.target_out, 'data',
                                        'local', 'webapps')
        if not os.path.isdir(
                os.path.join(host_webapps_dir,
                             'test-container.gaiamobile.org')):
            print(ENG_BUILD_REQUIRED.format(host_webapps_dir))
            sys.exit(1)

    # TODO without os.chdir, chained imports fail below
    os.chdir(self.mochitest_dir)

    # The imp module can spew warnings if the modules below have
    # already been imported, ignore them.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')

        import imp
        path = os.path.join(self.mochitest_dir, 'runtestsb2g.py')
        with open(path, 'r') as fh:
            imp.load_module('mochitest', fh, path,
                            ('.py', 'r', imp.PY_SOURCE))

        import mochitest

    options = Namespace(**kwargs)

    from manifestparser import TestManifest
    if tests:
        # Wrap pre-resolved test objects in an in-memory manifest.
        manifest = TestManifest()
        manifest.tests.extend(tests)
        options.manifestFile = manifest

    if options.desktop:
        return mochitest.run_desktop_mochitests(options)

    try:
        which.which('adb')
    except which.WhichError:
        # TODO Find adb automatically if it isn't on the path
        print(ADB_NOT_FOUND.format(options.b2gPath))
        return 1

    return mochitest.run_remote_mochitests(options)
def test_testmanifest(self):
    """Exercise platform filtering, existence checks and expected results."""
    filter_example = os.path.join(here, "filter-example.ini")
    manifest = TestManifest(manifests=(filter_example,), strict=False)

    # Test filtering based on platform:
    win_names = [t["name"] for t in
                 manifest.active_tests(os="win", disabled=False, exists=False)]
    self.assertEqual(win_names, ["windowstest", "fleem"])

    linux_names = [t["name"] for t in
                   manifest.active_tests(os="linux", disabled=False,
                                         exists=False)]
    self.assertEqual(linux_names, ["fleem", "linuxtest"])

    # Look for existing tests. There is only one:
    self.assertEqual([t["name"] for t in manifest.active_tests()], ["fleem"])

    # You should be able to expect failures:
    last = manifest.active_tests(exists=False, toolkit="gtk")[-1]
    self.assertEqual(last["name"], "linuxtest")
    self.assertEqual(last["expected"], "pass")

    last = manifest.active_tests(exists=False, toolkit="cocoa")[-1]
    self.assertEqual(last["expected"], "fail")
def run(arguments=sys.argv[1:]):
    """Parse args and dispatch to the requested test command (Python 2).

    NOTE(review): the default `sys.argv[1:]` is captured once at import time;
    later changes to sys.argv are not seen — presumably intentional.
    """
    # parse the command line arguments
    (options, command) = parse_args(arguments)

    # ensure the binary is given
    if not options.binary:
        print "Please provide a path to your Firefox binary: -b, --binary"
        sys.exit(1)

    # set the BROWSER_PATH environment variable so that
    # subshells will be able to invoke mozrunner
    os.environ['BROWSER_PATH'] = options.binary

    # Parse the manifest
    mp = TestManifest(manifests=(options.manifest,), strict=False)

    # run + report
    if command == "testpy":
        tests = mp.active_tests(disabled=False)
        results = test_all_python(mp.get(tests=tests, type='python'), options)
        if results.failures or results.errors:
            sys.exit(report(True, results, None, options))
        else:
            sys.exit(report(False))

    elif command == "testjs":
        tests = mp.active_tests(disabled=False)
        results = test_all_js(mp.get(tests=tests, type='javascript'), options)
        # NOTE(review): JS results are checked via `fails` here but via
        # `failures` in the sibling implementation — confirm which attribute
        # the JS results class actually defines.
        if results.fails:
            sys.exit(report(True, None, results, options))
        else:
            sys.exit(report(False))

    elif command == "testall":
        test_all(mp.active_tests(disabled=False), options)
def run(arguments=sys.argv[1:]):
    """Parse args and dispatch to the requested test command (Python 2).

    NOTE(review): the default `sys.argv[1:]` is captured once at import time;
    later changes to sys.argv are not seen — presumably intentional.
    """
    # parse the command line arguments
    (options, command) = parse_args(arguments)

    # ensure the binary is given
    if not options.binary:
        print "Please provide a path to your Firefox binary: -b, --binary"
        sys.exit(1)

    # Parse the manifest
    mp = TestManifest(manifests=(options.manifest,), strict=False)

    # run + report
    if command == "testpy":
        results = test_all_python(
            mp.get(tests=mp.active_tests(disabled=False), type='python'),
            options)
        if results.failures or results.errors:
            sys.exit(report(True, results, None, options))
        else:
            sys.exit(report(False))

    elif command == "testjs":
        results = test_all_js(
            mp.get(tests=mp.active_tests(disabled=False), type='javascript'),
            options)
        if results.failures:
            sys.exit(report(True, None, results, options))
        else:
            sys.exit(report(False))

    elif command == "testall":
        test_all(mp.active_tests(disabled=False), options)
def test_manifest_subsuites(self):
    """
    test subsuites and conditional subsuites
    """
    relative_path = os.path.join(here, 'subsuite.ini')
    manifest = TestManifest(manifests=(relative_path, ))
    info = {'foo': 'bar'}

    # 6 tests total
    tests = manifest.active_tests(exists=False, **info)
    self.assertEquals(len(tests), 6)

    # only 3 tests for subsuite bar when foo==bar
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite('bar')],
                                  **info)
    self.assertEquals(len(tests), 3)

    # only 1 test for subsuite baz, regardless of conditions
    other = {'something': 'else'}
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite('baz')],
                                  **info)
    self.assertEquals(len(tests), 1)
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite('baz')],
                                  **other)
    self.assertEquals(len(tests), 1)

    # 5 tests match when the condition doesn't match (all tests except
    # the unconditional subsuite)
    info = {'foo': 'blah'}
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite()],
                                  **info)
    self.assertEquals(len(tests), 5)

    # test for illegal subsuite value
    manifest.tests[0][
        'subsuite'] = 'subsuite=bar,foo=="bar",type="nothing"'
    with self.assertRaises(ParseError):
        manifest.active_tests(exists=False,
                              filters=[subsuite('foo')],
                              **info)
def find_manifest_dirs(topsrcdir, manifests):
    """Routine to retrieve directories specified in a manifest, relative to
    topsrcdir.

    It does not recurse into manifests, as we currently have no need for
    that.

    :raises Exception: for any manifest extension other than .ini or .list.
    """
    dirs = set()

    for p in manifests:
        p = os.path.join(topsrcdir, p)

        if p.endswith(".ini"):
            # manifestparser manifest: collect the directories of every
            # referenced manifest file.
            test_manifest = TestManifest()
            test_manifest.read(p)
            dirs |= set([os.path.dirname(m) for m in test_manifest.manifests()])

        elif p.endswith(".list"):
            # reftest manifest: it tracks its own directory set.
            m = ReftestManifest()
            m.load(p)
            dirs |= m.dirs

        else:
            raise Exception(
                '"{}" is not a supported manifest format.'.format(
                    os.path.splitext(p)[1]
                )
            )

    # Strip the topsrcdir prefix and normalize to relative paths.
    dirs = {mozpath.normpath(d[len(topsrcdir) :]).lstrip("/") for d in dirs}

    # Filter out children captured by parent directories because duplicates
    # will confuse things later on.
    def parents(p):
        # Yield every ancestor directory of p, nearest first.
        while True:
            p = mozpath.dirname(p)
            if not p:
                break
            yield p

    seen = set()
    # Shortest paths first guarantees a parent is seen before its children.
    for d in sorted(dirs, key=len):
        if not any(p in seen for p in parents(d)):
            seen.add(d)

    return sorted(seen)
def run_xpcshell_test(self, test_objects=None, **params):
    """Run xpcshell tests via mach, locally or on an Android/b2g device.

    :param test_objects: optional pre-resolved test objects; wrapped into an
        in-memory manifest when given.
    :returns: the harness result, or 1 on an invalid test path.
    """
    from mozbuild.controller.building import BuildDriver

    if test_objects is not None:
        from manifestparser import TestManifest
        m = TestManifest()
        m.tests.extend(test_objects)
        params["manifest"] = m

    driver = self._spawn(BuildDriver)
    driver.install_tests()

    # We should probably have a utility function to ensure the tree is
    # ready to run tests. Until then, we just create the state dir (in
    # case the tree wasn't built with mach).
    self._ensure_state_subdir_exists(".")

    if not params.get("log"):
        # Default structured-log sink and formatting from mach settings.
        log_defaults = {
            self._mach_context.settings["test"]["format"]: sys.stdout
        }
        fmt_defaults = {
            "level": self._mach_context.settings["test"]["level"],
            "verbose": True,
        }
        params["log"] = structured.commandline.setup_logging(
            "XPCShellTests", params, log_defaults, fmt_defaults)

    if not params["threadCount"]:
        # pylint --py3k W1619
        params["threadCount"] = int((cpu_count() * 3) / 2)

    # Remote targets need a verified device and an adb binary.
    if conditions.is_android(self) or self.substs.get(
            "MOZ_BUILD_APP") == "b2g":
        from mozrunner.devices.android_device import (
            verify_android_device,
            get_adb_path,
        )
        device_serial = params.get("deviceSerial")
        verify_android_device(self, network=True,
                              device_serial=device_serial)
        if not params["adbPath"]:
            params["adbPath"] = get_adb_path(self)
        xpcshell = self._spawn(AndroidXPCShellRunner)
    else:
        xpcshell = self._spawn(XPCShellRunner)
    xpcshell.cwd = self._mach_context.cwd

    try:
        return xpcshell.run_test(**params)
    except InvalidTestPathError as e:
        print(str(e))
        return 1
def convert_ini_manifest_to_json(cls, manifest_path):
    """Convert an .ini test manifest into a temporary JSON whitelist manifest
    and return the path of the generated JSON file."""
    manifest = TestManifest([manifest_path])

    # enabled tests -> whitelist; everything else in the manifest -> blacklist
    whitelist = [t['path'] for t in manifest.active_tests(disabled=False)]
    blacklist = [t for t in manifest.paths() if t not in whitelist]

    # always run the shared integration setup script first
    whitelist.insert(
        0, os.path.join(gaia_dir, 'shared', 'test', 'integration', 'setup.js'))

    # NOTE(review): the result of this map() is discarded, so the intended
    # conversion to gaia-relative paths never happens (and `blacklist` is
    # otherwise unused) — confirm whether absolute paths are acceptable here.
    map(lambda l: [os.path.relpath(p, gaia_dir) for p in l],
        (whitelist, blacklist))

    contents = {'whitelist': whitelist}

    # NOTE(review): the temp file is created then reopened by name, which is
    # race-prone (and fails on Windows while the handle is open) — presumably
    # acceptable in this harness.
    manifest_path = tempfile.NamedTemporaryFile(suffix='.json').name
    with open(manifest_path, 'w') as f:
        f.writelines(json.dumps(contents, indent=2))

    return manifest_path
def read_tests(self):
    """Import every test module listed in the manifest and collect all
    PhoneTest subclasses (paired with their config path) into self._tests."""
    self._tests = []
    manifest = TestManifest()
    manifest.read(self._test_path)
    tests_info = manifest.get()
    for t in tests_info:
        # make the test module importable
        if not t['here'] in sys.path:
            sys.path.append(t['here'])
        # strip the .py suffix to get an importable module name
        if t['name'].endswith('.py'):
            t['name'] = t['name'][:-3]
        # add all classes in module that are derived from PhoneTest to
        # the test list
        tests = [(x[1], os.path.normpath(os.path.join(t['here'],
                                                      t.get('config', ''))))
                 for x in inspect.getmembers(__import__(t['name']),
                                             inspect.isclass)
                 if x[0] != 'PhoneTest' and issubclass(x[1],
                                                      phonetest.PhoneTest)]
        self._tests.extend(tests)
def test_none_and_empty_manifest(self):
    """
    Test TestManifest for None and empty manifest, see
    https://bugzilla.mozilla.org/show_bug.cgi?id=1087682
    """
    # Both "no manifests" spellings must yield an empty, usable manifest.
    for empty_input in (None, []):
        manifest = TestManifest(manifests=empty_input, strict=False)
        self.assertEqual(len(manifest.test_paths()), 0)
        self.assertEqual(len(manifest.active_tests()), 0)
def test_comments(self):
    """
    ensure comments work, see
    https://bugzilla.mozilla.org/show_bug.cgi?id=813674
    """
    comment_example = os.path.join(here, 'comment-example.ini')
    manifest = TestManifest(manifests=(comment_example,))
    self.assertEqual(len(manifest.tests), 8)
    # the commented-out entry must not be parsed as a test
    names = [test['name'] for test in manifest.tests]
    self.assertTrue(
        'test_0202_app_launch_apply_update_dirlocked.js' not in names)
def __init__(self, manifest, git_link, git_branch):
    """Cache the gaia-ui test manifest plus per-device active test lists."""
    # generate git path to the tests directory on the given branch
    self._test_href = (git_link + 'tree/' + git_branch +
                       '/tests/python/gaia-ui-tests/gaiatest/tests/')
    # read manifest
    self.manifest = TestManifest(manifests=(manifest,))
    # active tests for the Flame device
    self._flame = self.manifest.active_tests(b2g=True, device='Flame')
    # active tests for desktop b2g
    self._desktop = self.manifest.active_tests(b2g=True, device='desktop')
def get_manifests(self, suite, mozinfo): mozinfo = dict(mozinfo) # Compute all tests for the given suite/subsuite. tests = self.get_tests(suite) if "web-platform-tests" in suite: manifests = set() for t in tests: manifests.add(t["manifest"]) return {"active": list(manifests), "skipped": []} manifests = set(chunk_by_runtime.get_manifest(t) for t in tests) # Compute the active tests. m = TestManifest() m.tests = tests tests = m.active_tests(disabled=False, exists=False, **mozinfo) active = set(chunk_by_runtime.get_manifest(t) for t in tests) skipped = manifests - active return {"active": list(active), "skipped": list(skipped)}
def run_test(self, test_paths, interactive=False, keep_going=False,
             sequential=False, shuffle=False, debugger=None,
             debuggerArgs=None, debuggerInteractive=None,
             rerun_failures=False, test_objects=None,
             # ignore parameters from other platforms' options
             **kwargs):
    """Runs an individual xpcshell test."""
    from mozbuild.testing import TestResolver
    from manifestparser import TestManifest

    # TODO Bug 794506 remove once mach integrates with virtualenv.
    build_path = os.path.join(self.topobjdir, 'build')
    if build_path not in sys.path:
        sys.path.append(build_path)

    if test_paths == ['all']:
        # Special path value: run the entire suite instead of resolving
        # individual tests.
        self.run_suite(interactive=interactive,
                       keep_going=keep_going, shuffle=shuffle,
                       sequential=sequential, debugger=debugger,
                       debuggerArgs=debuggerArgs,
                       debuggerInteractive=debuggerInteractive,
                       rerun_failures=rerun_failures)
        return
    elif test_paths:
        test_paths = [self._wrap_path_argument(p).relpath()
                      for p in test_paths]

    if test_objects:
        tests = test_objects
    else:
        resolver = self._spawn(TestResolver)
        tests = list(resolver.resolve_tests(paths=test_paths,
                                            flavor='xpcshell'))

    if not tests:
        raise InvalidTestPathError(
            'We could not find an xpcshell test '
            'for the passed test path. Please select a path that is '
            'a test file or is a directory containing xpcshell tests.')

    # Dynamically write out a manifest holding all the discovered tests.
    manifest = TestManifest()
    manifest.tests.extend(tests)

    args = {
        'interactive': interactive,
        'keep_going': keep_going,
        'shuffle': shuffle,
        'sequential': sequential,
        'debugger': debugger,
        'debuggerArgs': debuggerArgs,
        'debuggerInteractive': debuggerInteractive,
        'rerun_failures': rerun_failures,
        'manifest': manifest,
    }

    return self._run_xpcshell_harness(**args)
def print_test_dirs(topsrcdir, manifest_file): """ Simple routine which prints the paths of directories specified in a Marionette manifest, relative to topsrcdir. This does not recurse into manifests, as we currently have no need for that. """ # output the directory of this (parent) manifest topsrcdir = os.path.abspath(topsrcdir) scriptdir = os.path.abspath(os.path.dirname(__file__)) print scriptdir[len(topsrcdir) + 1:] # output the directories of all the other manifests dirs = set() manifest = TestManifest() manifest.read(manifest_file) for i in manifest.get(): dirs.add(os.path.dirname(i['manifest'])[len(topsrcdir) + 1:]) for path in dirs: print path
def get_chunked_manifests(flavor, subsuite, chunks, mozinfo):
    """Compute which manifests should run in which chunks with the given category
    of tests.

    Args:
        flavor (str): The suite to run. Values are defined by the 'build_flavor' key
            in `moztest.resolve.TEST_SUITES`.
        subsuite (str): The subsuite to run or 'undefined' to denote no subsuite.
        chunks (int): Number of chunks to split manifests across.
        mozinfo (frozenset): Set of data in the form of (<key>, <value>) used
                             for filtering.

    Returns:
        A list of manifests where each item contains the manifest that should
        run in the corresponding chunk.
    """
    mozinfo = dict(mozinfo)
    # Compute all tests for the given suite/subsuite.
    tests = get_tests(flavor, subsuite)
    all_manifests = set(t['manifest_relpath'] for t in tests)
    # Compute only the active tests.
    m = TestManifest()
    m.tests = tests
    # exists=False: entries need not exist on disk for this computation.
    tests = m.active_tests(disabled=False, exists=False, **mozinfo)
    active_manifests = set(t['manifest_relpath'] for t in tests)
    # Run the chunking algorithm.
    chunked_manifests = [
        c[1] for c in chunk_by_runtime(
            None,
            chunks,
            get_runtimes(mozinfo['os'])
        ).get_chunked_manifests(tests)
    ]
    # Add all skipped manifests to the first chunk so they still show up in the
    # logs. They won't impact runtime much.
    skipped_manifests = all_manifests - active_manifests
    chunked_manifests[0].extend(skipped_manifests)
    return chunked_manifests
def find_manifest_dirs(topsrcdir, manifests):
    """Routine to retrieve directories specified in a manifest, relative
    to topsrcdir.

    It does not recurse into manifests, as we currently have no need for
    that.  Raises for any manifest that is neither .ini nor .list.
    """
    dirs = set()

    for p in manifests:
        p = os.path.join(topsrcdir, p)
        if p.endswith('.ini'):
            # manifestparser-style manifest.
            test_manifest = TestManifest()
            test_manifest.read(p)
            dirs |= set([os.path.dirname(m) for m in test_manifest.manifests()])
        elif p.endswith('.list'):
            # reftest-style manifest.
            m = ReftestManifest()
            m.load(p)
            dirs |= m.dirs
        else:
            raise Exception('"{}" is not a supported manifest format.'.format(
                os.path.splitext(p)[1]))

    # Convert absolute directories into paths relative to topsrcdir.
    dirs = {mozpath.normpath(d[len(topsrcdir):]).lstrip('/') for d in dirs}

    # Filter out children captured by parent directories because duplicates
    # will confuse things later on.
    def parents(p):
        # Yield every ancestor directory of p, nearest first.
        while True:
            p = mozpath.dirname(p)
            if not p:
                break
            yield p

    seen = set()
    # Sorting by length guarantees a parent is visited before any child.
    for d in sorted(dirs, key=len):
        if not any(p in seen for p in parents(d)):
            seen.add(d)

    return sorted(seen)
def inner(*tests, **opts):
    """Run the harness over the given tests and return (result, parsed log).

    Builds an in-memory manifest from *tests*, merges *opts* into the
    shared options dict, then parses the captured log buffer as a JSON
    array of structured log lines.
    """
    assert tests
    manifest = TestManifest()
    for test in tests:
        manifest.tests.append(normalize(test))
    options['manifestFile'] = manifest
    options.update(opts)

    result = runtests.run_test_harness(parser, Namespace(**options))

    # Each captured line is a JSON object; join them into one array.
    log_lines = buf.getvalue().splitlines()
    out = json.loads('[' + ','.join(log_lines) + ']')
    buf.close()
    return result, out
def _get_subtests_from_ini(self, manifest_path, suite_name):
    """
    Returns a list of (sub)tests from an ini file containing the test
    definitions.

    :param str manifest_path: path to the ini file
    :param str suite_name: suite label recorded alongside each test URL
    :return list: the list of the tests
    """
    manifest = TestManifest([manifest_path], strict=False)
    active = manifest.active_tests(exists=False, disabled=False)

    subtests = {}
    for entry in active:
        name = entry["name"]
        subtests[name] = entry["manifest"]
        # Record the URL metadata for this test as a side effect.
        self._urls[name] = {
            "type": suite_name,
            "url": entry["test_url"],
        }

    # Keep the URL map ordered by the length of the test name.
    self._urls = collections.OrderedDict(
        sorted(self._urls.items(), key=lambda item: len(item[0])))

    return subtests
def run(arguments=None):
    """Parse the command line and run the requested test command.

    :param arguments: list of command-line arguments; defaults to
        ``sys.argv[1:]``, read at call time rather than import time.

    Exits the process (via ``sys.exit`` with ``report()``'s value) for
    the "testpy" and "testjs" commands; "testall" returns normally.
    """
    # Read sys.argv lazily: the previous def-time default of
    # sys.argv[1:] was frozen at import and ignored later changes.
    if arguments is None:
        arguments = sys.argv[1:]

    # parse the command line arguments
    (options, command) = parse_args(arguments=arguments)

    # Parse the manifest
    mp = TestManifest(manifests=(options.manifest,))

    # run + report
    if command == "testpy":
        results = test_all_python(
            mp.get(tests=mp.active_tests(disabled=False), type='python'),
            options)
        if results.failures or results.errors:
            sys.exit(report(True, results, None, options))
        else:
            sys.exit(report(False))

    elif command == "testjs":
        results = test_all_js(
            mp.get(tests=mp.active_tests(disabled=False), type='javascript'),
            options)
        if results.failures:
            sys.exit(report(True, None, results, options))
        else:
            sys.exit(report(False))

    elif command == "testall":
        test_all(mp.active_tests(disabled=False), options)
def add_test(self, test, expected='pass'):
    """Add a test file, directory, or .ini manifest to self.tests.

    :param test: path to a test file, a directory to walk for tests,
        or a manifest (.ini) file to expand.
    :param expected: expected result for the test, 'pass' by default.
    Raises IOError when a manifest references a missing test file.
    """
    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        # Walk the directory; manifests found inside are deliberately
        # ignored (the directory run supersedes them).
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if filename.endswith('.ini'):
                    msg_tmpl = (
                        "Ignoring manifest '{0}'; running all tests in '{1}'."
                        " See --help for details.")
                    relpath = os.path.relpath(os.path.join(root, filename),
                                              filepath)
                    self.logger.warning(msg_tmpl.format(relpath, filepath))
                elif self._is_filename_valid(filename):
                    test_file = os.path.join(root, filename)
                    self.add_test(test_file)
        return

    file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

    if file_ext == '.ini':
        manifest = TestManifest()
        manifest.read(filepath)

        filters = []
        if self.test_tags:
            filters.append(tags(self.test_tags))

        json_path = update_mozinfo(filepath)
        # Log the mozinfo.json that was actually consumed (previously this
        # logged the literal None because the return value was discarded).
        self.logger.info(
            "mozinfo updated with the following: {}".format(json_path))
        self.logger.info("mozinfo is: {}".format(mozinfo.info))
        manifest_tests = manifest.active_tests(exists=False,
                                               disabled=True,
                                               filters=filters,
                                               app=self.appName,
                                               e10s=self.e10s,
                                               **mozinfo.info)
        if len(manifest_tests) == 0:
            self.logger.error("No tests to run using specified "
                              "combination of filters: {}".format(
                                  manifest.fmt_filters()))

        # Separate runnable entries from disabled ones; disabled tests
        # are recorded so they can be reported as skipped.
        target_tests = []
        for test in manifest_tests:
            if test.get('disabled'):
                self.manifest_skipped_tests.append(test)
            else:
                target_tests.append(test)

        for i in target_tests:
            if not os.path.exists(i["path"]):
                raise IOError("test file: {} does not exist".format(
                    i["path"]))
            self.add_test(i["path"], i["expected"])
        return

    self.tests.append({'filepath': filepath, 'expected': expected})
def buildTestPath(self, options):
    """ Build the url path to the specific test harness and test file or directory
        Build a manifest of tests to run and write out a json file for
        the harness to read

        Side effects: may write 'tests.json' to the current working
        directory and rebind options.manifestFile to it.
    """
    if options.manifestFile and os.path.isfile(options.manifestFile):
        manifest = TestManifest(strict=False)
        manifest.read(options.manifestFile)
        # Bug 883858 - return all tests including disabled tests
        tests = manifest.active_tests(disabled=False, **mozinfo.info)
        paths = []
        for test in tests:
            # Make manifest paths relative to the test root.
            tp = test['path'].split(self.getTestRoot(options), 1)[1].strip('/')

            # Filter out tests if we are using --test-path
            if options.testPath and not tp.startswith(options.testPath):
                continue

            paths.append({'path': tp})

        # Bug 883865 - add this functionality into manifestDestiny
        with open('tests.json', 'w') as manifestFile:
            manifestFile.write(json.dumps({'tests': paths}))
        options.manifestFile = 'tests.json'

    testHost = "http://mochi.test:8888"
    testURL = ("/").join([testHost, self.TEST_PATH, options.testPath])

    # A single test file combined with --repeat runs through the plain
    # loop harness instead.
    if os.path.isfile(
            os.path.join(self.oldcwd,
                         os.path.dirname(__file__),
                         self.TEST_PATH,
                         options.testPath)) and options.repeat > 0:
        testURL = ("/").join([testHost, self.PLAIN_LOOP_PATH])
    # Harness selection: chrome/a11y, browser-chrome, then ipcplugins.
    if options.chrome or options.a11y:
        testURL = ("/").join([testHost, self.CHROME_PATH])
    elif options.browserChrome:
        testURL = "about:blank"
    elif options.ipcplugins:
        testURL = ("/").join(
            [testHost, self.TEST_PATH, "dom/plugins/test"])
    return testURL
def run_mozmill(runner, args):
    """Collect tests from args.mozmill and run them with the given runner.

    Exits the process with status 1 when the run raised or any test failed.
    """
    tests = []
    for test in args.mozmill:
        testpath = os.path.expanduser(test)
        realpath = os.path.realpath(testpath)

        if not os.path.exists(testpath):
            raise Exception("Not a valid test file/directory: %s" % test)

        root,ext = os.path.splitext(testpath)
        if ext == ".ini":
            # This is a test manifest, use the parser instead
            manifest = TestManifest(manifests=[testpath], strict=False)
            # NOTE: Python 2 print statement.
            print mozinfo.info
            tests.extend(manifest.active_tests(**mozinfo.info))
        else:
            def testname(t):
                # Preserve the user-supplied prefix when test was a directory.
                if os.path.isdir(realpath):
                    return os.path.join(test, os.path.relpath(t, testpath))
                return test
            tests.extend([{'name': testname(t), 'path': t }
                          for t in mozmill.collect_tests(testpath)])

    if args.verbose and len(tests):
        print "Running these tests:"
        print "\t" + "\n\t".join(map(lambda x: x['path'], tests))

    # Record any exception so the runner can still be finished and the
    # traceback re-reported afterwards.
    exception = None
    try:
        runner.run(tests, True)
    except:
        exception_type, exception, tb = sys.exc_info()

    results = runner.finish(fatal=exception is not None)

    if exception:
        traceback.print_exception(exception_type, exception, tb)
    if exception or results.fails:
        sys.exit(1)
def test_missing_paths(self):
    """
    Test paths that don't exist raise an exception in strict mode.
    """
    tempdir = tempfile.mkdtemp()
    # Clean the temp directory up even when an assertion fails;
    # previously a failed assertion leaked the directory.
    try:
        missing_path = os.path.join(here, 'missing-path.ini')
        manifest = TestManifest(manifests=(missing_path,), strict=True)

        # Every strict operation on a manifest with missing paths raises.
        self.assertRaises(IOError, manifest.active_tests)
        self.assertRaises(IOError, manifest.copy, tempdir)
        self.assertRaises(IOError, manifest.update, tempdir)
    finally:
        shutil.rmtree(tempdir)
def add_test(self, test, expected='pass', test_container=None):
    """Add a test file, directory, or .ini manifest to self.tests.

    :param test: path to a test file, a directory to walk for tests,
        or a manifest (.ini) file to expand.
    :param expected: expected result for the test, 'pass' by default.
    :param test_container: container flag stored in the test entry.
    Raises IOError when a manifest references a missing test file.
    """
    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        # Walk the directory and add every test_*.py / test_*.js file.
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if (filename.startswith('test_') and
                        (filename.endswith('.py') or
                         filename.endswith('.js'))):
                    filepath = os.path.join(root, filename)
                    self.add_test(filepath)
        return

    file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

    if file_ext == '.ini':
        manifest = TestManifest()
        manifest.read(filepath)

        filters = []
        if self.test_tags:
            filters.append(tags(self.test_tags))

        json_path = update_mozinfo(filepath)
        # Log the mozinfo.json that was actually consumed (previously the
        # assigned json_path was ignored and the literal None was logged).
        self.logger.info(
            "mozinfo updated with the following: {}".format(json_path))
        manifest_tests = manifest.active_tests(exists=False,
                                               disabled=True,
                                               filters=filters,
                                               device=self.device,
                                               app=self.appName,
                                               e10s=self.e10s,
                                               **mozinfo.info)
        if len(manifest_tests) == 0:
            self.logger.error("no tests to run using specified "
                              "combination of filters: {}".format(
                                  manifest.fmt_filters()))

        # Separate runnable entries from disabled ones; disabled tests
        # are recorded so they can be reported as skipped.
        target_tests = []
        for test in manifest_tests:
            if test.get('disabled'):
                self.manifest_skipped_tests.append(test)
            else:
                target_tests.append(test)

        for i in target_tests:
            if not os.path.exists(i["path"]):
                raise IOError("test file: %s does not exist" % i["path"])
            # Manifest entries always recurse with no container.
            self.add_test(i["path"], i["expected"], None)
        return

    self.tests.append({
        'filepath': filepath,
        'expected': expected,
        'test_container': test_container,
    })
def inner(*tests, **opts):
    """Run the harness over the given tests and return (result, parsed log).

    Builds an in-memory manifest from *tests*, merges *opts* into the
    shared options dict, then parses the captured log buffer as a JSON
    array of structured log lines.
    """
    assert tests
    manifest = TestManifest()
    manifest.tests.extend(normalize(t) for t in tests)
    options["manifestFile"] = manifest
    options.update(opts)

    result = runtests.run_test_harness(parser, Namespace(**options))

    # Each captured line is a JSON object; join them into one array.
    log_lines = buf.getvalue().splitlines()
    out = json.loads("[" + ",".join(log_lines) + "]")
    buf.close()
    return result, out
def print_test_dirs(topsrcdir, manifest_file):
    """
    Simple routine which prints the paths of directories specified
    in a Marionette manifest, relative to topsrcdir.  This does not recurse
    into manifests, as we currently have no need for that.
    """
    dirs = set()
    # output the directory of this (parent) manifest
    topsrcdir = os.path.abspath(topsrcdir)
    scriptdir = os.path.abspath(os.path.dirname(__file__))
    dirs.add(scriptdir[len(topsrcdir) + 1:])

    # output the directories of all the other manifests
    manifest = TestManifest()
    manifest.read(manifest_file)
    for i in manifest.get():
        d = os.path.dirname(i['manifest'])[len(topsrcdir) + 1:]
        dirs.add(d)
    for path in dirs:
        # Normalize Windows separators before printing.
        path = path.replace('\\', '/')
        # NOTE: Python 2 print statement.
        print path
def test_manifest_subsuites(self):
    """
    test subsuites and conditional subsuites
    """
    relative_path = os.path.join(here, 'subsuite.ini')
    manifest = TestManifest(manifests=(relative_path,))
    info = {'foo': 'bar'}

    # 6 tests total
    tests = manifest.active_tests(exists=False, **info)
    self.assertEquals(len(tests), 6)

    # only 3 tests for subsuite bar when foo==bar
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite('bar')],
                                  **info)
    self.assertEquals(len(tests), 3)

    # only 1 test for subsuite baz, regardless of conditions
    other = {'something': 'else'}
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite('baz')],
                                  **info)
    self.assertEquals(len(tests), 1)
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite('baz')],
                                  **other)
    self.assertEquals(len(tests), 1)

    # 4 tests match when the condition doesn't match (all tests except
    # the unconditional subsuite)
    # NOTE(review): the comment above says 4 but the assertion expects 5 --
    # confirm against subsuite.ini which is correct.
    info = {'foo': 'blah'}
    tests = manifest.active_tests(exists=False,
                                  filters=[subsuite()],
                                  **info)
    self.assertEquals(len(tests), 5)

    # test for illegal subsuite value
    manifest.tests[0]['subsuite'] = 'subsuite=bar,foo=="bar",type="nothing"'
    with self.assertRaises(ParseError):
        manifest.active_tests(exists=False,
                              filters=[subsuite('foo')],
                              **info)
def parse_manifests(self, tests_path, options=None):
    """Parses a list of given files as manifests, skipping those
    that are unparsable.
    Outputs a summary that gives information about the tests
    activated/skipped.

    :param tests_path: root directory the manifest relative paths are
        joined against.
    :param options: mapping of filter values passed to active_tests().
    Side effects: persists one TestSuiteState per manifest and marks
    this revision as processed.
    """
    self.total_tests_count = 0
    self.skipped_tests_count = 0
    options = options or dict()
    for rel_path in CodeRevision.MANIFESTS_REL_PATHS:
        manifest_path = os.path.join(tests_path, rel_path)
        test_manifest = TestManifest([manifest_path])
        # Active tests after filtering; anything in the manifest but not
        # active is counted as skipped.
        active_tests = [t['path'] for t in
                        test_manifest.active_tests(exists=False,
                                                   disabled=False,
                                                   **options)]
        skipped_tests = [t['path'] for t in test_manifest.tests
                         if t['path'] not in active_tests]
        self.total_tests_count += len(test_manifest.tests)
        self.skipped_tests_count += len(skipped_tests)
        test_suite_state = TestSuiteState(revision=self,
                                          test_suite=rel_path,
                                          options=options,
                                          active_tests=active_tests,
                                          skipped_tests=skipped_tests)
        self.manifest_states.append(test_suite_state)
        # Persist the per-suite state immediately.
        test_suite_state.save()
    self.processed = True
    self.save()
def test_manifest_subsuites(self):
    """
    test subsuites and conditional subsuites
    """
    # Minimal stand-in for an options object with attribute access.
    class AttributeDict(dict):
        def __getattr__(self, attr):
            return self[attr]
        def __setattr__(self, attr, value):
            self[attr] = value

    relative_path = os.path.join(here, 'subsuite.ini')
    manifest = TestManifest(manifests=(relative_path,))
    info = {'foo': 'bar'}
    options = {'subsuite': 'bar'}

    # 6 tests total
    self.assertEquals(len(manifest.active_tests(exists=False, **info)), 6)
    # only 3 tests for subsuite bar when foo==bar
    self.assertEquals(len(manifest.active_tests(
        exists=False,
        options=AttributeDict(options),
        **info)), 3)

    options = {'subsuite': 'baz'}
    other = {'something': 'else'}
    # only 1 test for subsuite baz, regardless of conditions
    self.assertEquals(len(manifest.active_tests(
        exists=False,
        options=AttributeDict(options),
        **info)), 1)
    self.assertEquals(len(manifest.active_tests(
        exists=False,
        options=AttributeDict(options),
        **other)), 1)

    # 4 tests match when the condition doesn't match (all tests except
    # the unconditional subsuite)
    # NOTE(review): the comment above says 4 but the assertion expects 5 --
    # confirm against subsuite.ini which is correct.
    info = {'foo': 'blah'}
    options = {'subsuite': None}
    self.assertEquals(len(manifest.active_tests(
        exists=False,
        options=AttributeDict(options),
        **info)), 5)

    # test for illegal subsuite value
    manifest.tests[0]['subsuite'] = 'subsuite=bar,foo=="bar",type="nothing"'
    self.assertRaises(ParseError,
                      manifest.active_tests,
                      exists=False,
                      options=AttributeDict(options),
                      **info)
def test_testmanifest(self):
    # Test filtering based on platform:
    filter_example = os.path.join(here, 'filter-example.ini')
    manifest = TestManifest(manifests=(filter_example,), strict=False)
    # Only tests whose manifest conditions match the given os are active.
    self.assertEqual([i['name'] for i in manifest.active_tests(
                         os='win', disabled=False, exists=False)],
                     ['windowstest', 'fleem'])
    self.assertEqual([i['name'] for i in manifest.active_tests(
                         os='linux', disabled=False, exists=False)],
                     ['fleem', 'linuxtest'])

    # Look for existing tests.  There is only one:
    self.assertEqual([i['name'] for i in manifest.active_tests()],
                     ['fleem'])

    # You should be able to expect failures:
    last = manifest.active_tests(exists=False, toolkit='gtk2')[-1]
    self.assertEqual(last['name'], 'linuxtest')
    self.assertEqual(last['expected'], 'pass')
    last = manifest.active_tests(exists=False, toolkit='cocoa')[-1]
    self.assertEqual(last['expected'], 'fail')
def __init__(self, args):
    """Parse CLI options, collect tests, and wire up event handlers."""
    # event handler plugin names
    self.handlers = {}
    for handler_class in handlers.handlers():
        name = getattr(handler_class, 'name', handler_class.__name__)
        self.handlers[name] = handler_class

    self.jsbridge_port = jsbridge.find_port()

    # add and parse options
    mozrunner.CLI.__init__(self, args)

    # Do not allow manifests and tests specified at the same time
    if self.options.manifests and self.options.tests:
        self.parser.error("Options %s and %s are mutually exclusive." %
                          (self.parser.get_option('-t'),
                           self.parser.get_option('-m')))

    # read tests from manifests (if any)
    self.manifest = TestManifest(manifests=self.options.manifests,
                                 strict=False)

    # expand user directory and check existence for the test
    for test in self.options.tests:
        testpath = os.path.expanduser(test)
        realpath = os.path.realpath(testpath)
        if not os.path.exists(testpath):
            raise Exception("Not a valid test file/directory: %s" % test)

        # collect the tests
        def testname(t):
            # Preserve the user-supplied prefix for directory arguments.
            if os.path.isdir(realpath):
                return os.path.join(test, os.path.relpath(t, testpath))
            return test
        tests = [{'name': testname(t), 'path': t}
                 for t in collect_tests(testpath)]
        self.manifest.tests.extend(tests)

    # list the tests and exit if specified
    if self.options.list_tests:
        for test in self.manifest.tests:
            # NOTE: Python 2 print statement.
            print test['path']
        self.parser.exit()

    # instantiate event handler plugins
    self.event_handlers = []
    for name, handler_class in self.handlers.items():
        if name in self.options.disable:
            continue
        handler = handlers.instantiate_handler(handler_class, self.options)
        if handler is not None:
            self.event_handlers.append(handler)
    for handler in self.options.handlers:
        # user handlers
        try:
            handler_class = handlers.load_handler(handler)
        except BaseException as e:
            self.parser.error(str(e))
        _handler = handlers.instantiate_handler(handler_class, self.options)
        if _handler is not None:
            self.event_handlers.append(_handler)

    # if in manual mode, ensure we're interactive
    if self.options.manual:
        self.options.interactive = True
class CLI(mozrunner.CLI):
    """Command line interface to mozmill."""

    module = "mozmill"

    def __init__(self, args):
        """Parse CLI options, collect tests, and wire up event handlers."""
        # event handler plugin names
        self.handlers = {}
        for handler_class in handlers.handlers():
            name = getattr(handler_class, 'name', handler_class.__name__)
            self.handlers[name] = handler_class

        self.jsbridge_port = jsbridge.find_port()

        # add and parse options
        mozrunner.CLI.__init__(self, args)

        # Do not allow manifests and tests specified at the same time
        if self.options.manifests and self.options.tests:
            self.parser.error("Options %s and %s are mutually exclusive." %
                              (self.parser.get_option('-t'),
                               self.parser.get_option('-m')))

        # read tests from manifests (if any)
        self.manifest = TestManifest(manifests=self.options.manifests,
                                     strict=False)

        # expand user directory and check existence for the test
        for test in self.options.tests:
            testpath = os.path.expanduser(test)
            realpath = os.path.realpath(testpath)
            if not os.path.exists(testpath):
                raise Exception("Not a valid test file/directory: %s" % test)

            # collect the tests
            def testname(t):
                # Preserve the user-supplied prefix for directory arguments.
                if os.path.isdir(realpath):
                    return os.path.join(test, os.path.relpath(t, testpath))
                return test
            tests = [{'name': testname(t), 'path': t}
                     for t in collect_tests(testpath)]
            self.manifest.tests.extend(tests)

        # list the tests and exit if specified
        if self.options.list_tests:
            for test in self.manifest.tests:
                # NOTE: Python 2 print statement.
                print test['path']
            self.parser.exit()

        # instantiate event handler plugins
        self.event_handlers = []
        for name, handler_class in self.handlers.items():
            if name in self.options.disable:
                continue
            handler = handlers.instantiate_handler(handler_class,
                                                   self.options)
            if handler is not None:
                self.event_handlers.append(handler)
        for handler in self.options.handlers:
            # user handlers
            try:
                handler_class = handlers.load_handler(handler)
            except BaseException as e:
                self.parser.error(str(e))
            _handler = handlers.instantiate_handler(handler_class,
                                                    self.options)
            if _handler is not None:
                self.event_handlers.append(_handler)

        # if in manual mode, ensure we're interactive
        if self.options.manual:
            self.options.interactive = True

    def add_options(self, parser):
        """Add command line options."""
        group = OptionGroup(parser, 'MozRunner options')
        mozrunner.CLI.add_options(self, group)
        parser.add_option_group(group)

        group = OptionGroup(parser, 'MozMill options')
        group.add_option("-t", "--test",
                         dest="tests", default=[], action='append',
                         help='Run test')
        group.add_option("--timeout", dest="timeout", type="float",
                         default=JSBRIDGE_TIMEOUT,
                         help="Seconds before harness timeout if no "
                              "communication is taking place")
        group.add_option("--restart", dest='restart', action='store_true',
                         default=False,
                         help="Restart the application and reset the "
                              "profile between each test file")
        group.add_option("-m", "--manifest", dest='manifests',
                         action='append', metavar='MANIFEST',
                         help='test manifest .ini file')
        group.add_option('-D', '--debug', dest="debug",
                         action="store_true", default=False,
                         help="debug mode")
        group.add_option('--list-tests', dest='list_tests',
                         action='store_true', default=False,
                         help="List test files that would be run, in order")
        group.add_option('--handler', dest='handlers', action='append',
                         default=[], metavar='PATH:CLASS',
                         help="Specify an event handler given a file PATH "
                              "and the CLASS in the file")
        group.add_option('--screenshots-path', dest='screenshots_path',
                         metavar='PATH',
                         help='Path of directory to use for screenshots')
        # --disable only makes sense when default handlers exist.
        if self.handlers:
            group.add_option('--disable', dest='disable', action='append',
                             default=[], metavar='HANDLER',
                             help="Disable a default event handler (%s)" %
                                  ','.join(self.handlers.keys()))
        group.add_option('--manual', dest='manual', action='store_true',
                         default=False,
                         help="start the browser without running any tests")
        parser.add_option_group(group)

        # add option for included event handlers
        for name, handler_class in self.handlers.items():
            if hasattr(handler_class, 'add_options'):
                group = OptionGroup(parser, '%s options' % name,
                                    description=getattr(handler_class,
                                                        '__doc__', None))
                handler_class.add_options(group)
                parser.add_option_group(group)

    def profile_args(self):
        """Setup profile settings for the profile object.

        Returns arguments needed to make a profile object from
        this command-line interface.
        """
        profile_args = mozrunner.CLI.profile_args(self)
        profile_args.setdefault('addons', []).extend(ADDONS)
        profile_args['preferences'] = {
            'extensions.jsbridge.port': self.jsbridge_port,
            'focusmanager.testmode': True
        }
        if self.options.debug:
            # Extra prefs that make failures visible while debugging.
            prefs = profile_args['preferences']
            prefs['extensions.checkCompatibility'] = False
            prefs['extensions.jsbridge.log'] = True
            prefs['javascript.options.strict'] = True
        return profile_args

    def command_args(self):
        """Arguments to the application to be run."""
        cmdargs = mozrunner.CLI.command_args(self)
        if self.options.debug and '-jsconsole' not in cmdargs:
            cmdargs.append('-jsconsole')
        return cmdargs

    def run(self):
        """CLI front end to run mozmill."""

        # make sure you have tests to run
        if (not self.manifest.tests) and (not self.options.manual):
            self.parser.error("No tests found. Please specify with -t or -m")

        # create a Mozrunner
        runner = self.create_runner()

        # create an instance of MozMill
        mozmill = MozMill(runner, self.jsbridge_port,
                          jsbridge_timeout=self.options.timeout,
                          handlers=self.event_handlers,
                          screenshots_path=self.options.screenshots_path)

        # set debugger arguments
        mozmill.set_debugger(*self.debugger_arguments())

        # load the mozmill + jsbridge extension but don't run any tests
        # (for debugging)
        if self.options.manual:
            try:
                mozmill.start_runner()
                mozmill.runner.wait()
            except (JSBridgeDisconnectError, KeyboardInterrupt):
                pass
            return

        # run the tests
        exception = None
        tests = self.manifest.active_tests(**mozinfo.info)
        try:
            mozmill.run(tests, self.options.restart)
        except:
            # Record the failure so mozmill.finish() can still run.
            exception_type, exception, tb = sys.exc_info()

        # do whatever reporting you're going to do
        results = mozmill.finish(fatal=exception is not None)

        # exit on bad stuff happen
        if exception:
            traceback.print_exception(exception_type, exception, tb)
        if exception or results.fails:
            sys.exit(1)

        # return results on success [currently unused]
        return results
def run_test(self, test, testtype):
    """Run a single test file, a directory of tests, or an .ini manifest.

    :param test: path to a test file (.py/.js), a directory, or a
        manifest (.ini) file.
    :param testtype: '+'/'-' prefixed type flags (e.g. 'b2g+browser')
        used to filter manifest entries; None means no filtering.
    Side effects: updates self.failed/self.todo/self.passed/self.failures.
    """
    # Lazily start the supporting services on first use.
    if not self.httpd:
        self.start_httpd()
    if not self.marionette:
        self.start_marionette()

    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        # Recurse into the directory for test_*/browser_* .py/.js files.
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if ((filename.startswith('test_') or
                     filename.startswith('browser_')) and
                        (filename.endswith('.py') or
                         filename.endswith('.js'))):
                    filepath = os.path.join(root, filename)
                    self.run_test(filepath, testtype)
        return

    mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

    testloader = unittest.TestLoader()
    suite = unittest.TestSuite()

    if file_ext == '.ini':
        # Translate '+type'/'-type' tokens into manifest filter kwargs.
        if testtype is not None:
            testargs = {}
            testtypes = testtype.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({atype[1:]: 'true'})
                elif atype.startswith('-'):
                    testargs.update({atype[1:]: 'false'})
                else:
                    testargs.update({atype: 'true'})

        manifest = TestManifest()
        manifest.read(filepath)

        if testtype is None:
            manifest_tests = manifest.get()
        else:
            manifest_tests = manifest.get(**testargs)

        for i in manifest_tests:
            self.run_test(i["path"], testtype)
        return

    self.logger.info('TEST-START %s' % os.path.basename(test))

    if file_ext == '.py':
        # Collect every unittest.TestCase subclass defined in the module.
        test_mod = imp.load_source(mod_name, filepath)
        for name in dir(test_mod):
            obj = getattr(test_mod, name)
            if (isinstance(obj, (type, types.ClassType)) and
                    issubclass(obj, unittest.TestCase)):
                testnames = testloader.getTestCaseNames(obj)
                for testname in testnames:
                    suite.addTest(obj(self.marionette, methodName=testname))
    elif file_ext == '.js':
        suite.addTest(MarionetteJSTestCase(self.marionette, jsFile=filepath))

    if suite.countTestCases():
        results = MarionetteTextTestRunner(verbosity=3).run(suite)
        self.failed += len(results.failures) + len(results.errors)
        self.todo = 0
        if hasattr(results, 'skipped'):
            self.todo += len(results.skipped) + len(results.expectedFailures)
        self.passed += results.passed
        for failure in results.failures + results.errors:
            self.failures.append((results.getInfo(failure[0]),
                                  failure[1],
                                  'TEST-UNEXPECTED-FAIL'))
        # Fixed attribute name: the guard previously checked
        # 'unexpectedSuccess' (singular) while the attribute read below is
        # 'unexpectedSuccesses', so this branch could never run.
        if hasattr(results, 'unexpectedSuccesses'):
            self.failed += len(results.unexpectedSuccesses)
            for failure in results.unexpectedSuccesses:
                self.failures.append((results.getInfo(failure[0]),
                                      failure[1],
                                      'TEST-UNEXPECTED-PASS'))
def run_test(self, test, testtype):
    """Run a single test file, a directory, or an .ini manifest, with
    optional Datazilla perf reporting when self.perf is set.
    """
    if not self.httpd:
        # NOTE: Python 2 print statement.
        print "starting httpd"
        self.start_httpd()

    if not self.marionette:
        self.start_marionette()

    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        # Recurse into the directory for test_*/browser_* .py/.js files.
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if ((filename.startswith('test_') or
                     filename.startswith('browser_')) and
                        (filename.endswith('.py') or
                         filename.endswith('.js'))):
                    filepath = os.path.join(root, filename)
                    self.run_test(filepath, testtype)
        return

    mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

    testloader = unittest.TestLoader()
    suite = unittest.TestSuite()

    if file_ext == '.ini':
        # Translate '+type'/'-type' tokens into manifest filter kwargs;
        # skipped tests are always excluded.
        testargs = { 'skip': 'false' }
        if testtype is not None:
            testtypes = testtype.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })

        manifest = TestManifest()
        manifest.read(filepath)

        if self.perf:
            # Build the Datazilla submission request from manifest metadata.
            if self.perfserv is None:
                self.perfserv = manifest.get("perfserv")[0]
            machine_name = socket.gethostname()
            try:
                manifest.has_key("machine_name")
                machine_name = manifest.get("machine_name")[0]
            except:
                # Fall back to the local hostname when the manifest does
                # not define machine_name.
                self.logger.info("Using machine_name: %s" % machine_name)
            os_name = platform.system()
            os_version = platform.release()
            self.perfrequest = datazilla.DatazillaRequest(
                server=self.perfserv,
                machine_name=machine_name,
                os=os_name,
                os_version=os_version,
                platform=manifest.get("platform")[0],
                build_name=manifest.get("build_name")[0],
                version=manifest.get("version")[0],
                revision=self.revision,
                branch=manifest.get("branch")[0],
                id=os.getenv('BUILD_ID'),
                test_date=int(time.time()))

        manifest_tests = manifest.get(**testargs)

        for i in manifest_tests:
            self.run_test(i["path"], testtype)
        return

    self.logger.info('TEST-START %s' % os.path.basename(test))

    # Dispatch to the first registered handler matching this file name.
    for handler in self.test_handlers:
        if handler.match(os.path.basename(test)):
            handler.add_tests_to_suite(mod_name, filepath, suite,
                                       testloader, self.marionette)
            break

    if suite.countTestCases():
        results = MarionetteTextTestRunner(verbosity=3,
                                           perf=self.perf).run(suite)
        self.results.append(results)
        self.failed += len(results.failures) + len(results.errors)
        # NOTE(review): 'options' is not defined in this method's scope --
        # this looks like it should be self.perf; confirm before relying on
        # perf submission here.
        if results.perfdata and options.perf:
            self.perfrequest.add_datazilla_result(results.perfdata)
        if hasattr(results, 'skipped'):
            self.todo += len(results.skipped) + len(results.expectedFailures)
        self.passed += results.passed
        for failure in results.failures + results.errors:
            self.failures.append((results.getInfo(failure[0]),
                                  failure[1],
                                  'TEST-UNEXPECTED-FAIL'))
        # NOTE(review): guard checks 'unexpectedSuccess' (singular) but the
        # attribute used below is 'unexpectedSuccesses' -- likely dead code.
        if hasattr(results, 'unexpectedSuccess'):
            self.failed += len(results.unexpectedSuccesses)
            for failure in results.unexpectedSuccesses:
                self.failures.append((results.getInfo(failure[0]),
                                      failure[1],
                                      'TEST-UNEXPECTED-PASS'))
def run_test(self, test):
    """Run a single test file, a directory, or an .ini manifest, using
    self.type for manifest filtering; stops early on a detected crash.
    """
    if not self.httpd:
        # NOTE: Python 2 print statement.
        print "starting httpd"
        self.start_httpd()

    if not self.marionette:
        self.start_marionette()

    filepath = os.path.abspath(test)

    if os.path.isdir(filepath):
        # Recurse into the directory; abort the walk if a crash occurred.
        for root, dirs, files in os.walk(filepath):
            for filename in files:
                if ((filename.startswith('test_') or
                     filename.startswith('browser_')) and
                        (filename.endswith('.py') or
                         filename.endswith('.js'))):
                    filepath = os.path.join(root, filename)
                    self.run_test(filepath)
                    if self.marionette.check_for_crash():
                        return
        return

    mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

    testloader = unittest.TestLoader()
    suite = unittest.TestSuite()

    if file_ext == '.ini':
        # Translate '+type'/'-type' tokens from self.type into manifest
        # filter kwargs.
        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })

        manifest = TestManifest()
        manifest.read(filepath)

        # Report tests that are active in general but filtered out by
        # device/app as skipped.
        all_tests = manifest.active_tests(disabled=False)
        manifest_tests = manifest.active_tests(disabled=False,
                                               device=self.device,
                                               app=self.appName)
        skip_tests = list(set([x['path'] for x in all_tests]) -
                          set([x['path'] for x in manifest_tests]))
        for skipped in skip_tests:
            self.logger.info('TEST-SKIP | %s | device=%s, app=%s' %
                             (os.path.basename(skipped),
                              self.device,
                              self.appName))
            self.todo += 1

        for i in manifest.get(tests=manifest_tests, **testargs):
            self.run_test(i["path"])
            if self.marionette.check_for_crash():
                return
        return

    self.logger.info('TEST-START %s' % os.path.basename(test))

    # Dispatch to the first registered handler matching this file name.
    for handler in self.test_handlers:
        if handler.match(os.path.basename(test)):
            handler.add_tests_to_suite(mod_name, filepath, suite, testloader,
                                       self.marionette, self.testvars,
                                       **self.test_kwargs)
            break

    if suite.countTestCases():
        runner = self.textrunnerclass(verbosity=3,
                                      marionette=self.marionette)
        results = runner.run(suite)
        self.results.append(results)

        self.failed += len(results.failures) + len(results.errors)
        if hasattr(results, 'skipped'):
            self.todo += len(results.skipped)
        self.passed += results.passed
        for failure in results.failures + results.errors:
            self.failures.append((results.getInfo(failure[0]),
                                  failure[1],
                                  'TEST-UNEXPECTED-FAIL'))
        if hasattr(results, 'unexpectedSuccesses'):
            self.failed += len(results.unexpectedSuccesses)
            for failure in results.unexpectedSuccesses:
                # NOTE(review): this appends a 2-tuple while the FAIL path
                # above appends a 3-tuple -- confirm consumers of
                # self.failures tolerate both shapes.
                self.failures.append((results.getInfo(failure),
                                      'TEST-UNEXPECTED-PASS'))
        if hasattr(results, 'expectedFailures'):
            self.passed += len(results.expectedFailures)