Example #1
    def runTests(self):
        self.startup()
        if isinstance(self.options.manifestFile, TestManifest):
            mp = self.options.manifestFile
        else:
            mp = TestManifest(strict=False)
            mp.read(self.options.robocopIni)
        filters = []
        if self.options.totalChunks:
            filters.append(
                chunk_by_slice(self.options.thisChunk,
                               self.options.totalChunks))
        robocop_tests = mp.active_tests(exists=False,
                                        filters=filters,
                                        **mozinfo.info)
        if not self.options.autorun:
            # Force a single loop iteration. The iteration will start Fennec and
            # the httpd server, but not actually run a test.
            self.options.test_paths = [robocop_tests[0]['name']]
        active_tests = []
        for test in robocop_tests:
            if self.options.test_paths and test['name'] not in self.options.test_paths:
                continue
            if 'disabled' in test:
                self.log.info('TEST-INFO | skipping %s | %s' %
                              (test['name'], test['disabled']))
                continue
            active_tests.append(test)
        self.log.suite_start([t['name'] for t in active_tests])
        worstTestResult = None
        for test in active_tests:
            result = self.runSingleTest(test)
            if worstTestResult is None or worstTestResult == 0:
                worstTestResult = result
        if worstTestResult is None:
            self.log.warning(
                "No tests run. Did you pass an invalid TEST_PATH?")
            worstTestResult = 1
        else:
            print("INFO | runtests.py | Test summary: start.")
            logResult = self.logTestSummary()
            print("INFO | runtests.py | Test summary: end.")
            if worstTestResult == 0:
                worstTestResult = logResult
        return worstTestResult
Example #2
    def run_xpcshell_test(self, test_objects=None, **params):
        from mozbuild.controller.building import BuildDriver

        if test_objects is not None:
            from manifestparser import TestManifest
            m = TestManifest()
            m.tests.extend(test_objects)
            params['manifest'] = m

        driver = self._spawn(BuildDriver)
        driver.install_tests()

        # We should probably have a utility function to ensure the tree is
        # ready to run tests. Until then, we just create the state dir (in
        # case the tree wasn't built with mach).
        self._ensure_state_subdir_exists('.')

        if not params.get('log'):
            log_defaults = {self._mach_context.settings['test']['format']: sys.stdout}
            fmt_defaults = {
                "level": self._mach_context.settings['test']['level'],
                "verbose": True
            }
            params['log'] = structured.commandline.setup_logging(
                "XPCShellTests", params, log_defaults, fmt_defaults)

        if not params['threadCount']:
            params['threadCount'] = int((cpu_count() * 3) / 2)

        if conditions.is_android(self) or self.substs.get('MOZ_BUILD_APP') == 'b2g':
            from mozrunner.devices.android_device import verify_android_device, get_adb_path
            device_serial = params.get('deviceSerial')
            verify_android_device(self, network=True, device_serial=device_serial)
            if not params['adbPath']:
                params['adbPath'] = get_adb_path(self)
            xpcshell = self._spawn(AndroidXPCShellRunner)
        else:
            xpcshell = self._spawn(XPCShellRunner)
        xpcshell.cwd = self._mach_context.cwd

        try:
            return xpcshell.run_test(**params)
        except InvalidTestPathError as e:
            print(e.message)
            return 1
Example #3
    def test_manifest_subsuites(self):
        """
        test subsuites and conditional subsuites
        """
        relative_path = os.path.join(here, 'subsuite.ini')
        manifest = TestManifest(manifests=(relative_path, ))
        info = {'foo': 'bar'}

        # 6 tests total
        tests = manifest.active_tests(exists=False, **info)
        self.assertEquals(len(tests), 6)

        # only 3 tests for subsuite bar when foo==bar
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite('bar')],
                                      **info)
        self.assertEquals(len(tests), 3)

        # only 1 test for subsuite baz, regardless of conditions
        other = {'something': 'else'}
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite('baz')],
                                      **info)
        self.assertEquals(len(tests), 1)
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite('baz')],
                                      **other)
        self.assertEquals(len(tests), 1)

        # 5 tests match when the condition doesn't match (all tests except
        # the unconditional subsuite)
        info = {'foo': 'blah'}
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite()],
                                      **info)
        self.assertEquals(len(tests), 5)

        # test for illegal subsuite value
        manifest.tests[0][
            'subsuite'] = 'subsuite=bar,foo=="bar",type="nothing"'
        with self.assertRaises(ParseError):
            manifest.active_tests(exists=False,
                                  filters=[subsuite('foo')],
                                  **info)
Example #4
def find_manifest_dirs(topsrcdir, manifests):
    """Routine to retrieve directories specified in a manifest, relative to topsrcdir.

    It does not recurse into manifests, as we currently have no need for that.
    """
    dirs = set()

    for p in manifests:
        p = os.path.join(topsrcdir, p)

        if p.endswith(".ini"):
            test_manifest = TestManifest()
            test_manifest.read(p)
            dirs |= set([os.path.dirname(m) for m in test_manifest.manifests()])

        elif p.endswith(".list"):
            m = ReftestManifest()
            m.load(p)
            dirs |= m.dirs

        else:
            raise Exception(
                '"{}" is not a supported manifest format.'.format(
                    os.path.splitext(p)[1]
                )
            )

    dirs = {mozpath.normpath(d[len(topsrcdir) :]).lstrip("/") for d in dirs}

    # Filter out children captured by parent directories because duplicates
    # will confuse things later on.
    def parents(p):
        while True:
            p = mozpath.dirname(p)
            if not p:
                break
            yield p

    seen = set()
    for d in sorted(dirs, key=len):
        if not any(p in seen for p in parents(d)):
            seen.add(d)

    return sorted(seen)
Example #5
    def test_testmanifest(self):
        # Test filtering based on platform:
        filter_example = os.path.join(here, 'filter-example.ini')
        manifest = TestManifest(manifests=(filter_example,))
        self.assertEqual([i['name'] for i in manifest.active_tests(os='win', disabled=False, exists=False)],
                         ['windowstest', 'fleem'])
        self.assertEqual([i['name'] for i in manifest.active_tests(os='linux', disabled=False, exists=False)],
                         ['fleem', 'linuxtest'])

        # Look for existing tests.  There is only one:
        self.assertEqual([i['name'] for i in manifest.active_tests()],
                         ['fleem'])

        # You should be able to expect failures:
        last_test = manifest.active_tests(exists=False, toolkit='gtk2')[-1]
        self.assertEqual(last_test['name'], 'linuxtest')
        self.assertEqual(last_test['expected'], 'pass')
        last_test = manifest.active_tests(exists=False, toolkit='cocoa')[-1]
        self.assertEqual(last_test['expected'], 'fail')
Example #6
    def convert_ini_manifest_to_json(cls, manifest_path):
        manifest = TestManifest([manifest_path])

        whitelist = [t['path'] for t in manifest.active_tests(disabled=False)]
        blacklist = [t for t in manifest.paths() if t not in whitelist]

        whitelist.insert(
            0,
            os.path.join(gaia_dir, 'shared', 'test', 'integration',
                         'setup.js'))

        # Convert the absolute test paths to paths relative to the gaia dir.
        whitelist = [os.path.relpath(p, gaia_dir) for p in whitelist]
        blacklist = [os.path.relpath(p, gaia_dir) for p in blacklist]
        contents = {'whitelist': whitelist}

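        # Only the generated name is kept here; the NamedTemporaryFile object is
        # discarded, and the file is (re)written below with the JSON contents.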
        manifest_path = tempfile.NamedTemporaryFile(suffix='.json').name
        with open(manifest_path, 'w') as f:
            f.write(json.dumps(contents, indent=2))
        return manifest_path
Example #7
    def run_robocop_test(self, context, tests, suite=None, **kwargs):
        host_ret = verify_host_bin()
        if host_ret != 0:
            return host_ret

        import imp
        path = os.path.join(self.mochitest_dir, 'runrobocop.py')
        with open(path, 'r') as fh:
            imp.load_module('runrobocop', fh, path,
                            ('.py', 'r', imp.PY_SOURCE))
        import runrobocop

        options = Namespace(**kwargs)

        from manifestparser import TestManifest
        manifest = TestManifest()
        manifest.tests.extend(tests)
        options.manifestFile = manifest

        return runrobocop.run_test_harness(options)
Example #8
    def get_manifests(self, suite, mozinfo):
        mozinfo = dict(mozinfo)
        # Compute all tests for the given suite/subsuite.
        tests = self.get_tests(suite)

        if "web-platform-tests" in suite:
            manifests = set()
            for t in tests:
                manifests.add(t["manifest"])
            return {"active": list(manifests), "skipped": []}

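        # Manifests for every test in the suite, before any mozinfo filtering.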
        manifests = set(chunk_by_runtime.get_manifest(t) for t in tests)

        # Compute the active tests.
        m = TestManifest()
        m.tests = tests
        tests = m.active_tests(disabled=False, exists=False, **mozinfo)
        active = set(chunk_by_runtime.get_manifest(t) for t in tests)
        skipped = manifests - active
        return {"active": list(active), "skipped": list(skipped)}
Example #9
def get_chunked_manifests(flavor, subsuite, chunks, mozinfo):
    """Compute which manifests should run in which chunks with the given category
    of tests.

    Args:
        flavor (str): The suite to run. Values are defined by the 'build_flavor' key
            in `moztest.resolve.TEST_SUITES`.
        subsuite (str): The subsuite to run or 'undefined' to denote no subsuite.
        chunks (int): Number of chunks to split manifests across.
        mozinfo (frozenset): Set of data in the form of (<key>, <value>) used
                             for filtering.

    Returns:
        A list of length `chunks`, where each item is the list of manifests
        that should run in the corresponding chunk.
    """
    mozinfo = dict(mozinfo)
    # Compute all tests for the given suite/subsuite.
    tests = get_tests(flavor, subsuite)
    all_manifests = set(t['manifest_relpath'] for t in tests)

    # Compute only the active tests.
    m = TestManifest()
    m.tests = tests
    tests = m.active_tests(disabled=False, exists=False, **mozinfo)
    active_manifests = set(t['manifest_relpath'] for t in tests)

    # Run the chunking algorithm.
    chunked_manifests = [
        c[1] for c in chunk_by_runtime(
            None,
            chunks,
            get_runtimes(mozinfo['os'])
        ).get_chunked_manifests(tests)
    ]

    # Add all skipped manifests to the first chunk so they still show up in the
    # logs. They won't impact runtime much.
    skipped_manifests = all_manifests - active_manifests
    chunked_manifests[0].extend(skipped_manifests)
    return chunked_manifests
Example #10
def main(args=sys.argv[1:]):

    # read the manifest
    if args:
        manifests = args
    else:
        manifests = [os.path.join(here, 'manifest.ini')]
    missing = []
    for manifest_file in manifests:
        # ensure manifests exist
        if not os.path.exists(manifest_file):
            missing.append(manifest_file)
    assert not missing, 'manifest%s not found: %s' % (
        (len(manifests) == 1 and '' or 's'), ', '.join(missing))
    manifest = TestManifest(manifests=manifests)
    unittest_results = test_all(manifest)
    results = TestResultCollection.from_unittest_results(
        None, unittest_results)

    # exit according to results
    sys.exit(1 if results.num_failures else 0)
Example #11
    def _get_subtests_from_ini(self, manifest_path, suite_name):
        """
        Returns a list of (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return list: the list of the tests
        """
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtest_list = {}
        for subtest in test_list:
            subtest_list[subtest["name"]] = subtest["manifest"]
            self._urls[subtest["name"]] = {
                "type": suite_name,
                "url": subtest["test_url"],
            }

        self._urls = collections.OrderedDict(
            sorted(self._urls.items(), key=lambda t: len(t[0])))

        return subtest_list
Example #12
    def run_xpcshell_test(self, test_objects=None, **params):
        from mozbuild.controller.building import BuildDriver

        if test_objects is not None:
            from manifestparser import TestManifest
            m = TestManifest()
            m.tests.extend(test_objects)
            params['manifest'] = m

        driver = self._spawn(BuildDriver)
        driver.install_tests(test_objects)

        # We should probably have a utility function to ensure the tree is
        # ready to run tests. Until then, we just create the state dir (in
        # case the tree wasn't built with mach).
        self._ensure_state_subdir_exists('.')


        params['log'] = structured.commandline.setup_logging("XPCShellTests",
                                                             params,
                                                             {"mach": sys.stdout},
                                                             {"verbose": True})

        if conditions.is_android(self):
            from mozrunner.devices.android_device import verify_android_device
            verify_android_device(self)
            xpcshell = self._spawn(AndroidXPCShellRunner)
        elif conditions.is_b2g(self):
            xpcshell = self._spawn(B2GXPCShellRunner)
            params['b2g_home'] = self.b2g_home
            params['device_name'] = self.device_name
        else:
            xpcshell = self._spawn(XPCShellRunner)
        xpcshell.cwd = self._mach_context.cwd

        try:
            return xpcshell.run_test(**params)
        except InvalidTestPathError as e:
            print(e.message)
            return 1
Example #13
    def buildTestPath(self, options):
        """ Build the url path to the specific test harness and test file or directory
        Build a manifest of tests to run and write out a json file for the harness to read
    """
        if options.manifestFile and os.path.isfile(options.manifestFile):
            manifest = TestManifest(strict=False)
            manifest.read(options.manifestFile)
            # Bug 883858 - return all tests including disabled tests
            tests = manifest.active_tests(disabled=False, **mozinfo.info)
            paths = []
            for test in tests:
                tp = test['path'].split(self.getTestRoot(options),
                                        1)[1].strip('/')

                # Filter out tests if we are using --test-path
                if options.testPath and not tp.startswith(options.testPath):
                    continue

                paths.append({'path': tp})

            # Bug 883865 - add this functionality into manifestDestiny
            with open('tests.json', 'w') as manifestFile:
                manifestFile.write(json.dumps({'tests': paths}))
            options.manifestFile = 'tests.json'

        testHost = "http://mochi.test:8888"
        testURL = ("/").join([testHost, self.TEST_PATH, options.testPath])
        if os.path.isfile(
                os.path.join(self.oldcwd, os.path.dirname(__file__),
                             self.TEST_PATH,
                             options.testPath)) and options.repeat > 0:
            testURL = ("/").join([testHost, self.PLAIN_LOOP_PATH])
        if options.chrome or options.a11y:
            testURL = ("/").join([testHost, self.CHROME_PATH])
        elif options.browserChrome:
            testURL = "about:blank"
        elif options.ipcplugins:
            testURL = ("/").join(
                [testHost, self.TEST_PATH, "dom/plugins/test"])
        return testURL
Example #14
    def _get_subtests_from_ini(self, manifest_path, suite_name):
        """
        Returns a list of (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return list: the list of the tests
        """
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtests = {}
        for subtest in test_list:
            subtests[subtest["name"]] = subtest["manifest"]
            self._urls.setdefault(suite_name, []).append(
                {
                    "test_name": subtest["name"],
                    "url": subtest["test_url"],
                }
            )

        self._urls[suite_name].sort(key=lambda item: item["test_name"])

        return subtests
Example #15
def print_test_dirs(topsrcdir, manifest_file):
    """
    Simple routine which prints the paths of directories specified
    in a Marionette manifest, relative to topsrcdir.  This does not recurse 
    into manifests, as we currently have no need for that.
    """

    dirs = set()
    # output the directory of this (parent) manifest
    topsrcdir = os.path.abspath(topsrcdir)
    scriptdir = os.path.abspath(os.path.dirname(__file__))
    dirs.add(scriptdir[len(topsrcdir) + 1:])

    # output the directories of all the other manifests
    manifest = TestManifest()
    manifest.read(manifest_file)
    for i in manifest.get():
        d = os.path.dirname(i['manifest'])[len(topsrcdir) + 1:]
        dirs.add(d)
    for path in dirs:
        path = path.replace('\\', '/')
        print(path)
Example #16
    def get_manifests(self, flavor, subsuite, mozinfo):
        mozinfo = dict(mozinfo)
        # Compute all tests for the given suite/subsuite.
        tests = self.get_tests(flavor, subsuite)

        if flavor == "web-platform-tests":
            manifests = set()
            for t in tests:
                group = self.get_wpt_group(t)
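                # Note: wpt_group_translation is not defined in this snippet; it is
                # presumably a manifest -> set-of-groups mapping defined elsewhere.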
                wpt_group_translation[t['manifest']].add(group)
                manifests.add(t['manifest'])

            return {"active": list(manifests), "skipped": []}

        manifests = set(chunk_by_runtime.get_manifest(t) for t in tests)

        # Compute the active tests.
        m = TestManifest()
        m.tests = tests
        tests = m.active_tests(disabled=False, exists=False, **mozinfo)
        active = set(chunk_by_runtime.get_manifest(t) for t in tests)
        skipped = manifests - active
        return {"active": list(active), "skipped": list(skipped)}
Example #17
    def _get_subtests_from_ini(self, manifest_path, suite_name):
        """
        Returns a list of (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return list: the list of the tests
        """
        desc_exclusion = ["here", "manifest", "manifest_relpath", "path", "relpath"]
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtests = {}
        for subtest in test_list:
            subtests[subtest["name"]] = subtest["manifest"]

            description = {}
            for key, value in subtest.items():
                if key not in desc_exclusion:
                    description[key] = value
            self._descriptions.setdefault(suite_name, []).append(description)

        self._descriptions[suite_name].sort(key=lambda item: item["name"])

        return subtests
Example #18
def test_all_js(tests, options):
    print "Running JS Tests"
    # We run each test in its own instance since these are harness tests.
    # That just seems safer, no opportunity for cross-talk since
    # we are sorta using the framework to test itself
    results = JSResults()

    for t in tests:

        # write a temporary manifest
        manifest = TestManifest()
        manifest.tests = [t]
        fd, filename = tempfile.mkstemp(suffix='.ini')
        os.close(fd)
        fp = open(filename, 'w')
        manifest.write(fp=fp)
        fp.close()

        # get CLI arguments to mozmill
        args = []
        if options.binary:
            args.extend(['-b', options.binary])
        args.append('--console-level=DEBUG')        
        args.append('-m')
        args.append(filename)

        # run the test
        proc = ProcessHandler("mozmill", args=args)
        proc.run()
        status = proc.waitForFinish(timeout=300)
        command = proc.commandline
        results.acquire(t['name'], proc.output, status, command)

        # remove the temporary manifest
        os.remove(filename)
        
    return results
Example #19
    def get_test_list(self):
        """
        Returns a dictionary containing the tests that are in perftest.ini manifest.

        :return dict: A dictionary with the following structure: {
                "suite_name": {
                    'perftest_test1',
                    'perftest_test2',
                },
            }
        """
        for path in pathlib.Path(self.workspace_dir).rglob("perftest.ini"):
            if "obj-" in str(path):
                continue
            suite_name = re.sub(self.workspace_dir, "", os.path.dirname(path))

            # If the workspace dir doesn't end with a forward-slash,
            # the substitution above won't work completely
            if suite_name.startswith("/") or suite_name.startswith("\\"):
                suite_name = suite_name[1:]

            # We have to add new paths to the logger as we search
            # because mozperftest tests exist in multiple places in-tree
            PerfDocLogger.PATHS.append(suite_name)

            # Get the tests from perftest.ini
            test_manifest = TestManifest([str(path)], strict=False)
            test_list = test_manifest.active_tests(exists=False,
                                                   disabled=False)
            for test in test_list:
                si = ScriptInfo(test["path"])
                self.script_infos[si["name"]] = si
                self._test_list.setdefault(suite_name.replace("\\", "/"),
                                           {}).update({si["name"]: str(path)})

        return self._test_list
Example #20
    def run_b2g_test(self, context, tests=None, suite='mochitest', **kwargs):
        """Runs a b2g mochitest."""
        if context.target_out:
            host_webapps_dir = os.path.join(context.target_out, 'data',
                                            'local', 'webapps')
            if not os.path.isdir(
                    os.path.join(host_webapps_dir,
                                 'test-container.gaiamobile.org')):
                print(ENG_BUILD_REQUIRED.format(host_webapps_dir))
                sys.exit(1)

        # TODO without os.chdir, chained imports fail below
        os.chdir(self.mochitest_dir)

        # The imp module can spew warnings if the modules below have
        # already been imported, ignore them.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')

            import imp
            path = os.path.join(self.mochitest_dir, 'runtestsb2g.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                                ('.py', 'r', imp.PY_SOURCE))

            import mochitest

        options = Namespace(**kwargs)

        from manifestparser import TestManifest
        if tests:
            manifest = TestManifest()
            manifest.tests.extend(tests)
            options.manifestFile = manifest

        return mochitest.run_test_harness(options)
Example #21
    def __init__(self, args):

        # event handler plugin names
        self.handlers = {}
        for handler_class in handlers.handlers():
            name = getattr(handler_class, 'name', handler_class.__name__)
            self.handlers[name] = handler_class

        self.jsbridge_port = jsbridge.find_port()

        # add and parse options
        mozrunner.CLI.__init__(self, args)

        # Do not allow manifests and tests specified at the same time
        if self.options.manifests and self.options.tests:
            self.parser.error("Options %s and %s are mutually exclusive." %
                              (self.parser.get_option('-t'),
                               self.parser.get_option('-m')))

        # read tests from manifests (if any)
        self.manifest = TestManifest(manifests=self.options.manifests,
                                     strict=False)

        # expand user directory and check existence for the test
        for test in self.options.tests:
            testpath = os.path.expanduser(test)
            realpath = os.path.realpath(testpath)
            if not os.path.exists(testpath):
                raise Exception("Not a valid test file/directory: %s" % test)

            # collect the tests
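            # testname() maps each collected file back to a name expressed
            # relative to the path the user passed on the command line.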
            def testname(t):
                if os.path.isdir(realpath):
                    return os.path.join(test, os.path.relpath(t, testpath))
                return test
            tests = [{'name': testname(t), 'path': t}
                     for t in collect_tests(testpath)]
            self.manifest.tests.extend(tests)

        # list the tests and exit if specified
        if self.options.list_tests:
            for test in self.manifest.tests:
                print(test['path'])
            self.parser.exit()

        # instantiate event handler plugins
        self.event_handlers = []
        for name, handler_class in self.handlers.items():
            if name in self.options.disable:
                continue
            handler = handlers.instantiate_handler(handler_class, self.options)
            if handler is not None:
                self.event_handlers.append(handler)
        for handler in self.options.handlers:
            # user handlers
            try:
                handler_class = handlers.load_handler(handler)
            except BaseException as e:
                self.parser.error(str(e))
            _handler = handlers.instantiate_handler(handler_class,
                                                    self.options)
            if _handler is not None:
                self.event_handlers.append(_handler)

        # if in manual mode, ensure we're interactive
        if self.options.manual:
            self.options.interactive = True
Example #22
    def run_desktop_test(self,
                         context,
                         suite=None,
                         test_paths=None,
                         debugger=None,
                         debugger_args=None,
                         slowscript=False,
                         shuffle=False,
                         keep_open=False,
                         rerun_failures=False,
                         no_autorun=False,
                         repeat=0,
                         run_until_failure=False,
                         slow=False,
                         chunk_by_dir=0,
                         total_chunks=None,
                         this_chunk=None,
                         jsdebugger=False,
                         debug_on_failure=False,
                         start_at=None,
                         end_at=None,
                         e10s=False,
                         dmd=False,
                         dump_output_directory=None,
                         dump_about_memory_after_test=False,
                         dump_dmd_after_test=False,
                         install_extension=None,
                         quiet=False,
                         **kwargs):
        """Runs a mochitest.

        test_paths are path to tests. They can be a relative path from the
        top source directory, an absolute filename, or a directory containing
        test files.

        suite is the type of mochitest to run. It can be one of ('plain',
        'chrome', 'browser', 'metro', 'a11y').

        debugger is a program name or path to a binary (presumably a debugger)
        to run the test in. e.g. 'gdb'

        debugger_args are the arguments passed to the debugger.

        slowscript is true if the user has requested the SIGSEGV mechanism of
        invoking the slow script dialog.

        shuffle is whether test order should be shuffled (defaults to false).

        keep_open denotes whether to keep the browser open after tests
        complete.
        """
        if rerun_failures and test_paths:
            print('Cannot specify both --rerun-failures and a test path.')
            return 1

        # Need to call relpath before os.chdir() below.
        if test_paths:
            test_paths = [
                self._wrap_path_argument(p).relpath() for p in test_paths
            ]

        failure_file_path = os.path.join(self.statedir,
                                         'mochitest_failures.json')

        if rerun_failures and not os.path.exists(failure_file_path):
            print('No failure file present. Did you run mochitests before?')
            return 1

        from StringIO import StringIO

        # runtests.py is ambiguous, so we load the file/module manually.
        if 'mochitest' not in sys.modules:
            import imp
            path = os.path.join(self.mochitest_dir, 'runtests.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                                ('.py', 'r', imp.PY_SOURCE))

        import mozinfo
        import mochitest
        from manifestparser import TestManifest
        from mozbuild.testing import TestResolver

        # This is required to make other components happy. Sad, isn't it?
        os.chdir(self.topobjdir)

        # Automation installs its own stream handler to stdout. Since we want
        # all logging to go through us, we just remove their handler.
        remove_handlers = [
            l for l in logging.getLogger().handlers
            if isinstance(l, logging.StreamHandler)
        ]
        for handler in remove_handlers:
            logging.getLogger().removeHandler(handler)

        runner = mochitest.Mochitest()

        opts = mochitest.MochitestOptions()
        options, args = opts.parse_args([])

        flavor = suite

        # Need to set the suite options before verifyOptions below.
        if suite == 'plain':
            # Don't need additional options for plain.
            flavor = 'mochitest'
        elif suite == 'chrome':
            options.chrome = True
        elif suite == 'browser':
            options.browserChrome = True
            flavor = 'browser-chrome'
        elif suite == 'metro':
            options.immersiveMode = True
            options.browserChrome = True
        elif suite == 'a11y':
            options.a11y = True
        elif suite == 'webapprt-content':
            options.webapprtContent = True
            options.app = self.get_webapp_runtime_path()
        elif suite == 'webapprt-chrome':
            options.webapprtChrome = True
            options.app = self.get_webapp_runtime_path()
            options.browserArgs.append("-test-mode")
        else:
            raise Exception('None or unrecognized mochitest suite type.')

        if dmd:
            options.dmdPath = self.bin_dir

        options.autorun = not no_autorun
        options.closeWhenDone = not keep_open
        options.slowscript = slowscript
        options.shuffle = shuffle
        options.consoleLevel = 'INFO'
        options.repeat = repeat
        options.runUntilFailure = run_until_failure
        options.runSlower = slow
        options.testingModulesDir = os.path.join(self.tests_dir, 'modules')
        options.extraProfileFiles.append(os.path.join(self.distdir, 'plugins'))
        options.symbolsPath = os.path.join(self.distdir,
                                           'crashreporter-symbols')
        options.chunkByDir = chunk_by_dir
        options.totalChunks = total_chunks
        options.thisChunk = this_chunk
        options.jsdebugger = jsdebugger
        options.debugOnFailure = debug_on_failure
        options.startAt = start_at
        options.endAt = end_at
        options.e10s = e10s
        options.dumpAboutMemoryAfterTest = dump_about_memory_after_test
        options.dumpDMDAfterTest = dump_dmd_after_test
        options.dumpOutputDirectory = dump_output_directory
        options.quiet = quiet

        options.failureFile = failure_file_path
        if install_extension is not None:
            options.extensionsToInstall = [
                os.path.join(self.topsrcdir, install_extension)
            ]

        for k, v in kwargs.iteritems():
            setattr(options, k, v)

        if test_paths:
            resolver = self._spawn(TestResolver)

            tests = list(
                resolver.resolve_tests(paths=test_paths,
                                       flavor=flavor,
                                       cwd=context.cwd))

            if not tests:
                print('No tests could be found in the path specified. Please '
                      'specify a path that is a test file or is a directory '
                      'containing tests.')
                return 1

            manifest = TestManifest()
            manifest.tests.extend(tests)

            options.manifestFile = manifest

        if rerun_failures:
            options.testManifest = failure_file_path

        if debugger:
            options.debugger = debugger

        if debugger_args:
            if options.debugger is None:
                print("--debugger-args passed, but no debugger specified.")
                return 1
            options.debuggerArgs = debugger_args

        options = opts.verifyOptions(options, runner)

        if options is None:
            raise Exception('mochitest option validator failed.')

        # We need this to enable colorization of output.
        self.log_manager.enable_unstructured()

        # Output processing is a little funky here. The old make targets
        # grepped the log output from TEST-UNEXPECTED-* and printed these lines
        # after test execution. Ideally the test runner would expose a Python
        # API for obtaining test results and we could just format failures
        # appropriately. Unfortunately, it doesn't yet do that. So, we capture
        # all output to a buffer then "grep" the buffer after test execution.
        # Bug 858197 tracks a Python API that would facilitate this.
        test_output = StringIO()
        handler = logging.StreamHandler(test_output)
        handler.addFilter(UnexpectedFilter())
        handler.setFormatter(StructuredHumanFormatter(0, write_times=False))
        logging.getLogger().addHandler(handler)

        result = runner.runTests(options)

        # Need to remove our buffering handler before we echo failures or else
        # it will catch them again!
        logging.getLogger().removeHandler(handler)
        self.log_manager.disable_unstructured()

        if test_output.getvalue():
            result = 1
            for line in test_output.getvalue().splitlines():
                self.log(logging.INFO, 'unexpected', {'msg': line}, '{msg}')

        return result
Example #23
    def run_python_tests(self,
                         tests=None,
                         test_objects=None,
                         subsuite=None,
                         verbose=False,
                         jobs=None,
                         python=None,
                         exitfirst=False,
                         extra=None,
                         **kwargs):
        self._activate_test_virtualenvs(python)

        if test_objects is None:
            from moztest.resolve import TestResolver
            resolver = self._spawn(TestResolver)
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=tests, flavor='python')
        else:
            # We've received test_objects from |mach test|. We need to ignore
            # the subsuite because python-tests don't use this key like other
            # harnesses do and |mach test| doesn't realize this.
            subsuite = None

        mp = TestManifest()
        mp.tests.extend(test_objects)

        filters = []
        if subsuite == 'default':
            filters.append(mpf.subsuite(None))
        elif subsuite:
            filters.append(mpf.subsuite(subsuite))

        tests = mp.active_tests(filters=filters,
                                disabled=False,
                                python=self.virtualenv_manager.version_info[0],
                                **mozinfo.info)

        if not tests:
            submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
            message = "TEST-UNEXPECTED-FAIL | No tests collected " + \
                      "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
            self.log(logging.WARN, 'python-test', {}, message)
            return 1

        parallel = []
        sequential = []
        os.environ.setdefault('PYTEST_ADDOPTS', '')

        if extra:
            os.environ['PYTEST_ADDOPTS'] += " " + " ".join(extra)

        if exitfirst:
            sequential = tests
            os.environ['PYTEST_ADDOPTS'] += " -x"
        else:
            for test in tests:
                if test.get('sequential'):
                    sequential.append(test)
                else:
                    parallel.append(test)

        self.jobs = jobs or cpu_count()
        self.terminate = False
        self.verbose = verbose

        return_code = 0

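        # Log each finished test's output and keep the first non-zero return
        # code so that a single failure fails the whole run.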
        def on_test_finished(result):
            output, ret, test_path = result

            for line in output:
                self.log(logging.INFO, 'python-test', {'line': line.rstrip()},
                         '{line}')

            if ret and not return_code:
                self.log(logging.ERROR, 'python-test', {
                    'test_path': test_path,
                    'ret': ret
                }, 'Setting retcode to {ret} from {test_path}')
            return return_code or ret

        with ThreadPoolExecutor(max_workers=self.jobs) as executor:
            futures = [
                executor.submit(self._run_python_test, test)
                for test in parallel
            ]

            try:
                for future in as_completed(futures):
                    return_code = on_test_finished(future.result())
            except KeyboardInterrupt:
                # Hack to force stop currently running threads.
                # https://gist.github.com/clchiou/f2608cbe54403edb0b13
                executor._threads.clear()
                thread._threads_queues.clear()
                raise

        for test in sequential:
            return_code = on_test_finished(self._run_python_test(test))
            if return_code and exitfirst:
                break

        self.log(logging.INFO, 'python-test', {'return_code': return_code},
                 'Return code from mach python-test: {return_code}')
        return return_code
Example #24
    def run_test(self, test, expected='pass'):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()
            if self.emulator:
                self.marionette.emulator.wait_for_homescreen(self.marionette)

        testargs = {}
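        # self.type is a string of '+'/'-' separated flags; each '+flag' becomes
        # a 'true' manifest filter value and each '-flag' a 'false' one.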
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({atype[1:]: 'true'})
                elif atype.startswith('-'):
                    testargs.update({atype[1:]: 'false'})
                else:
                    testargs.update({atype: 'true'})
        oop = testargs.get('oop', False)
        if isinstance(oop, basestring):
            oop = False if oop == 'false' else 'true'

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                if self.shuffle:
                    random.shuffle(files)
                for filename in files:
                    if ((filename.startswith('test_')
                         or filename.startswith('browser_'))
                            and (filename.endswith('.py')
                                 or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            all_tests = manifest.active_tests(exists=False, disabled=False)
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=False,
                                                   device=self.device,
                                                   app=self.appName)
            skip_tests = list(
                set([x['path'] for x in all_tests]) -
                set([x['path'] for x in manifest_tests]))
            for skipped in skip_tests:
                self.logger.info(
                    'TEST-SKIP | %s | device=%s, app=%s' %
                    (os.path.basename(skipped), self.device, self.appName))
                self.todo += 1

            target_tests = manifest.get(tests=manifest_tests, **testargs)
            if self.shuffle:
                random.shuffle(target_tests)
            for i in target_tests:
                self.run_test(i["path"], i["expected"])
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        self.test_kwargs['expected'] = expected
        self.test_kwargs['oop'] = oop
        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name, filepath, suite,
                                           testloader, self.marionette,
                                           self.testvars, **self.test_kwargs)
                break

        if suite.countTestCases():
            runner = self.textrunnerclass(verbosity=3,
                                          marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure), failure.output,
                                      'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccesses'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append(
                        (results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
            if hasattr(results, 'expectedFailures'):
                self.passed += len(results.expectedFailures)
Example #25
    def run_python_tests(self,
                         tests=[],
                         test_objects=None,
                         subsuite=None,
                         verbose=False,
                         stop=False,
                         jobs=1):
        self._activate_virtualenv()

        def find_tests_by_path():
            import glob
            files = []
            for t in tests:
                if t.endswith('.py') and os.path.isfile(t):
                    files.append(t)
                elif os.path.isdir(t):
                    for root, _, _ in os.walk(t):
                        files += glob.glob(mozpath.join(root, 'test*.py'))
                        files += glob.glob(mozpath.join(root, 'unit*.py'))
                else:
                    self.log(logging.WARN, 'python-test', {'test': t},
                             'TEST-UNEXPECTED-FAIL | Invalid test: {test}')
                    if stop:
                        break
            return files

        # Python's unittest, and in particular discover, has problems with
        # clashing namespaces when importing multiple test modules. What follows
        # is a simple way to keep environments separate, at the price of
        # launching Python multiple times. Most tests are run via mozunit,
        # which produces output in the format Mozilla infrastructure expects.
        # Some tests are run via pytest.
        if test_objects is None:
            from moztest.resolve import TestResolver
            resolver = self._spawn(TestResolver)
            if tests:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=tests,
                                                      flavor='python')
            else:
                # Otherwise just run everything in PYTHON_UNITTEST_MANIFESTS
                test_objects = resolver.resolve_tests(flavor='python')

        mp = TestManifest()
        mp.tests.extend(test_objects)

        filters = []
        if subsuite == 'default':
            filters.append(mpf.subsuite(None))
        elif subsuite:
            filters.append(mpf.subsuite(subsuite))

        tests = mp.active_tests(filters=filters,
                                disabled=False,
                                **mozinfo.info)

        if not tests:
            submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
            message = "TEST-UNEXPECTED-FAIL | No tests collected " + \
                      "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
            self.log(logging.WARN, 'python-test', {}, message)
            return 1

        parallel = []
        sequential = []
        for test in tests:
            if test.get('sequential'):
                sequential.append(test)
            else:
                parallel.append(test)

        self.jobs = jobs
        self.terminate = False
        self.verbose = verbose

        return_code = 0

        def on_test_finished(result):
            output, ret, test_path = result

            for line in output:
                self.log(logging.INFO, 'python-test', {'line': line.rstrip()},
                         '{line}')

            if ret and not return_code:
                self.log(logging.ERROR, 'python-test', {
                    'test_path': test_path,
                    'ret': ret
                }, 'Setting retcode to {ret} from {test_path}')
            return return_code or ret

        with ThreadPoolExecutor(max_workers=self.jobs) as executor:
            futures = [
                executor.submit(self._run_python_test, test['path'])
                for test in parallel
            ]

            try:
                for future in as_completed(futures):
                    return_code = on_test_finished(future.result())
            except KeyboardInterrupt:
                # Hack to force stop currently running threads.
                # https://gist.github.com/clchiou/f2608cbe54403edb0b13
                executor._threads.clear()
                thread._threads_queues.clear()
                raise

        for test in sequential:
            return_code = on_test_finished(self._run_python_test(test['path']))

        self.log(logging.INFO, 'python-test', {'return_code': return_code},
                 'Return code from mach python-test: {return_code}')
        return return_code
Example #26
    def __init__(self, options, **kwargs):
        self.options = options
        self.server = None
        self.logger = mozlog.getLogger('PEP')

        # create the profile
        enable_proxy = False
        locations = ServerLocations()
        if self.options.proxyLocations:
            if not self.options.serverPath:
                self.logger.warning('Can\'t set up proxy without server path')
            else:
                enable_proxy = True
                for proxyLocation in self.options.proxyLocations:
                    locations.read(proxyLocation, False)
                locations.add_host(host='127.0.0.1',
                                   port=self.options.serverPort,
                                   options='primary,privileged')

        self.profile = self.profile_class(
            profile=self.options.profilePath,
            addons=[os.path.join(here, 'extension')],
            locations=locations,
            proxy=enable_proxy)

        # fork a server to serve the test related files
        if self.options.serverPath:
            self.runServer()

        tests = []
        # TODO is there a better way of doing this?
        if self.options.testPath.endswith('.js'):
            # a single test file was passed in
            testObj = {}
            testObj['path'] = os.path.realpath(self.options.testPath)
            testObj['name'] = os.path.basename(self.options.testPath)
            testObj['here'] = os.path.dirname(testObj['path'])
            tests.append(testObj)
        else:
            # a test manifest was passed in
            # open and convert the manifest to json
            manifest = TestManifest()
            manifest.read(self.options.testPath)
            tests = manifest.get()

        # create a manifest object to be read by the JS side
        manifestObj = {}
        manifestObj['tests'] = tests
        manifestObj['options'] = options.__dict__

        # write manifest to a JSON file
        jsonManifest = open(os.path.join(here, 'manifest.json'), 'w')
        jsonManifest.write(json.dumps(manifestObj))
        jsonManifest.close()

        # setup environment
        env = os.environ.copy()
        env['MOZ_INSTRUMENT_EVENT_LOOP'] = '1'
        env['MOZ_INSTRUMENT_EVENT_LOOP_THRESHOLD'] = str(
            options.tracerThreshold)
        env['MOZ_INSTRUMENT_EVENT_LOOP_INTERVAL'] = str(options.tracerInterval)
        env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'

        # construct the browser arguments
        cmdargs = []
        # TODO Make browserArgs a list
        cmdargs.extend(self.options.browserArgs)
        cmdargs.extend(['-pep-start', os.path.realpath(jsonManifest.name)])

        # run with managed process handler
        self.runner = self.runner_class(profile=self.profile,
                                        binary=self.options.binary,
                                        cmdargs=cmdargs,
                                        env=env,
                                        process_class=PepProcess)
Example #27
    def run_desktop_test(
            self,
            context,
            suite=None,
            test_paths=None,
            debugger=None,
            debugger_args=None,
            slowscript=False,
            screenshot_on_fail=False,
            shuffle=False,
            closure_behaviour='auto',
            rerun_failures=False,
            no_autorun=False,
            repeat=0,
            run_until_failure=False,
            slow=False,
            chunk_by_dir=0,
            total_chunks=None,
            this_chunk=None,
            extraPrefs=[],
            jsdebugger=False,
            debug_on_failure=False,
            start_at=None,
            end_at=None,
            e10s=False,
            strict_content_sandbox=False,
            nested_oop=False,
            dmd=False,
            dump_output_directory=None,
            dump_about_memory_after_test=False,
            dump_dmd_after_test=False,
            install_extension=None,
            quiet=False,
            environment=[],
            app_override=None,
            bisectChunk=None,
            runByDir=False,
            useTestMediaDevices=False,
            timeout=None,
            max_timeouts=None,
            **kwargs):
        """Runs a mochitest.

        test_paths are path to tests. They can be a relative path from the
        top source directory, an absolute filename, or a directory containing
        test files.

        suite is the type of mochitest to run. It can be one of ('plain',
        'chrome', 'browser', 'metro', 'a11y', 'jetpack-package', 'jetpack-addon').

        debugger is a program name or path to a binary (presumably a debugger)
        to run the test in. e.g. 'gdb'

        debugger_args are the arguments passed to the debugger.

        slowscript is true if the user has requested the SIGSEGV mechanism of
        invoking the slow script dialog.

        shuffle is whether test order should be shuffled (defaults to false).

        closure_behaviour denotes whether to keep the browser open after tests
        complete.
        """
        if rerun_failures and test_paths:
            print('Cannot specify both --rerun-failures and a test path.')
            return 1

        # Make absolute paths relative before calling os.chdir() below.
        if test_paths:
            test_paths = [self._wrap_path_argument(
                p).relpath() if os.path.isabs(p) else p for p in test_paths]

        failure_file_path = os.path.join(
            self.statedir,
            'mochitest_failures.json')

        if rerun_failures and not os.path.exists(failure_file_path):
            print('No failure file present. Did you run mochitests before?')
            return 1

        # runtests.py is ambiguous, so we load the file/module manually.
        if 'mochitest' not in sys.modules:
            import imp
            path = os.path.join(self.mochitest_dir, 'runtests.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                                ('.py', 'r', imp.PY_SOURCE))

        import mochitest
        from manifestparser import TestManifest
        from mozbuild.testing import TestResolver

        # This is required to make other components happy. Sad, isn't it?
        os.chdir(self.topobjdir)

        # Automation installs its own stream handler to stdout. Since we want
        # all logging to go through us, we just remove their handler.
        remove_handlers = [l for l in logging.getLogger().handlers
                           if isinstance(l, logging.StreamHandler)]
        for handler in remove_handlers:
            logging.getLogger().removeHandler(handler)

        opts = mochitest.MochitestOptions()
        options, args = opts.parse_args([])

        options.subsuite = ''
        flavor = suite

        # Need to set the suite options before verifyOptions below.
        if suite == 'plain':
            # Don't need additional options for plain.
            flavor = 'mochitest'
        elif suite == 'chrome':
            options.chrome = True
        elif suite == 'browser':
            options.browserChrome = True
            flavor = 'browser-chrome'
        elif suite == 'devtools':
            options.browserChrome = True
            options.subsuite = 'devtools'
        elif suite == 'jetpack-package':
            options.jetpackPackage = True
        elif suite == 'jetpack-addon':
            options.jetpackAddon = True
        elif suite == 'metro':
            options.immersiveMode = True
            options.browserChrome = True
        elif suite == 'a11y':
            options.a11y = True
        elif suite == 'webapprt-content':
            options.webapprtContent = True
            options.app = self.get_webapp_runtime_path()
        elif suite == 'webapprt-chrome':
            options.webapprtChrome = True
            options.app = self.get_webapp_runtime_path()
            options.browserArgs.append("-test-mode")
        else:
            raise Exception('None or unrecognized mochitest suite type.')

        if dmd:
            options.dmdPath = self.bin_dir

        options.autorun = not no_autorun
        options.closeWhenDone = closure_behaviour != 'open'
        options.slowscript = slowscript
        options.screenshotOnFail = screenshot_on_fail
        options.shuffle = shuffle
        options.consoleLevel = 'INFO'
        options.repeat = repeat
        options.runUntilFailure = run_until_failure
        options.runSlower = slow
        options.testingModulesDir = os.path.join(self.tests_dir, 'modules')
        options.extraProfileFiles.append(os.path.join(self.distdir, 'plugins'))
        options.symbolsPath = os.path.join(
            self.distdir,
            'crashreporter-symbols')
        options.chunkByDir = chunk_by_dir
        options.totalChunks = total_chunks
        options.thisChunk = this_chunk
        options.jsdebugger = jsdebugger
        options.debugOnFailure = debug_on_failure
        options.startAt = start_at
        options.endAt = end_at
        options.e10s = e10s
        options.strictContentSandbox = strict_content_sandbox
        options.nested_oop = nested_oop
        options.dumpAboutMemoryAfterTest = dump_about_memory_after_test
        options.dumpDMDAfterTest = dump_dmd_after_test
        options.dumpOutputDirectory = dump_output_directory
        options.quiet = quiet
        options.environment = environment
        options.extraPrefs = extraPrefs
        options.bisectChunk = bisectChunk
        options.runByDir = runByDir
        options.useTestMediaDevices = useTestMediaDevices
        if timeout:
            options.timeout = int(timeout)
        if max_timeouts:
            options.maxTimeouts = int(max_timeouts)

        options.failureFile = failure_file_path
        if install_extension is not None:
            options.extensionsToInstall = [
                os.path.join(
                    self.topsrcdir,
                    install_extension)]

        for k, v in kwargs.iteritems():
            setattr(options, k, v)

        if test_paths:
            resolver = self._spawn(TestResolver)

            tests = list(
                resolver.resolve_tests(
                    paths=test_paths,
                    flavor=flavor))

            if not tests:
                print('No tests could be found in the path specified. Please '
                      'specify a path that is a test file or is a directory '
                      'containing tests.')
                return 1

            manifest = TestManifest()
            manifest.tests.extend(tests)

            if (len(tests) == 1 and closure_behaviour == 'auto'
                    and suite == 'plain'):
                options.closeWhenDone = False

            options.manifestFile = manifest

        if rerun_failures:
            options.testManifest = failure_file_path

        if debugger:
            options.debugger = debugger

        if debugger_args:
            if options.debugger is None:
                print("--debugger-args passed, but no debugger specified.")
                return 1
            options.debuggerArgs = debugger_args

        if app_override:
            if app_override == "dist":
                options.app = self.get_binary_path(where='staged-package')
            else:
                options.app = app_override
            if options.gmp_path is None:
                # Need to fix the location of gmp_fake which might not be
                # shipped in the binary
                bin_path = self.get_binary_path()
                options.gmp_path = os.path.join(
                    os.path.dirname(bin_path),
                    'gmp-fake',
                    '1.0')
                options.gmp_path += os.pathsep
                options.gmp_path += os.path.join(
                    os.path.dirname(bin_path),
                    'gmp-clearkey',
                    '0.1')

        logger_options = {key: value for key, value in vars(options).iteritems()
                          if key.startswith('log')}
        runner = mochitest.Mochitest(logger_options)
        options = opts.verifyOptions(options, runner)

        if options is None:
            raise Exception('mochitest option validator failed.')

        # We need this to enable colorization of output.
        self.log_manager.enable_unstructured()

        result = runner.runTests(options)

        self.log_manager.disable_unstructured()
        if runner.message_logger.errors:
            result = 1
            runner.message_logger.logger.warning("The following tests failed:")
            for error in runner.message_logger.errors:
                runner.message_logger.logger.log_raw(error)

        runner.message_logger.finish()

        return result
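
The essential TestManifest pattern in the example above is building an in-memory manifest from already-resolved test objects and handing it to the runner through options.manifestFile. A minimal sketch of that pattern, using illustrative test dicts rather than anything produced by mochitest itself:

from manifestparser import TestManifest

def build_manifest(resolved_tests):
    # Wrap already-resolved test dicts in an in-memory TestManifest so a
    # runner can consume them through its normal manifest code path.
    manifest = TestManifest()
    manifest.tests.extend(resolved_tests)
    return manifest

# Illustrative input; a real resolver would supply these dicts.
tests = [{'name': 'test_example.html', 'path': '/tests/test_example.html'}]
manifest = build_manifest(tests)
print(len(manifest.tests))  # -> 1
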
Exemple #28
0
    def run_test(self, test, testtype):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and 
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath, testtype)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            testargs = {}
            if testtype is not None:
                testtypes = testtype.replace('+', ' +').replace('-', ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({ atype[1:]: 'true' })
                    elif atype.startswith('-'):
                        testargs.update({ atype[1:]: 'false' })
                    else:
                        testargs.update({ atype: 'true' })

            manifest = TestManifest()
            manifest.read(filepath)

            if self.perf:
                if self.perfserv is None:
                    self.perfserv = manifest.get("perfserv")[0]
                machine_name = socket.gethostname()
                try:
                    machine_name = manifest.get("machine_name")[0]
                except (KeyError, IndexError):
                    self.logger.info("Using machine_name: %s" % machine_name)
                os_name = platform.system()
                os_version = platform.release()
                self.perfrequest = datazilla.DatazillaRequest(
                             server=self.perfserv,
                             machine_name=machine_name,
                             os=os_name,
                             os_version=os_version,
                             platform=manifest.get("platform")[0],
                             build_name=manifest.get("build_name")[0],
                             version=manifest.get("version")[0],
                             revision=self.revision,
                             branch=manifest.get("branch")[0],
                             id=os.getenv('BUILD_ID'),
                             test_date=int(time.time()))

            manifest_tests = manifest.active_tests(disabled=False)

            for i in manifest.get(tests=manifest_tests, **testargs):
                self.run_test(i["path"], testtype)
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name, filepath, suite, testloader, self.marionette, self.testvars)
                break

        if suite.countTestCases():
            runner = MarionetteTextTestRunner(verbosity=3,
                                              perf=self.perf,
                                              marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if results.perfdata and self.perf:
                self.perfrequest.add_datazilla_result(results.perfdata)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped) + len(results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccess'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-PASS'))
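
The testtype handling in the example above folds a string such as 'browser+b2g-qemu' into keyword arguments for manifest.get(). A standalone sketch of that parsing, extracted from the logic shown, with an illustrative input:

def parse_testtype(testtype):
    # 'browser+b2g-qemu' -> {'browser': 'true', 'b2g': 'true', 'qemu': 'false'}
    testargs = {}
    for atype in testtype.replace('+', ' +').replace('-', ' -').split():
        if atype.startswith('+'):
            testargs[atype[1:]] = 'true'
        elif atype.startswith('-'):
            testargs[atype[1:]] = 'false'
        else:
            testargs[atype] = 'true'
    return testargs

print(parse_testtype('browser+b2g-qemu'))
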
Exemple #29
0
    def __call__(self, parser, namespace, values, option_string=None):
        from manifestparser import TestManifest

        here = os.path.abspath(os.path.dirname(__file__))
        raptor_ini = os.path.join(here, 'raptor.ini')

        for _app in self.integrated_apps:
            test_manifest = TestManifest([raptor_ini], strict=False)
            info = {"app": _app}
            available_tests = test_manifest.active_tests(exists=False,
                                                         disabled=False,
                                                         filters=[self.filter_app],
                                                         **info)
            if len(available_tests) == 0:
                # none for that app; skip to next
                continue

            # print in readable format
            if _app == "firefox":
                title = "\nRaptor Tests Available for %s" % APPS[_app]['long_name']
            else:
                title = "\nRaptor Tests Available for %s (--app=%s)" \
                    % (APPS[_app]['long_name'], _app)

            print(title)
            print("=" * (len(title) - 1))

            # build the list of tests for this app
            test_list = {}

            for next_test in available_tests:
                if next_test.get("name", None) is None:
                    # no test name; skip it
                    continue

                suite = os.path.basename(next_test['manifest'])[:-4]
                if suite not in test_list:
                    test_list[suite] = {'type': None, 'subtests': []}

                # for page-load tests, we want to list every subtest, so we
                # can see which pages are available in which tp6-* sets
                if next_test.get("type", None) is not None:
                    test_list[suite]['type'] = next_test['type']
                    if next_test['type'] == "pageload":
                        subtest = next_test['name']
                        measure = next_test.get("measure")
                        if measure is not None:
                            subtest = "{0} ({1})".format(subtest, measure)
                        test_list[suite]['subtests'].append(subtest)

            # print the list in a nice, readable format
            for key in sorted(test_list.iterkeys()):
                print("\n%s" % key)
                print("  type: %s" % test_list[key]['type'])
                if len(test_list[key]['subtests']) != 0:
                    print("  subtests:")
                    for _sub in sorted(test_list[key]['subtests']):
                        print("    %s" % _sub)

        print("\nDone.")
        # exit Raptor
        parser.exit()
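
The example above passes self.filter_app to active_tests() without showing its definition. In manifestparser, a filter is just a callable that takes (tests, values) and yields the tests to keep; a hypothetical app filter might look like the sketch below (the 'apps' manifest key is an assumption for illustration, not necessarily Raptor's actual schema):

def filter_app(tests, values):
    # Keep tests that either do not restrict apps or explicitly mention the
    # requested app in a (hypothetical) 'apps' manifest key.
    for test in tests:
        apps = test.get('apps')
        if apps is None or values['app'] in apps:
            yield test

# Illustrative usage, mirroring the call in the example above:
# test_manifest.active_tests(exists=False, disabled=False,
#                            filters=[filter_app], app='firefox')
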
Exemple #30
0
    def add_test(self, test, expected='pass', group='default'):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.endswith('.ini'):
                        msg_tmpl = (
                            "Ignoring manifest '{0}'; running all tests in '{1}'."
                            " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename),
                                                  filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif self._is_filename_valid(filename):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            group = filepath

            manifest = TestManifest()
            manifest.read(filepath)

            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated from: {}".format(json_path))
            self.logger.info("mozinfo is: {}".format(mozinfo.info))

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))

            values = {
                "appname": self.appName,
                "e10s": self.e10s,
                "manage_instance": self.marionette.instance is not None,
                "headless": self.headless
            }
            values.update(mozinfo.info)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   **values)
            if len(manifest_tests) == 0:
                self.logger.error("No tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: {} does not exist".format(
                        i["path"]))

                self.add_test(i["path"], i["expected"], group=group)
            return

        self.tests.append({
            'filepath': filepath,
            'expected': expected,
            'group': group
        })
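
The manifest branch of add_test() above reads a .ini file, applies filters and mozinfo-style values, and then separates disabled tests from runnable ones. A compact sketch of that flow, with a placeholder file name and keyword values:

from manifestparser import TestManifest

def split_tests(manifest_path, **values):
    # Read a .ini manifest and separate runnable tests from those the
    # manifest marks as disabled for the given values (e10s, platform, ...).
    manifest = TestManifest()
    manifest.read(manifest_path)
    active = manifest.active_tests(exists=False, disabled=True, **values)
    enabled = [t for t in active if not t.get('disabled')]
    skipped = [t for t in active if t.get('disabled')]
    return enabled, skipped

# Hypothetical call; 'manifest.ini' and the keyword values are placeholders.
# enabled, skipped = split_tests('manifest.ini', e10s=True, headless=False)
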