    def test_testmanifest(self):
        # Test filtering based on platform:
        filter_example = os.path.join(here, "filter-example.ini")
        manifest = TestManifest(manifests=(filter_example,), strict=False)
        self.assertEqual(
            [
                i["name"]
                for i in manifest.active_tests(os="win", disabled=False, exists=False)
            ],
            ["windowstest", "fleem"],
        )
        self.assertEqual(
            [
                i["name"]
                for i in manifest.active_tests(os="linux", disabled=False, exists=False)
            ],
            ["fleem", "linuxtest"],
        )

        # Look for existing tests.  There is only one:
        self.assertEqual([i["name"] for i in manifest.active_tests()], ["fleem"])

        # You should be able to expect failures:
        last = manifest.active_tests(exists=False, toolkit="gtk")[-1]
        self.assertEqual(last["name"], "linuxtest")
        self.assertEqual(last["expected"], "pass")
        last = manifest.active_tests(exists=False, toolkit="cocoa")[-1]
        self.assertEqual(last["expected"], "fail")
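For orientation, here is a minimal, hypothetical manifest of the kind the assertions above imply; the real filter-example.ini shipped with the test data may differ, but the run-if/skip-if/fail-if syntax is the part being exercised:

import tempfile
import textwrap

from manifestparser import TestManifest

# Hypothetical manifest content, chosen only to be consistent with the assertions above.
ini = textwrap.dedent(
    """\
    [windowstest]
    run-if = os == 'win'

    [fleem]

    [linuxtest]
    skip-if = os == 'win'
    fail-if = toolkit == 'cocoa'
    """
)

with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as fh:
    fh.write(ini)

manifest = TestManifest(manifests=(fh.name,), strict=False)
# Expect ['windowstest', 'fleem'] for os='win' and ['fleem', 'linuxtest'] for os='linux';
# toolkit is supplied only so the fail-if condition can be evaluated.
for os_name in ("win", "linux"):
    active = manifest.active_tests(os=os_name, toolkit="gtk", disabled=False, exists=False)
    print(os_name, [t["name"] for t in active])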
Example #2
def run(arguments=sys.argv[1:]):

    # parse the command line arguments
    parser_kwargs = dict(arguments=arguments)
    (options, command) = parse_args(**parser_kwargs)

    # Parse the manifest
    mp = TestManifest(manifests=(options.manifest,))

    # run + report
    if command == "testpy":
        results = test_all_python(mp.get(tests=mp.active_tests(disabled=False), type='python'), options)
        if results.failures or results.errors:
            sys.exit(report(True, results, None, options))
        else:
            sys.exit(report(False))
            
    elif command == "testjs":
        results = test_all_js(mp.get(tests=mp.active_tests(disabled=False), type='javascript'), options)
        if results.failures:
            sys.exit(report(True, None, results, options))
        else:
            sys.exit(report(False))
            
    elif command == "testall":
        test_all(mp.active_tests(disabled=False), options)
def run(arguments=sys.argv[1:]):

    # parse the command line arguments
    (options, command) = parse_args(arguments)

    # ensure the binary is given
    if not options.binary:
        print "Please provide a path to your Firefox binary: -b, --binary"
        sys.exit(1)

    # Parse the manifest
    mp = TestManifest(manifests=(options.manifest,), strict=False)

    # run + report
    if command == "testpy":
        results = test_all_python(mp.get(tests=mp.active_tests(disabled=False), type='python'), options)
        if results.failures or results.errors:
            sys.exit(report(True, results, None, options))
        else:
            sys.exit(report(False))

    elif command == "testjs":
        results = test_all_js(mp.get(tests=mp.active_tests(disabled=False), type='javascript'), options)
        if results.failures:
            sys.exit(report(True, None, results, options))
        else:
            sys.exit(report(False))

    elif command == "testall":
        test_all(mp.active_tests(disabled=False), options)
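The mp.get(tests=..., type='python') calls above rely on ManifestParser.get: given keyword arguments, it returns only the test records whose fields match them. A small self-contained sketch with made-up records (the field values are illustrative):

from manifestparser import TestManifest

mp = TestManifest()
mp.tests.extend([
    # Hypothetical records carrying a 'type' field, as the manifests above do.
    {"name": "test_a.py", "path": __file__, "type": "python"},
    {"name": "test_b.js", "path": __file__, "type": "javascript"},
])

python_tests = mp.get(tests=mp.active_tests(disabled=False, exists=False), type="python")
print([t["name"] for t in python_tests])  # expected: ['test_a.py']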
Example #4
def run(arguments=sys.argv[1:]):
    # parse the command line arguments
    (options, command) = parse_args(arguments)

    # ensure the binary is given
    if not options.binary:
        print "Please provide a path to your Firefox binary: -b, --binary"
        sys.exit(1)

    # set the BROWSER_PATH environment variable so that
    # subshells will be able to invoke mozrunner
    os.environ['BROWSER_PATH'] = options.binary

    # Parse the manifest
    mp = TestManifest(manifests=(options.manifest,), strict=False)

    # run + report
    if command == "testpy":
        tests = mp.active_tests(disabled=False)
        results = test_all_python(mp.get(tests=tests, type='python'), options)
        if results.failures or results.errors:
            sys.exit(report(True, results, None, options))
        else:
            sys.exit(report(False))

    elif command == "testjs":
        tests = mp.active_tests(disabled=False)
        results = test_all_js(mp.get(tests=tests, type='javascript'), options)
        if results.fails:
            sys.exit(report(True, None, results, options))
        else:
            sys.exit(report(False))

    elif command == "testall":
        test_all(mp.active_tests(disabled=False), options)
Example #6
    def test_manifest_subsuites(self):
        """
        test subsuites and conditional subsuites
        """
        class AttributeDict(dict):
            def __getattr__(self, attr):
                return self[attr]

            def __setattr__(self, attr, value):
                self[attr] = value

        relative_path = os.path.join(here, 'subsuite.ini')
        manifest = TestManifest(manifests=(relative_path, ))
        info = {'foo': 'bar'}
        options = {'subsuite': 'bar'}

        # 6 tests total
        self.assertEquals(len(manifest.active_tests(exists=False, **info)), 6)

        # only 3 tests for subsuite bar when foo==bar
        self.assertEquals(
            len(
                manifest.active_tests(exists=False,
                                      options=AttributeDict(options),
                                      **info)), 3)

        options = {'subsuite': 'baz'}
        other = {'something': 'else'}
        # only 1 test for subsuite baz, regardless of conditions
        self.assertEquals(
            len(
                manifest.active_tests(exists=False,
                                      options=AttributeDict(options),
                                      **info)), 1)
        self.assertEquals(
            len(
                manifest.active_tests(exists=False,
                                      options=AttributeDict(options),
                                      **other)), 1)

        # 5 tests match when the condition doesn't match (all tests except
        # the unconditional subsuite)
        info = {'foo': 'blah'}
        options = {'subsuite': None}
        self.assertEquals(
            len(
                manifest.active_tests(exists=False,
                                      options=AttributeDict(options),
                                      **info)), 5)

        # test for illegal subsuite value
        manifest.tests[0][
            'subsuite'] = 'subsuite=bar,foo=="bar",type="nothing"'
        self.assertRaises(ParseError,
                          manifest.active_tests,
                          exists=False,
                          options=AttributeDict(options),
                          **info)
Example #7
    def test_unknown_keywords(self):
        filter_example = os.path.join(here, 'filter-example.ini')
        manifest = TestManifest(manifests=(filter_example,))

        with self.assertRaises(ParseError):
            # toolkit missing
            manifest.active_tests(os='win', disabled=False, exists=False)

        with self.assertRaises(ParseError):
            # os missing
            manifest.active_tests(toolkit='windows', disabled=False, exists=False)
    def test_none_and_empty_manifest(self):
        """
        Test TestManifest for None and empty manifest, see
        https://bugzilla.mozilla.org/show_bug.cgi?id=1087682
        """
        none_manifest = TestManifest(manifests=None, strict=False)
        self.assertEqual(len(none_manifest.test_paths()), 0)
        self.assertEqual(len(none_manifest.active_tests()), 0)

        empty_manifest = TestManifest(manifests=[], strict=False)
        self.assertEqual(len(empty_manifest.test_paths()), 0)
        self.assertEqual(len(empty_manifest.active_tests()), 0)
Example #10
def cramtest(command_context,
             cram_args=None,
             test_paths=None,
             test_objects=None):
    command_context.activate_virtualenv()
    import mozinfo
    from manifestparser import TestManifest

    if test_objects is None:
        from moztest.resolve import TestResolver

        resolver = command_context._spawn(TestResolver)
        if test_paths:
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=test_paths,
                                                  flavor="cram")
        else:
            # Otherwise just run everything in CRAMTEST_MANIFESTS
            test_objects = resolver.resolve_tests(flavor="cram")

    if not test_objects:
        message = "No tests were collected, check spelling of the test paths."
        command_context.log(logging.WARN, "cramtest", {}, message)
        return 1

    mp = TestManifest()
    mp.tests.extend(test_objects)
    tests = mp.active_tests(disabled=False, **mozinfo.info)

    python = command_context.virtualenv_manager.python_path
    cmd = [python, "-m", "cram"] + cram_args + [t["relpath"] for t in tests]
    return subprocess.call(cmd, cwd=command_context.topsrcdir)
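The pattern above (an empty TestManifest whose tests list is extended by hand, then filtered against mozinfo) can be sketched in isolation. The record keys below are assumptions about the minimum active_tests() needs; real resolver output carries many more fields:

import mozinfo
from manifestparser import TestManifest

mp = TestManifest()
mp.tests.extend([
    # Hypothetical records: 'name' and 'path' are the essentials, and any
    # 'skip-if' expression is evaluated against the keyword values passed below.
    {"name": "example.t", "path": __file__, "relpath": "example.t"},
    {"name": "windows-only.t", "path": __file__, "relpath": "windows-only.t",
     "skip-if": "os != 'win'"},
])

tests = mp.active_tests(disabled=False, exists=False, **mozinfo.info)
print([t["relpath"] for t in tests])  # 'windows-only.t' drops out except on Windows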
Example #11
    def cramtest(self, cram_args=None, test_paths=None, test_objects=None):
        self._activate_virtualenv()
        import mozinfo
        from manifestparser import TestManifest

        if test_objects is None:
            from moztest.resolve import TestResolver
            resolver = self._spawn(TestResolver)
            if test_paths:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=test_paths, flavor='cram')
            else:
                # Otherwise just run everything in CRAMTEST_MANIFESTS
                test_objects = resolver.resolve_tests(flavor='cram')

        if not test_objects:
            message = 'No tests were collected, check spelling of the test paths.'
            self.log(logging.WARN, 'cramtest', {}, message)
            return 1

        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        python = self.virtualenv_manager.python_path
        cmd = [python, '-m', 'cram'] + cram_args + [t['relpath'] for t in tests]
        return subprocess.call(cmd, cwd=self.topsrcdir)
Example #12
    def get_suite_list(self):
        '''
        Returns a dictionary containing a mapping from suites
        to the tests they contain.

        :return dict: A dictionary with the following structure: {
                "suite_name": [
                    'testing/raptor/test1',
                    'testing/raptor/test2'
                ]
            }
        '''
        if self._suite_list:
            return self._suite_list

        manifest_path = self.get_manifest_path()

        # Get the tests from the manifest
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)

        # Parse the tests into the expected dictionary
        for test in test_list:
            # Get the top-level suite
            s = os.path.basename(test["here"])
            if s not in self._suite_list:
                self._suite_list[s] = []

            # Get the individual test
            fpath = re.sub(".*testing", "testing", test['manifest'])

            if fpath not in self._suite_list[s]:
                self._suite_list[s].append(fpath)

        return self._suite_list
Example #13
  def buildTestPath(self, options):
    """ Build the url path to the specific test harness and test file or directory
        Build a manifest of tests to run and write out a json file for the harness to read
    """
    if options.manifestFile and os.path.isfile(options.manifestFile):
      manifest = TestManifest(strict=False)
      manifest.read(options.manifestFile)
      # Bug 883858 - return all tests including disabled tests 
      tests = manifest.active_tests(disabled=False, **mozinfo.info)
      paths = []
      for test in tests:
        tp = test['path'].split(self.getTestRoot(options), 1)[1].strip('/')

        # Filter out tests if we are using --test-path
        if options.testPath and not tp.startswith(options.testPath):
          continue

        paths.append({'path': tp})

      # Bug 883865 - add this functionality into manifestDestiny
      with open('tests.json', 'w') as manifestFile:
        manifestFile.write(json.dumps({'tests': paths}))
      options.manifestFile = 'tests.json'

    testHost = "http://mochi.test:8888"
    testURL = ("/").join([testHost, self.TEST_PATH, options.testPath])
    if os.path.isfile(os.path.join(self.oldcwd, os.path.dirname(__file__), self.TEST_PATH, options.testPath)) and options.repeat > 0:
       testURL = ("/").join([testHost, self.PLAIN_LOOP_PATH])
    if options.chrome or options.a11y:
       testURL = ("/").join([testHost, self.CHROME_PATH])
    elif options.browserChrome:
      testURL = "about:blank"
    elif options.ipcplugins:
      testURL = ("/").join([testHost, self.TEST_PATH, "dom/plugins/test"])
    return testURL
    def get_test_list(self):
        """
        Returns a dictionary containing the tests that are in perftest.ini manifest.

        :return dict: A dictionary with the following structure: {
                "suite_name": {
                    'perftest_test1',
                    'perftest_test2',
                },
            }
        """
        for path in pathlib.Path(self.workspace_dir).rglob("perftest.ini"):
            suite_name = re.sub(self.workspace_dir, "", os.path.dirname(path))

            # Get the tests from perftest.ini
            test_manifest = TestManifest([str(path)], strict=False)
            test_list = test_manifest.active_tests(exists=False,
                                                   disabled=False)
            for test in test_list:
                si = ScriptInfo(test["path"])
                self.script_infos[si["name"]] = si
                self._test_list.setdefault(suite_name,
                                           {}).update({si["name"]: ""})

        return self._test_list
Example #15
    def add_test(self, test, expected='pass'):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.endswith('.ini'):
                        msg_tmpl = (
                            "Ignoring manifest '{0}'; running all tests in '{1}'."
                            " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename),
                                                  filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif self._is_filename_valid(filename):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info(
                "mozinfo updated with the following: {}".format(json_path))
            self.logger.info("mozinfo is: {}".format(mozinfo.info))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   app=self.appName,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("No tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: {} does not exist".format(
                        i["path"]))

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]

                self.add_test(i["path"], i["expected"])
            return

        self.tests.append({'filepath': filepath, 'expected': expected})
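A hedged, self-contained sketch of the tags() filter used above: it keeps only tests whose 'tags' entry (a whitespace-separated string) shares at least one tag with the requested list. The test records are made up:

from manifestparser import TestManifest
from manifestparser.filters import tags

mp = TestManifest()
mp.tests.extend([
    # Hypothetical records; only the 'tags' values matter here.
    {"name": "test_login.py", "path": __file__, "tags": "smoke auth"},
    {"name": "test_sync.py", "path": __file__, "tags": "nightly"},
])

smoke_only = mp.active_tests(exists=False, disabled=False, filters=[tags(["smoke"])])
print([t["name"] for t in smoke_only])  # expected: ['test_login.py']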
Example #16
    def get_test_list(self):
        """
        Returns a dictionary containing the tests that are in perftest.ini manifest.

        :return dict: A dictionary with the following structure: {
                "suite_name": {
                    'perftest_test1',
                    'perftest_test2',
                },
            }
        """
        for path in pathlib.Path(self.workspace_dir).rglob("perftest.ini"):
            suite_name = re.sub(self.workspace_dir, "", os.path.dirname(path))

            # If the workspace dir doesn't end with a forward-slash,
            # the substitution above won't work completely
            if suite_name.startswith("/") or suite_name.startswith("\\"):
                suite_name = suite_name[1:]

            # We have to add new paths to the logger as we search
            # because mozperftest tests exist in multiple places in-tree
            PerfDocLogger.PATHS.append(suite_name)

            # Get the tests from perftest.ini
            test_manifest = TestManifest([str(path)], strict=False)
            test_list = test_manifest.active_tests(exists=False,
                                                   disabled=False)
            for test in test_list:
                si = ScriptInfo(test["path"])
                self.script_infos[si["name"]] = si
                self._test_list.setdefault(suite_name,
                                           {}).update({si["name"]: str(path)})

        return self._test_list
    def getActiveTests(self, manifests, options, testDumpFile=None):
        # These prefs will cause reftest.jsm to parse the manifests,
        # dump the resulting tests to a file, and exit.
        prefs = {
            'reftest.manifests': json.dumps(manifests),
            'reftest.manifests.dumpTests': testDumpFile or self.testDumpFile,
        }
        cmdargs = []
        self.runApp(options, cmdargs=cmdargs, prefs=prefs)

        with open(self.testDumpFile, 'r') as fh:
            tests = json.load(fh)

        if os.path.isfile(self.testDumpFile):
            mozfile.remove(self.testDumpFile)

        for test in tests:
            # Name and path are expected by manifestparser, but not used in reftest.
            test['name'] = test['path'] = test['url1']

        mp = TestManifest(strict=False)
        mp.tests = tests

        filters = []
        if options.totalChunks:
            filters.append(
                mpf.chunk_by_manifest(options.thisChunk, options.totalChunks))

        tests = mp.active_tests(exists=False, filters=filters)
        return tests
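The chunking filters used here (chunk_by_manifest above, chunk_by_slice in the robocop runners further down) live in manifestparser.filters. A minimal sketch with made-up records, assuming chunk_by_slice splits the active tests into roughly equal slices:

from manifestparser import TestManifest
from manifestparser.filters import chunk_by_slice

mp = TestManifest()
mp.tests.extend(
    # Ten hypothetical tests, just enough to show the split.
    {"name": "test_%02d" % i, "path": __file__} for i in range(10)
)

first = mp.active_tests(exists=False, filters=[chunk_by_slice(1, 2)])
second = mp.active_tests(exists=False, filters=[chunk_by_slice(2, 2)])
print(len(first), len(second))  # expected: 5 5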
Example #18
    def cramtest(self, cram_args=None, test_paths=None, test_objects=None):
        self._activate_virtualenv()
        import mozinfo
        from manifestparser import TestManifest

        if test_objects is None:
            from mozbuild.testing import TestResolver
            resolver = self._spawn(TestResolver)
            if test_paths:
                # If we were given test paths, try to find tests matching them.
                test_objects = resolver.resolve_tests(paths=test_paths,
                                                      flavor='cram')
            else:
                # Otherwise just run everything in CRAMTEST_MANIFESTS
                test_objects = resolver.resolve_tests(flavor='cram')

        if not test_objects:
            message = 'No tests were collected, check spelling of the test paths.'
            self.log(logging.WARN, 'cramtest', {}, message)
            return 1

        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        python = self.virtualenv_manager.python_path
        cmd = [python, '-m', 'cram'
               ] + cram_args + [t['relpath'] for t in tests]
        return subprocess.call(cmd, cwd=self.topsrcdir)
Example #19
def get_browser_test_list(browser_app, run_local):
    LOG.info(raptor_ini)
    test_manifest = TestManifest([raptor_ini], strict=False)
    info = {"app": browser_app, "run_local": run_local}
    return test_manifest.active_tests(
        exists=False, disabled=False, filters=[filter_app], **info
    )
    def _get_subtests_from_ini(self, manifest_path, suite_name):
        """
        Returns a list of (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return list: the list of the tests
        """
        desc_exclusion = [
            "here", "manifest", "manifest_relpath", "path", "relpath"
        ]
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtests = {}
        for subtest in test_list:
            subtests[subtest["name"]] = subtest["manifest"]

            description = {}
            for key, value in subtest.items():
                if key not in desc_exclusion:
                    description[key] = value
            self._descriptions.setdefault(suite_name, []).append(description)

        self._descriptions[suite_name].sort(key=lambda item: item["name"])

        return subtests
Example #21
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith('test_')
                            and (filename.endswith('.py')
                                 or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info(
                "mozinfo updated with the following: {}".format(None))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   device=self.device,
                                                   app=self.appName,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({
            'filepath': filepath,
            'expected': expected,
            'test_container': test_container
        })
Example #22
    def __call__(self, parser, namespace, values, option_string=None):
        from manifestparser import TestManifest

        here = os.path.abspath(os.path.dirname(__file__))
        raptor_ini = os.path.join(here, 'raptor.ini')

        for _app in ["firefox", "chrome", "geckoview", "chrome-android"]:
            test_manifest = TestManifest([raptor_ini], strict=False)
            info = {"app": _app}
            available_tests = test_manifest.active_tests(exists=False,
                                                         disabled=False,
                                                         filters=[self.filter_app],
                                                         **info)
            if len(available_tests) == 0:
                # none for that app, skip to next
                continue

            # print in readable format
            if _app == "firefox":
                title = "\nRaptor Tests Available for %s" % self.get_long_name(_app)
            else:
                title = "\nRaptor Tests Available for %s (--app=%s)" \
                    % (self.get_long_name(_app), _app)

            print(title)
            print("=" * (len(title) - 1))

            # build the list of tests for this app
            test_list = {}

            for next_test in available_tests:
                if next_test.get("name", None) is None:
                    # no test name, skip it
                    continue

                suite = os.path.basename(next_test['manifest'])[:-4]
                if suite not in test_list:
                    test_list[suite] = {'type': None, 'subtests': []}

                # for page-load tests we want to list every subtest, so we
                # can see which pages are available in which tp6-* sets
                if next_test.get("type", None) is not None:
                    test_list[suite]['type'] = next_test['type']
                    if next_test['type'] == "pageload":
                        test_list[suite]['subtests'].append(next_test['name'])

            # print the list in a nice readable format
            for key in sorted(test_list.keys()):
                print("\n%s" % key)
                print("  type: %s" % test_list[key]['type'])
                if len(test_list[key]['subtests']) != 0:
                    print("  subtests:")
                    for _sub in sorted(test_list[key]['subtests']):
                        print("    %s" % _sub)

        print("\nDone.")
        # exit Raptor
        parser.exit()
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.endswith('.ini')):
                        msg_tmpl = ("Ignoring manifest '{0}'; running all tests in '{1}'."
                                    " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename), filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return


        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated with the following: {}".format(None))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   app=self.appName,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                       manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({'filepath': filepath, 'expected': expected, 'test_container': test_container})
    def test_manifest_subsuites(self):
        """
        test subsuites and conditional subsuites
        """
        class AttributeDict(dict):
            def __getattr__(self, attr):
                return self[attr]
            def __setattr__(self, attr, value):
                self[attr] = value

        relative_path = os.path.join(here, 'subsuite.ini')
        manifest = TestManifest(manifests=(relative_path,))
        info = {'foo': 'bar'}
        options = {'subsuite': 'bar'}

        # 6 tests total
        self.assertEquals(len(manifest.active_tests(exists=False, **info)), 6)

        # only 3 tests for subsuite bar when foo==bar
        self.assertEquals(len(manifest.active_tests(exists=False,
                                                    options=AttributeDict(options),
                                                    **info)), 3)

        options = {'subsuite': 'baz'}
        other = {'something': 'else'}
        # only 1 test for subsuite baz, regardless of conditions
        self.assertEquals(len(manifest.active_tests(exists=False,
                                                    options=AttributeDict(options),
                                                    **info)), 1)
        self.assertEquals(len(manifest.active_tests(exists=False,
                                                    options=AttributeDict(options),
                                                    **other)), 1)

        # 5 tests match when the condition doesn't match (all tests except
        # the unconditional subsuite)
        info = {'foo': 'blah'}
        options = {'subsuite': None}
        self.assertEquals(len(manifest.active_tests(exists=False,
                                                    options=AttributeDict(options),
                                                    **info)), 5)

        # test for illegal subsuite value
        manifest.tests[0]['subsuite'] = 'subsuite=bar,foo=="bar",type="nothing"'
        self.assertRaises(ParseError, manifest.active_tests, exists=False,
                          options=AttributeDict(options), **info)
    def test_manifest_subsuites(self):
        """
        test subsuites and conditional subsuites
        """
        relative_path = os.path.join(here, "subsuite.ini")
        manifest = TestManifest(manifests=(relative_path,))
        info = {"foo": "bar"}

        # 6 tests total
        tests = manifest.active_tests(exists=False, **info)
        self.assertEqual(len(tests), 6)

        # only 3 tests for subsuite bar when foo==bar
        tests = manifest.active_tests(exists=False, filters=[subsuite("bar")], **info)
        self.assertEqual(len(tests), 3)

        # only 1 test for subsuite baz, regardless of conditions
        other = {"something": "else"}
        tests = manifest.active_tests(exists=False, filters=[subsuite("baz")], **info)
        self.assertEqual(len(tests), 1)
        tests = manifest.active_tests(exists=False, filters=[subsuite("baz")], **other)
        self.assertEqual(len(tests), 1)

        # 5 tests match when the condition doesn't match (all tests except
        # the unconditional subsuite)
        info = {"foo": "blah"}
        tests = manifest.active_tests(exists=False, filters=[subsuite()], **info)
        self.assertEqual(len(tests), 5)

        # test for illegal subsuite value
        manifest.tests[0]["subsuite"] = 'subsuite=bar,foo=="bar",type="nothing"'
        with self.assertRaises(ParseError):
            manifest.active_tests(exists=False, filters=[subsuite("foo")], **info)
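A hypothetical subsuite.ini consistent with the counts asserted above (the real file may differ): three tests join subsuite 'bar' only when foo == "bar", one test is unconditionally in subsuite 'baz', and two tests have no subsuite. The sketch writes it to a temporary file and queries it with the subsuite filter:

import tempfile
import textwrap

from manifestparser import TestManifest
from manifestparser.filters import subsuite

# Hypothetical manifest content, chosen only to reproduce the counts above.
ini = textwrap.dedent(
    """\
    [test1]
    subsuite = bar,foo == "bar"
    [test2]
    subsuite = bar,foo == "bar"
    [test3]
    subsuite = bar,foo == "bar"
    [test4]
    subsuite = baz
    [test5]
    [test6]
    """
)
with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as fh:
    fh.write(ini)

manifest = TestManifest(manifests=(fh.name,), strict=False)
print(len(manifest.active_tests(exists=False, foo="bar")))                             # 6
print(len(manifest.active_tests(exists=False, filters=[subsuite("bar")], foo="bar")))  # 3
print(len(manifest.active_tests(exists=False, filters=[subsuite()], foo="blah")))      # 5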
    def test_testmanifest(self):
        # Test filtering based on platform:
        filter_example = os.path.join(here, 'filter-example.ini')
        manifest = TestManifest(manifests=(filter_example,), strict=False)
        self.assertEqual([i['name'] for i in manifest.active_tests(os='win', disabled=False, exists=False)],
                         ['windowstest', 'fleem'])
        self.assertEqual([i['name'] for i in manifest.active_tests(os='linux', disabled=False, exists=False)],
                         ['fleem', 'linuxtest'])

        # Look for existing tests.  There is only one:
        self.assertEqual([i['name'] for i in manifest.active_tests()],
                         ['fleem'])

        # You should be able to expect failures:
        last = manifest.active_tests(exists=False, toolkit='gtk2')[-1]
        self.assertEqual(last['name'], 'linuxtest')
        self.assertEqual(last['expected'], 'pass')
        last = manifest.active_tests(exists=False, toolkit='cocoa')[-1]
        self.assertEqual(last['expected'], 'fail')
    def test_testmanifest(self):
        # Test filtering based on platform:
        filter_example = os.path.join(here, 'filter-example.ini')
        manifest = TestManifest(manifests=(filter_example,))
        self.assertEqual([i['name'] for i in manifest.active_tests(os='win', disabled=False, exists=False)],
                         ['windowstest', 'fleem'])
        self.assertEqual([i['name'] for i in manifest.active_tests(os='linux', disabled=False, exists=False)],
                         ['fleem', 'linuxtest'])

        # Look for existing tests.  There is only one:
        self.assertEqual([i['name'] for i in manifest.active_tests()],
                         ['fleem'])

        # You should be able to expect failures:
        last_test = manifest.active_tests(exists=False, toolkit='gtk2')[-1]
        self.assertEqual(last_test['name'], 'linuxtest')
        self.assertEqual(last_test['expected'], 'pass')
        last_test = manifest.active_tests(exists=False, toolkit='cocoa')[-1]
        self.assertEqual(last_test['expected'], 'fail')
Example #28
    def add_test(self, test, expected="pass", test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.startswith("test_") and (filename.endswith(".py") or filename.endswith(".js")):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == ".ini":
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated with the following: {}".format(None))
            manifest_tests = manifest.active_tests(
                exists=False,
                disabled=True,
                filters=filters,
                device=self.device,
                app=self.appName,
                e10s=self.e10s,
                **mozinfo.info
            )
            if len(manifest_tests) == 0:
                self.logger.error(
                    "no tests to run using specified " "combination of filters: {}".format(manifest.fmt_filters())
                )

            target_tests = []
            for test in manifest_tests:
                if test.get("disabled"):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i["path"])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({"filepath": filepath, "expected": expected, "test_container": test_container})
Example #29
    def _get_subtests_from_ini(self, manifest_path):
        '''
        Returns a list of (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return list: the list of the tests
        '''
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtest_list = [subtest["name"] for subtest in test_list]

        return subtest_list
    def get_test_list(self, manifest):
        self.logger.info("Reading test manifest: %s" % manifest)
        mft = TestManifest()
        mft.read(manifest)

        # In the future if we want to add in more processing to the manifest
        # here is where you'd do that. Right now, we just return a list of
        # tests
        testlist = []
        for i in mft.active_tests(exists=False, disabled=False):
            testlist.append(i["path"])

        return testlist
Example #32
    def __call__(self, manifests, buildconfig):
        global logger
        logger = logger or structuredlog.get_default_logger()

        if not isinstance(manifests, Iterable):
            manifests = [manifests]

        m = TestManifest(manifests)

        active = [t['path'] for t in m.active_tests(exists=False, disabled=False, **buildconfig)]
        skipped = [t['path'] for t in m.tests if t['path'] not in active]

        return active, skipped
Example #33
    def runTests(self):
        self.startup()
        if isinstance(self.options.manifestFile, TestManifest):
            mp = self.options.manifestFile
        else:
            mp = TestManifest(strict=False)
            mp.read(self.options.robocopIni)
        filters = []
        if self.options.totalChunks:
            filters.append(
                chunk_by_slice(self.options.thisChunk,
                               self.options.totalChunks))
        robocop_tests = mp.active_tests(exists=False,
                                        filters=filters,
                                        **mozinfo.info)
        if not self.options.autorun:
            # Force a single loop iteration. The iteration will start Fennec and
            # the httpd server, but not actually run a test.
            self.options.test_paths = [robocop_tests[0]['name']]
        active_tests = []
        for test in robocop_tests:
            if self.options.test_paths and test[
                    'name'] not in self.options.test_paths:
                continue
            if 'disabled' in test:
                self.log.info('TEST-INFO | skipping %s | %s' %
                              (test['name'], test['disabled']))
                continue
            active_tests.append(test)

        tests_by_manifest = defaultdict(list)
        for test in active_tests:
            tests_by_manifest[test['manifest']].append(test['name'])
        self.log.suite_start(tests_by_manifest)

        worstTestResult = None
        for test in active_tests:
            result = self.runSingleTest(test)
            if worstTestResult is None or worstTestResult == 0:
                worstTestResult = result
        if worstTestResult is None:
            self.log.warning(
                "No tests run. Did you pass an invalid TEST_PATH?")
            worstTestResult = 1
        else:
            print "INFO | runtests.py | Test summary: start."
            logResult = self.logTestSummary()
            print "INFO | runtests.py | Test summary: end."
            if worstTestResult == 0:
                worstTestResult = logResult
        return worstTestResult
Example #34
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   e10s=self.e10s,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                       manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({'filepath': filepath, 'expected': expected, 'test_container': test_container})
Example #35
    def convert_ini_manifest_to_json(cls, manifest_path):
        manifest = TestManifest([manifest_path])

        whitelist = [t['path'] for t in manifest.active_tests(disabled=False)]
        blacklist = [t for t in manifest.paths() if t not in whitelist]

        whitelist.insert(0, os.path.join(gaia_dir, 'shared', 'test', 'integration', 'setup.js'))

        map(lambda l: [os.path.relpath(p, gaia_dir) for p in l] , (whitelist, blacklist))
        contents = { 'whitelist': whitelist }

        manifest_path = tempfile.NamedTemporaryFile(suffix='.json').name
        with open(manifest_path, 'w') as f:
            f.writelines(json.dumps(contents, indent=2))
        return manifest_path
Example #36
    def runTests(self):
        self.startup()
        if isinstance(self.options.manifestFile, TestManifest):
            mp = self.options.manifestFile
        else:
            mp = TestManifest(strict=False)
            mp.read(self.options.robocopIni)
        filters = []
        if self.options.totalChunks:
            filters.append(
                chunk_by_slice(self.options.thisChunk, self.options.totalChunks))
        robocop_tests = mp.active_tests(
            exists=False, filters=filters, **mozinfo.info)
        if not self.options.autorun:
            # Force a single loop iteration. The iteration will start Fennec and
            # the httpd server, but not actually run a test.
            self.options.test_paths = [robocop_tests[0]['name']]
        active_tests = []
        for test in robocop_tests:
            if self.options.test_paths and test['name'] not in self.options.test_paths:
                continue
            if 'disabled' in test:
                self.log.info('TEST-INFO | skipping %s | %s' %
                              (test['name'], test['disabled']))
                continue
            active_tests.append(test)

        tests_by_manifest = defaultdict(list)
        for test in active_tests:
            tests_by_manifest[test['manifest']].append(test['name'])
        self.log.suite_start(tests_by_manifest)

        worstTestResult = None
        for test in active_tests:
            result = self.runSingleTest(test)
            if worstTestResult is None or worstTestResult == 0:
                worstTestResult = result
        if worstTestResult is None:
            self.log.warning(
                "No tests run. Did you pass an invalid TEST_PATH?")
            worstTestResult = 1
        else:
            print "INFO | runtests.py | Test summary: start."
            logResult = self.logTestSummary()
            print "INFO | runtests.py | Test summary: end."
            if worstTestResult == 0:
                worstTestResult = logResult
        return worstTestResult
Example #37
    def convert_ini_manifest_to_json(cls, manifest_path):
        manifest = TestManifest([manifest_path])

        whitelist = [t['path'] for t in manifest.active_tests(disabled=False)]
        blacklist = [t for t in manifest.paths() if t not in whitelist]

        whitelist.insert(
            0,
            os.path.join(gaia_dir, 'shared', 'test', 'integration',
                         'setup.js'))

        map(lambda l: [os.path.relpath(p, gaia_dir) for p in l],
            (whitelist, blacklist))
        contents = {'whitelist': whitelist}

        manifest_path = tempfile.NamedTemporaryFile(suffix='.json').name
        with open(manifest_path, 'w') as f:
            f.writelines(json.dumps(contents, indent=2))
        return manifest_path
Example #38
    def get_manifests(self, suite, mozinfo):
        mozinfo = dict(mozinfo)
        # Compute all tests for the given suite/subsuite.
        tests = self.get_tests(suite)

        if "web-platform-tests" in suite:
            manifests = set()
            for t in tests:
                manifests.add(t["manifest"])
            return {"active": list(manifests), "skipped": []}

        manifests = set(chunk_by_runtime.get_manifest(t) for t in tests)

        # Compute  the active tests.
        m = TestManifest()
        m.tests = tests
        tests = m.active_tests(disabled=False, exists=False, **mozinfo)
        active = set(chunk_by_runtime.get_manifest(t) for t in tests)
        skipped = manifests - active
        return {"active": list(active), "skipped": list(skipped)}
Example #39
def get_chunked_manifests(flavor, subsuite, chunks, mozinfo):
    """Compute which manifests should run in which chunks with the given category
    of tests.

    Args:
        flavor (str): The suite to run. Values are defined by the 'build_flavor' key
            in `moztest.resolve.TEST_SUITES`.
        subsuite (str): The subsuite to run or 'undefined' to denote no subsuite.
        chunks (int): Number of chunks to split manifests across.
        mozinfo (frozenset): Set of data in the form of (<key>, <value>) used
                             for filtering.

    Returns:
        A list of manifests where each item contains the manifest that should
        run in the corresponding chunk.
    """
    mozinfo = dict(mozinfo)
    # Compute all tests for the given suite/subsuite.
    tests = get_tests(flavor, subsuite)
    all_manifests = set(t['manifest_relpath'] for t in tests)

    # Compute only the active tests.
    m = TestManifest()
    m.tests = tests
    tests = m.active_tests(disabled=False, exists=False, **mozinfo)
    active_manifests = set(t['manifest_relpath'] for t in tests)

    # Run the chunking algorithm.
    chunked_manifests = [
        c[1] for c in chunk_by_runtime(
            None,
            chunks,
            get_runtimes(mozinfo['os'])
        ).get_chunked_manifests(tests)
    ]

    # Add all skipped manifests to the first chunk so they still show up in the
    # logs. They won't impact runtime much.
    skipped_manifests = all_manifests - active_manifests
    chunked_manifests[0].extend(skipped_manifests)
    return chunked_manifests
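A hedged usage sketch for the helper above; the flavor, subsuite, and mozinfo entries are illustrative, and the call assumes the in-tree helpers it depends on (get_tests, get_runtimes) are available:

# Illustrative values only; real callers pass the task's actual build settings.
build_settings = frozenset({("os", "linux"), ("debug", False), ("bits", 64)})
chunked = get_chunked_manifests("mochitest", "undefined", 4, build_settings)
for index, manifests in enumerate(chunked, start=1):
    print("chunk %d: %d manifests" % (index, len(manifests)))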
    def _get_subtests_from_ini(self, manifest_path, suite_name):
        """
        Returns a list of (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return list: the list of the tests
        """
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtest_list = {}
        for subtest in test_list:
            subtest_list[subtest["name"]] = subtest["manifest"]
            self._urls[subtest["name"]] = {
                "type": suite_name,
                "url": subtest["test_url"],
            }

        self._urls = collections.OrderedDict(
            sorted(self._urls.items(), key=lambda t: len(t[0])))

        return subtest_list
    def test_manifest_subsuites(self):
        """
        test subsuites and conditional subsuites
        """
        relative_path = os.path.join(here, 'subsuite.ini')
        manifest = TestManifest(manifests=(relative_path,))
        info = {'foo': 'bar'}

        # 6 tests total
        tests = manifest.active_tests(exists=False, **info)
        self.assertEquals(len(tests), 6)

        # only 3 tests for subsuite bar when foo==bar
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite('bar')],
                                      **info)
        self.assertEquals(len(tests), 3)

        # only 1 test for subsuite baz, regardless of conditions
        other = {'something': 'else'}
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite('baz')],
                                      **info)
        self.assertEquals(len(tests), 1)
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite('baz')],
                                      **other)
        self.assertEquals(len(tests), 1)

        # 5 tests match when the condition doesn't match (all tests except
        # the unconditional subsuite)
        info = {'foo': 'blah'}
        tests = manifest.active_tests(exists=False,
                                      filters=[subsuite()],
                                      **info)
        self.assertEquals(len(tests), 5)

        # test for illegal subsuite value
        manifest.tests[0]['subsuite'] = 'subsuite=bar,foo=="bar",type="nothing"'
        with self.assertRaises(ParseError):
            manifest.active_tests(exists=False,
                                  filters=[subsuite('foo')],
                                  **info)
Example #42
    def buildTestPath(self, options):
        """ Build the url path to the specific test harness and test file or directory
        Build a manifest of tests to run and write out a json file for the harness to read
    """
        if options.manifestFile and os.path.isfile(options.manifestFile):
            manifest = TestManifest(strict=False)
            manifest.read(options.manifestFile)
            # Bug 883858 - return all tests including disabled tests
            tests = manifest.active_tests(disabled=False, **mozinfo.info)
            paths = []
            for test in tests:
                tp = test['path'].split(self.getTestRoot(options),
                                        1)[1].strip('/')

                # Filter out tests if we are using --test-path
                if options.testPath and not tp.startswith(options.testPath):
                    continue

                paths.append({'path': tp})

            # Bug 883865 - add this functionality into manifestDestiny
            with open('tests.json', 'w') as manifestFile:
                manifestFile.write(json.dumps({'tests': paths}))
            options.manifestFile = 'tests.json'

        testHost = "http://mochi.test:8888"
        testURL = ("/").join([testHost, self.TEST_PATH, options.testPath])
        if os.path.isfile(
                os.path.join(self.oldcwd, os.path.dirname(__file__),
                             self.TEST_PATH,
                             options.testPath)) and options.repeat > 0:
            testURL = ("/").join([testHost, self.PLAIN_LOOP_PATH])
        if options.chrome or options.a11y:
            testURL = ("/").join([testHost, self.CHROME_PATH])
        elif options.browserChrome:
            testURL = "about:blank"
        elif options.ipcplugins:
            testURL = ("/").join(
                [testHost, self.TEST_PATH, "dom/plugins/test"])
        return testURL
def run_mozmill(runner, args):
  tests = []
  for test in args.mozmill:
    testpath = os.path.expanduser(test)
    realpath = os.path.realpath(testpath)

    if not os.path.exists(testpath):
      raise Exception("Not a valid test file/directory: %s" % test)

    root,ext = os.path.splitext(testpath)
    if ext == ".ini":
        # This is a test manifest, use the parser instead
        manifest = TestManifest(manifests=[testpath], strict=False)
        print(mozinfo.info)
        tests.extend(manifest.active_tests(**mozinfo.info))
    else:
      def testname(t):
        if os.path.isdir(realpath):
          return os.path.join(test, os.path.relpath(t, testpath))
        return test

      tests.extend([{'name': testname(t), 'path': t }
                    for t in mozmill.collect_tests(testpath)])

  if args.verbose and len(tests):
    print "Running these tests:"
    print "\t" + "\n\t".join(map(lambda x: x['path'], tests))

  exception = None
  try:
    runner.run(tests, True)
  except:
    exception_type, exception, tb = sys.exc_info()

  results = runner.finish(fatal=exception is not None)

  if exception:
      traceback.print_exception(exception_type, exception, tb)
  if exception or results.fails:
      sys.exit(1)
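The testname() closure above is terse; a small worked example with made-up paths shows what it produces when a directory is passed on the command line.

import os

test = 'tests/functional'                            # user-supplied argument (a directory)
testpath = os.path.expanduser(test)
collected = 'tests/functional/testToolbar/test1.js'  # one file found by collect_tests

# For a directory argument, the reported name is the user path joined with the
# file's path relative to it; for a single file it is just the user argument.
name = os.path.join(test, os.path.relpath(collected, testpath))
print(name)   # tests/functional/testToolbar/test1.js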
Exemple #44
0
    def _get_subtests_from_ini(self, manifest_path, suite_name):
        """
        Returns a list of (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return list: the list of the tests
        """
        test_manifest = TestManifest([manifest_path], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtests = {}
        for subtest in test_list:
            subtests[subtest["name"]] = subtest["manifest"]
            self._urls.setdefault(suite_name, []).append(
                {
                    "test_name": subtest["name"],
                    "url": subtest["test_url"],
                }
            )

        self._urls[suite_name].sort(key=lambda item: item["test_name"])

        return subtests
Exemple #45
0
    def parse_manifests(self, tests_path, options=None):
        """Parses a list of given files as manifests, skipping those that are unparsable.
        Outputs a summary that gives information about the tests activated/skipped."""
        self.total_tests_count = 0
        self.skipped_tests_count = 0

        options = options or dict()
        for rel_path in CodeRevision.MANIFESTS_REL_PATHS:
            manifest_path = os.path.join(tests_path, rel_path)
            test_manifest = TestManifest([manifest_path])

            active_tests = [t['path'] for t in test_manifest.active_tests(exists=False, disabled=False, **options)]
            skipped_tests = [t['path'] for t in test_manifest.tests if t['path'] not in active_tests]

            self.total_tests_count += len(test_manifest.tests)
            self.skipped_tests_count += len(skipped_tests)
            test_suite_state = TestSuiteState(revision=self, test_suite=rel_path, options=options,
                                              active_tests=active_tests, skipped_tests=skipped_tests)
            self.manifest_states.append(test_suite_state)
            test_suite_state.save()

        self.processed = True
        self.save()
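A minimal sketch of the same active/skipped bookkeeping for a single manifest, assuming a placeholder ini path and placeholder filter values.

from manifestparser import TestManifest

test_manifest = TestManifest(['unit-tests.ini'])     # placeholder manifest path
options = {'os': 'linux', 'e10s': True}              # placeholder filter values

active = [t['path'] for t in
          test_manifest.active_tests(exists=False, disabled=False, **options)]
skipped = [t['path'] for t in test_manifest.tests if t['path'] not in active]

print('total %d, active %d, skipped %d'
      % (len(test_manifest.tests), len(active), len(skipped)))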
Exemple #46
0
    def get_manifests(self, flavor, subsuite, mozinfo):
        mozinfo = dict(mozinfo)
        # Compute all tests for the given suite/subsuite.
        tests = self.get_tests(flavor, subsuite)

        if flavor == "web-platform-tests":
            manifests = set()
            for t in tests:
                group = self.get_wpt_group(t)
                wpt_group_translation[t['manifest']].add(group)
                manifests.add(t['manifest'])

            return {"active": list(manifests), "skipped": []}

        manifests = set(chunk_by_runtime.get_manifest(t) for t in tests)

        # Compute the active tests.
        m = TestManifest()
        m.tests = tests
        tests = m.active_tests(disabled=False, exists=False, **mozinfo)
        active = set(chunk_by_runtime.get_manifest(t) for t in tests)
        skipped = manifests - active
        return {"active": list(active), "skipped": list(skipped)}
Exemple #47
0
    def add_test(self, test, expected='pass'):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if filename.endswith('.ini'):
                        msg_tmpl = ("Ignoring manifest '{0}'; running all tests in '{1}'."
                                    " See --help for details.")
                        relpath = os.path.relpath(os.path.join(root, filename), filepath)
                        self.logger.warning(msg_tmpl.format(relpath, filepath))
                    elif self._is_filename_valid(filename):
                        test_file = os.path.join(root, filename)
                        self.add_test(test_file)
            return

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            json_path = update_mozinfo(filepath)
            self.logger.info("mozinfo updated from: {}".format(json_path))
            self.logger.info("mozinfo is: {}".format(mozinfo.info))

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))

            values = {
                "appname": self.appName,
                "e10s": self.e10s,
                "manage_instance": self.marionette.instance is not None,
            }
            values.update(mozinfo.info)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   **values)
            if len(manifest_tests) == 0:
                self.logger.error("No tests to run using specified "
                                  "combination of filters: {}".format(
                                      manifest.fmt_filters()))

            target_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    target_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: {} does not exist".format(i["path"]))

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]

                self.add_test(i["path"], i["expected"])
            return

        self.tests.append({'filepath': filepath, 'expected': expected})
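The tag filter used above can be exercised on its own; a hedged sketch follows, with a placeholder manifest path and tag list. disabled=True keeps skipped entries in the result so they can be reported, mirroring add_test().

import mozinfo
from manifestparser import TestManifest
from manifestparser.filters import tags

manifest = TestManifest()
manifest.read('manifest.ini')          # placeholder manifest path

filters = []
test_tags = ['remote']                 # e.g. what a --tag option would provide
if test_tags:
    filters.append(tags(test_tags))

active = manifest.active_tests(exists=False, disabled=True,
                               filters=filters, **mozinfo.info)
runnable = [t for t in active if not t.get('disabled')]
skipped = [t for t in active if t.get('disabled')]
print('%d to run, %d skipped' % (len(runnable), len(skipped)))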
def main(args):
    parser = Options()
    options, args = parser.parse_args()
    if (not options.html_manifest or not options.specialpowers or
            not options.host1 or not options.host2 or
            not options.signalling_server):
        parser.print_usage()
        return 2

    package_options = get_package_options(parser, options)
    if not package_options:
        parser.print_usage()
        return 2

    if not os.path.isdir(options.specialpowers):
        parser.error("SpecialPowers directory %s does not exist" % options.specialpowers)
        return 2
    if options.prefs and not os.path.isfile(options.prefs):
        parser.error("Prefs file %s does not exist" % options.prefs)
        return 2
    if options.log_dest and not os.path.isdir(options.log_dest):
        parser.error("Log directory %s does not exist" % options.log_dest)
        return 2

    log = mozlog.getLogger('steeplechase')
    log.setLevel(mozlog.DEBUG)
    if ':' in options.host1:
        host, port = options.host1.split(':')
        dm1 = DeviceManagerSUT(host, port)
    else:
        dm1 = DeviceManagerSUT(options.host1)
    if ':' in options.host2:
        host, port = options.host2.split(':')
        dm2 = DeviceManagerSUT(host, port)
    else:
        dm2 = DeviceManagerSUT(options.host2)
    remote_info = [{'dm': dm1,
                    'binary': package_options.binary,
                    'package': package_options.package,
                    'is_initiator': True,
                    'name': 'Client1'},
                   {'dm': dm2,
                    'binary': package_options.binary2,
                    'package': package_options.package2,
                    'is_initiator': False,
                    'name': 'Client2'}]
    # first, push app
    for info in remote_info:
        dm = info['dm']

        if info['binary']:
            asset = Binary(path=info['binary'], log=log, dm=info['dm'], name=info['name'])
        else:
            asset = generate_package_asset(path=info['package'], log=log, dm=info['dm'], name=info['name'])

        if options.setup:
            asset.setup_test_root()
        info['test_root'] = asset.test_root()

        if options.setup:
            log.info("Pushing app to %s...", info["name"])
            asset.setup_client()
        info['remote_app_path'] = asset.path_to_launch()
        if not options.setup and not dm.fileExists(info['remote_app_path']):
            log.error("App does not exist on %s, don't use --noSetup", info['name'])
            return 2

    pass_count, fail_count = 0, 0
    if options.html_manifest:
        manifest = TestManifest(strict=False)
        manifest.read(options.html_manifest)
        manifest_data = {"tests": [{"path": t["relpath"]} for t in manifest.active_tests(disabled=False, **mozinfo.info)]}

        remote_port = 0
        if options.remote_webserver:
            result = re.search(':(\d+)', options.remote_webserver)
            if result:
                remote_port = int(result.groups()[0])


        @json_response
        def get_manifest(req):
            return (200, manifest_data)
        handlers = [{
            'method': 'GET',
            'path': '/manifest.json',
            'function': get_manifest
            }]
        httpd = MozHttpd(host=moznetwork.get_ip(), port=remote_port, log_requests=True,
                         docroot=os.path.join(os.path.dirname(__file__), "..", "webharness"),
                         urlhandlers=handlers,
                         path_mappings={"/tests": os.path.dirname(options.html_manifest)})
        httpd.start(block=False)
        test = HTMLTests(httpd, remote_info, log, options)
        html_pass_count, html_fail_count = test.run()
        pass_count += html_pass_count
        fail_count += html_fail_count
        httpd.stop()
    log.info("Result summary:")
    log.info("Passed: %d" % pass_count)
    log.info("Failed: %d" % fail_count)
    return pass_count > 0 and fail_count == 0
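The MozHttpd wiring above reduces to a small pattern: register a GET handler that returns a JSON manifest, then start the server without blocking. A trimmed sketch follows; the payload is illustrative and the mozhttpd.handlers import path for json_response is an assumption.

import os
import moznetwork
from mozhttpd import MozHttpd
from mozhttpd.handlers import json_response   # assumed import path for the decorator

# Illustrative payload; a real harness builds this from a TestManifest.
manifest_data = {'tests': [{'path': 'webrtc/test_basic_video.html'}]}

@json_response
def get_manifest(req):
    # json_response turns the (status, dict) tuple into a JSON HTTP response.
    return (200, manifest_data)

handlers = [{
    'method': 'GET',
    'path': '/manifest.json',
    'function': get_manifest,
}]

httpd = MozHttpd(host=moznetwork.get_ip(), port=0,   # port=0 picks a free port
                 docroot=os.path.dirname(os.path.abspath(__file__)),
                 urlhandlers=handlers)
httpd.start(block=False)
# ... point the clients at http://<host>:<port>/manifest.json ...
httpd.stop()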
Exemple #49
0
    def run_test(self, test):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            testargs = {}
            if self.type is not None:
                testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
                for atype in testtypes:
                    if atype.startswith('+'):
                        testargs.update({ atype[1:]: 'true' })
                    elif atype.startswith('-'):
                        testargs.update({ atype[1:]: 'false' })
                    else:
                        testargs.update({ atype: 'true' })

            manifest = TestManifest()
            manifest.read(filepath)

            all_tests = manifest.active_tests(disabled=False)
            manifest_tests = manifest.active_tests(disabled=False,
                                                   device=self.device,
                                                   app=self.appName)
            skip_tests = list(set([x['path'] for x in all_tests]) -
                              set([x['path'] for x in manifest_tests]))
            for skipped in skip_tests:
                self.logger.info('TEST-SKIP | %s | device=%s, app=%s' %
                                 (os.path.basename(skipped),
                                  self.device,
                                  self.appName))
                self.todo += 1

            for i in manifest.get(tests=manifest_tests, **testargs):
                self.run_test(i["path"])
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name,
                                           filepath,
                                           suite,
                                           testloader,
                                           self.marionette,
                                           self.testvars,
                                           **self.test_kwargs)
                break

        if suite.countTestCases():
            runner = self.textrunnerclass(verbosity=3,
                                          marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccesses'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
            if hasattr(results, 'expectedFailures'):
                self.passed += len(results.expectedFailures)
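The --type parsing above is compact enough to misread; a worked example with a made-up type string shows how it expands into testargs.

type_arg = 'b2g+smoketest-flaky'       # e.g. --type=b2g+smoketest-flaky

testargs = {}
# Insert a space before each +/- so split() yields one token per flag.
for atype in type_arg.replace('+', ' +').replace('-', ' -').split():
    if atype.startswith('+'):
        testargs[atype[1:]] = 'true'
    elif atype.startswith('-'):
        testargs[atype[1:]] = 'false'
    else:
        testargs[atype] = 'true'

# Result: b2g -> 'true', smoketest -> 'true', flaky -> 'false'
print(testargs)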
def run_test_harness(options):
    message_logger = MessageLogger(logger=None)
    process_args = {'messageLogger': message_logger}
    auto = RemoteAutomation(None, "fennec", processArgs=process_args)

    if options is None:
        raise ValueError("Invalid options specified, use --help for a list of valid options")

    dm = options.dm
    auto.setDeviceManager(dm)
    mochitest = MochiRemote(auto, dm, options)

    log = mochitest.log
    message_logger.logger = log
    mochitest.message_logger = message_logger

    productPieces = options.remoteProductName.split('.')
    if (productPieces is not None):
        auto.setProduct(productPieces[0])
    else:
        auto.setProduct(options.remoteProductName)
    auto.setAppName(options.remoteappname)

    logParent = os.path.dirname(options.remoteLogFile)
    dm.mkDir(logParent)
    auto.setRemoteLog(options.remoteLogFile)
    auto.setServerInfo(options.webServer, options.httpPort, options.sslPort)

    mochitest.printDeviceInfo()

    # Add Android version (SDK level) to mozinfo so that manifest entries
    # can be conditional on android_version.
    androidVersion = dm.shellCheckOutput(['getprop', 'ro.build.version.sdk'])
    log.info(
        "Android sdk version '%s'; will use this to filter manifests" %
        str(androidVersion))
    mozinfo.info['android_version'] = androidVersion

    deviceRoot = dm.deviceRoot
    if options.dmdPath:
        dmdLibrary = "libdmd.so"
        dmdPathOnDevice = os.path.join(deviceRoot, dmdLibrary)
        dm.removeFile(dmdPathOnDevice)
        dm.pushFile(os.path.join(options.dmdPath, dmdLibrary), dmdPathOnDevice)
        options.dmdPath = deviceRoot

    options.dumpOutputDirectory = deviceRoot

    procName = options.app.split('/')[-1]
    dm.killProcess(procName)

    if options.robocopIni != "":
        # turning buffering off as it's not used in robocop
        message_logger.buffering = False

        # sut may wait up to 300 s for a robocop am process before returning
        dm.default_timeout = 320
        if isinstance(options.manifestFile, TestManifest):
            mp = options.manifestFile
        else:
            mp = TestManifest(strict=False)
            mp.read(options.robocopIni)

        filters = []
        if options.totalChunks:
            filters.append(
                chunk_by_slice(options.thisChunk, options.totalChunks))
        robocop_tests = mp.active_tests(exists=False, filters=filters, **mozinfo.info)

        options.extraPrefs.append('browser.search.suggest.enabled=true')
        options.extraPrefs.append('browser.search.suggest.prompted=true')
        options.extraPrefs.append('layout.css.devPixelsPerPx=1.0')
        options.extraPrefs.append('browser.chrome.dynamictoolbar=false')
        options.extraPrefs.append('browser.snippets.enabled=false')
        options.extraPrefs.append('browser.casting.enabled=true')
        options.extraPrefs.append('extensions.autoupdate.enabled=false')

        if (options.dm_trans == 'adb' and options.robocopApk):
            dm._checkCmd(["install", "-r", options.robocopApk])

        if not options.autorun:
            # Force a single loop iteration. The iteration will start Fennec and
            # the httpd server, but not actually run a test.
            options.test_paths = [robocop_tests[0]['name']]

        retVal = None
        # Filtering tests
        active_tests = []
        for test in robocop_tests:
            if options.test_paths and test['name'] not in options.test_paths:
                continue

            if 'disabled' in test:
                log.info(
                    'TEST-INFO | skipping %s | %s' %
                    (test['name'], test['disabled']))
                continue

            active_tests.append(test)

        log.suite_start([t['name'] for t in active_tests])

        for test in active_tests:
            # When running in a loop, we need to create a fresh profile for
            # each cycle
            if mochitest.localProfile:
                options.profilePath = mochitest.localProfile
                os.system("rm -Rf %s" % options.profilePath)
                options.profilePath = None
                mochitest.localProfile = options.profilePath

            options.app = "am"
            mochitest.nsprLogName = "nspr-%s.log" % test['name']
            if options.autorun:
                # This launches a test (using "am instrument") and instructs
                # Fennec to /quit/ the browser (using Robocop:Quit) and to
                # /finish/ all opened activities.
                options.browserArgs = [
                    "instrument",
                    "-w",
                    "-e", "quit_and_finish", "1",
                    "-e", "deviceroot", deviceRoot,
                    "-e",
                    "class"]
                options.browserArgs.append(
                    "org.mozilla.gecko.tests.%s" %
                    test['name'].split('.java')[0])
                options.browserArgs.append(
                    "org.mozilla.roboexample.test/org.mozilla.gecko.FennecInstrumentationTestRunner")
            else:
                # This does not launch a test at all. It launches an activity
                # that starts Fennec and then waits indefinitely, since cat
                # never returns.
                options.browserArgs = ["start",
                                       "-n", "org.mozilla.roboexample.test/org.mozilla.gecko.LaunchFennecWithConfigurationActivity",
                                       "&&", "cat"]
                dm.default_timeout = sys.maxint # Forever.

                mochitest.log.info("")
                mochitest.log.info("Serving mochi.test Robocop root at http://%s:%s/tests/robocop/" %
                    (options.remoteWebServer, options.httpPort))
                mochitest.log.info("")

            # If the test is for checking the import from bookmarks then make
            # sure there is data to import
            if test['name'] == "testImportFromAndroid":

                # Get the OS so we can run the insert in the appropriate
                # database, following the correct table schema
                osInfo = dm.getInfo("os")
                devOS = " ".join(osInfo['os'])

                if ("pandaboard" in devOS):
                    delete = [
                        'execsu',
                        'sqlite3',
                        "/data/data/com.android.browser/databases/browser2.db \'delete from bookmarks where _id > 14;\'"]
                else:
                    delete = [
                        'execsu',
                        'sqlite3',
                        "/data/data/com.android.browser/databases/browser.db \'delete from bookmarks where _id > 14;\'"]
                if (options.dm_trans == "sut"):
                    dm._runCmds([{"cmd": " ".join(delete)}])

                # Insert the bookmarks
                log.info(
                    "Insert bookmarks in the default android browser database")
                for i in range(20):
                    if ("pandaboard" in devOS):
                        cmd = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser2.db 'insert or replace into bookmarks(_id,title,url,folder,parent,position) values (" +
                            str(
                                30 +
                                i) +
                            ",\"Bookmark" +
                            str(i) +
                            "\",\"http://www.bookmark" +
                            str(i) +
                            ".com\",0,1," +
                            str(
                                100 +
                                i) +
                            ");'"]
                    else:
                        cmd = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser.db 'insert into bookmarks(title,url,bookmark) values (\"Bookmark" +
                            str(i) +
                            "\",\"http://www.bookmark" +
                            str(i) +
                            ".com\",1);'"]
                    if (options.dm_trans == "sut"):
                        dm._runCmds([{"cmd": " ".join(cmd)}])
            try:
                screenShotDir = "/mnt/sdcard/Robotium-Screenshots"
                dm.removeDir(screenShotDir)
                dm.recordLogcat()
                result = mochitest.runTests(options)
                if result != 0:
                    log.error("runTests() exited with code %s" % result)
                log_result = mochitest.addLogData()
                if result != 0 or log_result != 0:
                    mochitest.printDeviceInfo(printLogcat=True)
                    mochitest.printScreenshots(screenShotDir)
                # Ensure earlier failures aren't overwritten by success on this
                # run
                if retVal is None or retVal == 0:
                    retVal = result
            except:
                log.error(
                    "Automation Error: Exception caught while running tests")
                traceback.print_exc()
                mochitest.stopServers()
                try:
                    mochitest.cleanup(options)
                except devicemanager.DMError:
                    # device error cleaning up... oh well!
                    pass
                retVal = 1
                break
            finally:
                # Clean-up added bookmarks
                if test['name'] == "testImportFromAndroid":
                    if ("pandaboard" in devOS):
                        cmd_del = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser2.db \'delete from bookmarks where _id > 14;\'"]
                    else:
                        cmd_del = [
                            'execsu',
                            'sqlite3',
                            "/data/data/com.android.browser/databases/browser.db \'delete from bookmarks where _id > 14;\'"]
                    if (options.dm_trans == "sut"):
                        dm._runCmds([{"cmd": " ".join(cmd_del)}])
        if retVal is None:
            log.warning("No tests run. Did you pass an invalid TEST_PATH?")
            retVal = 1
        else:
            # if we didn't have some kind of error running the tests, make
            # sure the tests actually passed
            print "INFO | runtests.py | Test summary: start."
            overallResult = mochitest.printLog()
            print "INFO | runtests.py | Test summary: end."
            if retVal == 0:
                retVal = overallResult
    else:
        mochitest.nsprLogName = "nspr.log"
        try:
            dm.recordLogcat()
            retVal = mochitest.runTests(options)
        except:
            log.error("Automation Error: Exception caught while running tests")
            traceback.print_exc()
            mochitest.stopServers()
            try:
                mochitest.cleanup(options)
            except devicemanager.DMError:
                # device error cleaning up... oh well!
                pass
            retVal = 1

        mochitest.printDeviceInfo(printLogcat=True)

    message_logger.finish()

    return retVal
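The chunking filter used for robocop can be isolated; a hedged sketch with a placeholder ini path and chunk numbers. exists=False matters here because robocop entries name Java test files rather than files on the local disk.

import mozinfo
from manifestparser import TestManifest
from manifestparser.filters import chunk_by_slice

mp = TestManifest(strict=False)
mp.read('robocop.ini')                       # placeholder manifest path

this_chunk, total_chunks = 2, 4              # e.g. --this-chunk 2 --total-chunks 4
filters = [chunk_by_slice(this_chunk, total_chunks)]

robocop_tests = mp.active_tests(exists=False, filters=filters, **mozinfo.info)
print('%d tests in chunk %d of %d' % (len(robocop_tests), this_chunk, total_chunks))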
Exemple #51
0
    def run_python_tests(self,
                         tests=None,
                         test_objects=None,
                         subsuite=None,
                         verbose=False,
                         jobs=None,
                         python=None,
                         exitfirst=False,
                         extra=None,
                         **kwargs):
        self._activate_test_virtualenvs(python)

        if test_objects is None:
            from moztest.resolve import TestResolver
            resolver = self._spawn(TestResolver)
            # If we were given test paths, try to find tests matching them.
            test_objects = resolver.resolve_tests(paths=tests, flavor='python')
        else:
            # We've received test_objects from |mach test|. We need to ignore
            # the subsuite because python-tests don't use this key like other
            # harnesses do and |mach test| doesn't realize this.
            subsuite = None

        mp = TestManifest()
        mp.tests.extend(test_objects)

        filters = []
        if subsuite == 'default':
            filters.append(mpf.subsuite(None))
        elif subsuite:
            filters.append(mpf.subsuite(subsuite))

        tests = mp.active_tests(filters=filters,
                                disabled=False,
                                python=self.virtualenv_manager.version_info[0],
                                **mozinfo.info)

        if not tests:
            submsg = "for subsuite '{}' ".format(subsuite) if subsuite else ""
            message = "TEST-UNEXPECTED-FAIL | No tests collected " + \
                      "{}(Not in PYTHON_UNITTEST_MANIFESTS?)".format(submsg)
            self.log(logging.WARN, 'python-test', {}, message)
            return 1

        parallel = []
        sequential = []
        os.environ.setdefault('PYTEST_ADDOPTS', '')

        if extra:
            os.environ['PYTEST_ADDOPTS'] += " " + " ".join(extra)

        if exitfirst:
            sequential = tests
            os.environ['PYTEST_ADDOPTS'] += " -x"
        else:
            for test in tests:
                if test.get('sequential'):
                    sequential.append(test)
                else:
                    parallel.append(test)

        self.jobs = jobs or cpu_count()
        self.terminate = False
        self.verbose = verbose

        return_code = 0

        def on_test_finished(result):
            output, ret, test_path = result

            for line in output:
                self.log(logging.INFO, 'python-test', {'line': line.rstrip()},
                         '{line}')

            if ret and not return_code:
                self.log(logging.ERROR, 'python-test', {
                    'test_path': test_path,
                    'ret': ret
                }, 'Setting retcode to {ret} from {test_path}')
            return return_code or ret

        with ThreadPoolExecutor(max_workers=self.jobs) as executor:
            futures = [
                executor.submit(self._run_python_test, test)
                for test in parallel
            ]

            try:
                for future in as_completed(futures):
                    return_code = on_test_finished(future.result())
            except KeyboardInterrupt:
                # Hack to force stop currently running threads.
                # https://gist.github.com/clchiou/f2608cbe54403edb0b13
                executor._threads.clear()
                thread._threads_queues.clear()
                raise

        for test in sequential:
            return_code = on_test_finished(self._run_python_test(test))
            if return_code and exitfirst:
                break

        self.log(logging.INFO, 'python-test', {'return_code': return_code},
                 'Return code from mach python-test: {return_code}')
        return return_code
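The parallel/sequential scheduling above boils down to a small pattern; a self-contained sketch with a dummy runner standing in for _run_python_test.

from concurrent.futures import ThreadPoolExecutor, as_completed

def run_one(test):
    # Stand-in for _run_python_test(); returns (output_lines, returncode, path).
    return (['ran %s' % test['path']], 0, test['path'])

tests = [{'path': 'a.py'}, {'path': 'b.py', 'sequential': True}, {'path': 'c.py'}]
parallel = [t for t in tests if not t.get('sequential')]
sequential = [t for t in tests if t.get('sequential')]

return_code = 0
with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(run_one, t) for t in parallel]
    for future in as_completed(futures):
        output, ret, path = future.result()
        return_code = return_code or ret

# Tests flagged sequential run one at a time after the parallel batch.
for t in sequential:
    output, ret, path = run_one(t)
    return_code = return_code or ret

print('return code: %d' % return_code)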
Exemple #52
0
    def run_test(self, test, expected='pass'):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()
            if self.emulator:
                self.marionette.emulator.wait_for_homescreen(self.marionette)
            # Retrieve capabilities for later use
            if not self._capabilities:
                self.capabilities

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })
        oop = testargs.get('oop', False)
        if isinstance(oop, basestring):
            oop = False if oop == 'false' else True

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                if self.shuffle:
                    random.shuffle(files)
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   device=self.device,
                                                   app=self.appName,
                                                   **mozinfo.info)
            unfiltered_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test['path'] not in [x['path'] for x in target_tests]:
                    test.setdefault('disabled', 'filtered by type (%s)' % self.type)
                    self.manifest_skipped_tests.append(test)

            for test in self.manifest_skipped_tests:
                self.logger.info('TEST-SKIP | %s | %s' % (
                    os.path.basename(test['path']),
                    test['disabled']))
                self.todo += 1

            if self.shuffle:
                random.shuffle(target_tests)
            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])
                self.run_test(i["path"], i["expected"])
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info('TEST-START %s' % os.path.basename(test))

        self.test_kwargs['expected'] = expected
        self.test_kwargs['oop'] = oop
        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name,
                                           filepath,
                                           suite,
                                           testloader,
                                           self.marionette,
                                           self.testvars,
                                           **self.test_kwargs)
                break

        if suite.countTestCases():
            runner = self.textrunnerclass(verbosity=3,
                                          marionette=self.marionette,
                                          capabilities=self.capabilities)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if hasattr(results, 'skipped'):
                self.todo += len(results.skipped)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-FAIL'))
            if hasattr(results, 'unexpectedSuccesses'):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
            if hasattr(results, 'expectedFailures'):
                self.passed += len(results.expectedFailures)
class ManifestParser():
    test_logs = []
    output = None

    def __init__(self, manifest, git_link, git_branch):
        # generate git path
        self._test_href = git_link + 'tree/' + git_branch + '/tests/python/gaia-ui-tests/gaiatest/tests/'

        # read manifest
        self.manifest = TestManifest(manifests=(manifest,))

        self._flame = self.manifest.active_tests(b2g=True, device='Flame')

        # read tree herder
        self._desktop = self.manifest.active_tests(b2g=True, device='desktop')

    def _get_test(self, manifest, test_name):
        for test in manifest:
            if test_name in test['name']:
                return test

    def table_row(self, test):
        name = test['name'].split('/')[-1]
        suite_name = test['relpath'].split('/')[0]
        app_name = os.path.basename(test['here'])
        path_name = test['here'].split('gaiatest')[1]

        link = self._test_href + test['relpath']
        run_link = self._test_href + suite_name
        class_link = self._test_href + os.path.dirname(test['relpath'])

        flame = Device('Flame')

        desktop = Device('desktop')

        flame_test = self._get_test(self._flame, name)
        desktop_test = self._get_test(self._desktop, name)

        if flame_test:
            if flame_test['expected'] == 'fail':
                flame.status = 'XFailed'
                flame.tooltip = 'Test is expected to fail'
            elif 'disabled' in flame_test.keys():
                flame.status = 'Disabled'
                flame.tooltip = flame_test['disabled']
            elif flame_test['expected'] == 'pass':
                flame.status = 'Enabled'
                flame.tooltip = 'Test is expected to pass'

        if desktop_test:
            if desktop_test.has_key('skip-if') and re.match('device\s*==\s*"desktop"', desktop_test['skip-if']):
                pass
            elif desktop_test['expected'] == 'fail':
                desktop.status = 'XFailed'
                desktop.tooltip = 'Test is expected to fail'
            elif 'disabled' in desktop_test.keys():
                desktop.status = 'Disabled'
                desktop.tooltip = desktop_test['disabled']
            elif desktop_test['expected'] == 'pass':
                desktop.status = 'Enabled'
                desktop.tooltip = 'Test is expected to pass'

        self.test_logs.append(
            html.tr([
                html.td(
                    html.a(name, href_=link, target_='_blank'),
                        class_='col-name', title=path_name),
                    html.td(
                        html.a(suite_name, href_=run_link, target_='_blank'),
                        class_='col-run'),
                    html.td(
                        html.a(app_name, href_=class_link, target_='_blank'),
                        class_='col-class'),
                    html.td(flame.status, class_='col-flame ' + flame.status, title_=flame.tooltip),
                    html.td(desktop.status, class_='col-desktop ' + desktop.status,
                            title_=desktop.tooltip)
                    ], class_='results-table-row')
        )

    def generate_html(self):

        # generate table entries
        for test in self.manifest.tests:
            self.table_row(test)

        # main HTML file
        doc = html.html(
            html.head(
                html.meta(charset='utf-8'),
                html.title('Test Report'),
                html.style(raw(pkg_resources.resource_string(
                    __name__, os.path.sep.join(['resources', 'style.css']))),
                    type='text/css')),
            html.body(
                html.script(raw(pkg_resources.resource_string(
                    __name__, os.path.sep.join(['resources', 'jquery.js']))),
                    type='text/javascript'),
                html.script(raw(pkg_resources.resource_string(
                    __name__, os.path.sep.join(['resources', 'main.js']))),
                    type='text/javascript'),
                html.h2('Test Status Mapping'),
                html.br(),
                html.p(
                    html.span('''The following table shows the functional Gaia UI tests and the targets they are currently run against.
                        Each test indicates the expected outcome from running the test.'''),
                    html.br(),
                    html.span('''Hover over the test name for the full path of the test file.
                Hover over the expected state to see if there's an associated reason'''),
                    html.br(),
                    html.br(),
                    html.span('Generated on: %s' % time.strftime("%c"))
                ),
                html.br(),
                html.table([html.thead(
                    html.tr([
                        html.th('Test', rowspan_="2", colspan_="3"),
                        html.th('Device', colspan_="1"),
                        html.th('Desktop', colspan_="1")
                    ]),
                    html.tr([
                        html.th('Flame'),
                        html.th('Desktop')
                    ]),
                    html.tr([
                        html.th('Name', class_='sortable', col='name'),
                        html.th('Run', class_='sortable', col='run'),
                        html.th('Class', class_='sortable', col='class'),
                        html.th('State', class_='sortable', col='flame'),
                        html.th('State', class_='sortable', col='desktop'),
                    ]), id='results-table-head'),
                    html.tbody(self.test_logs, id='results-table-body')], id='results-table')))

        return doc.unicode()
Exemple #54
0
    def add_test(self, test, expected='pass', test_container=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith('test_') and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })

        testarg_b2g = bool(testargs.get('b2g'))

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            filters = []
            if self.test_tags:
                filters.append(tags(self.test_tags))
            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   filters=filters,
                                                   device=self.device,
                                                   app=self.appName,
                                                   **mozinfo.info)
            if len(manifest_tests) == 0:
                self.logger.error("no tests to run using specified "
                                  "combination of filters: {}".format(
                                       manifest.fmt_filters()))

            unfiltered_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test['path'] not in [x['path'] for x in target_tests]:
                    test.setdefault('disabled', 'filtered by type (%s)' % self.type)
                    self.manifest_skipped_tests.append(test)

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                test_container = None
                if i.get('test_container') and testarg_b2g:
                    if i.get('test_container') == "true":
                        test_container = True
                    elif i.get('test_container') == "false":
                        test_container = False

                self.add_test(i["path"], i["expected"], test_container)
            return

        self.tests.append({'filepath': filepath, 'expected': expected, 'test_container': test_container})
Exemple #55
0
class CLI(mozrunner.CLI):
    """Command line interface to mozmill."""

    module = "mozmill"

    def __init__(self, args):

        # event handler plugin names
        self.handlers = {}
        for handler_class in handlers.handlers():
            name = getattr(handler_class, 'name', handler_class.__name__)
            self.handlers[name] = handler_class

        self.jsbridge_port = jsbridge.find_port()

        # add and parse options
        mozrunner.CLI.__init__(self, args)

        # Do not allow manifests and tests specified at the same time
        if self.options.manifests and self.options.tests:
            self.parser.error("Options %s and %s are mutually exclusive." %
                              (self.parser.get_option('-t'),
                               self.parser.get_option('-m')))

        # read tests from manifests (if any)
        self.manifest = TestManifest(manifests=self.options.manifests,
                                     strict=False)

        # expand user directory and check existence for the test
        for test in self.options.tests:
            testpath = os.path.expanduser(test)
            realpath = os.path.realpath(testpath)
            if not os.path.exists(testpath):
                raise Exception("Not a valid test file/directory: %s" % test)

            # collect the tests
            def testname(t):
                if os.path.isdir(realpath):
                    return os.path.join(test, os.path.relpath(t, testpath))
                return test
            tests = [{'name': testname(t), 'path': t}
                     for t in collect_tests(testpath)]
            self.manifest.tests.extend(tests)

        # list the tests and exit if specified
        if self.options.list_tests:
            for test in self.manifest.tests:
                print test['path']
            self.parser.exit()

        # instantiate event handler plugins
        self.event_handlers = []
        for name, handler_class in self.handlers.items():
            if name in self.options.disable:
                continue
            handler = handlers.instantiate_handler(handler_class, self.options)
            if handler is not None:
                self.event_handlers.append(handler)
        for handler in self.options.handlers:
            # user handlers
            try:
                handler_class = handlers.load_handler(handler)
            except BaseException as e:
                self.parser.error(str(e))
            _handler = handlers.instantiate_handler(handler_class,
                                                    self.options)
            if _handler is not None:
                self.event_handlers.append(_handler)

        # if in manual mode, ensure we're interactive
        if self.options.manual:
            self.options.interactive = True

    def add_options(self, parser):
        """Add command line options."""

        group = OptionGroup(parser, 'MozRunner options')
        mozrunner.CLI.add_options(self, group)
        parser.add_option_group(group)

        group = OptionGroup(parser, 'MozMill options')
        group.add_option("-t", "--test",
                         dest="tests",
                         default=[],
                         action='append',
                         help='Run test')
        group.add_option("--timeout",
                         dest="timeout",
                         type="float",
                         default=JSBRIDGE_TIMEOUT,
                         help="Seconds before harness timeout if no "
                              "communication is taking place")
        group.add_option("--restart",
                         dest='restart',
                         action='store_true',
                         default=False,
                         help="Restart the application and reset the "
                              "profile between each test file")
        group.add_option("-m", "--manifest",
                         dest='manifests',
                         action='append',
                         metavar='MANIFEST',
                         help='test manifest .ini file')
        group.add_option('-D', '--debug', dest="debug",
                         action="store_true",
                         default=False,
                         help="debug mode"
                         )
        group.add_option('--list-tests',
                         dest='list_tests',
                         action='store_true',
                         default=False,
                         help="List test files that would be run, in order")
        group.add_option('--handler',
                         dest='handlers',
                         action='append',
                         default=[],
                         metavar='PATH:CLASS',
                         help="Specify an event handler given a file PATH "
                              "and the CLASS in the file")
        group.add_option('--screenshots-path',
                         dest='screenshots_path',
                         metavar='PATH',
                         help='Path of directory to use for screenshots')

        if self.handlers:
            group.add_option('--disable',
                             dest='disable',
                             action='append',
                             default=[],
                             metavar='HANDLER',
                             help="Disable a default event handler (%s)" %
                                  ','.join(self.handlers.keys()))
        group.add_option('--manual', dest='manual',
                         action='store_true', default=False,
                         help="start the browser without running any tests")

        parser.add_option_group(group)

        # add option for included event handlers
        for name, handler_class in self.handlers.items():
            if hasattr(handler_class, 'add_options'):
                group = OptionGroup(parser, '%s options' % name,
                                    description=getattr(handler_class,
                                                        '__doc__', None))
                handler_class.add_options(group)
                parser.add_option_group(group)

    def profile_args(self):
        """Setup profile settings for the profile object.

        Returns arguments needed to make a profile object from
        this command-line interface.

        """
        profile_args = mozrunner.CLI.profile_args(self)
        profile_args.setdefault('addons', []).extend(ADDONS)

        profile_args['preferences'] = {
            'extensions.jsbridge.port': self.jsbridge_port,
            'focusmanager.testmode': True
        }

        if self.options.debug:
            prefs = profile_args['preferences']
            prefs['extensions.checkCompatibility'] = False
            prefs['extensions.jsbridge.log'] = True
            prefs['javascript.options.strict'] = True

        return profile_args

    def command_args(self):
        """Arguments to the application to be run."""

        cmdargs = mozrunner.CLI.command_args(self)
        if self.options.debug and '-jsconsole' not in cmdargs:
            cmdargs.append('-jsconsole')

        return cmdargs

    def run(self):
        """CLI front end to run mozmill."""

        # make sure you have tests to run
        if (not self.manifest.tests) and (not self.options.manual):
            self.parser.error("No tests found. Please specify with -t or -m")

        # create a Mozrunner
        runner = self.create_runner()

        # create an instance of MozMill
        mozmill = MozMill(runner, self.jsbridge_port,
                          jsbridge_timeout=self.options.timeout,
                          handlers=self.event_handlers,
                          screenshots_path=self.options.screenshots_path)

        # set debugger arguments
        mozmill.set_debugger(*self.debugger_arguments())

        # load the mozmill + jsbridge extension but don't run any tests
        # (for debugging)
        if self.options.manual:
            try:
                mozmill.start_runner()
                mozmill.runner.wait()
            except (JSBridgeDisconnectError, KeyboardInterrupt):
                pass
            return

        # run the tests
        exception = None
        tests = self.manifest.active_tests(**mozinfo.info)
        try:
            mozmill.run(tests, self.options.restart)
        except:
            exception_type, exception, tb = sys.exc_info()

        # do whatever reporting you're going to do
        results = mozmill.finish(fatal=exception is not None)

        # exit if something bad happened
        if exception:
            traceback.print_exception(exception_type, exception, tb)
        if exception or results.fails:
            sys.exit(1)

        # return results on success [currently unused]
        return results
Exemple #56
0
    def add_test(self, test, expected='pass', oop=None):
        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if ((filename.startswith('test_') or filename.startswith('browser_')) and
                        (filename.endswith('.py') or filename.endswith('.js'))):
                        filepath = os.path.join(root, filename)
                        self.add_test(filepath)
            return

        testargs = {}
        if self.type is not None:
            testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
            for atype in testtypes:
                if atype.startswith('+'):
                    testargs.update({ atype[1:]: 'true' })
                elif atype.startswith('-'):
                    testargs.update({ atype[1:]: 'false' })
                else:
                    testargs.update({ atype: 'true' })

        # testarg_oop = either None, 'true' or 'false'.
        testarg_oop = testargs.get('oop')

        file_ext = os.path.splitext(os.path.split(filepath)[-1])[1]

        if file_ext == '.ini':
            manifest = TestManifest()
            manifest.read(filepath)

            manifest_tests = manifest.active_tests(exists=False,
                                                   disabled=True,
                                                   device=self.device,
                                                   app=self.appName,
                                                   **mozinfo.info)
            unfiltered_tests = []
            for test in manifest_tests:
                if test.get('disabled'):
                    self.manifest_skipped_tests.append(test)
                else:
                    unfiltered_tests.append(test)

            # Don't filter tests with "oop" flag because manifest parser can't
            # handle it well.
            if testarg_oop is not None:
                del testargs['oop']

            target_tests = manifest.get(tests=unfiltered_tests, **testargs)
            for test in unfiltered_tests:
                if test['path'] not in [x['path'] for x in target_tests]:
                    test.setdefault('disabled', 'filtered by type (%s)' % self.type)
                    self.manifest_skipped_tests.append(test)

            for test in self.manifest_skipped_tests:
                self.logger.info('TEST-SKIP | %s | %s' % (
                    os.path.basename(test['path']),
                    test['disabled']))
                self.todo += 1

            for i in target_tests:
                if not os.path.exists(i["path"]):
                    raise IOError("test file: %s does not exist" % i["path"])

                # manifest_oop is either 'false', 'true' or 'both'.  Anything
                # else implies 'false'.
                manifest_oop = i.get('oop', 'false')

                # We only add an oop test when the following conditions are met:
                # 1) It's written in JavaScript, because only
                #    MarionetteJSTestCase supports oop mode.
                # 2) We're running with "--type=+oop" or without "--type=-oop",
                #    which means testarg_oop is either None or 'true' and is not
                #    'false'.
                # 3) When no "--type=[+-]oop" is applied, all active tests are
                #    included in target_tests, so we must keep only those actually
                #    capable of running in oop mode. Besides, oop tests must be
                #    explicitly opted in for backward compatibility, so the test's
                #    manifest_oop must be either 'both' or 'true'.
                file_ext = os.path.splitext(os.path.split(i['path'])[-1])[-1]
                if (file_ext == '.js' and
                    testarg_oop != 'false' and
                    (manifest_oop == 'both' or manifest_oop == 'true')):
                    self.add_test(i["path"], i["expected"], True)

                # We only add an in-process test when the following conditions
                # are met:
                # 1) We're running with "--type=-oop" or without "--type=+oop",
                #    which means testarg_oop is either None or 'false' and is not
                #    'true'.
                # 2) When no "--type=[+-]oop" is applied, all active tests are
                #    included in target_tests, so we must keep only those actually
                #    capable of running in in-process mode.
                if (testarg_oop != 'true' and
                    (manifest_oop == 'both' or manifest_oop != 'true')):
                    self.add_test(i["path"], i["expected"], False)
            return

        if oop is None:
            # This test is added by directory enumeration or directly specified
            # in argument list.  We have no manifest information here so we just
            # respect the "--type=[+-]oop" argument here.
            oop = file_ext == '.js' and testarg_oop == 'true'

        self.tests.append({'filepath': filepath, 'expected': expected, 'oop': oop})
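
The comment blocks above reduce to a small decision table: a test is queued out-of-process only when it is a JavaScript test, "--type=-oop" was not passed, and its manifest entry opts in with oop = true or oop = both; it is queued in-process unless "--type=+oop" was passed or the manifest marks it oop-only. A minimal sketch of that table, using a hypothetical select_modes() helper in place of the runner's add_test() calls:

def select_modes(file_ext, testarg_oop, manifest_oop='false'):
    """Return the set of modes a test should be queued in.

    testarg_oop comes from "--type=[+-]oop" and is None, 'true' or 'false';
    manifest_oop comes from the manifest entry and is 'true', 'false' or 'both'.
    Hypothetical helper, for illustration only.
    """
    modes = set()
    # Out-of-process: JS only, not disabled on the command line, opted in by the manifest.
    if (file_ext == '.js' and testarg_oop != 'false'
            and manifest_oop in ('true', 'both')):
        modes.add('oop')
    # In-process: neither the command line nor the manifest forces oop-only.
    if testarg_oop != 'true' and manifest_oop != 'true':
        modes.add('in-process')
    return modes

# select_modes('.js', None, 'both')   -> {'oop', 'in-process'}
# select_modes('.py', 'true', 'both') -> set()  (oop mode is JS-only)
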
Exemple #57
0
    def python_test(self,
                    tests=[],
                    test_objects=None,
                    subsuite=None,
                    verbose=False,
                    path_only=False,
                    stop=False,
                    jobs=1):
        self._activate_virtualenv()

        def find_tests_by_path():
            import glob
            files = []
            for t in tests:
                if t.endswith('.py') and os.path.isfile(t):
                    files.append(t)
                elif os.path.isdir(t):
                    for root, _, _ in os.walk(t):
                        files += glob.glob(mozpath.join(root, 'test*.py'))
                        files += glob.glob(mozpath.join(root, 'unit*.py'))
                else:
                    self.log(logging.WARN, 'python-test',
                             {'test': t},
                             'TEST-UNEXPECTED-FAIL | Invalid test: {test}')
                    if stop:
                        break
            return files

        # Python's unittest, and in particular discover, has problems with
        # clashing namespaces when importing multiple test modules. What follows
        # is a simple way to keep environments separate, at the price of
        # launching Python multiple times. Most tests are run via mozunit,
        # which produces output in the format Mozilla infrastructure expects.
        # Some tests are run via pytest.
        if test_objects is None:
            # If we're not being called from `mach test`, do our own
            # test resolution.
            if path_only:
                if tests:
                    test_objects = [{'path': p} for p in find_tests_by_path()]
                else:
                    self.log(logging.WARN, 'python-test', {},
                             'TEST-UNEXPECTED-FAIL | No tests specified')
                    test_objects = []
            else:
                from mozbuild.testing import TestResolver
                resolver = self._spawn(TestResolver)
                if tests:
                    # If we were given test paths, try to find tests matching them.
                    test_objects = resolver.resolve_tests(paths=tests,
                                                          flavor='python')
                else:
                    # Otherwise just run everything in PYTHON_UNITTEST_MANIFESTS
                    test_objects = resolver.resolve_tests(flavor='python')

        if not test_objects:
            message = 'TEST-UNEXPECTED-FAIL | No tests collected'
            if not path_only:
                message += ' (Not in PYTHON_UNITTEST_MANIFESTS? Try --path-only?)'
            self.log(logging.WARN, 'python-test', {}, message)
            return 1

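        # mozinfo.info supplies keys such as 'os', 'bits' and 'debug', so the
        # manifest's skip-if/run-if annotations are evaluated against the host.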
        mp = TestManifest()
        mp.tests.extend(test_objects)
        tests = mp.active_tests(disabled=False, **mozinfo.info)

        self.jobs = jobs
        self.terminate = False
        self.verbose = verbose

        return_code = 0
        with ThreadPoolExecutor(max_workers=self.jobs) as executor:
            futures = [executor.submit(self._run_python_test, test['path'])
                       for test in tests]

            try:
                for future in as_completed(futures):
                    output, ret, test_path = future.result()

                    for line in output:
                        self.log(logging.INFO, 'python-test', {'line': line.rstrip()}, '{line}')

                    if ret and not return_code:
                        self.log(logging.ERROR, 'python-test', {'test_path': test_path, 'ret': ret}, 'Setting retcode to {ret} from {test_path}')
                    return_code = return_code or ret
            except KeyboardInterrupt:
                # Hack to force stop currently running threads.
                # https://gist.github.com/clchiou/f2608cbe54403edb0b13
                executor._threads.clear()
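                # Note: 'thread' below is presumably concurrent.futures.thread,
                # imported at module level (not shown in this excerpt).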
                thread._threads_queues.clear()
                raise

        self.log(logging.INFO, 'python-test', {'return_code': return_code}, 'Return code from mach python-test: {return_code}')
        return return_code
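
The executor loop above is the whole parallelism story for this command: each test file is handed to a worker that launches a fresh Python interpreter, and the parent collects output and return codes as the futures complete, keeping the first non-zero return code. A stripped-down sketch of the same pattern, with a hypothetical run_one() standing in for _run_python_test():

import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed

def run_one(test_path):
    # Hypothetical stand-in for _run_python_test(): run one test file in its
    # own interpreter and return (output_lines, returncode, path).
    proc = subprocess.run([sys.executable, test_path],
                          capture_output=True, text=True)
    return proc.stdout.splitlines(), proc.returncode, test_path

def run_all(test_paths, jobs=1):
    return_code = 0
    with ThreadPoolExecutor(max_workers=jobs) as executor:
        futures = [executor.submit(run_one, p) for p in test_paths]
        for future in as_completed(futures):
            output, ret, path = future.result()
            for line in output:
                print(line)
            # The first non-zero return code wins, as in the command above.
            return_code = return_code or ret
    return return_code
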
Exemple #58
0
    def run_test(self, test, testtype):
        if not self.httpd:
            print "starting httpd"
            self.start_httpd()

        if not self.marionette:
            self.start_marionette()

        filepath = os.path.abspath(test)

        if os.path.isdir(filepath):
            for root, dirs, files in os.walk(filepath):
                for filename in files:
                    if (filename.startswith("test_") or filename.startswith("browser_")) and (
                        filename.endswith(".py") or filename.endswith(".js")
                    ):
                        filepath = os.path.join(root, filename)
                        self.run_test(filepath, testtype)
                        if self.marionette.check_for_crash():
                            return
            return

        mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])

        testloader = unittest.TestLoader()
        suite = unittest.TestSuite()

        if file_ext == ".ini":
            testargs = {}
            if testtype is not None:
                testtypes = testtype.replace("+", " +").replace("-", " -").split()
                for atype in testtypes:
                    if atype.startswith("+"):
                        testargs.update({atype[1:]: "true"})
                    elif atype.startswith("-"):
                        testargs.update({atype[1:]: "false"})
                    else:
                        testargs.update({atype: "true"})

            manifest = TestManifest()
            manifest.read(filepath)

            if self.perf:
                if self.perfserv is None:
                    self.perfserv = manifest.get("perfserv")[0]
                machine_name = socket.gethostname()
                try:
                    machine_name = manifest.get("machine_name")[0]
                except IndexError:
                    self.logger.info("Using machine_name: %s" % machine_name)
                os_name = platform.system()
                os_version = platform.release()
                self.perfrequest = datazilla.DatazillaRequest(
                    server=self.perfserv,
                    machine_name=machine_name,
                    os=os_name,
                    os_version=os_version,
                    platform=manifest.get("platform")[0],
                    build_name=manifest.get("build_name")[0],
                    version=manifest.get("version")[0],
                    revision=self.revision,
                    branch=manifest.get("branch")[0],
                    id=os.getenv("BUILD_ID"),
                    test_date=int(time.time()),
                )

            manifest_tests = manifest.active_tests(disabled=False)

            for i in manifest.get(tests=manifest_tests, **testargs):
                self.run_test(i["path"], testtype)
                if self.marionette.check_for_crash():
                    return
            return

        self.logger.info("TEST-START %s" % os.path.basename(test))

        for handler in self.test_handlers:
            if handler.match(os.path.basename(test)):
                handler.add_tests_to_suite(mod_name, filepath, suite, testloader, self.marionette, self.testvars)
                break

        if suite.countTestCases():
            runner = MarionetteTextTestRunner(verbosity=3, perf=self.perf, marionette=self.marionette)
            results = runner.run(suite)
            self.results.append(results)

            self.failed += len(results.failures) + len(results.errors)
            if results.perfdata and self.perf:
                self.perfrequest.add_datazilla_result(results.perfdata)
            if hasattr(results, "skipped"):
                self.todo += len(results.skipped) + len(results.expectedFailures)
            self.passed += results.passed
            for failure in results.failures + results.errors:
                self.failures.append((results.getInfo(failure[0]), failure[1], "TEST-UNEXPECTED-FAIL"))
            if hasattr(results, "unexpectedSuccess"):
                self.failed += len(results.unexpectedSuccesses)
                for failure in results.unexpectedSuccesses:
                    self.failures.append((results.getInfo(failure[0]), failure[1], "TEST-UNEXPECTED-PASS"))
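
For the .ini branch above, the filtering is delegated entirely to manifestparser: active_tests() drops disabled entries, manifest.get(tests=..., **testargs) keeps only the entries whose keys match the parsed "--type" flags, and each surviving path is fed back into run_test(). A minimal sketch of that filtering step against a hypothetical manifest (the file name, section names and the b2g key are illustrative only):

# tests.ini (hypothetical):
#
#   [test_app.py]
#   b2g = true
#
#   [test_desktop.py]
#   b2g = false
#
# With "--type=+b2g" the runner builds testargs = {'b2g': 'true'}, so only
# test_app.py survives the get() call below.

from manifestparser import TestManifest

manifest = TestManifest(manifests=('tests.ini',), strict=False)
active = manifest.active_tests(exists=False, disabled=False)
selected = manifest.get(tests=active, b2g='true')
for test in selected:
    print(test['path'])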