def parse_webdriver_expectations(host, port):
    expectations_path = port.path_to_webdriver_expectations_file()
    file_contents = host.filesystem.read_text_file(expectations_path)
    expectations_dict = {expectations_path: file_contents}
    expectations = test_expectations.TestExpectations(
        port, expectations_dict=expectations_dict)
    return expectations
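
A minimal usage sketch (not from the original source) for the helper above, assuming a MockHost whose port exposes path_to_webdriver_expectations_file() and whose mock filesystem contains that file; the 'test' port name is an illustrative assumption.

# Hypothetical usage sketch: parse WebDriver expectations for a mock port.
host = MockHost()
port = host.port_factory.get('test')
webdriver_expectations = parse_webdriver_expectations(host, port)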
    def test_summarized_results_with_iterations(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(self.port)
        initial_results = test_run_results.TestRunResults(
            expectations, 3, None)
        initial_results.add(get_result(test_name, ResultType.Crash), False,
                            False)
        initial_results.add(get_result(test_name, ResultType.Failure), False,
                            False)
        initial_results.add(get_result(test_name, ResultType.Timeout), False,
                            False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 2, None)
        ]
        all_retry_results[0].add(get_result(test_name, ResultType.Failure),
                                 False, False)
        all_retry_results[0].add(get_result(test_name, ResultType.Failure),
                                 False, False)

        summary = test_run_results.summarize_results(self.port, expectations,
                                                     initial_results,
                                                     all_retry_results)
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH FAIL TIMEOUT FAIL FAIL')
        self.assertEquals(summary['num_flaky'], 0)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 1)
    def test_timeout_then_unexpected_pass(self):
        test_name = 'failures/expected/text.html'
        expectations = test_expectations.TestExpectations(self.port)
        initial_results = test_run_results.TestRunResults(
            expectations, 1, None)
        initial_results.add(
            get_result(test_name, ResultType.Timeout, run_time=1), False,
            False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None)
        ]
        all_retry_results[0].add(
            get_result(test_name, ResultType.Failure, run_time=0.1), False,
            False)
        all_retry_results[1].add(
            get_result(test_name, ResultType.Pass, run_time=0.1), False, False)
        all_retry_results[2].add(
            get_result(test_name, ResultType.Pass, run_time=0.1), False, False)
        summary = test_run_results.summarize_results(self.port, expectations,
                                                     initial_results,
                                                     all_retry_results)
        self.assertIn('is_unexpected',
                      summary['tests']['failures']['expected']['text.html'])
        self.assertEquals(
            summary['tests']['failures']['expected']['text.html']['expected'],
            'FAIL')
        self.assertEquals(
            summary['tests']['failures']['expected']['text.html']['actual'],
            'TIMEOUT FAIL PASS PASS')
        self.assertEquals(summary['num_passes'], 1)
        self.assertEquals(summary['num_regressions'], 0)
        self.assertEquals(summary['num_flaky'], 0)
    def test_summarized_results_flaky_pass_after_first_retry(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(self.port)
        initial_results = test_run_results.TestRunResults(
            expectations, 1, None)
        initial_results.add(get_result(test_name, ResultType.Crash), False,
                            False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None),
            test_run_results.TestRunResults(expectations, 1, None)
        ]
        all_retry_results[0].add(get_result(test_name, ResultType.Timeout),
                                 False, False)
        all_retry_results[1].add(get_result(test_name, ResultType.Pass), True,
                                 False)
        all_retry_results[2].add(get_result(test_name, ResultType.Pass), True,
                                 False)
        summary = test_run_results.summarize_results(self.port, expectations,
                                                     initial_results,
                                                     all_retry_results)
        self.assertTrue(
            'is_unexpected' not in summary['tests']['passes']['text.html'])
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH TIMEOUT PASS PASS')
        self.assertEquals(summary['num_flaky'], 1)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 0)
Example #5
    def test_summarized_results_flaky_pass_after_first_retry(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(
            self.port, [test_name])
        initial_results = test_run_results.TestRunResults(expectations, 1)
        initial_results.add(get_result(test_name, test_expectations.CRASH),
                            False, False)
        all_retry_results = [
            test_run_results.TestRunResults(expectations, 1),
            test_run_results.TestRunResults(expectations, 1),
            test_run_results.TestRunResults(expectations, 1)
        ]
        all_retry_results[0].add(
            get_result(test_name, test_expectations.TIMEOUT), False, False)
        all_retry_results[1].add(get_result(test_name, test_expectations.PASS),
                                 True, False)
        all_retry_results[2].add(get_result(test_name, test_expectations.PASS),
                                 True, False)
        summary = test_run_results.summarize_results(
            self.port,
            expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry=True)
        self.assertTrue(
            'is_unexpected' not in summary['tests']['passes']['text.html'])
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH TIMEOUT PASS PASS')
        self.assertEquals(summary['num_flaky'], 1)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 0)
    def setUp(self):
        expectations = test_expectations.TestExpectations(
            MockHost().port_factory.get(port_name='test'))
        self.results = test_run_results.TestRunResults(expectations, 1, None)
        self.test = get_result('failures/expected/text.html',
                               ResultType.Timeout,
                               run_time=1)
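    # Hypothetical illustration (not in the original source): one way a test
    # method might consume the fixtures prepared in setUp above. The two
    # False flags mirror the positional arguments passed to add() throughout
    # these snippets; the method name is an assumption.
    def test_records_prepared_timeout(self):
        self.results.add(self.test, False, False)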
Example #7
    def test_summarized_results_with_iterations(self):
        test_name = 'passes/text.html'
        expectations = test_expectations.TestExpectations(
            self.port, [test_name])
        initial_results = test_run_results.TestRunResults(expectations, 3)
        initial_results.add(get_result(test_name, test_expectations.CRASH),
                            False, False)
        initial_results.add(get_result(test_name, test_expectations.IMAGE),
                            False, False)
        initial_results.add(get_result(test_name, test_expectations.TIMEOUT),
                            False, False)
        all_retry_results = [test_run_results.TestRunResults(expectations, 2)]
        all_retry_results[0].add(get_result(test_name, test_expectations.TEXT),
                                 False, False)
        all_retry_results[0].add(get_result(test_name, test_expectations.LEAK),
                                 False, False)

        summary = test_run_results.summarize_results(
            self.port,
            expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry=True)
        print(summary)
        self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                          'PASS')
        self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                          'CRASH IMAGE TIMEOUT TEXT LEAK')
        self.assertEquals(summary['num_flaky'], 0)
        self.assertEquals(summary['num_passes'], 0)
        self.assertEquals(summary['num_regressions'], 1)
Example #8
def run_results(port, extra_skipped_tests=None):
    tests = [
        'passes/text.html', 'failures/expected/timeout.html',
        'failures/expected/crash.html', 'failures/expected/leak.html',
        'failures/expected/keyboard.html', 'failures/expected/audio.html',
        'failures/expected/text.html', 'passes/skipped/skip.html'
    ]
    expectations = test_expectations.TestExpectations(port, tests)
    if extra_skipped_tests:
        expectations.add_extra_skipped_tests(extra_skipped_tests)
    return test_run_results.TestRunResults(expectations, len(tests))
def lint(host, options):
    ports_to_lint = [
        host.port_factory.get(name)
        for name in host.port_factory.all_port_names(options.platform)
    ]
    files_linted = set()

    # In general, the set of TestExpectation files should be the same for
    # all ports. However, the method used to list expectations files is
    # in Port, and the TestExpectations constructor takes a Port.
    # Perhaps this function could be changed to just use one Port
    # (the default Port for this host) and it would work the same.

    failures = []
    wpt_overrides_exps_path = host.filesystem.join(
        ports_to_lint[0].web_tests_dir(), 'WPTOverrideExpectations')
    web_gpu_exps_path = host.filesystem.join(ports_to_lint[0].web_tests_dir(),
                                             'WebGPUExpectations')
    paths = [wpt_overrides_exps_path, web_gpu_exps_path]
    expectations_dict = {}
    for path in paths:
        if host.filesystem.exists(path):
            expectations_dict[path] = host.filesystem.read_text_file(path)
    for port in ports_to_lint:
        expectations_dict.update(port.all_expectations_dict())
        for path in port.extra_expectations_files():
            if host.filesystem.exists(path):
                expectations_dict[path] = host.filesystem.read_text_file(path)
    for path, content in expectations_dict.items():
        try:
            test_expectations.TestExpectations(
                ports_to_lint[0],
                expectations_dict={path: content},
                is_lint_mode=True)
        except test_expectations.ParseError as error:
            _log.error('')
            for warning in error.warnings:
                _log.error(warning)
                failures.append('%s: %s' % (path, warning))
                _log.error('')
        for lineno, line in enumerate(content.split('\n'), 1):
            if line.strip().startswith('Bug('):
                error = ((
                    "%s:%d Expectation '%s' has the Bug(...) token, "
                    "The token has been removed in the new expectations format"
                ) % (host.filesystem.basename(path), lineno, line))
                _log.error(error)
                failures.append(error)
                _log.error('')

    return failures
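
A hedged usage sketch (not from the original source) for the lint() function above: MockHost and optparse.Values follow the patterns used elsewhere in these snippets, and 'platform': None (lint the expectations known to every port) is an assumption.

# Hypothetical usage sketch for lint().
import optparse

host = MockHost()
options = optparse.Values({'platform': None})
failures = lint(host, options)
if failures:
    print('Found %d expectation lint failure(s)' % len(failures))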
def run_results(port, extra_skipped_tests=None):
    tests = [
        'passes/text.html', 'failures/expected/timeout.html',
        'failures/expected/crash.html', 'failures/expected/leak.html',
        'failures/expected/keyboard.html', 'failures/expected/audio.html',
        'failures/expected/text.html', 'passes/skipped/skip.html'
    ]
    expectations = test_expectations.TestExpectations(port)
    if extra_skipped_tests:
        extra_expectations = '# results: [ Skip ]'
        for test in extra_skipped_tests:
            extra_expectations += '\n%s [ Skip ]' % test
        expectations.merge_raw_expectations(extra_expectations)
    return test_run_results.TestRunResults(expectations, len(tests), None)
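
A hedged usage sketch (not from the original source) for the run_results() helper above: build the results container for a mock port, record one unexpected failure, and summarize it; the 'test' port name and the empty retry list are assumptions.

# Hypothetical usage sketch for run_results().
port = MockHost().port_factory.get('test')
expectations = test_expectations.TestExpectations(port)
results = run_results(port)
results.add(get_result('passes/text.html', ResultType.Failure), False, False)
summary = test_run_results.summarize_results(port, expectations, results, [])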
Example #11
    def test_look_for_new_crash_logs(self):
        def get_manager():
            host = MockHost()
            port = host.port_factory.get('test-mac-mac10.10')
            manager = Manager(port,
                              options=optparse.Values({
                                  'test_list': None,
                                  'http': True,
                                  'max_locked_shards': 1
                              }),
                              printer=FakePrinter())
            return manager

        host = MockHost()
        port = host.port_factory.get('test-mac-mac10.10')
        tests = ['failures/expected/crash.html']
        expectations = test_expectations.TestExpectations(port)
        run_results = TestRunResults(expectations, len(tests), None)
        manager = get_manager()
        manager._look_for_new_crash_logs(run_results, time.time())
    def test_timeout_then_unexpected_pass(self):
        test_name = 'failures/expected/text.html'
        expectations = test_expectations.TestExpectations(self.port, [test_name])
        initial_results = test_run_results.TestRunResults(expectations, 1)
        initial_results.add(get_result(test_name, test_expectations.TIMEOUT, run_time=1), False, False)
        all_retry_results = [test_run_results.TestRunResults(expectations, 1),
                             test_run_results.TestRunResults(expectations, 1),
                             test_run_results.TestRunResults(expectations, 1)]
        all_retry_results[0].add(get_result(test_name, test_expectations.LEAK, run_time=0.1), False, False)
        all_retry_results[1].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
        all_retry_results[2].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
        summary = test_run_results.summarize_results(
            self.port, expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry=True)
        self.assertIn('is_unexpected', summary['tests']['failures']['expected']['text.html'])
        self.assertEquals(summary['tests']['failures']['expected']['text.html']['expected'], 'FAIL')
        self.assertEquals(summary['tests']['failures']['expected']['text.html']['actual'], 'TIMEOUT LEAK PASS PASS')
        self.assertEquals(summary['num_passes'], 1)
        self.assertEquals(summary['num_regressions'], 0)
        self.assertEquals(summary['num_flaky'], 0)
Example #13
def lint(host, options):
    ports_to_lint = [
        host.port_factory.get(name)
        for name in host.port_factory.all_port_names(options.platform)
    ]
    files_linted = set()

    # In general, the set of TestExpectation files should be the same for
    # all ports. However, the method used to list expectations files is
    # in Port, and the TestExpectations constructor takes a Port.
    # Perhaps this function could be changed to just use one Port
    # (the default Port for this host) and it would work the same.

    failures = []
    for port_to_lint in ports_to_lint:
        expectations_dict = port_to_lint.all_expectations_dict()

        for path in port_to_lint.extra_expectations_files():
            if host.filesystem.exists(path):
                expectations_dict[path] = host.filesystem.read_text_file(path)

        for expectations_file in expectations_dict:

            if expectations_file in files_linted:
                continue

            try:
                test_expectations.TestExpectations(
                    port_to_lint,
                    expectations_dict={
                        expectations_file: expectations_dict[expectations_file]
                    },
                    is_lint_mode=True)
            except test_expectations.ParseError as error:
                _log.error('')
                for warning in error.warnings:
                    _log.error(warning)
                    failures.append('%s: %s' % (expectations_file, warning))
                _log.error('')
            files_linted.add(expectations_file)
    return failures
    def test_skip_tests_expectations(self):
        """Tests that tests are skipped based on to expectations and options."""
        host = MockHost()
        port = host.port_factory.get('test-win-win7', None)

        all_tests = [
            'fast/css/passes.html',
            'fast/css/fails.html',
            'fast/css/times_out.html',
            'fast/css/skip.html',
        ]

        # Patch port.tests() to return our tests
        port.tests = lambda paths: paths or all_tests

        options = optparse.Values({
            'no_expectations': False,
            'enable_sanitizer': False,
            'skipped': 'default',
            'skip_timeouts': False,
            'skip_failing_tests': False,
        })
        finder = web_test_finder.WebTestFinder(port, options)

        expectations = test_expectations.TestExpectations(port)
        expectations.merge_raw_expectations(
            ('# results: [ Failure Timeout Skip ]'
             '\nfast/css/fails.html [ Failure ]'
             '\nfast/css/times_out.html [ Timeout ]'
             '\nfast/css/skip.html [ Skip ]'))

        # When run with default settings, we only skip the tests marked Skip.
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(tests, set(['fast/css/skip.html']))

        # Specify test on the command line; by default should not skip.
        tests = finder.skip_tests(['fast/css/skip.html'], all_tests,
                                  expectations)
        self.assertEqual(tests, set())

        # Specify test on the command line, but always skip.
        finder._options.skipped = 'always'
        tests = finder.skip_tests(['fast/css/skip.html'], all_tests,
                                  expectations)
        self.assertEqual(tests, set(['fast/css/skip.html']))
        finder._options.skipped = 'default'

        # Only run skip tests, aka skip all non-skipped tests.
        finder._options.skipped = 'only'
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(
            tests,
            set([
                'fast/css/passes.html', 'fast/css/fails.html',
                'fast/css/times_out.html'
            ]))
        finder._options.skipped = 'default'

        # Ignore any skip entries, aka never skip anything.
        finder._options.skipped = 'ignore'
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(tests, set())
        finder._options.skipped = 'default'

        # Skip tests that are marked TIMEOUT.
        finder._options.skip_timeouts = True
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(
            tests, set(['fast/css/times_out.html', 'fast/css/skip.html']))
        finder._options.skip_timeouts = False

        # Skip tests that are marked FAILURE
        finder._options.skip_failing_tests = True
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(tests,
                         set(['fast/css/fails.html', 'fast/css/skip.html']))
        finder._options.skip_failing_tests = False

        # Disable expectations entirely; nothing should be skipped by default.
        finder._options.no_expectations = True
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(tests, set())
    def test_skip_tests_idlharness(self):
        """Tests that idlharness tests are skipped on MSAN/ASAN runs.

        See https://crbug.com/856601
        """
        host = MockHost()
        port = host.port_factory.get('test-win-win7', None)

        non_idlharness_test = 'external/wpt/dir1/dir2/foo.html'
        idlharness_test_1 = 'external/wpt/dir1/dir2/idlharness.any.html'
        idlharness_test_2 = 'external/wpt/dir1/dir2/idlharness.any.worker.html'
        all_tests = [
            non_idlharness_test,
            idlharness_test_1,
            idlharness_test_2,
        ]

        # Patch port.tests() to return our tests
        port.tests = lambda paths: paths or all_tests

        options = optparse.Values({
            'no_expectations': False,
            'enable_sanitizer': False,
            'skipped': 'default',
            'skip_timeouts': False,
            'skip_failing_tests': False,
        })
        finder = web_test_finder.WebTestFinder(port, options)

        # Default case; not MSAN/ASAN so should not skip anything.
        expectations = test_expectations.TestExpectations(port)
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(tests, set())
        for test in all_tests:
            self.assertTrue(
                expectations.get_expectations(test).is_default_pass)

        # MSAN/ASAN, with no paths specified explicitly, so should skip both
        # idlharness tests.
        expectations = test_expectations.TestExpectations(port)
        finder._options.enable_sanitizer = True
        tests = finder.skip_tests([], all_tests, expectations)
        self.assertEqual(tests, set([idlharness_test_1, idlharness_test_2]))
        self.assertTrue(
            expectations.get_expectations(non_idlharness_test).is_default_pass)
        self.assertEquals(
            expectations.get_expectations(idlharness_test_1).results, {'SKIP'})
        self.assertEquals(
            expectations.get_expectations(idlharness_test_2).results, {'SKIP'})

        # Disable expectations entirely; we should still skip the idlharness
        # tests but shouldn't touch the expectations parameter.
        finder._options.no_expectations = True
        tests = finder.skip_tests([], all_tests, None)
        self.assertEqual(tests, set([idlharness_test_1, idlharness_test_2]))

        # MSAN/ASAN, with one of the tests specified explicitly (and
        # --skipped=default), so should skip only the unspecified test.
        expectations = test_expectations.TestExpectations(port)
        tests = finder.skip_tests([idlharness_test_1], all_tests, expectations)
        self.assertEqual(tests, set([idlharness_test_2]))
        # Although we will run the test because it was specified explicitly, it
        # is still *expected* to Skip. This is consistent with how entries in
        # TestExpectations work.
        self.assertTrue(
            expectations.get_expectations(non_idlharness_test).is_default_pass)
        self.assertEquals(
            expectations.get_expectations(idlharness_test_1).results, {'SKIP'})
        self.assertEquals(
            expectations.get_expectations(idlharness_test_2).results, {'SKIP'})
Example #16
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        test_names = self._finder.split_into_chunks(all_test_names)
        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)
        elif self._options.order == 'none':
            # Restore the test order to user specified order.
            # base.tests() may change the order as it returns tests in the
            # real, external/wpt, virtual order.
            if paths:
                test_names = self._restore_order(paths, test_names)

        if not self._options.no_expectations:
            self._printer.write_update('Parsing expectations ...')
            self._expectations = test_expectations.TestExpectations(self._port)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        if self._options.num_retries is None:
            # If --test-list is passed, or if no test narrowing is specified,
            # default to 3 retries. Otherwise [e.g. if tests are being passed by
            # name], default to 0 retries.
            if self._options.test_list or len(paths) < len(test_names):
                self._options.num_retries = 3
            else:
                self._options.num_retries = 0

        should_retry_failures = self._options.num_retries > 0

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        if self._options.no_expectations:
            return test_run_results.RunDetails(0, [], [], initial_results,
                                               all_retry_results)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._artifacts_directory,
                                         'results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if (self._options.show_results
                        and (exit_code or initial_results.total_failures)):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._artifacts_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results)
Example #17
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update('Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names)

        if self._options.order == 'natural':
            test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            test_names.sort()
            random.Random(self._options.seed).shuffle(test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(tests_in_other_chunks)

        self._printer.print_found(
            len(all_test_names), len(test_names), len(tests_to_run),
            self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            msg = 'No tests to run.'
            if self._options.zero_tests_executed_ok:
                _log.info(msg)
                # Keep executing to produce valid (but empty) results.
            else:
                _log.critical(msg)
                code = exit_codes.NO_TESTS_EXIT_STATUS
                return test_run_results.RunDetails(exit_code=code)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        if self._options.num_retries is None:
            # Don't retry failures if an explicit list of tests was passed in.
            should_retry_failures = len(paths) < len(test_names)
            # Retry failures 3 times by default.
            if should_retry_failures:
                self._options.num_retries = 3
        else:
            should_retry_failures = self._options.num_retries > 0

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip, should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry, only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory, 'results.html')
            self._copy_results_html_file(self._results_directory, 'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory, 'results.html'))
                self._printer.print_results(time.time() - start_time, initial_results)

        return test_run_results.RunDetails(
            exit_code, summarized_full_results, summarized_failing_results,
            initial_results, all_retry_results, enabled_pixel_tests_in_retry)
Example #18
def lint(host, options):
    ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names(options.platform)]
    files_linted = set()

    # In general, the set of TestExpectation files should be the same for
    # all ports. However, the method used to list expectations files is
    # in Port, and the TestExpectations constructor takes a Port.
    # Perhaps this function could be changed to just use one Port
    # (the default Port for this host) and it would work the same.

    failures = []
    wpt_overrides_exps_path = host.filesystem.join(
        ports_to_lint[0].web_tests_dir(), 'WPTOverrideExpectations')
    web_gpu_exps_path = host.filesystem.join(
        ports_to_lint[0].web_tests_dir(), 'WebGPUExpectations')
    paths = [wpt_overrides_exps_path, web_gpu_exps_path]
    expectations_dict = {}
    all_system_specifiers = set()
    all_build_specifiers = set(ports_to_lint[0].ALL_BUILD_TYPES)
    for path in paths:
        if host.filesystem.exists(path):
            expectations_dict[path] = host.filesystem.read_text_file(path)

    for port in ports_to_lint:
        expectations_dict.update(port.all_expectations_dict())
        config_macro_dict = port.configuration_specifier_macros()
        if config_macro_dict:
            all_system_specifiers.update({s.lower() for s in config_macro_dict.keys()})
            all_system_specifiers.update(
                {s.lower() for s in reduce(lambda x, y: x + y, config_macro_dict.values())})
        for path in port.extra_expectations_files():
            if host.filesystem.exists(path):
                expectations_dict[path] = host.filesystem.read_text_file(path)
    for path, content in expectations_dict.items():
        try:
            test_expectations.TestExpectations(
                ports_to_lint[0],
                expectations_dict={path: content},
                is_lint_mode=True)
        except test_expectations.ParseError as error:
            _log.error('')
            for warning in error.warnings:
                _log.error(warning)
                failures.append('%s: %s' % (path, warning))
                _log.error('')
        exp_lines = content.split('\n')
        for lineno, line in enumerate(exp_lines, 1):
            if line.strip().startswith('Bug('):
                error = (("%s:%d Expectation '%s' has the Bug(...) token, "
                          "The token has been removed in the new expectations format") %
                          (host.filesystem.basename(path), lineno, line))
                _log.error(error)
                failures.append(error)
                _log.error('')

        for lineno, line in enumerate(exp_lines, 1):
            if line.strip().startswith('#') or not line.strip():
                continue
            exp_line = TestExpectationLine.tokenize_line(
                host.filesystem.basename(path), line, lineno, ports_to_lint[0])
            specifiers = set(s.lower() for s in exp_line.specifiers)
            system_intersection = specifiers & all_system_specifiers
            build_intersection = specifiers & all_build_specifiers
            for intersection in [system_intersection, build_intersection]:
                if len(intersection) < 2:
                    continue
                error = (("%s:%d Expectation '%s' has multiple specifiers that are mutually exclusive.\n"
                          "The mutually exclusive specifiers are %s") %
                         (host.filesystem.basename(path), lineno, line, ', '.join(intersection)))
                _log.error(error)
                _log.error('')
                failures.append(error)
    return failures