    def _get_expectations(self, expectations, test, original_test=None):
        results = set()
        reasons = set()
        is_slow_test = False
        trailing_comments = ''
        for test_exp in expectations:
            expected_results = test_exp.expectations_for(test)
            # The Expectation instance returned by expectations_for has the
            # default PASS expected result. If there are no expected results
            # in the first file and there are expected results in the second
            # file, then the JSON results will show a per-test 'expected'
            # field with PASS and whatever the expected results in the second
            # file are.
            if not expected_results.is_default_pass:
                results.update(expected_results.results)
            is_slow_test |= expected_results.is_slow_test
            reasons.update(expected_results.reason.split())
            # Typ will leave a newline at the end of trailing_comments, so we
            # can just concatenate here and still have comments from different
            # files be separated by newlines.
            trailing_comments += expected_results.trailing_comments

        # If the results set is empty then the Expectation constructor
        # will set the expected result to Pass.
        return typ_types.Expectation(test=original_test or test,
                                     results=results,
                                     is_slow_test=is_slow_test,
                                     reason=' '.join(reasons),
                                     trailing_comments=trailing_comments)
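
A minimal standalone sketch of the merge rule described in the comments above, using a hypothetical FakeExpectation stand-in rather than typ's real Expectation class: results are only accumulated from files that declare something beyond the default PASS, so an absent entry in one file never injects PASS into the merged set.

# Hypothetical stand-in for typ's Expectation; only the fields needed for
# the merge rule are modelled here.
class FakeExpectation(object):
    def __init__(self, results=None, is_default_pass=False):
        self.results = results or {'PASS'}
        self.is_default_pass = is_default_pass


def merge_results(per_file_expectations):
    # Accumulate results, ignoring files that only report the default PASS.
    merged = set()
    for exp in per_file_expectations:
        if not exp.is_default_pass:
            merged.update(exp.results)
    return merged or {'PASS'}


# The first file has no entry (default PASS), so PASS does not leak into the
# merged results; only the second file's FAILURE survives.
print(merge_results([FakeExpectation(is_default_pass=True),
                     FakeExpectation(results={'FAILURE'})]))  # {'FAILURE'}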
Example #2
    def _split_configuration(self, exp, versions_to_remove):
        build_specifiers = set()
        os_specifiers = ({os for os in self._os_specifiers if
                          versions_to_remove & self._configuration_specifiers_dict[os]} & exp.tags)
        if os_specifiers:
            # The expectation's tag list has an OS tag, and some versions of
            # that OS are in the versions_to_remove set. Create expectations
            # for the versions of that OS that are not being removed.
            build_specifiers = exp.tags - os_specifiers
            os_specifier = os_specifiers.pop()
            system_specifiers = (
                set(self._configuration_specifiers_dict[os_specifier]) -
                versions_to_remove)
        elif self._os_specifiers & exp.tags:
            # The expectation's tag list has an OS tag with no versions in the
            # versions_to_remove set, so return the expectation unchanged.
            return [exp]
        else:
            # There are no OS tags in the expectation's tag list, so create an
            # expectation for each version that is not in versions_to_remove.
            system_specifiers = set(self._version_specifiers - versions_to_remove)
            for os, os_versions in self._configuration_specifiers_dict.items():
                # If all the versions of an OS are in the system specifiers
                # set, then replace those version specifiers with the OS
                # specifier.
                if os_versions.issubset(system_specifiers):
                    for version in os_versions:
                        system_specifiers.remove(version)
                    system_specifiers.add(os)
        return [
            typ_types.Expectation(
                tags={specifier} | build_specifiers, results=exp.results,
                is_slow_test=exp.is_slow_test, reason=exp.reason,
                test=exp.test, lineno=exp.lineno,
                trailing_comments=exp.trailing_comments)
            for specifier in sorted(system_specifiers)]
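
The last branch above can be pictured with plain dicts and sets. The sketch below is a hypothetical toy (the OS and version names are made up) that only illustrates how surviving version specifiers collapse back into an OS specifier once every version of that OS survives.

# Toy OS-to-version mapping; names are assumptions for illustration only.
CONFIGURATION_SPECIFIERS = {
    'win': {'win10', 'win11'},
    'mac': {'mac12', 'mac13'},
}
ALL_VERSIONS = set().union(*CONFIGURATION_SPECIFIERS.values())


def remaining_system_specifiers(versions_to_remove):
    specifiers = ALL_VERSIONS - versions_to_remove
    for os_name, versions in CONFIGURATION_SPECIFIERS.items():
        # Collapse a full set of surviving versions into the OS specifier.
        if versions.issubset(specifiers):
            specifiers -= versions
            specifiers.add(os_name)
    return specifiers


# Removing only win10 keeps win11 explicitly, but mac12/mac13 collapse to 'mac'.
print(sorted(remaining_system_specifiers({'win10'})))  # ['mac', 'win11']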
Example #3
    def _add_expectations_from_bot(self):
        # FIXME: With mode 'very-flaky' and 'maybe-flaky', this will make the
        # expectations entry in the flakiness dashboard rows for each test be
        # whatever the bot thinks it should be. Is this a good thing?
        bot_expectations = self._port.bot_expectations()
        raw_expectations = '# results: [ Failure Pass Crash Skip Timeout ]\n'
        for test, results in bot_expectations.items():
            raw_expectations += typ_types.Expectation(
                test=test, results=results).to_string() + '\n'
        self.merge_raw_expectations(raw_expectations)
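
The raw block handed to merge_raw_expectations is plain TestExpectations text. The sketch below approximates that text by hand instead of calling typ_types.Expectation(...).to_string(), so the exact line layout should be treated as an assumption rather than typ's canonical output.

# Hand-rolled approximation of the raw text built above; the real code uses
# typ_types.Expectation(...).to_string() to format each line.
def format_expectation_line(test, results, reason=''):
    prefix = (reason + ' ') if reason else ''
    return '%s%s [ %s ]' % (prefix, test, ' '.join(sorted(results)))


bot_results = {'external/wpt/foo.html': {'Failure', 'Pass'}}
raw_expectations = '# results: [ Failure Pass Crash Skip Timeout ]\n'
for test, results in bot_results.items():
    raw_expectations += format_expectation_line(test, results) + '\n'
print(raw_expectations)
# # results: [ Failure Pass Crash Skip Timeout ]
# external/wpt/foo.html [ Failure Pass ]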
Example #4
    def get_expectations(self, test, fallback_for_test=None):
        expectations = self._override_or_fallback_expectations(
            self._get_expectations(self._flag_expectations, test,
                                   fallback_for_test),
            self._get_expectations(self._expectations, test,
                                   fallback_for_test))
        base_test = self.port.lookup_virtual_test_base(test)
        if base_test:
            base_expectations = self.get_expectations(base_test, test)
            if ResultType.Skip in base_expectations.results:
                # TODO(crbug.com/1072015#c9): Temporarily remove Skip from the
                # inherited expectations to avoid unexpected Skip.
                base_expectations = typ_types.Expectation(
                    test=base_expectations.test,
                    results=base_expectations.results - set([ResultType.Skip]),
                    is_slow_test=base_expectations.is_slow_test,
                    reason=base_expectations.reason,
                    trailing_comments=base_expectations.trailing_comments)
            return self._override_or_fallback_expectations(
                expectations, base_expectations)
        return expectations

    def _get_expectations(self, expectations, test, original_test=None):
        results = set()
        reasons = set()
        is_slow_test = False
        for test_exp in expectations:
            expected_results = test_exp.expectations_for(test)
            # The Expectation instance returned by expectations_for has the
            # default PASS expected result. If there are no expected results
            # in the first file and there are expected results in the second
            # file, then the JSON results will show a per-test 'expected'
            # field with PASS and whatever the expected results in the second
            # file are.
            if not (len(expected_results.results) == 1
                    and ResultType.Pass in expected_results.results):
                results.update(expected_results.results)
            is_slow_test |= expected_results.is_slow_test
            reasons.update(expected_results.reason.split())
        # If the results set is empty then the Expectation constructor
        # will set the expected result to Pass.
        return typ_types.Expectation(test=original_test or test,
                                     results=results,
                                     is_slow_test=is_slow_test,
                                     reason=' '.join(reasons))
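
The _override_or_fallback_expectations helper is not shown in this snippet, so the sketch below only models one plausible reading of the merge order: flag-specific results win when they declare anything, otherwise the general results are used, and a virtual test additionally inherits from its base test with Skip stripped (the crbug.com/1072015 workaround above). Treat the helper's exact semantics as an assumption.

# Assumed approximation of the override-or-fallback merge; the real helper is
# not part of the snippet above.
def override_or_fallback(override_results, fallback_results):
    return override_results if override_results else fallback_results


def inherited_results_for_virtual_test(base_results):
    # Mirror the workaround above: drop Skip before inheriting from the base.
    return base_results - {'SKIP'}


flag_results = set()                # flag-specific file has no entry
main_results = {'FAILURE'}          # main TestExpectations entry
base_results = {'SKIP', 'TIMEOUT'}  # expectations of the virtual test's base

merged = override_or_fallback(flag_results, main_results)
merged = override_or_fallback(merged,
                              inherited_results_for_virtual_test(base_results))
print(merged)  # {'FAILURE'}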
    def skip_tests(self, paths, all_tests_list, expectations):
        """Given a list of tests, returns the ones that should be skipped.

        A test may be skipped for many reasons, depending on the expectation
        files and options selected. The most obvious is SKIP entries in
        TestExpectations, but we also e.g. skip idlharness tests on MSAN/ASAN
        due to https://crbug.com/856601. Note that for programmatically added
        SKIPs, this function will modify the input expectations to include the
        SKIP expectation (but will not write it to disk).

        Args:
            paths: the paths passed on the command-line to run_web_tests.py
            all_tests_list: all tests that we are considering running
            expectations: parsed TestExpectations data

        Returns: a set of tests that should be skipped (not run).
        """
        all_tests = set(all_tests_list)
        tests_to_skip = set()
        idlharness_skips = set()
        for test in all_tests:
            # We always skip idlharness tests for MSAN/ASAN, even when running
            # with --no-expectations (https://crbug.com/856601). Note we will
            # run the test anyway if it is explicitly specified on the command
            # line; paths are removed from the skip list after this loop.
            if self._options.enable_sanitizer and Port.is_wpt_idlharness_test(
                    test):
                tests_to_skip.update({test})
                idlharness_skips.update({test})
                continue

            if self._options.no_expectations:
                # do not skip anything from TestExpectations
                continue

            expected_results = expectations.get_expectations(test).results
            if ResultType.Skip in expected_results:
                tests_to_skip.update({test})
            if self._options.skip_timeouts and ResultType.Timeout in expected_results:
                tests_to_skip.update({test})
            if self._options.skip_failing_tests and ResultType.Failure in expected_results:
                tests_to_skip.update({test})

        # Idlharness tests are skipped programmatically on MSAN/ASAN, so we have
        # to add them to the expectations to avoid reporting unexpected skips.
        if idlharness_skips and expectations is not None:
            raw_expectations = '# results: [ Skip ]\n'
            for test in idlharness_skips:
                raw_expectations += typ_types.Expectation(
                    reason="crbug.com/856601",
                    test=test,
                    results=[ResultType.Skip]).to_string() + '\n'
            expectations.merge_raw_expectations(raw_expectations)

        if self._options.skipped == 'only':
            tests_to_skip = all_tests - tests_to_skip
        elif self._options.skipped == 'ignore':
            tests_to_skip = set()
        elif self._options.skipped != 'always':
            # make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
            tests_to_skip -= set(paths)

        return tests_to_skip
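
A standalone sketch of how the --skipped option reshapes the final skip set; the mode names mirror the branches above, and the helper itself is hypothetical.

# Hypothetical helper mirroring the --skipped handling at the end of
# skip_tests(): 'only' inverts the set, 'ignore' clears it, 'always' keeps it,
# and the default mode never skips tests named explicitly on the command line.
def apply_skipped_mode(mode, all_tests, tests_to_skip, explicit_paths):
    if mode == 'only':
        return all_tests - tests_to_skip
    if mode == 'ignore':
        return set()
    if mode == 'always':
        return tests_to_skip
    return tests_to_skip - explicit_paths


all_tests = {'a.html', 'b.html', 'c.html'}
print(apply_skipped_mode('default', all_tests,
                         {'a.html', 'b.html'}, {'a.html'}))  # {'b.html'}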