Example #1
    def __init__(self, host, port_name=None, **kwargs):
        Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
        self._tests = unit_test_list()
        self._flakes = set()

        # FIXME: crbug.com/279494. This needs to be in the "real web tests
        # dir" in a mock filesystem, rather than outside of the checkout, so
        # that tests that want to write to a TestExpectations file can share
        # this between "test" ports and "real" ports. This is the result of
        # rebaseline_unittest.py having tests that refer to "real" port names
        # and real builders instead of fake builders that point back to the
        # test ports; rebaseline_unittest.py should not mix "real" ports and
        # "test" ports.

        self._generic_expectations_path = WEB_TEST_DIR + '/TestExpectations'
        self._results_directory = None

        self._operating_system = 'mac'
        if self._name.startswith('test-win'):
            self._operating_system = 'win'
        elif self._name.startswith('test-linux'):
            self._operating_system = 'linux'

        self.port_name = self._operating_system

        version_map = {
            'test-win-win7': 'win7',
            'test-win-win10': 'win10',
            'test-mac-mac10.10': 'mac10.10',
            'test-mac-mac10.11': 'mac10.11',
            'test-linux-precise': 'precise',
            'test-linux-trusty': 'trusty',
        }
        self._version = version_map[self._name]

        if self._operating_system == 'linux':
            self._architecture = 'x86_64'

        self.all_systems = (('mac10.10', 'x86'),
                            ('mac10.11', 'x86'),
                            ('win7', 'x86'),
                            ('win10', 'x86'),
                            ('precise', 'x86_64'),
                            ('trusty', 'x86_64'))

        self.all_build_types = ('debug', 'release')

        # To avoid surprises when introducing new macros, these are
        # intentionally fixed in time.
        self.configuration_specifier_macros_dict = {
            'mac': ['mac10.10', 'mac10.11'],
            'win': ['win7', 'win10'],
            'linux': ['precise', 'trusty']
        }
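
For reference, here is a standalone sketch of the mapping logic above (plain Python, no blinkpy imports; resolve_test_port and expand_specifier are hypothetical helper names used only for illustration). It mirrors how the constructor resolves a test port name to an (operating_system, version) pair and how a configuration specifier macro expands to concrete versions; it is not the blinkpy API itself.

VERSION_MAP = {
    'test-win-win7': 'win7',
    'test-win-win10': 'win10',
    'test-mac-mac10.10': 'mac10.10',
    'test-mac-mac10.11': 'mac10.11',
    'test-linux-precise': 'precise',
    'test-linux-trusty': 'trusty',
}

SPECIFIER_MACROS = {
    'mac': ['mac10.10', 'mac10.11'],
    'win': ['win7', 'win10'],
    'linux': ['precise', 'trusty'],
}


def resolve_test_port(name):
    """Mirror the constructor's name -> (operating_system, version) logic."""
    operating_system = 'mac'
    if name.startswith('test-win'):
        operating_system = 'win'
    elif name.startswith('test-linux'):
        operating_system = 'linux'
    return operating_system, VERSION_MAP[name]


def expand_specifier(specifier):
    """Expand a macro like 'linux' to concrete versions; pass others through."""
    return SPECIFIER_MACROS.get(specifier, [specifier])


assert resolve_test_port('test-linux-trusty') == ('linux', 'trusty')
assert expand_specifier('linux') == ['precise', 'trusty']
assert expand_specifier('win7') == ['win7']
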
Example #2
    def skip_tests(self, paths, all_tests_list, expectations):
        """Given a list of tests, returns the ones that should be skipped.

        A test may be skipped for many reasons, depending on the expectation
        files and options selected. The most obvious is SKIP entries in
        TestExpectations, but we also e.g. skip idlharness tests on MSAN/ASAN
        due to https://crbug.com/856601.

        Args:
            paths: the paths passed on the command-line to run_web_tests.py
            all_tests_list: all tests that we are considering running
            expectations: parsed TestExpectations data

        Returns: a set of tests that should be skipped (not run).
        """
        all_tests = set(all_tests_list)
        tests_to_skip = set()
        for test in all_tests:
            # We always skip idlharness tests for MSAN/ASAN, even when running
            # with --no-expectations (https://crbug.com/856601). Note we will
            # run the test anyway if it is explicitly specified on the command
            # line; paths are removed from the skip list after this loop.
            if self._options.enable_sanitizer and Port.is_wpt_idlharness_test(
                    test):
                tests_to_skip.update({test})
                continue

            if self._options.no_expectations:
                # Do not skip anything from TestExpectations.
                continue

            expected_results = expectations.get_expectations(test).results
            if ResultType.Skip in expected_results:
                tests_to_skip.update({test})
            if self._options.skip_timeouts and ResultType.Timeout in expected_results:
                tests_to_skip.update({test})
            if self._options.skip_failing_tests and ResultType.Failure in expected_results:
                tests_to_skip.update({test})

        if self._options.skipped == 'only':
            tests_to_skip = all_tests - tests_to_skip
        elif self._options.skipped == 'ignore':
            tests_to_skip = set()
        elif self._options.skipped != 'always':
            # Make sure we explicitly run any tests passed on the command
            # line; this branch is equivalent to 'default'.
            tests_to_skip -= set(paths)

        return tests_to_skip
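
The --skipped handling at the end of skip_tests is the least obvious part, so here is a standalone sketch of just that post-processing (apply_skipped_option is a hypothetical helper, not blinkpy API), showing how the 'only', 'ignore', 'always', and default modes transform the computed skip set:

def apply_skipped_option(skipped_option, all_tests, tests_to_skip, paths):
    """Apply the --skipped policy to an already-computed skip set."""
    if skipped_option == 'only':
        # Run only the tests that would otherwise have been skipped.
        return all_tests - tests_to_skip
    if skipped_option == 'ignore':
        # Run everything, ignoring skip expectations entirely.
        return set()
    if skipped_option != 'always':
        # 'default': still run anything named explicitly on the command line.
        return tests_to_skip - set(paths)
    # 'always': skip exactly what the expectations say to skip.
    return tests_to_skip


all_tests = {'a.html', 'b.html', 'c.html'}
skips = {'b.html', 'c.html'}
assert apply_skipped_option('only', all_tests, skips, []) == {'a.html'}
assert apply_skipped_option('ignore', all_tests, skips, []) == set()
assert apply_skipped_option('default', all_tests, skips, ['c.html']) == {'b.html'}
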
Example #3
    def test_repeated_test_artifacts(self):
        host = MockSystemHost()
        port = Port(host, 'baseport')
        artifacts = Artifacts('/dir', host.filesystem, repeat_tests=True)

        def init_test_failure(test_failure):
            test_failure.port = port
            test_failure.filesystem = host.filesystem
            test_failure.test_name = 'foo.html'
            test_failure.result_directory = '/dir'

        pass_with_stderr = PassWithStderr(
            DriverOutput(None, None, None, None, error=b'pass with stderr'))
        init_test_failure(pass_with_stderr)
        crash = FailureCrash(
            DriverOutput(None,
                         None,
                         None,
                         None,
                         crash=True,
                         error=b'crash stderr'))
        init_test_failure(crash)
        timeout = FailureTimeout(
            DriverOutput(None, None, None, None, error=b'timeout with stderr'))
        init_test_failure(timeout)

        pass_with_stderr.create_artifacts(artifacts)
        self.assertEqual('pass with stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))

        crash.create_artifacts(artifacts)
        self.assertEqual('crash stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))

        timeout.create_artifacts(artifacts)
        self.assertEqual('timeout with stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))

        pass_with_stderr.create_artifacts(artifacts)
        self.assertEqual('timeout with stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))
Example #4
    def skip_tests(self, paths, all_tests_list, expectations):
        """Given a list of tests, returns the ones that should be skipped.

        A test may be skipped for many reasons, depending on the expectation
        files and options selected. The most obvious is SKIP entries in
        TestExpectations, but we also e.g. skip idlharness tests on MSAN/ASAN
        due to https://crbug.com/856601. Note that for programmatically added
        SKIPs, this function will modify the input expectations to include the
        SKIP expectation (but does not write it to disk).

        Args:
            paths: the paths passed on the command-line to run_web_tests.py
            all_tests_list: all tests that we are considering running
            expectations: parsed TestExpectations data

        Returns: a set of tests that should be skipped (not run).
        """
        all_tests = set(all_tests_list)
        tests_to_skip = set()
        idlharness_skips = set()
        for test in all_tests:
            # We always skip idlharness tests for MSAN/ASAN, even when running
            # with --no-expectations (https://crbug.com/856601). Note we will
            # run the test anyway if it is explicitly specified on the command
            # line; paths are removed from the skip list after this loop.
            if self._options.enable_sanitizer and Port.is_wpt_idlharness_test(
                    test):
                tests_to_skip.update({test})
                idlharness_skips.update({test})
                continue

            if self._options.no_expectations:
                # Do not skip anything from TestExpectations.
                continue

            expected_results = expectations.get_expectations(test).results
            if ResultType.Skip in expected_results:
                tests_to_skip.update({test})
            if self._options.skip_timeouts and ResultType.Timeout in expected_results:
                tests_to_skip.update({test})
            if self._options.skip_failing_tests and ResultType.Failure in expected_results:
                tests_to_skip.update({test})

        # Idlharness tests are skipped programmatically on MSAN/ASAN, so we have
        # to add them to the expectations to avoid reporting unexpected skips.
        if idlharness_skips and expectations is not None:
            raw_expectations = '# results: [ Skip ]\n'
            for test in idlharness_skips:
                raw_expectations += typ_types.Expectation(
                    reason="crbug.com/856601",
                    test=test,
                    results=[ResultType.Skip]).to_string() + '\n'
            expectations.merge_raw_expectations(raw_expectations)

        if self._options.skipped == 'only':
            tests_to_skip = all_tests - tests_to_skip
        elif self._options.skipped == 'ignore':
            tests_to_skip = set()
        elif self._options.skipped != 'always':
            # Make sure we explicitly run any tests passed on the command
            # line; this branch is equivalent to 'default'.
            tests_to_skip -= set(paths)

        return tests_to_skip
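
For intuition, this is roughly what the raw expectations block built in the loop above looks like. The sketch below uses no blinkpy/typ imports and build_idlharness_skip_block is a hypothetical helper; the exact serialized line layout comes from typ's Expectation.to_string(), so the "reason test [ Skip ]" shape shown here is an assumption rather than verified output.

def build_idlharness_skip_block(tests):
    # One header line declaring the allowed result tags, then one line per
    # skipped idlharness test, mirroring raw_expectations in skip_tests().
    lines = ['# results: [ Skip ]']
    for test in sorted(tests):
        lines.append('crbug.com/856601 %s [ Skip ]' % test)
    return '\n'.join(lines) + '\n'


# Example test names, chosen only for illustration.
print(build_idlharness_skip_block({
    'external/wpt/foo/idlharness.any.html',
    'external/wpt/bar/idlharness.window.html',
}))
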
Example #5
    def make_port(self):
        return Port(MockSystemHost(), 'test',
                    optparse.Values({'configuration': 'Release'}))