Example #1
    def find_touched_tests(self,
                           new_or_modified_paths,
                           apply_skip_expectations=True):
        potential_test_paths = []
        for test_file in new_or_modified_paths:
            if not test_file.startswith(self.LAYOUT_TESTS_DIRECTORY):
                continue

            test_file = self._strip_test_dir_prefix(test_file)
            test_paths = self._port.potential_test_names_from_expected_file(
                test_file)
            if test_paths:
                potential_test_paths.extend(test_paths)
            else:
                potential_test_paths.append(test_file)

        if not potential_test_paths:
            return None

        tests = self._port.tests(list(set(potential_test_paths)))
        if not apply_skip_expectations:
            return tests

        expectations = test_expectations.TestExpectations(
            self._port, tests, force_expectations_pass=False)
        expectations.parse_all_expectations()
        tests_to_skip = self.skip_tests(potential_test_paths, tests,
                                        expectations, None)
        return [test for test in tests if test not in tests_to_skip]
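
A standalone, plain-Python illustration of the filtering step at the top of this method: only paths under the layout tests directory are kept, and the prefix is stripped before the port maps them to test names. The directory name and sample paths below are assumptions; only the filter/strip logic mirrors the code above.

# Hypothetical changed paths; 'LayoutTests' is assumed to be LAYOUT_TESTS_DIRECTORY.
LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
changed_paths = [
    'LayoutTests/fast/css/new-test.html',
    'LayoutTests/fast/css/new-test-expected.txt',
    'Source/WebCore/rendering/RenderBox.cpp',   # not a layout test, filtered out
]
candidates = [path[len(LAYOUT_TESTS_DIRECTORY) + 1:]
              for path in changed_paths
              if path.startswith(LAYOUT_TESTS_DIRECTORY)]
print(candidates)   # ['fast/css/new-test.html', 'fast/css/new-test-expected.txt']
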
def run_results(port, extra_skipped_tests=None):
    tests = ['passes/text.html', 'failures/expected/timeout.html',
             'failures/expected/crash.html', 'failures/expected/leak.html',
             'failures/expected/keyboard.html', 'failures/expected/audio.html',
             'failures/expected/text.html', 'passes/skipped/skip.html']
    expectations = test_expectations.TestExpectations(port, tests)
    if extra_skipped_tests:
        expectations.add_extra_skipped_tests(extra_skipped_tests)
    return test_run_results.TestRunResults(expectations, len(tests))
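
A minimal sketch of driving the helper above from a unit test, using the MockHost-based test port that the manager tests further down rely on. The import path reflects the webkitpy layout these examples come from and may differ in other checkouts.

from webkitpy.common.host_mock import MockHost   # module path is an assumption

host = MockHost()
port = host.port_factory.get('test-mac-leopard')   # test port name borrowed from the manager tests below
results = run_results(port, extra_skipped_tests=['passes/skipped/skip.html'])
print(results.total)   # TestRunResults is assumed to expose the test count it was created with as `total`
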
Example #3
def lint(port, options):
    host = port.host
    if options.platform:
        ports_to_lint = [port]
    else:
        ports_to_lint = [
            host.port_factory.get(name)
            for name in host.port_factory.all_port_names()
        ]

    files_linted = set()
    lint_failed = False

    for port_to_lint in ports_to_lint:
        expectations_file = port_to_lint.path_to_test_expectations_file()
        if expectations_file in files_linted:
            continue

        try:
            test_expectations.TestExpectations(port_to_lint, is_lint_mode=True)
        except test_expectations.ParseError as e:
            lint_failed = True
            _log.error('')
            for warning in e.warnings:
                _log.error(warning)
            _log.error('')
        files_linted.add(expectations_file)
Example #4
def lint(host, options):
    ports_to_lint = [
        host.port_factory.get(name)
        for name in host.port_factory.all_port_names(options.platform)
    ]
    files_linted = set()

    failures = []
    for port_to_lint in ports_to_lint:
        expectations_dict = port_to_lint.expectations_dict()

        for expectations_file in expectations_dict.keys():
            if expectations_file in files_linted:
                continue

            try:
                test_expectations.TestExpectations(
                    port_to_lint,
                    expectations_dict={
                        expectations_file: expectations_dict[expectations_file]
                    },
                    is_lint_mode=True)
            except test_expectations.ParseError as e:
                _log.error('')
                for warning in e.warnings:
                    _log.error(warning)
                    failures.append('%s: %s' % (expectations_file, warning))
                _log.error('')
            files_linted.add(expectations_file)
    return failures
def lint(host, options):
    # FIXME: Remove this when we remove the --chromium flag (crbug.com/245504).
    if options.platform == 'chromium':
        options.platform = None

    ports_to_lint = [
        host.port_factory.get(name)
        for name in host.port_factory.all_port_names(options.platform)
    ]
    files_linted = set()
    lint_failed = False

    for port_to_lint in ports_to_lint:
        expectations_dict = port_to_lint.expectations_dict()

        for expectations_file in expectations_dict.keys():
            if expectations_file in files_linted:
                continue

            try:
                test_expectations.TestExpectations(
                    port_to_lint,
                    expectations_dict={
                        expectations_file: expectations_dict[expectations_file]
                    },
                    is_lint_mode=True)
            except test_expectations.ParseError as e:
                lint_failed = True
                _log.error('')
                for warning in e.warnings:
                    _log.error(warning)
                _log.error('')
            files_linted.add(expectations_file)
    return lint_failed
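
A minimal sketch of invoking this variant directly. The options object is faked with optparse.Values, mirroring the manager test further down, and a real Host is used as in the style-checker example; treat the exact import paths as assumptions.

import optparse

from webkitpy.common.host import Host   # module path is an assumption

options = optparse.Values({'platform': None})   # None lints the expectations of every port
if lint(Host(), options):
    print('TestExpectations lint failed')
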
 def test_summarized_results_flaky_pass_after_first_retry(self):
     test_name = 'passes/text.html'
     expectations = test_expectations.TestExpectations(
         self.port, [test_name])
     initial_results = test_run_results.TestRunResults(expectations, 1)
     initial_results.add(get_result(test_name, test_expectations.CRASH),
                         False, False)
     all_retry_results = [
         test_run_results.TestRunResults(expectations, 1),
         test_run_results.TestRunResults(expectations, 1),
         test_run_results.TestRunResults(expectations, 1)
     ]
     all_retry_results[0].add(
         get_result(test_name, test_expectations.TIMEOUT), False, False)
     all_retry_results[1].add(get_result(test_name, test_expectations.PASS),
                              True, False)
     all_retry_results[2].add(get_result(test_name, test_expectations.PASS),
                              True, False)
     summary = test_run_results.summarize_results(
         self.port,
         expectations,
         initial_results,
         all_retry_results,
         enabled_pixel_tests_in_retry=True)
     self.assertTrue(
         'is_unexpected' not in summary['tests']['passes']['text.html'])
     self.assertEquals(summary['tests']['passes']['text.html']['expected'],
                       'PASS')
     self.assertEquals(summary['tests']['passes']['text.html']['actual'],
                       'CRASH TIMEOUT PASS PASS')
     self.assertEquals(summary['num_flaky'], 1)
     self.assertEquals(summary['num_passes'], 0)
     self.assertEquals(summary['num_regressions'], 0)
Example #7
    def _collect_tests(self,
                       paths,  # type: List[str]
                       device_type_list,  # type: List[Optional[DeviceType]]
                       ):
        aggregate_tests = set()  # type: Set[Test]
        aggregate_tests_to_run = set()  # type: Set[Test]
        tests_to_run_by_device = {}  # type: Dict[Optional[DeviceType], List[Test]]

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            paths, tests = self._finder.find_tests(self._options, paths, device_type=device_type)
            aggregate_tests.update(tests)

            test_names = [test.test_path for test in tests]

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            tests_to_run = self._tests_to_run(tests, device_type=device_type)
            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run_by_device[device_type])

        aggregate_tests_to_skip = aggregate_tests - aggregate_tests_to_run

        return tests_to_run_by_device, aggregate_tests_to_skip
Example #8
    def get_result_summary(self, test_names, expectations_str):
        self._port.test_expectations = lambda: expectations_str
        self._port.test_expectations_overrides = lambda: None
        expectations = test_expectations.TestExpectations(self._port, test_names)

        rs = result_summary.ResultSummary(expectations, test_names)
        return test_names, rs, expectations
def run_results(port):
    tests = [
        'passes/text.html', 'failures/expected/timeout.html',
        'failures/expected/crash.html', 'failures/expected/hang.html',
        'failures/expected/audio.html'
    ]
    expectations = test_expectations.TestExpectations(port, tests)
    return test_run_results.TestRunResults(expectations, len(tests))
Example #10
 def get_result_summary(self, port, test_names, expectations_str):
     expectations = test_expectations.TestExpectations(
         port,
         test_names,
         expectations_str,
         port.test_configuration(),
         is_lint_mode=False)
     return test_names, result_summary.ResultSummary(
         expectations, test_names), expectations
Example #11
 def check_test_expectations(self, expectations_str, tests=None, overrides=None):
     err = None
     expectations = None
     try:
         expectations = test_expectations.TestExpectations(
             port=self._port_obj, expectations=expectations_str, tests=tests,
             test_config=self._port_obj.test_configuration(),
             is_lint_mode=True, overrides=overrides)
     except test_expectations.ParseError as error:
         err = error
 def test_look_for_new_crash_logs(self):
     def get_manager():
         host = MockHost()
         port = host.port_factory.get('test-mac-leopard')
         manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
         return manager
     host = MockHost()
     port = host.port_factory.get('test-mac-leopard')
     tests = ['failures/expected/crash.html']
     expectations = test_expectations.TestExpectations(port, tests)
     run_results = TestRunResults(expectations, len(tests))
     manager = get_manager()
     manager._look_for_new_crash_logs(run_results, time.time())
Example #13
def lint(host, options, logging_stream):
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler(logging_stream)
    logger.addHandler(handler)

    try:
        ports_to_lint = [
            host.port_factory.get(name)
            for name in host.port_factory.all_port_names(options.platform)
        ]
        files_linted = set()
        lint_failed = False

        for port_to_lint in ports_to_lint:
            if port_to_lint.port_name in ['gtk', 'wpe']:
                port_to_lint._options.additional_expectations = [
                    'LayoutTests/platform/glib/TestExpectations'
                ]
            expectations_dict = port_to_lint.expectations_dict()

            # FIXME: This won't work if multiple ports share a TestExpectations file but support different modifiers in the file.
            for expectations_file in expectations_dict.keys():
                if expectations_file in files_linted:
                    continue

                try:
                    expectations = test_expectations.TestExpectations(
                        port_to_lint,
                        expectations_to_lint={
                            expectations_file:
                            expectations_dict[expectations_file]
                        })
                    expectations.parse_all_expectations()
                except test_expectations.ParseError as e:
                    lint_failed = True
                    _log.error('')
                    for warning in e.warnings:
                        _log.error(warning)
                    _log.error('')
                files_linted.add(expectations_file)

        if lint_failed:
            _log.error('Lint failed.')
            return -1

        _log.info('Lint succeeded.')
        return 0
    finally:
        logger.removeHandler(handler)
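
A minimal sketch of calling the logging-stream variant from a script. Any file-like object works for logging_stream; here an in-memory buffer is used and its contents are dumped only when linting fails.

import optparse
import sys
from io import StringIO   # the Python 2-era checkouts these scripts target would use StringIO.StringIO instead

from webkitpy.common.host import Host   # module path is an assumption

stream = StringIO()
exit_code = lint(Host(), optparse.Values({'platform': None}), stream)
if exit_code:
    sys.stderr.write(stream.getvalue())
sys.exit(exit_code)
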
Example #14
 def test_look_for_new_crash_logs(self):
     def get_manager_with_tests(test_names):
         host = MockHost()
         port = host.port_factory.get('test-mac-leopard')
         manager = Manager(port, options=MockOptions(test_list=None, http=True), printer=Mock())
         manager.collect_tests(test_names)
         return manager
     host = MockHost()
     port = host.port_factory.get('test-mac-leopard')
     tests = ['failures/expected/crash.html']
     expectations = test_expectations.TestExpectations(port, tests)
     rs = result_summary.ResultSummary(expectations, tests)
     manager = get_manager_with_tests(tests)
     manager._look_for_new_crash_logs(rs, time.time())
Example #15
def lint(host, options):
    ports_to_lint = [
        host.port_factory.get(name)
        for name in host.port_factory.all_port_names(options.platform)
    ]
    files_linted = set()

    # In general, the set of TestExpectation files should be the same for
    # all ports. However, the method used to list expectations files is
    # in Port, and the TestExpectations constructor takes a Port.
    # Perhaps this function could be changed to just use one Port
    # (the default Port for this host) and it would work the same.

    failures = []
    for port_to_lint in ports_to_lint:
        expectations_dict = port_to_lint.all_expectations_dict()

        # There are some TestExpectations files that are not loaded by default
        # in any Port, and are instead passed via --additional-expectations on
        # some builders. We also want to inspect these files if they're present.
        extra_files = (
            'ASANExpectations',
            'LeakExpectations',
            'MSANExpectations',
        )
        for name in extra_files:
            path = port_to_lint.layout_tests_dir() + '/' + name
            if host.filesystem.exists(path):
                expectations_dict[path] = host.filesystem.read_text_file(path)

        for expectations_file in expectations_dict:

            if expectations_file in files_linted:
                continue

            try:
                test_expectations.TestExpectations(
                    port_to_lint,
                    expectations_dict={
                        expectations_file: expectations_dict[expectations_file]
                    },
                    is_lint_mode=True)
            except test_expectations.ParseError as error:
                _log.error('')
                for warning in error.warnings:
                    _log.error(warning)
                    failures.append('%s: %s' % (expectations_file, warning))
                _log.error('')
            files_linted.add(expectations_file)
    return failures
def lint(host, options, logging_stream):
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler(logging_stream)
    logger.addHandler(handler)

    try:
        # FIXME: Remove this when we remove the --chromium flag (crbug.com/245504).
        if options.platform == 'chromium':
            options.platform = None

        ports_to_lint = [
            host.port_factory.get(name)
            for name in host.port_factory.all_port_names(options.platform)
        ]
        files_linted = set()
        lint_failed = False

        for port_to_lint in ports_to_lint:
            expectations_dict = port_to_lint.expectations_dict()

            for expectations_file in expectations_dict.keys():
                if expectations_file in files_linted:
                    continue

                try:
                    test_expectations.TestExpectations(
                        port_to_lint,
                        expectations_dict={
                            expectations_file:
                            expectations_dict[expectations_file]
                        },
                        is_lint_mode=True)
                except test_expectations.ParseError as e:
                    lint_failed = True
                    _log.error('')
                    for warning in e.warnings:
                        _log.error(warning)
                    _log.error('')
                files_linted.add(expectations_file)

        if lint_failed:
            _log.error('Lint failed.')
            return -1

        _log.info('Lint succeeded.')
        return 0
    finally:
        logger.removeHandler(handler)
Example #17
 def check_test_expectations(self, expectations_str, tests=None, overrides=None):
     err = None
     expectations = None
     # FIXME: We need to rework how we lint strings so that we can do it independently of what a
     # port's existing expectations are. Linting should probably just call the parser directly.
     # For now we override the port hooks. This will also need to be reworked when expectations
     # can cascade arbitrarily, rather than just have expectations and overrides.
     orig_expectations = self._port_obj.test_expectations
     orig_overrides = self._port_obj.test_expectations_overrides
     try:
         self._port_obj.test_expectations = lambda: expectations_str
         self._port_obj.test_expectations_overrides = lambda: overrides
         expectations = test_expectations.TestExpectations(self._port_obj, tests, True)
     except test_expectations.ParseError as error:
         err = error
Example #18
    def print_expectations(self, args):
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return -1

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)

        test_col_width = len(max(tests_to_run + list(tests_to_skip),
                                 key=len)) + 1

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(
                    len(custom_device_tests[device_class]), device_class))

        self._print_expectations_for_subset(None, test_col_width, tests_to_run,
                                            tests_to_skip)

        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            self._print_expectations_for_subset(device_class, test_col_width,
                                                device_tests)

        return 0
Example #19
    def __init__(self,
                 running_port,
                 target_port,
                 platform,
                 options,
                 url_fetcher,
                 zip_factory,
                 scm,
                 logged_before=False):
        """
        Args:
            running_port: the Port the script is running on.
            target_port: the Port the script uses to find port-specific
                configuration information like the test_expectations.txt
                file location and the list of test platforms.
            platform: the test platform to rebaseline
            options: the command-line options object.
            url_fetcher: object that can fetch objects from URLs
            zip_factory: optional object that can fetch zip files from URLs
            scm: scm object for adding new baselines
            logged_before: whether the previous running port logged anything.
        """
        self._platform = platform
        self._options = options
        self._port = running_port
        self._filesystem = running_port._filesystem
        self._target_port = target_port

        self._rebaseline_port = port.get(platform,
                                         options,
                                         filesystem=self._filesystem)
        self._rebaselining_tests = set()
        self._rebaselined_tests = []
        self._logged_before = logged_before
        self.did_log = False

        # Create tests and expectations helper which is used to:
        #   -. compile list of tests that need rebaselining.
        #   -. update the tests in test_expectations file after rebaseline
        #      is done.
        expectations_str = self._rebaseline_port.test_expectations()
        self._test_expectations = test_expectations.TestExpectations(
            self._rebaseline_port, None, expectations_str,
            self._rebaseline_port.test_configuration(), False)
        self._url_fetcher = url_fetcher
        self._zip_factory = zip_factory
        self._scm = scm
Example #20
    def skipped_layout_tests(self, extra_test_files=None):
        expectations_str = self.test_expectations()
        overrides_str = self.test_expectations_overrides()
        is_debug_mode = False

        all_test_files = self.tests([])
        if extra_test_files:
            all_test_files.update(extra_test_files)

        expectations = test_expectations.TestExpectations(
            self,
            all_test_files,
            expectations_str,
            self.test_configuration(),
            is_lint_mode=False,
            overrides=overrides_str)
        return expectations.get_tests_with_result_type(test_expectations.SKIP)
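
Because this is a Port method, a caller only needs a port instance; a minimal sketch using the default port for the current machine (whether a given checkout still has this method depends on its vintage, since later examples switch to expectations_dict()).

from webkitpy.common.host import Host   # module path is an assumption

port = Host().port_factory.get()        # no name: the default port for the current platform
for test in sorted(port.skipped_layout_tests()):
    print(test)
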
Example #21
    def test_look_for_new_crash_logs(self):
        def get_manager():
            host = MockHost()
            port = host.port_factory.get('test-mac-mac10.10')
            manager = Manager(port,
                              options=optparse.Values({
                                  'test_list': None,
                                  'http': True,
                                  'max_locked_shards': 1
                              }),
                              printer=FakePrinter())
            return manager

        host = MockHost()
        port = host.port_factory.get('test-mac-mac10.10')
        tests = ['failures/expected/crash.html']
        expectations = test_expectations.TestExpectations(port, tests)
        run_results = TestRunResults(expectations, len(tests))
        manager = get_manager()
        manager._look_for_new_crash_logs(run_results, time.time())
 def test_timeout_then_unexpected_pass(self):
     test_name = 'failures/expected/text.html'
     expectations = test_expectations.TestExpectations(self.port, [test_name])
     initial_results = test_run_results.TestRunResults(expectations, 1)
     initial_results.add(get_result(test_name, test_expectations.TIMEOUT, run_time=1), False, False)
     all_retry_results = [test_run_results.TestRunResults(expectations, 1),
                          test_run_results.TestRunResults(expectations, 1),
                          test_run_results.TestRunResults(expectations, 1)]
     all_retry_results[0].add(get_result(test_name, test_expectations.LEAK, run_time=0.1), False, False)
     all_retry_results[1].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
     all_retry_results[2].add(get_result(test_name, test_expectations.PASS, run_time=0.1), False, False)
     summary = test_run_results.summarize_results(
         self.port, expectations, initial_results, all_retry_results,
         enabled_pixel_tests_in_retry=True)
     self.assertTrue('is_unexpected' in summary['tests']['failures']['expected']['text.html'])
     self.assertEquals(summary['tests']['failures']['expected']['text.html']['expected'], 'FAIL')
     self.assertEquals(summary['tests']['failures']['expected']['text.html']['actual'], 'TIMEOUT LEAK PASS PASS')
     self.assertEquals(summary['num_passes'], 1)
     self.assertEquals(summary['num_regressions'], 0)
     self.assertEquals(summary['num_flaky'], 0)
Example #23
def lint(port, options, logging_stream):
    host = port.host
    logging.getLogger().setLevel(
        logging.DEBUG if options.debug_rwt_logging else logging.INFO)
    printer = printing.Printer(port,
                               options,
                               logging_stream,
                               logger=logging.getLogger())

    if options.platform:
        ports_to_lint = [port]
    else:
        ports_to_lint = [
            host.port_factory.get(name)
            for name in host.port_factory.all_port_names()
        ]

    files_linted = set()
    lint_failed = False

    for port_to_lint in ports_to_lint:
        expectations_dict = port_to_lint.expectations_dict()

        # FIXME: This won't work if multiple ports share a TestExpectations file but support different modifiers in the file.
        for expectations_file in expectations_dict.keys():
            if expectations_file in files_linted:
                continue

            try:
                test_expectations.TestExpectations(
                    port_to_lint,
                    expectations_to_lint={
                        expectations_file: expectations_dict[expectations_file]
                    })
            except test_expectations.ParseError as e:
                lint_failed = True
                _log.error('')
                for warning in e.warnings:
                    _log.error(warning)
                _log.error('')
            files_linted.add(expectations_file)
Example #24
def lint(host, options):
    ports_to_lint = [
        host.port_factory.get(name)
        for name in host.port_factory.all_port_names(options.platform)
    ]
    files_linted = set()

    # In general, the set of TestExpectation files should be the same for
    # all ports. However, the method used to list expectations files is
    # in Port, and the TestExpectations constructor takes a Port.
    # Perhaps this function could be changed to just use one Port
    # (the default Port for this host) and it would work the same.

    failures = []
    for port_to_lint in ports_to_lint:
        expectations_dict = port_to_lint.all_expectations_dict()

        for path in port_to_lint.extra_expectations_files():
            if host.filesystem.exists(path):
                expectations_dict[path] = host.filesystem.read_text_file(path)

        for expectations_file in expectations_dict:

            if expectations_file in files_linted:
                continue

            try:
                test_expectations.TestExpectations(
                    port_to_lint,
                    expectations_dict={
                        expectations_file: expectations_dict[expectations_file]
                    },
                    is_lint_mode=True)
            except test_expectations.ParseError as error:
                _log.error('')
                for warning in error.warnings:
                    _log.error(warning)
                    failures.append('%s: %s' % (expectations_file, warning))
                _log.error('')
            files_linted.add(expectations_file)
    return failures
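
Unlike the flag-returning variants, this lint() hands back the individual failure strings, so a caller can count and surface them however it likes; a minimal sketch:

import optparse

from webkitpy.common.host import Host   # module path is an assumption

failures = lint(Host(), optparse.Values({'platform': None}))
print('{} expectations lint failure(s)'.format(len(failures)))
for failure in failures:
    print(failure)   # each entry is '<expectations file>: <warning>', per the code above
raise SystemExit(1 if failures else 0)
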
Example #25
    def lint_test_expectations(files, configuration, cwd, increment_error_count=lambda: 0, line_numbers=None, host=Host()):
        error_count = 0
        files_linted = set()
        ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names()]
        for port in ports_to_lint:
            for expectations_file in port.expectations_dict().keys():
                style_error_handler = DefaultStyleErrorHandler(expectations_file, configuration, increment_error_count, line_numbers)

                try:
                    if expectations_file in files_linted:
                        continue
                    expectations = test_expectations.TestExpectations(
                        port,
                        expectations_to_lint={expectations_file: port.expectations_dict()[expectations_file]})
                    expectations.parse_all_expectations()
                except test_expectations.ParseError as e:
                    for warning in e.warnings:
                        if TestExpectationsChecker._should_log_linter_warning(warning, files, cwd, host):
                            style_error_handler(warning.line_number, 'test/expectations', 5, warning.error)
                            error_count += 1
                files_linted.add(expectations_file)
        return error_count
Example #26
    def print_expectations(self, args):
        aggregate_test_names = set()
        aggregate_tests_to_run = set()
        aggregate_tests_to_skip = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [self._port.DEVICE_TYPE]
        for device_type in device_type_list:
            """Run the tests and return a RunDetails object with the results."""
            for_device_type = 'for {} '.format(device_type) if device_type else ''
            self._printer.write_update('Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update('Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)
            aggregate_tests_to_skip.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run)

        aggregate_tests_to_skip = aggregate_tests_to_skip - aggregate_tests_to_run

        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
        test_col_width = len(max(aggregate_tests_to_run.union(aggregate_tests_to_skip), key=len)) + 1

        self._print_expectations_for_subset(device_type_list[0], test_col_width, tests_to_run_by_device[device_type_list[0]], aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(device_type, test_col_width, tests_to_run_by_device[device_type])

        return 0
 def test_timeout_then_unexpected_pass(self):
     tests = ['failures/expected/image.html']
     expectations = test_expectations.TestExpectations(self.port, tests)
     initial_results = test_run_results.TestRunResults(
         expectations, len(tests))
     initial_results.add(
         get_result('failures/expected/image.html',
                    test_expectations.TIMEOUT,
                    run_time=1), False, False)
     retry_results = test_run_results.TestRunResults(
         expectations, len(tests))
     retry_results.add(
         get_result('failures/expected/image.html',
                    test_expectations.PASS,
                    run_time=0.1), False, False)
     summary = test_run_results.summarize_results(
         self.port,
         expectations,
         initial_results,
         retry_results,
         enabled_pixel_tests_in_retry=True,
         only_include_failing=True)
     self.assertEquals(summary['num_regressions'], 0)
     self.assertEquals(summary['num_passes'], 1)
Example #28
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                int(self._options.child_processes),
                retrying=False)

            tests_to_retry = self._tests_to_retry(
                initial_results,
                include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed(
                )

                _log.info('')
                _log.info("Retrying %s ..." %
                          pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results,
            enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(
                self._port,
                self._expectations,
                initial_results,
                retry_results,
                enabled_pixel_tests_in_retry,
                include_passes=True,
                include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results,
                                    summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results,
                                    results_including_passes, start_time,
                                    end_time)

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (
                        initial_results.unexpected_results_by_name or
                    (self._options.full_results_html
                     and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(
                    summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results,
                                           initial_results, retry_results,
                                           enabled_pixel_tests_in_retry)
Example #29
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False
        try:
            paths, test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            num_workers = self._port.num_workers(
                int(self._options.child_processes))

            initial_results = self._run_tests(tests_to_run, tests_to_skip,
                                              self._options.repeat_each,
                                              self._options.iterations,
                                              num_workers)

            # Don't retry failures when interrupted by user or failures limit exception.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed(
                )

                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break

                    _log.info('')
                    _log.info(
                        'Retrying %s, attempt %d of %d...',
                        grammar.pluralize('unexpected failure',
                                          len(tests_to_retry)), retry_attempt,
                        self._options.num_retries)

                    retry_results = self._run_tests(
                        tests_to_retry,
                        tests_to_skip=set(),
                        repeat_each=1,
                        iterations=1,
                        num_workers=num_workers,
                        retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)

                    tests_to_retry = self._tests_to_retry(retry_results)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            if self._options.write_full_results_to:
                self._filesystem.copyfile(
                    self._filesystem.join(self._results_directory,
                                          "full_results.json"),
                    self._options.write_full_results_to)

            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html
                                      and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time,
                                            initial_results,
                                            summarized_failing_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)
Example #30
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update('Collecting tests ...')
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update(
                'Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            all_test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            all_test_names.sort()
            random.Random(self._options.seed).shuffle(all_test_names)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            all_test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(
            tests_in_other_chunks)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory,
                                         'results.html')
            self._copy_results_html_file(self._results_directory,
                                         'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)