Code example #1
    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._save_json_files(summarized_results, initial_results)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
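Every example on this page builds and returns a `test_run_results.RunDetails` value. The exact signature differs between the forks shown here (the newer Chromium-style examples pass separate full and failing-only summaries), but a minimal sketch of the container assumed by the webkitpy-style call sites above, with field names inferred from the positional arguments, looks like this:

    class RunDetails(object):
        """Hedged sketch of the result container; the real class in
        webkitpy carries the same data but may differ in detail."""

        def __init__(self, exit_code, summarized_results=None,
                     initial_results=None, retry_results=None,
                     enabled_pixel_tests_in_retry=False,
                     skipped_all_tests=False):
            self.exit_code = exit_code
            self.summarized_results = summarized_results
            self.initial_results = initial_results
            self.retry_results = retry_results
            self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
            self.skipped_all_tests = skipped_all_tests

Call sites such as `RunDetails(exit_code=-1)` and `RunDetails(exit_code=0, skipped_all_tests=True)` in the later examples are consistent with this shape.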
Code example #2
 def test_print_results(self):
     port = MockHost().port_factory.get('test', options=MockOptions(pixel_tests=False, world_leaks=False))
     printer, out = self.get_printer()
     initial_results = test_run_results_unittest.run_results(port)
     summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
     details = test_run_results.RunDetails(summary['num_regressions'], summary, initial_results, None)
     printer.print_results(details)
     self.assertNotEmpty(out)
Code example #3
 def test_print_results(self):
     port = MockHost().port_factory.get('test')
     printer, out = self.get_printer()
     initial_results = test_run_results_unittest.run_results(port)
     full_summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
     failing_summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False, only_include_failing=True)
     details = test_run_results.RunDetails(failing_summary['num_regressions'], full_summary, failing_summary, initial_results, None)
     printer.print_results(details)
     self.assertTrue(out.getvalue().find('but passed') != -1)
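Examples #2 and #3 depend on a `get_printer()` helper that is not shown on this page; the assertions only require that the printer's output land in an in-memory stream. A minimal sketch of that capture pattern, assuming nothing about the real helper beyond a StringIO-backed stream:

    import io

    # Hypothetical stand-in for the stream half of get_printer(): the test
    # writes through the printer into the stream, then asserts on its value.
    out = io.StringIO()
    out.write("10 tests ran as expected, 2 didn't (but passed on retry)")
    assert out.getvalue().find('but passed') != -1

Example #3 also passes both a full and a failing-only summary to RunDetails, matching the newer signature with separate summaries used in examples #6 and #7.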
Code example #4
    def print_expectations(self, args):
        aggregate_test_names = set()
        aggregate_tests_to_run = set()
        aggregate_tests_to_skip = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [self._port.DEVICE_TYPE]
        for device_type in device_type_list:
            """Run the tests and return a RunDetails object with the results."""
            for_device_type = 'for {} '.format(device_type) if device_type else ''
            self._printer.write_update('Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update('Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)
            aggregate_tests_to_skip.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run)

        aggregate_tests_to_skip = aggregate_tests_to_skip - aggregate_tests_to_run

        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
        test_col_width = len(max(aggregate_tests_to_run.union(aggregate_tests_to_skip), key=len)) + 1

        self._print_expectations_for_subset(device_type_list[0], test_col_width, tests_to_run_by_device[device_type_list[0]], aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(device_type, test_col_width, tests_to_run_by_device[device_type])

        return 0
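The bookkeeping in `print_expectations` assigns each test to the first device type whose collection pass produced it, and only treats a test as skipped if no device type runs it. The same pattern reduced to a standalone sketch (names hypothetical, mirroring what `_prepare_lists` returns per device):

    def schedule_by_device(tests_by_device):
        """Assign each test to the first device type that lists it.

        tests_by_device maps device_type -> (tests_to_run, tests_to_skip),
        in priority order.
        """
        aggregate_to_run = set()
        aggregate_to_skip = set()
        scheduled = {}
        for device_type, (to_run, to_skip) in tests_by_device.items():
            aggregate_to_skip.update(to_skip)
            scheduled[device_type] = [t for t in to_run
                                      if t not in aggregate_to_run]
            aggregate_to_run.update(to_run)
        # A test skipped on one device but runnable on another is not skipped.
        aggregate_to_skip -= aggregate_to_run
        return scheduled, aggregate_to_skip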
Code example #5
File: manager.py Project: xiaoyanzheng/webkit
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                int(self._options.child_processes),
                retrying=False)

            tests_to_retry = self._tests_to_retry(
                initial_results,
                include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by the user or by the failures-limit exception.
            retry_failures = self._options.retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." %
                          pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results,
            enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(
                self._port,
                self._expectations,
                initial_results,
                retry_results,
                enabled_pixel_tests_in_retry,
                include_passes=True,
                include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results,
                                    summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results,
                                    results_including_passes, start_time,
                                    end_time)

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (
                        initial_results.unexpected_results_by_name or
                    (self._options.full_results_html
                     and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(
                    summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results,
                                           initial_results, retry_results,
                                           enabled_pixel_tests_in_retry)
Code example #6
File: manager.py Project: subhanshuja/ofa
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False
        try:
            paths, test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            num_workers = self._port.num_workers(
                int(self._options.child_processes))

            initial_results = self._run_tests(tests_to_run, tests_to_skip,
                                              self._options.repeat_each,
                                              self._options.iterations,
                                              num_workers)

            # Don't retry failures when interrupted by the user or by the failures-limit exception.
            should_retry_failures = should_retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break

                    _log.info('')
                    _log.info(
                        'Retrying %s, attempt %d of %d...',
                        grammar.pluralize('unexpected failure',
                                          len(tests_to_retry)), retry_attempt,
                        self._options.num_retries)

                    retry_results = self._run_tests(
                        tests_to_retry,
                        tests_to_skip=set(),
                        repeat_each=1,
                        iterations=1,
                        num_workers=num_workers,
                        retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)

                    tests_to_retry = self._tests_to_retry(retry_results)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS)
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            if self._options.write_full_results_to:
                self._filesystem.copyfile(
                    self._filesystem.join(self._results_directory,
                                          "full_results.json"),
                    self._options.write_full_results_to)

            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or (self._options.full_results_html
                                      and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time,
                                            initial_results,
                                            summarized_failing_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)
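Examples #6 and #7 clamp the regression count before using it as the process exit status: POSIX exit codes are one byte, so large failure counts would otherwise collide with the reserved interrupt and early-exit statuses. A standalone sketch of that policy; the constant values here are hypothetical stand-ins for the real ones in test_run_results/exit_codes:

    # Hypothetical values standing in for the real module constants.
    MAX_FAILURES_EXIT_STATUS = 101
    INTERRUPTED_EXIT_STATUS = 130   # 128 + SIGINT
    EARLY_EXIT_STATUS = 251

    def final_exit_code(num_regressions, keyboard_interrupted, interrupted):
        # A user interrupt wins; an internal early exit comes next;
        # otherwise report the regression count, capped.
        if keyboard_interrupted:
            return INTERRUPTED_EXIT_STATUS
        if interrupted:
            return EARLY_EXIT_STATUS
        return min(num_regressions, MAX_FAILURES_EXIT_STATUS)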
Code example #7
    def run(self, args):
        """Runs the tests and return a RunDetails object with the results."""
        start_time = time.time()
        running_all_tests = False

        if not args or any('external' in path for path in args):
            self._printer.write_update(
                'Generating MANIFEST.json for web-platform-tests ...')
            WPTManifest.ensure_manifest(self._port.host)
            self._printer.write_update('Completed generating manifest.')

        self._printer.write_update('Collecting tests ...')
        try:
            paths, all_test_names, running_all_tests = self._collect_tests(
                args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            all_test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            all_test_names.sort()
            random.Random(self._options.seed).shuffle(all_test_names)

        test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            all_test_names)

        self._printer.write_update('Parsing expectations ...')
        self._expectations = test_expectations.TestExpectations(
            self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)

        self._expectations.remove_tests_from_expectations(
            tests_in_other_chunks)

        self._printer.print_found(len(all_test_names), len(test_names),
                                  len(tests_to_run), self._options.repeat_each,
                                  self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(
                exit_code=exit_codes.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        try:
            self._start_servers(tests_to_run)
            if self._options.watch:
                run_results = self._run_test_loop(tests_to_run, tests_to_skip)
            else:
                run_results = self._run_test_once(tests_to_run, tests_to_skip,
                                                  should_retry_failures)
            initial_results, all_retry_results, enabled_pixel_tests_in_retry = run_results
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update('Looking for new crash logs ...')
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)

        self._printer.write_update('Summarizing results ...')
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port,
            self._expectations,
            initial_results,
            all_retry_results,
            enabled_pixel_tests_in_retry,
            only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)',
                         exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
            exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS

        if not self._options.dry_run:
            self._write_json_files(summarized_full_results,
                                   summarized_failing_results, initial_results,
                                   running_all_tests)

            self._upload_json_files()

            self._copy_results_html_file(self._results_directory,
                                         'results.html')
            self._copy_results_html_file(self._results_directory,
                                         'legacy-results.html')
            if initial_results.keyboard_interrupted:
                exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = exit_codes.EARLY_EXIT_STATUS
                if self._options.show_results and (
                        exit_code or initial_results.total_failures):
                    self._port.show_results_html_file(
                        self._filesystem.join(self._results_directory,
                                              'results.html'))
                self._printer.print_results(time.time() - start_time,
                                            initial_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results,
                                           summarized_failing_results,
                                           initial_results, all_retry_results,
                                           enabled_pixel_tests_in_retry)
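The 'random' order in example #7 sorts before shuffling with a seeded `random.Random`, which makes the resulting order a pure function of the seed rather than of filesystem discovery order. A minimal sketch:

    import random

    def ordered_tests(all_test_names, order, seed):
        names = list(all_test_names)
        if order == 'random':
            names.sort()                         # canonical starting point
            random.Random(seed).shuffle(names)   # deterministic per seed
        return names

    # The same seed reproduces the same order regardless of input order.
    assert ordered_tests(['b', 'a', 'c'], 'random', 4) == \
           ordered_tests(['c', 'b', 'a'], 'random', 4)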
Code example #8
File: manager.py Project: VortrexFTW/webkit
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for test in tests_to_run)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        if default_device_tests:
            _log.info('')
            _log.info("Running %s", pluralize(len(tests_to_run), "test"))
            _log.info('')
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            initial_results, retry_results, enabled_pixel_tests_in_retry = self._run_test_subset(default_device_tests, tests_to_skip)

        # Only use a single worker for custom device classes
        self._options.child_processes = 1
        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            if device_tests:
                _log.info('')
                _log.info('Running %s for %s', pluralize(len(device_tests), "test"), device_class)
                _log.info('')
                if not self._set_up_run(device_tests, device_class):
                    return test_run_results.RunDetails(exit_code=-1)

                device_initial_results, device_retry_results, device_enabled_pixel_tests_in_retry = self._run_test_subset(device_tests, tests_to_skip)

                initial_results = initial_results.merge(device_initial_results) if initial_results else device_initial_results
                retry_results = retry_results.merge(device_retry_results) if retry_results else device_retry_results
                enabled_pixel_tests_in_retry |= device_enabled_pixel_tests_in_retry

        self._runner.stop_servers()
        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
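Examples #8 through #13 all fold per-device results together with the `x = x.merge(y) if x else y` idiom. Written once as a helper, the fold looks like the sketch below; it assumes only that each results object exposes a `merge()` returning a combined object, as `TestRunResults` does in these examples:

    def merge_all(result_sets):
        # Fold a sequence of results into one; None if nothing ran.
        merged = None
        for results in result_sets:
            merged = merged.merge(results) if merged else results
        return merged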
Code example #9
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(
                    len(custom_device_tests[device_class]), device_class))

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        if default_device_tests:
            _log.info('')
            _log.info("Running %s", pluralize(len(tests_to_run), "test"))
            _log.info('')
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            initial_results, retry_results, enabled_pixel_tests_in_retry = self._run_test_subset(
                default_device_tests, tests_to_skip)

        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            if device_tests:
                _log.info('')
                _log.info('Running %s for %s',
                          pluralize(len(device_tests), "test"), device_class)
                _log.info('')
                if not self._set_up_run(device_tests, device_class):
                    return test_run_results.RunDetails(exit_code=-1)

                device_initial_results, device_retry_results, device_enabled_pixel_tests_in_retry = self._run_test_subset(
                    device_tests, tests_to_skip)

                initial_results = initial_results.merge(
                    device_initial_results
                ) if initial_results else device_initial_results
                retry_results = retry_results.merge(
                    device_retry_results
                ) if retry_results else device_retry_results
                enabled_pixel_tests_in_retry |= device_enabled_pixel_tests_in_retry

        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results,
                                  retry_results, enabled_pixel_tests_in_retry)
Code example #10
    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this is currently identical to the following if, which is likely unintended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Check to make sure at least one test remains to run after skips.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
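When uploading, example #10 zips the whole results directory with `shutil.make_archive`; note that the function appends the format suffix itself, which is why the code reopens `archive + '.zip'`. A self-contained sketch of that step with hypothetical paths:

    import os
    import shutil
    import tempfile

    results_directory = '/tmp/layout-test-results'   # hypothetical path
    os.makedirs(results_directory, exist_ok=True)

    with tempfile.TemporaryDirectory() as temp:
        base = os.path.join(temp, 'test-archive')
        # make_archive returns the final path, e.g. .../test-archive.zip.
        archive_path = shutil.make_archive(base, 'zip', results_directory)
        with open(archive_path, 'rb') as archive_file:
            payload = archive_file.read()  # bytes handed to upload_archive()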
Code example #11
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        num_failed_uploads = 0
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
            aggregate_tests.update(tests_to_run)

        # If a test is marked skipped but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in total_tests and arg not in aggregate_tests:
                    tests_to_run_by_device[device_type_list[0]].append(arg)
                    aggregate_tests.add(arg)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum([len(tests) for tests in itervalues(tests_to_run_by_device)]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite='layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
Code example #12
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        start_time = time.time()
        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            if should_retry_failures and tests_to_retry and not initial_results.interrupted:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_failing_results)

        exit_code = self._port.exit_code_from_summarized_results(summarized_failing_results)
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
Code example #13
File: manager.py Project: hxdwdfang/webkit
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            for_device_type = 'for {} '.format(
                device_type) if device_type else ''
            self._printer.write_update(
                'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(
                    args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(
                'Parsing expectations {}...'.format(for_device_type))
            self._expectations[
                device_type] = test_expectations.TestExpectations(
                    self._port,
                    test_names,
                    force_expectations_pass=self._options.force,
                    device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(
                paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [
                test for test in tests_to_run if test not in aggregate_tests
            ]
            aggregate_tests.update(tests_to_run)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names),
                                  len(aggregate_tests),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum(
            [len(tests) for tests in tests_to_run_by_device.itervalues()]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        needs_http = any((self._is_http_test(test)
                          and not self._needs_web_platform_test(test))
                         for tests in tests_to_run_by_device.itervalues()
                         for test in tests)
        needs_web_platform_test_server = any(
            self._needs_web_platform_test(test)
            for tests in tests_to_run_by_device.itervalues() for test in tests)
        needs_websockets = any(
            self._is_websocket_test(test)
            for tests in tests_to_run_by_device.itervalues() for test in tests)
        self._runner = LayoutTestRunner(
            self._options,
            self._port,
            self._printer,
            self._results_directory,
            self._test_is_slow,
            needs_http=needs_http,
            needs_web_platform_test_server=needs_web_platform_test_server,
            needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes

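        # Second pass: run each device type's subset, rebinding the slow-test
        # predicate and recomputing the worker count for that device type.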
        for device_type in device_type_list:
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(
                test_file, device_type=device_type)
            self._options.child_processes = min(
                self._port.max_child_processes(device_type=device_type),
                int(child_processes_option_value
                    or self._port.default_child_processes(
                        device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(
                    pluralize(len(tests_to_run_by_device[device_type]),
                              'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes,
                                              max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info('Running {}{}'.format(
                pluralize(len(tests_to_run_by_device[device_type]), 'test'),
                ' for {}'.format(str(device_type)) if device_type else ''))
            _log.info('')
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type],
                                    device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

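            # Run this device type's subset and fold its results into the aggregate.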
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(
                tests_to_run_by_device[device_type],
                tests_to_skip,
                device_type=device_type)
            initial_results = initial_results.merge(
                temp_initial_results
            ) if initial_results else temp_initial_results
            retry_results = retry_results.merge(
                temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        # Restore the option for final logging; max_child_processes_for_run is the most relevant value here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results,
                                  retry_results, enabled_pixel_tests_in_retry)
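
The two passes above accumulate results with a conditional merge: the first
device type's run seeds the aggregate, and each later run is folded in. A
minimal standalone sketch of that pattern (Results is a hypothetical stand-in
for webkitpy's TestRunResults, and its merge() semantics are assumed here for
illustration, not taken from the library):

class Results(object):
    def __init__(self, failures):
        self.failures = list(failures)

    def merge(self, other):
        # Combine two result sets; the real class also merges timings,
        # expectations, and interruption state.
        return Results(self.failures + other.failures)

initial_results = None
for per_device_failures in (['a.html'], [], ['b.html']):
    temp_results = Results(per_device_failures)
    initial_results = initial_results.merge(temp_results) if initial_results else temp_results

print(initial_results.failures)  # ['a.html', 'b.html']
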
Code example #14
File: manager.py  Project: JanXu-Dev/webkit
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port,
            test_names,
            force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        # Look for tests with custom device requirements.
        test_device_mapping = defaultdict(list)
        for test_file in tests_to_run:
            test_device_mapping[self._custom_device_for_test(test_file)
                                or self._port.DEFAULT_DEVICE_TYPE].append(
                                    test_file)

        # Order device types from most specific to least specific so that tests
        # keyed to a less specific device type can be claimed by a matching,
        # more specific one.
        device_type_order = []
        types_with_family = []
        remaining_types = []
        for device_type in test_device_mapping.iterkeys():
            if device_type and device_type.hardware_family and device_type.hardware_type:
                device_type_order.append(device_type)
            elif device_type and device_type.hardware_family:
                types_with_family.append(device_type)
            else:
                remaining_types.append(device_type)
        device_type_order.extend(types_with_family + remaining_types)

        needs_http = any((self._is_http_test(test)
                          and not self._needs_web_platform_test(test))
                         for test in tests_to_run)
        needs_web_platform_test_server = any(
            self._needs_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(
            self._is_websocket_test(test) for test in tests_to_run)
        self._runner = LayoutTestRunner(
            self._options,
            self._port,
            self._printer,
            self._results_directory,
            self._test_is_slow,
            needs_http=needs_http,
            needs_web_platform_test_server=needs_web_platform_test_server,
            needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        child_processes_option_value = self._options.child_processes

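        # Claim tests for the most specific device types first; tests mapped to
        # matching, less specific types are folded in by the inner loop below.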
        while device_type_order:
            device_type = device_type_order[0]
            tests = test_device_mapping[device_type]
            del device_type_order[0]

            self._options.child_processes = min(
                self._port.max_child_processes(device_type=device_type),
                int(child_processes_option_value
                    or self._port.default_child_processes(
                        device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(
                    pluralize(len(test_device_mapping[device_type]), 'test'),
                    str(device_type)))
                _log.info('')
                continue

            # This loop looks for any less-specific device types that match the current device type.
            index = 0
            while index < len(device_type_order):
                if device_type_order[index] == device_type:
                    tests.extend(test_device_mapping[device_type_order[index]])

                    # Remove device types from device_type_order once the tests associated with that type have been claimed.
                    del device_type_order[index]
                else:
                    index += 1

            _log.info('Running {}{}'.format(
                pluralize(len(tests), 'test'),
                ' for {}'.format(str(device_type)) if device_type else ''))
            _log.info('')
            if not self._set_up_run(tests, device_type):
                return test_run_results.RunDetails(exit_code=-1)

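            # Merge this device type's run into the aggregate results.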
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(
                tests, tests_to_skip)
            initial_results = initial_results.merge(
                temp_initial_results
            ) if initial_results else temp_initial_results
            retry_results = retry_results.merge(
                temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        self._runner.stop_servers()

        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results,
                                  retry_results, enabled_pixel_tests_in_retry)
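
The ordering step in run() buckets device types by specificity so that tests
keyed to a less specific type can later be claimed by a matching, more
specific one (the claiming loop relies on DeviceType equality treating
unspecified attributes as wildcards, as the comments above suggest). A
standalone sketch of just the bucketing, with a hypothetical namedtuple
standing in for webkitpy's DeviceType:

from collections import namedtuple

# Hypothetical stand-in for webkitpy's DeviceType; only the two attributes
# the ordering logic inspects are modeled, and equality here is plain tuple
# equality rather than the fuzzy matching the real class provides.
DeviceType = namedtuple('DeviceType', ['hardware_family', 'hardware_type'])

def order_most_specific_first(device_types):
    with_family_and_type = []
    with_family_only = []
    remaining = []
    for device_type in device_types:
        if device_type and device_type.hardware_family and device_type.hardware_type:
            with_family_and_type.append(device_type)
        elif device_type and device_type.hardware_family:
            with_family_only.append(device_type)
        else:
            remaining.append(device_type)
    # Most specific first: family+type, then family-only, then everything else.
    return with_family_and_type + with_family_only + remaining

print(order_most_specific_first([None, DeviceType('iPhone', None), DeviceType('iPad', 'iPad Pro')]))
# -> [DeviceType(hardware_family='iPad', hardware_type='iPad Pro'),
#     DeviceType(hardware_family='iPhone', hardware_type=None), None]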