Code example #1
    def test_encoding(self):
        start_time, end_time = time.time() - 3, time.time()
        upload = Upload(
            suite='webkitpy-tests',
            configuration=Upload.create_configuration(
                platform='mac',
                version='10.13.0',
                version_name='High Sierra',
                architecture='x86_64',
                sdk='17A405',
            ),
            details=Upload.create_details(link='https://webkit.org'),
            commits=[Upload.create_commit(
                repository_id='webkit',
                id='5',
                branch='trunk',
            )],
            run_stats=Upload.create_run_stats(
                start_time=start_time,
                end_time=end_time,
                tests_skipped=0,
            ),
            results={
                'webkitpy.test1': {},
                'webkitpy.test2': Upload.create_test_result(expected=Upload.Expectations.PASS, actual=Upload.Expectations.FAIL),
            },
        )
        generated_dict = self.normalize(json.loads(json.dumps(upload, cls=Upload.Encoder)))

        self.assertEqual(generated_dict['version'], 0)
        self.assertEqual(generated_dict['suite'], 'webkitpy-tests')
        self.assertEqual(generated_dict['configuration'], self.normalize(dict(
            platform='mac',
            is_simulator=False,
            version='10.13.0',
            version_name='High Sierra',
            architecture='x86_64',
            sdk='17A405',
        )))
        self.assertEqual(generated_dict['commits'], [dict(
            repository_id='webkit',
            id='5',
            branch='trunk',
        )])
        self.assertEqual(generated_dict['test_results']['details'], self.normalize(dict(link='https://webkit.org')))
        self.assertEqual(generated_dict['test_results']['run_stats'], self.normalize(dict(
            start_time=start_time,
            end_time=end_time,
            tests_skipped=0,
        )))
        self.assertEqual(generated_dict['test_results']['results'], self.normalize({
            'webkitpy.test1': {},
            'webkitpy.test2': Upload.create_test_result(expected=Upload.Expectations.PASS, actual=Upload.Expectations.FAIL),
        }))
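
The test above round-trips an Upload through json.dumps with Upload.Encoder. A minimal usage sketch of that same serialization path follows; the webkitpy.results.upload import path is an assumption not shown in the example, and which keyword arguments are optional is also not confirmed here.

    # Hedged sketch only: the import path and optional-argument behaviour are assumptions.
    import json
    import time

    from webkitpy.results.upload import Upload  # assumed module path

    upload = Upload(
        suite='webkitpy-tests',
        configuration=Upload.create_configuration(
            platform='mac',
            version='10.13.0',
            version_name='High Sierra',
            architecture='x86_64',
            sdk='17A405',
        ),
        details=Upload.create_details(link='https://webkit.org'),
        commits=[Upload.create_commit(repository_id='webkit', id='5', branch='trunk')],
        run_stats=Upload.create_run_stats(
            start_time=time.time() - 3,
            end_time=time.time(),
            tests_skipped=0,
        ),
        results={'webkitpy.test1': {}},  # an empty dict is treated as a PASS
    )
    payload = json.dumps(upload, cls=Upload.Encoder)  # the JSON document the test inspects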
Code example #2
    def _run_tests(self, names):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that we can run tests concurrently
        # without having to worry about autoinstalling packages concurrently.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything
        autoinstall_everything()

        start_time = time.time()

        if getattr(self._options, 'coverage', False):
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage
            cov = coverage.coverage(omit=[
                "/usr/*",
                "*/webkitpy/thirdparty/autoinstalled/*",
                "*/webkitpy/thirdparty/BeautifulSoup.py",
                "*/webkitpy/thirdparty/BeautifulSoup_legacy.py",
            ])
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests,
                        getattr(self._options, 'child_processes', 1))
        test_runner.run(serial_tests, 1)
        end_time = time.time()

        self.printer.print_result(time.time() - start)

        if getattr(self._options, 'json', False):
            _print_results_as_json(
                sys.stdout, itertools.chain(parallel_tests, serial_tests),
                test_runner.failures, test_runner.errors)

        if getattr(self._options, 'json_file_name', None):
            self._options.json_file_name = os.path.abspath(
                self._options.json_file_name)
            with open(self._options.json_file_name, 'w') as json_file:
                _print_results_as_json(
                    json_file, itertools.chain(parallel_tests, serial_tests),
                    test_runner.failures, test_runner.errors)

        if getattr(self._options, 'coverage', False):
            cov.stop()
            cov.save()

        failed_uploads = 0
        if getattr(self._options, 'report_urls', None):
            self.printer.meter.writeln('\n')
            self.printer.write_update('Preparing upload data ...')

            # Empty test results indicate a PASS.
            results = {test: {} for test in test_runner.tests_run}
            for test, errors in test_runner.errors:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.ERROR, log='\n'.join(errors))
            for test, failures in test_runner.failures:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.FAIL, log='\n'.join(failures))

            _host.initialize_scm()
            upload = Upload(
                suite='webkitpy-tests',
                configuration=Upload.create_configuration(
                    platform=_host.platform.os_name,
                    version=str(_host.platform.os_version),
                    version_name=_host.platform.os_version_name(),
                    style=self.upload_style,
                    sdk=_host.platform.build_version(),
                    flavor=self._options.result_report_flavor,
                ),
                details=Upload.create_details(options=self._options),
                commits=[
                    Upload.create_commit(
                        repository_id='webkit',
                        id=_host.scm().native_revision(_webkit_root),
                        branch=_host.scm().native_branch(_webkit_root),
                    )
                ],
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(test_runner.tests_run) -
                    len(parallel_tests) - len(serial_tests),
                ),
                results=results,
            )
            for url in self._options.report_urls:
                self.printer.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self.printer.meter.writeln):
                    failed_uploads += 1
            self.printer.meter.writeln('Uploads completed!')

        if getattr(self._options, 'coverage', False):
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures and not failed_uploads
Code example #3
File: manager.py Project: eocanha/webkit
    def run(self, args, json_output=None):
        if json_output:
            json_output = self.host.filesystem.abspath(json_output)
            if not self.host.filesystem.isdir(
                    self.host.filesystem.dirname(json_output)
            ) or self.host.filesystem.isdir(json_output):
                raise RuntimeError('Cannot write to {}'.format(json_output))

        start_time = time.time()

        self._stream.write_update('Checking build ...')
        if not self._port.check_api_test_build(
                self._binaries_for_arguments(args)):
            _log.error('Build check failed')
            return Manager.FAILED_BUILD_CHECK

        self._initialize_devices()

        self._stream.write_update('Collecting tests ...')
        try:
            test_names = self._collect_tests(args)
        except ScriptError:
            self._stream.writeln('Failed to collect tests')
            return Manager.FAILED_COLLECT_TESTS
        self._stream.write_update('Found {} tests'.format(len(test_names)))
        if len(test_names) == 0:
            self._stream.writeln('No tests found')
            return Manager.FAILED_COLLECT_TESTS

        if self._port.get_option('dump'):
            for test in test_names:
                self._stream.writeln(test)
            return Manager.SUCCESS

        test_names = [
            test for test in test_names
            for _ in range(self._options.repeat_each)
        ]
        if self._options.repeat_each != 1:
            _log.debug('Repeating each test {} times'.format(
                self._options.repeat_each))

        try:
            _log.info('Running tests')
            runner = Runner(self._port, self._stream)
            for i in range(self._options.iterations):
                _log.debug('\nIteration {}'.format(i + 1))
                runner.run(
                    test_names,
                    int(self._options.child_processes)
                    if self._options.child_processes else
                    self._port.default_child_processes())
        except KeyboardInterrupt:
            # If we receive a KeyboardInterrupt, print results.
            self._stream.writeln('')

        end_time = time.time()

        successful = runner.result_map_by_status(runner.STATUS_PASSED)
        disabled = len(runner.result_map_by_status(runner.STATUS_DISABLED))
        _log.info('Ran {} tests of {} with {} successful'.format(
            len(runner.results) - disabled, len(test_names), len(successful)))

        result_dictionary = {
            'Skipped': [],
            'Failed': [],
            'Crashed': [],
            'Timedout': [],
        }

        self._stream.writeln('-' * 30)
        result = Manager.SUCCESS
        if len(successful) * self._options.repeat_each + disabled == len(
                test_names):
            self._stream.writeln('All tests successfully passed!')
            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))
        else:
            self._stream.writeln('Test suite failed')
            self._stream.writeln('')

            skipped = []
            for test in test_names:
                if test not in runner.results:
                    skipped.append(test)
                    result_dictionary['Skipped'].append({
                        'name': test,
                        'output': None
                    })
            if skipped:
                self._stream.writeln('Skipped {} tests'.format(len(skipped)))
                self._stream.writeln('')
                if self._options.verbose:
                    for test in skipped:
                        self._stream.writeln('    {}'.format(test))

            self._print_tests_result_with_status(runner.STATUS_FAILED, runner)
            self._print_tests_result_with_status(runner.STATUS_CRASHED, runner)
            self._print_tests_result_with_status(runner.STATUS_TIMEOUT, runner)

            for test, result in iteritems(runner.results):
                status_to_string = {
                    runner.STATUS_FAILED: 'Failed',
                    runner.STATUS_CRASHED: 'Crashed',
                    runner.STATUS_TIMEOUT: 'Timedout',
                }.get(result[0])
                if not status_to_string:
                    continue
                result_dictionary[status_to_string].append({
                    'name': test,
                    'output': result[1]
                })

            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))

            result = Manager.FAILED_TESTS

        if self._options.report_urls:
            self._stream.writeln('\n')
            self._stream.write_update('Preparing upload data ...')

            status_to_test_result = {
                runner.STATUS_PASSED: None,
                runner.STATUS_FAILED: Upload.Expectations.FAIL,
                runner.STATUS_CRASHED: Upload.Expectations.CRASH,
                runner.STATUS_TIMEOUT: Upload.Expectations.TIMEOUT,
            }
            upload = Upload(
                suite=self._options.suite or 'api-tests',
                configuration=self._port.configuration_for_upload(
                    self._port.target_host(0)),
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(result_dictionary['Skipped']),
                ),
                results={
                    test: Upload.create_test_result(
                        actual=status_to_test_result[result[0]])
                    for test, result in iteritems(runner.results)
                    if result[0] in status_to_test_result
                },
            )
            for url in self._options.report_urls:
                self._stream.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self._stream.writeln):
                    result = Manager.FAILED_UPLOAD
            self._stream.writeln('Uploads completed!')

        return result
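
The json_output file written above is a plain dictionary keyed by status, where each entry is a {'name': ..., 'output': ...} record. Below is a small, hypothetical sketch of reading it back; the file path is made up for illustration and only the dictionary shape comes from the code above.

    # Hedged sketch: the path is hypothetical; the keys and entry shape mirror result_dictionary above.
    import json

    with open('/tmp/api-test-results.json') as fp:  # whatever was passed as json_output
        buckets = json.load(fp)

    for status in ('Skipped', 'Failed', 'Crashed', 'Timedout'):
        entries = buckets.get(status, [])
        print('{}: {} test(s)'.format(status, len(entries)))
        for entry in entries:
            print('    {}'.format(entry['name']))  # 'output' holds the captured output, possibly None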
Code example #4
File: main.py Project: zszyj/webkit
    def _run_tests(self, names, will_run_lldb_webkit_tests):
        # Make sure PYTHONPATH is set up properly.
        sys.path = self.finder.additional_paths(sys.path) + sys.path

        # We autoinstall everything up front so that we can run tests concurrently
        # without having to worry about autoinstalling packages concurrently.
        self.printer.write_update("Checking autoinstalled packages ...")
        from webkitpy.thirdparty import autoinstall_everything
        autoinstall_everything()

        start_time = time.time()
        config = Config(_host.executive, self.finder.filesystem)
        configuration_to_use = self._options.configuration or config.default_configuration(
        )

        if will_run_lldb_webkit_tests:
            self.printer.write_update('Building lldbWebKitTester ...')
            build_lldbwebkittester = self.finder.filesystem.join(
                _webkit_root, 'Tools', 'Scripts', 'build-lldbwebkittester')
            try:
                _host.executive.run_and_throw_if_fail(
                    [
                        build_lldbwebkittester,
                        config.flag_for_configuration(configuration_to_use)
                    ],
                    quiet=(not bool(self._options.verbose)))
            except ScriptError as e:
                _log.error(e.message_with_output(output_limit=None))
                return False
            os.environ['LLDB_WEBKIT_TESTER_EXECUTABLE'] = str(
                self.finder.filesystem.join(
                    config.build_directory(configuration_to_use),
                    'lldbWebKitTester'))
            if not self.finder.filesystem.exists(
                    os.environ['LLDB_WEBKIT_TESTER_EXECUTABLE']):
                _log.error('Failed to find lldbWebKitTester.')
                return False

        if self._options.coverage:
            _log.warning("Checking code coverage, so running things serially")
            self._options.child_processes = 1

            import webkitpy.thirdparty.autoinstalled.coverage as coverage
            cov = coverage.coverage(omit=[
                "/usr/*", "*/webkitpy/thirdparty/autoinstalled/*",
                "*/webkitpy/thirdparty/BeautifulSoup.py"
            ])
            cov.start()

        self.printer.write_update("Checking imports ...")
        if not self._check_imports(names):
            return False

        self.printer.write_update("Finding the individual test methods ...")
        loader = _Loader()
        parallel_tests, serial_tests = self._test_names(loader, names)

        self.printer.write_update("Running the tests ...")
        self.printer.num_tests = len(parallel_tests) + len(serial_tests)
        start = time.time()
        test_runner = Runner(self.printer, loader)
        test_runner.run(parallel_tests, self._options.child_processes)
        test_runner.run(serial_tests, 1)
        end_time = time.time()

        self.printer.print_result(time.time() - start)

        if self._options.json:
            _print_results_as_json(
                sys.stdout, itertools.chain(parallel_tests, serial_tests),
                test_runner.failures, test_runner.errors)

        if self._options.json_file_name:
            self._options.json_file_name = os.path.abspath(
                self._options.json_file_name)
            with open(self._options.json_file_name, 'w') as json_file:
                _print_results_as_json(
                    json_file, itertools.chain(parallel_tests, serial_tests),
                    test_runner.failures, test_runner.errors)

        if self._options.coverage:
            cov.stop()
            cov.save()

        failed_uploads = 0
        if self._options.report_urls:
            self.printer.meter.writeln('\n')
            self.printer.write_update('Preparing upload data ...')

            # Empty test results indicate a PASS.
            results = {test: {} for test in test_runner.tests_run}
            for test, errors in test_runner.errors:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.ERROR, log='\n'.join(errors))
            for test, failures in test_runner.failures:
                results[test] = Upload.create_test_result(
                    actual=Upload.Expectations.FAIL, log='\n'.join(failures))

            _host.initialize_scm()
            upload = Upload(
                suite='webkitpy-tests',
                configuration=Upload.create_configuration(
                    platform=_host.platform.os_name,
                    version=str(_host.platform.os_version),
                    version_name=_host.platform.os_version_name(),
                    style='asan'
                    if config.asan else configuration_to_use.lower(),
                    sdk=_host.platform.build_version(),
                    flavor=self._options.result_report_flavor,
                ),
                details=Upload.create_details(options=self._options),
                commits=[
                    Upload.create_commit(
                        repository_id='webkit',
                        id=_host.scm().native_revision(_webkit_root),
                        branch=_host.scm().native_branch(_webkit_root),
                    )
                ],
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(test_runner.tests_run) -
                    len(parallel_tests) - len(serial_tests),
                ),
                results=results,
            )
            for url in self._options.report_urls:
                self.printer.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self.printer.meter.writeln):
                    failed_uploads += 1
            self.printer.meter.writeln('Uploads completed!')

        if self._options.coverage:
            cov.report(show_missing=False)

        return not self.printer.num_errors and not self.printer.num_failures and not failed_uploads
Code example #5
    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this is currently identical to the following if, which likely isn't intended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Check to make sure we have no tests to run that are not skipped.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
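
After the per-device uploads, the run also archives the results directory and posts it with upload_archive. The archiving step relies on shutil.make_archive appending the format suffix, which is why the code above reads back archive + '.zip'. Below is a standalone sketch of just that step, with hypothetical stand-in paths.

    # Hedged sketch of the archiving step only; the directories here are temporary stand-ins.
    import os
    import shutil
    import tempfile

    results_directory = tempfile.mkdtemp()                        # stand-in for self._results_directory
    base_name = os.path.join(tempfile.mkdtemp(), 'test-archive')  # stand-in for the joined temp path
    archive_path = shutil.make_archive(base_name, 'zip', results_directory)
    assert archive_path == base_name + '.zip'  # make_archive returns the path with the suffix appended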
Code example #6
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        num_failed_uploads = 0
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            """Run the tests and return a RunDetails object with the results."""
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
            aggregate_tests.update(tests_to_run)

        # If a test is marked skipped but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in total_tests and arg not in aggregate_tests:
                    tests_to_run_by_device[device_type_list[0]].append(arg)
                    aggregate_tests.add(arg)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum([len(tests) for tests in itervalues(tests_to_run_by_device)]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite='layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result