Example #1
class Manager(object):
    """Test execution manager

    This class has the main entry points for run-webkit-tests; the run_webkit_tests module almost
    exclusively just handles CLI options. It orchestrates collecting the tests (through
    LayoutTestFinder), running them (LayoutTestRunner), and then displaying the results
    (TestResultWriter/Printer).
    """

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        fs = port.host.filesystem
        self._filesystem = fs
        self._options = options
        self._printer = printer
        self._expectations = OrderedDict()
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

        self._tests_options = {}
        test_options_json_path = fs.join(self._port.layout_tests_dir(), "tests-options.json")
        if fs.exists(test_options_json_path):
            with fs.open_binary_file_for_reading(test_options_json_path) as fd:
                try:
                    self._tests_options = json.load(fd)
                except (ValueError, IOError):
                    pass

    def _collect_tests(self,
                       paths,  # type: List[str]
                       device_type_list,  # type: List[Optional[DeviceType]]
                       ):
        aggregate_tests = set()  # type: Set[Test]
        aggregate_tests_to_run = set()  # type: Set[Test]
        tests_to_run_by_device = {}  # type: Dict[Optional[DeviceType], List[Test]]

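        # Tests and expectations are collected once per device type; a test only runs
        # under the first device type in the list that wants it (later duplicates are
        # filtered out below).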
        for device_type in device_type_list:
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            paths, tests = self._finder.find_tests(self._options, paths, device_type=device_type)
            aggregate_tests.update(tests)

            test_names = [test.test_path for test in tests]

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            tests_to_run = self._tests_to_run(tests, device_type=device_type)
            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run_by_device[device_type])

        aggregate_tests_to_skip = aggregate_tests - aggregate_tests_to_run

        return tests_to_run_by_device, aggregate_tests_to_skip

    def _skip_tests(self, all_tests_list, expectations, http_tests):
        all_tests = set(all_tests_list)

        tests_to_skip = expectations.model().get_tests_with_result_type(test_expectations.SKIP)
        if self._options.skip_failing_tests:
            tests_to_skip.update(expectations.model().get_tests_with_result_type(test_expectations.FAIL))
            tests_to_skip.update(expectations.model().get_tests_with_result_type(test_expectations.FLAKY))

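        # --skipped=only inverts the set (run only the tests that would otherwise be
        # skipped); --skipped=ignore disables skipping entirely.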
        if self._options.skipped == 'only':
            tests_to_skip = all_tests - tests_to_skip
        elif self._options.skipped == 'ignore':
            tests_to_skip = set()

        # unless of course we don't want to run the HTTP tests :)
        if not self._options.http:
            tests_to_skip.update(set(http_tests))

        return tests_to_skip

    def _split_into_chunks(self, test_names):
        """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
        if not self._options.run_chunk and not self._options.run_part:
            return test_names, set()

        # If the user asked to run only a subset of the tests, grab that subset
        # of the non-skipped tests.
        chunk_value = self._options.run_chunk or self._options.run_part
        try:
            (chunk_num, chunk_len) = chunk_value.split(":")
            chunk_num = int(chunk_num)
            assert(chunk_num >= 0)
            test_size = int(chunk_len)
            assert(test_size > 0)
        except (ValueError, AssertionError):
            _log.critical("invalid chunk '%s'" % chunk_value)
            return (None, None)

        # Get the number of tests
        num_tests = len(test_names)

        # Get the start offset of the slice.
        if self._options.run_chunk:
            chunk_len = test_size
            # In this case chunk_num can be really large; wrap around so the
            # slice still falls within the current number of tests.
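            # e.g. --run-chunk=3:10 over 25 tests starts the slice at (3 * 10) % 25 = 5.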
            slice_start = (chunk_num * chunk_len) % num_tests
        else:
            # Validate the data.
            assert(test_size <= num_tests)
            assert(chunk_num <= test_size)

            # To compute chunk_len without skipping any tests, round num_tests
            # up to the next multiple of test_size so every part gets the
            # same number of tests.
            rounded_tests = num_tests
            if rounded_tests % test_size != 0:
                rounded_tests = (num_tests + test_size - (num_tests % test_size))

            chunk_len = rounded_tests // test_size
            slice_start = chunk_len * (chunk_num - 1)
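            # e.g. --run-part=3:4 over 25 tests rounds up to 28, so chunk_len is 7
            # and this part covers tests[14:21].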
            # It's fine if this runs past the end of the list; the slice end is clamped below.

        # Get the end offset of the slice.
        slice_end = min(num_tests, slice_start + chunk_len)

        tests_to_run = test_names[slice_start:slice_end]

        _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))

        # If we reached the end and we don't have enough tests, we run some
        # from the beginning.
        if slice_end - slice_start < chunk_len:
            extra = chunk_len - (slice_end - slice_start)
            _log.debug('   last chunk is partial, appending [0:%d]' % extra)
            tests_to_run.extend(test_names[0:extra])

        return (tests_to_run, set(test_names) - set(tests_to_run))

    def _tests_to_run(self, tests, device_type):
        test_names = {test.test_path for test in tests}
        test_names_to_skip = self._skip_tests(test_names,
                                              self._expectations[device_type],
                                              {test.test_path for test in tests if test.needs_any_server})
        tests_to_run = [test for test in tests if test.test_path not in test_names_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=lambda x: self._port.test_key(x.test_path))
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, _ = self._split_into_chunks(tests_to_run)
        return tests_to_run

    def _test_input_for_file(self, test_file, device_type):
        test_is_slow = self._test_is_slow(test_file.test_path, device_type=device_type)
        reference_files = self._port.reference_files(
            test_file.test_path, device_type=device_type
        )
        timeout = (
            self._options.slow_time_out_ms
            if test_is_slow
            else self._options.time_out_ms
        )
        should_dump_jsconsolelog_in_stderr = (
            self._test_should_dump_jsconsolelog_in_stderr(
                test_file.test_path, device_type=device_type
            )
        )

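        # Reftests always force pixel output; otherwise honor --pixel-tests and,
        # when given, restrict pixel testing to the --pixel-test-directories prefixes.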
        if reference_files:
            should_run_pixel_test = True
        elif not self._options.pixel_tests:
            should_run_pixel_test = False
        elif self._options.pixel_test_directories:
            should_run_pixel_test = any(
                test_file.test_path.startswith(directory)
                for directory in self._options.pixel_test_directories
            )
        else:
            should_run_pixel_test = True

        return TestInput(
            test_file,
            timeout=timeout,
            is_slow=test_is_slow,
            needs_servers=test_file.needs_any_server,
            should_dump_jsconsolelog_in_stderr=should_dump_jsconsolelog_in_stderr,
            reference_files=reference_files,
            should_run_pixel_test=should_run_pixel_test,
        )

    def _test_is_slow(self, test_file, device_type):
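        # A test is slow if its expectations mark it SLOW or tests-options.json tags it "slow".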
        if self._expectations[device_type].model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self, test_file, device_type):
        return self._expectations[device_type].model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def _multiply_test_inputs(self, test_inputs, repeat_each, iterations):
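        # repeat_each duplicates each test back to back, while iterations repeats the
        # whole sequence, e.g. repeat_each=2, iterations=2 on [a, b] yields
        # [a, a, b, b, a, a, b, b].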
        if repeat_each == 1:
            per_iteration = list(test_inputs)
        else:
            per_iteration = []
            for test_input in test_inputs:
                per_iteration.extend([test_input] * repeat_each)

        return per_iteration * iterations

    def _update_worker_count(self, test_inputs):
        new_test_inputs = self._multiply_test_inputs(test_inputs, self._options.repeat_each, self._options.iterations)
        worker_count = self._runner.get_worker_count(new_test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_inputs, device_type):
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(pixel_tests=self._options.pixel_tests, prefer_integrated_gpu=self._options.prefer_integrated_gpu):
            return False

        self._update_worker_count(test_inputs)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(device_type)
        return True

    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this condition is effectively the same as the following check, which is likely not intended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Make sure we still have tests to run after skipping.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
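            # Cap the per-device worker count at what this device type supports,
            # falling back to the port default when --child-processes was not given.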
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging; max_child_processes_for_run is the most relevant value here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result

    def _run_test_subset(self,
                         test_inputs,  # type: List[TestInput]
                         device_type,  # type: Optional[DeviceType]
                         ):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(test_inputs, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False, device_type=device_type)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
                if enabled_pixel_tests_in_retry:
                    retry_test_inputs = [self._test_input_for_file(test_input.test, device_type=device_type)
                                         for test_input in test_inputs
                                         if test_input.test.test_path in tests_to_retry]
                else:
                    retry_test_inputs = [test_input
                                         for test_input in test_inputs
                                         if test_input.test.test_path in tests_to_retry]

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(retry_test_inputs,
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True,
                                                device_type=device_type)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._save_json_files(summarized_results, initial_results)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self,
                   test_inputs,  # type: List[TestInput]
                   repeat_each,  # type: int
                   iterations,  # type: int
                   num_workers,  # type: int
                   retrying,  # type: bool
                   device_type,  # type: Optional[DeviceType]
                   ):
        new_test_inputs = self._multiply_test_inputs(test_inputs, repeat_each, iterations)

        assert self._runner is not None
        return self._runner.run_tests(self._expectations[device_type], new_test_inputs, num_workers, retrying, device_type)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._options.pixel_tests = True
        return self._port.start_helper(prefer_integrated_gpu=self._options.prefer_integrated_gpu)

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.items():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.items():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.items():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in crashed_processes; if not, add it to run_results
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        self._printer.write_update("Deleting results directory {}".format(self._results_directory))
        if self._filesystem.isdir(self._results_directory):
            self._filesystem.rmtree(self._results_directory)

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _output_perf_metrics(self, run_time, initial_results):
        perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
        perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
        self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))

    def _results_to_upload_json_trie(self, expectations, results):
        FAILURE_TO_TEXT = {
            test_expectations.PASS: Upload.Expectations.PASS,
            test_expectations.CRASH: Upload.Expectations.CRASH,
            test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
            test_expectations.IMAGE: Upload.Expectations.IMAGE,
            test_expectations.TEXT: Upload.Expectations.TEXT,
            test_expectations.AUDIO: Upload.Expectations.AUDIO,
            test_expectations.MISSING: Upload.Expectations.WARNING,
            test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
        }

        results_trie = {}
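        # Build a trie keyed on test path; skipped tests are omitted and an
        # expectation of exactly {PASS} stays implicit (None).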
        for result in itervalues(results.results_by_name):
            if result.type == test_expectations.SKIP:
                continue

            expected = expectations.filtered_expectations_for_test(
                result.test_name,
                self._options.pixel_tests or bool(result.reftest_type),
                self._options.world_leaks,
            )
            if expected == {test_expectations.PASS}:
                expected = None
            else:
                expected = ' '.join([FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL) for e in expected])

            json_results_generator.add_path_to_trie(
                result.test_name,
                Upload.create_test_result(
                    expected=expected,
                    actual=FAILURE_TO_TEXT.get(result.type, Upload.Expectations.FAIL) if result.type else None,
                    time=int(result.test_run_time * 1000),
                ), results_trie)
        return results_trie

    def _save_json_files(self, summarized_results, initial_results):
        """Writes the results of the test run as JSON files into the results
        dir and upload the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._results_directory,
            self._expectations, initial_results,
            "layout-tests")

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
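            # worker_name is expected to look like '<pool>/<number>'; -1 means the result has no worker.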
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in iteritems(stats):
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self, format_string, test, device_type):
        test_path = test.test_path
        line = self._expectations[device_type].model().get_expectation_line(test_path)
        print(format_string.format(test_path,
                                   line.expected_behavior,
                                   self._expectations[device_type].readable_filename_and_line_number(line),
                                   line.original_string or ''))

    def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip=None):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test, device_type=device_type)

        print('')
        print('Tests to run{} ({})'.format(' for ' + str(device_type) if device_type else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test, device_type=device_type)

    def print_expectations(self, args):
        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [self._port.DEVICE_TYPE]

        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return -1

        aggregate_tests_to_run = set()
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)
        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
        test_col_width = max(len(test.test_path) for test in aggregate_tests) + 1

        self._print_expectations_for_subset(device_type_list[0], test_col_width, tests_to_run_by_device[device_type_list[0]], aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(device_type, test_col_width, tests_to_run_by_device[device_type])

        return 0
Example #2
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR + 'test'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.webkit_specific_web_platform_test_subdir = 'http' + port.TEST_PATH_SEPARATOR + 'wpt' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

        test_options_json_path = self._port.path_from_webkit_base(self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(self._filesystem.read_text_file(test_options_json_path)) if self._filesystem.exists(test_options_json_path) else {}

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
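        # HTTP, websocket, and web platform tests are identified purely by path,
        # using the directory fragments set up in __init__.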
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._needs_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _needs_web_platform_test(self, test):
        return self.web_platform_test_subdir in test or self.webkit_specific_web_platform_test_subdir in test

    def _custom_device_for_test(self, test):
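        # A device class maps to a path fragment, e.g. a hypothetical 'Apple Watch'
        # class would match tests whose path contains 'applewatch/'.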
        for device_class in self._port.CUSTOM_DEVICE_CLASSES:
            directory_suffix = device_class.lower().replace(' ', '') + self._port.TEST_PATH_SEPARATOR
            if directory_suffix in test:
                return device_class
        return None

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(test_file))

    def _test_is_slow(self, test_file):
        if self._expectations.model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self, test_file):
        return self._expectations.model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

    def _get_test_inputs(self, tests_to_run, repeat_each, iterations):
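        # iterations is the outer loop and repeat_each the inner one, so each test
        # is duplicated back to back and the whole list is then repeated.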
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return test_inputs

    def _update_worker_count(self, test_names):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names, device_class=None):
        self._options.device_class = device_class

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._update_worker_count(test_names)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(self._options.device_class)
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for test in tests_to_run)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        if default_device_tests:
            _log.info('')
            _log.info("Running %s", pluralize(len(tests_to_run), "test"))
            _log.info('')
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            initial_results, retry_results, enabled_pixel_tests_in_retry = self._run_test_subset(default_device_tests, tests_to_skip)

        # Only use a single worker for custom device classes
        self._options.child_processes = 1
        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            if device_tests:
                _log.info('')
                _log.info('Running %s for %s', pluralize(len(device_tests), "test"), device_class)
                _log.info('')
                if not self._set_up_run(device_tests, device_class):
                    return test_run_results.RunDetails(exit_code=-1)

                device_initial_results, device_retry_results, device_enabled_pixel_tests_in_retry = self._run_test_subset(device_tests, tests_to_skip)

                initial_results = initial_results.merge(device_initial_results) if initial_results else device_initial_results
                retry_results = retry_results.merge(device_retry_results) if retry_results else device_retry_results
                enabled_pixel_tests_in_retry |= device_enabled_pixel_tests_in_retry

        self._runner.stop_servers()
        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_test_subset(self, tests_to_run, tests_to_skip):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.

        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations)

        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in crashed_processes; if not, add it to run_results
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and upload the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        for hostname in self._options.results_server_host:
            _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_slave),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError:
                    _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                _log.error("Upload failed: %s" % error)
                continue

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
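        # For each non-skipped test, record (worker number, test number, pid,
        # test run time in ms, total run time in ms).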
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.items():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self, format_string, test):
        line = self._expectations.model().get_expectation_line(test)
        print(format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or ''))
    
    def _print_expectations_for_subset(self, device_class, test_col_width, tests_to_run, tests_to_skip={}):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test)

        print('')
        print('Tests to run{} ({})'.format(' for ' + device_class if device_class else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test)

    def print_expectations(self, args):
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return -1

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        test_col_width = len(max(tests_to_run + list(tests_to_skip), key=len)) + 1

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))

        self._print_expectations_for_subset(None, test_col_width, tests_to_run, tests_to_skip)

        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            self._print_expectations_for_subset(device_class, test_col_width, device_tests)

        return 0
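
Both variants above lean on json_results_generator.add_path_to_trie to turn flat
"dir/subdir/test.html" names into nested dictionaries before serializing them. As a rough,
self-contained approximation of that behaviour (the real helper lives in webkitpy's
json_results_generator and may differ in details; the name add_path_to_trie_sketch and the
example path and values below are illustrative only):

def add_path_to_trie_sketch(path, value, trie):
    """Insert value into trie, nesting one dict level per '/'-separated path component."""
    directory, _, rest = path.partition('/')
    if not rest:
        trie[directory] = value
        return
    trie.setdefault(directory, {})
    add_path_to_trie_sketch(rest, value, trie[directory])

# Example: building a stats-style trie for one (made-up) result.
stats_trie = {}
add_path_to_trie_sketch('fast/harness/results.html', {'results': (0, 1, 1234, 15, 20)}, stats_trie)
# stats_trie == {'fast': {'harness': {'results.html': {'results': (0, 1, 1234, 15, 20)}}}}
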
Example #3
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = OrderedDict()
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR + 'test'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root(
        )
        self.webkit_specific_web_platform_test_subdir = 'http' + port.TEST_PATH_SEPARATOR + 'wpt' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

        test_options_json_path = self._port.path_from_webkit_base(
            self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(
            self._filesystem.read_text_file(test_options_json_path)
        ) if self._filesystem.exists(test_options_json_path) else {}

    def _collect_tests(self, args, device_type=None):
        return self._finder.find_tests(self._options,
                                       args,
                                       device_type=device_type)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(
            test) or self._needs_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _needs_web_platform_test(self, test):
        return self.web_platform_test_subdir in test or self.webkit_specific_web_platform_test_subdir in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names, device_type=None):
        tests_to_skip = self._finder.skip_tests(
            paths, test_names, self._expectations[device_type],
            self._http_tests(test_names))
        tests_to_run = [
            test for test in test_names if test not in tests_to_skip
        ]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(
            tests_to_run)
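        # Tests that land in other chunks are recorded as skipped in the expectations and added
        # to the skip set.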
        self._expectations[device_type].add_skipped_tests(
            tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file, device_type=None):
        return TestInput(
            test_file,
            (self._options.slow_time_out_ms
             if self._test_is_slow(test_file, device_type=device_type)
             else self._options.time_out_ms),
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(
                test_file, device_type=device_type))

    def _test_is_slow(self, test_file, device_type=None):
        if self._expectations[device_type].model().has_modifier(
                test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self,
                                                 test_file,
                                                 device_type=None):
        return self._expectations[device_type].model().has_modifier(
            test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name)
                   for test_name in test_names) and self._options.http

    def _get_test_inputs(self,
                         tests_to_run,
                         repeat_each,
                         iterations,
                         device_type=None):
        test_inputs = []
        for _ in range(iterations):
            for test in tests_to_run:
                for _ in range(repeat_each):
                    test_inputs.append(
                        self._test_input_for_file(test,
                                                  device_type=device_type))
        return test_inputs

    def _update_worker_count(self, test_names, device_type=None):
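        # Expand the tests into per-iteration TestInputs, let the runner pick a worker count for
        # that batch, and store the result back on the options.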
        test_inputs = self._get_test_inputs(test_names,
                                            self._options.repeat_each,
                                            self._options.iterations,
                                            device_type=device_type)
        worker_count = self._runner.get_worker_count(
            test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names, device_type=None):
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(
                pixel_tests=self._options.pixel_tests,
                prefer_integrated_gpu=self._options.prefer_integrated_gpu):
            return False

        self._update_worker_count(test_names, device_type=device_type)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(device_type)
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        num_failed_uploads = 0
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            for_device_type = u'for {} '.format(
                device_type) if device_type else ''
            self._printer.write_update(
                u'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(
                    args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(
                u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(
                self._port,
                test_names,
                force_expectations_pass=self._options.force,
                device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(
                paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [
                test for test in tests_to_run if test not in aggregate_tests
            ]
            aggregate_tests.update(tests_to_run)

        # If a test is marked skipped but was explicitly requested, run it anyway.
        if self._options.skipped != 'always':
            for arg in args:
                if arg in total_tests and arg not in aggregate_tests:
                    tests_to_run_by_device[device_type_list[0]].append(arg)
                    aggregate_tests.add(arg)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names),
                                  len(aggregate_tests),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if tests_to_skip == total_tests:
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0,
                                               skipped_all_tests=True)

        # Check to make sure we have no tests to run that are not skipped.
        if not sum(
            [len(tests) for tests in itervalues(tests_to_run_by_device)]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(
            self._results_directory)

        needs_http = any((self._is_http_test(test)
                          and not self._needs_web_platform_test(test))
                         for tests in itervalues(tests_to_run_by_device)
                         for test in tests)
        needs_web_platform_test_server = any(
            self._needs_web_platform_test(test)
            for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(
            self._is_websocket_test(test)
            for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(
            self._options,
            self._port,
            self._printer,
            self._results_directory,
            self._test_is_slow,
            needs_http=needs_http,
            needs_web_platform_test_server=needs_web_platform_test_server,
            needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
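        # The per-device loop below overwrites self._options.child_processes, so remember the
        # originally requested value in order to recompute the cap for each device type.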
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(
                test_file, device_type=device_type)
            self._options.child_processes = min(
                self._port.max_child_processes(device_type=device_type),
                int(child_processes_option_value
                    or self._port.default_child_processes(
                        device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(
                    pluralize(len(tests_to_run_by_device[device_type]),
                              'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes,
                                              max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(
                pluralize(len(tests_to_run_by_device[device_type]), 'test'),
                u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type],
                                    device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(
                self._port.target_host(0))
            # The --result-report-flavor argument should override wk1/wk2.
            if not configuration.get('flavor', None):
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(
                tests_to_run_by_device[device_type],
                tests_to_skip,
                device_type=device_type)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining +
                        temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(
                        self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update(
                        'Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname,
                                         log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(
                temp_initial_results
            ) if initial_results else temp_initial_results
            retry_results = retry_results.merge(
                temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results,
                                    retry_results,
                                    enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update(
                            'Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(
                                hostname,
                                self._filesystem.open_binary_file_for_reading(
                                    archive + '.zip'),
                                log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result

    def _run_test_subset(self, tests_to_run, tests_to_skip, device_type=None):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(
                tests_to_run,
                tests_to_skip,
                self._options.repeat_each,
                self._options.iterations,
                int(self._options.child_processes),
                retrying=False,
                device_type=device_type)

            tests_to_retry = self._tests_to_retry(
                initial_results,
                include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (
                initial_results.interrupted
                or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed(
                )

                _log.info('')
                _log.info("Retrying %s ..." %
                          pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True,
                                                device_type=device_type)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results,
                      retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, retry_results,
            enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(
                self._port,
                self._expectations,
                initial_results,
                retry_results,
                enabled_pixel_tests_in_retry,
                include_passes=True,
                include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results,
                                    summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._upload_json_files(summarized_results, initial_results,
                                    results_including_passes, start_time,
                                    end_time)

            results_path = self._filesystem.join(self._results_directory,
                                                 "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (
                        initial_results.unexpected_results_by_name or
                    (self._options.full_results_html
                     and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(
                    summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results,
                                           initial_results, retry_results,
                                           enabled_pixel_tests_in_retry)

    def _run_tests(self,
                   tests_to_run,
                   tests_to_skip,
                   repeat_each,
                   iterations,
                   num_workers,
                   retrying,
                   device_type=None):
        test_inputs = self._get_test_inputs(tests_to_run,
                                            repeat_each,
                                            iterations,
                                            device_type=device_type)

        return self._runner.run_tests(self._expectations[device_type],
                                      test_inputs, tests_to_skip, num_workers,
                                      retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
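        # If pixel tests are already on there is nothing to force. Otherwise restart the helper
        # with pixel tests enabled and return its result; the caller turns the option back off
        # once the retry finishes.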
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.items():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time)
        if sample_files:
            for test, sample_file in sample_files.items():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.items():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in the list of crashed_processes; if not, add it to run_results.
                if not any(process[0] == test
                           for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        self._printer.write_update("Deleting results directory {}".format(
            self._results_directory))
        if self._filesystem.isdir(self._results_directory):
            self._filesystem.rmtree(self._results_directory)

    def _tests_to_retry(self, run_results, include_crashes):
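        # Retry unexpected results other than passes and missing results; crashes are included
        # only when include_crashes is set (the caller passes should_retry_crashes()).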
        return [
            result.test_name
            for result in run_results.unexpected_results_by_name.values()
            if ((result.type != test_expectations.PASS) and (
                result.type != test_expectations.MISSING) and (
                    result.type != test_expectations.CRASH or include_crashes))
        ]

    def _output_perf_metrics(self, run_time, initial_results):
        perf_metrics_json = json_results_generator.perf_metrics_for_test(
            run_time, initial_results.results_by_name.values())
        perf_metrics_path = self._filesystem.join(
            self._results_directory, "layout_test_perf_metrics.json")
        self._filesystem.write_text_file(perf_metrics_path,
                                         json.dumps(perf_metrics_json))

    def _results_to_upload_json_trie(self, expectations, results):
        FAILURE_TO_TEXT = {
            test_expectations.PASS:
            Upload.Expectations.PASS,
            test_expectations.CRASH:
            Upload.Expectations.CRASH,
            test_expectations.TIMEOUT:
            Upload.Expectations.TIMEOUT,
            test_expectations.IMAGE:
            Upload.Expectations.IMAGE,
            test_expectations.TEXT:
            Upload.Expectations.TEXT,
            test_expectations.AUDIO:
            Upload.Expectations.AUDIO,
            test_expectations.MISSING:
            Upload.Expectations.WARNING,
            test_expectations.IMAGE_PLUS_TEXT:
            ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
        }

        results_trie = {}
        for result in itervalues(results.results_by_name):
            if result.type == test_expectations.SKIP:
                continue

            expected = expectations.filtered_expectations_for_test(
                result.test_name,
                self._options.pixel_tests or bool(result.reftest_type),
                self._options.world_leaks,
            )
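            # A set containing only PASS collapses to None; any other expectation set becomes a
            # space-separated string of upload labels.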
            if expected == {test_expectations.PASS}:
                expected = None
            else:
                expected = ' '.join([
                    FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL)
                    for e in expected
                ])

            json_results_generator.add_path_to_trie(
                result.test_name,
                Upload.create_test_result(
                    expected=expected,
                    actual=FAILURE_TO_TEXT.get(result.type,
                                               Upload.Expectations.FAIL)
                    if result.type else None,
                    time=int(result.test_run_time * 1000),
                ), results_trie)
        return results_trie

    def _upload_json_files(self,
                           summarized_results,
                           initial_results,
                           results_including_passes=None,
                           start_time=None,
                           end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and upload the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(
            self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory,
                                                "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie,
                                          times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory,
                                           "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory,
                                                  "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem,
                                          summarized_results,
                                          full_results_path,
                                          callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(
            self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem,
                                              results_including_passes,
                                              results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server, "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug(
                "Finished writing JSON file for the test results server.")
        else:
            _log.debug(
                "Failed to generate JSON file for the test results server.")
            return

        json_files = [
            "incremental_results.json", "full_results.json", "times_ms.json"
        ]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(
            self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_worker = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_worker:
            _log.error(
                "--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files."
            )
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem,
                              self._port.host.executive).detect_scm_system(
                                  path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_native_revision(path, revision)
            }

        for hostname in self._options.results_server_host:
            _log.info(
                "Uploading JSON files for master: {} builder: {} build: {} worker: {} to {}"
                .format(master_name, builder_name, build_number, build_worker,
                        hostname))

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org'
                 else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_worker),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(
                    self._filesystem, [('results.json', results_json_path)],
                    attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" %
                               (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError:
                    _log.error(
                        "JSON upload failed; failed to parse the response: %s",
                        response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s",
                               response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                _log.error("Upload failed: %s" % error)
                continue

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast',
                                                    'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {
                    'results':
                    (_worker_number(result.worker_name), result.test_number,
                     result.pid, int(result.test_run_time * 1000),
                     int(result.total_run_time * 1000))
                }
        stats_trie = {}
        for name, value in iteritems(stats):
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self,
                                         format_string,
                                         test,
                                         device_type=None):
        line = self._expectations[device_type].model().get_expectation_line(test)
        print(format_string.format(
            test,
            line.expected_behavior,
            self._expectations[device_type].readable_filename_and_line_number(line),
            line.original_string or ''))

    def _print_expectations_for_subset(self,
                                       device_type,
                                       test_col_width,
                                       tests_to_run,
                                       tests_to_skip={}):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(
            width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string,
                                                      test,
                                                      device_type=device_type)

        print('')
        print('Tests to run{} ({})'.format(
            ' for ' + str(device_type) if device_type else '',
            len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string,
                                                  test,
                                                  device_type=device_type)

    def print_expectations(self, args):
        aggregate_test_names = set()
        aggregate_tests_to_run = set()
        aggregate_tests_to_skip = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [
            self._port.DEVICE_TYPE
        ]
        for device_type in device_type_list:
            for_device_type = 'for {} '.format(
                device_type) if device_type else ''
            self._printer.write_update(
                'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(
                    args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return -1

            self._printer.write_update(
                'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(
                self._port,
                test_names,
                force_expectations_pass=self._options.force,
                device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(
                paths, test_names, device_type=device_type)
            aggregate_tests_to_skip.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [
                test for test in tests_to_run
                if test not in aggregate_tests_to_run
            ]
            aggregate_tests_to_run.update(tests_to_run)

        aggregate_tests_to_skip = aggregate_tests_to_skip - aggregate_tests_to_run

        self._printer.print_found(len(aggregate_test_names),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        test_col_width = len(
            max(aggregate_tests_to_run.union(aggregate_tests_to_skip),
                key=len)) + 1

        self._print_expectations_for_subset(
            device_type_list[0], test_col_width,
            tests_to_run_by_device[device_type_list[0]],
            aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(
                device_type, test_col_width,
                tests_to_run_by_device[device_type])

        return 0
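
Across these Manager variants the calling convention stays the same: construct a Manager with a
port, the parsed options, and a printer, call run() with the test arguments, and act on the
returned RunDetails. A minimal sketch of a driver, assuming the port/options/printer objects
are already built by the host script (run_layout_tests is a hypothetical wrapper, not part of
webkitpy):

def run_layout_tests(port, options, printer, args):
    # Manager(port, options, printer) and run(args) match the class above; everything else
    # about constructing port/options/printer is assumed to happen elsewhere.
    manager = Manager(port, options, printer)
    details = manager.run(args)   # a test_run_results.RunDetails
    return details.exit_code      # suitable for sys.exit() in the calling script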