Example #1
class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests."""
    def __init__(self, port, options, regular_output, logger=None):
        self.num_started = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._meter = MeteredStream(
            regular_output,
            options.debug_rwt_logging,
            logger=logger,
            number_of_columns=self._port.host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self, results_directory):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default("Test configuration: %s" %
                            self._port.test_configuration())
        self._print_default("Placing test results in %s" % results_directory)

        # FIXME: should these options be in printing_options?
        if self._options.new_baseline:
            self._print_default("Placing new baselines in %s" %
                                self._port.baseline_path())

        self._print_default("Using %s build" % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default("Pixel tests enabled")
        else:
            self._print_default("Pixel tests disabled")

        self._print_default(
            "Regular timeout: %s, slow test timeout: %s" %
            (self._options.time_out_ms, self._options.slow_time_out_ms))

        self._print_default('Command line: ' +
                            ' '.join(self._port.driver_cmd_line_for_logging()))
        self._print_default('')

    def print_baseline_search_path(self, device_type=None):
        fs = self._port.host.filesystem
        full_baseline_search_path = self._port.baseline_search_path(
            device_type=device_type)
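        # normalize_baseline rewrites each baseline directory relative to the
        # layout tests directory and strips "../" segments for readability.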
        normalize_baseline = lambda baseline_search_path: [
            fs.relpath(x, self._port.layout_tests_dir()).replace("../", "")
            for x in baseline_search_path
        ]

        self._print_default(
            u'Verbose baseline search path: {} -> generic'.format(u' -> '.join(
                normalize_baseline(full_baseline_search_path))))

        self._print_default('')
        self._print_default(u'Baseline search path: {} -> generic'.format(
            u' -> '.join(
                normalize_baseline([
                    path for path in full_baseline_search_path
                    if fs.exists(path)
                ]))))
        self._print_default('')

    def print_found(self, num_all_test_files, num_to_run, repeat_each,
                    iterations):
        found_str = 'Found %s; running %d' % (pluralize(
            num_all_test_files, "test"), num_to_run)
        if repeat_each * iterations > 1:
            found_str += ' (%s each: --repeat-each=%d --iterations=%d)' % (
                pluralize(repeat_each * iterations,
                          "time"), repeat_each, iterations)
        found_str += ', skipping %d' % (num_all_test_files - num_to_run)
        self._print_default(found_str + '.')

    def print_expected(self, run_results, tests_with_result_type_callback):
        self._print_expected_results_of_type(run_results,
                                             test_expectations.PASS, "passes",
                                             tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results,
                                             test_expectations.FAIL,
                                             "failures",
                                             tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results,
                                             test_expectations.FLAKY, "flaky",
                                             tests_with_result_type_callback)
        self._print_debug('')

    def print_workers_and_shards(self, num_workers, num_shards):
        driver_name = self._port.driver_name()

        if num_workers == 1:
            self._print_default('Running 1 {}.'.format(driver_name))
            self._print_debug('({}).'.format(pluralize(num_shards, "shard")))
        else:
            self._print_default('Running {} in parallel.'.format(
                pluralize(num_workers, driver_name)))
            self._print_debug('({} shards).'.format(num_shards))
        self._print_default('')

    def _print_expected_results_of_type(self, run_results, result_type,
                                        result_type_str,
                                        tests_with_result_type_callback):
        tests = tests_with_result_type_callback(result_type)
        now = run_results.tests_by_timeline[test_expectations.NOW]
        wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" %
                  (self._num_digits(now), self._num_digits(wontfix)))
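        # e.g. with a 4-digit "now" count and a 3-digit "wontfix" count the
        # template expands to "Expect: %5d %-8s (%4d now, %3d wontfix)",
        # which is then filled in below.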
        self._print_debug(fmtstr % (len(tests), result_type_str,
                                    len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
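        # 'num' is a collection of tests; the column is sized to the number of
        # digits in len(num) so the table stays aligned.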
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, run_results, summarized_results):
        self._print_timing_statistics(run_time, run_results)
        self._print_one_line_summary(
            run_results.total - run_results.expected_skips,
            run_results.expected - run_results.expected_skips,
            run_results.unexpected)

    def _print_timing_statistics(self, total_time, run_results):
        self._print_debug("Test timing:")
        self._print_debug("  %6.2f total testing time" % total_time)
        self._print_debug("")

        self._print_worker_statistics(run_results,
                                      int(self._options.child_processes))
        self._print_aggregate_test_statistics(run_results)
        self._print_individual_test_times(run_results)
        self._print_directory_timings(run_results)

    def _print_worker_statistics(self, run_results, num_workers):
        self._print_debug("Thread timing:")
        stats = {}
        cuml_time = 0
        for result in run_results.results_by_name.values():
            stats.setdefault(result.worker_name, {
                'num_tests': 0,
                'total_time': 0
            })
            stats[result.worker_name]['num_tests'] += 1
            stats[result.worker_name]['total_time'] += result.total_run_time
            cuml_time += result.total_run_time

        for worker_name in stats:
            self._print_debug("    %10s: %5d tests, %6.2f secs" %
                              (worker_name, stats[worker_name]['num_tests'],
                               stats[worker_name]['total_time']))
        self._print_debug("   %6.2f cumulative, %6.2f optimal" %
                          (cuml_time, cuml_time / num_workers))
        self._print_debug("")

    def _print_aggregate_test_statistics(self, run_results):
        times_for_dump_render_tree = [
            result.test_run_time
            for result in run_results.results_by_name.values()
        ]
        self._print_statistics_for_test_timings(
            "PER TEST TIME IN TESTSHELL (seconds):",
            times_for_dump_render_tree)

    def _print_individual_test_times(self, run_results):
        # Reverse-sort by the time spent in DumpRenderTree.

        individual_test_timings = sorted(
            run_results.results_by_name.values(),
            key=lambda result: result.test_run_time,
            reverse=True)
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in run_results.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in run_results.failures_by_name:
                result = run_results.results_by_name[test_name].type
                if (result == test_expectations.TIMEOUT
                        or result == test_expectations.CRASH):
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if (not is_timeout_crash_or_slow
                    and num_printed < NUM_SLOW_TESTS_TO_LOG):
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        self._print_test_list_timing(
            "%s slowest tests that are not marked as SLOW and did not timeout/crash:"
            % NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests that timed out or crashed:",
                                     timeout_or_crash_tests)
        self._print_debug("")

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug("  %s took %s seconds" %
                              (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, run_results):
        stats = {}
        for result in run_results.results_by_name.values():
            stats.setdefault(result.shard_name, {
                'num_tests': 0,
                'total_time': 0
            })
            stats[result.shard_name]['num_tests'] += 1
            stats[result.shard_name]['total_time'] += result.total_run_time

        timings = []
        for directory in stats:
            timings.append(
                (directory, round(stats[directory]['total_time'],
                                  1), stats[directory]['num_tests']))
        timings.sort()

        self._print_debug("Time to process slowest subdirectories:")
        min_seconds_to_print = 10
        for timing in timings:
            if timing[1] > min_seconds_to_print:
                self._print_debug("  %s took %s seconds to run %s tests." %
                                  timing)
        self._print_debug("")

    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(.9 * num_tests)]
        percentile99 = timings[int(.99 * num_tests)]

        if num_tests % 2 == 1:
            median = timings[(num_tests - 1) // 2]
        else:
            lower = timings[num_tests // 2 - 1]
            upper = timings[num_tests // 2]
            median = (float(lower + upper)) / 2

        mean = sum(timings) / num_tests

        # Sum the squared deviations from the mean to get the variance.
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_one_line_summary(self, total, expected, unexpected):
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected." % expected
                else:
                    summary = "The test ran as expected."
            else:
                summary = "%s ran as expected%s." % (pluralize(
                    expected, "test"), incomplete_str)
        else:
            summary = "%s ran as expected, %d didn't%s:" % (pluralize(
                expected, "test"), unexpected, incomplete_str)

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix, truncate=True):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_started, self.num_tests,
                                       test_name, suffix)
        if truncate and len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(
                status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide, just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(
                    new_length - prefix):]
        return format_string % (self.num_started, self.num_tests, test_name,
                                suffix)

    def print_started_test(self, test_name):
        self.num_started += 1
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures,
                                              expected, exp_str,
                                              self._options.verbose)

        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif (self._options.verbose
              and not self._options.debug_rwt_logging) or not expected:
            self.writeln(
                self._test_status_line(test_name,
                                       result_message,
                                       truncate=False))
        elif self.num_started == self.num_tests:
            self._meter.write_update('')
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            for test_name, result_message in self._completed_tests:
                self._meter.write_throttled_update(
                    self._test_status_line(test_name,
                                           result_message,
                                           truncate=False))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, exp_str,
                        verbose):
        exp_string = ''
        if not expected:
            exp_string = ' (leak detection is pending)' if 'LEAK' in exp_str else ' unexpectedly'

        if result_type == test_expectations.PASS:
            return ' passed%s' % exp_string
        else:
            return ' failed%s (%s)' % (exp_string, ', '.join(
                failure.message() for failure in failures))

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        for extension in ('.txt', '.png', '.wav', '.webarchive'):
            self._print_baseline(test_name, extension)

        self._print_default('  exp: %s' % exp_str)
        self._print_default('  got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default('  %s: %s' % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
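
The truncation logic in _test_status_line above can be exercised in isolation. Below is a minimal sketch of the same elision arithmetic; the helper name elide_middle and the sample path are hypothetical, not part of the original code. It removes enough characters from the middle of the test name to absorb the overflow and puts an ellipsis in their place.

def elide_middle(test_name, status_line_len, number_of_columns, ellipsis='...'):
    """Shorten test_name so a status line of status_line_len fits the terminal."""
    overflow_columns = status_line_len - number_of_columns
    if overflow_columns <= 0:
        return test_name
    if len(test_name) < overflow_columns + len(ellipsis) + 2:
        # Not enough room even if we elide; fall back to the bare filename.
        return test_name.rsplit('/', 1)[-1]
    new_length = len(test_name) - overflow_columns - len(ellipsis)
    prefix = new_length // 2
    return test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]

# Hypothetical example: a 60-character status line in a 40-column terminal.
print(elide_middle('fast/dom/HTMLSelectElement/named-options.html', 60, 40))
# -> fast/dom/HT...ptions.html
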
Example #2
class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests.

    Printing from run-webkit-tests falls into two buckets: general or
    regular output that is read only by humans and can be changed at any
    time, and output that is parsed by buildbots (and humans) and hence
    must be changed more carefully and in coordination with the buildbot
    parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
    log_parser/webkit_test_command.py script).

    By default the buildbot-parsed output gets logged to stdout, and regular
    output gets logged to stderr."""

    def __init__(self, port, options, regular_output, buildbot_output, logger=None):
        self.num_completed = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._buildbot_stream = buildbot_output
        self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger)
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default("Test configuration: %s" % self._port.test_configuration())
        self._print_default("Placing test results in %s" % self._options.results_directory)

        # FIXME: should these options be in printing_options?
        if self._options.new_baseline:
            self._print_default("Placing new baselines in %s" % self._port.baseline_path())

        fs = self._port.host.filesystem
        fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
        self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))

        self._print_default("Using %s build" % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default("Pixel tests enabled")
        else:
            self._print_default("Pixel tests disabled")

        self._print_default(
            "Regular timeout: %s, slow test timeout: %s" % (self._options.time_out_ms, self._options.slow_time_out_ms)
        )

        self._print_default("Command line: " + " ".join(self._port.driver_cmd_line()))
        self._print_default("")

    def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
        num_unique_tests = num_to_run / (repeat_each * iterations)
        found_str = "Found %s; running %d" % (grammar.pluralize("test", num_all_test_files), num_unique_tests)
        if repeat_each * iterations > 1:
            found_str += " (%d times each: --repeat-each=%d --iterations=%d)" % (
                repeat_each * iterations,
                repeat_each,
                iterations,
            )
        found_str += ", skipping %d" % (num_all_test_files - num_unique_tests)
        self._print_default(found_str + ".")

    def print_expected(self, result_summary, tests_with_result_type_callback):
        self._print_expected_results_of_type(
            result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback
        )
        self._print_expected_results_of_type(
            result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback
        )
        self._print_expected_results_of_type(
            result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback
        )
        self._print_debug("")

    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
        driver_name = self._port.driver_name()
        if num_workers == 1:
            self._print_default("Running 1 %s over %s." % (driver_name, grammar.pluralize("shard", num_shards)))
        else:
            self._print_default(
                "Running %d %ss in parallel over %d shards (%d locked)."
                % (num_workers, driver_name, num_shards, num_locked_shards)
            )
        self._print_default("")

    def _print_expected_results_of_type(
        self, result_summary, result_type, result_type_str, tests_with_result_type_callback
    ):
        tests = tests_with_result_type_callback(result_type)
        now = result_summary.tests_by_timeline[test_expectations.NOW]
        wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
        fmtstr = "Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" % (self._num_digits(now), self._num_digits(wontfix))
        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(
        self, run_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results
    ):
        self._print_timing_statistics(run_time, thread_timings, test_timings, individual_test_timings, result_summary)
        self._print_result_summary(result_summary)
        self._print_one_line_summary(
            result_summary.total - result_summary.expected_skips,
            result_summary.expected - result_summary.expected_skips,
            result_summary.unexpected,
        )
        self._print_unexpected_results(unexpected_results)

    def _print_timing_statistics(
        self, total_time, thread_timings, directory_test_timings, individual_test_timings, result_summary
    ):
        self._print_debug("Test timing:")
        self._print_debug("  %6.2f total testing time" % total_time)
        self._print_debug("")
        self._print_debug("Thread timing:")
        cuml_time = 0
        for t in thread_timings:
            self._print_debug("    %10s: %5d tests, %6.2f secs" % (t["name"], t["num_tests"], t["total_time"]))
            cuml_time += t["total_time"]
        self._print_debug(
            "   %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / int(self._options.child_processes))
        )
        self._print_debug("")

        self._print_aggregate_test_statistics(individual_test_timings)
        self._print_individual_test_times(individual_test_timings, result_summary)
        self._print_directory_timings(directory_test_timings)

    def _print_aggregate_test_statistics(self, individual_test_timings):
        times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)

    def _print_individual_test_times(self, individual_test_timings, result_summary):
        # Reverse-sort by the time spent in DumpRenderTree.
        individual_test_timings.sort(lambda a, b: cmp(b.test_run_time, a.test_run_time))
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in result_summary.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in result_summary.failures:
                result = result_summary.results[test_name].type
                if result == test_expectations.TIMEOUT or result == test_expectations.CRASH:
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG:
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        self._print_test_list_timing(
            "%s slowest tests that are not marked as SLOW and did not timeout/crash:" % NUM_SLOW_TESTS_TO_LOG,
            unexpected_slow_tests,
        )
        self._print_debug("")
        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
        self._print_debug("")

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug("  %s took %s seconds" % (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, directory_test_timings):
        timings = []
        for directory in directory_test_timings:
            num_tests, time_for_directory = directory_test_timings[directory]
            timings.append((round(time_for_directory, 1), directory, num_tests))
        timings.sort()

        self._print_debug("Time to process slowest subdirectories:")
        min_seconds_to_print = 10
        for timing in timings:
            if timing[0] > min_seconds_to_print:
                self._print_debug("  %s took %s seconds to run %s tests." % (timing[1], timing[0], timing[2]))
        self._print_debug("")

    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(0.9 * num_tests)]
        percentile99 = timings[int(0.99 * num_tests)]

        if num_tests % 2 == 1:
            median = timings[(num_tests - 1) / 2]
        else:
            lower = timings[num_tests / 2 - 1]
            upper = timings[num_tests / 2]
            median = (float(lower + upper)) / 2

        mean = sum(timings) / num_tests

        # Sum the squared deviations from the mean to get the variance.
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_result_summary(self, result_summary):
        if not self._options.debug_rwt_logging:
            return

        failed = result_summary.total_failures
        total = result_summary.total - result_summary.expected_skips
        passed = total - failed - result_summary.remaining
        pct_passed = 0.0
        if total > 0:
            pct_passed = float(passed) * 100 / total

        self._print_for_bot("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, pct_passed))
        self._print_for_bot("")
        self._print_result_summary_entry(result_summary, test_expectations.NOW, "Tests to be fixed")

        self._print_for_bot("")
        # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
        self._print_result_summary_entry(
            result_summary, test_expectations.WONTFIX, "Tests that will only be fixed if they crash (WONTFIX)"
        )
        self._print_for_bot("")

    def _print_result_summary_entry(self, result_summary, timeline, heading):
        total = len(result_summary.tests_by_timeline[timeline])
        not_passing = total - len(
            result_summary.tests_by_expectation[test_expectations.PASS] & result_summary.tests_by_timeline[timeline]
        )
        self._print_for_bot("=> %s (%d):" % (heading, not_passing))

        for result in TestExpectations.EXPECTATION_ORDER:
            if result in (test_expectations.PASS, test_expectations.SKIP):
                continue
            results = result_summary.tests_by_expectation[result] & result_summary.tests_by_timeline[timeline]
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
            if not_passing and len(results):
                pct = len(results) * 100.0 / not_passing
                self._print_for_bot("  %5d %-24s (%4.1f%%)" % (len(results), desc[0], pct))

    def _print_one_line_summary(self, total, expected, unexpected):
        incomplete = total - expected - unexpected
        incomplete_str = ""
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        summary = ""
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected." % expected
                else:
                    summary = "The test ran as expected."
            else:
                summary = "%s ran as expected%s." % (grammar.pluralize("test", expected), incomplete_str)
        else:
            summary = "%s ran as expected, %d didn't%s:" % (
                grammar.pluralize("test", expected),
                unexpected,
                incomplete_str,
            )

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix):
        format_string = "[%d/%d] %s%s"
        status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(status_line) - self._meter.number_of_columns()
            ellipsis = "..."
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide, just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix) :]
        return format_string % (self.num_completed, self.num_tests, test_name, suffix)

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = " (+%d)" % (len(self._running_tests) - 1)
        else:
            suffix = ""
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name
        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type]
            suffix = " " + desc[1]
            if not expected:
                suffix += " unexpectedly" + desc[2]
            self.writeln(self._test_status_line(test_name, suffix))
        elif self.num_completed == self.num_tests:
            self._meter.write_update("")
        else:
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type]
            suffix = " " + desc[1]
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, suffix])
            else:
                self._completed_tests.append([test_name, suffix])

            for test_name, suffix in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(test_name, suffix))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ""))

        base = self._port.lookup_virtual_test_base(test_name)
        if base:
            args = " ".join(self._port.lookup_virtual_test_args(test_name))
            self._print_default(" base: %s" % base)
            self._print_default(" args: %s" % args)

        for extension in (".txt", ".png", ".wav", ".webarchive"):
            self._print_baseline(test_name, extension)

        self._print_default("  exp: %s" % exp_str)
        self._print_default("  got: %s" % got_str)
        self._print_default(" took: %-.3f" % result.test_run_time)
        self._print_default("")

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = "<none>"
        self._print_default("  %s: %s" % (extension[1:], relpath))

    def _print_unexpected_results(self, unexpected_results):
        # Prints to the buildbot stream
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            actual = results["actual"].split(" ")
            expected = results["expected"].split(" ")
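            # "actual" is a space-separated list of the results seen across
            # retries, e.g. "TEXT PASS" for a test that failed and then passed.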
            if actual == ["PASS"]:
                if "CRASH" in expected:
                    add_to_dict_of_lists(passes, "Expected to crash, but passed", test)
                elif "TIMEOUT" in expected:
                    add_to_dict_of_lists(passes, "Expected to timeout, but passed", test)
                else:
                    add_to_dict_of_lists(passes, "Expected to fail, but passed", test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results["actual"], test)

        resultsjsonparser.for_each_test(unexpected_results["tests"], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print_for_bot("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print_for_bot("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s" % test)
                self._print_for_bot("")
            self._print_for_bot("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result][0], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(unexpected_results["tests"], test)
                    actual = result["actual"].split(" ")
                    expected = result["expected"].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    new_expectations_list = list(set(actual) | set(expected))
                    self._print_for_bot("  %s = %s" % (test, " ".join(new_expectations_list)))
                self._print_for_bot("")
            self._print_for_bot("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Regressions: Unexpected %s : (%d)" % (descriptions[result][0], len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s = %s" % (test, key))
                self._print_for_bot("")

        if len(unexpected_results["tests"]) and self._options.debug_rwt_logging:
            self._print_for_bot("%s" % ("-" * 78))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def _print_for_bot(self, msg):
        self._buildbot_stream.write(msg + "\n")

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
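
As the docstring notes, this version splits output between a regular, human-readable stream and a stream that buildbot's log parser consumes. A minimal sketch of that routing, assuming nothing beyond the standard streams; the sample messages below are illustrative, not captured from a real run.

import sys

# Human-readable progress (what _print_default/_print_debug emit) goes to the
# regular stream on stderr; bot-parsed lines (what _print_for_bot emits) go to
# the buildbot stream on stdout.
regular_output = sys.stderr
buildbot_output = sys.stdout

regular_output.write("Running 4 DumpRenderTrees in parallel over 12 shards (1 locked).\n")
buildbot_output.write("=> Results: 11990/12000 tests passed (99.9%)\n")
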
Example #3
class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests."""

    def __init__(self, port, options, regular_output, logger=None):
        self.num_completed = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
                                    number_of_columns=self._port.host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self, results_directory):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default("Test configuration: %s" % self._port.test_configuration())
        self._print_default("View the test results at file://%s/results.html" % results_directory)
        self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)

        # FIXME: should these options be in printing_options?
        if self._options.new_baseline:
            self._print_default("Placing new baselines in %s" % self._port.baseline_path())

        fs = self._port.host.filesystem
        fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
        self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))

        self._print_default("Using %s build" % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default("Pixel tests enabled")
        else:
            self._print_default("Pixel tests disabled")

        self._print_default("Regular timeout: %s, slow test timeout: %s" %
                  (self._options.time_out_ms, self._options.slow_time_out_ms))

        self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
        self._print_default('')

    def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
        found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
        if repeat_each * iterations > 1:
            found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
        found_str += ', skipping %d' % (num_all_test_files - num_to_run)
        self._print_default(found_str + '.')

    def print_expected(self, run_results, tests_with_result_type_callback):
        self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
        self._print_debug('')

    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
        driver_name = self._port.driver_name()
        if num_workers == 1:
            self._print_default("Running 1 %s." % driver_name)
            self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
        else:
            self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
            self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
        self._print_default('')

    def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
        tests = tests_with_result_type_callback(result_type)
        now = run_results.tests_by_timeline[test_expectations.NOW]
        wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
                  % (self._num_digits(now), self._num_digits(wontfix)))
        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, run_results, summarized_results):
        self._print_timing_statistics(run_time, run_results)
        self._print_one_line_summary(run_time, run_results)

    def _print_timing_statistics(self, total_time, run_results):
        self._print_debug("Test timing:")
        self._print_debug("  %6.2f total testing time" % total_time)
        self._print_debug("")

        self._print_worker_statistics(run_results, int(self._options.child_processes))
        self._print_aggregate_test_statistics(run_results)
        self._print_individual_test_times(run_results)
        self._print_directory_timings(run_results)

    def _print_worker_statistics(self, run_results, num_workers):
        self._print_debug("Thread timing:")
        stats = {}
        cuml_time = 0
        for result in run_results.results_by_name.values():
            stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
            stats[result.worker_name]['num_tests'] += 1
            stats[result.worker_name]['total_time'] += result.total_run_time
            cuml_time += result.total_run_time

        for worker_name in stats:
            self._print_debug("    %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
        self._print_debug("   %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
        self._print_debug("")

    def _print_aggregate_test_statistics(self, run_results):
        times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)

    def _print_individual_test_times(self, run_results):
        # Reverse-sort by the time spent in the driver.

        individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in run_results.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in run_results.failures_by_name:
                result = run_results.results_by_name[test_name].type
                if (result == test_expectations.TIMEOUT or
                    result == test_expectations.CRASH):
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        if unexpected_slow_tests:
            self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
                NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
            self._print_debug("")

        if slow_tests:
            self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
            self._print_debug("")

        if timeout_or_crash_tests:
            self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
            self._print_debug("")

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug("  %s took %s seconds" % (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, run_results):
        stats = {}
        for result in run_results.results_by_name.values():
            stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
            stats[result.shard_name]['num_tests'] += 1
            stats[result.shard_name]['total_time'] += result.total_run_time

        min_seconds_to_print = 15

        timings = []
        for directory in stats:
            rounded_time = round(stats[directory]['total_time'], 1)
            if rounded_time > min_seconds_to_print:
                timings.append((directory, rounded_time, stats[directory]['num_tests']))

        if not timings:
            return

        timings.sort()

        self._print_debug("Time to process slowest subdirectories:")
        for timing in timings:
            self._print_debug("  %s took %s seconds to run %s tests." % timing)
        self._print_debug("")

    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(.9 * num_tests)]
        percentile99 = timings[int(.99 * num_tests)]

        if num_tests % 2 == 1:
            median = timings[(num_tests - 1) / 2]
        else:
            lower = timings[num_tests / 2 - 1]
            upper = timings[num_tests / 2]
            median = (float(lower + upper)) / 2

        mean = sum(timings) / num_tests

        # Sum the squared deviations from the mean to get the variance.
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_one_line_summary(self, total_time, run_results):
        if self._options.timing:
            parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())

            # There is serial overhead in layout_test_runner.run() that we can't easily account for when
            # really running in parallel, but taking the min() ensures that in the worst case
            # (if parallel time is less than run_time) we do account for it.
            serial_time = total_time - min(run_results.run_time, parallel_time)

            speedup = (parallel_time + serial_time) / total_time
            timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
        else:
            timing_summary = ''

        total = run_results.total - run_results.expected_skips
        expected = run_results.expected - run_results.expected_skips
        unexpected = run_results.unexpected
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        expected_summary_str = ''
        if run_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
                else:
                    summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
            else:
                summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
        else:
            summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide, just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
        return format_string % (self.num_completed, self.num_tests, test_name, suffix)

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures, expected,
                                              self._options.timing, result.test_run_time)

        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif self._options.verbose or not expected:
            self.writeln(self._test_status_line(test_name, result_message))
        elif self.num_completed == self.num_tests:
            self._meter.write_update('')
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            for test_name, result_message in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, timing, test_run_time):
        exp_string = ' unexpectedly' if not expected else ''
        timing_string = ' %.4fs' % test_run_time if timing else ''
        if result_type == test_expectations.PASS:
            return ' passed%s%s' % (exp_string, timing_string)
        else:
            return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        base = self._port.lookup_virtual_test_base(test_name)
        if base:
            args = ' '.join(self._port.lookup_virtual_test_args(test_name))
            reference_args = ' '.join(self._port.lookup_virtual_reference_args(test_name))
            self._print_default(' base: %s' % base)
            self._print_default(' args: %s' % args)
            self._print_default(' reference_args: %s' % reference_args)

        references = self._port.reference_files(test_name)
        if references:
            for _, filename in references:
                self._print_default('  ref: %s' % self._port.relative_test_filename(filename))
        else:
            for extension in ('.txt', '.png', '.wav'):
                self._print_baseline(test_name, extension)

        self._print_default('  exp: %s' % exp_str)
        self._print_default('  got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default('  %s: %s' % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_throttled_update(self, msg):
        self._meter.write_throttled_update(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
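
The timing summary assembled in _print_one_line_summary above combines wall-clock time, the sum of per-test run times, and the run time reported by layout_test_runner. A worked example of that arithmetic with made-up numbers (all values hypothetical):

# Hypothetical figures: 100 s of wall-clock time, per-test times summing to
# 360 s of parallel work, and layout_test_runner reporting run_time = 90 s.
total_time = 100.0
parallel_time = 360.0
runner_run_time = 90.0

# Serial overhead attributed to run-webkit-tests itself.
serial_time = total_time - min(runner_run_time, parallel_time)   # 10.0
speedup = (parallel_time + serial_time) / total_time             # 3.7

print(' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup))
# -> " in 100.00s (10.00s in rwt, 3.7x)"
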
Example #4
class Printer(object):
    def __init__(self, stream, options=None):
        self.stream = stream
        self.meter = None
        self.options = options
        self.num_tests = 0
        self.num_started = 0
        self.num_errors = 0
        self.num_failures = 0
        self.running_tests = []
        self.completed_tests = []
        if options:
            self.configure(options)

    def configure(self, options):
        self.options = options

        if options.timing:
            # --timing implies --verbose
            options.verbose = max(options.verbose, 1)

        log_level = logging.INFO
        if options.quiet:
            log_level = logging.WARNING
        elif options.verbose == 2:
            log_level = logging.DEBUG

        self.meter = MeteredStream(
            self.stream, (options.verbose == 2), number_of_columns=SystemHost().platform.terminal_width()
        )

        handler = logging.StreamHandler(self.stream)
        # We constrain the level on the handler rather than on the root
        # logger itself.  This is probably better because the handler is
        # configured and known only to this module, whereas the root logger
        # is an object shared (and potentially modified) by many modules.
        # Modifying the handler, then, is less intrusive and less likely to
        # interfere with modifications made by other modules (e.g. in unit
        # tests).
        handler.name = __name__
        handler.setLevel(log_level)
        formatter = logging.Formatter("%(message)s")
        handler.setFormatter(formatter)

        logger = logging.getLogger()
        logger.addHandler(handler)
        logger.setLevel(logging.NOTSET)

        # Filter out most webkitpy messages.
        #
        # Messages can be selectively re-enabled for this script by updating
        # this method accordingly.
        def filter_records(record):
            """Filter out autoinstall and non-third-party webkitpy messages."""
            # FIXME: Figure out a way not to use strings here, for example by
            #        using syntax like webkitpy.test.__name__.  We want to be
            #        sure not to import any non-Python 2.4 code, though, until
            #        after the version-checking code has executed.
            if record.name.startswith("webkitpy.common.system.autoinstall") or record.name.startswith("webkitpy.test"):
                return True
            if record.name.startswith("webkitpy"):
                return False
            return True

        testing_filter = logging.Filter()
        testing_filter.filter = filter_records

        # Display a message so developers are not mystified as to why
        # logging does not work in the unit tests.
        _log.info("Suppressing most webkitpy logging while running unit tests.")
        handler.addFilter(testing_filter)

        if self.options.pass_through:
            # FIXME: Can't import at top of file, as outputcapture needs unittest2
            from webkitpy.common.system import outputcapture

            outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream

    def write_update(self, msg):
        self.meter.write_update(msg)

    def print_started_test(self, source, test_name):
        self.running_tests.append(test_name)
        if len(self.running_tests) > 1:
            suffix = " (+%d)" % (len(self.running_tests) - 1)
        else:
            suffix = ""

        if self.options.verbose:
            write = self.meter.write_update
        else:
            write = self.meter.write_throttled_update

        write(self._test_line(self.running_tests[0], suffix))

    def print_finished_test(self, source, test_name, test_time, failures, errors):
        write = self.meter.writeln
        if failures:
            lines = failures[0].splitlines() + [""]
            suffix = " failed:"
            self.num_failures += 1
        elif errors:
            lines = errors[0].splitlines() + [""]
            suffix = " erred:"
            self.num_errors += 1
        else:
            suffix = " passed"
            lines = []
            if self.options.verbose:
                write = self.meter.writeln
            else:
                write = self.meter.write_throttled_update
        if self.options.timing:
            suffix += " %.4fs" % test_time

        self.num_started += 1

        if test_name == self.running_tests[0]:
            self.completed_tests.insert(0, [test_name, suffix, lines])
        else:
            self.completed_tests.append([test_name, suffix, lines])
        self.running_tests.remove(test_name)

        for test_name, msg, lines in self.completed_tests:
            if lines:
                self.meter.writeln(self._test_line(test_name, msg))
                for line in lines:
                    self.meter.writeln("  " + line)
            else:
                write(self._test_line(test_name, msg))
        self.completed_tests = []

    def _test_line(self, test_name, suffix):
        format_string = "[%d/%d] %s%s"
        status_line = format_string % (self.num_started, self.num_tests, test_name, suffix)
        if len(status_line) > self.meter.number_of_columns():
            overflow_columns = len(status_line) - self.meter.number_of_columns()
            ellipsis = "..."
            if len(test_name) < overflow_columns + len(ellipsis) + 3:
                # We don't have enough space even if we elide, just show the test method name.
                test_name = test_name.split(".")[-1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix) :]
        return format_string % (self.num_started, self.num_tests, test_name, suffix)

    def print_result(self, run_time):
        write = self.meter.writeln
        write("Ran %s in %.3fs" % (pluralize(self.num_started, "test"), run_time))
        if self.num_failures or self.num_errors:
            write("FAILED (failures=%d, errors=%d)\n" % (self.num_failures, self.num_errors))
        else:
            write("\nOK\n")
Example #5
class Printer(object):
    def __init__(self, stream, options=None):
        self.stream = stream
        self.meter = None
        self.options = options
        self.num_tests = 0
        self.num_started = 0
        self.num_errors = 0
        self.num_failures = 0
        self.running_tests = []
        self.completed_tests = []
        if options:
            self.configure(options)

    def configure(self, options):
        self.options = options

        if options.timing:
            # --timing implies --verbose
            options.verbose = max(options.verbose, 1)

        log_level = logging.INFO
        if options.quiet:
            log_level = logging.WARNING
        elif options.verbose == 2:
            log_level = logging.DEBUG

        self.meter = MeteredStream(
            self.stream, (options.verbose == 2),
            number_of_columns=SystemHost().platform.terminal_width())

        handler = logging.StreamHandler(self.stream)
        # We constrain the level on the handler rather than on the root
        # logger itself.  This is probably better because the handler is
        # configured and known only to this module, whereas the root logger
        # is an object shared (and potentially modified) by many modules.
        # Modifying the handler, then, is less intrusive and less likely to
        # interfere with modifications made by other modules (e.g. in unit
        # tests).
        handler.name = __name__
        handler.setLevel(log_level)
        formatter = logging.Formatter("%(message)s")
        handler.setFormatter(formatter)

        logger = logging.getLogger()
        logger.addHandler(handler)
        logger.setLevel(logging.NOTSET)

        # Filter out most webkitpy messages.
        #
        # Messages can be selectively re-enabled for this script by updating
        # this method accordingly.
        def filter_records(record):
            """Filter out autoinstall and non-third-party webkitpy messages."""
            # FIXME: Figure out a way not to use strings here, for example by
            #        using syntax like webkitpy.test.__name__.  We want to be
            #        sure not to import any non-Python 2.4 code, though, until
            #        after the version-checking code has executed.
            if (record.name.startswith("webkitpy.common.system.autoinstall")
                    or record.name.startswith("webkitpy.test")):
                return True
            if record.name.startswith("webkitpy"):
                return False
            return True

        testing_filter = logging.Filter()
        testing_filter.filter = filter_records

        # Display a message so developers are not mystified as to why
        # logging does not work in the unit tests.
        _log.info(
            "Suppressing most webkitpy logging while running unit tests.")
        handler.addFilter(testing_filter)

        if self.options.pass_through:
            outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream

    def write_update(self, msg):
        self.meter.write_update(msg)

    def print_started_test(self, source, test_name):
        self.running_tests.append(test_name)
        if len(self.running_tests) > 1:
            suffix = ' (+%d)' % (len(self.running_tests) - 1)
        else:
            suffix = ''

        if self.options.verbose:
            write = self.meter.write_update
        else:
            write = self.meter.write_throttled_update

        write(self._test_line(self.running_tests[0], suffix))

    def print_finished_test(self, source, test_name, test_time, failures,
                            errors):
        write = self.meter.writeln
        if failures:
            lines = failures[0].splitlines() + ['']
            suffix = ' failed:'
            self.num_failures += 1
        elif errors:
            lines = errors[0].splitlines() + ['']
            suffix = ' erred:'
            self.num_errors += 1
        else:
            suffix = ' passed'
            lines = []
            if self.options.verbose:
                write = self.meter.writeln
            else:
                write = self.meter.write_throttled_update
        if self.options.timing:
            suffix += ' %.4fs' % test_time

        self.num_started += 1

        if test_name == self.running_tests[0]:
            self.completed_tests.insert(0, [test_name, suffix, lines])
        else:
            self.completed_tests.append([test_name, suffix, lines])
        self.running_tests.remove(test_name)

        for test_name, msg, lines in self.completed_tests:
            if lines:
                self.meter.writeln(self._test_line(test_name, msg))
                for line in lines:
                    self.meter.writeln('  ' + line)
            else:
                write(self._test_line(test_name, msg))
        self.completed_tests = []

    def _test_line(self, test_name, suffix):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_started, self.num_tests,
                                       test_name, suffix)
        if len(status_line) > self.meter.number_of_columns():
            overflow_columns = len(
                status_line) - self.meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 3:
                # We don't have enough space even if we elide, just show the test method name.
                test_name = test_name.split('.')[-1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(
                    new_length - prefix):]
        return format_string % (self.num_started, self.num_tests, test_name,
                                suffix)

    def print_result(self, run_time):
        write = self.meter.writeln
        write('Ran %s in %.3fs' %
              (pluralize(self.num_started, "test"), run_time))
        if self.num_failures or self.num_errors:
            write('FAILED (failures=%d, errors=%d)\n' %
                  (self.num_failures, self.num_errors))
        else:
            write('\nOK\n')
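
Both this variant and the previous one shorten long test names from the middle so the status line fits the terminal. A stand-alone sketch of the elision logic in _test_line(); the 40-column width is an arbitrary stand-in for meter.number_of_columns():

# Stand-alone sketch of the middle-ellipsis elision in _test_line() above.
# The 40-column width is an arbitrary stand-in for number_of_columns().
def elide_middle(status_line, test_name, columns=40):
    if len(status_line) <= columns:
        return test_name
    overflow_columns = len(status_line) - columns
    ellipsis = '...'
    if len(test_name) < overflow_columns + len(ellipsis) + 3:
        # Not enough room even after eliding; fall back to the method name.
        return test_name.split('.')[-1]
    new_length = len(test_name) - overflow_columns - len(ellipsis)
    prefix = new_length // 2
    return test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]

name = 'webkitpy.layout_tests.controllers.layout_test_runner_unittest.Runner.test_foo'
print(elide_middle('[12/345] %s passed' % name, name))
# -> the name elided in the middle so '[12/345] <name> passed' fits in 40 columns
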
Example #6
class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests."""
    def __init__(self, port, options, regular_output, logger=None):
        self.num_completed = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._meter = MeteredStream(
            regular_output,
            options.debug_rwt_logging,
            logger=logger,
            number_of_columns=self._port.host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self, results_directory):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default('Test configuration: %s' %
                            self._port.test_configuration())
        self._print_default('View the test results at file://%s/results.html' %
                            results_directory)
        if self._options.order == 'random':
            self._print_default('Using random order with seed: %d' %
                                self._options.seed)

        fs = self._port.host.filesystem
        fallback_path = [
            fs.split(x)[1] for x in self._port.baseline_search_path()
        ]
        self._print_default('Baseline search path: %s -> generic' %
                            ' -> '.join(fallback_path))

        self._print_default('Using %s build' % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default('Pixel tests enabled')
        else:
            self._print_default('Pixel tests disabled')

        self._print_default(
            'Regular timeout: %s, slow test timeout: %s' %
            (self._options.time_out_ms, self._options.slow_time_out_ms))

        self._print_default('Command line: ' +
                            ' '.join(self._port.driver_cmd_line()))
        self._print_default('')

    def print_found(self, num_all_test_files, num_shard_test_files, num_to_run,
                    repeat_each, iterations):
        found_str = 'Found %s' % grammar.pluralize('test',
                                                   num_shard_test_files)
        if num_all_test_files != num_shard_test_files:
            found_str += ' (total %d)' % num_all_test_files
        found_str += '; running %d' % num_to_run
        if repeat_each * iterations > 1:
            found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (
                repeat_each * iterations, repeat_each, iterations)
        found_str += ', skipping %d' % (num_shard_test_files - num_to_run)
        self._print_default(found_str + '.')

    def print_expected(self, run_results, tests_with_result_type_callback):
        self._print_expected_results_of_type(run_results,
                                             test_expectations.PASS, 'passes',
                                             tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results,
                                             test_expectations.FAIL,
                                             'failures',
                                             tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results,
                                             test_expectations.FLAKY, 'flaky',
                                             tests_with_result_type_callback)
        self._print_debug('')

    def print_workers_and_shards(self, num_workers, num_shards,
                                 num_locked_shards):
        driver_name = self._port.driver_name()
        if num_workers == 1:
            self._print_default('Running 1 %s.' % driver_name)
            self._print_debug('(%s).' % grammar.pluralize('shard', num_shards))
        else:
            self._print_default('Running %d %ss in parallel.' %
                                (num_workers, driver_name))
            self._print_debug('(%d shards; %d locked).' %
                              (num_shards, num_locked_shards))
        self._print_default('')

    def _print_expected_results_of_type(self, run_results, result_type,
                                        result_type_str,
                                        tests_with_result_type_callback):
        tests = tests_with_result_type_callback(result_type)
        now = run_results.tests_by_timeline[test_expectations.NOW]
        wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
        fmtstr = ('Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)' %
                  (self._num_digits(now), self._num_digits(wontfix)))
        self._print_debug(fmtstr % (len(tests), result_type_str,
                                    len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, run_results):
        self._print_timing_statistics(run_time, run_results)
        self._print_one_line_summary(run_time, run_results)

    def _print_timing_statistics(self, total_time, run_results):
        self._print_debug('Test timing:')
        self._print_debug('  %6.2f total testing time' % total_time)
        self._print_debug('')

        self._print_worker_statistics(run_results,
                                      int(self._options.child_processes))
        self._print_aggregate_test_statistics(run_results)
        self._print_individual_test_times(run_results)
        self._print_directory_timings(run_results)

    def _print_worker_statistics(self, run_results, num_workers):
        self._print_debug('Thread timing:')
        stats = {}
        cuml_time = 0
        for result in run_results.results_by_name.values():
            stats.setdefault(result.worker_name, {
                'num_tests': 0,
                'total_time': 0
            })
            stats[result.worker_name]['num_tests'] += 1
            stats[result.worker_name]['total_time'] += result.total_run_time
            cuml_time += result.total_run_time

        for worker_name in stats:
            self._print_debug('    %10s: %5d tests, %6.2f secs' %
                              (worker_name, stats[worker_name]['num_tests'],
                               stats[worker_name]['total_time']))
        self._print_debug('   %6.2f cumulative, %6.2f optimal' %
                          (cuml_time, cuml_time / num_workers))
        self._print_debug('')

    def _print_aggregate_test_statistics(self, run_results):
        times_for_dump_render_tree = [
            result.test_run_time
            for result in run_results.results_by_name.values()
        ]
        self._print_statistics_for_test_timings(
            'PER TEST TIME IN TESTSHELL (seconds):',
            times_for_dump_render_tree)

    def _print_individual_test_times(self, run_results):
        # Reverse-sort by the time spent in the driver.

        individual_test_timings = sorted(
            run_results.results_by_name.values(),
            key=lambda result: result.test_run_time,
            reverse=True)
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in run_results.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in run_results.failures_by_name:
                result = run_results.results_by_name[test_name].type
                if (result == test_expectations.TIMEOUT
                        or result == test_expectations.CRASH):
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG:
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug('')
        if unexpected_slow_tests:
            self._print_test_list_timing(
                '%s slowest tests that are not marked as SLOW and did not timeout/crash:'
                % NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
            self._print_debug('')

        if slow_tests:
            self._print_test_list_timing('Tests marked as SLOW:', slow_tests)
            self._print_debug('')

        if timeout_or_crash_tests:
            self._print_test_list_timing('Tests that timed out or crashed:',
                                         timeout_or_crash_tests)
            self._print_debug('')

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug('  %s took %s seconds' %
                              (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, run_results):
        stats = {}
        for result in run_results.results_by_name.values():
            stats.setdefault(result.shard_name, {
                'num_tests': 0,
                'total_time': 0
            })
            stats[result.shard_name]['num_tests'] += 1
            stats[result.shard_name]['total_time'] += result.total_run_time

        min_seconds_to_print = 15

        timings = []
        for directory in stats:
            rounded_time = round(stats[directory]['total_time'], 1)
            if rounded_time > min_seconds_to_print:
                timings.append(
                    (directory, rounded_time, stats[directory]['num_tests']))

        if not timings:
            return

        timings.sort()

        self._print_debug('Time to process slowest subdirectories:')
        for timing in timings:
            self._print_debug('  %s took %s seconds to run %s tests.' % timing)
        self._print_debug('')

    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(.9 * num_tests)]
        percentile99 = timings[int(.99 * num_tests)]

        if num_tests % 2 == 1:
            median = timings[((num_tests - 1) / 2) - 1]
        else:
            lower = timings[num_tests / 2 - 1]
            upper = timings[num_tests / 2]
            median = (float(lower + upper)) / 2

        mean = sum(timings) / num_tests

        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug('  Median:          %6.3f' % median)
        self._print_debug('  Mean:            %6.3f' % mean)
        self._print_debug('  90th percentile: %6.3f' % percentile90)
        self._print_debug('  99th percentile: %6.3f' % percentile99)
        self._print_debug('  Standard dev:    %6.3f' % std_deviation)
        self._print_debug('')

    def _print_one_line_summary(self, total_time, run_results):
        if self._options.timing:
            parallel_time = sum(
                result.total_run_time
                for result in run_results.results_by_name.values())

            # There is serial overhead in layout_test_runner.run() that we can't easily account for when
            # really running in parallel, but taking the min() ensures that in the worst case
            # (if parallel time is less than run_time) we do account for it.
            serial_time = total_time - min(run_results.run_time, parallel_time)

            speedup = (parallel_time + serial_time) / total_time
            timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (
                total_time, serial_time, speedup)
        else:
            timing_summary = ''

        total = run_results.total - run_results.expected_skips
        expected = run_results.expected - run_results.expected_skips
        unexpected = run_results.unexpected
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default('')
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln('')

        expected_summary_str = ''
        if run_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (
                expected - run_results.expected_failures,
                run_results.expected_failures)

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = 'All %d tests ran as expected%s%s.' % (
                        expected, expected_summary_str, timing_summary)
                else:
                    summary = 'The test ran as expected%s%s.' % (
                        expected_summary_str, timing_summary)
            else:
                summary = '%s ran as expected%s%s%s.' % (grammar.pluralize(
                    'test', expected), expected_summary_str, incomplete_str,
                                                         timing_summary)
        else:
            summary = "%s ran as expected%s, %d didn't%s%s:" % (
                grammar.pluralize('test', expected), expected_summary_str,
                unexpected, incomplete_str, timing_summary)

        self._print_quiet(summary)
        self._print_quiet('')

    def _test_status_line(self, test_name, suffix):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_completed, self.num_tests,
                                       test_name, suffix)
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(
                status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide, just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(
                    new_length - prefix):]
        return format_string % (self.num_completed, self.num_tests, test_name,
                                suffix)

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures,
                                              expected, self._options.timing,
                                              result.test_run_time)

        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif self._options.verbose or not expected:
            self.writeln(self._test_status_line(test_name, result_message))
        elif self.num_completed == self.num_tests:
            self._meter.write_update('')
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            for test_name, result_message in self._completed_tests:
                self._meter.write_throttled_update(
                    self._test_status_line(test_name, result_message))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, timing,
                        test_run_time):
        exp_string = ' unexpectedly' if not expected else ''
        timing_string = ' %.4fs' % test_run_time if timing else ''
        if result_type == test_expectations.PASS:
            return ' passed%s%s' % (exp_string, timing_string)
        else:
            return ' failed%s (%s)%s' % (exp_string, ', '.join(
                failure.message() for failure in failures), timing_string)

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        base = self._port.lookup_virtual_test_base(test_name)
        if base:
            args = ' '.join(self._port.lookup_virtual_test_args(test_name))
            reference_args = ' '.join(
                self._port.lookup_virtual_reference_args(test_name))
            self._print_default(' base: %s' % base)
            self._print_default(' args: %s' % args)
            self._print_default(' reference_args: %s' % reference_args)

        references = self._port.reference_files(test_name)
        if references:
            for _, filename in references:
                self._print_default(
                    '  ref: %s' % self._port.relative_test_filename(filename))
        else:
            for extension in ('.txt', '.png', '.wav'):
                self._print_baseline(test_name, extension)

        self._print_default('  exp: %s' % exp_str)
        self._print_default('  got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port.host.filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default('  %s: %s' % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_throttled_update(self, msg):
        self._meter.write_throttled_update(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
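
The timing suffix built in _print_one_line_summary() above compresses three measurements into one string. A worked example of that arithmetic with made-up timings:

# Worked example (made-up numbers) of the speedup arithmetic in
# _print_one_line_summary() above.
total_time = 120.0       # wall-clock time for the whole run
parallel_time = 400.0    # sum of per-test worker time (results' total_run_time)
run_time = 110.0         # run_results.run_time

# Serial overhead spent in run-webkit-tests itself, outside the workers.
serial_time = total_time - min(run_time, parallel_time)    # 120 - 110 = 10
speedup = (parallel_time + serial_time) / total_time        # 410 / 120 ~ 3.4x
print(' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup))
# -> ' in 120.00s (10.00s in rwt, 3.4x)'
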
Example #7
class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests."""

    def __init__(self, port, options, regular_output, logger=None):
        self.num_completed = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._meter = MeteredStream(
            regular_output,
            options.debug_rwt_logging,
            logger=logger,
            number_of_columns=self._port.host.platform.terminal_width(),
        )
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self, results_directory):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default("Test configuration: %s" % self._port.test_configuration())
        self._print_default("Placing test results in %s" % results_directory)

        # FIXME: should these options be in printing_options?
        if self._options.new_baseline:
            self._print_default("Placing new baselines in %s" % self._port.baseline_path())

        fs = self._port.host.filesystem
        fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
        self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))

        self._print_default("Using %s build" % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default("Pixel tests enabled")
        else:
            self._print_default("Pixel tests disabled")

        self._print_default(
            "Regular timeout: %s, slow test timeout: %s" % (self._options.time_out_ms, self._options.slow_time_out_ms)
        )

        self._print_default("Command line: " + " ".join(self._port.driver_cmd_line()))
        self._print_default("")

    def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
        num_unique_tests = num_to_run / (repeat_each * iterations)
        found_str = "Found %s; running %d" % (grammar.pluralize("test", num_all_test_files), num_unique_tests)
        if repeat_each * iterations > 1:
            found_str += " (%d times each: --repeat-each=%d --iterations=%d)" % (
                repeat_each * iterations,
                repeat_each,
                iterations,
            )
        found_str += ", skipping %d" % (num_all_test_files - num_unique_tests)
        self._print_default(found_str + ".")

    def print_expected(self, result_summary, tests_with_result_type_callback):
        self._print_expected_results_of_type(
            result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback
        )
        self._print_expected_results_of_type(
            result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback
        )
        self._print_expected_results_of_type(
            result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback
        )
        self._print_debug("")

    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
        driver_name = self._port.driver_name()
        if num_workers == 1:
            self._print_default("Running 1 %s over %s." % (driver_name, grammar.pluralize("shard", num_shards)))
        else:
            self._print_default(
                "Running %d %ss in parallel over %d shards (%d locked)."
                % (num_workers, driver_name, num_shards, num_locked_shards)
            )
        self._print_default("")

    def _print_expected_results_of_type(
        self, result_summary, result_type, result_type_str, tests_with_result_type_callback
    ):
        tests = tests_with_result_type_callback(result_type)
        now = result_summary.tests_by_timeline[test_expectations.NOW]
        wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
        fmtstr = "Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" % (self._num_digits(now), self._num_digits(wontfix))
        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, result_summary, summarized_results):
        self._print_timing_statistics(run_time, result_summary)
        self._print_one_line_summary(
            result_summary.total - result_summary.expected_skips,
            result_summary.expected - result_summary.expected_skips,
            result_summary.unexpected,
        )

    def _print_timing_statistics(self, total_time, result_summary):
        self._print_debug("Test timing:")
        self._print_debug("  %6.2f total testing time" % total_time)
        self._print_debug("")

        self._print_worker_statistics(result_summary, int(self._options.child_processes))
        self._print_aggregate_test_statistics(result_summary)
        self._print_individual_test_times(result_summary)
        self._print_directory_timings(result_summary)

    def _print_worker_statistics(self, result_summary, num_workers):
        self._print_debug("Thread timing:")
        stats = {}
        cuml_time = 0
        for result in result_summary.results.values():
            stats.setdefault(result.worker_name, {"num_tests": 0, "total_time": 0})
            stats[result.worker_name]["num_tests"] += 1
            stats[result.worker_name]["total_time"] += result.total_run_time
            cuml_time += result.total_run_time

        for worker_name in stats:
            self._print_debug(
                "    %10s: %5d tests, %6.2f secs"
                % (worker_name, stats[worker_name]["num_tests"], stats[worker_name]["total_time"])
            )
        self._print_debug("   %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
        self._print_debug("")

    def _print_aggregate_test_statistics(self, result_summary):
        times_for_dump_render_tree = [result.test_run_time for result in result_summary.results.values()]
        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)

    def _print_individual_test_times(self, result_summary):
        # Reverse-sort by the time spent in DumpRenderTree.

        individual_test_timings = sorted(
            result_summary.results.values(), key=lambda result: result.test_run_time, reverse=True
        )
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in result_summary.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in result_summary.failures:
                result = result_summary.results[test_name].type
                if result == test_expectations.TIMEOUT or result == test_expectations.CRASH:
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG:
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        self._print_test_list_timing(
            "%s slowest tests that are not marked as SLOW and did not timeout/crash:" % NUM_SLOW_TESTS_TO_LOG,
            unexpected_slow_tests,
        )
        self._print_debug("")
        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
        self._print_debug("")

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug("  %s took %s seconds" % (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, result_summary):
        stats = {}
        for result in result_summary.results.values():
            stats.setdefault(result.shard_name, {"num_tests": 0, "total_time": 0})
            stats[result.shard_name]["num_tests"] += 1
            stats[result.shard_name]["total_time"] += result.total_run_time

        timings = []
        for directory in stats:
            timings.append((directory, round(stats[directory]["total_time"], 1), stats[directory]["num_tests"]))
        timings.sort()

        self._print_debug("Time to process slowest subdirectories:")
        min_seconds_to_print = 10
        for timing in timings:
            if timing[1] > min_seconds_to_print:
                self._print_debug("  %s took %s seconds to run %s tests." % timing)
        self._print_debug("")

    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(0.9 * num_tests)]
        percentile99 = timings[int(0.99 * num_tests)]

        if num_tests % 2 == 1:
            median = timings[((num_tests - 1) / 2) - 1]
        else:
            lower = timings[num_tests / 2 - 1]
            upper = timings[num_tests / 2]
            median = (float(lower + upper)) / 2

        mean = sum(timings) / num_tests

        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_one_line_summary(self, total, expected, unexpected):
        incomplete = total - expected - unexpected
        incomplete_str = ""
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        summary = ""
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected." % expected
                else:
                    summary = "The test ran as expected."
            else:
                summary = "%s ran as expected%s." % (grammar.pluralize("test", expected), incomplete_str)
        else:
            summary = "%s ran as expected, %d didn't%s:" % (
                grammar.pluralize("test", expected),
                unexpected,
                incomplete_str,
            )

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix):
        format_string = "[%d/%d] %s%s"
        status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(status_line) - self._meter.number_of_columns()
            ellipsis = "..."
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide, just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix) :]
        return format_string % (self.num_completed, self.num_tests, test_name, suffix)

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = " (+%d)" % (len(self._running_tests) - 1)
        else:
            suffix = ""
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures, expected, self._options.verbose)

        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
            self.writeln(self._test_status_line(test_name, result_message))
        elif self.num_completed == self.num_tests:
            self._meter.write_update("")
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            for test_name, result_message in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, verbose):
        exp_string = " unexpectedly" if not expected else ""
        if result_type == test_expectations.PASS:
            return " passed%s" % exp_string
        else:
            return " failed%s (%s)" % (exp_string, ", ".join(failure.message() for failure in failures))

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ""))

        base = self._port.lookup_virtual_test_base(test_name)
        if base:
            args = " ".join(self._port.lookup_virtual_test_args(test_name))
            self._print_default(" base: %s" % base)
            self._print_default(" args: %s" % args)

        for extension in (".txt", ".png", ".wav", ".webarchive"):
            self._print_baseline(test_name, extension)

        self._print_default("  exp: %s" % exp_str)
        self._print_default("  got: %s" % got_str)
        self._print_default(" took: %-.3f" % result.test_run_time)
        self._print_default("")

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = "<none>"
        self._print_default("  %s: %s" % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
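
The aligned "Expect:" lines printed by _print_expected_results_of_type() above are produced by first building the format string itself with _num_digits(). A small worked example with invented set sizes:

# Worked example (invented set sizes) of the two-stage format string built in
# _print_expected_results_of_type() above.
import math

def num_digits(collection):
    return int(math.log10(len(collection))) + 1 if len(collection) else 1

now = set(range(1234))      # 4 digits wide
wontfix = set(range(56))    # 2 digits wide
fmtstr = ('Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)' %
          (num_digits(now), num_digits(wontfix)))
# fmtstr == 'Expect: %5d %-8s (%4d now, %2d wontfix)'
print(fmtstr % (980, 'passes', 950, 30))
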
Example #8
class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests.

    Printing from run-webkit-tests falls into two buckets: general or
    regular output that is read only by humans and can be changed at any
    time, and output that is parsed by buildbots (and humans) and hence
    must be changed more carefully and in coordination with the buildbot
    parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
    log_parser/webkit_test_command.py script).

    By default the buildbot-parsed code gets logged to stdout, and regular
    output gets logged to stderr."""
    def __init__(self,
                 port,
                 options,
                 regular_output,
                 buildbot_output,
                 logger=None):
        self.num_completed = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._buildbot_stream = buildbot_output
        self._meter = MeteredStream(
            regular_output,
            options.debug_rwt_logging,
            logger=logger,
            number_of_columns=self._port.host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self, results_directory):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default("Test configuration: %s" %
                            self._port.test_configuration())
        self._print_default("Placing test results in %s" % results_directory)

        # FIXME: should these options be in printing_options?
        if self._options.new_baseline:
            self._print_default("Placing new baselines in %s" %
                                self._port.baseline_path())

        fs = self._port.host.filesystem
        fallback_path = [
            fs.split(x)[1] for x in self._port.baseline_search_path()
        ]
        self._print_default("Baseline search path: %s -> generic" %
                            " -> ".join(fallback_path))

        self._print_default("Using %s build" % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default("Pixel tests enabled")
        else:
            self._print_default("Pixel tests disabled")

        self._print_default(
            "Regular timeout: %s, slow test timeout: %s" %
            (self._options.time_out_ms, self._options.slow_time_out_ms))

        self._print_default('Command line: ' +
                            ' '.join(self._port.driver_cmd_line()))
        self._print_default('')

    def print_found(self, num_all_test_files, num_to_run, repeat_each,
                    iterations):
        num_unique_tests = num_to_run / (repeat_each * iterations)
        found_str = 'Found %s; running %d' % (grammar.pluralize(
            'test', num_all_test_files), num_unique_tests)
        if repeat_each * iterations > 1:
            found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (
                repeat_each * iterations, repeat_each, iterations)
        found_str += ', skipping %d' % (num_all_test_files - num_unique_tests)
        self._print_default(found_str + '.')

    def print_expected(self, result_summary, tests_with_result_type_callback):
        self._print_expected_results_of_type(result_summary,
                                             test_expectations.PASS, "passes",
                                             tests_with_result_type_callback)
        self._print_expected_results_of_type(result_summary,
                                             test_expectations.FAIL,
                                             "failures",
                                             tests_with_result_type_callback)
        self._print_expected_results_of_type(result_summary,
                                             test_expectations.FLAKY, "flaky",
                                             tests_with_result_type_callback)
        self._print_debug('')

    def print_workers_and_shards(self, num_workers, num_shards,
                                 num_locked_shards):
        driver_name = self._port.driver_name()
        if num_workers == 1:
            self._print_default(
                "Running 1 %s over %s." %
                (driver_name, grammar.pluralize('shard', num_shards)))
        else:
            self._print_default(
                "Running %d %ss in parallel over %d shards (%d locked)." %
                (num_workers, driver_name, num_shards, num_locked_shards))
        self._print_default('')

    def _print_expected_results_of_type(self, result_summary, result_type,
                                        result_type_str,
                                        tests_with_result_type_callback):
        tests = tests_with_result_type_callback(result_type)
        now = result_summary.tests_by_timeline[test_expectations.NOW]
        wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" %
                  (self._num_digits(now), self._num_digits(wontfix)))
        self._print_debug(fmtstr % (len(tests), result_type_str,
                                    len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, thread_timings, test_timings,
                      individual_test_timings, result_summary,
                      unexpected_results):
        self._print_timing_statistics(run_time, thread_timings, test_timings,
                                      individual_test_timings, result_summary)
        self._print_result_summary(result_summary)
        self._print_one_line_summary(
            result_summary.total - result_summary.expected_skips,
            result_summary.expected - result_summary.expected_skips,
            result_summary.unexpected)
        self._print_unexpected_results(unexpected_results)

    def _print_timing_statistics(self, total_time, thread_timings,
                                 directory_test_timings,
                                 individual_test_timings, result_summary):
        self._print_debug("Test timing:")
        self._print_debug("  %6.2f total testing time" % total_time)
        self._print_debug("")
        self._print_debug("Thread timing:")
        cuml_time = 0
        for t in thread_timings:
            self._print_debug("    %10s: %5d tests, %6.2f secs" %
                              (t['name'], t['num_tests'], t['total_time']))
            cuml_time += t['total_time']
        self._print_debug(
            "   %6.2f cumulative, %6.2f optimal" %
            (cuml_time, cuml_time / int(self._options.child_processes)))
        self._print_debug("")

        self._print_aggregate_test_statistics(individual_test_timings)
        self._print_individual_test_times(individual_test_timings,
                                          result_summary)
        self._print_directory_timings(directory_test_timings)

    def _print_aggregate_test_statistics(self, individual_test_timings):
        times_for_dump_render_tree = [
            test_stats.test_run_time for test_stats in individual_test_timings
        ]
        self._print_statistics_for_test_timings(
            "PER TEST TIME IN TESTSHELL (seconds):",
            times_for_dump_render_tree)

    def _print_individual_test_times(self, individual_test_timings,
                                     result_summary):
        # Reverse-sort by the time spent in DumpRenderTree.
        individual_test_timings.sort(
            lambda a, b: cmp(b.test_run_time, a.test_run_time))
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in result_summary.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in result_summary.failures:
                result = result_summary.results[test_name].type
                if (result == test_expectations.TIMEOUT
                        or result == test_expectations.CRASH):
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if (not is_timeout_crash_or_slow
                    and num_printed < NUM_SLOW_TESTS_TO_LOG):
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        self._print_test_list_timing(
            "%s slowest tests that are not marked as SLOW and did not timeout/crash:"
            % NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests that timed out or crashed:",
                                     timeout_or_crash_tests)
        self._print_debug("")

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug("  %s took %s seconds" %
                              (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, directory_test_timings):
        timings = []
        for directory in directory_test_timings:
            num_tests, time_for_directory = directory_test_timings[directory]
            timings.append((round(time_for_directory,
                                  1), directory, num_tests))
        timings.sort()

        self._print_debug("Time to process slowest subdirectories:")
        min_seconds_to_print = 10
        for timing in timings:
            if timing[0] > min_seconds_to_print:
                self._print_debug("  %s took %s seconds to run %s tests." %
                                  (timing[1], timing[0], timing[2]))
        self._print_debug("")

    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(.9 * num_tests)]
        percentile99 = timings[int(.99 * num_tests)]

        if num_tests % 2 == 1:
            median = timings[(num_tests - 1) // 2]
        else:
            lower = timings[num_tests // 2 - 1]
            upper = timings[num_tests // 2]
            median = float(lower + upper) / 2

        mean = sum(timings) / num_tests

        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_result_summary(self, result_summary):
        if not self._options.debug_rwt_logging:
            return

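        # Expected skips are excluded from the total; anything that neither
        # failed nor is still remaining counts as passed.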
        failed = result_summary.total_failures
        total = result_summary.total - result_summary.expected_skips
        passed = total - failed - result_summary.remaining
        pct_passed = 0.0
        if total > 0:
            pct_passed = float(passed) * 100 / total

        self._print_for_bot("=> Results: %d/%d tests passed (%.1f%%)" %
                            (passed, total, pct_passed))
        self._print_for_bot("")
        self._print_result_summary_entry(result_summary, test_expectations.NOW,
                                         "Tests to be fixed")

        self._print_for_bot("")
        # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
        self._print_result_summary_entry(
            result_summary, test_expectations.WONTFIX,
            "Tests that will only be fixed if they crash (WONTFIX)")
        self._print_for_bot("")

    def _print_result_summary_entry(self, result_summary, timeline, heading):
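        # "Not passing" means every test in this timeline minus the ones
        # expected to PASS.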
        total = len(result_summary.tests_by_timeline[timeline])
        not_passing = (
            total -
            len(result_summary.tests_by_expectation[test_expectations.PASS]
                & result_summary.tests_by_timeline[timeline]))
        self._print_for_bot("=> %s (%d):" % (heading, not_passing))

        for result in TestExpectations.EXPECTATION_ORDER:
            if result in (test_expectations.PASS, test_expectations.SKIP):
                continue
            results = (result_summary.tests_by_expectation[result]
                       & result_summary.tests_by_timeline[timeline])
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
            if not_passing and len(results):
                pct = len(results) * 100.0 / not_passing
                self._print_for_bot("  %5d %-24s (%4.1f%%)" %
                                    (len(results), desc[0], pct))

    def _print_one_line_summary(self, total, expected, unexpected):
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected." % expected
                else:
                    summary = "The test ran as expected."
            else:
                summary = "%s ran as expected%s." % (grammar.pluralize(
                    'test', expected), incomplete_str)
        else:
            summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize(
                'test', expected), unexpected, incomplete_str)

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_completed, self.num_tests,
                                       test_name, suffix)
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(
                status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # There isn't enough space even if we elide, so just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
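                # Otherwise elide the middle of the test name so the whole
                # status line fits, keeping a roughly equal prefix and suffix
                # (e.g. 'fast/dom/some-...-name.html'; illustrative only).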
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(
                    new_length - prefix):]
        return format_string % (self.num_completed, self.num_tests, test_name,
                                suffix)

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
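        # Verbose runs write every update immediately; otherwise the meter
        # throttles how often the status line is redrawn.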
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name
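        # One of four outputs: a full per-test trace when details are
        # requested, an explicit line for verbose runs or unexpected
        # results, a final blank update when the last test completes, or a
        # throttled one-line status otherwise.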
        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif (self._options.verbose
              and not self._options.debug_rwt_logging) or not expected:
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type]
            suffix = ' ' + desc[1]
            if not expected:
                suffix += ' unexpectedly' + desc[2]
            self.writeln(self._test_status_line(test_name, suffix))
        elif self.num_completed == self.num_tests:
            self._meter.write_update('')
        else:
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type]
            suffix = ' ' + desc[1]
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, suffix])
            else:
                self._completed_tests.append([test_name, suffix])

            for test_name, suffix in self._completed_tests:
                self._meter.write_throttled_update(
                    self._test_status_line(test_name, suffix))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        base = self._port.lookup_virtual_test_base(test_name)
        if base:
            args = ' '.join(self._port.lookup_virtual_test_args(test_name))
            self._print_default(' base: %s' % base)
            self._print_default(' args: %s' % args)

        for extension in ('.txt', '.png', '.wav', '.webarchive'):
            self._print_baseline(test_name, extension)

        self._print_default('  exp: %s' % exp_str)
        self._print_default('  got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default('  %s: %s' % (extension[1:], relpath))

    def _print_unexpected_results(self, unexpected_results):
        # Prints to the buildbot stream
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(result_dict, key, value):
            result_dict.setdefault(key, []).append(value)

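        # Partition the unexpected results into three groups: unexpected
        # passes, flaky tests (more than one actual result, grouped by the
        # first one), and regressions (one unexpected non-PASS result).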
        def add_result(test,
                       results,
                       passes=passes,
                       flaky=flaky,
                       regressions=regressions):
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")
            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes,
                                         'Expected to time out, but passed',
                                         test)
                else:
                    add_to_dict_of_lists(passes,
                                         'Expected to fail, but passed', test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(unexpected_results['tests'],
                                        add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print_for_bot("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print_for_bot("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s" % test)
                self._print_for_bot("")
            self._print_for_bot("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Unexpected flakiness: %s (%d)" %
                                    (descriptions[result][0], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(
                        unexpected_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    # FIXME: clean this up once the old syntax is gone
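                    # The printed line joins every expected and actual result
                    # as new-style expectation tokens, e.g.
                    # "foo.html [ Failure Pass ]" (tokens illustrative).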
                    new_expectations_list = [
                        TestExpectationParser._inverted_expectation_tokens[exp]
                        for exp in list(set(actual) | set(expected))
                    ]
                    self._print_for_bot(
                        "  %s [ %s ]" %
                        (test, " ".join(new_expectations_list)))
                self._print_for_bot("")
            self._print_for_bot("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print_for_bot("Regressions: Unexpected %s : (%d)" %
                                    (descriptions[result][0], len(tests)))
                tests.sort()
                for test in tests:
                    self._print_for_bot("  %s [ %s ] " %
                                        (test, TestExpectationParser.
                                         _inverted_expectation_tokens[key]))
                self._print_for_bot("")

        if (len(unexpected_results['tests'])
                and self._options.debug_rwt_logging):
            self._print_for_bot("%s" % ("-" * 78))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def _print_for_bot(self, msg):
        self._buildbot_stream.write(msg + "\n")

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()