The tty-mode variant of test_basic, from a subclass of RegularTest (shown in full below) that sets isatty = True, so MeteredStream erases and rewrites each update in place instead of appending newlines:

    def test_basic(self):
        buflist = self._basic([0, 1, 1.05, 1.1, 2])
        self.assertEqual(buflist, [
            'foo',
            MeteredStream._erasure('foo'), 'bar',
            MeteredStream._erasure('bar'), 'baz 2',
            MeteredStream._erasure('baz 2'), 'done\n'
        ])
Example #2
What appears to be an older, webkitpy-era revision of the Printer constructor (shown in full in Example #8), taking a port and an optional logger rather than a host:

    def __init__(self, port, options, regular_output, logger=None):
        self.num_completed = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
                                    number_of_columns=self._port.host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []
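
The MeteredStream unit tests in these snippets assume roughly the following imports; the module path is an assumption based on the blinkpy source layout (older checkouts used webkitpy.layout_tests.views instead). Note this is Python 2 era code: the tests inspect StringIO.StringIO's buflist attribute, which records each individual write and has no equivalent on Python 3's io.StringIO.

import logging
import StringIO  # Python 2 module
import unittest

# Assumed path; not stated in the snippets themselves.
from blinkpy.web_tests.views.metered_stream import MeteredStream
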
class RegularTest(unittest.TestCase):
    verbose = False
    isatty = False

    def setUp(self):
        self.stream = StringIO.StringIO()
        self.buflist = self.stream.buflist
        self.stream.isatty = lambda: self.isatty

        # Configure a logger to verify that log calls normally do get included.
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = False

        # Provide a dummy sequence of times to serve as the default clock.
        self.times = range(10)

        self.meter = MeteredStream(self.stream, self.verbose, self.logger,
                                   self.time_fn, 8675)

    def tearDown(self):
        if self.meter:
            self.meter.cleanup()
            self.meter = None

    def time_fn(self):
        return self.times.pop(0)

    def test_logging_not_included(self):
        # This tests that if we don't hand a logger to the MeteredStream,
        # nothing is logged.
        logging_stream = StringIO.StringIO()
        handler = logging.StreamHandler(logging_stream)
        root_logger = logging.getLogger()
        orig_level = root_logger.level
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.DEBUG)
        try:
            self.meter = MeteredStream(self.stream, self.verbose, None,
                                       self.time_fn, 8675)
            self.meter.write_throttled_update('foo')
            self.meter.write_update('bar')
            self.meter.write('baz')
            self.assertEqual(logging_stream.buflist, [])
        finally:
            root_logger.removeHandler(handler)
            root_logger.setLevel(orig_level)

    def _basic(self, times):
        self.times = times
        self.meter.write_update('foo')
        self.meter.write_update('bar')
        self.meter.write_throttled_update('baz')
        self.meter.write_throttled_update('baz 2')
        self.meter.writeln('done')
        self.assertEqual(self.times, [])
        return self.buflist

    def test_basic(self):
        buflist = self._basic([0, 1, 2, 13, 14])
        self.assertEqual(buflist, ['foo\n', 'bar\n', 'baz 2\n', 'done\n'])

    def _log_after_update(self):
        self.meter.write_update('foo')
        self.logger.info('bar')
        return self.buflist

    def test_log_after_update(self):
        buflist = self._log_after_update()
        self.assertEqual(buflist, ['foo\n', 'bar\n'])

    def test_log_args(self):
        self.logger.info('foo %s %d', 'bar', 2)
        self.assertEqual(self.buflist, ['foo bar 2\n'])
The tty-mode variant of test_log_after_update, from the same subclass as the first snippet:

    def test_log_after_update(self):
        buflist = self._log_after_update()
        self.assertEqual(buflist,
                         ['foo', MeteredStream._erasure('foo'), 'bar\n'])
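
Taken together with setUp, RegularTest.test_basic pins down the throttling contract for a non-tty stream: write_update always writes, while write_throttled_update is dropped if it arrives too soon after the previous update (the tests don't reveal the exact threshold). A standalone sketch of the same sequence, with the constructor arguments passed positionally as in setUp and an arbitrary 80-column width:

stream = StringIO.StringIO()
times = [0, 1, 2, 13, 14]  # one timestamp is consumed per write call
meter = MeteredStream(stream, False, None, lambda: times.pop(0), 80)
meter.write_update('foo')              # t=0: updates are always written
meter.write_update('bar')              # t=1
meter.write_throttled_update('baz')    # t=2: too soon after 'bar'; dropped
meter.write_throttled_update('baz 2')  # t=13: long enough after 'bar'; written
meter.writeln('done')                  # t=14
assert stream.buflist == ['foo\n', 'bar\n', 'baz 2\n', 'done\n']
meter.cleanup()
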
Example #8
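The full Printer class. A sketch of the imports it relies on, with module paths assumed from the blinkpy layout (they are not shown in the original snippet):

import logging
import math

from blinkpy.tool import grammar
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.views.metered_stream import MeteredStream
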
class Printer(object):
    """Class handling all non-debug-logging printing done by run_web_tests.py."""

    def __init__(self, host, options, regular_output):
        self.num_completed = 0
        self.num_tests = 0
        self._host = host
        self._options = options
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
        self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger,
                                    number_of_columns=host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self, port):
        self._print_default("Using port '%s'" % port.name())
        self._print_default('Test configuration: %s' % port.test_configuration())
        self._print_default('View the test results at file://%s/results.html' % port.results_directory())
        if self._options.order == 'random':
            self._print_default('Using random order with seed: %d' % self._options.seed)

        fs = self._host.filesystem
        fallback_path = [fs.split(x)[1] for x in port.baseline_search_path()]
        self._print_default('Baseline search path: %s -> generic' % ' -> '.join(fallback_path))
        self._print_default('Using %s build' % self._options.configuration)
        self._print_default('Regular timeout: %s, slow test timeout: %s' %
                            (self._options.time_out_ms, self._options.slow_time_out_ms))

        self._print_default('Command line: ' + ' '.join(port.driver_cmd_line()))
        self._print_default('')

    def print_found(self, num_all_test_files, num_shard_test_files, num_to_run, repeat_each, iterations):
        found_str = 'Found %s' % grammar.pluralize('test', num_shard_test_files)
        if num_all_test_files != num_shard_test_files:
            found_str += ' (total %d)' % num_all_test_files
        found_str += '; running %d' % num_to_run
        if repeat_each * iterations > 1:
            found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
        found_str += ', skipping %d' % (num_shard_test_files - num_to_run)
        self._print_default(found_str + '.')

    def print_expected(self, run_results, tests_with_result_type_callback):
        self._print_expected_results_of_type(run_results, test_expectations.PASS, 'passes', tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FAIL, 'failures', tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FLAKY, 'flaky', tests_with_result_type_callback)
        self._print_debug('')

    def print_workers_and_shards(self, port, num_workers, num_shards, num_locked_shards):
        driver_name = port.driver_name()
        if num_workers == 1:
            self._print_default('Running 1 %s.' % driver_name)
            self._print_debug('(%s).' % grammar.pluralize('shard', num_shards))
        else:
            self._print_default('Running %d %ss in parallel.' % (num_workers, driver_name))
            self._print_debug('(%d shards; %d locked).' % (num_shards, num_locked_shards))
        self._print_default('')

    def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
        tests = tests_with_result_type_callback(result_type)
        now = run_results.tests_by_timeline[test_expectations.NOW]
        wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
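        # With, e.g., 3-digit 'now' and 2-digit 'wontfix' totals this builds
        # 'Expect: %5d %-8s (%3d now, %2d wontfix)'.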
        fmtstr = ('Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)'
                  % (self._num_digits(now), self._num_digits(wontfix)))
        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, run_results):
        self.print_timing_statistics(run_time, run_results)
        self.print_summary(run_time, run_results)

    def print_timing_statistics(self, total_time, run_results):
        self._print_debug('Test timing:')
        self._print_debug('  %6.2f total testing time' % total_time)
        self._print_debug('')

        num_workers = int(self._options.child_processes)
        self._print_debug('Thread timing:')
        stats = {}
        cuml_time = 0
        for result in run_results.results_by_name.values():
            stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
            stats[result.worker_name]['num_tests'] += 1
            stats[result.worker_name]['total_time'] += result.total_run_time
            cuml_time += result.total_run_time

        for worker_name in stats:
            self._print_debug('    %10s: %5d tests, %6.2f secs' %
                              (worker_name, stats[worker_name]['num_tests'],
                               stats[worker_name]['total_time']))
        self._print_debug('   %6.2f cumulative, %6.2f optimal' % (cuml_time, cuml_time / num_workers))
        self._print_debug('')

    def print_summary(self, total_time, run_results):
        if self._options.timing:
            parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())

            # There is serial overhead in web_test_runner.run() that we can't easily account for when
            # really running in parallel, but taking the min() ensures that in the worst case
            # (if parallel time is less than run_time) we do account for it.
            serial_time = total_time - min(run_results.run_time, parallel_time)

            speedup = (parallel_time + serial_time) / total_time
            timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
        else:
            timing_summary = ''

        total = run_results.total - run_results.expected_skips
        expected = run_results.expected - run_results.expected_skips
        unexpected = run_results.unexpected
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default('')
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln('')

        expected_summary_str = ''
        if run_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (
                expected - run_results.expected_failures, run_results.expected_failures)

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = 'All %d tests ran as expected%s%s.' % (expected, expected_summary_str, timing_summary)
                else:
                    summary = 'The test ran as expected%s%s.' % (expected_summary_str, timing_summary)
            else:
                summary = '%s ran as expected%s%s%s.' % (grammar.pluralize(
                    'test', expected), expected_summary_str, incomplete_str, timing_summary)
            self._print_quiet(summary)
        else:
            self._print_quiet("%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize(
                'test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary))
            for test_name in sorted(run_results.unexpected_results_by_name):
                self._print_quiet('    %s' % test_name)

    def _test_status_line(self, test_name, suffix):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
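        # If the status line is wider than the terminal, elide the middle of
        # the test name, keeping a prefix and suffix around '...'.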
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide; just show the test filename.
                fs = self._host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
        return format_string % (self.num_completed, self.num_tests, test_name, suffix)

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, port, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures, expected,
                                              self._options.timing, result.test_run_time)

        if self._options.details:
            self._print_test_trace(port, result, exp_str, got_str)
        elif self._options.verbose or not expected:
            self.writeln(self._test_status_line(test_name, result_message))
        elif self.num_completed == self.num_tests:
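            # The last test just finished; write an empty update to clear
            # the status line.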
            self._meter.write_update('')
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            for test_name, result_message in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, timing, test_run_time):
        exp_string = ' unexpectedly' if not expected else ''
        timing_string = ' %.4fs' % test_run_time if timing else ''
        if result_type == test_expectations.PASS:
            return ' passed%s%s' % (exp_string, timing_string)
        else:
            return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)

    def _print_test_trace(self, port, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        base = port.lookup_virtual_test_base(test_name)
        if base:
            args = ' '.join(port.lookup_virtual_test_args(test_name))
            reference_args = ' '.join(port.lookup_virtual_reference_args(test_name))
            self._print_default(' base: %s' % base)
            self._print_default(' args: %s' % args)
            self._print_default(' reference_args: %s' % reference_args)

        references = port.reference_files(test_name)
        if references:
            for _, filename in references:
                self._print_default('  ref: %s' % port.relative_test_filename(filename))
        else:
            for extension in ('.txt', '.png', '.wav'):
                self._print_baseline(port, test_name, extension)

        self._print_default('  exp: %s' % exp_str)
        self._print_default('  got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, port, test_name, extension):
        baseline = port.expected_filename(test_name, extension)
        if self._host.filesystem.exists(baseline):
            relpath = port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default('  %s: %s' % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_throttled_update(self, msg):
        self._meter.write_throttled_update(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
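
A minimal sketch of constructing and driving a Printer outside run_web_tests.py, given the imports sketched above. The host and options stand-ins here are hypothetical, covering only the attributes the constructor and write paths touch; the real objects come from blinkpy's Host and the run_web_tests.py option parser:

import sys
from collections import namedtuple

# Hypothetical stand-ins, for illustration only.
Options = namedtuple('Options', ['debug_rwt_logging', 'quiet', 'verbose'])

class FakePlatform(object):
    def terminal_width(self):
        return 80

class FakeHost(object):
    platform = FakePlatform()

printer = Printer(FakeHost(), Options(debug_rwt_logging=False, quiet=False,
                                      verbose=False), sys.stderr)
try:
    printer.write_update('gathering tests ...')
    printer.writeln('done')
finally:
    printer.cleanup()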