Example 1
    def check_arguments_and_execute(self, options, args, tool=None):
        if len(args) < len(self.required_arguments):
            _log.error(
                "%s required, %s provided.  Provided: %s  Required: %s\nSee '%s help %s' for usage.",
                pluralize('argument', len(self.required_arguments)),
                pluralize('argument', len(args)), "'%s'" % ' '.join(args),
                ' '.join(self.required_arguments), tool.name(), self.name)
            return 1
        return self.execute(options, args, tool) or 0
Example 2
    def print_summary(self, total_time, run_results):
        if self._options.timing:
            parallel_time = sum(
                result.total_run_time
                for result in run_results.results_by_name.values())

            # There is serial overhead in layout_test_runner.run() that we can't easily account for when
            # really running in parallel, but taking the min() ensures that in the worst case
            # (if parallel time is less than run_time) we do account for it.
            serial_time = total_time - min(run_results.run_time, parallel_time)

            speedup = (parallel_time + serial_time) / total_time
            timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (
                total_time, serial_time, speedup)
        else:
            timing_summary = ''

        total = run_results.total - run_results.expected_skips
        expected = run_results.expected - run_results.expected_skips
        unexpected = run_results.unexpected
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default('')
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln('')

        expected_summary_str = ''
        if run_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (
                expected - run_results.expected_failures,
                run_results.expected_failures)

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = 'All %d tests ran as expected%s%s.' % (
                        expected, expected_summary_str, timing_summary)
                else:
                    summary = 'The test ran as expected%s%s.' % (
                        expected_summary_str, timing_summary)
            else:
                summary = '%s ran as expected%s%s%s.' % (
                    grammar.pluralize('test', expected), expected_summary_str,
                    incomplete_str, timing_summary)
            self._print_quiet(summary)
        else:
            self._print_quiet(
                "%s ran as expected%s, %d didn't%s%s:" %
                (grammar.pluralize('test', expected), expected_summary_str,
                 unexpected, incomplete_str, timing_summary))
            for test_name in sorted(run_results.unexpected_results_by_name):
                self._print_quiet('    %s' % test_name)
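
The timing branch above is easier to follow with concrete numbers. The standalone sketch below uses hypothetical timings (the 100s / 30s / 40s values are made up purely for illustration) to show how serial_time and the reported speedup fall out of the same formulas:

    # Hypothetical timings: the workers accumulated 100s of per-test run time,
    # the parallel phase took 30s of wall-clock time, the whole run took 40s.
    parallel_time = 100.0   # sum of result.total_run_time over all results
    run_time = 30.0         # run_results.run_time in the code above
    total_time = 40.0       # wall-clock time of the whole run

    serial_time = total_time - min(run_time, parallel_time)  # 40 - 30 = 10s of serial overhead
    speedup = (parallel_time + serial_time) / total_time     # (100 + 10) / 40 = 2.75

    print(' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup))
    # -> " in 40.00s (10.00s in rwt, 2.8x)"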
Example 3
    def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures):
        num_workers = self._port.num_workers(int(self._options.child_processes))

        initial_results = self._run_tests(
            tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
            num_workers)

        # Don't retry failures when interrupted by user or failures limit exception.
        should_retry_failures = should_retry_failures and not (
            initial_results.interrupted or initial_results.keyboard_interrupted)

        tests_to_retry = self._tests_to_retry(initial_results)
        all_retry_results = []
        if should_retry_failures and tests_to_retry:
            for retry_attempt in xrange(1, self._options.num_retries + 1):
                if not tests_to_retry:
                    break

                _log.info('')
                _log.info('Retrying %s, attempt %d of %d...',
                          grammar.pluralize('unexpected failure', len(tests_to_retry)),
                          retry_attempt, self._options.num_retries)

                retry_results = self._run_tests(tests_to_retry,
                                                tests_to_skip=set(),
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=num_workers,
                                                retry_attempt=retry_attempt)
                all_retry_results.append(retry_results)

                tests_to_retry = self._tests_to_retry(retry_results)
        return (initial_results, all_retry_results)
Example 4
    def print_found(self, num_all_test_files, num_shard_test_files, num_to_run, repeat_each, iterations):
        found_str = 'Found %s' % grammar.pluralize('test', num_shard_test_files)
        if num_all_test_files != num_shard_test_files:
            found_str += ' (total %d)' % num_all_test_files
        found_str += '; running %d' % num_to_run
        if repeat_each * iterations > 1:
            found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
        found_str += ', skipping %d' % (num_shard_test_files - num_to_run)
        self._print_default(found_str + '.')
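
With hypothetical inputs (num_all_test_files=1600, num_shard_test_files=1523, num_to_run=1523, repeat_each=2, iterations=1, values made up only for illustration), walking through the string building above gives:

    Found 1523 tests (total 1600); running 1523 (2 times each: --repeat-each=2 --iterations=1), skipping 0.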
Example 5
    def print_workers_and_shards(self, port, num_workers, num_shards, num_locked_shards):
        driver_name = port.driver_name()
        if num_workers == 1:
            self._print_default('Running 1 %s.' % driver_name)
            self._print_debug('(%s).' % grammar.pluralize('shard', num_shards))
        else:
            self._print_default('Running %d %ss in parallel.' % (num_workers, driver_name))
            self._print_debug('(%d shards; %d locked).' % (num_shards, num_locked_shards))
        self._print_default('')
Example 6
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retry_attempt):
        batch_size = self._options.derived_batch_size

        # If we're retrying a test, then it's because we think it might be flaky
        # and rerunning it might provide a different result. We must restart
        # content shell to get a valid result, as otherwise state can leak
        # from previous tests. To do so, we set a batch size of 1, as that
        # prevents content shell reuse.
        if not self._options.must_use_derived_batch_size and retry_attempt >= 1:
            batch_size = 1
        self._expectations = expectations
        self._test_inputs = test_inputs

        test_run_results = TestRunResults(
            self._expectations,
            len(test_inputs) + len(tests_to_skip),
            self._test_result_sink,
        )
        self._current_run_results = test_run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = ResultType.Skip
            test_run_results.add(
                result,
                expected=True,
                test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel, self._options.virtual_parallel,
            batch_size == 1)

        self._reorder_tests_by_args(locked_shards)
        self._reorder_tests_by_args(unlocked_shards)

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))

        if retry_attempt < 1:
            self._printer.print_workers_and_shards(self._port, num_workers,
                                                   len(all_shards),
                                                   len(locked_shards))

        if self._options.dry_run:
            return test_run_results

        self._printer.write_update(
            'Starting %s ...' % grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs,
                          batch_size) for shard in all_shards)

            if self._shards_to_redo:
                num_workers -= len(self._shards_to_redo)
                if num_workers > 0:
                    with message_pool.get(self, self._worker_factory,
                                          num_workers,
                                          self._port.host) as pool:
                        pool.run(('test_list', shard.name, shard.test_inputs,
                                  batch_size)
                                 for shard in self._shards_to_redo)
                else:
                    self._mark_interrupted_tests_as_skipped(
                        self._current_run_results)
                    raise TestRunInterruptedException(
                        'All workers have device failures. Exiting.')
        except TestRunInterruptedException as error:
            _log.warning(error.reason)
            test_run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            test_run_results.keyboard_interrupted = True
        except Exception as error:
            _log.debug('%s("%s") raised, exiting', error.__class__.__name__,
                       error)
            raise
        finally:
            test_run_results.run_time = time.time() - start_time

        return test_run_results
Example 7
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retry_attempt):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._retry_attempt = retry_attempt

        test_run_results = TestRunResults(
            self._expectations,
            len(test_inputs) + len(tests_to_skip))
        self._current_run_results = test_run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if retry_attempt < 1:
            self._printer.print_expected(
                test_run_results,
                self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            test_run_results.add(result,
                                 expected=True,
                                 test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel, self._options.batch_size == 1)

        self._reorder_tests_by_args(locked_shards)
        self._reorder_tests_by_args(unlocked_shards)

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))

        if retry_attempt < 1:
            self._printer.print_workers_and_shards(num_workers,
                                                   len(all_shards),
                                                   len(locked_shards))

        if self._options.dry_run:
            return test_run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)

            if self._shards_to_redo:
                num_workers -= len(self._shards_to_redo)
                if num_workers > 0:
                    with message_pool.get(self, self._worker_factory,
                                          num_workers,
                                          self._port.host) as pool:
                        pool.run(('test_list', shard.name, shard.test_inputs)
                                 for shard in self._shards_to_redo)
        except TestRunInterruptedException as error:
            _log.warning(error.reason)
            test_run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            test_run_results.keyboard_interrupted = True
        except Exception as error:
            _log.debug('%s("%s") raised, exiting', error.__class__.__name__,
                       error)
            raise
        finally:
            test_run_results.run_time = time.time() - start_time

        return test_run_results
Example 8
    def test_pluralize_two_ends_with_sh(self):
        self.assertEqual('2 crashes', grammar.pluralize('crash', 2))
Example 9
    def test_pluralize_two(self):
        self.assertEqual('2 tests', grammar.pluralize('test', 2))
Example 10
    def test_pluralize_one(self):
        self.assertEqual('1 test', grammar.pluralize('test', 1))
Example 11
    def test_pluralize_zero(self):
        self.assertEqual('0 tests', grammar.pluralize('test', 0))
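
The unit tests in Examples 8-11 pin down what grammar.pluralize is expected to produce: the count is always prefixed, the noun stays singular only for a count of exactly 1, and a noun ending in 'sh' such as 'crash' takes 'es' while 'test' takes a plain 's'. A minimal sketch that satisfies those four assertions (an illustration only, not necessarily how the real grammar module is implemented) could look like this:

    def pluralize(noun, count):
        # '1 test' stays singular; every other count gets a plural noun.
        if count == 1:
            return '1 %s' % noun
        # Nouns ending in s/sh/ch/x/z take 'es' ('crash' -> 'crashes');
        # everything else takes a plain 's' ('test' -> 'tests').
        if noun.endswith(('s', 'sh', 'ch', 'x', 'z')):
            return '%d %ses' % (count, noun)
        return '%d %ss' % (count, noun)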