Example 1
    def test_interrupt_if_at_failure_limits(self):
        port = Mock()  # FIXME: This should be a tighter mock.
        port.TEST_PATH_SEPARATOR = '/'
        port._filesystem = MockFileSystem()
        manager = Manager(port=port, options=MockOptions(), printer=Mock())

        manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
        result_summary = ResultSummary(expectations=Mock(), test_files=[])
        result_summary.unexpected_failures = 100
        result_summary.unexpected_crashes = 50
        result_summary.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        manager._interrupt_if_at_failure_limits(result_summary)

        # No exception when we haven't hit the limit yet.
        manager._options.exit_after_n_failures = 101
        manager._options.exit_after_n_crashes_or_timeouts = 101
        manager._interrupt_if_at_failure_limits(result_summary)

        # Interrupt if we've exceeded either limit:
        manager._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)

        manager._options.exit_after_n_crashes_or_timeouts = None
        manager._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
Example 2
    def test_interrupt_if_at_failure_limits(self):
        port = Mock()  # FIXME: This should be a tighter mock.
        port.TEST_PATH_SEPARATOR = '/'
        port._filesystem = MockFileSystem()
        manager = Manager(port=port, options=MockOptions(), printer=Mock())

        manager._options = MockOptions(exit_after_n_failures=None,
                                       exit_after_n_crashes_or_timeouts=None)
        result_summary = ResultSummary(expectations=Mock(), test_files=[])
        result_summary.unexpected_failures = 100
        result_summary.unexpected_crashes = 50
        result_summary.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        manager._interrupt_if_at_failure_limits(result_summary)

        # No exception when we haven't hit the limit yet.
        manager._options.exit_after_n_failures = 101
        manager._options.exit_after_n_crashes_or_timeouts = 101
        manager._interrupt_if_at_failure_limits(result_summary)

        # Interrupt if we've exceeded either limit:
        manager._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException,
                          manager._interrupt_if_at_failure_limits,
                          result_summary)

        manager._options.exit_after_n_crashes_or_timeouts = None
        manager._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException,
                          manager._interrupt_if_at_failure_limits,
                          result_summary)
Example 3
    def test_interrupt_if_at_failure_limits(self):
        runner = self._runner()
        runner._options.exit_after_n_failures = None
        runner._options.exit_after_n_crashes_or_timeouts = None
        test_names = ['passes/text.html', 'passes/image.html']
        runner._test_inputs = [TestInput(test_name, 6000) for test_name in test_names]

        result_summary = ResultSummary(TestExpectations(runner._port, test_names), len(test_names))
        result_summary.unexpected_failures = 100
        result_summary.unexpected_crashes = 50
        result_summary.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        runner._interrupt_if_at_failure_limits(result_summary)

        # No exception when we haven't hit the limit yet.
        runner._options.exit_after_n_failures = 101
        runner._options.exit_after_n_crashes_or_timeouts = 101
        runner._interrupt_if_at_failure_limits(result_summary)

        # Interrupt if we've exceeded either limit:
        runner._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
        self.assertEqual(result_summary.results['passes/text.html'].type, test_expectations.SKIP)
        self.assertEqual(result_summary.results['passes/image.html'].type, test_expectations.SKIP)

        runner._options.exit_after_n_crashes_or_timeouts = None
        runner._options.exit_after_n_failures = 10
        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
Example 4
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._needs_http = needs_http
        self._needs_websockets = needs_websockets
        self._retrying = retrying

        result_summary = ResultSummary(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_result_summary = result_summary
        self._remaining_locked_shards = []
        self._has_http_lock = False
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if not retrying:
            self._printer.print_expected(result_summary, self._expectations.get_tests_with_result_type)

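        # Pre-populate the summary with an expected SKIP result for each skipped test.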
        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            result_summary.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

        # FIXME: We don't have a good way to coordinate the workers so that
        # they don't try to run the shards that need a lock if we don't actually
        # have the lock. The easiest solution at the moment is to grab the
        # lock at the beginning of the run, and then run all of the locked
        # shards first. This minimizes the time spent holding the lock, but
        # means that we won't be running tests while we're waiting for the lock.
        # If this becomes a problem in practice we'll need to change this.

        all_shards = locked_shards + unlocked_shards
        self._remaining_locked_shards = locked_shards
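        # Per the FIXME above, grab the http lock and start the servers up front if any shard will need them.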
        if self._port.requires_http_server() or (locked_shards and self._options.http):
            self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

        if self._options.dry_run:
            return result_summary

        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

        try:
            with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            result_summary.interrupted = True
Example 5
    def test_update_summary_with_result(self):
        host = MockHost()
        port = host.port_factory.get('test-win-xp')
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(
            port,
            tests=[test],
            expectations='WONTFIX : failures/expected/reftest.html = IMAGE',
            test_config=port.test_configuration())
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        manager = Manager(port=port,
                          options=MockOptions(
                              pixel_tests=False,
                              exit_after_n_failures=None,
                              exit_after_n_crashes_or_timeouts=None),
                          printer=Mock())
        manager._expectations = expectations
        result_summary = ResultSummary(expectations=expectations,
                                       test_files=[test])
        result = TestResult(
            test_name=test,
            failures=[test_failures.FailureReftestMismatchDidNotOccur()])
        manager._update_summary_with_result(result_summary, result)
        self.assertEqual(1, result_summary.expected)
        self.assertEqual(0, result_summary.unexpected)
Example 6
    def _prepare_lists(self):
        tests_to_skip = self._finder.skip_tests(self._paths, self._test_names,
                                                self._expectations,
                                                self._http_tests())
        self._test_names = [
            test for test in self._test_names if test not in tests_to_skip
        ]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            self._test_names.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(self._test_names)

        self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(
            self._test_names)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

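        # Expand the list for repeat_each (each test repeated back-to-back) and iterations (the whole list repeated).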
        if self._options.repeat_each > 1:
            list_with_repetitions = []
            for test in self._test_names:
                list_with_repetitions += ([test] * self._options.repeat_each)
            self._test_names = list_with_repetitions

        if self._options.iterations > 1:
            self._test_names = self._test_names * self._options.iterations

        iterations = self._options.repeat_each * self._options.iterations
        return ResultSummary(self._expectations, set(self._test_names),
                             iterations, tests_to_skip)
Example 7
    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        summary = ResultSummary(self._expectations, len(tests_to_run) * self._options.repeat_each * self._options.iterations + len(tests_to_skip))

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            summary.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))
        return summary, tests_to_run
Example 8
    def test_update_summary_with_result(self):
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(runner._port, tests=[test])
        runner._expectations = expectations

        result_summary = ResultSummary(expectations, [test], 1, set())
        result = TestResult(
            test_name=test,
            failures=[test_failures.FailureReftestMismatchDidNotOccur()],
            reftest_type=['!='])
        runner._update_summary_with_result(result_summary, result)
        self.assertEqual(1, result_summary.expected)
        self.assertEqual(0, result_summary.unexpected)

        result_summary = ResultSummary(expectations, [test], 1, set())
        result = TestResult(test_name=test, failures=[], reftest_type=['=='])
        runner._update_summary_with_result(result_summary, result)
        self.assertEqual(0, result_summary.expected)
        self.assertEqual(1, result_summary.unexpected)
Example 9
    def test_look_for_new_crash_logs(self):
        # Smoke test: _look_for_new_crash_logs() should complete without raising on a fresh ResultSummary.
        def get_manager_with_tests(test_names):
            host = MockHost()
            port = host.port_factory.get('test-mac-leopard')
            manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
            manager._collect_tests(test_names)
            return manager
        host = MockHost()
        port = host.port_factory.get('test-mac-leopard')
        tests = ['failures/expected/crash.html']
        expectations = test_expectations.TestExpectations(port, tests)
        rs = ResultSummary(expectations, tests, 1, set())
        manager = get_manager_with_tests(tests)
        manager._look_for_new_crash_logs(rs, time.time())
Example 10
    def _result_summary(self, runner, tests):
        # Shared helper: build a ResultSummary for the given tests (one iteration, nothing skipped).
        return ResultSummary(TestExpectations(runner._port, tests), tests, 1,
                             set())
Example 11
    def get_result_summary(self, port, test_names, expectations_str):
        port.expectations_dict = lambda: {'': expectations_str}
        expectations = test_expectations.TestExpectations(port, test_names)
        return test_names, ResultSummary(expectations, test_names, 1, set()), expectations
Example 12
    def run(self, args):
        """Run all our tests on all our test files and return the number of unexpected results (0 == success)."""
        self._printer.write_update("Collecting tests ...")
        try:
            self._paths, self._test_names = self._collect_tests(args)
        except IOError as exception:
            # This is raised if --test-list doesn't exist
            return -1

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(
            self._port, self._test_names)

        num_all_test_files_found = len(self._test_names)
        result_summary = self._prepare_lists()

        # Check to make sure we're not skipping every test.
        if not self._test_names:
            _log.critical('No tests to run.')
            return -1

        self._printer.print_found(num_all_test_files_found,
                                  len(self._test_names),
                                  self._options.repeat_each,
                                  self._options.iterations)
        self._printer.print_expected(
            result_summary, self._expectations.get_tests_with_result_type)

        if not self._set_up_run():
            return -1

        start_time = time.time()

        interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = \
            self._run_tests(self._test_names, result_summary, int(self._options.child_processes))

        # We exclude the crashes from the list of results to retry, because
        # we want to treat even a potentially flaky crash as an error.

        failures = self._get_failures(
            result_summary,
            include_crashes=self._port.should_retry_crashes(),
            include_missing=False)
        retry_summary = result_summary
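        # Retry unexpected failures at most once, with a single child process, if retry_failures is set and the run was not interrupted.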
        while (len(failures) and self._options.retry_failures
               and not self._retrying and not interrupted
               and not keyboard_interrupted):
            _log.info('')
            _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
            _log.info('')
            self._retrying = True
            retry_summary = ResultSummary(self._expectations, failures.keys(),
                                          1, set())
            # Note that we intentionally ignore the return value here.
            self._run_tests(failures.keys(), retry_summary, 1)
            failures = self._get_failures(retry_summary,
                                          include_crashes=True,
                                          include_missing=True)

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._look_for_new_crash_logs(result_summary, start_time)
        self._look_for_new_crash_logs(retry_summary, start_time)
        self._clean_up_run()

        unexpected_results = summarize_results(self._port,
                                               self._expectations,
                                               result_summary,
                                               retry_summary,
                                               individual_test_timings,
                                               only_unexpected=True,
                                               interrupted=interrupted)

        self._printer.print_results(end_time - start_time, thread_timings,
                                    test_timings, individual_test_timings,
                                    result_summary, unexpected_results)

        # Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
        if keyboard_interrupted:
            raise KeyboardInterrupt

        # FIXME: remove record_results. It's just used for testing. There's no need
        # for it to be a commandline argument.
        if (self._options.record_results and not self._options.dry_run
                and not keyboard_interrupted):
            self._port.print_leaks_summary()
            # Write the same data to log files and upload generated JSON files to appengine server.
            summarized_results = summarize_results(self._port,
                                                   self._expectations,
                                                   result_summary,
                                                   retry_summary,
                                                   individual_test_timings,
                                                   only_unexpected=False,
                                                   interrupted=interrupted)
            self._upload_json_files(summarized_results, result_summary,
                                    individual_test_timings)

        # Write the summary to disk (results.html) and display it if requested.
        if not self._options.dry_run:
            self._copy_results_html_file()
            if self._options.show_results:
                self._show_results_html_file(result_summary)

        return self._port.exit_code_from_summarized_results(unexpected_results)