Example #1
    def test_update_summary_with_result(self):
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        runner._options.world_leaks = False
        test = 'failures/expected/reftest.html'
        leak_test = 'failures/expected/leak.html'
        expectations = TestExpectations(runner._port, tests=[test, leak_test])
        expectations.parse_all_expectations()
        runner._expectations = expectations

        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
        runner.update_summary_with_result(result)
        self.assertEqual(1, runner._current_run_results.expected)
        self.assertEqual(0, runner._current_run_results.unexpected)

        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(test, failures=[], reftest_type=['=='])
        runner.update_summary_with_result(result)
        self.assertEqual(0, runner._current_run_results.expected)
        self.assertEqual(1, runner._current_run_results.unexpected)

        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(leak_test, failures=[])
        runner.update_summary_with_result(result)
        self.assertEqual(1, runner._current_run_results.expected)
        self.assertEqual(0, runner._current_run_results.unexpected)
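
For context, a hedged sketch of the classification this test pins down: the runner asks the expectations model whether the result matches an expected outcome, treating a reftest mismatch as a pixel failure even when pixel_tests=False. The method and helper names below are assumptions modeled on the surrounding snippets, not the verbatim implementation.

    # Sketch only: matches_an_expected_result and is_reftest_failure are
    # assumed helpers; the real signatures may differ.
    def update_summary_with_result(self, result):
        pixel_tests_enabled = (self._options.pixel_tests
                               or test_failures.is_reftest_failure(result.failures))
        expected = self._expectations.matches_an_expected_result(
            result.test_name, result.type,
            pixel_tests_enabled, self._options.world_leaks)
        self._current_run_results.add(
            result, expected, self._test_is_slow(result.test_name))
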
    def test_interrupt_if_at_failure_limits(self):
        runner = self._runner()
        runner._options.exit_after_n_failures = None
        runner._options.exit_after_n_crashes_or_timeouts = None
        test_names = ['passes/text.html', 'passes/image.html']
        runner._test_inputs = [TestInput(test_name, 6000) for test_name in test_names]

        run_results = TestRunResults(TestExpectations(runner._port, test_names), len(test_names))
        run_results.unexpected_failures = 100
        run_results.unexpected_crashes = 50
        run_results.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        runner._interrupt_if_at_failure_limits(run_results)

        # No exception when we haven't hit the limit yet.
        runner._options.exit_after_n_failures = 101
        runner._options.exit_after_n_crashes_or_timeouts = 101
        runner._interrupt_if_at_failure_limits(run_results)

        # Interrupt if we've exceeded either limit:
        runner._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
        self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
        self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)

        runner._options.exit_after_n_crashes_or_timeouts = None
        runner._options.exit_after_n_failures = 10
        exception = self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
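
A hedged sketch of the behavior the assertions above verify: once a limit is reached, every test that has not yet produced a result is recorded as an expected SKIP before the interrupt is raised. Only the SKIP marking and the exception are confirmed by the test; the helper structure is an assumption.

    # Sketch only, inferred from the test above.
    def _interrupt_if_at_failure_limits(self, run_results):
        def interrupt_if_at_failure_limit(limit, failure_count, message):
            if limit is None or failure_count < limit:
                return
            # Mark any test that never produced a result as skipped.
            for test_input in self._test_inputs:
                if test_input.test_name not in run_results.results_by_name:
                    result = test_results.TestResult(test_input.test_name)
                    result.type = test_expectations.SKIP
                    run_results.add(result, expected=True, test_is_slow=False)
            raise TestRunInterruptedException(message)

        interrupt_if_at_failure_limit(
            self._options.exit_after_n_failures,
            run_results.unexpected_failures,
            'Exiting early after %d failures.' % run_results.unexpected_failures)
        interrupt_if_at_failure_limit(
            self._options.exit_after_n_crashes_or_timeouts,
            run_results.unexpected_crashes + run_results.unexpected_timeouts,
            'Exiting early after %d crashes and %d timeouts.' % (
                run_results.unexpected_crashes, run_results.unexpected_timeouts))
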
Example #3
    def test_interrupt_if_at_failure_limits(self):
        runner = self._runner()
        runner._options.exit_after_n_failures = None
        runner._options.exit_after_n_crashes_or_timeouts = None
        test_names = ['passes/text.html', 'passes/image.html']
        runner._test_inputs = [TestInput(Test(test_name), 6000) for test_name in test_names]

        expectations = TestExpectations(runner._port, test_names)
        expectations.parse_all_expectations()
        run_results = TestRunResults(expectations, len(test_names))
        run_results.unexpected_failures = 100
        run_results.unexpected_crashes = 50
        run_results.unexpected_timeouts = 50
        # No exception when the exit_after* options are None.
        runner._interrupt_if_at_failure_limits(run_results)

        # No exception when we haven't hit the limit yet.
        runner._options.exit_after_n_failures = 101
        runner._options.exit_after_n_crashes_or_timeouts = 101
        runner._interrupt_if_at_failure_limits(run_results)

        # Interrupt if we've exceeded either limit:
        runner._options.exit_after_n_crashes_or_timeouts = 10
        self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
        self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
        self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)

        runner._options.exit_after_n_crashes_or_timeouts = None
        runner._options.exit_after_n_failures = 10
        exception = self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
    def run_tests(
        self,
        expectations,
        test_inputs,
        tests_to_skip,
        num_workers,
        needs_http,
        needs_websockets,
        needs_web_platform_test_server,
        retrying,
    ):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._needs_http = needs_http
        self._needs_websockets = needs_websockets
        self._needs_web_platform_test_server = needs_web_platform_test_server
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(run_results, self._expectations.model().get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update("Sharding tests ...")
        all_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes), self._options.fully_parallel
        )

        if (self._needs_http and self._options.http) or self._needs_web_platform_test_server:
            self.start_servers()

        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update("Starting %s ..." % grammar.pluralize(num_workers, "worker"))

        try:
            with message_pool.get(
                self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host
            ) as pool:
                pool.run(("test_list", shard.name, shard.test_inputs) for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
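
The handler above only reads e.reason, so a minimal sketch of the exception suffices; the __reduce__ hook (an assumption) keeps it picklable when it crosses the worker pool's process boundary.

    class TestRunInterruptedException(Exception):
        """Raised when a test run should stop before all tests have finished."""

        def __init__(self, reason):
            super(TestRunInterruptedException, self).__init__(reason)
            self.reason = reason

        def __reduce__(self):
            return self.__class__, (self.reason,)
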
Example #5
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  needs_http, needs_websockets, needs_web_platform_test_server,
                  retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._needs_http = needs_http
        self._needs_websockets = needs_websockets
        self._needs_web_platform_test_server = needs_web_platform_test_server
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations,
                                     len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(
                run_results,
                self._expectations.model().get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result,
                            expected=True,
                            test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        all_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        if (self._needs_http and
                self._options.http) or self._needs_web_platform_test_server:
            self.start_servers()

        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize(num_workers, "worker"))

        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.worker_startup_delay_secs(),
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
Example #6
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._needs_http = needs_http
        self._needs_websockets = needs_websockets
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._remaining_locked_shards = []
        self._has_http_lock = False
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

        # FIXME: We don't have a good way to coordinate the workers so that
        # they don't try to run the shards that need a lock if we don't actually
        # have the lock. The easiest solution at the moment is to grab the
        # lock at the beginning of the run, and then run all of the locked
        # shards first. This minimizes the time spent holding the lock, but
        # means that we won't be running tests while we're waiting for the lock.
        # If this becomes a problem in practice we'll need to change this.

        all_shards = locked_shards + unlocked_shards
        self._remaining_locked_shards = locked_shards
        if locked_shards and self._options.http:
            self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

        try:
            with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations,
                                     len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if not retrying:
            self._printer.print_expected(
                run_results, self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result,
                            expected=True,
                            test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards),
                                               len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.worker_startup_delay_secs(),
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
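
In every run_tests variant above, pool.run() fans ('test_list', name, inputs) tuples out to the workers. A hedged sketch of the receiving side, with names modeled on the webkitpy worker interface and heavily simplified:

    # Sketch only: the real worker does far more bookkeeping per test.
    class Worker(object):
        def __init__(self, caller):
            self._caller = caller

        def handle(self, message_name, source, test_list_name, test_inputs):
            assert message_name == 'test_list'
            for test_input in test_inputs:
                self._run_test(test_input)
            # Tell the manager the shard is done, e.g. so it can release the
            # http lock once the last locked shard finishes.
            self._caller.post('finished_test_list', test_list_name)
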
Example #8
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._needs_http = needs_http
        self._needs_websockets = needs_websockets
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._remaining_locked_shards = []
        self._has_http_lock = False
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(run_results, self._expectations.model().get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

        # FIXME: We don't have a good way to coordinate the workers so that
        # they don't try to run the shards that need a lock if we don't actually
        # have the lock. The easiest solution at the moment is to grab the
        # lock at the beginning of the run, and then run all of the locked
        # shards first. This minimizes the time spent holding the lock, but
        # means that we won't be running tests while we're waiting for the lock.
        # If this becomes a problem in practice we'll need to change this.

        all_shards = locked_shards + unlocked_shards
        self._remaining_locked_shards = locked_shards
        if locked_shards and self._options.http:
            self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' % grammar.pluralize(num_workers, "worker"))

        try:
            with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, retry_attempt):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._retry_attempt = retry_attempt
        self._shards_to_redo = []

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if retry_attempt < 1:
            self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs,
            int(self._options.child_processes), self._options.fully_parallel,
            self._options.run_singly or (self._options.batch_size == 1))

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))

        if retry_attempt < 1:
            self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)

            if self._shards_to_redo:
                num_workers -= len(self._shards_to_redo)
                if num_workers > 0:
                    with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
                        pool.run(('test_list', shard.name, shard.test_inputs) for shard in self._shards_to_redo)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
Example #10
    def test_look_for_new_crash_logs(self):
        def get_manager():
            host = MockHost()
            port = host.port_factory.get('test-mac-leopard')
            manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
            return manager

        host = MockHost()
        port = host.port_factory.get('test-mac-leopard')
        tests = ['failures/expected/crash.html']
        expectations = test_expectations.TestExpectations(port, tests)
        run_results = TestRunResults(expectations, len(tests))
        manager = get_manager()
        manager._look_for_new_crash_logs(run_results, time.time())
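
This test, like the variant in Example #12 below, exercises a second pass for crash logs that were written out too slowly to be captured during the run. A hedged sketch of that pass; the port hooks and result-writer call are modeled on webkitpy, not quoted verbatim:

    # Sketch only: gather (test, process, pid) triples for observed crashes,
    # then ask the port for logs newer than start_time and write them out.
    def _look_for_new_crash_logs(self, run_results, start_time):
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.items():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if isinstance(failure, test_failures.FailureCrash):
                    crashed_processes.append([test, failure.process_name, failure.pid])
        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        for test, crash_log in (crash_logs or {}).items():
            writer = TestResultWriter(self._port.host.filesystem, self._port,
                                      self._port.results_directory(), test)
            writer.write_crash_log(crash_log)
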
Example #11
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs

        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations,
                                     len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(
                run_results,
                self._expectations.model().get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result,
                            expected=True,
                            test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        all_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        self._printer.print_workers_and_shards(num_workers, len(all_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize(num_workers, "worker"))

        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.worker_startup_delay_secs(),
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            run_results.keyboard_interrupted = True
        except Exception as e:
            _log.debug('%s("%s") raised, exiting' %
                       (e.__class__.__name__, str(e)))
            raise

        return run_results
Example #12
    def test_look_for_new_crash_logs(self):
        def get_manager():
            host = MockHost()
            port = host.port_factory.get('test-mac-mac10.10')
            manager = Manager(port,
                              options=optparse.Values({
                                  'test_list': None,
                                  'http': True,
                                  'max_locked_shards': 1
                              }),
                              printer=FakePrinter())
            return manager

        host = MockHost()
        port = host.port_factory.get('test-mac-mac10.10')
        tests = ['failures/expected/crash.html']
        expectations = test_expectations.TestExpectations(port, tests)
        run_results = TestRunResults(expectations, len(tests))
        manager = get_manager()
        manager._look_for_new_crash_logs(run_results, time.time())
Example #13
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, retry_attempt):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._retry_attempt = retry_attempt
        self._shards_to_redo = []

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if retry_attempt < 1:
            self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs,
                                                                   int(self._options.child_processes), self._options.fully_parallel,
                                                                   self._options.run_singly or (self._options.batch_size == 1))

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))

        if retry_attempt < 1:
            self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)

            if self._shards_to_redo:
                num_workers -= len(self._shards_to_redo)
                if num_workers > 0:
                    with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
                        pool.run(('test_list', shard.name, shard.test_inputs) for shard in self._shards_to_redo)
        except TestRunInterruptedException as error:
            _log.warning(error.reason)
            run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            run_results.keyboard_interrupted = True
        except Exception as error:
            _log.debug('%s("%s") raised, exiting', error.__class__.__name__, error)
            raise
        finally:
            run_results.run_time = time.time() - start_time

        return run_results
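
The redo pass above only drains _shards_to_redo. A hedged sketch of how it might be filled, modeled on the device-failure handler in Chromium's runner; the handler name and TestShard constructor are assumptions:

    # Sketch only: when a worker's device dies, requeue the unfinished part
    # of its shard so the remaining workers can pick it up.
    def handle_device_failed(self, worker_name, list_name, remaining_tests):
        _log.warning('%s has failed', worker_name)
        if remaining_tests:
            self._shards_to_redo.append(TestShard(list_name, remaining_tests))
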
Example #14
    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped but was explicitly requested, run it anyway.
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this is currently identical to the following if, which likely isn't intended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Check to make sure we have no tests to run that are not skipped.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
Example #15
    def run_tests(self,
                  expectations,
                  test_inputs,
                  num_workers,
                  retrying,
                  device_type=None):
        self._expectations = expectations
        self._test_inputs = list(test_inputs)

        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs))
        self._current_run_results = run_results
        self.printer.num_tests = len(test_inputs)
        self.printer.num_started = 0

        if not retrying:
            self.printer.print_expected(
                run_results,
                self._expectations.model().get_tests_with_result_type)

        self.printer.write_update('Sharding tests ...')
        all_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        num_workers = min(num_workers, len(all_shards))
        self.printer.print_workers_and_shards(num_workers, len(all_shards))

        if self._options.dry_run:
            return run_results

        self.printer.write_update('Starting {} ...'.format(
            pluralize(num_workers, "worker")))

        devices = None
        if getattr(self._port, 'DEVICE_MANAGER', None):
            devices = dict(
                available_devices=self._port.DEVICE_MANAGER.AVAILABLE_DEVICES,
                initialized_devices=self._port.DEVICE_MANAGER.INITIALIZED_DEVICES,
            )

        try:
            LayoutTestRunner.instance = self
            with TaskPool(
                    workers=num_workers,
                    setup=setup_shard,
                    setupkwargs=dict(
                        port=self._port,
                        devices=devices,
                        results_directory=self._results_directory,
                        retrying=self._retrying,
                    ),
                    teardown=teardown_shard,
            ) as pool:
                for shard in all_shards:
                    pool.do(
                        run_shard,
                        shard,
                        callback=self._annotate_results_with_additional_failures,
                    )
                pool.wait()

        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
        except KeyboardInterrupt:
            self.printer.flush()
            self.printer.writeln('Interrupted, exiting ...')
            run_results.keyboard_interrupted = True
        except Exception as e:
            _log.debug('{}("{}") raised, exiting'.format(
                e.__class__.__name__, str(e)))
            raise
        finally:
            LayoutTestRunner.instance = None

        return run_results
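
For orientation, a hedged sketch of the module-level trio the TaskPool wires up. The names setup_shard, run_shard, and teardown_shard are confirmed by the snippet above; their bodies, and the per-process Worker they manage, are assumptions:

# Sketch only: per-process state lives in a module global because TaskPool
# workers run in separate processes.
_worker = None

def setup_shard(port=None, devices=None, results_directory=None, retrying=False):
    global _worker
    _worker = Worker(port, devices, results_directory, retrying)

def run_shard(shard):
    # The return value is delivered to the callback passed to pool.do().
    return _worker.run(shard.test_inputs)

def teardown_shard():
    _worker.teardown()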