def test_failing_tests_message(self):
     ews = TestEWS()
     results = lambda a: LayoutTestResults([test_results.TestResult("foo.html", failures=[test_failures.FailureTextMismatch()]),
                                             test_results.TestResult("bar.html", failures=[test_failures.FailureTextMismatch()])],
                                             did_exceed_test_failure_limit=False)
     message = "New failing tests:\nfoo.html\nbar.html"
     self._test_message(ews, results, message)
Example 2
    def test_failing_tests_message(self):
        # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__
        class TestEWS(AbstractEarlyWarningSystem):
            port_name = "win"  # Needs to be a port which port/factory understands.

        ews = TestEWS()
        ews.bind_to_tool(MockTool())
        ews._options = MockOptions(port=None, confirm=False)
        OutputCapture().assert_outputs(
            self,
            ews.begin_work_queue,
            expected_logs=self._default_begin_work_queue_logs(ews.name))
        task = Mock()
        task.results_from_patch_test_run = lambda a: LayoutTestResults(
            [
                test_results.TestResult("foo.html",
                                        failures=
                                        [test_failures.FailureTextMismatch()]),
                test_results.TestResult(
                    "bar.html", failures=[test_failures.FailureTextMismatch()])
            ],
            did_exceed_test_failure_limit=False)
        task.results_from_test_run_without_patch = lambda a: LayoutTestResults(
            [], did_exceed_test_failure_limit=False)
        patch = ews._tool.bugs.fetch_attachment(10000)
        self.assertMultiLineEqual(ews._failing_tests_message(task, patch),
                                  "New failing tests:\nbar.html\nfoo.html")
Example 3
 def test_basic(self):
     expected_results = [
         test_results.TestResult("svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html", [test_failures.FailureImageHashMismatch()], 0),
         test_results.TestResult("fast/dom/prototype-inheritance.html", [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()], 0),
     ]
     parsed_results = ParsedJSONResults(self._example_full_results_json)
     self.assertEqual(expected_results, parsed_results.test_results())
     self.assertTrue(parsed_results.did_exceed_test_failure_limit())
Example 4
 def test_basic(self):
     expected_results = [
         test_results.TestResult(
             "svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html",
             [test_failures.FailureImageHashMismatch()], 0),
         test_results.TestResult("fast/dom/prototype-inheritance.html",
                                 [test_failures.FailureTextMismatch()], 0),
     ]
     results = ResultsJSONParser.parse_results_json(
         self._example_full_results_json)
     self.assertEqual(expected_results, results)
Example 5
 def test_basic(self):
     self.maxDiff = None
     expected_results = [
         test_results.TestResult("svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html", [test_failures.FailureImageHashMismatch()], 0),
         test_results.TestResult("fast/dom/prototype-inheritance.html", [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()], 0),
         test_results.TestResult("fast/dom/prototype-strawberry.html", [test_failures.FailureDocumentLeak(['file:///Volumes/Data/worker/webkit/build/LayoutTests/fast/dom/prototype-strawberry.html'])], 0),
     ]
     expected_results.sort(key=lambda result: result.test_name)
     parsed_results = ParsedJSONResults(self._example_full_results_json)
     self.assertEqual(expected_results, parsed_results.test_results())
     self.assertTrue(parsed_results.did_exceed_test_failure_limit())
Example 6
 def results_from_patch_test_run(self, patch):
     return LayoutTestResults([
         test_results.TestResult(
             "mock_test_name.html",
             failures=[test_failures.FailureTextMismatch()])
     ],
                              did_exceed_test_failure_limit=False)
Example 7
    def test_parse_layout_test_results(self):
        failures = [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
        testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
        expected_results = [test_results.TestResult(testname, failures)]

        results = ORWTResultsHTMLParser.parse_results_html(self._example_results_html)
        self.assertEqual(expected_results, results)
Example 8
 def _test_result_from_row(cls, row, table_title):
     test_name = unicode(row.find("a").string)
     failures = cls._failures_from_row(row, table_title)
     # TestResult is a class designed to work with new-run-webkit-tests.
     # old-run-webkit-tests does not save quite enough information in results.html for us to parse.
     # FIXME: It's unclear if test_name should include LayoutTests or not.
     return test_results.TestResult(test_name, failures)
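The parser tests above (Examples 3, 4, 5, and 7) compare freshly built TestResult objects against parsed ones with assertEqual, which only works because TestResult compares by value. A minimal sketch of that assumption; the import path matches older WebKit checkouts of webkitpy and may differ in other revisions:

from webkitpy.layout_tests.models import test_failures, test_results

a = test_results.TestResult("fast/dom/example.html",
                            failures=[test_failures.FailureTextMismatch()])
b = test_results.TestResult("fast/dom/example.html",
                            failures=[test_failures.FailureTextMismatch()])
# Value equality is what the assertEqual calls in the tests above rely on.
assert a == b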
Example 9
 def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
     failures = []
     if result_type == test_expectations.TIMEOUT:
         failures = [test_failures.FailureTimeout()]
     elif result_type == test_expectations.CRASH:
         failures = [test_failures.FailureCrash()]
     return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
Example 10
 def _mark_interrupted_tests_as_skipped(self, run_results):
     for test_input in self._test_inputs:
         if test_input.test_name not in run_results.results_by_name:
             result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()])
             # FIXME: We probably need to loop here if there are multiple iterations.
             # FIXME: Also, these results are really neither expected nor unexpected. We probably
             # need a third type of result.
             run_results.add(result, expected=False, test_is_slow=self._test_is_slow(test_input.test_name))
Example 11
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs

        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations,
                                     len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(
                run_results,
                self._expectations.model().get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result,
                            expected=True,
                            test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        all_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        self._printer.print_workers_and_shards(num_workers, len(all_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize(num_workers, "worker"))

        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.worker_startup_delay_secs(),
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            run_results.keyboard_interrupted = True
        except Exception as e:
            _log.debug('%s("%s") raised, exiting' %
                       (e.__class__.__name__, str(e)))
            raise

        return run_results
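The skipped-test bookkeeping in Example 11 (and repeated in the later run_tests variants below) always takes the same three steps: build a bare TestResult, mark its type as SKIP, and record it as expected. A hypothetical helper distilling that pattern, assuming the same test_results/test_expectations imports as the snippets; real webkitpy does this inline:

def add_skipped_results(run_results, tests_to_skip, test_is_slow):
    """Record every skipped test as an expected SKIP in run_results."""
    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result,
                        expected=True,
                        test_is_slow=test_is_slow(test_name))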
Example 12
    def _run_test_in_another_thread(self, test_input, thread_timeout_sec,
                                    stop_when_done):
        """Run a test in a separate thread, enforcing a hard time limit.

        Since we can only detect the termination of a thread, not any internal
        state or progress, we can only run per-test timeouts when running test
        files singly.

        Args:
          test_input: Object containing the test filename and timeout
          thread_timeout_sec: time to wait before killing the driver process.
        Returns:
          A TestResult
        """
        worker = self

        driver = self._port.create_driver(
            int((TaskPool.Process.name).split('/')[-1]),
            self._port.get_option('no_timeout'))

        class SingleTestThread(threading.Thread):
            def __init__(self):
                threading.Thread.__init__(self)
                self.result = None

            def run(self):
                self.result = worker._run_single_test(driver, test_input,
                                                      stop_when_done)

        thread = SingleTestThread()
        thread.start()
        thread.join(thread_timeout_sec)
        result = thread.result
        failures = []
        if thread.is_alive():
            # If join() returned with the thread still running, the
            # DumpRenderTree is completely hung and there's nothing
            # more we can do with it.  We have to kill all the
            # DumpRenderTrees to free it up. If we're running more than
            # one DumpRenderTree thread, we'll end up killing the other
            # DumpRenderTrees too, introducing spurious crashes. We accept
            # that tradeoff in order to avoid losing the rest of this
            # thread's results.
            _log.error('Test thread hung: killing all DumpRenderTrees')
            failures = [test_failures.FailureTimeout()]
        else:
            failure_results = self._do_post_tests_work(driver)
            for failure_result in failure_results:
                if failure_result.test_name == result.test_name:
                    result.convert_to_failure(failure_result)

        driver.stop()

        if not result:
            result = test_results.TestResult(test_input,
                                             failures=failures,
                                             test_run_time=0)
        return result
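Stripped of the DumpRenderTree specifics, Example 12 (and the older variant in Example 19 below) is a watchdog pattern: run the work on a helper thread, join with a timeout, and substitute a timeout result if the thread is still alive. A self-contained sketch in plain Python; the function name and signature are illustrative:

import threading

def run_with_deadline(fn, timeout_sec, timeout_result):
    """Run fn() on a worker thread; return timeout_result if it hangs."""
    holder = {}

    def _target():
        holder['result'] = fn()

    thread = threading.Thread(target=_target)
    thread.start()
    thread.join(timeout_sec)
    if thread.is_alive():
        # As in the snippet, we can only detect that the thread is still
        # running; the caller must clean up whatever the callable drives.
        return timeout_result
    return holder.get('result')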
Example 13
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  needs_http, needs_websockets, needs_web_platform_test_server,
                  retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._needs_http = needs_http
        self._needs_websockets = needs_websockets
        self._needs_web_platform_test_server = needs_web_platform_test_server
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations,
                                     len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(
                run_results,
                self._expectations.model().get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result,
                            expected=True,
                            test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        all_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        if (self._needs_http and
                self._options.http) or self._needs_web_platform_test_server:
            self.start_servers()

        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize(num_workers, "worker"))

        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.worker_startup_delay_secs(),
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
Example 14
    def _do_post_tests_work(self, driver):
        additional_results = []
        if not driver:
            return additional_results

        post_test_output = driver.do_post_tests_work()
        if post_test_output:
            for test_name, doc_list in iteritems(post_test_output.world_leaks_dict):
                additional_results.append(test_results.TestResult(test_name, [test_failures.FailureDocumentLeak(doc_list)]))
        return additional_results
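The loop above turns each entry of the driver's world_leaks_dict into a FailureDocumentLeak result. A hedged usage sketch with a literal dict standing in for the driver output, reusing the leaked-document URL from Example 5 and the same test_results/test_failures imports:

world_leaks = {
    'fast/dom/prototype-strawberry.html':
        ['file:///Volumes/Data/worker/webkit/build/LayoutTests/fast/dom/prototype-strawberry.html'],
}
leak_results = [
    test_results.TestResult(name, [test_failures.FailureDocumentLeak(docs)])
    for name, docs in world_leaks.items()
]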
Example 15
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
                  retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations,
                                     len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if not retrying:
            self._printer.print_expected(
                run_results, self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result,
                            expected=True,
                            test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards),
                                               len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' %
                                   grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers,
                                  self._port.worker_startup_delay_secs(),
                                  self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs)
                         for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
Example 16
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._needs_http = needs_http
        self._needs_websockets = needs_websockets
        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._remaining_locked_shards = []
        self._has_http_lock = False
        self._printer.num_tests = len(test_inputs)
        self._printer.num_started = 0

        if not retrying:
            self._printer.print_expected(run_results, self._expectations.model().get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

        # FIXME: We don't have a good way to coordinate the workers so that
        # they don't try to run the shards that need a lock if we don't actually
        # have the lock. The easiest solution at the moment is to grab the
        # lock at the beginning of the run, and then run all of the locked
        # shards first. This minimizes the time spent holding the lock, but
        # means that we won't be running tests while we're waiting for the lock.
        # If this becomes a problem in practice we'll need to change this.

        all_shards = locked_shards + unlocked_shards
        self._remaining_locked_shards = locked_shards
        if locked_shards and self._options.http:
            self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

        num_workers = min(num_workers, len(all_shards))
        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' % grammar.pluralize(num_workers, "worker"))

        try:
            with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
Example 17
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout()]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch()]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash()]
    elif result_type == test_expectations.LEAK:
        failures = [test_failures.FailureLeak()]
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
Example 18
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout()]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch()]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash()]
    elif result_type == test_expectations.LEAK:
        failures = [
            test_failures.FailureDocumentLeak(
                ['http://localhost:8000/failures/expected/leak.html'])
        ]
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
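Both get_result() factories (Examples 17 and 18) map an expectation constant to a representative failure list. An illustrative call, assuming the helper and the test_expectations/test_failures modules are importable as in the snippets; the test names are made up:

timeout = get_result('failures/expected/timeout.html',
                     test_expectations.TIMEOUT, run_time=6.0)
crash = get_result('failures/unexpected/crash.html',
                   test_expectations.CRASH)
assert isinstance(timeout.failures[0], test_failures.FailureTimeout)
assert isinstance(crash.failures[0], test_failures.FailureCrash)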
Example 19
    def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
        """Run a test in a separate thread, enforcing a hard time limit.

        Since we can only detect the termination of a thread, not any internal
        state or progress, we can only run per-test timeouts when running test
        files singly.

        Args:
          test_input: Object containing the test filename and timeout
          thread_timeout_sec: time to wait before killing the driver process.
        Returns:
          A TestResult
        """
        worker = self

        driver = self._port.create_driver(self._worker_number)

        class SingleTestThread(threading.Thread):
            def __init__(self):
                threading.Thread.__init__(self)
                self.result = None

            def run(self):
                self.result = worker._run_single_test(driver, test_input)

        thread = SingleTestThread()
        thread.start()
        thread.join(thread_timeout_sec)
        result = thread.result
        if thread.is_alive():
            # If join() returned with the thread still running, the
            # DumpRenderTree is completely hung and there's nothing
            # more we can do with it.  We have to kill all the
            # DumpRenderTrees to free it up. If we're running more than
            # one DumpRenderTree thread, we'll end up killing the other
            # DumpRenderTrees too, introducing spurious crashes. We accept
            # that tradeoff in order to avoid losing the rest of this
            # thread's results.
            _log.error('Test thread hung: killing all DumpRenderTrees')

        driver.stop()

        if not result:
            result = test_results.TestResult(test_input.test_name,
                                             failures=[],
                                             test_run_time=0)
        return result
Example 20
    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append(
                    [test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes,
                                                       start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(
            crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port,
                                          self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in crashed_processes; if not, add it to run_results
                if not any(process[0] == test
                           for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))
Example 21
 def _mock_test_result(self, testname):
     return test_results.TestResult(testname,
                                    [test_failures.FailureTextMismatch()])
Example 22
 def test_result(self):
     # FIXME: Optionally pull in the test runtime from times_ms.json.
     return test_results.TestResult(self._test_name, self._failures())
Example 23
    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this is currently identical to the following 'if', which likely isn't intended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Check to make sure we have no tests to run that are not skipped.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
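One rule buried in run() above is that explicitly requested tests override their skip entries (the "run it anyway" branch near the top). A sketch of that rule using plain sets and a dict instead of webkitpy Test objects; the names are hypothetical:

def unskip_requested(args, skipped_tests_by_path, tests_to_run, tests_to_skip):
    """Move explicitly requested tests from the skip set to the run set."""
    for arg in args:
        if arg in skipped_tests_by_path:
            tests = skipped_tests_by_path.pop(arg)
            tests_to_run |= tests
            tests_to_skip -= tests
    return tests_to_run, tests_to_skip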
Example 24
    def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, retry_attempt):
        self._expectations = expectations
        self._test_inputs = test_inputs
        self._retry_attempt = retry_attempt
        self._shards_to_redo = []

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
        self._current_run_results = run_results
        self._printer.num_tests = len(test_inputs)
        self._printer.num_completed = 0

        if retry_attempt < 1:
            self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)

        for test_name in set(tests_to_skip):
            result = test_results.TestResult(test_name)
            result.type = test_expectations.SKIP
            run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

        self._printer.write_update('Sharding tests ...')
        locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs,
                                                                   int(self._options.child_processes), self._options.fully_parallel,
                                                                   self._options.run_singly or (self._options.batch_size == 1))

        # We don't have a good way to coordinate the workers so that they don't
        # try to run the shards that need a lock. The easiest solution is to
        # run all of the locked shards first.
        all_shards = locked_shards + unlocked_shards
        num_workers = min(num_workers, len(all_shards))

        if retry_attempt < 1:
            self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

        if self._options.dry_run:
            return run_results

        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

        start_time = time.time()
        try:
            with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
                pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)

            if self._shards_to_redo:
                num_workers -= len(self._shards_to_redo)
                if num_workers > 0:
                    with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
                        pool.run(('test_list', shard.name, shard.test_inputs) for shard in self._shards_to_redo)
        except TestRunInterruptedException as error:
            _log.warning(error.reason)
            run_results.interrupted = True
        except KeyboardInterrupt:
            self._printer.flush()
            self._printer.writeln('Interrupted, exiting ...')
            run_results.keyboard_interrupted = True
        except Exception as error:
            _log.debug('%s("%s") raised, exiting', error.__class__.__name__, error)
            raise
        finally:
            run_results.run_time = time.time() - start_time

        return run_results