Example #1
    @classmethod
    def _test_result_from_row(cls, row, table_title):
        test_name = unicode(row.find("a").string)
        failures = cls._failures_from_row(row, table_title)
        # TestResult is a class designed to work with new-run-webkit-tests.
        # old-run-webkit-tests does not save quite enough information in
        # results.html for us to parse.
        # FIXME: It's unclear if test_name should include LayoutTests or not.
        return test_results.TestResult(test_name, failures)
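For context, a hypothetical driver for the parser above, assuming row is a BeautifulSoup tag (the .find("a").string access suggests as much); the markup here is made up:

# Assumption: BeautifulSoup 3 under Python 2, as the unicode() call implies.
from BeautifulSoup import BeautifulSoup

html = '<tr><td><a href="t.html">fast/dom/t.html</a></td></tr>'
row = BeautifulSoup(html).find("tr")
print unicode(row.find("a").string)  # fast/dom/t.html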
Example #2
    def get_result(self, test, result_type=test_expectations.PASS, run_time=0):
        failures = []
        if result_type == test_expectations.TIMEOUT:
            failures = [test_failures.FailureTimeout()]
        elif result_type == test_expectations.CRASH:
            failures = [test_failures.FailureCrash()]
        path = self._port._filesystem.join(self._port.layout_tests_dir(), test)
        return test_results.TestResult(path,
                                       failures=failures,
                                       test_run_time=run_time)
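A hedged usage sketch of this factory helper; the test name and run time are made-up values:

# Hypothetical call site for get_result() above.
result = self.get_result('failures/expected/timeout.html',
                         test_expectations.TIMEOUT, run_time=1.5)
# result.failures now holds a single FailureTimeout instance.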
Example #3
    def test_parse_layout_test_results(self):
        failures = [
            test_failures.FailureMissingResult(),
            test_failures.FailureMissingImageHash(),
            test_failures.FailureMissingImage()
        ]
        testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
        expected_results = [test_results.TestResult(testname, failures)]

        results = LayoutTestResults._parse_results_html(
            self._example_results_html)
        self.assertEqual(expected_results, results)
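The assertEqual above only works if TestResult defines value equality. A minimal sketch of what that could look like (an assumption; the real class is not shown in these snippets):

# Hypothetical equality support; the actual TestResult implementation
# is not part of these examples.
class TestResult(object):
    def __init__(self, filename, failures=None, test_run_time=0):
        self.filename = filename
        self.failures = failures or []
        self.test_run_time = test_run_time

    def __eq__(self, other):
        return (self.filename == other.filename and
                self.failures == other.failures)

    def __ne__(self, other):
        return not (self == other)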
Example #4
    def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
        """Run a test in a separate thread, enforcing a hard time limit.

        Since we can only detect the termination of a thread, not any internal
        state or progress, we can only run per-test timeouts when running test
        files singly.

        Args:
          test_input: Object containing the test filename and timeout.
          thread_timeout_sec: Time to wait before killing the driver process.
        Returns:
          A TestResult
        """
        worker = self

        driver = worker._port.create_driver(worker._worker_number)
        driver.start()

        class SingleTestThread(threading.Thread):
            def run(self):
                self.result = worker._run_single_test(driver, test_input)

        thread = SingleTestThread()
        thread.start()
        thread.join(thread_timeout_sec)
        result = getattr(thread, 'result', None)
        if thread.isAlive():
            # If join() returned with the thread still running, the
            # DumpRenderTree is completely hung and there's nothing
            # more we can do with it.  We have to kill all the
            # DumpRenderTrees to free it up. If we're running more than
            # one DumpRenderTree thread, we'll end up killing the other
            # DumpRenderTrees too, introducing spurious crashes. We accept
            # that tradeoff in order to avoid losing the rest of this
            # thread's results.
            _log.error('Test thread hung: killing all DumpRenderTrees')

        driver.stop()

        if not result:
            result = test_results.TestResult(test_input.filename,
                                             failures=[],
                                             test_run_time=0)
        return result
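The join-with-timeout pattern in this example stands on its own; a minimal restatement under the same Python 2 threading API (the function and names are illustrative, not part of the original code):

import threading

def run_with_time_limit(fn, timeout_sec):
    # Run fn() in a worker thread and give up after timeout_sec,
    # mirroring the join()/isAlive() check above.
    class Worker(threading.Thread):
        def run(self):
            self.result = fn()
    worker = Worker()
    worker.start()
    worker.join(timeout_sec)
    if worker.isAlive():
        return None  # fn() hung; the thread is abandoned, as above
    return getattr(worker, 'result', None)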
Example #5
    def _mock_test_result(self, testname):
        return test_results.TestResult(testname,
                                       [test_failures.FailureTextMismatch()])
Example #6
    def prepare_lists_and_print_output(self):
        """Create appropriate subsets of test lists and returns a
        ResultSummary object. Also prints expected test counts.
        """

        # Remove skipped - both fixable and ignored - files from the
        # top-level list of files to test.
        num_all_test_files = len(self._test_files)
        self._printer.print_expected("Found:  %d tests" %
                                     (len(self._test_files)))
        if not num_all_test_files:
            _log.critical('No tests to run.')
            return None

        skipped = set()
        if num_all_test_files > 1 and not self._options.force:
            skipped = self._expectations.get_tests_with_result_type(
                           test_expectations.SKIP)
            self._test_files -= skipped

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        self._test_files_list = list(self._test_files)
        if self._options.randomize_order:
            random.shuffle(self._test_files_list)
        else:
            self._test_files_list.sort()

        # If the user wants to run only a subset of the tests, grab that
        # subset from the non-skipped tests.
        if self._options.run_chunk or self._options.run_part:
            chunk_value = self._options.run_chunk or self._options.run_part
            test_files = self._test_files_list
            try:
                (chunk_num, chunk_len) = chunk_value.split(":")
                chunk_num = int(chunk_num)
                assert(chunk_num >= 0)
                test_size = int(chunk_len)
                assert(test_size > 0)
            except (ValueError, AssertionError):
                _log.critical("invalid chunk '%s'" % chunk_value)
                return None

            # Get the number of tests
            num_tests = len(test_files)

            # Get the start offset of the slice.
            if self._options.run_chunk:
                chunk_len = test_size
                # In this case chunk_num can be very large; wrap it around
                # so the slice fits within the current number of tests.
                slice_start = (chunk_num * chunk_len) % num_tests
            else:
                # Validate the data.
                assert(test_size <= num_tests)
                assert(chunk_num <= test_size)

                # To compute chunk_len without skipping any tests, round
                # num_tests up to the next multiple of test_size so that
                # every part has the same length.
                rounded_tests = num_tests
                if rounded_tests % test_size != 0:
                    rounded_tests = (num_tests + test_size -
                                     (num_tests % test_size))

                chunk_len = rounded_tests / test_size
                slice_start = chunk_len * (chunk_num - 1)
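                # For example, with num_tests=10 and test_size=3 parts,
                # rounded_tests is 12 and chunk_len is 4, so the parts
                # get slices [0:4], [4:8], and [8:10].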
                # It is fine if this slice runs past num_tests; slice_end
                # is clamped below.

            # Get the end offset of the slice.
            slice_end = min(num_tests, slice_start + chunk_len)

            files = test_files[slice_start:slice_end]

            tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
                (slice_end - slice_start), slice_start, slice_end, num_tests)
            self._printer.print_expected(tests_run_msg)

            # If we reached the end and don't have enough tests, wrap
            # around and run some from the beginning.
            if slice_end - slice_start < chunk_len:
                extra = chunk_len - (slice_end - slice_start)
                extra_msg = ('   last chunk is partial, appending [0:%d]' %
                            extra)
                self._printer.print_expected(extra_msg)
                tests_run_msg += "\n" + extra_msg
                files.extend(test_files[0:extra])
            tests_run_filename = self._fs.join(self._results_directory, "tests_run.txt")
            self._fs.write_text_file(tests_run_filename, tests_run_msg)

            len_skip_chunk = int(len(files) * len(skipped) /
                                 float(len(self._test_files)))
            skip_chunk_list = list(skipped)[0:len_skip_chunk]
            skip_chunk = set(skip_chunk_list)
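            # That gives this chunk a proportional share of the skipped
            # tests: e.g. with 10 skipped tests, 100 remaining, and a
            # 20-file chunk, len_skip_chunk works out to 2.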

            # Update expectations so that the stats are calculated correctly.
            # We need to pass a list that includes the right number of
            # skipped files to parse_expectations() so that ResultSummary()
            # will get the correct stats. So, we add in the subset of
            # skipped files, and then subtract them back out.
            self._test_files_list = files + skip_chunk_list
            self._test_files = set(self._test_files_list)

            self.parse_expectations()

            self._test_files = set(files)
            self._test_files_list = files
        else:
            skip_chunk = skipped

        result_summary = ResultSummary(self._expectations,
            self._test_files | skip_chunk)
        self._print_expected_results_of_type(result_summary,
            test_expectations.PASS, "passes")
        self._print_expected_results_of_type(result_summary,
            test_expectations.FAIL, "failures")
        self._print_expected_results_of_type(result_summary,
            test_expectations.FLAKY, "flaky")
        self._print_expected_results_of_type(result_summary,
            test_expectations.SKIP, "skipped")

        if self._options.force:
            self._printer.print_expected('Running all tests, including '
                                         'skips (--force)')
        else:
            # Note that we don't actually run the skipped tests (they were
            # subtracted out of self._test_files, above), but we stub out the
            # results here so the statistics can remain accurate.
            for test in skip_chunk:
                result = test_results.TestResult(test)
                result.type = test_expectations.SKIP
                result_summary.add(result, expected=True)
        self._printer.print_expected('')

        # Check to make sure we didn't filter out all of the tests.
        if not self._test_files:
            _log.info("All tests are being skipped")
            return None

        return result_summary
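The --run-part arithmetic above is compact enough to misread, so here is a standalone sketch of just the slicing math; the function is illustrative, not part of the original code:

def part_slice(num_tests, chunk_num, test_size):
    # Round num_tests up to a multiple of test_size so no test falls
    # between parts, exactly as the method above does.
    rounded_tests = num_tests
    if rounded_tests % test_size != 0:
        rounded_tests = num_tests + test_size - (num_tests % test_size)
    chunk_len = rounded_tests // test_size
    slice_start = chunk_len * (chunk_num - 1)
    slice_end = min(num_tests, slice_start + chunk_len)
    return slice_start, slice_end

# part_slice(25, 4, 4) -> (21, 25): the fourth part is partial and would
# trigger the "last chunk is partial" wrap-around above.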