Code example #1
    def test_reftest_diff_image(self):
        """A write_test_result should call port.diff_image with tolerance=0 in case of FailureReftestMismatch."""
        used_tolerance_values = []

        class ImageDiffTestPort(TestPort):
            def diff_image(self,
                           expected_contents,
                           actual_contents,
                           tolerance=None):
                used_tolerance_values.append(tolerance)
                return (True, 1)

        host = MockHost()
        port = ImageDiffTestPort(host)
        test_name = 'failures/unexpected/reftest.html'
        test_reference_file = host.filesystem.join(
            port.layout_tests_dir(),
            'failures/unexpected/reftest-expected.html')
        driver_output1 = DriverOutput('text1', 'image1', 'imagehash1',
                                      'audio1')
        driver_output2 = DriverOutput('text2', 'image2', 'imagehash2',
                                      'audio2')
        failures = [test_failures.FailureReftestMismatch(test_reference_file)]
        test_result_writer.write_test_result(host.filesystem,
                                             ImageDiffTestPort(host),
                                             test_name, driver_output1,
                                             driver_output2, failures)
        self.assertEqual([0], used_tolerance_values)
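
The test above pins down one contract of write_test_result: when the failure list contains a FailureReftestMismatch, the image diff must be requested with tolerance=0, because reftests compare pixels exactly. A minimal sketch of that contract (an illustration only, not the actual writer implementation; the helper name is hypothetical):

    def diff_for_reftest_sketch(port, failures, expected_image, actual_image):
        # Hypothetical helper: if any failure is a reftest mismatch, force an
        # exact pixel comparison by passing tolerance=0 to the port's differ.
        if any(type(f).__name__ == 'FailureReftestMismatch' for f in failures):
            return port.diff_image(expected_image, actual_image, tolerance=0)
        return None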
Code example #2
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input())
        total_test_time = 0
        reference_output = None
        test_result = None

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_output = self._driver.run_test(
                DriverInput(reference_test_name, self._timeout, test_output.image_hash, should_run_pixel_test=True)
            )
            test_result = self._compare_output_with_reference(
                test_output, reference_output, reference_filename, expectation == "!="
            )

            if (expectation == "!=" and test_result.failures) or (expectation == "==" and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert reference_output
        test_result_writer.write_test_result(
            self._filesystem, self._port, self._test_name, test_output, reference_output, test_result.failures
        )
        return TestResult(
            self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr
        )
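
The putAllMismatchBeforeMatch = sorted alias in the loop above relies on plain tuple ordering: tuples compare by their first element, and '!=' sorts before '==' because '!' (0x21) precedes '=' (0x3D) in ASCII. A standalone check (the reference file names are made up for illustration):

    reference_files = [('==', 'ref-match.html'), ('!=', 'ref-mismatch.html')]
    print(sorted(reference_files))
    # [('!=', 'ref-mismatch.html'), ('==', 'ref-match.html')]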
Code example #3
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = test_output.test_time
        expected_output = None
        test_result = None

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text, image=None, image_hash=None, audio=None)

        # If the test crashed, or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            test_result = self._compare_output(expected_text_output, test_output)

            if test_output.crash:
                test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                                     self._test_name, test_output, expected_text_output, test_result.failures)
            return test_result

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            if self._port.lookup_virtual_test_base(self._test_name):
                args = self._port.lookup_virtual_reference_args(self._test_name)
            else:
                args = self._port.lookup_physical_reference_args(self._test_name)
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name, self._timeout,
                                       image_hash=test_output.image_hash, should_run_pixel_test=True, args=args)
            expected_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
            total_test_time += expected_output.test_time
            test_result = self._compare_output_with_reference(
                expected_output, test_output, reference_filename, expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break

        assert expected_output

        if expected_text:
            text_output = DriverOutput(text=test_output.text, image=None, image_hash=None, audio=None)
            test_result.failures.extend(self._compare_output(expected_text_output, text_output).failures)
            expected_output.text = expected_text_output.text

        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                             self._test_name, test_output, expected_output, test_result.failures)

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
        return TestResult(self._test_name, test_result.failures, total_test_time,
                          test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid,
                          references=reference_test_names)
Code example #4
    def _run_compare_test(self):
        driver_output = self._driver.run_test(self._driver_input(),
                                              self._stop_when_done)
        expected_driver_output = self._expected_driver_output()

        if self._options.ignore_metrics:
            expected_driver_output.strip_metrics()
            driver_output.strip_metrics()

        patterns = self._port.logging_patterns_to_strip()
        expected_driver_output.strip_patterns(patterns)
        driver_output.strip_patterns(patterns)

        driver_output.strip_stderror_patterns(
            self._port.stderr_patterns_to_strip())

        test_result = self._compare_output(expected_driver_output,
                                           driver_output)
        if self._options.new_test_results:
            self._add_missing_baselines(test_result, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port,
                                             self._results_directory,
                                             self._test_name, driver_output,
                                             expected_driver_output,
                                             test_result.failures)
        return test_result
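
The symmetric stripping above (metrics, logging patterns, stderr patterns applied to both the expected and the actual output) exists so that nondeterministic noise never produces a spurious diff. A rough sketch of what pattern stripping presumably amounts to, assuming each entry is a (compiled regex, replacement) pair; this is an assumption about the helper, not its documented API:

    import re

    def strip_patterns_sketch(text, patterns):
        # Hypothetical equivalent of strip_patterns: apply each substitution
        # to the text so volatile values (pids, timings) compare equal.
        for regex, replacement in patterns:
            text = regex.sub(replacement, text)
        return text

    print(strip_patterns_sketch('pid 1234 ready', [(re.compile(r'pid \d+'), 'pid <num>')]))
    # pid <num> ready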
Code example #5
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = 0
        reference_output = None
        test_result = None

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert reference_output
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
        reftest_type = set([reference_file[0] for reference_file in self._reference_files])
        return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names)
Code example #6
 def _run_rebaseline(self):
     driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
     failures = self._handle_error(driver_output)
     test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
     # FIXME: If the test crashed or timed out, it might be better to avoid
     # writing new baselines.
     self._overwrite_baselines(driver_output)
     return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
Code example #7
    def _run_compare_test(self):
        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        expected_driver_output = self._expected_driver_output()

        test_result = self._compare_output(expected_driver_output, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                             self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result
Code example #8
 def _run_rebaseline(self):
     driver_output = self._driver.run_test(self._driver_input())
     failures = self._handle_error(driver_output)
     test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, None, failures)
     # FIXME: If the test crashed or timed out, it might be better to avoid
     # writing new baselines.
     self._overwrite_baselines(driver_output)
     return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
Code example #9
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = 0
        reference_output = None
        test_result = None

        # If the test crashed, or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            expected_driver_output = DriverOutput(text=None, image=None, image_hash=None, audio=None)
            return self._compare_output(expected_driver_output, test_output)

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            reference_output = self._driver.run_test(
                DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done
            )
            test_result = self._compare_output_with_reference(
                reference_output, test_output, reference_filename, expectation == "!="
            )

            if (expectation == "!=" and test_result.failures) or (expectation == "==" and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert reference_output
        test_result_writer.write_test_result(
            self._filesystem,
            self._port,
            self._results_directory,
            self._test_name,
            test_output,
            reference_output,
            test_result.failures,
        )

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
        return TestResult(
            self._test_name,
            test_result.failures,
            total_test_time + test_result.test_run_time,
            test_result.has_stderr,
            reftest_type=reftest_type,
            pid=test_result.pid,
            references=reference_test_names,
            device_offline=reference_output.device_offline,
        )
Code example #10
 def run_test(self, failures=None, files=None):
     failures = failures or []
     host = MockSystemHost()
     host.filesystem.files = files or {}
     port = TestPort(host=host, port_name='test-mac-mac10.11', options=optparse.Values())
     actual_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
     expected_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
     write_test_result(host.filesystem, port, '/tmp', 'foo.html', actual_output, expected_output, failures)
     return host.filesystem.written_files
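
A hedged usage sketch for the helper above: MockSystemHost records every write, so a test can assert on the returned written_files mapping. That an empty failure list produces no artifacts is an assumption for illustration, not documented behavior:

    # Assumes write_test_result writes nothing when there are no failures.
    written = self.run_test(failures=[], files={})
    self.assertEqual({}, written)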
Code example #11
File: single_test_runner.py  Project: esprehn/mojo
    def _run_compare_test(self):
        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        expected_driver_output = self._expected_driver_output()

        test_result = self._compare_output(expected_driver_output, driver_output)
        if self._should_add_missing_baselines:
            self._add_missing_baselines(test_result, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result
Code example #12
 def _run_sanitized_test(self):
     # running a sanitized test means that we ignore the actual test output and just look
     # for timeouts and crashes (real or forced by the driver). Most crashes should
     # indicate problems found by a sanitizer (ASAN, LSAN, etc.), but we will report
     # on other crashes and timeouts as well in order to detect at least *some* basic failures.
     driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
     expected_driver_output = self._expected_driver_output()
     failures = self._handle_error(driver_output)
     test_result = TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                              pid=driver_output.pid)
     test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
     return test_result
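
For context, a minimal sketch of what a _handle_error-style helper presumably does here (an assumption for illustration; the real helper is elsewhere in single_test_runner.py): it maps the driver's crash and timeout flags to failure objects and ignores ordinary output, which is why a sanitized run reports only crashes and timeouts:

    def handle_error_sketch(driver_output, test_failures_module):
        # Hypothetical mapping of driver flags to failure objects.
        if driver_output.crash:
            return [test_failures_module.FailureCrash()]
        if driver_output.timeout:
            return [test_failures_module.FailureTimeout()]
        return []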
Code example #13
 def _run_compare_test(self):
     driver_output = self._driver.run_test(self._driver_input())
     expected_driver_output = self._expected_driver_output()
     test_result = self._compare_output(driver_output,
                                        expected_driver_output)
     if self._options.new_test_results:
         self._add_missing_baselines(test_result, driver_output)
     test_result_writer.write_test_result(self._port, self._test_name,
                                          driver_output,
                                          expected_driver_output,
                                          test_result.failures)
     return test_result
Code example #14
 def _run_sanitized_test(self):
     # running a sanitized test means that we ignore the actual test output and just look
     # for timeouts and crashes (real or forced by the driver). Most crashes should
     # indicate problems found by a sanitizer (ASAN, LSAN, etc.), but we will report
     # on other crashes and timeouts as well in order to detect at least *some* basic failures.
     driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
     expected_driver_output = self._expected_driver_output()
     failures = self._handle_error(driver_output)
     test_result = TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                              pid=driver_output.pid)
     test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
     return test_result
Code example #15
    def _run_compare_test(self):
        driver_output = self._driver.run_test(self._driver_input())
        expected_driver_output = self._expected_driver_output()

        if self._options.ignore_metrics:
            expected_driver_output.strip_metrics()
            driver_output.strip_metrics()

        test_result = self._compare_output(driver_output, expected_driver_output)
        if self._options.new_test_results:
            self._add_missing_baselines(test_result, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result
Code example #16
 def _run_rebaseline(self):
     driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
     if self._options.reset_results:
         expected_driver_output = None
         failures = self._handle_error(driver_output)
     else:
         expected_driver_output = self._expected_driver_output()
         failures = self._compare_output(expected_driver_output, driver_output).failures
     test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                          self._test_name, driver_output, expected_driver_output, failures)
     # FIXME: If the test crashed or timed out, it might be better to avoid
     # writing new baselines.
     self._update_or_add_new_baselines(driver_output)
     return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                       pid=driver_output.pid, crash_site=driver_output.crash_site)
Code example #17
    def _run_compare_test(self):
        driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        expected_driver_output = self._expected_driver_output()

        if self._options.ignore_metrics:
            expected_driver_output.strip_metrics()
            driver_output.strip_metrics()

        patterns = self._port.logging_patterns_to_strip()
        expected_driver_output.strip_patterns(patterns)
        driver_output.strip_patterns(patterns)

        test_result = self._compare_output(expected_driver_output, driver_output)
        if self._options.new_test_results:
            self._add_missing_baselines(test_result, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result
Code example #18
    def test_reftest_diff_image(self):
        """A write_test_result should call port.diff_image with tolerance=0 in case of FailureReftestMismatch."""
        used_tolerance_values = []

        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                used_tolerance_values.append(tolerance)
                return (True, 1, None)

        host = MockHost()
        port = ImageDiffTestPort(host)
        test_name = 'failures/unexpected/reftest.html'
        test_reference_file = host.filesystem.join(port.layout_tests_dir(), 'failures/unexpected/reftest-expected.html')
        driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1')
        driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2')
        failures = [test_failures.FailureReftestMismatch(test_reference_file)]
        test_result_writer.write_test_result(host.filesystem, ImageDiffTestPort(host), port.results_directory(), test_name,
                                             driver_output1, driver_output2, failures)
        self.assertEqual([0], used_tolerance_values)
Code example #19
 def _run_rebaseline(self):
     """Similar to _run_compare_test(), but has the side effect of updating or adding baselines.
     This is called when --reset-results and/or --copy-baselines are specified in the command line.
     If --reset-results, in the returned result we treat baseline mismatch as success."""
     driver_output = self._driver.run_test(self._driver_input(),
                                           self._stop_when_done)
     expected_driver_output = self._expected_driver_output()
     actual_failures = self._compare_output(expected_driver_output,
                                            driver_output).failures
     failures = self._handle_error(
         driver_output) if self._options.reset_results else actual_failures
     test_result_writer.write_test_result(self._filesystem, self._port,
                                          self._results_directory,
                                          self._test_name, driver_output,
                                          expected_driver_output, failures)
     self._update_or_add_new_baselines(driver_output, actual_failures)
     return TestResult(self._test_name,
                       failures,
                       driver_output.test_time,
                       driver_output.has_stderr(),
                       pid=driver_output.pid,
                       crash_site=driver_output.crash_site)
Code example #20
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input())
        total_test_time = 0
        reference_output = None
        test_result = None

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        for expectation, reference_filename in putAllMismatchBeforeMatch(
                self._reference_files):
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_output = self._driver.run_test(
                DriverInput(reference_test_name,
                            self._timeout,
                            test_output.image_hash,
                            is_reftest=True))
            test_result = self._compare_output_with_reference(
                test_output, reference_output, reference_filename,
                expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (
                    expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert reference_output
        test_result_writer.write_test_result(self._port, self._test_name,
                                             test_output, reference_output,
                                             test_result.failures)
        return TestResult(self._test_name, test_result.failures,
                          total_test_time + test_result.test_run_time,
                          test_result.has_stderr)