Example #1
    def _run_compare_test(self):
        """Runs the signle test and returns test result."""
        driver_output = self._driver.run_test(self._driver_input())
        expected_driver_output = self._expected_driver_output()

        test_result = self._compare_output(expected_driver_output, driver_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                             self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result
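
This and the next two examples build the expected output via _expected_driver_output(), which is not shown on this page. A plausible reconstruction, assuming the port exposes the usual expected_text/expected_image/expected_checksum/expected_audio baseline accessors (a sketch, not the verbatim source):

    def _expected_driver_output(self):
        # Assemble the stored baselines into a DriverOutput so it can be
        # diffed against the actual driver output.
        return DriverOutput(self._port.expected_text(self._test_name),
                            self._port.expected_image(self._test_name),
                            self._port.expected_checksum(self._test_name),
                            self._port.expected_audio(self._test_name))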
Example #2
    def _run_rebaseline(self):
        """Similar to _run_compare_test(), but has the side effect of updating or adding baselines.
        This is called when --reset-results and/or --copy-baselines are specified on the command line.
        With --reset-results, the returned result treats a baseline mismatch as success."""
        driver_output = self._driver.run_test(self._driver_input())
        expected_driver_output = self._expected_driver_output()
        actual_failures = self._compare_output(expected_driver_output, driver_output).failures
        failures = self._handle_error(driver_output) if self._options.reset_results else actual_failures
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                             self._test_name, driver_output, expected_driver_output, failures)
        self._update_or_add_new_baselines(driver_output, actual_failures)
        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                          pid=driver_output.pid, crash_site=driver_output.crash_site)
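
The rebaseline path persists the actual output as the new baseline via _update_or_add_new_baselines(), which is not shown here. A minimal sketch of the idea, assuming a hypothetical _save_baseline_data helper and the usual txt/png/wav suffix convention:

    def _update_or_add_new_baselines(self, driver_output, failures):
        # For each kind of output the driver actually produced, write it
        # back as the new baseline. The helper name and the suffix mapping
        # are assumptions for illustration.
        for suffix, data in (('txt', driver_output.text),
                             ('png', driver_output.image),
                             ('wav', driver_output.audio)):
            if data is not None:
                self._save_baseline_data(data, suffix)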
Example #3
    def _run_sanitized_test(self):
        # Running a sanitized test means that we ignore the actual test output and just look
        # for timeouts and crashes (real or forced by the driver). Most crashes should
        # indicate problems found by a sanitizer (ASan, LSan, etc.), but we also report
        # other crashes and timeouts in order to detect at least *some* basic failures.
        driver_output = self._driver.run_test(self._driver_input())
        expected_driver_output = self._expected_driver_output()
        failures = self._handle_error(driver_output)
        test_result = TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                                 pid=driver_output.pid, crash_site=driver_output.crash_site)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                             self._test_name, driver_output, expected_driver_output, test_result.failures)
        return test_result
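
On this path failures come only from _handle_error(), which Example #2 relies on as well. A hedged sketch of what such a helper likely does, mapping driver-level errors to failure objects (FailureCrash and FailureTimeout are real classes in the test_failures module; the body itself is reconstructed, and the real helper also carries details such as the crash site):

    def _handle_error(self, driver_output):
        # Only driver-level problems count here: a crash or a timeout becomes
        # a failure; ordinary output mismatches are ignored on this path.
        failures = []
        if driver_output.crash:
            failures.append(test_failures.FailureCrash())
        elif driver_output.timeout:
            failures.append(test_failures.FailureTimeout())
        return failures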
Example #4
    def run_test(self, failures=None, files=None, filename='foo.html'):
        failures = failures or []
        host = MockSystemHost()
        host.filesystem.files = files or {}
        port = TestPort(host=host,
                        port_name='test-mac-mac10.11',
                        options=optparse.Values())
        actual_output = DriverOutput(text='',
                                     image=None,
                                     image_hash=None,
                                     audio=None)
        expected_output = DriverOutput(text='',
                                       image=None,
                                       image_hash=None,
                                       audio=None)
        write_test_result(host.filesystem, port, '/tmp', filename,
                          actual_output, expected_output, failures)
        return host.filesystem
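
A possible way to use this helper in a test, assuming the writer produces per-test artifacts only for reported failures (the '/tmp' prefix matches the results directory passed above; the test name is hypothetical):

    def test_writes_nothing_for_passing_test(self):
        fs = self.run_test(failures=[])
        # With no failures to report, no per-test artifacts should appear
        # under the '/tmp' results directory.
        self.assertEqual([p for p in fs.files if p.startswith('/tmp/')], [])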
Example #5
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(),
                                            self._stop_when_done)
        total_test_time = test_output.test_time
        expected_output = None
        test_result = None

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text,
                                            image=None,
                                            image_hash=None,
                                            audio=None)

        # If the test crashed, or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            test_result = self._compare_output(expected_text_output,
                                               test_output)

            if test_output.crash:
                test_result_writer.write_test_result(
                    self._filesystem, self._port, self._results_directory,
                    self._test_name, test_output, expected_text_output,
                    test_result.failures)
            return test_result

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch reference matches the output, or if none of the
        # match references do. To minimize the number of references we have to check, we
        # run all of the mismatches first, then the matches, and short-circuit out as soon
        # as we can. Note that sorting by the expectation sorts "!=" before "==", so this
        # is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(
                self._reference_files):
            if self._port.lookup_virtual_test_base(self._test_name):
                args = self._port.lookup_virtual_reference_args(
                    self._test_name)
            else:
                args = self._port.lookup_physical_reference_args(
                    self._test_name)
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name,
                                       self._timeout_ms,
                                       image_hash=test_output.image_hash,
                                       should_run_pixel_test=True,
                                       args=args)
            expected_output = self._reference_driver.run_test(
                driver_input, self._stop_when_done)
            total_test_time += expected_output.test_time
            test_result = self._compare_output_with_reference(
                expected_output, test_output, reference_filename,
                expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (
                    expectation == '==' and not test_result.failures):
                break

        assert expected_output

        if expected_text:
            text_output = DriverOutput(text=test_output.text,
                                       image=None,
                                       image_hash=None,
                                       audio=None)
            text_compare_result = self._compare_output(expected_text_output,
                                                       text_output)
            test_result.failures.extend(text_compare_result.failures)
            test_result.has_repaint_overlay = text_compare_result.has_repaint_overlay
            expected_output.text = expected_text_output.text

        test_result_writer.write_test_result(self._filesystem, self._port,
                                             self._results_directory,
                                             self._test_name, test_output,
                                             expected_output,
                                             test_result.failures)

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(
            set([
                reference_file[0] for reference_file in self._reference_files
            ]))
        return TestResult(self._test_name,
                          test_result.failures,
                          total_test_time,
                          test_result.has_stderr,
                          reftest_type=reftest_type,
                          pid=test_result.pid,
                          crash_site=test_result.crash_site,
                          references=reference_test_names,
                          has_repaint_overlay=test_result.has_repaint_overlay)
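
The putAllMismatchBeforeMatch = sorted trick in Example #5 works because tuples sort by their first element and '!' (0x21) precedes '=' (0x3D) in ASCII, so '!=' expectations order before '=='. A quick standalone check (the file names are made up):

    refs = [('==', 'ref-expected.html'), ('!=', 'ref-mismatch.html')]
    print(sorted(refs))
    # [('!=', 'ref-mismatch.html'), ('==', 'ref-expected.html')]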