def _compare_output_with_reference(self, driver_output1, driver_output2,
                                       reference_filename, mismatch):
        total_test_time = driver_output1.test_time + driver_output2.test_time
        has_stderr = driver_output1.has_stderr() or driver_output2.has_stderr()
        failures = []
        failures.extend(self._handle_error(driver_output1))
        if failures:
            # Don't continue any more if we already have crash or timeout.
            return TestResult(self._test_name, failures, total_test_time,
                              has_stderr)
        failures.extend(
            self._handle_error(driver_output2,
                               reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time,
                              has_stderr)

        if not driver_output1.image_hash and not driver_output2.image_hash:
            failures.append(
                test_failures.FailureReftestNoImagesGenerated(
                    reference_filename))
        elif mismatch:
            if driver_output1.image_hash == driver_output2.image_hash:
                failures.append(
                    test_failures.FailureReftestMismatchDidNotOccur(
                        reference_filename))
        elif driver_output1.image_hash != driver_output2.image_hash:
            failures.append(
                test_failures.FailureReftestMismatch(reference_filename))
        return TestResult(self._test_name, failures, total_test_time,
                          has_stderr)
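The example above encodes the per-reference pass/fail rule for reftests: when mismatch is False (an "==" reference) the two image hashes must be equal, when mismatch is True (a "!=" reference) they must differ, and if neither output produced an image hash the test fails outright. A minimal standalone sketch of that rule, using a hypothetical helper name purely for illustration:

    # Hypothetical helper (not part of the code above): the per-reference
    # pass/fail rule that _compare_output_with_reference implements.
    def reference_comparison_fails(mismatch, test_image_hash, reference_image_hash):
        if not test_image_hash and not reference_image_hash:
            return True  # no images were generated at all
        if mismatch:
            # "!=" reference: the renderings are expected to differ.
            return test_image_hash == reference_image_hash
        # "==" reference: the renderings are expected to match.
        return test_image_hash != reference_image_hash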
Example #2
    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
        total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
        failures = []
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            # Don't continue any more if we already have crash or timeout.
            return TestResult(self._test_name, failures, total_test_time, has_stderr)
        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)

        if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
        elif mismatch:
            # Calling image_hash is considered unnecessary for expected mismatch ref tests.
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            # ImageDiff has a hard coded color distance threshold even though tolerance=0 is specified.
            diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0)
            error_string = diff_result[2]
            if error_string:
                _log.warning('  %s : %s' % (self._test_name, error_string))
                failures.append(test_failures.FailureReftestMismatch(reference_filename))
                actual_driver_output.error = (actual_driver_output.error or '') + error_string
            elif diff_result[0]:
                failures.append(test_failures.FailureReftestMismatch(reference_filename))

        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
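Note that the examples do not agree on the return shape of the port's diff_image(): the example above reads a 3-tuple (index 0 is treated as a truthy "images differ" signal, which may be the diff image itself, and index 2 as the error string), while a later example unpacks a 2-tuple of (diff, err_str). The exact signature depends on the port and version in use; the sketch below simply mirrors how these examples interpret the result and is not part of any of them:

    # Illustration only: tolerate either return shape seen in these examples.
    def interpret_diff_result(diff_result):
        if len(diff_result) == 3:
            images_differ, _middle, error_string = diff_result  # 3-tuple shape
        else:
            images_differ, error_string = diff_result  # (diff, err_str) shape
        return images_differ, error_string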
Example #3
    def _compare_output(self, expected_driver_output, driver_output):
        failures = []
        failures.extend(self._handle_error(driver_output))

        if driver_output.crash:
            # Don't continue any more if we already have a crash.
            # In case of timeouts, we continue since we still want to see the text and image output.
            return TestResult(self._test_name,
                              failures,
                              driver_output.test_time,
                              driver_output.has_stderr(),
                              pid=driver_output.pid)

        failures.extend(
            self._compare_text(expected_driver_output.text,
                               driver_output.text))
        failures.extend(
            self._compare_audio(expected_driver_output.audio,
                                driver_output.audio))
        if self._should_run_pixel_test:
            failures.extend(
                self._compare_image(expected_driver_output, driver_output))
        return TestResult(self._test_name,
                          failures,
                          driver_output.test_time,
                          driver_output.has_stderr(),
                          pid=driver_output.pid)
Example #4
    def test_update_summary_with_result(self):
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        runner._options.world_leaks = False
        test = 'failures/expected/reftest.html'
        leak_test = 'failures/expected/leak.html'
        expectations = TestExpectations(runner._port, tests=[test, leak_test])
        expectations.parse_all_expectations()
        runner._expectations = expectations

        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
        runner.update_summary_with_result(result)
        self.assertEqual(1, runner._current_run_results.expected)
        self.assertEqual(0, runner._current_run_results.unexpected)

        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(test, failures=[], reftest_type=['=='])
        runner.update_summary_with_result(result)
        self.assertEqual(0, runner._current_run_results.expected)
        self.assertEqual(1, runner._current_run_results.unexpected)

        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(leak_test, failures=[])
        runner.update_summary_with_result(result)
        self.assertEqual(1, runner._current_run_results.expected)
        self.assertEqual(0, runner._current_run_results.unexpected)
Example #5
    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
        total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
        failures = []
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            # Don't continue any more if we already have crash or timeout.
            return TestResult(self._test_name, failures, total_test_time, has_stderr)
        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)

        if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
        elif mismatch:
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
                if not diff:
                    failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
                elif err_str:
                    _log.error(err_str)
                else:
                    _log.warning("  %s -> ref test hashes matched but diff failed" % self._test_name)

        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
            if diff:
                failures.append(test_failures.FailureReftestMismatch(reference_filename))
            elif err_str:
                _log.error(err_str)
            else:
                _log.warning("  %s -> ref test hashes didn't match but diff passed" % self._test_name)

        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
Example #6
    def test_loads(self):
        result = TestResult(test_name='foo', failures=[], test_run_time=1.1)
        s = result.dumps()
        new_result = TestResult.loads(s)
        self.assertIsInstance(new_result, TestResult)

        self.assertEqual(new_result, result)

        # Also check that != is implemented.
        self.assertFalse(new_result != result)
Example #7
 def run(self):
     if self._reference_files:
         if self._port.get_option("no_ref_tests") or self._options.reset_results:
             result = TestResult(self._test_name)
             result.type = test_expectations.SKIP
             return result
         return self._run_reftest()
     if self._options.reset_results:
         return self._run_rebaseline()
     return self._run_compare_test()
Example #8
 def run(self):
     if self._reference_files:
         if self._port.get_option('no_ref_tests') or self._options.reset_results:
             reftest_type = set([reference_file[0] for reference_file in self._reference_files])
             result = TestResult(self._test_name, reftest_type=reftest_type)
             result.type = test_expectations.SKIP
             return result
         return self._run_reftest()
     if self._options.reset_results:
         return self._run_rebaseline()
     return self._run_compare_test()
Example #9
 def run(self):
     if self._reference_files:
         if self._options.reset_results:
             reftest_type = set([reference_file[0] for reference_file in self._reference_files])
             result = TestResult(self._test_name, reftest_type=reftest_type)
             result.type = test_expectations.SKIP
             return result
         return self._run_reftest()
     if self._options.reset_results:
         return self._run_rebaseline()
     return self._run_compare_test()
Example #10
    def test_loads(self):
        result = TestResult(test_name='foo',
                            failures=[],
                            test_run_time=1.1)
        s = result.dumps()
        new_result = TestResult.loads(s)
        self.assertIsInstance(new_result, TestResult)

        self.assertEqual(new_result, result)

        # Also check that != is implemented.
        self.assertFalse(new_result != result)
Example #11
    def _compare_output(self, expected_driver_output, driver_output):
        failures = []
        failures.extend(self._handle_error(driver_output))

        if driver_output.crash:
            # Don't continue any more if we already have a crash.
            # In case of timeouts, we continue since we still want to see the text and image output.
            return TestResult(self._test_name,
                              failures,
                              driver_output.test_time,
                              driver_output.has_stderr(),
                              pid=driver_output.pid,
                              crash_site=driver_output.crash_site)

        is_testharness_test, testharness_failures = self._compare_testharness_test(
            driver_output, expected_driver_output)
        if is_testharness_test:
            failures.extend(testharness_failures)

        compare_functions = []
        compare_image_fn = (self._compare_image, (expected_driver_output,
                                                  driver_output))
        compare_txt_fn = (self._compare_text, (expected_driver_output.text,
                                               driver_output.text))
        compare_audio_fn = (self._compare_audio, (expected_driver_output.audio,
                                                  driver_output.audio))

        if self._should_run_pixel_test_first:
            if driver_output.image_hash and self._should_run_pixel_test:
                compare_functions.append(compare_image_fn)
            elif not is_testharness_test:
                compare_functions.append(compare_txt_fn)
        else:
            if not is_testharness_test:
                compare_functions.append(compare_txt_fn)
            if self._should_run_pixel_test:
                compare_functions.append(compare_image_fn)
        compare_functions.append(compare_audio_fn)

        for func, args in compare_functions:
            failures.extend(func(*args))

        has_repaint_overlay = (repaint_overlay.result_contains_repaint_rects(
            expected_driver_output.text) or
                               repaint_overlay.result_contains_repaint_rects(
                                   driver_output.text))
        return TestResult(self._test_name,
                          failures,
                          driver_output.test_time,
                          driver_output.has_stderr(),
                          pid=driver_output.pid,
                          has_repaint_overlay=has_repaint_overlay)
Example #12
 def run(self):
     if self._options.enable_sanitizer:
         return self._run_sanitized_test()
     if self._reference_files:
         if self._options.reset_results:
             reftest_type = set([reference_file[0] for reference_file in self._reference_files])
             result = TestResult(self._test_name, reftest_type=reftest_type)
             result.type = test_expectations.SKIP
             return result
         return self._run_reftest()
     if self._options.reset_results:
         return self._run_rebaseline()
     return self._run_compare_test()
Example #13
 def run(self):
     if self._options.new_baseline or self._options.reset_results:
         if self._is_reftest:
             # Returns a dummy TestResult. We don't have to rebase for reftests.
             return TestResult(self._test_name)
         else:
             return self._run_rebaseline()
     if self._is_reftest:
         if self._port.get_option('pixel_tests'):
             return self._run_reftest()
         result = TestResult(self._test_name)
         result.type = test_expectations.SKIP
         return result
     return self._run_compare_test()
Example #15
def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done):
    runner = SingleTestRunner(port, options, results_directory, worker_name, driver, test_input, stop_when_done)
    try:
        return runner.run()
    except DeviceFailure as e:
        _log.error("device failed: %s", str(e))
        return TestResult(test_input.test_name, device_failed=True)
Example #16
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = test_output.test_time
        expected_output = None
        test_result = None

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text, image=None, image_hash=None, audio=None)

        # If the test crashed, or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            test_result = self._compare_output(expected_text_output, test_output)

            if test_output.crash:
                test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                                     self._test_name, test_output, expected_text_output, test_result.failures)
            return test_result

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            if self._port.lookup_virtual_test_base(self._test_name):
                args = self._port.lookup_virtual_reference_args(self._test_name)
            else:
                args = self._port.lookup_physical_reference_args(self._test_name)
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name, self._timeout,
                                       image_hash=test_output.image_hash, should_run_pixel_test=True, args=args)
            expected_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
            total_test_time += expected_output.test_time
            test_result = self._compare_output_with_reference(
                expected_output, test_output, reference_filename, expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break

        assert(expected_output)

        if expected_text:
            text_output = DriverOutput(text=test_output.text, image=None, image_hash=None, audio=None)
            test_result.failures.extend(self._compare_output(expected_text_output, text_output).failures)
            expected_output.text = expected_text_output.text

        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                             self._test_name, test_output, expected_output, test_result.failures)

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
        return TestResult(self._test_name, test_result.failures, total_test_time,
                          test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid,
                          references=reference_test_names)
Example #17
 def run(self):
     if self._options.enable_sanitizer:
         return self._run_sanitized_test()
     if self._options.reset_results:
         if self._reference_files:
             expected_txt_filename = self._port.expected_filename(self._test_name, '.txt')
             if not self._filesystem.exists(expected_txt_filename):
                 reftest_type = set([reference_file[0] for reference_file in self._reference_files])
                 result = TestResult(self._test_name, reftest_type=reftest_type)
                 result.type = test_expectations.SKIP
                 return result
             self._should_run_pixel_test = False
         return self._run_rebaseline()
     if self._reference_files:
         return self._run_reftest()
     return self._run_compare_test()
Example #18
 def run(self):
     if self._options.enable_sanitizer:
         return self._run_sanitized_test()
     if self._options.reset_results:
         if self._reference_files:
             expected_txt_filename = self._port.expected_filename(self._test_name, ".txt")
             if not self._filesystem.exists(expected_txt_filename):
                 reftest_type = set([reference_file[0] for reference_file in self._reference_files])
                 result = TestResult(self._test_name, reftest_type=reftest_type)
                 result.type = test_expectations.SKIP
                 return result
             self._should_run_pixel_test = False
         return self._run_rebaseline()
     if self._reference_files:
         return self._run_reftest()
     return self._run_compare_test()
Example #19
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = 0
        reference_output = None
        test_result = None

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert(reference_output)
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
        reftest_type = set([reference_file[0] for reference_file in self._reference_files])
        return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names)
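Several of the reftest loops on this page rely on putAllMismatchBeforeMatch = sorted together with the comment that sorting the (expectation, filename) pairs puts "!=" before "==". That works because tuples sort lexicographically on their first element and '!' precedes '=' in ASCII, so all mismatch references are visited before any match references. A quick illustration (the reference file names here are made up):

    reference_files = [('==', 'refs/a-expected.html'),
                       ('!=', 'refs/a-expected-mismatch.html')]
    print(sorted(reference_files))
    # [('!=', 'refs/a-expected-mismatch.html'), ('==', 'refs/a-expected.html')]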
Example #20
 def test_update_summary_with_result(self):
     host = MockHost()
     port = host.port_factory.get('test-win-xp')
     test = 'failures/expected/reftest.html'
     expectations = TestExpectations(
         port,
         tests=[test],
         expectations='WONTFIX : failures/expected/reftest.html = IMAGE',
         test_config=port.test_configuration())
     # Reftests expected to be image mismatch should be respected when pixel_tests=False.
     manager = Manager(port=port,
                       options=MockOptions(
                           pixel_tests=False,
                           exit_after_n_failures=None,
                           exit_after_n_crashes_or_timeouts=None),
                       printer=Mock())
     manager._expectations = expectations
     result_summary = ResultSummary(expectations=expectations,
                                    test_files=[test])
     result = TestResult(
         test_name=test,
         failures=[test_failures.FailureReftestMismatchDidNotOccur()])
     manager._update_summary_with_result(result_summary, result)
     self.assertEquals(1, result_summary.expected)
     self.assertEquals(0, result_summary.unexpected)
Example #21
 def _run_rebaseline(self):
     driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
     failures = self._handle_error(driver_output)
     test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
     # FIXME: If the test crashed or timed out, it might be better to avoid
     # writing new baselines.
     self._overwrite_baselines(driver_output)
     return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
Example #22
 def _run_sanitized_test(self):
     # running a sanitized test means that we ignore the actual test output and just look
     # for timeouts and crashes (real or forced by the driver). Most crashes should
     # indicate problems found by a sanitizer (ASAN, LSAN, etc.), but we will report
     # on other crashes and timeouts as well in order to detect at least *some* basic failures.
     driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
     failures = self._handle_error(driver_output)
     return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                       pid=driver_output.pid)
Example #23
    def test_pickle_roundtrip(self):
        result = TestResult('foo', failures=[], test_run_time=1.1)
        s = pickle.dumps(result)  # multiprocessing uses the default protocol version
        new_result = pickle.loads(s)
        self.assertIsInstance(new_result, TestResult)

        self.assertEqual(new_result, result)

        # Also check that != is implemented.
        self.assertFalse(new_result != result)
Example #24
    def _compare_output(self, expected_driver_output, driver_output):
        failures = []
        failures.extend(self._handle_error(driver_output))

        if driver_output.crash:
            # Don't continue any more if we already have a crash.
            # In case of timeouts, we continue since we still want to see the text and image output.
            return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                              pid=driver_output.pid)

        is_testharness_test, testharness_failures = self._compare_testharness_test(driver_output, expected_driver_output)
        if is_testharness_test:
            failures.extend(testharness_failures)
        else:
            failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
            failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
            if self._should_run_pixel_test:
                failures.extend(self._compare_image(expected_driver_output, driver_output))
        has_repaint_overlay = (repaint_overlay.result_contains_repaint_rects(expected_driver_output.text) or
                               repaint_overlay.result_contains_repaint_rects(driver_output.text))
        return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                          pid=driver_output.pid, has_repaint_overlay=has_repaint_overlay)
Example #25
    def test_update_summary_with_result(self):
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(runner._port, tests=[test])
        runner._expectations = expectations

        result_summary = ResultSummary(expectations, [test], 1, set())
        result = TestResult(
            test_name=test,
            failures=[test_failures.FailureReftestMismatchDidNotOccur()],
            reftest_type=['!='])
        runner._update_summary_with_result(result_summary, result)
        self.assertEqual(1, result_summary.expected)
        self.assertEqual(0, result_summary.unexpected)

        result_summary = ResultSummary(expectations, [test], 1, set())
        result = TestResult(test_name=test, failures=[], reftest_type=['=='])
        runner._update_summary_with_result(result_summary, result)
        self.assertEqual(0, result_summary.expected)
        self.assertEqual(1, result_summary.unexpected)
Example #26
 def _run_rebaseline(self):
     driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
     if self._options.reset_results:
         expected_driver_output = None
         failures = self._handle_error(driver_output)
     else:
         expected_driver_output = self._expected_driver_output()
         failures = self._compare_output(expected_driver_output, driver_output).failures
     test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                          self._test_name, driver_output, expected_driver_output, failures)
     # FIXME: If the test crashed or timed out, it might be better to avoid
     # writing new baselines.
     self._update_or_add_new_baselines(driver_output)
     return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
                       pid=driver_output.pid, crash_site=driver_output.crash_site)
Example #27
 def _run_rebaseline(self):
     """Similar to _run_compare_test(), but has the side effect of updating or adding baselines.
     This is called when --reset-results and/or --copy-baselines are specified in the command line.
     If --reset-results, in the returned result we treat baseline mismatch as success."""
     driver_output = self._driver.run_test(self._driver_input(),
                                           self._stop_when_done)
     expected_driver_output = self._expected_driver_output()
     actual_failures = self._compare_output(expected_driver_output,
                                            driver_output).failures
     failures = self._handle_error(
         driver_output) if self._options.reset_results else actual_failures
     test_result_writer.write_test_result(self._filesystem, self._port,
                                          self._results_directory,
                                          self._test_name, driver_output,
                                          expected_driver_output, failures)
     self._update_or_add_new_baselines(driver_output, actual_failures)
     return TestResult(self._test_name,
                       failures,
                       driver_output.test_time,
                       driver_output.has_stderr(),
                       pid=driver_output.pid,
                       crash_site=driver_output.crash_site)
Example #28
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input())
        total_test_time = 0
        reference_output = None
        test_result = None

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        for expectation, reference_filename in putAllMismatchBeforeMatch(
                self._reference_files):
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_output = self._driver.run_test(
                DriverInput(reference_test_name,
                            self._timeout,
                            test_output.image_hash,
                            is_reftest=True))
            test_result = self._compare_output_with_reference(
                test_output, reference_output, reference_filename,
                expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (
                    expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert (reference_output)
        test_result_writer.write_test_result(self._port, self._test_name,
                                             test_output, reference_output,
                                             test_result.failures)
        return TestResult(self._test_name, test_result.failures,
                          total_test_time + test_result.test_run_time,
                          test_result.has_stderr)
Example #29
 def test_defaults(self):
     result = TestResult("foo")
     self.assertEqual(result.test_name, 'foo')
     self.assertEqual(result.failures, [])
     self.assertEqual(result.test_run_time, 0)