Example #1
    def test_interpret_test_failures(self):
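        # A FailureReftestMismatch flags the test as a reftest, but not as a mismatch reftest.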
        test_dict = interpret_test_failures(self.port, 'foo/reftest.html', [
            test_failures.FailureReftestMismatch(
                self.port.abspath_for_test('foo/reftest-expected.html'))
        ])
        self.assertTrue('is_reftest' in test_dict)
        self.assertFalse('is_mismatch_reftest' in test_dict)

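        # When the reference is a separate file, its path is also reported via 'ref_file'.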
        test_dict = interpret_test_failures(self.port, 'foo/reftest.html', [
            test_failures.FailureReftestMismatch(
                self.port.abspath_for_test('foo/common.html'))
        ])
        self.assertTrue('is_reftest' in test_dict)
        self.assertFalse('is_mismatch_reftest' in test_dict)
        self.assertEqual(test_dict['ref_file'], 'foo/common.html')

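        # A FailureReftestMismatchDidNotOccur flags the test as a mismatch reftest instead.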
        test_dict = interpret_test_failures(self.port, 'foo/reftest.html', [
            test_failures.FailureReftestMismatchDidNotOccur(
                self.port.abspath_for_test(
                    'foo/reftest-expected-mismatch.html'))
        ])
        self.assertFalse('is_reftest' in test_dict)
        self.assertTrue(test_dict['is_mismatch_reftest'])

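        # The 'ref_file' entry is reported for the mismatch case as well.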
        test_dict = interpret_test_failures(self.port, 'foo/reftest.html', [
            test_failures.FailureReftestMismatchDidNotOccur(
                self.port.abspath_for_test('foo/common.html'))
        ])
        self.assertFalse('is_reftest' in test_dict)
        self.assertTrue(test_dict['is_mismatch_reftest'])
        self.assertEqual(test_dict['ref_file'], 'foo/common.html')

    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
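        """Compare the actual output against the reference output and build a TestResult.

        `mismatch` is True for an expected-mismatch reference, in which case images
        that do not differ are reported as FailureReftestMismatchDidNotOccur.
        """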
        total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
        failures = []
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            # Don't continue if we already have a crash or timeout.
            return TestResult(self._test_name, failures, total_test_time, has_stderr)
        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)

        if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
        elif mismatch:
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
                if not diff:
                    failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
                elif err_str:
                    _log.error(err_str)
                else:
                    _log.warning("  %s -> ref test hashes matched but diff failed" % self._test_name)

        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
            if diff:
                failures.append(test_failures.FailureReftestMismatch(reference_filename))
            elif err_str:
                _log.error(err_str)
            else:
                _log.warning("  %s -> ref test hashes didn't match but diff passed" % self._test_name)

        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)

    def test_update_summary_with_result(self):
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        runner._options.world_leaks = False
        test = 'failures/expected/reftest.html'
        leak_test = 'failures/expected/leak.html'
        expectations = TestExpectations(runner._port, tests=[test, leak_test])
        expectations.parse_all_expectations()
        runner._expectations = expectations

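        # The image-mismatch failure matches what the expectations predict, so it counts as expected.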
        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
        runner.update_summary_with_result(result)
        self.assertEqual(1, runner._current_run_results.expected)
        self.assertEqual(0, runner._current_run_results.unexpected)

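        # A result with no failures does not match the expected failure, so it counts as unexpected.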
        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(test, failures=[], reftest_type=['=='])
        runner.update_summary_with_result(result)
        self.assertEqual(0, runner._current_run_results.expected)
        self.assertEqual(1, runner._current_run_results.unexpected)

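        # The passing leak test counts as expected; world leak checking was disabled above.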
        runner._current_run_results = TestRunResults(expectations, 1)
        result = TestResult(leak_test, failures=[])
        runner.update_summary_with_result(result)
        self.assertEqual(1, runner._current_run_results.expected)
        self.assertEqual(0, runner._current_run_results.unexpected)
Example #4
    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
        total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
        failures = []
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            # Don't continue if we already have a crash or timeout.
            return TestResult(self._test_name, failures, total_test_time, has_stderr)
        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)

        if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
        elif mismatch:
            # Calling diff_image is unnecessary for expected-mismatch ref tests; comparing the image hashes is enough.
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            # ImageDiff has a hard-coded color distance threshold, even when tolerance=0 is specified.
            diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0)
            error_string = diff_result[2]
            if error_string:
                _log.warning('  %s : %s' % (self._test_name, error_string))
                failures.append(test_failures.FailureReftestMismatch(reference_filename))
                actual_driver_output.error = (actual_driver_output.error or '') + error_string
            elif diff_result[0]:
                failures.append(test_failures.FailureReftestMismatch(reference_filename))

        return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
Example #5
    def test_update_summary_with_result(self):
        host = MockHost()
        port = host.port_factory.get('test-win-xp')
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(
            port,
            tests=[test],
            expectations='WONTFIX : failures/expected/reftest.html = IMAGE',
            test_config=port.test_configuration())
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        manager = Manager(port=port,
                          options=MockOptions(
                              pixel_tests=False,
                              exit_after_n_failures=None,
                              exit_after_n_crashes_or_timeouts=None),
                          printer=Mock())
        manager._expectations = expectations
        result_summary = ResultSummary(expectations=expectations,
                                       test_files=[test])
        result = TestResult(
            test_name=test,
            failures=[test_failures.FailureReftestMismatchDidNotOccur()])
        manager._update_summary_with_result(result_summary, result)
        self.assertEqual(1, result_summary.expected)
        self.assertEqual(0, result_summary.unexpected)

    def test_interpret_test_failures(self):
        test_dict = test_run_results._interpret_test_failures(
            [test_failures.FailureImageHashMismatch(diff_percent=0.42)])
        self.assertEqual(test_dict['image_diff_percent'], 0.42)

        test_dict = test_run_results._interpret_test_failures([
            test_failures.FailureReftestMismatch(
                self.port.abspath_for_test('foo/reftest-expected.html'))
        ])
        self.assertIn('image_diff_percent', test_dict)

        test_dict = test_run_results._interpret_test_failures([
            test_failures.FailureReftestMismatchDidNotOccur(
                self.port.abspath_for_test(
                    'foo/reftest-expected-mismatch.html'))
        ])
        self.assertEqual(len(test_dict), 0)

        test_dict = test_run_results._interpret_test_failures(
            [test_failures.FailureMissingAudio()])
        self.assertIn('is_missing_audio', test_dict)

        test_dict = test_run_results._interpret_test_failures(
            [test_failures.FailureMissingResult()])
        self.assertIn('is_missing_text', test_dict)

        test_dict = test_run_results._interpret_test_failures(
            [test_failures.FailureMissingImage()])
        self.assertIn('is_missing_image', test_dict)

        test_dict = test_run_results._interpret_test_failures(
            [test_failures.FailureMissingImageHash()])
        self.assertIn('is_missing_image', test_dict)

    def _compare_output_with_reference(self, driver_output1, driver_output2,
                                       reference_filename, mismatch):
        total_test_time = driver_output1.test_time + driver_output2.test_time
        has_stderr = driver_output1.has_stderr() or driver_output2.has_stderr()
        failures = []
        failures.extend(self._handle_error(driver_output1))
        if failures:
            # Don't continue if we already have a crash or timeout.
            return TestResult(self._test_name, failures, total_test_time,
                              has_stderr)
        failures.extend(
            self._handle_error(driver_output2,
                               reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, total_test_time,
                              has_stderr)

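        # Reftest outcomes below are decided purely by comparing image hashes; no pixel diff is run.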
        if not driver_output1.image_hash and not driver_output2.image_hash:
            failures.append(
                test_failures.FailureReftestNoImagesGenerated(
                    reference_filename))
        elif mismatch:
            if driver_output1.image_hash == driver_output2.image_hash:
                failures.append(
                    test_failures.FailureReftestMismatchDidNotOccur(
                        reference_filename))
        elif driver_output1.image_hash != driver_output2.image_hash:
            failures.append(
                test_failures.FailureReftestMismatch(reference_filename))
        return TestResult(self._test_name, failures, total_test_time,
                          has_stderr)

    def test_reference_is_missing(self):
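        # With no reference file available, nothing should be written out.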
        failure = test_failures.FailureReftestMismatch()
        failure.reference_filename = 'notfound.html'
        written_files = self.run_test(failures=[failure], files={})
        self.assertEqual(written_files, {})

        failure = test_failures.FailureReftestMismatchDidNotOccur()
        failure.reference_filename = 'notfound.html'
        written_files = self.run_test(failures=[failure], files={})
        self.assertEqual(written_files, {})

    def test_reference_exists(self):
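        # An existing reference file should be written out alongside the results.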
        failure = test_failures.FailureReftestMismatch()
        failure.reference_filename = '/src/exists-expected.html'
        files = {'/src/exists-expected.html': 'yup'}
        written_files = self.run_test(failures=[failure], files=files)
        self.assertEqual(written_files, {'/tmp/exists-expected.html': 'yup'})

        failure = test_failures.FailureReftestMismatchDidNotOccur()
        failure.reference_filename = '/src/exists-expected-mismatch.html'
        files = {'/src/exists-expected-mismatch.html': 'yup'}
        written_files = self.run_test(failures=[failure], files=files)
        self.assertEqual(written_files, {'/tmp/exists-expected-mismatch.html': 'yup'})

    def test_update_summary_with_result(self):
        # Reftests expected to be image mismatch should be respected when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(runner._port, tests=[test])
        runner._expectations = expectations
        result_summary = ResultSummary(expectations, [test], 1, set())
        result = TestResult(
            test_name=test,
            failures=[test_failures.FailureReftestMismatchDidNotOccur()])
        runner._update_summary_with_result(result_summary, result)
        self.assertEqual(1, result_summary.expected)
        self.assertEqual(0, result_summary.unexpected)