Example #1
    def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
        has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
        failures = []
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            # Don't continue if we already have a crash or timeout.
            return TestResult(self._test_name, failures, 0, has_stderr, crash_site=actual_driver_output.crash_site)
        failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
        if failures:
            return TestResult(self._test_name, failures, 0, has_stderr, pid=actual_driver_output.pid,
                              crash_site=reference_driver_output.crash_site)

        if not actual_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoImageGenerated(reference_filename))
        elif not reference_driver_output.image_hash:
            failures.append(test_failures.FailureReftestNoReferenceImageGenerated(reference_filename))
        elif mismatch:
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
            if diff:
                failures.append(test_failures.FailureReftestMismatch(reference_filename))
            elif err_str:
                _log.error(err_str)
            else:
                _log.warning("  %s -> ref test hashes didn't match but diff passed", self._test_name)

        return TestResult(self._test_name, failures, 0, has_stderr, pid=actual_driver_output.pid)
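
The branching above is the core of reftest comparison: missing pixel output on either side is its own failure, a '!=' reference must render differently, and a '==' reference with differing hashes still needs a pixel diff before it counts as a mismatch. A minimal, self-contained sketch of that decision ladder, using a hypothetical stand-in for DriverOutput (FakeOutput and classify are illustrative names, not Chromium API):

from collections import namedtuple

# Hypothetical stand-in for DriverOutput; only the field used here.
FakeOutput = namedtuple('FakeOutput', ['image_hash'])

def classify(reference, actual, mismatch):
    # Mirrors the branches above, returning a label instead of a failure object.
    if not actual.image_hash:
        return 'no-image-generated'
    if not reference.image_hash:
        return 'no-reference-image-generated'
    if mismatch:
        # A '!=' reference must differ; identical hashes mean the
        # expected mismatch did not occur.
        if reference.image_hash == actual.image_hash:
            return 'mismatch-did-not-occur'
        return 'pass'
    if reference.image_hash != actual.image_hash:
        # Hashes can differ while the images still compare equal, so a
        # pixel diff is required before reporting a mismatch.
        return 'needs-pixel-diff'
    return 'pass'

assert classify(FakeOutput('aa'), FakeOutput('aa'), True) == 'mismatch-did-not-occur'
assert classify(FakeOutput('aa'), FakeOutput('bb'), False) == 'needs-pixel-diff'
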
Example #2

    def test_interpret_test_failures(self):
        test_dict = test_run_results._interpret_test_failures([
            test_failures.FailureReftestMismatchDidNotOccur(
                self._actual_output, self._expected_output,
                self.port.abspath_for_test(
                    'foo/reftest-expected-mismatch.html'))
        ])
        self.assertEqual(len(test_dict), 0)

        test_dict = test_run_results._interpret_test_failures([
            test_failures.FailureMissingAudio(self._actual_output,
                                              self._expected_output)
        ])
        self.assertIn('is_missing_audio', test_dict)

        test_dict = test_run_results._interpret_test_failures([
            test_failures.FailureMissingResult(self._actual_output,
                                               self._expected_output)
        ])
        self.assertIn('is_missing_text', test_dict)

        test_dict = test_run_results._interpret_test_failures([
            test_failures.FailureMissingImage(self._actual_output,
                                              self._expected_output)
        ])
        self.assertIn('is_missing_image', test_dict)

        test_dict = test_run_results._interpret_test_failures([
            test_failures.FailureMissingImageHash(self._actual_output,
                                                  self._expected_output)
        ])
        self.assertIn('is_missing_image', test_dict)
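
The assertions above fully pin down the mapping this test exercises. A hedged reconstruction of what test_run_results._interpret_test_failures plausibly does, assuming the blinkpy test_failures module is importable; the real implementation may differ in detail:

from blinkpy.web_tests.models import test_failures

def _interpret_test_failures(failures):
    # Sketch only: derive 'is_missing_*' flags from the failure types.
    # Reftest failures such as FailureReftestMismatchDidNotOccur
    # contribute no keys, per the first assertion above.
    test_dict = {}
    for failure in failures:
        if isinstance(failure, test_failures.FailureMissingAudio):
            test_dict['is_missing_audio'] = True
        elif isinstance(failure, test_failures.FailureMissingResult):
            test_dict['is_missing_text'] = True
        elif isinstance(failure, (test_failures.FailureMissingImage,
                                  test_failures.FailureMissingImageHash)):
            test_dict['is_missing_image'] = True
    return test_dict
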
Example #3

    def _compare_output_with_reference(self, reference_driver_output,
                                       actual_driver_output,
                                       reference_filename, mismatch):
        failures = []

        # Don't continue if we already have a crash or timeout.
        failures.extend(self._handle_error(actual_driver_output))
        if failures:
            return failures
        failures.extend(
            self._handle_error(
                reference_driver_output,
                reference_filename=reference_filename))
        if failures:
            return failures

        if not actual_driver_output.image_hash:
            failures.append(
                test_failures.FailureReftestNoImageGenerated(
                    actual_driver_output, reference_driver_output,
                    reference_filename))
        elif not reference_driver_output.image_hash:
            failures.append(
                test_failures.FailureReftestNoReferenceImageGenerated(
                    actual_driver_output, reference_driver_output,
                    reference_filename))
        elif mismatch:
            if reference_driver_output.image_hash == actual_driver_output.image_hash:
                failures.append(
                    test_failures.FailureReftestMismatchDidNotOccur(
                        actual_driver_output, reference_driver_output,
                        reference_filename))
        elif reference_driver_output.image_hash != actual_driver_output.image_hash:
            diff, err_str = self._port.diff_image(
                reference_driver_output.image, actual_driver_output.image)
            if diff:
                actual_driver_output.image_diff = diff

            if err_str:
                _log.error(err_str)
                actual_driver_output.error = (actual_driver_output.error
                                              or '') + err_str

            if diff or err_str:
                failures.append(
                    test_failures.FailureReftestMismatch(
                        actual_driver_output, reference_driver_output,
                        reference_filename))
            else:
                _log.warning(
                    "  %s -> ref test hashes didn't match but diff passed",
                    self._test_name)

        return failures
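
Unlike the variant in Example #1, this revision returns a bare failure list and attaches artifacts (image_diff, error text) to actual_driver_output, leaving TestResult construction to the caller. A hypothetical caller-side sketch inside the same runner class, modeled on the TestResult call in Example #1; the exact call site in Chromium may differ:

failures = self._compare_output_with_reference(
    reference_driver_output, actual_driver_output,
    reference_filename, mismatch=False)
has_stderr = (reference_driver_output.has_stderr()
              or actual_driver_output.has_stderr())
return TestResult(self._test_name, failures, 0, has_stderr,
                  pid=actual_driver_output.pid)
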
Example #4
    def test_reference_is_missing(self):
        failure = test_failures.FailureReftestMismatch()
        failure.reference_filename = 'notfound.html'
        fs = self.run_test(failures=[failure], files={})
        self.assertEqual(fs.written_files, {})

        failure = test_failures.FailureReftestMismatchDidNotOccur()
        failure.reference_filename = 'notfound.html'
        fs = self.run_test(failures=[failure], files={})
        self.assertEqual(fs.written_files, {})
Example #5

    def test_reference_exists(self):
        failure = test_failures.FailureReftestMismatch()
        failure.reference_filename = '/src/exists-expected.html'
        files = {'/src/exists-expected.html': 'yup'}
        written_files = self.run_test(failures=[failure], files=files)
        self.assertEqual(written_files, {'/tmp/exists-expected.html': 'yup'})

        failure = test_failures.FailureReftestMismatchDidNotOccur()
        failure.reference_filename = '/src/exists-expected-mismatch.html'
        files = {'/src/exists-expected-mismatch.html': 'yup'}
        written_files = self.run_test(failures=[failure], files=files)
        self.assertEqual(written_files,
                         {'/tmp/exists-expected-mismatch.html': 'yup'})
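
Taken together, Examples #4 and #5 pin down one contract of the result writer: a reference file that exists is copied into the results directory under its own basename, and a missing reference writes nothing. A minimal dict-based sketch of that contract; the helper name and the '/tmp' results directory are assumptions read off the expected values above:

from collections import namedtuple

FakeFailure = namedtuple('FakeFailure', ['reference_filename'])

def copy_reference_file(failure, files, results_dir='/tmp'):
    # Sketch: return the files a result writer would produce for one
    # reftest failure, given a {path: contents} fake filesystem.
    written = {}
    src = failure.reference_filename
    if src in files:
        basename = src.rsplit('/', 1)[-1]
        written['%s/%s' % (results_dir, basename)] = files[src]
    return written

assert copy_reference_file(FakeFailure('notfound.html'), {}) == {}
assert copy_reference_file(
    FakeFailure('/src/exists-expected.html'),
    {'/src/exists-expected.html': 'yup'}) == {'/tmp/exists-expected.html': 'yup'}
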
Example #6
    def test_update_summary_with_result(self):
        runner = self._runner()
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(runner._port, tests=[test])
        runner._expectations = expectations

        run_results = TestRunResults(expectations, 1)
        result = TestResult(
            test_name=test,
            failures=[test_failures.FailureReftestMismatchDidNotOccur()],
            reftest_type=['!='])
        runner._update_summary_with_result(run_results, result)
        self.assertEqual(1, run_results.expected)
        self.assertEqual(0, run_results.unexpected)

        run_results = TestRunResults(expectations, 1)
        result = TestResult(test_name=test, failures=[], reftest_type=['=='])
        runner._update_summary_with_result(run_results, result)
        self.assertEqual(0, run_results.expected)
        self.assertEqual(1, run_results.unexpected)
Example #7

    def test_update_summary_with_result(self):
        # Reftests expected to be an image mismatch should be respected
        # even when pixel_tests=False.
        runner = self._runner()
        runner._options.pixel_tests = False
        test = 'failures/expected/reftest.html'
        expectations = TestExpectations(runner._port, tests=[test])
        runner._expectations = expectations

        run_results = TestRunResults(expectations, 1)
        result = TestResult(
            test_name=test,
            failures=[test_failures.FailureReftestMismatchDidNotOccur()],
            reftest_type=['!='])
        runner._update_summary_with_result(run_results, result)
        self.assertEqual(1, run_results.expected)
        self.assertEqual(0, run_results.unexpected)

        run_results = TestRunResults(expectations, 1)
        result = TestResult(test_name=test, failures=[], reftest_type=['=='])
        runner._update_summary_with_result(run_results, result)
        self.assertEqual(0, run_results.expected)
        self.assertEqual(1, run_results.unexpected)
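
The four assertions in Examples #6 and #7 encode a single rule: the test lives under failures/expected/, so a failing result (here, FailureReftestMismatchDidNotOccur on a '!=' reftest) is counted as expected, while a clean pass is counted as unexpected. A tiny hypothetical sketch of that bookkeeping, not the real _update_summary_with_result:

def update_summary(run_results, result, expected_to_fail=True):
    # Sketch: a result matches its expectation when its pass/fail state
    # agrees with what the expectations file predicts for the test.
    failed = bool(result.failures)
    if failed == expected_to_fail:
        run_results.expected += 1
    else:
        run_results.unexpected += 1
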