Example #1
    def _compare_image(self, expected_driver_output, driver_output):
        failures = []
        # If we didn't produce a hash file, this test must be text-only.
        if driver_output.image_hash is None:
            return failures
        if not expected_driver_output.image:
            failures.append(test_failures.FailureMissingImage())
        elif not expected_driver_output.image_hash:
            failures.append(test_failures.FailureMissingImageHash())
        elif driver_output.image_hash != expected_driver_output.image_hash:
            diff, err_str = self._port.diff_image(expected_driver_output.image,
                                                  driver_output.image)
            if err_str:
                _log.warning('  %s : %s', self._test_name, err_str)
                failures.append(test_failures.FailureImageHashMismatch())
                driver_output.error = (driver_output.error or '') + err_str
            else:
                driver_output.image_diff = diff
                if driver_output.image_diff:
                    failures.append(test_failures.FailureImageHashMismatch())
                else:
                    # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
                    _log.warning('  %s -> pixel hash failed (but diff passed)',
                                 self._test_name)
        return failures
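
Example #1 is the older, accumulator-style form of this comparison (it reads like the image-comparison step of Chromium's web test runner): the pixel diff is trusted over the checksum, so a hash mismatch whose pixels compare equal is only logged, not failed (see the linked bug 69444). Below is a minimal, self-contained sketch of that decision flow; Output and diff_image are simplified stand-ins for DriverOutput and Port.diff_image, not the real blinkpy API.

from dataclasses import dataclass
from typing import List, Optional, Tuple


@dataclass
class Output:
    # Simplified stand-in for DriverOutput (image bytes + checksum).
    image: Optional[bytes] = None
    image_hash: Optional[str] = None


def diff_image(expected: bytes, actual: bytes) -> Tuple[Optional[bytes], str]:
    # Stand-in for Port.diff_image(): returns (diff_image, err_str).
    return (None, '') if expected == actual else (b'<diff>', '')


def compare(expected: Output, actual: Output) -> List[str]:
    failures = []
    if actual.image_hash is None:      # no hash produced: text-only test
        return failures
    if not expected.image:
        failures.append('missing-image')
    elif not expected.image_hash:
        failures.append('missing-image-hash')
    elif actual.image_hash != expected.image_hash:
        diff, err = diff_image(expected.image, actual.image)
        if err or diff:
            failures.append('image-hash-mismatch')
        # else: hash mismatch but identical pixels -> tolerated (bug 69444)
    return failures


# Stale checksum, identical pixels: logged in the real code, not failed.
assert compare(Output(b'png', 'h1'), Output(b'png', 'h2')) == []
# Real pixel difference: reported as a mismatch.
assert compare(Output(b'old', 'h1'), Output(b'new', 'h2')) == ['image-hash-mismatch']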
Example #2
    def _compare_image(self, expected_driver_output, driver_output):
        if not expected_driver_output.image or not expected_driver_output.image_hash:
            return []
        # The presence of an expected image, but a lack of an outputted image
        # does not signify an error. content::BlinkTestController checks the
        # image_hash, and upon a match simply skips recording the outputted
        # image. This even occurs when results_directory is set.
        if not driver_output.image or not driver_output.image_hash:
            return []

        if driver_output.image_hash != expected_driver_output.image_hash:
            diff, err_str = self._port.diff_image(expected_driver_output.image,
                                                  driver_output.image)
            if err_str:
                _log.warning('  %s : %s', self._test_name, err_str)
                driver_output.error = (driver_output.error or '') + err_str
                return [test_failures.FailureImageHashMismatch()]

            driver_output.image_diff = diff
            if driver_output.image_diff:
                return [test_failures.FailureImageHashMismatch()]
            # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
            _log.warning('  %s -> pixel hash failed (but diff passed)',
                         self._test_name)

        return []
Example #3
    def _compare_image(self, expected_driver_output, driver_output):
        if not expected_driver_output.image or not expected_driver_output.image_hash:
            return []
        if not driver_output.image or not driver_output.image_hash:
            return []

        if driver_output.image_hash != expected_driver_output.image_hash:
            diff, err_str = self._port.diff_image(expected_driver_output.image,
                                                  driver_output.image)
            if err_str:
                _log.warning('  %s : %s', self._test_name, err_str)
                driver_output.error = (driver_output.error or '') + err_str
                return [test_failures.FailureImageHashMismatch()]

            driver_output.image_diff = diff
            if driver_output.image_diff:
                return [test_failures.FailureImageHashMismatch()]
            # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
            _log.warning('  %s -> pixel hash failed (but diff passed)',
                         self._test_name)

        return []
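
Examples #2 and #3 are the refactored form: early returns replace the failures accumulator, and the missing-baseline failures that Example #1 raised inline (FailureMissingImage, FailureMissingImageHash) are handled elsewhere, in a pass like Example #4's _check_extra_and_missing_baselines. Note the extra guard: a missing actual image is not an error, because per the comment in Example #2 the harness skips recording the image when the hash already matches. A tiny sketch of the guard logic, using plain booleans rather than the real objects:

# Comparison proceeds only when both sides carry an image and a hash.
cases = [
    # (expected_image, expected_hash, actual_image, actual_hash) -> compared?
    ((b'png', 'h', b'png', 'h'), True),
    ((None, None, b'png', 'h'), False),   # no baseline checked in: skip
    ((b'png', 'h', None, None), False),   # driver skipped recording: skip
]
for (e_img, e_hash, a_img, a_hash), expect_compared in cases:
    compared = bool(e_img and e_hash and a_img and a_hash)
    assert compared == expect_compared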
Example #4
    def _check_extra_and_missing_baselines(self, expected_driver_output,
                                           driver_output):
        failures = []

        if driver_output.text:
            if self._is_all_pass_testharness_text_not_needing_baseline(
                    driver_output.text):
                if self._report_extra_baseline(
                        driver_output, '.txt',
                        'is an all-pass testharness test'):
                    # TODO(wangxianzhu): Make this a failure.
                    pass
            elif testharness_results.is_testharness_output(driver_output.text):
                # We only need -expected.txt for a testharness test when we
                # expect it to fail or produce additional console output (when
                # -expected.txt is optional), so don't report missing
                # -expected.txt for testharness tests.
                pass
            elif self._reference_files:
                # A reftest's -expected.txt is optional. TODO(wangxianzhu): May
                # let reftests use the standard baseline existence rule.
                pass
            elif not expected_driver_output.text:
                failures.append(test_failures.FailureMissingResult())
        elif self._report_extra_baseline(driver_output, '.txt',
                                         'does not produce text result'):
            failures.append(test_failures.FailureTextMismatch())

        if driver_output.image_hash:
            if self._reference_files:
                if self._report_extra_baseline(driver_output, '.png',
                                               'is a reftest'):
                    # TODO(wangxianzhu): Make this a failure.
                    pass
            else:
                if not expected_driver_output.image:
                    failures.append(test_failures.FailureMissingImage())
                if not expected_driver_output.image_hash:
                    failures.append(test_failures.FailureMissingImageHash())
        elif self._report_extra_baseline(driver_output, '.png',
                                         'does not produce image result'):
            failures.append(test_failures.FailureImageHashMismatch())

        if driver_output.audio:
            if not expected_driver_output.audio:
                failures.append(test_failures.FailureMissingAudio())
        elif self._report_extra_baseline(driver_output, '.wav',
                                         'does not produce audio result'):
            failures.append(test_failures.FailureAudioMismatch())

        return failures
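
Example #4 centralizes the extra/missing-baseline bookkeeping per output type (.txt, .png, .wav). The _report_extra_baseline helper it calls is not shown; the following is a hypothetical sketch of what such a helper could look like, with its signature and behavior inferred from the call sites above, not taken from the real blinkpy implementation.

import logging

_log = logging.getLogger(__name__)


def report_extra_baseline(has_baseline, test_name, extension, reason):
    # Hypothetical: warn when a baseline file exists for an output the
    # test should not produce; return True so callers can record a failure.
    if has_baseline:
        _log.warning('  %s has an extra %s baseline, but the test %s',
                     test_name, extension, reason)
        return True
    return False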
Example #5
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout()]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch()]
    elif result_type == test_expectations.TEXT:
        failures = [test_failures.FailureTextMismatch()]
    elif result_type == test_expectations.IMAGE:
        failures = [test_failures.FailureImageHashMismatch()]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash()]
    elif result_type == test_expectations.LEAK:
        failures = [test_failures.FailureLeak()]
    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
Example #6
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(
        None, None, None, None)
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout(dummy_1)]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.TEXT:
        failures = [test_failures.FailureTextMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.IMAGE:
        failures = [test_failures.FailureImageHashMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash(dummy_1)]
    elif result_type == test_expectations.LEAK:
        failures = [test_failures.FailureLeak(dummy_1)]
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
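
Examples #5 and #6 are before/after versions of the same test helper: the later test_failures constructors require DriverOutput arguments (the actual output, and for mismatches also the expected one), so the helper feeds them dummy outputs. A usage sketch follows, assuming the imports the snippet itself relies on are in scope; the module paths in the comments are a guess at the blinkpy layout, not verified.

# Assumed imports (hypothetical paths):
#   from blinkpy.web_tests.models import test_expectations, test_failures, test_results
#   from blinkpy.web_tests.port.driver import DriverOutput

result = get_result('fast/dom/example.html', test_expectations.TIMEOUT, run_time=1.5)
assert any(isinstance(f, test_failures.FailureTimeout) for f in result.failures)

result = get_result('fast/dom/example.html')  # result_type defaults to PASS
assert result.failures == []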