Example #1
0
    def _compare_text(self, expected_driver_output, driver_output):
        """Compare expected vs. actual text output and classify the mismatch.

        Returns a list of test_failures objects: empty when either side has
        no text, when the outputs match, or when the only differences are
        LayoutNG vs. legacy layout-object names; otherwise a single failure
        describing whether the difference is general, spaces/tabs only,
        newlines only, or both kinds of whitespace.
        """
        expected = expected_driver_output.text
        actual = driver_output.text
        if not expected or not actual:
            return []

        normalized = self._get_normalized_output_text(actual)
        # The expected text is assumed to be normalized already.
        if not self._port.do_text_results_differ(expected, normalized):
            return []

        # The outputs differ; figure out what kind of mismatch it is.

        def strip_chars(text, chars):
            # Drop every occurrence of each character in |chars|.
            for ch in chars:
                text = text.replace(ch, '')
            return text

        def drop_ng_names(results):
            out = re.sub(r'LayoutNG(BlockFlow|ListItem|TableCell)', r'Layout\1', results)
            # LayoutTableCaption doesn't override LayoutBlockFlow::GetName, so
            # render tree dumps have "LayoutBlockFlow" for captions.
            out = re.sub('LayoutNGTableCaption', 'LayoutBlockFlow', out)
            return out

        def only_ng_names_differ(exp, act):
            if not re.search("LayoutNG(BlockFlow|ListItem|TableCaption|TableCell)", act):
                return False
            if not self._is_render_tree(act) and not self._is_layer_tree(act):
                return False
            # There's a mix of NG and legacy names in both expected and actual,
            # so just remove NG from both.
            return not self._port.do_text_results_differ(
                drop_ng_names(exp), drop_ng_names(act))

        # A LayoutNG name mismatch (e.g., LayoutBlockFlow vs. LayoutNGBlockFlow)
        # is treated as a pass.
        if only_ng_names_differ(expected, normalized):
            return []

        # Still different after removing all whitespace: general mismatch.
        if self._port.do_text_results_differ(
                strip_chars(expected, ' \t\n'),
                strip_chars(normalized, ' \t\n')):
            return [test_failures.FailureTextMismatch()]

        # Identical after removing spaces/tabs only: space-only mismatch.
        if not self._port.do_text_results_differ(
                strip_chars(expected, ' \t'),
                strip_chars(normalized, ' \t')):
            return [test_failures.FailureSpacesAndTabsTextMismatch()]

        # Identical after removing newlines only: newline-only mismatch.
        if not self._port.do_text_results_differ(
                strip_chars(expected, '\n'),
                strip_chars(normalized, '\n')):
            return [test_failures.FailureLineBreaksTextMismatch()]

        # Differences in both spaces/tabs and newlines.
        return [test_failures.FailureSpaceTabLineBreakTextMismatch()]
Example #2
0
    def _check_extra_and_missing_baselines(self, expected_driver_output,
                                           driver_output):
        """Check each output kind for missing or unnecessary baselines.

        Inspects the text, image, and audio outputs independently and
        returns the accumulated list of test_failures objects.
        """
        failures = []

        # Text baseline (.txt).
        if not driver_output.text:
            if self._report_extra_baseline(driver_output, '.txt',
                                           'does not produce text result'):
                failures.append(test_failures.FailureTextMismatch())
        elif self._is_all_pass_testharness_text_not_needing_baseline(
                driver_output.text):
            if self._report_extra_baseline(driver_output, '.txt',
                                           'is a all-pass testharness test'):
                # TODO(wangxianzhu): Make this a failure.
                pass
        elif testharness_results.is_testharness_output(driver_output.text):
            # We only need -expected.txt for a testharness test when we
            # expect it to fail or produce additional console output (when
            # -expected.txt is optional), so don't report missing
            # -expected.txt for testharness tests.
            pass
        elif self._reference_files:
            # A reftest's -expected.txt is optional. TODO(wangxianzhu): May
            # let reftests use the standard baseline existence rule.
            pass
        elif not expected_driver_output.text:
            failures.append(test_failures.FailureMissingResult())

        # Image baseline (.png).
        if not driver_output.image_hash:
            if self._report_extra_baseline(driver_output, '.png',
                                           'does not produce image result'):
                failures.append(test_failures.FailureImageHashMismatch())
        elif self._reference_files:
            if self._report_extra_baseline(driver_output, '.png',
                                           'is a reftest'):
                # TODO(wangxianzhu): Make this a failure.
                pass
        else:
            if not expected_driver_output.image:
                failures.append(test_failures.FailureMissingImage())
            if not expected_driver_output.image_hash:
                failures.append(test_failures.FailureMissingImageHash())

        # Audio baseline (.wav).
        if not driver_output.audio:
            if self._report_extra_baseline(driver_output, '.wav',
                                           'does not produce audio result'):
                failures.append(test_failures.FailureAudioMismatch())
        elif not expected_driver_output.audio:
            failures.append(test_failures.FailureMissingAudio())

        return failures
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    """Build a TestResult for |test_name| carrying a failure of |result_type|.

    PASS (or any unrecognized type) produces a result with no failures.
    """
    # Dispatch table from expectation type to failure factory; assumes the
    # expectation constants are distinct values.
    failure_factories = {
        test_expectations.TIMEOUT: test_failures.FailureTimeout,
        test_expectations.AUDIO: test_failures.FailureAudioMismatch,
        test_expectations.TEXT: test_failures.FailureTextMismatch,
        test_expectations.IMAGE: test_failures.FailureImageHashMismatch,
        test_expectations.CRASH: test_failures.FailureCrash,
        test_expectations.LEAK: test_failures.FailureLeak,
    }
    factory = failure_factories.get(result_type)
    failures = [factory()] if factory else []
    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
Example #4
0
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    """Build a TestResult for |test_name| carrying a failure of |result_type|.

    Failure constructors require DriverOutput arguments, so empty dummy
    outputs are supplied; PASS (or any unrecognized type) produces a result
    with no failures.
    """
    actual_output = DriverOutput(None, None, None, None)
    expected_output = DriverOutput(None, None, None, None)
    # Failures that take only the actual output vs. those that also take
    # the expected output; assumes the expectation constants are distinct.
    one_arg_failures = {
        test_expectations.TIMEOUT: test_failures.FailureTimeout,
        test_expectations.CRASH: test_failures.FailureCrash,
        test_expectations.LEAK: test_failures.FailureLeak,
    }
    two_arg_failures = {
        test_expectations.AUDIO: test_failures.FailureAudioMismatch,
        test_expectations.TEXT: test_failures.FailureTextMismatch,
        test_expectations.IMAGE: test_failures.FailureImageHashMismatch,
    }
    if result_type in one_arg_failures:
        failures = [one_arg_failures[result_type](actual_output)]
    elif result_type in two_arg_failures:
        failures = [two_arg_failures[result_type](actual_output,
                                                  expected_output)]
    else:
        failures = []
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
Example #5
0
 def test_results_has_repaint_overlay(self):
     """A text-mismatch failure whose actual output contains invalidation
     data should mark the result as having a repaint overlay."""
     output_with_invalidations = DriverOutput('"invalidations": [', None,
                                              None, None)
     mismatch = test_failures.FailureTextMismatch(output_with_invalidations,
                                                  None)
     result = TestResult('foo', failures=[mismatch])
     self.assertTrue(result.has_repaint_overlay)