def test_is_testharness_output_negative_cases(self):
    """Strings that must NOT be recognized as testharness.js output."""
    non_harness_outputs = [
        'foo',
        '',
        ' ',
        # Header and footer jammed onto a single line do not count.
        'This is a testharness.js-based test. Harness: the test ran to completion.',
        # Non-whitespace text preceding the header line disqualifies it.
        (' This \n'
         'is a testharness.js-based test.\n'
         'Harness: the test ran to completion.'),
    ]
    for output in non_harness_outputs:
        self.assertFalse(testharness_results.is_testharness_output(output))
def _compare_testharness_test(self, expected_driver_output, driver_output):
    """Returns (testharness_completed, testharness_failures)."""
    text = driver_output.text
    # Not handled here when there is no actual text, or when an expected
    # text baseline exists (the regular text comparison will be used).
    if not text or expected_driver_output.text:
        return False, []
    if not testharness_results.is_testharness_output(text):
        return False, []
    failures = []
    if not testharness_results.is_testharness_output_passing(text):
        failures.append(test_failures.FailureTestHarnessAssertion())
    return True, failures
def _check_extra_and_missing_baselines(self, expected_driver_output, driver_output):
    """Checks actual output against baselines that exist (or should exist).

    Reports a failure for each result stream (text, image, audio) whose
    baseline is missing but needed, or present but not expected for this
    kind of test.  Returns a list of test_failures.* instances (possibly
    empty).
    """
    failures = []
    if driver_output.text:
        if self._is_all_pass_testharness_text_not_needing_baseline(
                driver_output.text):
            # All-pass testharness output needs no baseline; an existing
            # -expected.txt is extra.
            if self._report_extra_baseline(
                    driver_output, '.txt', 'is a all-pass testharness test'):
                # TODO(wangxianzhu): Make this a failure.
                pass
        elif testharness_results.is_testharness_output(driver_output.text):
            # We only need -expected.txt for a testharness test when we
            # expect it to fail or produce additional console output (when
            # -expected.txt is optional), so don't report missing
            # -expected.txt for testharness tests.
            pass
        elif self._reference_files:
            # A reftest's -expected.txt is optional. TODO(wangxianzhu): May
            # let reftests use the standard baseline existence rule.
            pass
        elif not expected_driver_output.text:
            # Ordinary test that produced text but has no text baseline.
            failures.append(test_failures.FailureMissingResult())
    elif self._report_extra_baseline(driver_output, '.txt',
                                     'does not produce text result'):
        # A -expected.txt exists but the test produced no text at all.
        failures.append(test_failures.FailureTextMismatch())
    if driver_output.image_hash:
        if self._reference_files:
            # Reftests compare against references, not pixel baselines.
            if self._report_extra_baseline(driver_output, '.png',
                                           'is a reftest'):
                # TODO(wangxianzhu): Make this a failure.
                pass
        else:
            # Pixel test: both the image baseline and its hash are required.
            if not expected_driver_output.image:
                failures.append(test_failures.FailureMissingImage())
            if not expected_driver_output.image_hash:
                failures.append(test_failures.FailureMissingImageHash())
    elif self._report_extra_baseline(driver_output, '.png',
                                     'does not produce image result'):
        # A -expected.png exists but the test produced no image.
        failures.append(test_failures.FailureImageHashMismatch())
    if driver_output.audio:
        if not expected_driver_output.audio:
            # Audio produced but no -expected.wav baseline.
            failures.append(test_failures.FailureMissingAudio())
    elif self._report_extra_baseline(driver_output, '.wav',
                                     'does not produce audio result'):
        # A -expected.wav exists but the test produced no audio.
        failures.append(test_failures.FailureAudioMismatch())
    return failures
def _compare_testharness_test(self, driver_output, expected_driver_output):
    """Returns (testharness_completed, testharness_failures)."""
    # An -expected.txt baseline exists, so the regular text comparison
    # applies instead of testharness result parsing.
    if expected_driver_output.text:
        return False, []
    # A render-tree dump is never testharness output.
    if self._is_render_tree(driver_output.text):
        return False, []
    actual_text = driver_output.text or ''
    if not testharness_results.is_testharness_output(actual_text):
        return False, []
    passed = testharness_results.is_testharness_output_passing(actual_text)
    if passed:
        return True, []
    return True, [test_failures.FailureTestHarnessAssertion()]
def test_is_testharness_output_positive_cases(self):
    """Strings that MUST be recognized as testharness.js output."""
    harness_outputs = [
        # Minimal header + footer.
        ('This is a testharness.js-based test.\n'
         'Harness: the test ran to completion.'),
        # Surrounding blank lines and stray whitespace are tolerated.
        ('\n'
         ' \r This is a testharness.js-based test. \n'
         ' \r \n'
         ' \rHarness: the test ran to completion. \n'
         '\n'),
        # Arbitrary lines between header and footer are allowed.
        ('This is a testharness.js-based test.\n'
         'Foo bar \n'
         ' Harness: the test ran to completion.'),
        # FAIL lines are still testharness output (just not passing).
        ('This is a testharness.js-based test.\n'
         'FAIL: bah \n'
         ' Harness: the test ran to completion.\n'
         '\n'
         '\n'),
    ]
    for output in harness_outputs:
        self.assertTrue(testharness_results.is_testharness_output(output))