def test_is_all_pass_testharness_result_positive_cases(self):
    """Outputs containing only PASS lines must be detected as all-PASS."""
    # Simple single-PASS result.
    single_pass = ('This is a testharness.js-based test.\n'
                   ' PASS: foo bar \n'
                   ' Harness: the test ran to completion.')
    self.assertTrue(
        testharness_results.is_all_pass_testharness_result(single_pass))
    # A PASS message whose text spans multiple lines must still count.
    wrapped_pass = ('This is a testharness.js-based test.\n'
                    'PASS \'grid\' with: grid-template-areas: "a b"\n'
                    '"c d";\n'
                    'Harness: the test ran to completion.\n')
    self.assertTrue(
        testharness_results.is_all_pass_testharness_result(wrapped_pass))
def test_is_all_pass_testharness_result_negative_cases(self):
    """Outputs with warnings or FAIL lines must not be detected as all-PASS."""
    # A console warning disqualifies the result even without failures.
    with_warning = ('This is a testharness.js-based test.\n'
                    'CONSOLE WARNING: This is a warning.\n'
                    'Test ran to completion.')
    self.assertFalse(
        testharness_results.is_all_pass_testharness_result(with_warning))
    # A single FAIL line among PASS lines disqualifies the result.
    with_failure = ('This is a testharness.js-based test.\n'
                    ' PASS: foo bar \n'
                    'FAIL \n'
                    ' Harness: the test ran to completion.')
    self.assertFalse(
        testharness_results.is_all_pass_testharness_result(with_failure))
def test_is_all_pass_testharness_result(self):
    """Covers both accepting and rejecting cases of the all-PASS check."""
    # Console warnings make the result non-all-PASS.
    warning_output = ('This is a testharness.js-based test.\n'
                      'CONSOLE WARNING: This is a warning.\n'
                      'Test ran to completion.')
    self.assertFalse(
        testharness_results.is_all_pass_testharness_result(warning_output))
    # Purely passing output is recognized.
    passing_output = ('This is a testharness.js-based test.\n'
                      ' PASS: foo bar \n'
                      ' Harness: the test ran to completion.')
    self.assertTrue(
        testharness_results.is_all_pass_testharness_result(passing_output))
    # Any FAIL line disqualifies the result.
    failing_output = ('This is a testharness.js-based test.\n'
                      ' PASS: foo bar \n'
                      'FAIL \n'
                      ' Harness: the test ran to completion.')
    self.assertFalse(
        testharness_results.is_all_pass_testharness_result(failing_output))
def _is_all_pass_testharness_result(self, path):
    """Checks if a baseline is an all-PASS testharness.js result.

    Args:
        path: Filesystem path of a candidate baseline file.

    Returns:
        True if the file is a text baseline containing an all-PASS
        testharness result, False otherwise.
    """
    # TODO(robertma): Find an appropriate constant for this (or make one).
    # Only text baselines can be testharness results; bail out early for
    # anything else without touching the filesystem.
    if not path.endswith('-expected.txt'):
        return False
    return is_all_pass_testharness_result(
        self._filesystem.read_text_file(path))
def _save_baseline(self, data, target_baseline):
    """Writes baseline data to disk, unless it is a redundant all-PASS result.

    An all-PASS testharness baseline is never written; if one already
    exists at the target path it is deleted instead, since the absence
    of a baseline implies an expected pass.

    Args:
        data: The new baseline contents (may be empty/None).
        target_baseline: Destination path for the baseline file.
    """
    if not data:
        _log.debug('No baseline data to save.')
        return
    fs = self._tool.filesystem
    if is_all_pass_testharness_result(data):
        _log.debug('The new baseline is a passing testharness result with '
                   'no console warnings or errors, so it will not be saved.')
        # Remove any stale baseline so the implicit all-PASS applies.
        if fs.exists(target_baseline):
            fs.remove(target_baseline)
        return
    fs.maybe_make_directory(fs.dirname(target_baseline))
    fs.write_binary_file(target_baseline, data)
def _remove_all_pass_testharness_baselines(self, test_prefix_list):
    """Removes all of the all-PASS baselines for the given builders and tests.

    In general, for testharness.js tests, the absence of a baseline
    indicates that the test is expected to pass. When rebaselining, new
    all-PASS baselines may be downloaded, but they should not be kept.
    """
    fs = self._tool.filesystem
    for baseline_path in self._all_baseline_paths(test_prefix_list):
        # Only existing text baselines are candidates for removal.
        if not fs.exists(baseline_path):
            continue
        if fs.splitext(baseline_path)[1] != ".txt":
            continue
        if is_all_pass_testharness_result(fs.read_text_file(baseline_path)):
            _log.info("Removing all-PASS testharness baseline: %s",
                      baseline_path)
            fs.remove(baseline_path)
def _remove_all_pass_testharness_baselines(self, test_prefix_list):
    """Removes all of the all-PASS baselines for the given builders and tests.

    In general, for testharness.js tests, the absence of a baseline
    indicates that the test is expected to pass. When rebaselining, new
    all-PASS baselines may be downloaded, but they should not be kept.
    """
    fs = self._tool.filesystem
    candidate_paths = self._all_baseline_paths(test_prefix_list)
    for candidate in candidate_paths:
        # Skip missing files and anything that is not a text baseline.
        is_txt = fs.exists(candidate) and fs.splitext(candidate)[1] == '.txt'
        if not is_txt:
            continue
        if is_all_pass_testharness_result(fs.read_text_file(candidate)):
            _log.info('Removing all-PASS testharness baseline: %s', candidate)
            fs.remove(candidate)
def _remove_all_pass_testharness_baselines(self, test_baseline_set):
    """Removes all of the generic all-PASS baselines for the given tests.

    For testharness.js tests, the absence of a baseline indicates that
    the test is expected to pass. When rebaselining, new all-PASS
    baselines may be downloaded to platform directories. After
    optimization, some of them may be pushed to the root layout test
    directory and become generic baselines, which can be safely removed.
    Non-generic all-PASS baselines need to be preserved; otherwise the
    fallback may be wrong.
    """
    fs = self._tool.filesystem
    for generic_path in self._generic_baseline_paths(test_baseline_set):
        # Only existing .txt baselines can be all-PASS testharness results.
        if not (fs.exists(generic_path)
                and fs.splitext(generic_path)[1] == '.txt'):
            continue
        if is_all_pass_testharness_result(fs.read_text_file(generic_path)):
            _log.info('Removing all-PASS testharness baseline: %s',
                      generic_path)
            fs.remove(generic_path)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Check if a LayoutTest expected file is an all-PASS testharness result.

LayoutTests/PRESUBMIT.py uses this script to identify generic all-PASS
testharness baselines, which are redundant because run-webkit-tests assumes
all-PASS results for testharness tests when baselines are not found.
"""

import sys

from blinkpy.common import add_webkitpy  # pylint: disable=unused-import
from webkitpy.layout_tests.models.testharness_results import is_all_pass_testharness_result

# Collect every argument path whose contents are an all-PASS result.
paths = []
for path in sys.argv[1:]:
    # Use a context manager so each file handle is closed deterministically;
    # the previous open(...).read() leaked the handle until GC.
    with open(path, 'r') as expectation_file:
        content = expectation_file.read()
    if is_all_pass_testharness_result(content):
        paths.append(path)

if paths:
    sys.stderr.write(
        '* The following files are passing testharness results without console error messages, they should be removed:\n ')
    sys.stderr.write('\n '.join(paths))
    sys.stderr.write('\n')
    # A string argument makes sys.exit print the message and exit non-zero,
    # which fails the presubmit check.
    sys.exit("ERROR: found passing testharness results without console error messages.")