def testHasNewFailures(self):
  """Exercises CompareFailures.HasNewFailures() across the interesting
  transitions: unchanged results, pass->fail, fail->pass, and a
  brand-new test that fails on its first run."""
  test_set = self.GetTestFiles()

  def has_new(failure_map):
    # Build a fresh comparison of failure_map against the baselines.
    comparator = compare_failures.CompareFailures(
        test_set, failure_map, set(), set(), self.GetTmpDir(), False)
    return comparator.HasNewFailures()

  # Identical results: nothing new to report.
  baseline = self.GetTestFailures()
  self.failUnless(not has_new(baseline))

  # A previously-passing test now fails: that IS a new failure.
  newly_failing = os.path.join(path_utils.LayoutTestsDir(),
                               'fast', 'pass1.html')
  baseline[newly_failing] = [test_failures.FailureTextMismatch(None)]
  self.failUnless(has_new(baseline))

  # A previously-failing test now passes: not a new failure.
  recovered = self.GetTestFailures()
  del recovered[os.path.join(path_utils.LayoutTestsDir(),
                             'fast', 'bar', 'fail2.html')]
  self.failUnless(not has_new(recovered))

  # A test that did not exist before fails: also not counted as a new
  # failure.
  added_test = os.path.join(path_utils.LayoutTestsDir(), "new-test.html")
  test_set.add(added_test)
  with_new_test = self.GetTestFailures()
  with_new_test[added_test] = [test_failures.FailureCrash()]
  self.failUnless(not has_new(with_new_test))
def testFailureToCrash(self):
  """A known-failing test that starts crashing must not be added to the
  crash baseline nor removed from the failure list."""
  failure_map = self.GetTestFailures()
  # Upgrade one already-failing test to a crash; the on-disk
  # expected-crashes.txt file must stay untouched.
  crashing = os.path.join(path_utils.LayoutTestsDir(),
                          'fast', 'foo', 'fail1.html')
  failure_map[crashing] = [test_failures.FailureCrash()]
  self.CheckNoChanges(failure_map)
def testPassingToFailure(self):
  """A newly failing test must not be added to the baseline files."""
  failure_map = self.GetTestFailures()
  # pass -> text mismatch: expected-failures.txt must stay untouched.
  target = os.path.join(path_utils.LayoutTestsDir(), 'fast', 'pass1.html')
  failure_map[target] = [test_failures.FailureTextMismatch(None)]
  self.CheckNoChanges(failure_map)
  # pass -> crash: same story for expected-crashes.txt.
  failure_map[target] = [test_failures.FailureCrash()]
  self.CheckNoChanges(failure_map)
def testCrashToFailure(self):
  """A crash downgrading to a plain failure is an improvement, so both
  the crash and failure lists should be rewritten."""
  failure_map = self.GetTestFailures()
  downgraded = os.path.join(path_utils.LayoutTestsDir(),
                            'fast', 'bar', 'betz', 'crash3.html')
  failure_map[downgraded] = [test_failures.FailureTextMismatch(None)]
  comparator = compare_failures.CompareFailures(
      self.GetTestFiles(), failure_map, set(), set(),
      self.GetTmpDir(), False)
  comparator.UpdateFailuresOnDisk()
  self.CheckOutputWithExpectedFiles('expected-passing.txt',
                                    'expected-failures-new-crash.txt',
                                    'expected-crashes-new-passing.txt')
def testCrashToPassing(self):
  """A crashing test that now passes is an improvement, so the crashes
  file should be updated."""
  failure_map = self.GetTestFailures()
  # Dropping the entry makes crash3.html look like it passed this run.
  recovered = os.path.join(path_utils.LayoutTestsDir(),
                           'fast', 'bar', 'betz', 'crash3.html')
  del failure_map[recovered]
  comparator = compare_failures.CompareFailures(
      self.GetTestFiles(), failure_map, set(), set(),
      self.GetTmpDir(), False)
  comparator.UpdateFailuresOnDisk()
  self.CheckOutputWithExpectedFiles('expected-passing-new-passing.txt',
                                    'expected-failures.txt',
                                    'expected-crashes-new-passing.txt')
def _ExpandTests(self, test_list_path):
  """Return every test in self._full_test_list that falls under the
  given test specification.

  Args:
    test_list_path: a test file or directory path relative to the
        layout tests directory.

  Returns:
    A list of the matching absolute, normalized test paths.
  """
  # Convert the test specification to an absolute, normalized path and
  # make sure directories end with the OS path separator so that
  # e.g. 'fast/foo' cannot accidentally prefix-match 'fast/foobar/...'.
  path = os.path.join(path_utils.LayoutTestsDir(test_list_path),
                      test_list_path)
  path = os.path.normpath(path)
  if os.path.isdir(path):
    path = os.path.join(path, '')
  # This is O(n) per call and is invoked for every entry in the test
  # lists (O(n*m) overall). It has not been a performance issue so far;
  # re-measure if test-list reading ever gets slow.
  return [test for test in self._full_test_list if test.startswith(path)]
def testNewTestPass(self):
  """After a merge, brand-new passing tests must be folded into the
  passing baseline."""
  file_set = self.GetTestFiles()
  added_test = os.path.join(path_utils.LayoutTestsDir(), "new-test.html")
  file_set.add(added_test)
  failure_map = self.GetTestFailures()
  # The new file passes, so only expected-passing.txt should change.
  comparator = compare_failures.CompareFailures(
      file_set, failure_map, set(), set(), self.GetTmpDir(), False)
  comparator.UpdateFailuresOnDisk()
  self.CheckOutputWithExpectedFiles('expected-passing-new-test.txt',
                                    'expected-failures.txt',
                                    'expected-crashes.txt')
def GetTestFiles(self):
  """Return the set of test paths this suite operates on: the expected
  crashes and failures along with two passing tests.

  The paths were previously built with hard-coded '\\' separators,
  which produced paths that could never match the os.path.join()-built
  paths the test methods compare against on non-Windows platforms.
  Building each path from its components keeps the Windows result
  byte-identical while being portable.
  """
  layout_dir = path_utils.LayoutTestsDir()
  relative_parts = [
    ('fast', 'pass1.html'),
    ('fast', 'foo', 'pass2.html'),
    ('fast', 'foo', 'crash1.html'),
    ('fast', 'bar', 'crash2.html'),
    ('fast', 'bar', 'betz', 'crash3.html'),
    ('fast', 'foo', 'fail1.html'),
    ('fast', 'bar', 'fail2.html'),
    ('fast', 'bar', 'betz', 'fail3.html'),
  ]
  return set([os.path.join(layout_dir, *parts)
              for parts in relative_parts])
def testFailureToPassing(self):
  """A failing test that now passes is an improvement, so the failure
  list should be updated."""
  failure_map = self.GetTestFailures()
  # Removing the entry from the dictionary makes fail2.html look like
  # it passed this run.
  recovered = os.path.join(path_utils.LayoutTestsDir(),
                           'fast', 'bar', 'fail2.html')
  del failure_map[recovered]
  comparator = compare_failures.CompareFailures(
      self.GetTestFiles(), failure_map, set(), set(),
      self.GetTmpDir(), False)
  comparator.UpdateFailuresOnDisk()
  self.CheckOutputWithExpectedFiles('expected-passing-new-passing2.txt',
                                    'expected-failures-new-passing.txt',
                                    'expected-crashes.txt')
def testGenerateNewBaseline(self):
  """Baseline expected-*.txt files should be generated when they are
  missing from disk (or when the user explicitly asks for new files)."""
  failure_map = self.GetTestFailures()

  # Delete every baseline so the comparison has nothing to diff against
  # and must generate fresh files.
  for baseline in ('expected-passing.txt', 'expected-failures.txt',
                   'expected-crashes.txt'):
    os.remove(os.path.join(self.GetTmpDir(), baseline))

  # Introduce one new failure (and hence one fewer passing test) before
  # forcing the baselines to be rewritten.
  newly_failing = os.path.join(path_utils.LayoutTestsDir(),
                               'fast', 'pass1.html')
  failure_map[newly_failing] = [test_failures.FailureTextMismatch(None)]

  comparator = compare_failures.CompareFailures(
      self.GetTestFiles(), failure_map, set(), set(),
      self.GetTmpDir(), False)
  comparator.UpdateFailuresOnDisk()
  self.CheckOutputWithExpectedFiles('expected-passing-new-baseline.txt',
                                    'expected-failures-added.txt',
                                    'expected-crashes.txt')
def _Read(self, path): """For each test in an expectations file, generate the expectations for it. """ lineno = 0 for line in open(path): lineno += 1 line = StripComments(line) if not line: continue modifiers = set() if line.find(':') is -1: test_and_expectations = line else: parts = line.split(':') test_and_expectations = parts[1] options = self._GetOptionsList(parts[0]) if not self._HasValidModifiersForCurrentPlatform( options, lineno, test_and_expectations, modifiers): continue tests_and_expecation_parts = test_and_expectations.split('=') if (len(tests_and_expecation_parts) is not 2): self._AddError(lineno, 'Missing expectations.', test_and_expectations) continue test_list_path = tests_and_expecation_parts[0].strip() expectations = self._ParseExpectations( tests_and_expecation_parts[1], lineno, test_list_path) if 'slow' in options and TIMEOUT in expectations: self._AddError( lineno, 'A test cannot be both slow and timeout. If the ' 'test times out indefinitely, the it should be listed as timeout.', test_and_expectations) full_path = os.path.join(path_utils.LayoutTestsDir(test_list_path), test_list_path) full_path = os.path.normpath(full_path) # WebKit's way of skipping tests is to add a -disabled suffix. # So we should consider the path existing if the path or the -disabled # version exists. 
if not os.path.exists(full_path) and not \ os.path.exists(full_path + '-disabled'): # Log a non fatal error here since you hit this case any time you # update test_expectations.txt without syncing the LayoutTests # directory self._LogNonFatalError(lineno, 'Path does not exist.', test_list_path) continue if not self._full_test_list: tests = [test_list_path] else: tests = self._ExpandTests(test_list_path) self._AddTests(tests, expectations, test_list_path, lineno, modifiers) if len(self._errors) or len(self._non_fatal_errors): if self._is_debug_mode: build_type = 'DEBUG' else: build_type = 'RELEASE' print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \ % (self._platform.upper(), build_type) for error in self._non_fatal_errors: logging.error(error) if len(self._errors): raise SyntaxError('\n'.join(map(str, self._errors)))
def GetStartHttpdCommand(self, output_dir, apache2=False):
  """Prepares the config file and output directory to start an httpd
  server.

  Returns a list of strings containing the server's command line+args.

  Creates the test output directory and generates an httpd.conf (or
  httpd2.conf for Apache 2 if apache2 is True) file in it that contains
  the necessary <VirtualHost> directives for running all the http
  tests.

  WebKit http tests expect the DocumentRoot to be in
  LayoutTests/http/tests/, but that prevents us from running http tests
  in chrome/ or pending/. So we run two virtual hosts, one on ports
  8000 and 8080 for WebKit, and one on port 8081 with a much broader
  DocumentRoot for everything else. (Note that WebKit http tests that
  have been modified and are temporarily in pending/ will still fail,
  if they expect the DocumentRoot to be located as described above.)

  Args:
    output_dir: the path to the test output directory. It will be
        created.
    apache2: boolean if true will cause this function to return start
        command for Apache 2.x instead of Apache 1.3.x
  """
  layout_dir = google.platform_utils_win.GetCygwinPath(
      layout_package_path_utils.LayoutTestsDir())
  main_document_root = os.path.join(layout_dir, "LayoutTests",
                                    "http", "tests")
  pending_document_root = os.path.join(layout_dir, "pending",
                                       "http", "tests")
  chrome_document_root = layout_dir
  apache_config_dir = google.httpd_utils.ApacheConfigDir(self._base_dir)
  mime_types_path = os.path.join(apache_config_dir, "mime.types")

  conf_file_name = "httpd.conf"
  if apache2:
    conf_file_name = "httpd2.conf"

  # Make the test output directory and place the generated httpd.conf
  # in it.
  orig_httpd_conf_path = os.path.join(apache_config_dir, conf_file_name)
  httpd_conf_path = os.path.join(output_dir, conf_file_name)
  google.path_utils.MaybeMakeDirectory(output_dir)

  # Read the stock config; close the handle explicitly instead of
  # leaking it via open(...).read().
  conf_file = open(orig_httpd_conf_path)
  try:
    httpd_conf = conf_file.read()
  finally:
    conf_file.close()

  # Append one <VirtualHost> section per port we serve.
  httpd_conf = (httpd_conf +
                self._GetVirtualHostConfig(main_document_root, 8000) +
                self._GetVirtualHostConfig(main_document_root, 8080) +
                self._GetVirtualHostConfig(pending_document_root, 9000) +
                self._GetVirtualHostConfig(pending_document_root, 9080) +
                self._GetVirtualHostConfig(chrome_document_root, 8081))
  if apache2:
    # Apache 2 additionally serves the SSL variants of the test hosts.
    httpd_conf += self._GetVirtualHostConfig(main_document_root, 8443,
                                             ssl=True)
    httpd_conf += self._GetVirtualHostConfig(pending_document_root, 9443,
                                             ssl=True)

  out_file = open(httpd_conf_path, 'wb')
  try:
    out_file.write(httpd_conf)
  finally:
    out_file.close()

  return google.platform_utils_win.PlatformUtility.GetStartHttpdCommand(
      self, output_dir, httpd_conf_path, mime_types_path,
      apache2=apache2)