def test_parser_initialization_non_existent_file(self):
    """A parser built on a missing file records the path but parses nothing."""
    missing_path = 'some/bogus/path.html'
    parser = TestParser(missing_path, MockHost())
    self.assertEqual(parser.filename, missing_path)
    self.assertIsNone(parser.test_doc)
    self.assertIsNone(parser.ref_doc)
def find_importable_tests(self, directory):
    """Scans |directory| for W3C tests and records what should be imported.

    Populates self.import_list with one entry per directory that has files
    to copy; each entry carries the copy list plus reftest / jstest / total
    counts for that directory.
    """
    def should_keep_subdir(filesystem, path):
        # Directory filter handed to filesystem.dirs_under(). Downloaded
        # test suites are kept wholesale; otherwise hidden directories and
        # a few well-known non-test directories are pruned.
        if self._importing_downloaded_tests:
            return True
        subdir = path[len(directory):]
        DIRS_TO_SKIP = ('work-in-progress', 'tools', 'support')
        should_skip = filesystem.basename(subdir).startswith('.') or (subdir in DIRS_TO_SKIP)
        return not should_skip

    directories = self.filesystem.dirs_under(directory, should_keep_subdir)
    for root in directories:
        _log.info('Scanning ' + root + '...')
        total_tests = 0
        reftests = 0
        jstests = 0
        copy_list = []

        for filename in self.filesystem.listdir(root):
            if self.filesystem.isdir(self.filesystem.join(root, filename)):
                continue
            # FIXME: This block should really be a separate function, but the early-continues make that difficult.
            if self.should_skip_file(filename):
                continue

            fullpath = self.filesystem.join(root, filename)

            mimetype = mimetypes.guess_type(fullpath)
            if not 'html' in str(mimetype[0]) and not 'application/xhtml+xml' in str(mimetype[0]) and not 'application/xml' in str(mimetype[0]):
                # Not an (X)HTML/XML document, so it cannot carry test
                # metadata; copy it through unchanged.
                copy_list.append({'src': fullpath, 'dest': filename})
                continue

            test_parser = TestParser(vars(self.options), filename=fullpath, host=self.host)
            test_info = test_parser.analyze_test()
            if test_info is None:
                # This is probably a resource file, but we should generate WPT manifest instead and get the list of resource files from it.
                if not self._is_in_resources_directory(fullpath):
                    self._potential_test_resource_files.append(fullpath)
                copy_list.append({'src': fullpath, 'dest': filename})
                continue
            elif self._is_in_resources_directory(fullpath):
                _log.warning('%s is a test located in a "resources" folder. This test will be skipped by WebKit test runners.', fullpath)

            # Tests flagged as manual are not imported.
            if 'manualtest' in test_info.keys():
                continue

            if 'slow' in test_info:
                self._slow_tests.append(fullpath)

            if 'referencefile' in test_info.keys():
                # Skip it since, the corresponding reference test should have a link to this file
                continue

            if 'reference' in test_info.keys():
                reftests += 1
                total_tests += 1
                test_basename = self.filesystem.basename(test_info['test'])

                # Add the ref file, following WebKit style.
                # FIXME: Ideally we'd support reading the metadata
                # directly rather than relying on a naming convention.
                # Using a naming convention creates duplicate copies of the
                # reference files.
                ref_file = self.filesystem.splitext(test_basename)[0] + '-expected'
                # The extension is taken from the reference file, which may
                # differ from the test's own extension.
                ref_file += self.filesystem.splitext(test_info['reference'])[1]

                copy_list.append({'src': test_info['reference'], 'dest': ref_file, 'reference_support_info': test_info['reference_support_info']})
                copy_list.append({'src': test_info['test'], 'dest': filename})
            elif 'jstest' in test_info.keys():
                jstests += 1
                total_tests += 1
                copy_list.append({'src': fullpath, 'dest': filename})
            else:
                total_tests += 1
                copy_list.append({'src': fullpath, 'dest': filename})

        if copy_list:
            # Only add this directory to the list if there's something to import
            self.import_list.append({'dirname': root, 'copy_list': copy_list, 'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
def find_importable_tests(self):
    """Walks through the source directory to find what tests should be imported.

    This function sets self.import_list, which contains information about how
    many tests are being imported, and their source and destination paths.
    """
    paths_to_skip = self.find_paths_to_skip()

    for root, dirs, files in self.filesystem.walk(self.source_repo_path):
        cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
        _log.debug('Scanning %s...', cur_dir)
        total_tests = 0
        reftests = 0
        jstests = 0

        # Files in 'tools' are not for browser testing, so we skip them.
        # See: http://web-platform-tests.org/writing-tests/general-guidelines.html#tools
        dirs_to_skip = ('.git', 'test-plan', 'tools')

        # We copy all files in 'support', including HTML without metadata.
        # See: http://web-platform-tests.org/writing-tests/general-guidelines.html#support-files
        dirs_to_include = ('resources', 'support')

        if dirs:
            # Removing entries from |dirs| in place prunes the walk below them.
            for name in dirs_to_skip:
                if name in dirs:
                    dirs.remove(name)

            for path in paths_to_skip:
                path_base = path.replace(self.dest_dir_name + '/', '')
                path_base = path_base.replace(cur_dir, '')
                path_full = self.filesystem.join(root, path_base)
                if path_base in dirs:
                    _log.info('Skipping: %s', path_full)
                    dirs.remove(path_base)
                    if self.import_in_place:
                        self.filesystem.rmtree(path_full)

        copy_list = []

        for filename in files:
            path_full = self.filesystem.join(root, filename)
            # Rebase the path so it can be matched against paths_to_skip,
            # which are expressed relative to the destination tree.
            path_base = path_full.replace(self.source_repo_path + '/', '')
            path_base = self.destination_directory.replace(self.layout_tests_dir + '/', '') + '/' + path_base
            if path_base in paths_to_skip:
                if self.import_in_place:
                    _log.debug('Pruning: %s', path_base)
                    self.filesystem.remove(path_full)
                    continue
                else:
                    continue
            # FIXME: This block should really be a separate function, but the early-continues make that difficult.
            if filename.startswith('.') or filename.endswith('.pl'):
                _log.debug('Skipping: %s', path_full)
                _log.debug('  Reason: Hidden files and perl scripts are not necessary.')
                continue
            if filename == 'OWNERS' or filename == 'reftest.list':
                # See http://crbug.com/584660 and http://crbug.com/582838.
                _log.debug('Skipping: %s', path_full)
                _log.debug('  Reason: This file may cause Chromium presubmit to fail.')
                continue

            mimetype = mimetypes.guess_type(path_full)
            if ('html' not in str(mimetype[0]) and
                    'application/xhtml+xml' not in str(mimetype[0]) and
                    'application/xml' not in str(mimetype[0])):
                # Not an (X)HTML/XML document; copy it through unchanged.
                copy_list.append({'src': path_full, 'dest': filename})
                continue

            if self.filesystem.basename(root) in dirs_to_include:
                copy_list.append({'src': path_full, 'dest': filename})
                continue

            test_parser = TestParser(path_full, self.host)
            test_info = test_parser.analyze_test()
            if test_info is None:
                copy_list.append({'src': path_full, 'dest': filename})
                continue

            if 'reference' in test_info.keys():
                ref_path_full = test_info['reference']
                if not self.filesystem.exists(ref_path_full):
                    _log.warning('Skipping: %s', path_full)
                    _log.warning('  Reason: Ref file "%s" was not found.', ref_path_full)
                    continue

                if not self.is_wpt:
                    # For csswg-test, we still need to add a ref file
                    # using WebKit naming conventions. See crbug.com/268729.
                    # FIXME: Remove this when csswg-test is merged into wpt.
                    test_basename = self.filesystem.basename(test_info['test'])
                    ref_file = self.filesystem.splitext(test_basename)[0] + '-expected'
                    # The extension comes from the reference, not the test.
                    ref_file += self.filesystem.splitext(ref_path_full)[1]
                    copy_list.append({
                        'src': test_info['reference'],
                        'dest': ref_file,
                        'reference_support_info': test_info['reference_support_info'],
                    })

                reftests += 1
                total_tests += 1
                copy_list.append({'src': test_info['test'], 'dest': filename})
            elif 'jstest' in test_info.keys():
                jstests += 1
                total_tests += 1
                copy_list.append({'src': path_full, 'dest': filename, 'is_jstest': True})
            # NOTE(review): parsed tests that are neither reftests nor
            # jstests fall through here without being copied or counted,
            # unlike the sibling importer variants -- confirm intentional.

        if copy_list:
            # Only add this directory to the list if there's something to import
            self.import_list.append({'dirname': root, 'copy_list': copy_list, 'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
def test_analyze_non_test_file_returns_none(self):
    """analyze_test() yields None for a document carrying no test metadata."""
    parser = TestParser('/some/madeup/path/somefile.html', MockHost())
    result = parser.analyze_test(test_contents='<html>')
    self.assertIsNone(result, 'test should have been skipped')
def find_importable_tests(self):
    """Walks through the source directory to find what tests should be imported.

    This function sets self.import_list, which contains information about how
    many tests are being imported, and their source and destination paths.
    """
    paths_to_skip = self.find_paths_to_skip()

    for root, dirs, files in self.filesystem.walk(self.source_repo_path):
        cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
        _log.info('  scanning ' + cur_dir + '...')
        total_tests = 0
        reftests = 0
        jstests = 0

        # Files in 'tools' are not for browser testing, so we skip them.
        # See: http://testthewebforward.org/docs/test-format-guidelines.html#tools
        DIRS_TO_SKIP = ('.git', 'test-plan', 'tools')

        # We copy all files in 'support', including HTML without metadata.
        # See: http://testthewebforward.org/docs/test-format-guidelines.html#support-files
        DIRS_TO_INCLUDE = ('resources', 'support')

        if dirs:
            # Removing entries from |dirs| in place prunes the walk below them.
            for d in DIRS_TO_SKIP:
                if d in dirs:
                    dirs.remove(d)

            for path in paths_to_skip:
                path_base = path.replace(self.options.destination + '/', '')
                path_base = path_base.replace(cur_dir, '')
                path_full = self.filesystem.join(root, path_base)
                if path_base in dirs:
                    dirs.remove(path_base)
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s", path_base)
                        self.filesystem.rmtree(path_full)
                    else:
                        _log.info("  skipping %s", path_base)

        copy_list = []

        for filename in files:
            path_full = self.filesystem.join(root, filename)
            # Rebase the path so it can be matched against paths_to_skip,
            # which are expressed relative to the destination tree.
            path_base = path_full.replace(self.source_repo_path + '/', '')
            path_base = self.destination_directory.replace(
                self.layout_tests_dir + '/', '') + '/' + path_base
            if path_base in paths_to_skip:
                if not self.options.dry_run and self.import_in_place:
                    _log.info("  pruning %s", path_base)
                    self.filesystem.remove(path_full)
                    continue
                else:
                    continue
            # FIXME: This block should really be a separate function, but the early-continues make that difficult.
            if filename.startswith('.') or filename.endswith('.pl'):
                # The w3cs repos may contain perl scripts, which we don't care about.
                continue
            if filename == 'OWNERS' or filename == 'reftest.list':
                # These files fail our presubmits.
                # See http://crbug.com/584660 and http://crbug.com/582838.
                continue

            fullpath = self.filesystem.join(root, filename)

            mimetype = mimetypes.guess_type(fullpath)
            if ('html' not in str(mimetype[0]) and
                    'application/xhtml+xml' not in str(mimetype[0]) and
                    'application/xml' not in str(mimetype[0])):
                # Not an (X)HTML/XML document; copy it through unchanged.
                copy_list.append({'src': fullpath, 'dest': filename})
                continue

            if self.filesystem.basename(root) in DIRS_TO_INCLUDE:
                copy_list.append({'src': fullpath, 'dest': filename})
                continue

            test_parser = TestParser(fullpath, self.host, vars(self.options))
            test_info = test_parser.analyze_test()
            if test_info is None:
                continue

            if self.path_too_long(path_full):
                _log.warning(
                    '%s skipped due to long path. '
                    'Max length from repo base %d chars; see http://crbug.com/609871.',
                    path_full, MAX_PATH_LENGTH)
                continue

            if 'reference' in test_info.keys():
                test_basename = self.filesystem.basename(test_info['test'])
                # Add the ref file, following WebKit style.
                # FIXME: Ideally we'd support reading the metadata
                # directly rather than relying on a naming convention.
                # Using a naming convention creates duplicate copies of the
                # reference files.
                ref_file = self.filesystem.splitext(
                    test_basename)[0] + '-expected'
                # Make sure to use the extension from the *reference*, not
                # from the test, because at least flexbox tests use XHTML
                # references but HTML tests.
                ref_file += self.filesystem.splitext(
                    test_info['reference'])[1]

                if self.path_too_long(path_full.replace(
                        filename, ref_file)):
                    _log.warning(
                        '%s skipped because path of ref file %s would be too long. '
                        'Max length from repo base %d chars; see http://crbug.com/609871.',
                        path_full, ref_file, MAX_PATH_LENGTH)
                    continue

                reftests += 1
                total_tests += 1
                copy_list.append({
                    'src': test_info['reference'],
                    'dest': ref_file,
                    'reference_support_info': test_info['reference_support_info']
                })
                copy_list.append({
                    'src': test_info['test'],
                    'dest': filename
                })
            elif 'jstest' in test_info.keys():
                jstests += 1
                total_tests += 1
                copy_list.append({
                    'src': fullpath,
                    'dest': filename,
                    'is_jstest': True
                })
            else:
                total_tests += 1
                copy_list.append({'src': fullpath, 'dest': filename})

        if copy_list:
            # Only add this directory to the list if there's something to import
            self.import_list.append({
                'dirname': root,
                'copy_list': copy_list,
                'reftests': reftests,
                'jstests': jstests,
                'total_tests': total_tests
            })
def find_importable_tests(self, directory):
    """Walks |directory| to find tests to import and fills in self.import_list.

    Each entry in self.import_list describes one directory: the files to
    copy and how many reftests / jstests / total tests it contains.
    """
    # FIXME: use filesystem
    paths_to_skip = self.find_paths_to_skip()

    for root, dirs, files in os.walk(directory):
        cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
        _log.info('  scanning ' + cur_dir + '...')
        total_tests = 0
        reftests = 0
        jstests = 0

        DIRS_TO_SKIP = ('.git', '.hg')
        if dirs:
            # Removing entries from |dirs| in place prunes the walk below them.
            for d in DIRS_TO_SKIP:
                if d in dirs:
                    dirs.remove(d)

            for path in paths_to_skip:
                path_base = path.replace(self.options.destination + '/', '')
                path_base = path_base.replace(cur_dir, '')
                path_full = self.filesystem.join(root, path_base)
                if path_base in dirs:
                    dirs.remove(path_base)
                    if not self.options.dry_run and self.import_in_place:
                        _log.info("  pruning %s" % path_base)
                        self.filesystem.rmtree(path_full)
                    else:
                        _log.info("  skipping %s" % path_base)

        copy_list = []

        for filename in files:
            path_full = self.filesystem.join(root, filename)
            path_base = path_full.replace(self.layout_tests_dir + '/', '')
            if path_base in paths_to_skip:
                if not self.options.dry_run and self.import_in_place:
                    _log.info("  pruning %s" % path_base)
                    self.filesystem.remove(path_full)
                    continue
                else:
                    continue
            # FIXME: This block should really be a separate function, but the early-continues make that difficult.
            if filename.startswith('.') or filename.endswith('.pl'):
                continue  # For some reason the w3c repo contains random perl scripts we don't care about.

            fullpath = os.path.join(root, filename)

            mimetype = mimetypes.guess_type(fullpath)
            if ('html' not in str(mimetype[0]) and
                    'application/xhtml+xml' not in str(mimetype[0]) and
                    'application/xml' not in str(mimetype[0])):
                # Not an (X)HTML/XML document, so it cannot carry test
                # metadata; copy it through unchanged.
                copy_list.append({'src': fullpath, 'dest': filename})
                continue

            if root.endswith('resources'):
                # Support files are copied verbatim, never parsed as tests.
                copy_list.append({'src': fullpath, 'dest': filename})
                continue

            test_parser = TestParser(vars(self.options), filename=fullpath)
            test_info = test_parser.analyze_test()
            if test_info is None:
                continue

            if 'reference' in test_info.keys():
                reftests += 1
                total_tests += 1
                test_basename = os.path.basename(test_info['test'])

                # Add the ref file, following WebKit style.
                # FIXME: Ideally we'd support reading the metadata
                # directly rather than relying on a naming convention.
                # Using a naming convention creates duplicate copies of the
                # reference files.
                ref_file = os.path.splitext(test_basename)[0] + '-expected'
                # Take the extension from the *reference* file, not from the
                # test: some suites pair XHTML references with HTML tests,
                # and using the test's extension gave the copied -expected
                # file the wrong type.
                ref_file += os.path.splitext(test_info['reference'])[1]

                copy_list.append({'src': test_info['reference'], 'dest': ref_file,
                                  'reference_support_info': test_info['reference_support_info']})
                copy_list.append({'src': test_info['test'], 'dest': filename})
            elif 'jstest' in test_info.keys():
                jstests += 1
                total_tests += 1
                copy_list.append({'src': fullpath, 'dest': filename})
            else:
                total_tests += 1
                copy_list.append({'src': fullpath, 'dest': filename})

        if copy_list:
            # Only add this directory to the list if there's something to import
            self.import_list.append({'dirname': root, 'copy_list': copy_list,
                                     'reftests': reftests, 'jstests': jstests,
                                     'total_tests': total_tests})
def find_importable_tests(self, directory):
    """Walks |directory| to find tests to import and fills in self.import_list.

    Each entry in self.import_list describes one directory: the files to
    copy and how many reftests / jstests / total tests it contains.
    """
    # FIXME: use filesystem
    for root, dirs, files in os.walk(directory):
        _log.info('Scanning ' + root + '...')
        total_tests = 0
        reftests = 0
        jstests = 0

        # "archive" and "data" dirs are internal csswg things that live in every approved directory.
        # FIXME: skip 'incoming' tests for now, but we should rework the 'test_status' concept and
        # support reading them as well.
        DIRS_TO_SKIP = ('.git', '.hg', 'data', 'archive', 'incoming')
        # Removing entries from |dirs| in place prunes the walk below them.
        for d in DIRS_TO_SKIP:
            if d in dirs:
                dirs.remove(d)

        copy_list = []

        for filename in files:
            # FIXME: This block should really be a separate function, but the early-continues make that difficult.
            if filename.startswith('.') or filename.endswith('.pl'):
                continue  # For some reason the w3c repo contains random perl scripts we don't care about.

            fullpath = os.path.join(root, filename)

            mimetype = mimetypes.guess_type(fullpath)
            if 'html' not in str(mimetype[0]) and 'xml' not in str(mimetype[0]):
                # Not an (X)HTML/XML document, so it cannot carry test
                # metadata; copy it through unchanged.
                copy_list.append({'src': fullpath, 'dest': filename})
                continue

            test_parser = TestParser(vars(self.options), filename=fullpath)
            test_info = test_parser.analyze_test()
            if test_info is None:
                continue

            if 'reference' in test_info.keys():
                reftests += 1
                total_tests += 1
                test_basename = os.path.basename(test_info['test'])

                # Add the ref file, following WebKit style.
                # FIXME: Ideally we'd support reading the metadata
                # directly rather than relying on a naming convention.
                # Using a naming convention creates duplicate copies of the
                # reference files.
                ref_file = os.path.splitext(test_basename)[0] + '-expected'
                # Take the extension from the *reference* file, not from the
                # test: some suites pair XHTML references with HTML tests,
                # and using the test's extension gave the copied -expected
                # file the wrong type.
                ref_file += os.path.splitext(test_info['reference'])[1]

                copy_list.append({'src': test_info['reference'], 'dest': ref_file})
                copy_list.append({'src': test_info['test'], 'dest': filename})

                # Update any support files that need to move as well to remain relative to the -expected file.
                if 'refsupport' in test_info.keys():
                    for support_file in test_info['refsupport']:
                        source_file = os.path.join(os.path.dirname(test_info['reference']), support_file)
                        source_file = os.path.normpath(source_file)

                        # Keep the dest as it was
                        to_copy = {'src': source_file, 'dest': support_file}

                        # Only add it once
                        if not (to_copy in copy_list):
                            copy_list.append(to_copy)
            elif 'jstest' in test_info.keys():
                jstests += 1
                total_tests += 1
                copy_list.append({'src': fullpath, 'dest': filename})
            else:
                total_tests += 1
                copy_list.append({'src': fullpath, 'dest': filename})

        if not total_tests:
            # We can skip the support directory if no tests were found.
            if 'support' in dirs:
                dirs.remove('support')

        if copy_list:
            # Only add this directory to the list if there's something to import
            self.import_list.append({'dirname': root, 'copy_list': copy_list,
                                     'reftests': reftests, 'jstests': jstests,
                                     'total_tests': total_tests})
def test_load_file_with_non_ascii_tags(self):
    """A file whose tag names contain non-ASCII bytes produces no test_doc."""
    path = '/some/path.xml'
    host = MockHost()
    host.filesystem.files[path] = '<d\xc3\x98dd></d\xc3\x98dd>'
    parser = TestParser(path, host)
    self.assertEqual(parser.filename, path)
    self.assertIsNone(parser.test_doc)
def test_analyze_non_html_file(self):
    """Tests analyze_test() with a file that has no html"""
    # FIXME: use a mock filesystem
    source_path = os.path.join(os.path.dirname(__file__), 'test_parser.py')
    parser = TestParser(source_path, MockHost())
    self.assertEqual(parser.analyze_test(), None,
                     'no tests should have been found in this file')