def setUp(self):
    """Creates two real git checkouts: a standalone repo and a clone tracking it."""
    self.executive = Executive()
    self.filesystem = FileSystem()
    self.original_cwd = self.filesystem.getcwd()

    # Set up fresh git repository with one commit.
    self.untracking_checkout_path = self._mkdtemp(
        suffix='-git_unittest_untracking')
    self._run(['git', 'init', self.untracking_checkout_path])
    self._chdir(self.untracking_checkout_path)
    # Explicitly create the default branch instead of relying on
    # init.defaultBranch (git >= 2.28 may default to 'main'), so tests
    # that expect 'master' pass regardless of the git version. We don't
    # use the newer `git init --initial-branch` flag to stay compatible
    # with older versions of git.
    self._run(['git', 'checkout', '-b', 'master'])
    self._set_user_config()
    self._write_text_file('foo_file', 'foo')
    self._run(['git', 'add', 'foo_file'])
    self._run(['git', 'commit', '-am', 'dummy commit'])
    self.untracking_git = Git(cwd=self.untracking_checkout_path,
                              filesystem=self.filesystem,
                              executive=self.executive)

    # Then set up a second git repo that tracks the first one.
    self.tracking_git_checkout_path = self._mkdtemp(
        suffix='-git_unittest_tracking')
    self._run([
        'git', 'clone', '--quiet', self.untracking_checkout_path,
        self.tracking_git_checkout_path
    ])
    self._chdir(self.tracking_git_checkout_path)
    self._set_user_config()
    self.tracking_git = Git(cwd=self.tracking_git_checkout_path,
                            filesystem=self.filesystem,
                            executive=self.executive)
def test_run_command_args_type(self):
    """run_command() must reject bare strings and accept lists/tuples."""
    executive = Executive()
    # Both byte strings and unicode strings are rejected with an assertion.
    for bad_command in ('echo', u'echo'):
        with self.assertRaises(AssertionError):
            executive.run_command(bad_command)
    # A list works, and so does the equivalent tuple.
    good_command = command_line('echo', 'foo')
    executive.run_command(good_command)
    executive.run_command(tuple(good_command))
def test_print_command_unicode(self):
    """command_for_printing() escapes non-ascii args, platform-dependently."""
    executive = Executive()
    # The expected result is different on Windows because the unicode arg
    # first gets encoded using 'mbcs'. This encoding makes it unnecessary to
    # escape any unicode characters in the arg.
    # Elsewhere, the 'mbcs' encoding is skipped, but then we must escape any
    # non-ascii unicode characters by encoding with 'unicode_escape'. This
    # results in an extra \ on non-Win platforms.
    if sys.platform == 'win32':
        expected = u'echo 1 a\xac'
    else:
        expected = u'echo 1 a\\xac'
    actual = executive.command_for_printing(['echo', 1, u'a\xac'])
    self.assertEqual(expected, actual)
def test_timeout_exceeded(self):
    """A command that outlives timeout_seconds raises ScriptError."""
    executive = Executive()
    # 'sleep infinity' never terminates on its own, so the 10ms timeout
    # must fire and surface as a ScriptError.
    with self.assertRaises(ScriptError):
        executive.run_command(
            command_line('sleep', 'infinity'), timeout_seconds=0.01)
def read_git_config(cls, key, cwd=None, executive=None):
    """Returns every value configured for |key|, newline-stripped.

    --get-all is passed so that keys holding multiple values are all
    returned. |cwd| lets callers run from outside the working directory
    (e.g. when running blink_tool.py elsewhere).
    """
    # FIXME: This should probably use cwd=self.checkout_root.
    # FIXME: This should use an Executive.
    runner = executive or Executive()
    config_command = [cls.executable_name, 'config', '--get-all', key]
    output = runner.run_command(
        config_command, error_handler=Executive.ignore_error, cwd=cwd)
    return output.rstrip('\n')
def __init__(self):
    """Wires up the real (non-mock) system interfaces."""
    self.executive = Executive()
    self.filesystem = FileSystem()
    self.user = User()
    self.executable = sys.executable
    self.platform = PlatformInfo(sys, platform, self.filesystem,
                                 self.executive)
    # Expose the process-wide streams and environment directly.
    self.stdin = sys.stdin
    self.stdout = sys.stdout
    self.stderr = sys.stderr
    self.environ = os.environ
def run_pylint(self, path):
    """Runs depot_tools pylint on |path| and returns its parseable output."""
    finder = PathFinder(FileSystem())
    executive = Executive()
    # Make Blink's tooling and its vendored dependencies importable.
    python_paths = [
        get_blink_tools_dir(),
        finder.path_from_blink_source('build', 'scripts'),
        get_blinkpy_thirdparty_dir(),
        finder.path_from_blink_source('bindings', 'scripts'),
        finder.path_from_chromium_base('build', 'android'),
        finder.path_from_chromium_base('third_party', 'catapult', 'devil'),
        finder.path_from_chromium_base('third_party', 'pymock'),
    ]
    env = os.environ.copy()
    env['PYTHONPATH'] = os.pathsep.join(python_paths)
    pylint_command = [
        sys.executable,
        finder.path_from_depot_tools_base('pylint.py'),
        '--output-format=parseable',
        '--rcfile=' + finder.path_from_blink_tools('blinkpy', 'pylintrc'),
        path,
    ]
    # pylint exits non-zero when it reports issues; swallow that so the
    # caller can inspect the output instead.
    return executive.run_command(
        pylint_command, env=env, error_handler=executive.ignore_error)
def setUp(self):
    """Creates two real git checkouts: a standalone repo and a clone
    that tracks it.
    """
    self.executive = Executive()
    self.filesystem = FileSystem()
    self.original_cwd = self.filesystem.getcwd()

    # Set up fresh git repository with one commit.
    self.untracking_checkout_path = self._mkdtemp(
        suffix='-git_unittest_untracking')
    self._run(['git', 'init', self.untracking_checkout_path])
    self._chdir(self.untracking_checkout_path)
    # Explicitly create the default branch instead of relying on
    # init.defaultBranch. We don't use the new --initial-branch flag with
    # `git init` to keep the tests compatible with older versions of git.
    self._run(['git', 'checkout', '-b', 'master'])
    self._set_user_config()
    self._write_text_file('foo_file', 'foo')
    self._run(['git', 'add', 'foo_file'])
    self._run(['git', 'commit', '-am', 'dummy commit'])
    self.untracking_git = Git(
        cwd=self.untracking_checkout_path,
        filesystem=self.filesystem,
        executive=self.executive)

    # Then set up a second git repo that tracks the first one.
    self.tracking_git_checkout_path = self._mkdtemp(
        suffix='-git_unittest_tracking')
    self._run([
        'git', 'clone', '--quiet', self.untracking_checkout_path,
        self.tracking_git_checkout_path
    ])
    self._chdir(self.tracking_git_checkout_path)
    self._set_user_config()
    self.tracking_git = Git(
        cwd=self.tracking_git_checkout_path,
        filesystem=self.filesystem,
        executive=self.executive)
def test_kill_process(self):
    """kill_process() terminates a live process; repeat kills are no-ops."""
    executive = Executive()
    process = subprocess.Popen(never_ending_command(),
                               stdout=subprocess.PIPE)
    # poll() returns None while the child is still running.
    self.assertEqual(None, process.poll())
    executive.kill_process(process.pid)
    # Killing again should fail silently.
    executive.kill_process(process.pid)
def __init__(self, fs, options, repo_root):
    """Records the filesystem, options, and repo root, and pre-declares
    the lazily-built lookup tables.
    """
    self._fs = fs
    self._options = options
    self._repo_root = repo_root
    self._platform = PlatformInfo(sys, platform, fs, Executive())
    _log.debug(options)
    # The following fields are initialized in _create_basename_maps.
    self._basename_map = None
    self._basename_re_list = None
    self._idl_generated_impl_headers = None
    # _checked_in_header_re_list is used to distinguish checked-in
    # header files and generated header files.
    self._checked_in_header_re_list = None
    self._updated_files = []
def test_real_code(self):
    """Smoke-tests the real (unmocked) PlatformInfo against this machine."""
    info = PlatformInfo(sys, platform, FileSystem(), Executive())
    # assertNotEqual replaces the deprecated assertNotEquals alias.
    self.assertNotEqual(info.os_name, '')
    self.assertNotEqual(info.os_version, '')
    self.assertNotEqual(info.display_name(), '')
    self.assertTrue(info.is_mac() or info.is_win() or info.is_linux()
                    or info.is_freebsd())
    self.assertIsNotNone(info.terminal_width())

    if info.is_linux():
        self.assertIsNotNone(info.linux_distribution())

    # total_bytes_memory() is only implemented for Mac.
    if info.is_mac():
        self.assertTrue(info.total_bytes_memory() > 0)
    else:
        self.assertIsNone(info.total_bytes_memory())
def test_kill_process(self):
    """kill_process() kills the whole group; repeat kills are no-ops."""
    executive = Executive()
    popen_kwargs = {'stdout': subprocess.PIPE}
    if sys.platform != 'win32':
        # Give the child its own process group so killing the group does
        # not take the test runner down with it.
        popen_kwargs['preexec_fn'] = lambda: os.setpgid(0, 0)
    process = subprocess.Popen(never_ending_command(), **popen_kwargs)
    self.assertEqual(process.poll(), None)  # Process is running
    executive.kill_process(process.pid)
    # Killing again should fail silently.
    executive.kill_process(process.pid)
def __init__(self, cwd=None, executive=None, filesystem=None, platform=None):
    """Binds this Git wrapper to a working directory.

    Falls back to the directory containing this module when |cwd| is not
    inside a git repo. Fix: the failure is now only logged when the
    fallback directory is not a repo either — previously _log.error ran
    unconditionally, even after the fallback succeeded.
    """
    self._executive = executive or Executive()
    self._filesystem = filesystem or FileSystem()
    self._executable_name = self.find_executable_name(
        self._executive, platform)
    self.cwd = cwd or self._filesystem.abspath(self._filesystem.getcwd())
    if not self.in_working_directory(self.cwd):
        module_directory = self._filesystem.abspath(
            self._filesystem.dirname(
                self._filesystem.path_to_module(self.__module__)))
        _log.info(
            'The current directory (%s) is not in a git repo, trying directory %s.',
            cwd, module_directory)
        if self.in_working_directory(module_directory):
            self.cwd = module_directory
        else:
            _log.error('Failed to find Git repo for %s or %s', cwd,
                       module_directory)
    self.checkout_root = self.find_checkout_root(self.cwd)
def test_run_command_with_unicode(self):
    """Validate that it is safe to pass unicode() objects to
    Executive.run* methods, and they will return unicode() objects by
    default unless decode_output=False.
    """
    unicode_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
    encoding = 'mbcs' if sys.platform == 'win32' else 'utf-8'
    encoded_input = unicode_input.encode(encoding)

    # On Windows, we expect the unicode->mbcs->unicode roundtrip to be
    # lossy. On other platforms, we expect a lossless roundtrip.
    if sys.platform == 'win32':
        expected_unicode = encoded_input.decode(encoding)
    else:
        expected_unicode = unicode_input

    executive = Executive()
    self.assertEqual(
        executive.run_command(command_line('cat'), input=unicode_input),
        expected_unicode)
    self.assertEqual(
        executive.run_command(command_line('echo', unicode_input)),
        expected_unicode)
    self.assertEqual(
        executive.run_command(command_line('echo', unicode_input),
                              decode_output=False), encoded_input)
    # Make sure that str() input also works.
    self.assertEqual(
        executive.run_command(command_line('cat'),
                              input=encoded_input,
                              decode_output=False), encoded_input)
def test_run_in_parallel_assert_nonempty(self):
    """run_in_parallel() asserts when given an empty command list."""
    executive = Executive()
    with self.assertRaises(AssertionError):
        executive.run_in_parallel([])
def test_running_pids(self):
    """running_pids() must include the current process."""
    current_pid = os.getpid()
    self.assertIn(current_pid, Executive().running_pids())
def test_check_running_pid(self):
    """check_running_pid() is true for us, false for an impossible pid."""
    executive = Executive()
    self.assertTrue(executive.check_running_pid(os.getpid()))
    # According to the proc(5) man page, on 64-bit linux systems,
    # pid_max can be set to any value up to 2^22 (approximately 4
    # million), so a pid above that cannot belong to a real process.
    impossible_pid = 5000000
    self.assertFalse(executive.check_running_pid(impossible_pid))
def test_timeout_satisfied(self):
    """A command that finishes well within the timeout runs normally."""
    command = command_line('sleep', '0')
    Executive().run_command(command, timeout_seconds=1000)
def test_timeout_exceeded_exit_code(self):
    """Timing out with return_exit_code=True yields a non-zero code."""
    executive = Executive()
    exit_code = executive.run_command(
        command_line('sleep', 'infinity'),
        timeout_seconds=0.01,
        return_exit_code=True)
    self.assertNotEqual(exit_code, 0)
def test_popen_args(self):
    """popen() accepts 'args' as an explicit keyword argument."""
    executive = Executive()
    # Explicitly naming the 'args' argument should not throw an exception.
    child = executive.popen(args=command_line('echo', 1),
                            stdout=executive.PIPE)
    child.wait()
def move(self, apply_only=None):
    """Move Blink source files.

    Args:
        apply_only: If it's None, move all affected files. Otherwise, it
            should be a set of file paths and this function moves only the
            files in |apply_only|.
    """
    _log.info('Planning renaming ...')
    file_pairs = plan_blink_move(self._fs, [])
    if apply_only:
        # Restrict the plan to the requested repo-relative paths.
        file_pairs = [
            (src, dest) for (src, dest) in file_pairs
            if 'third_party/WebKit/' + src.replace('\\', '/') in apply_only
        ]
    _log.info('Will move %d files', len(file_pairs))

    git = self._create_git()
    files_set = self._get_checked_in_files(git)
    for i, (src, dest) in enumerate(file_pairs):
        src_from_repo = self._fs.join('third_party', 'WebKit', src)
        # Skip files that are not tracked by git.
        if src_from_repo.replace('\\', '/') not in files_set:
            _log.info('%s is not in the repository', src)
            continue
        dest_from_repo = self._fs.join('third_party', 'blink', dest)
        self._fs.maybe_make_directory(self._repo_root, 'third_party',
                                      'blink', self._fs.dirname(dest))
        # Either record the move in git or just move on disk, per options.
        if self._options.run_git:
            git.move(src_from_repo, dest_from_repo)
            _log.info('[%d/%d] Git moved %s', i + 1, len(file_pairs), src)
        else:
            self._fs.move(
                self._fs.join(self._repo_root, src_from_repo),
                self._fs.join(self._repo_root, dest_from_repo))
            _log.info('[%d/%d] Moved %s', i + 1, len(file_pairs), src)
    # A partial (apply_only) run skips the landmine/tests/commit steps.
    if apply_only:
        return

    self._update_single_file_content('build/get_landmines.py', [(
        '\ndef main',
        ' print \'The Great Blink mv for source files (crbug.com/768828)\'\n\ndef main'
    )])

    _log.info('Run run_bindings_tests.py ...')
    Executive().run_command([
        'python',
        self._fs.join(get_blink_tools_dir(), 'run_bindings_tests.py'),
        '--reset-results'
    ],
                            cwd=self._repo_root)

    if self._options.run_git:
        _log.info('Make a local commit ...')
        git.commit_locally_with_message(
            """The Great Blink mv for source files, part 2.

Move and rename files.

NOAUTOREVERT=true
NOPRESUBMIT=true
NOTREECHECKS=true
Bug: 768828
""")
def test_auto_stringify_args(self):
    """Integer args are automatically converted to strings."""
    executive = Executive()
    int_command = command_line('echo', 1)
    executive.run_command(int_command)
    proc = executive.popen(int_command, stdout=executive.PIPE)
    proc.wait()
    self.assertEqual('echo 1', executive.command_for_printing(['echo', 1]))
def __init__(self, tests, is_debug):
    """Remembers the test list; the last entry is the expected failure."""
    self.executive = Executive()
    self.path_finder = PathFinder(FileSystem())
    self.tests = tests
    self.expected_failure = tests[-1]
    self.is_debug = is_debug
def __init__(self, platform_info=None):
    """Stores |platform_info|, building a real PlatformInfo by default.

    We cannot get the PlatformInfo object from a SystemHost because
    User is part of SystemHost itself.
    """
    if platform_info is None:
        platform_info = PlatformInfo(sys, platform, FileSystem(),
                                     Executive())
    self._platform_info = platform_info
def run_bad_command():
    """Runs a nonexistent command, ignoring the resulting error."""
    bad_command = ['foo_bar_command_blah']
    Executive().run_command(bad_command,
                            error_handler=Executive.ignore_error,
                            return_exit_code=True)
def bindings_tests(output_directory, verbose, suppress_diff):
    """Regenerates test bindings into |output_directory| and compares them
    against the checked-in reference files.

    Returns 0 when all generated files match the references and no stale
    reference files exist, 1 otherwise.
    """
    executive = Executive()

    def list_files(directory):
        # Collects all files under the known component subdirectories.
        files = []
        for component in os.listdir(directory):
            if component not in COMPONENT_DIRECTORY:
                continue
            directory_with_component = os.path.join(directory, component)
            for filename in os.listdir(directory_with_component):
                files.append(os.path.join(directory_with_component, filename))
        return files

    def diff(filename1, filename2):
        # Python's difflib module is too slow, especially on long output, so
        # run external diff(1) command
        cmd = [
            'diff',
            '-u',  # unified format
            '-N',  # treat absent files as empty
            filename1,
            filename2
        ]
        # Return output and don't raise exception, even though diff(1) has
        # non-zero exit if files differ.
        return executive.run_command(cmd, error_handler=lambda x: None)

    def is_cache_file(filename):
        return filename.endswith('.cache')

    def delete_cache_files():
        # FIXME: Instead of deleting cache files, don't generate them.
        cache_files = [
            path for path in list_files(output_directory)
            if is_cache_file(os.path.basename(path))
        ]
        for cache_file in cache_files:
            os.remove(cache_file)

    def identical_file(reference_filename, output_filename):
        # Prints a PASS/FAIL line per file; returns True when identical.
        reference_basename = os.path.basename(reference_filename)
        if not os.path.isfile(reference_filename):
            print 'Missing reference file!'
            print '(if adding new test, update reference files)'
            print reference_basename
            print
            return False
        if not filecmp.cmp(reference_filename, output_filename):
            # cmp is much faster than diff, and usual case is "no difference",
            # so only run diff if cmp detects a difference
            print 'FAIL: %s' % reference_basename
            if not suppress_diff:
                print diff(reference_filename, output_filename)
            return False
        if verbose:
            print 'PASS: %s' % reference_basename
        return True

    def identical_output_files(output_files):
        # Pairs each generated file with its reference and compares them all.
        reference_files = [
            os.path.join(REFERENCE_DIRECTORY,
                         os.path.relpath(path, output_directory))
            for path in output_files
        ]
        return all([
            identical_file(reference_filename, output_filename)
            for (reference_filename,
                 output_filename) in zip(reference_files, output_files)
        ])

    def no_excess_files(output_files):
        # Flags reference files that no generated file corresponds to.
        generated_files = set(
            [os.path.relpath(path, output_directory) for path in output_files])
        excess_files = []
        for path in list_files(REFERENCE_DIRECTORY):
            relpath = os.path.relpath(path, REFERENCE_DIRECTORY)
            # Ignore backup files made by a VCS.
            if os.path.splitext(relpath)[1] == '.orig':
                continue
            if relpath not in generated_files:
                excess_files.append(relpath)
        if excess_files:
            print('Excess reference files! '
                  '(probably cruft from renaming or deleting):\n' +
                  '\n'.join(excess_files))
            return False
        return True

    try:
        generate_interface_dependencies()
        for component in COMPONENT_DIRECTORY:
            output_dir = os.path.join(output_directory, component)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            options = IdlCompilerOptions(output_directory=output_dir,
                                         impl_output_directory=output_dir,
                                         cache_directory=None,
                                         target_component=component)
            # 'core' also generates partial interfaces into 'modules'.
            if component == 'core':
                partial_interface_output_dir = os.path.join(
                    output_directory, 'modules')
                if not os.path.exists(partial_interface_output_dir):
                    os.makedirs(partial_interface_output_dir)
                partial_interface_options = IdlCompilerOptions(
                    output_directory=partial_interface_output_dir,
                    impl_output_directory=None,
                    cache_directory=None,
                    target_component='modules')
            idl_filenames = []
            dictionary_impl_filenames = []
            partial_interface_filenames = []
            input_directory = os.path.join(TEST_INPUT_DIRECTORY, component)
            for filename in os.listdir(input_directory):
                if (filename.endswith('.idl') and
                        # Dependencies aren't built
                        # (they are used by the dependent)
                        filename not in DEPENDENCY_IDL_FILES):
                    idl_path = os.path.realpath(
                        os.path.join(input_directory, filename))
                    idl_filenames.append(idl_path)
                    idl_basename = os.path.basename(idl_path)
                    name_from_basename, _ = os.path.splitext(idl_basename)
                    definition_name = get_first_interface_name_from_idl(
                        get_file_contents(idl_path))
                    is_partial_interface_idl = to_snake_case(
                        definition_name) != name_from_basename
                    if not is_partial_interface_idl:
                        interface_info = interfaces_info[definition_name]
                        if interface_info['is_dictionary']:
                            dictionary_impl_filenames.append(idl_path)
                        if component == 'core' and interface_info[
                                'dependencies_other_component_full_paths']:
                            partial_interface_filenames.append(idl_path)
            info_provider = component_info_providers[component]
            partial_interface_info_provider = component_info_providers[
                'modules']
            # Run every generator over the collected IDL files.
            generate_union_type_containers(CodeGeneratorUnionType,
                                           info_provider, options)
            generate_callback_function_impl(CodeGeneratorCallbackFunction,
                                            info_provider, options)
            generate_bindings(CodeGeneratorV8, info_provider, options,
                              idl_filenames)
            generate_bindings(CodeGeneratorWebAgentAPI, info_provider,
                              options, idl_filenames)
            generate_bindings(CodeGeneratorV8,
                              partial_interface_info_provider,
                              partial_interface_options,
                              partial_interface_filenames)
            generate_dictionary_impl(CodeGeneratorDictionaryImpl,
                                     info_provider, options,
                                     dictionary_impl_filenames)
            generate_origin_trial_features(info_provider, options, [
                filename for filename in idl_filenames
                if filename not in dictionary_impl_filenames
            ])
    finally:
        delete_cache_files()

    # Detect all changes
    output_files = list_files(output_directory)
    passed = identical_output_files(output_files)
    passed &= no_excess_files(output_files)

    if passed:
        if verbose:
            print
        print PASS_MESSAGE
        return 0
    print
    print FAIL_MESSAGE
    return 1
class GitTestWithRealFilesystemAndExecutive(unittest.TestCase):
    """Exercises the Git wrapper against real repositories created on disk.

    setUp() builds a standalone repo and a second repo cloned from (and
    therefore tracking) it; tearDown() removes both.
    """

    def setUp(self):
        self.executive = Executive()
        self.filesystem = FileSystem()
        self.original_cwd = self.filesystem.getcwd()

        # Set up fresh git repository with one commit.
        self.untracking_checkout_path = self._mkdtemp(
            suffix='-git_unittest_untracking')
        self._run(['git', 'init', self.untracking_checkout_path])
        self._chdir(self.untracking_checkout_path)
        # Explicitly create the default branch instead of relying on
        # init.defaultBranch. We don't use the new --initial-branch flag with
        # `git init` to keep the tests compatible with older versions of git.
        self._run(['git', 'checkout', '-b', 'master'])
        self._set_user_config()
        self._write_text_file('foo_file', 'foo')
        self._run(['git', 'add', 'foo_file'])
        self._run(['git', 'commit', '-am', 'dummy commit'])
        self.untracking_git = Git(
            cwd=self.untracking_checkout_path,
            filesystem=self.filesystem,
            executive=self.executive)

        # Then set up a second git repo that tracks the first one.
        self.tracking_git_checkout_path = self._mkdtemp(
            suffix='-git_unittest_tracking')
        self._run([
            'git', 'clone', '--quiet', self.untracking_checkout_path,
            self.tracking_git_checkout_path
        ])
        self._chdir(self.tracking_git_checkout_path)
        self._set_user_config()
        self.tracking_git = Git(
            cwd=self.tracking_git_checkout_path,
            filesystem=self.filesystem,
            executive=self.executive)

    def tearDown(self):
        # Leave the temp checkouts before deleting them.
        self._chdir(self.original_cwd)
        self._run(['rm', '-rf', self.tracking_git_checkout_path])
        self._run(['rm', '-rf', self.untracking_checkout_path])

    def _set_user_config(self):
        # Commits require a configured identity.
        self._run(['git', 'config', '--local', 'user.name', 'Fake'])
        self._run(
            ['git', 'config', '--local', 'user.email', '*****@*****.**'])

    def _chdir(self, path):
        self.filesystem.chdir(path)

    def _mkdir(self, path):
        assert not self.filesystem.exists(path)
        self.filesystem.maybe_make_directory(path)

    def _mkdtemp(self, **kwargs):
        return str(self.filesystem.mkdtemp(**kwargs))

    def _run(self, *args, **kwargs):
        # Thin wrapper so tests can shell out with one call.
        return self.executive.run_command(*args, **kwargs)

    def _write_text_file(self, path, contents):
        self.filesystem.write_text_file(path, contents)

    def test_add_list(self):
        self._chdir(self.untracking_checkout_path)
        git = self.untracking_git
        self._mkdir('added_dir')
        self._write_text_file('added_dir/added_file', 'new stuff')
        self._run(['ls', 'added_dir'])
        self._run(['pwd'])
        self._run(['cat', 'added_dir/added_file'])
        git.add_list(['added_dir/added_file'])
        self.assertIn('added_dir/added_file', git.added_files())

    def test_delete_recursively(self):
        self._chdir(self.untracking_checkout_path)
        git = self.untracking_git
        self._mkdir('added_dir')
        self._write_text_file('added_dir/added_file', 'new stuff')
        git.add_list(['added_dir/added_file'])
        self.assertIn('added_dir/added_file', git.added_files())
        git.delete_list(['added_dir/added_file'])
        self.assertNotIn('added_dir', git.added_files())

    def test_delete_recursively_or_not(self):
        self._chdir(self.untracking_checkout_path)
        git = self.untracking_git
        self._mkdir('added_dir')
        self._write_text_file('added_dir/added_file', 'new stuff')
        self._write_text_file('added_dir/another_added_file',
                              'more new stuff')
        git.add_list(['added_dir/added_file', 'added_dir/another_added_file'])
        self.assertIn('added_dir/added_file', git.added_files())
        self.assertIn('added_dir/another_added_file', git.added_files())
        git.delete_list(['added_dir/added_file'])
        self.assertIn('added_dir/another_added_file', git.added_files())

    def test_exists(self):
        self._chdir(self.untracking_checkout_path)
        git = self.untracking_git
        self._chdir(git.checkout_root)
        self.assertFalse(git.exists('foo.txt'))
        self._write_text_file('foo.txt', 'some stuff')
        # exists() only reflects committed state, not the working tree.
        self.assertFalse(git.exists('foo.txt'))
        git.add_list(['foo.txt'])
        git.commit_locally_with_message('adding foo')
        self.assertTrue(git.exists('foo.txt'))
        git.delete_list(['foo.txt'])
        git.commit_locally_with_message('deleting foo')
        self.assertFalse(git.exists('foo.txt'))

    def test_move(self):
        self._chdir(self.untracking_checkout_path)
        git = self.untracking_git
        self._write_text_file('added_file', 'new stuff')
        git.add_list(['added_file'])
        git.move('added_file', 'moved_file')
        self.assertIn('moved_file', git.added_files())

    def test_move_recursive(self):
        self._chdir(self.untracking_checkout_path)
        git = self.untracking_git
        self._mkdir('added_dir')
        self._write_text_file('added_dir/added_file', 'new stuff')
        self._write_text_file('added_dir/another_added_file',
                              'more new stuff')
        git.add_list(['added_dir'])
        git.move('added_dir', 'moved_dir')
        self.assertIn('moved_dir/added_file', git.added_files())
        self.assertIn('moved_dir/another_added_file', git.added_files())

    def test_remote_branch_ref(self):
        # This tests a protected method. pylint: disable=protected-access
        self.assertEqual(self.tracking_git._remote_branch_ref(),
                         'refs/remotes/origin/master')
        self._chdir(self.untracking_checkout_path)
        self.assertRaises(ScriptError, self.untracking_git._remote_branch_ref)

    def test_create_patch(self):
        self._chdir(self.tracking_git_checkout_path)
        git = self.tracking_git
        self._write_text_file('test_file_commit1', 'contents')
        self._run(['git', 'add', 'test_file_commit1'])
        git.commit_locally_with_message('message')
        patch = git.create_patch()
        self.assertNotRegexpMatches(patch, r'Subversion Revision:')

    def test_patches_have_filenames_with_prefixes(self):
        self._chdir(self.tracking_git_checkout_path)
        git = self.tracking_git
        self._write_text_file('test_file_commit1', 'contents')
        self._run(['git', 'add', 'test_file_commit1'])
        git.commit_locally_with_message('message')

        # Even if diff.noprefix is enabled, create_patch() produces diffs with prefixes.
        self._run(['git', 'config', 'diff.noprefix', 'true'])
        patch = git.create_patch()
        self.assertRegexpMatches(
            patch, r'^diff --git a/test_file_commit1 b/test_file_commit1')

    def test_rename_files(self):
        self._chdir(self.tracking_git_checkout_path)
        git = self.tracking_git
        git.move('foo_file', 'bar_file')
        git.commit_locally_with_message('message')
        patch = git.create_patch(changed_files=git.changed_files())
        self.assertTrue('rename from' in patch)

    def test_commit_position_from_git_log(self):
        # This tests a protected method. pylint: disable=protected-access
        git_log = """
commit 624c3081c0
Author: foobarbaz1 <*****@*****.**>
Date:   Mon Sep 28 19:10:30 2015 -0700

    Test foo bar baz qux 123.

    BUG=000000

    Review URL: https://codereview.chromium.org/999999999

    Cr-Commit-Position: refs/heads/master@{#1234567}
"""
        self._chdir(self.tracking_git_checkout_path)
        git = self.tracking_git
        self.assertEqual(git._commit_position_from_git_log(git_log), 1234567)
class Bisector(object):
    """Bisects a list of web tests down to a minimal set that reproduces
    the failure of the final test in the list.
    """

    def __init__(self, tests, is_debug):
        self.executive = Executive()
        self.tests = tests
        # The last test in the list is the one expected to fail.
        self.expected_failure = tests[-1]
        self.is_debug = is_debug
        self.path_finder = PathFinder(FileSystem())

    def bisect(self):
        """Narrows the buckets down and prints a minimal repro command.

        Returns 0 on success, 1 if the failure could not be reproduced.
        """
        if self.test_fails_in_isolation():
            self.buckets = [Bucket([self.expected_failure])]
            print '%s fails when run in isolation.' % self.expected_failure
            self.print_result()
            return 0
        if not self.test_fails(self.tests):
            _log.error('%s does not fail', self.expected_failure)
            return 1
        # Split the list of test into buckets. Each bucket has at least one test required to cause
        # the expected failure at the end. Split buckets in half until there are only buckets left
        # with one item in them.
        self.buckets = [
            Bucket(self.tests[:-1]),
            Bucket([self.expected_failure])
        ]
        while not self.is_done():
            self.print_progress()
            self.split_largest_bucket()
        self.print_result()
        self.verify_non_flaky()
        return 0

    def test_fails_in_isolation(self):
        return self.test_bucket_list_fails([Bucket([self.expected_failure])])

    def verify_non_flaky(self):
        # Re-run the final bucket set repeatedly to gauge flakiness.
        print 'Verifying the failure is not flaky by running 10 times.'
        count_failures = 0
        for _ in range(0, 10):
            if self.test_bucket_list_fails(self.buckets):
                count_failures += 1
        print 'Failed %d/10 times' % count_failures

    def print_progress(self):
        count = 0
        for bucket in self.buckets:
            count += len(bucket.tests)
        print '%d tests left, %d buckets' % (count, len(self.buckets))

    def print_result(self):
        # Prints a ready-to-run command line reproducing the failure.
        tests = []
        for bucket in self.buckets:
            tests += bucket.tests
        extra_args = ' --debug' if self.is_debug else ''
        print 'run_web_tests.py%s --child-processes=1 --order=none %s' % (
            extra_args, ' '.join(tests))

    def is_done(self):
        # Done once every bucket is down to a single test.
        for bucket in self.buckets:
            if bucket.size() > 1:
                return False
        return True

    def split_largest_bucket(self):
        """Splits the biggest bucket in half, keeping whichever half (or
        both) is still needed to reproduce the failure.
        """
        index = 0
        largest_index = 0
        largest_size = 0
        for bucket in self.buckets:
            if bucket.size() > largest_size:
                largest_index = index
                largest_size = bucket.size()
            index += 1

        bucket_to_split = self.buckets[largest_index]
        halfway_point = int(largest_size / 2)
        first_half = Bucket(bucket_to_split.tests[:halfway_point])
        second_half = Bucket(bucket_to_split.tests[halfway_point:])

        buckets_before = self.buckets[:largest_index]
        buckets_after = self.buckets[largest_index + 1:]

        # Do the second half first because it tends to be faster because the http tests are front-loaded and slow.
        new_buckets = buckets_before + [second_half] + buckets_after
        if self.test_bucket_list_fails(new_buckets):
            self.buckets = new_buckets
            return

        new_buckets = buckets_before + [first_half] + buckets_after
        if self.test_bucket_list_fails(new_buckets):
            self.buckets = new_buckets
            return

        # Neither half alone reproduces the failure: keep both.
        self.buckets = buckets_before + [first_half, second_half
                                         ] + buckets_after

    def test_bucket_list_fails(self, buckets):
        tests = []
        for bucket in buckets:
            tests += bucket.tests
        return self.test_fails(tests)

    def test_fails(self, tests):
        """Runs |tests| through run_web_tests.py and reports whether the
        expected failure occurred (detected via stderr).
        """
        extra_args = ['--debug'] if self.is_debug else []
        path_to_run_webkit_tests = self.path_finder.path_from_tools_scripts(
            'run_web_tests.py')
        output = self.executive.popen([
            path_to_run_webkit_tests, '--child-processes', '1', '--order',
            'none', '--no-retry', '--no-show-results', '--verbose'
        ] + extra_args + tests,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
        failure_string = self.expected_failure + ' failed'
        if failure_string in output.stderr.read():
            return True
        return False
def bindings_tests(output_directory, verbose, suppress_diff):
    """Regenerates test bindings into |output_directory| and compares them
    against the checked-in reference files.

    Returns 0 when all generated files match the references and no stale
    reference files exist, 1 otherwise.
    """
    executive = Executive()

    def list_files(directory):
        # Collects all files under the known component subdirectories.
        if not os.path.isdir(directory):
            return []
        files = []
        for component in os.listdir(directory):
            if component not in COMPONENT_DIRECTORY:
                continue
            directory_with_component = os.path.join(directory, component)
            for filename in os.listdir(directory_with_component):
                files.append(os.path.join(directory_with_component, filename))
        return files

    def diff(filename1, filename2):
        with open(filename1) as file1:
            file1_lines = file1.readlines()
        with open(filename2) as file2:
            file2_lines = file2.readlines()
        # Use Python's difflib module so that diffing works across platforms
        return ''.join(difflib.context_diff(file1_lines, file2_lines))

    def is_cache_file(filename):
        return filename.endswith('.cache')

    def delete_cache_files():
        # FIXME: Instead of deleting cache files, don't generate them.
        cache_files = [
            path for path in list_files(output_directory)
            if is_cache_file(os.path.basename(path))
        ]
        for cache_file in cache_files:
            os.remove(cache_file)

    def identical_file(reference_filename, output_filename):
        # Prints a PASS/FAIL line per file; returns True when identical.
        reference_basename = os.path.basename(reference_filename)
        if not os.path.isfile(reference_filename):
            print 'Missing reference file!'
            print '(if adding new test, update reference files)'
            print reference_basename
            print
            return False
        if not filecmp.cmp(reference_filename, output_filename):
            # cmp is much faster than diff, and usual case is "no difference",
            # so only run diff if cmp detects a difference
            print 'FAIL: %s' % reference_basename
            if not suppress_diff:
                print diff(reference_filename, output_filename)
            return False
        if verbose:
            print 'PASS: %s' % reference_basename
        return True

    def identical_output_files(output_files):
        # Pairs each generated file with its reference and compares them all.
        reference_files = [
            os.path.join(REFERENCE_DIRECTORY,
                         os.path.relpath(path, output_directory))
            for path in output_files
        ]
        return all([
            identical_file(reference_filename, output_filename)
            for (reference_filename,
                 output_filename) in zip(reference_files, output_files)
        ])

    def no_excess_files(output_files):
        # Flags reference files that no generated file corresponds to.
        generated_files = set(
            [os.path.relpath(path, output_directory) for path in output_files])
        excess_files = []
        for path in list_files(REFERENCE_DIRECTORY):
            relpath = os.path.relpath(path, REFERENCE_DIRECTORY)
            # Ignore backup files made by a VCS.
            if os.path.splitext(relpath)[1] == '.orig':
                continue
            if relpath not in generated_files:
                excess_files.append(relpath)
        if excess_files:
            print('Excess reference files! '
                  '(probably cruft from renaming or deleting):\n' +
                  '\n'.join(excess_files))
            return False
        return True

    def make_runtime_features_dict():
        # Maps feature name -> {'in_origin_trial': bool} from the test
        # runtime_enabled_features.json5.
        input_filename = os.path.join(TEST_INPUT_DIRECTORY,
                                      'runtime_enabled_features.json5')
        json5_file = Json5File.load_from_files([input_filename])
        features_map = {}
        for feature in json5_file.name_dictionaries:
            features_map[str(feature['name'])] = {
                'in_origin_trial': feature['in_origin_trial']
            }
        return features_map

    try:
        generate_interface_dependencies(make_runtime_features_dict())
        for component in COMPONENT_DIRECTORY:
            output_dir = os.path.join(output_directory, component)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            options = IdlCompilerOptions(output_directory=output_dir,
                                         impl_output_directory=output_dir,
                                         cache_directory=None,
                                         target_component=component)
            # 'core' also generates partial interfaces into 'modules'.
            if component == 'core':
                partial_interface_output_dir = os.path.join(
                    output_directory, 'modules')
                if not os.path.exists(partial_interface_output_dir):
                    os.makedirs(partial_interface_output_dir)
                partial_interface_options = IdlCompilerOptions(
                    output_directory=partial_interface_output_dir,
                    impl_output_directory=None,
                    cache_directory=None,
                    target_component='modules')
            idl_filenames = []
            dictionary_impl_filenames = []
            partial_interface_filenames = []
            input_directory = os.path.join(TEST_INPUT_DIRECTORY, component)
            for filename in os.listdir(input_directory):
                if (filename.endswith('.idl') and
                        # Dependencies aren't built
                        # (they are used by the dependent)
                        filename not in DEPENDENCY_IDL_FILES):
                    idl_path = os.path.realpath(
                        os.path.join(input_directory, filename))
                    idl_filenames.append(idl_path)
                    idl_basename = os.path.basename(idl_path)
                    name_from_basename, _ = os.path.splitext(idl_basename)
                    definition_name = get_first_interface_name_from_idl(
                        get_file_contents(idl_path))
                    is_partial_interface_idl = to_snake_case(
                        definition_name) != name_from_basename
                    if not is_partial_interface_idl:
                        interface_info = interfaces_info[definition_name]
                        if interface_info['is_dictionary']:
                            dictionary_impl_filenames.append(idl_path)
                        if component == 'core' and interface_info[
                                'dependencies_other_component_full_paths']:
                            partial_interface_filenames.append(idl_path)
            info_provider = component_info_providers[component]
            partial_interface_info_provider = component_info_providers[
                'modules']
            # Run every generator over the collected IDL files.
            generate_union_type_containers(CodeGeneratorUnionType,
                                           info_provider, options)
            generate_callback_function_impl(CodeGeneratorCallbackFunction,
                                            info_provider, options)
            generate_bindings(CodeGeneratorV8, info_provider, options,
                              idl_filenames)
            generate_bindings(CodeGeneratorV8,
                              partial_interface_info_provider,
                              partial_interface_options,
                              partial_interface_filenames)
            generate_dictionary_impl(CodeGeneratorDictionaryImpl,
                                     info_provider, options,
                                     dictionary_impl_filenames)
            generate_origin_trial_features(info_provider, options, [
                filename for filename in idl_filenames
                if filename not in dictionary_impl_filenames
            ])
    finally:
        delete_cache_files()

    # Detect all changes
    output_files = list_files(output_directory)
    passed = identical_output_files(output_files)
    passed &= no_excess_files(output_files)

    if passed:
        if verbose:
            print
        print PASS_MESSAGE
        return 0
    print
    print FAIL_MESSAGE
    return 1