def test_create_directory(self):
    """create_directory mirrors the result field of the RPC response."""
    # A successful CreateDirectory RPC maps to a True return value.
    response = untrusted_runner_pb2.CreateDirectoryResponse(result=True)
    self.mock.stub().CreateDirectory.return_value = response
    self.assertTrue(file_host.create_directory('/path', True))

    # A failed CreateDirectory RPC maps to a False return value.
    response = untrusted_runner_pb2.CreateDirectoryResponse(result=False)
    self.mock.stub().CreateDirectory.return_value = response
    self.assertFalse(file_host.create_directory('/path', True))
def test_create_directory(self):
    """create_directory works remotely, including intermediate creation."""
    # Creating a directory directly under an existing parent succeeds.
    simple_path = os.path.join(self.tmp_dir, 'dir1')
    self.assertTrue(file_host.create_directory(simple_path))
    self.assertTrue(os.path.isdir(simple_path))

    # With a missing parent and no create_intermediates flag, creation
    # fails and nothing is left behind.
    nested_path = os.path.join(self.tmp_dir, 'dir2', 'dir2')
    self.assertFalse(file_host.create_directory(nested_path))
    self.assertFalse(os.path.exists(nested_path))

    # Passing create_intermediates=True creates the missing parent too.
    self.assertTrue(file_host.create_directory(nested_path, True))
    self.assertTrue(os.path.isdir(nested_path))
def get_corpus_directory(input_directory, project_qualified_name):
    """Get (and create) the corpus directory for a fuzz target.

    Fix: the original checked `environment.is_trusted_host()` twice in a row,
    each time re-importing `file_host`; the two adjacent branches are merged
    into a single check with identical behavior.

    Args:
        input_directory: Base directory that holds per-target corpora.
        project_qualified_name: Project qualified fuzz target name, used as
            the corpus subdirectory name.

    Returns:
        Path of the corpus directory. On a trusted host this is rebased to
        the untrusted worker's root.
    """
    corpus_directory = os.path.join(input_directory, project_qualified_name)

    if environment.is_trusted_host():
        # Lazy import: untrusted_runner modules can't be in global imports on
        # all hosts.
        from bot.untrusted_runner import file_host
        corpus_directory = file_host.rebase_to_worker_root(corpus_directory)
        # Create corpus directory on the worker if it does not exist already.
        file_host.create_directory(corpus_directory, create_intermediates=True)
    else:
        # Create corpus directory locally if it does not exist already.
        shell.create_directory(corpus_directory)

    return corpus_directory
def run(self, input_directory, output_directory, no_of_files):
    """Generate fuzz testcases plus matching flags files.

    Args:
        input_directory: Directory containing per-target corpora.
        output_directory: Directory where testcase and flags files are
            written.
        no_of_files: Number of testcase/flags file pairs to generate.

    Returns:
        BuiltinFuzzerResult with a human-readable output summary and the
        corpus directory path.

    Raises:
        BuiltinFuzzerException: If BUILD_DIR is unset or contains no fuzzer
            binaries.
    """
    build_directory = environment.get_value('BUILD_DIR')
    if not build_directory:
        raise BuiltinFuzzerException('BUILD_DIR environment variable is not set.')

    fuzzers = fuzzers_utils.get_fuzz_targets(build_directory)
    if not fuzzers:
        raise BuiltinFuzzerException(
            'No fuzzer binaries found in |BUILD_DIR| directory.')

    # Honour an explicitly requested target; otherwise pick one at random.
    fuzzer_binary_name = environment.get_value('FUZZ_TARGET')
    if fuzzer_binary_name:
        fuzzer_path = _get_fuzzer_path(fuzzers, fuzzer_binary_name)
    else:
        fuzzer_path = random.SystemRandom().choice(fuzzers)
        fuzzer_binary_name = os.path.basename(fuzzer_path)

    project_qualified_name = data_types.fuzz_target_project_qualified_name(
        utils.current_project(), fuzzer_binary_name)

    corpus_directory = os.path.join(input_directory, project_qualified_name)
    if environment.is_trusted_host():
        # Lazy import: untrusted_runner modules can't be in global imports on
        # all hosts.
        from bot.untrusted_runner import file_host
        corpus_directory = file_host.rebase_to_worker_root(corpus_directory)

    arguments = self.generate_arguments(fuzzer_path)

    # Create corpus directory if it does not exist already.
    if environment.is_trusted_host():
        from bot.untrusted_runner import file_host
        file_host.create_directory(corpus_directory, create_intermediates=True)
    else:
        if not os.path.exists(corpus_directory):
            os.mkdir(corpus_directory)

    # Create fuzz testcases.
    for index in range(no_of_files):
        # Contents of the testcase file don't matter at this point. Need to
        # create something non-null so that it is not ignored.
        testcase_path = os.path.join(output_directory,
                                     '%s%d' % (tests.FUZZ_PREFIX, index))
        utils.write_data_to_file(' ', testcase_path)

        # Write the flags file containing the command line for running the
        # launcher script.
        flags_path = os.path.join(output_directory,
                                  '%s%d' % (tests.FLAGS_PREFIX, index))
        flag_tokens = ['%TESTCASE%', fuzzer_binary_name]
        if arguments:
            flag_tokens.append(arguments)
        utils.write_data_to_file(' '.join(flag_tokens), flags_path)

    output = 'Generated %d testcase for fuzzer %s.\n' % (no_of_files,
                                                         fuzzer_binary_name)
    output += 'metadata::fuzzer_binary_name: %s\n' % fuzzer_binary_name

    issue_owners = engine_common.get_issue_owners(fuzzer_path)
    if issue_owners:
        output += 'metadata::issue_owners: %s\n' % ','.join(issue_owners)

    issue_labels = engine_common.get_issue_labels(fuzzer_path)
    if issue_labels:
        output += 'metadata::issue_labels: %s\n' % ','.join(issue_labels)

    # Update *SAN_OPTIONS in current environment from .options file. This
    # environment is used in fuzz task later for deriving the environment
    # string in |get_environment_settings_as_string| and embedding this as
    # part of stacktrace.
    engine_common.process_sanitizer_options_overrides(fuzzer_path)

    return BuiltinFuzzerResult(output=output, corpus_directory=corpus_directory)
def update_data_bundle(fuzzer, data_bundle):
    """Updates a data bundle to the latest version.

    Syncs the bundle's GCS bucket contents into a local (or worker-side)
    directory under a cross-bot lock, then records the sync timestamp so
    other bots can skip redundant syncs.

    Args:
        fuzzer: Fuzzer entity; only its name is read, to locate the local
            bundle directory.
        data_bundle: Data bundle entity to update (name, is_local and
            sync_to_worker attributes are read).

    Returns:
        True on success or when the bundle is already up to date; False on
        any setup, locking or sync failure.
    """
    # This module can't be in the global imports due to appengine issues
    # with multiprocessing and psutil imports.
    from google_cloud_utils import gsutil

    # If we are using a data bundle on NFS, it is expected that our testcases
    # will usually be large enough that we would fill up our tmpfs directory
    # pretty quickly. So, change it to use an on-disk directory.
    if not data_bundle.is_local:
        testcase_disk_directory = environment.get_value('FUZZ_INPUTS_DISK')
        environment.set_value('FUZZ_INPUTS', testcase_disk_directory)

    data_bundle_directory = get_data_bundle_directory(fuzzer.name)
    if not data_bundle_directory:
        logs.log_error('Failed to setup data bundle %s.' % data_bundle.name)
        return False

    if not shell.create_directory(
            data_bundle_directory, create_intermediates=True):
        logs.log_error(
            'Failed to create data bundle %s directory.' % data_bundle.name)
        return False

    # Check if data bundle is up to date. If yes, skip the update.
    if _is_data_bundle_up_to_date(data_bundle, data_bundle_directory):
        logs.log('Data bundle was recently synced, skip.')
        return True

    # Fetch lock for this data bundle. Every return path below this point
    # must release the lock.
    if not _fetch_lock_for_data_bundle_update(data_bundle):
        logs.log_error('Failed to lock data bundle %s.' % data_bundle.name)
        return False

    # Re-check if another bot did the sync already (it may have finished
    # while we were waiting for the lock). If yes, skip.
    if _is_data_bundle_up_to_date(data_bundle, data_bundle_directory):
        logs.log('Another bot finished the sync, skip.')
        _release_lock_for_data_bundle_update(data_bundle)
        return True

    # Timestamp is taken *before* the sync starts so that changes landing
    # during the sync are not considered already-synced next time.
    time_before_sync_start = time.time()

    # No need to sync anything if this is a search index data bundle. In that
    # case, the fuzzer will generate testcases from a gcs bucket periodically.
    if not _is_search_index_data_bundle(data_bundle.name):
        bucket_url = data_handler.get_data_bundle_bucket_url(data_bundle.name)

        if environment.is_trusted_host() and data_bundle.sync_to_worker:
            # Sync directly into the untrusted worker's filesystem.
            from bot.untrusted_runner import corpus_manager
            from bot.untrusted_runner import file_host
            worker_data_bundle_directory = file_host.rebase_to_worker_root(
                data_bundle_directory)
            file_host.create_directory(
                worker_data_bundle_directory, create_intermediates=True)
            result = corpus_manager.RemoteGSUtilRunner().rsync(
                bucket_url, worker_data_bundle_directory, delete=False)
        else:
            result = gsutil.GSUtilRunner().rsync(
                bucket_url, data_bundle_directory, delete=False)

        if result.return_code != 0:
            logs.log_error('Failed to sync data bundle %s: %s.' %
                           (data_bundle.name, result.output))
            _release_lock_for_data_bundle_update(data_bundle)
            return False

    # Update the testcase list file.
    testcase_manager.create_testcase_list_file(data_bundle_directory)

    # Write last synced time in the sync file (and mirror it to the worker
    # when syncing there, so the worker-side up-to-date check works too).
    sync_file_path = _get_data_bundle_sync_file_path(data_bundle_directory)
    utils.write_data_to_file(time_before_sync_start, sync_file_path)

    if environment.is_trusted_host() and data_bundle.sync_to_worker:
        from bot.untrusted_runner import file_host
        worker_sync_file_path = file_host.rebase_to_worker_root(sync_file_path)
        file_host.copy_file_to_worker(sync_file_path, worker_sync_file_path)

    # Release acquired lock.
    _release_lock_for_data_bundle_update(data_bundle)
    return True