def test_empty_lines(self):
  """Blank and whitespace-only lines in the labels file are skipped."""
  labels_path = "/test/fuzz_target.labels"
  self.fs.create_file(labels_path, contents="label1\n\t\nlabel2\n \nlabel3\n")
  expected = ["label1", "label2", "label3"]
  self.assertEqual(expected,
                   engine_common.get_issue_labels("/test/fuzz_target"))
def test_well_formed(self):
  """A labels file with one label per line is parsed verbatim."""
  labels_path = "/test/fuzz_target.labels"
  self.fs.create_file(labels_path, contents="label1\nlabel2\nlabel3\n")
  expected = ["label1", "label2", "label3"]
  self.assertEqual(expected,
                   engine_common.get_issue_labels("/test/fuzz_target"))
def run(self, input_directory, output_directory, no_of_files):
  """Run the fuzzer to generate testcases.

  Writes |no_of_files| placeholder testcase files plus matching flags files
  into |output_directory|, and returns a BuiltinFuzzerResult whose output
  string carries metadata (binary name, issue owners/labels/components) that
  downstream tasks parse from the 'metadata::' lines.

  Args:
    input_directory: Directory used to derive the corpus directory.
    output_directory: Directory where testcase and flags files are written.
    no_of_files: Number of testcase/flags file pairs to generate.

  Returns:
    A BuiltinFuzzerResult with the metadata output and corpus directory.
  """
  fuzzer_binary_name, fuzzer_path = self._get_fuzzer_binary_name_and_path()
  project_qualified_name = data_types.fuzz_target_project_qualified_name(
      utils.current_project(), fuzzer_binary_name)

  arguments = self.generate_arguments(fuzzer_path)
  corpus_directory = get_corpus_directory(input_directory,
                                          project_qualified_name)

  # Create fuzz testcases.
  for i in range(no_of_files):
    # Contents of testcase file don't matter at this point. Need to create
    # something non-null so that it is not ignored.
    testcase_file_path = os.path.join(
        output_directory, '%s%d' % (testcase_manager.FUZZ_PREFIX, i))
    utils.write_data_to_file(' ', testcase_file_path)

    # Write the flags file containing command line for running launcher
    # script. The '%TESTCASE%' placeholder is substituted later with the
    # actual testcase path.
    flags_file_path = os.path.join(
        output_directory, '%s%d' % (testcase_manager.FLAGS_PREFIX, i))
    flags = ['%TESTCASE%', fuzzer_binary_name]
    if arguments:
      flags.append(arguments)
    flags_file_content = ' '.join(flags)
    utils.write_data_to_file(flags_file_content, flags_file_path)

  output = 'Generated %d testcase for fuzzer %s.\n' % (no_of_files,
                                                       fuzzer_binary_name)
  output += 'metadata::fuzzer_binary_name: %s\n' % fuzzer_binary_name

  # Optional per-target metadata read from sibling files next to the fuzzer
  # binary (e.g. .owners/.labels/.components); only emitted when non-empty.
  issue_owners = engine_common.get_issue_owners(fuzzer_path)
  if issue_owners:
    output += 'metadata::issue_owners: %s\n' % ','.join(issue_owners)

  issue_labels = engine_common.get_issue_labels(fuzzer_path)
  if issue_labels:
    output += 'metadata::issue_labels: %s\n' % ','.join(issue_labels)

  issue_components = engine_common.get_issue_components(fuzzer_path)
  if issue_components:
    output += 'metadata::issue_components: %s\n' % ','.join(issue_components)

  # Update *SAN_OPTIONS in current environment from .options file. This
  # environment is used in fuzz task later for deriving the environment
  # string in |get_environment_settings_as_string| and embedding this as
  # part of stacktrace.
  engine_common.process_sanitizer_options_overrides(fuzzer_path)

  return BuiltinFuzzerResult(output=output, corpus_directory=corpus_directory)
def run(self, input_directory, output_directory, no_of_files):
  """Run the fuzzer to generate testcases.

  Picks a fuzz target from BUILD_DIR (the FUZZ_TARGET env var if set,
  otherwise a random target), writes |no_of_files| placeholder testcase
  files plus matching flags files into |output_directory|, and returns a
  BuiltinFuzzerResult whose output string carries 'metadata::' lines parsed
  by downstream tasks.

  Args:
    input_directory: Directory under which the corpus directory is created.
    output_directory: Directory where testcase and flags files are written.
    no_of_files: Number of testcase/flags file pairs to generate.

  Returns:
    A BuiltinFuzzerResult with the metadata output and corpus directory.

  Raises:
    BuiltinFuzzerException: If BUILD_DIR is unset or contains no fuzz
        target binaries.
  """
  build_directory = environment.get_value('BUILD_DIR')
  if not build_directory:
    raise BuiltinFuzzerException('BUILD_DIR environment variable is not set.')

  fuzzers = fuzzers_utils.get_fuzz_targets(build_directory)
  if not fuzzers:
    raise BuiltinFuzzerException(
        'No fuzzer binaries found in |BUILD_DIR| directory.')

  fuzzer_binary_name = environment.get_value('FUZZ_TARGET')
  if fuzzer_binary_name:
    fuzzer_path = _get_fuzzer_path(fuzzers, fuzzer_binary_name)
  else:
    # No explicit target requested; pick one at random using a
    # cryptographically seeded RNG so selection is unpredictable.
    fuzzer_path = random.SystemRandom().choice(fuzzers)
    fuzzer_binary_name = os.path.basename(fuzzer_path)

  project_qualified_name = data_types.fuzz_target_project_qualified_name(
      utils.current_project(), fuzzer_binary_name)

  corpus_directory = os.path.join(input_directory, project_qualified_name)
  if environment.is_trusted_host():
    # On a trusted host the corpus lives on the untrusted worker; rebase the
    # path to the worker's filesystem root. Import is deferred since the
    # untrusted_runner machinery only exists in that configuration.
    from bot.untrusted_runner import file_host
    corpus_directory = file_host.rebase_to_worker_root(corpus_directory)

  arguments = self.generate_arguments(fuzzer_path)

  # Create corpus directory if it does not exist already.
  if environment.is_trusted_host():
    from bot.untrusted_runner import file_host
    file_host.create_directory(corpus_directory, create_intermediates=True)
  else:
    if not os.path.exists(corpus_directory):
      os.mkdir(corpus_directory)

  # Create fuzz testcases.
  for i in range(no_of_files):
    # Contents of testcase file don't matter at this point. Need to create
    # something non-null so that it is not ignored.
    testcase_file_path = os.path.join(output_directory,
                                      '%s%d' % (tests.FUZZ_PREFIX, i))
    utils.write_data_to_file(' ', testcase_file_path)

    # Write the flags file containing command line for running launcher
    # script. The '%TESTCASE%' placeholder is substituted later with the
    # actual testcase path.
    flags_file_path = os.path.join(output_directory,
                                   '%s%d' % (tests.FLAGS_PREFIX, i))
    flags = ['%TESTCASE%', fuzzer_binary_name]
    if arguments:
      flags.append(arguments)
    flags_file_content = ' '.join(flags)
    utils.write_data_to_file(flags_file_content, flags_file_path)

  output = 'Generated %d testcase for fuzzer %s.\n' % (no_of_files,
                                                       fuzzer_binary_name)
  output += 'metadata::fuzzer_binary_name: %s\n' % fuzzer_binary_name

  # Optional per-target metadata read from sibling files next to the fuzzer
  # binary; only emitted when non-empty.
  issue_owners = engine_common.get_issue_owners(fuzzer_path)
  if issue_owners:
    output += 'metadata::issue_owners: %s\n' % ','.join(issue_owners)

  issue_labels = engine_common.get_issue_labels(fuzzer_path)
  if issue_labels:
    output += 'metadata::issue_labels: %s\n' % ','.join(issue_labels)

  # Update *SAN_OPTIONS in current environment from .options file. This
  # environment is used in fuzz task later for deriving the environment
  # string in |get_environment_settings_as_string| and embedding this as
  # part of stacktrace.
  engine_common.process_sanitizer_options_overrides(fuzzer_path)

  return BuiltinFuzzerResult(output=output, corpus_directory=corpus_directory)
def test_empty_lines(self):
  """Blank and whitespace-only lines in the labels file are ignored."""
  self.fs.create_file(
      '/test/fuzz_target.labels',
      contents='label1\n\t\nlabel2\n \nlabel3\n')
  actual = engine_common.get_issue_labels('/test/fuzz_target')
  self.assertEqual(['label1', 'label2', 'label3'], actual)
def test_well_formed(self):
  """A labels file with one label per line is returned verbatim."""
  self.fs.create_file(
      '/test/fuzz_target.labels',
      contents='label1\nlabel2\nlabel3\n')
  actual = engine_common.get_issue_labels('/test/fuzz_target')
  self.assertEqual(['label1', 'label2', 'label3'], actual)
def test_empty_file(self):
  """An empty labels file yields no labels."""
  self.fs.create_file('/test/fuzz_target.labels', contents='')
  actual = engine_common.get_issue_labels('/test/fuzz_target')
  self.assertEqual([], actual)
def test_no_file(self):
  """A missing labels file yields no labels."""
  actual = engine_common.get_issue_labels('/test/does_not_exist')
  self.assertEqual([], actual)