def setUp(self):
  """Prepare the test environment: patched env vars plus build/corpus paths."""
  test_helpers.patch_environ(self)
  # Paths used by the tests below.
  self.build_dir = os.path.join(SCRIPT_DIR, 'run_data', 'build_dir')
  self.corpus_directory = 'data/corpus_with_some_files'
  environment.set_value('FUZZ_TEST_TIMEOUT', '4800')
def _set_timeout_value_from_user_upload(testcase_id):
  """Apply the uploader-specified TEST_TIMEOUT for this testcase, if any."""
  query = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id))
  metadata = query.get()
  if not metadata or not metadata.timeout:
    # No upload metadata, or no custom timeout was requested.
    return
  environment.set_value('TEST_TIMEOUT', metadata.timeout)
def _setup_x():
  """Start Xvfb and blackbox before running the test application.

  Returns a list of the process handles created, so callers can terminate
  them properly at exit. Returns an empty list when no display is needed.
  """
  if environment.platform() != 'LINUX':
    return []

  if environment.is_engine_fuzzer_job():
    # Engine fuzzer jobs (AFL, libFuzzer, ...) run targets with no UI, so a
    # virtual display is unnecessary.
    return []

  environment.set_value('DISPLAY', DISPLAY)

  print('Creating virtual display...')
  display_runner = new_process.ProcessRunner('/usr/bin/Xvfb')
  display_process = display_runner.run(additional_args=[
      DISPLAY, '-screen', '0', '1280x1024x24', '-ac', '-nolisten', 'tcp'
  ])
  time.sleep(PROCESS_START_WAIT_SECONDS)

  window_manager_runner = new_process.ProcessRunner('/usr/bin/blackbox')
  window_manager_process = window_manager_runner.run()
  time.sleep(PROCESS_START_WAIT_SECONDS)

  # Hand back every handle we created so they can be terminated at exit.
  return [display_process, window_manager_process]
def run(self):
  """Run the emulator; `create` must have been called beforehand.

  Raises:
    EmulatorError: if no process runner was created yet.
  """
  if not self.process_runner:
    raise EmulatorError('Attempted to `run` emulator before calling `create`')

  logs.log('Starting emulator.')
  self.process = self.process_runner.run(
      stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

  # Scan the emulator's stdout until the device serial appears.
  device_serial = None
  while not device_serial:
    line = self.process.popen.stdout.readline().decode()
    match = DEVICE_SERIAL_RE.match(line)
    if match:
      device_serial = match.group(1)

  # Close the pipe so we don't hang.
  self.process.popen.stdout.close()

  logs.log('Found serial ID: %s.' % device_serial)
  environment.set_value('ANDROID_SERIAL', device_serial)

  logs.log('Waiting on device')
  adb.wait_for_device()
  logs.log('Device is online')
def setUp(self):
  """Set up a fake Fuchsia build environment plus job/fuzz-target entities."""
  BaseTest.setUp(self)

  # Seed the corpus bucket with the Fuchsia test corpus.
  self.fuchsia_corpus_dir = os.path.join(self.corpus_bucket, 'fuchsia')
  shutil.copytree(os.path.join(TEST_DIR, 'fuchsia'), self.fuchsia_corpus_dir)

  # Builds and build URLs live under a fresh temporary directory.
  self.temp_dir = tempfile.mkdtemp()
  build_path = os.path.join(self.temp_dir, 'builds')
  url_path = os.path.join(self.temp_dir, 'urls')
  os.mkdir(build_path)
  os.mkdir(url_path)
  environment.set_value('BUILDS_DIR', build_path)
  environment.set_value('BUILD_URLS_DIR', url_path)
  environment.set_value('QUEUE_OVERRIDE', 'FUCHSIA')
  environment.set_value('OS_OVERRIDE', 'FUCHSIA')

  env_string = ('RELEASE_BUILD_BUCKET_PATH = '
                'gs://clusterfuchsia-builds-test/libfuzzer/'
                'fuchsia-([0-9]+).zip')
  commands.update_environment_for_job(env_string)

  data_types.Job(
      name='libfuzzer_asan_fuchsia',
      platform='FUCHSIA',
      environment_string=env_string).put()
  data_types.FuzzTarget(
      binary='example-fuzzers/crash_fuzzer',
      engine='libFuzzer',
      project='fuchsia').put()

  environment.set_value('UNPACK_ALL_FUZZ_TARGETS_AND_FILES', True)
  helpers.patch(self, [
      'clusterfuzz._internal.system.shell.clear_temp_directory',
  ])
def test_syzkaller_kasan_android_with_env(self):
  """Test syzkaller kasan."""
  environment.set_value('OS_OVERRIDE', 'ANDROID_KERNEL')
  environment.set_bot_environment()
  self._real_read_data_from_file = utils.read_data_from_file
  test_helpers.patch(self, [
      'clusterfuzz._internal.platforms.android.fetch_artifact.get',
      'clusterfuzz._internal.platforms.android.kernel_utils.get_kernel_hash_and_build_id',
      'clusterfuzz._internal.platforms.android.kernel_utils.get_kernel_name',
      'clusterfuzz._internal.platforms.android.settings.get_product_brand',
      'clusterfuzz._internal.google_cloud_utils.storage.get_file_from_cache_if_exists',
      'clusterfuzz._internal.google_cloud_utils.storage.store_file_in_cache',
      'clusterfuzz._internal.base.utils.write_data_to_file',
      'clusterfuzz._internal.base.utils.read_data_from_file'
  ])
  self.mock.get.side_effect = _mock_fetch_artifact_get
  self.mock.get_kernel_hash_and_build_id.return_value = '40e9b2ff3a2', '12345'
  self.mock.get_kernel_name.return_value = 'device_kernel'
  self.mock.get_product_brand.return_value = 'google'
  self.mock.get_file_from_cache_if_exists.return_value = False
  self.mock.store_file_in_cache.return_value = None
  # Bug fix: the original `self.mock.write_data_to_file = None` rebound the
  # attribute on the mock namespace, discarding the handle to the patched
  # function instead of configuring it. Set its return value instead, in
  # line with the other mocks above.
  self.mock.write_data_to_file.return_value = None
  self.mock.read_data_from_file.side_effect = self._mock_read_data_from_file

  data = self._read_test_data('kasan_syzkaller_android.txt')
  expected_stack = self._read_test_data(
      'kasan_syzkaller_android_linkified.txt')

  actual_state = stack_analyzer.get_crash_data(data)
  self.assertEqual(actual_state.crash_stacktrace, expected_stack)
def setUp(self):
  """Reset temporary directories, patch the environment, and point the bot
  at the test build directory."""
  clear_temp_dir()
  create_temp_dir()
  test_helpers.patch_environ(self)
  # Keep strategy selection out of these tests so runs are deterministic.
  afl_launcher_integration_test.dont_use_strategies(self)
  environment.set_value('BUILD_DIR', DATA_DIRECTORY)
def execute_task(*_):
  """Execute the report uploads.

  Finds all ReportMetadata entries not yet marked uploaded, uploads each
  corresponding crash report, and marks successful uploads. Metadata for
  testcases that no longer exist is deleted; failed uploads are left for a
  later retry.
  """
  logs.log('Uploading pending reports.')

  # Get metadata for reports requiring upload.
  reports_metadata = ndb_utils.get_all_from_query(
      data_types.ReportMetadata.query(
          ndb_utils.is_false(data_types.ReportMetadata.is_uploaded)))
  reports_metadata = list(reports_metadata)
  if not reports_metadata:
    logs.log('No reports that need upload found.')
    return

  environment.set_value('UPLOAD_MODE', 'prod')

  # Otherwise, upload corresponding reports.
  logs.log('Uploading reports for testcases: %s' %
           str([report.testcase_id for report in reports_metadata]))

  report_metadata_to_delete = []
  for report_metadata in reports_metadata:
    # Convert metadata back into actual report.
    crash_info = crash_uploader.crash_report_info_from_metadata(
        report_metadata)
    testcase_id = report_metadata.testcase_id

    try:
      _ = data_handler.get_testcase_by_id(testcase_id)
    except errors.InvalidTestcaseError:
      # Testcase is gone; queue its metadata for deletion below.
      logs.log_warn('Could not find testcase %s.' % testcase_id)
      report_metadata_to_delete.append(report_metadata.key)
      continue

    # Upload the report and update the corresponding testcase info.
    logs.log('Processing testcase %s for crash upload.' % testcase_id)
    crash_report_id = crash_info.upload()
    if crash_report_id is None:
      # Leave is_uploaded unset so a later run retries this report.
      logs.log_error(
          'Crash upload for testcase %s failed, retry later.' % testcase_id)
      continue

    # Update the report metadata to indicate successful upload.
    report_metadata.crash_report_id = crash_report_id
    report_metadata.is_uploaded = True
    report_metadata.put()

    logs.log('Uploaded testcase %s to crash, got back report id %s.' %
             (testcase_id, crash_report_id))
    # Brief pause between uploads, presumably to avoid hammering the crash
    # service — TODO(review): confirm rate-limit rationale.
    time.sleep(1)

  # Delete report metadata entries where testcase does not exist anymore or
  # upload is not supported.
  if report_metadata_to_delete:
    ndb_utils.delete_multi(report_metadata_to_delete)

  # Log done with uploads.
  # Deletion happens in batches in cleanup_task, so that in case of error there
  # is some buffer for looking at stored ReportMetadata in the meantime.
  logs.log('Finished uploading crash reports.')
def test_strategy_not_specified(self):
  """A 0.0 weight is returned when the strategy is not defined in
  |FUZZING_STRATEGIES|."""
  environment.set_value('FUZZING_STRATEGIES',
                        '{"strategy_1": 0.5, "strategy_3": 0.3}')
  probability = engine_common.get_strategy_probability('strategy_2', 0.33)
  self.assertEqual(0.0, probability)
def test_finds_fuzzer_with_legacy_prefix_in_name_and_env(self):
  """Test finding fuzzer, when legacy is set to the prefix in fuzzer_name."""
  environment.set_value('PROJECT_NAME', 'chromeos')
  prefixed_name = 'chromeos_' + self.fuzzer_name
  # Set up first, then look it up — the two paths should agree.
  expected_path = self._setup_fuzzer(prefixed_name)
  self.assertEqual(expected_path, self._find_fuzzer_path(prefixed_name))
def setup_adb():
  """Sets up ADB binary for use."""
  adb_binary_path = get_adb_path()

  # Leave a pre-existing ADB env var untouched.
  if environment.get_value('ADB'):
    return
  environment.set_value('ADB', adb_binary_path)
def setUp(self):
  """Put data in the local ndb table for the tests to query from, and set
  the bandit selection environment variables."""
  test_helpers.patch_environ(self)

  # (strategy_name, probability) pairs; every entry uses the libFuzzer
  # engine.
  specs = [
      ('fork,corpus_subset,recommended_dict,', 0.33),
      ('random_max_len,corpus_mutations_ml_rnn,'
       'value_profile,recommended_dict,', 0.34),
      ('corpus_mutations_radamsa,'
       'random_max_len,corpus_subset,', 0.33),
  ]
  entities = []
  for name, probability in specs:
    entity = data_types.FuzzStrategyProbability()
    entity.strategy_name = name
    entity.probability = probability
    entity.engine = 'libFuzzer'
    entities.append(entity)
  ndb.put_multi(entities)

  distribution = fuzz_task.get_strategy_distribution_from_ndb()
  environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)
  environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)
def test_weighted_strategy_pool(self):
  """Tests whether a proper strategy pool is returned by the multi armed
  bandit selection implementation with medium temperature. Based on
  deterministic strategy selection. Mutator plugin is patched to be
  included in our strategy pool."""
  environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')
  pool = strategy_selection.generate_weighted_strategy_pool(
      strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,
      use_generator=True,
      engine_name='libFuzzer')

  expected_enabled = (
      strategy.CORPUS_MUTATION_ML_RNN_STRATEGY,
      strategy.RANDOM_MAX_LENGTH_STRATEGY,
      strategy.VALUE_PROFILE_STRATEGY,
      strategy.RECOMMENDED_DICTIONARY_STRATEGY,
  )
  expected_disabled = (
      strategy.CORPUS_MUTATION_RADAMSA_STRATEGY,
      strategy.FORK_STRATEGY,
  )
  for item in expected_enabled:
    self.assertTrue(pool.do_strategy(item))
  for item in expected_disabled:
    self.assertFalse(pool.do_strategy(item))
def run_testcase(thread_index, file_path, gestures, env_copy):
  """Run a single testcase and return crash results in the crash queue.

  Returns (None, None, None) if any exception occurs while running.
  """
  try:
    if env_copy:
      # Refresh our environment with the copy passed in from the parent.
      os.environ.update(env_copy)

    needs_http = '-http-' in file_path
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    app_directory = environment.get_value('APP_DIR')
    environment.set_value('PIDS', '[]')

    # Build the command line, then run the application under it.
    command = get_command_line_for_application(
        file_path, user_profile_index=thread_index, needs_http=needs_http)
    return process_handler.run_process(
        command,
        timeout=test_timeout,
        gestures=gestures,
        env_copy=env_copy,
        current_working_directory=app_directory)
  except Exception:
    logs.log_error('Exception occurred while running run_testcase.')
    return None, None, None
def setUp(self):
  """Build an isolated bot environment: a temporary ROOT_DIR, patched
  helpers, clean fuzz-input directories, and local pubsub resources."""
  self.tmp_dir = tempfile.mkdtemp()
  os.environ['BOT_TMPDIR'] = os.path.join(self.tmp_dir, 'bot_tmpdir')
  test_helpers.patch(self, [
      'clusterfuzz._internal.datastore.data_handler.'
      'get_data_bundle_bucket_name',
      'clusterfuzz._internal.system.environment.'
      'set_environment_parameters_from_file',
  ])
  test_helpers.patch_environ(self)

  # Our tests write data/logs into subdirs of ROOT_DIR. Pivot the ROOT_DIR to
  # a temporary one.
  new_root = _create_test_root()
  os.environ['ROOT_DIR'] = new_root
  self.saved_cwd = os.getcwd()
  os.chdir(new_root)
  environment.set_bot_environment()
  # Start from empty fuzz-input directories on both the host and the worker
  # side.
  fuzz_inputs = os.environ['FUZZ_INPUTS']
  shell.remove_directory(fuzz_inputs, recreate=True)
  worker_fuzz_inputs = file_host.rebase_to_worker_root(fuzz_inputs)
  shell.remove_directory(worker_fuzz_inputs, recreate=True)
  environment.set_value('GSUTIL_PATH', os.path.dirname(_which('gsutil')))
  test_utils.setup_pubsub('test-clusterfuzz')
  test_utils.create_pubsub_topic(pubsub.PubSubClient(), 'test-clusterfuzz',
                                 'jobs-project-linux')
def test_upload_without_component_revisions(self):
  """Log should contain message on empty component revisions."""
  self.mock.get_component_range_list.return_value = []
  self.mock.write_data.return_value = mock.MagicMock()

  stats_contents = (
      '{"stat": 1000, "timestamp": 1472846341.017923, "kind": '
      '"TestcaseRun", "job": "job", "fuzzer": "fuzzer", '
      '"build_revision": 123}\n')
  self.fs.create_file(self.testcase_path + '.stats2', contents=stats_contents)
  environment.set_value('BOT_NAME', 'hostname.company.com')

  result = CrashResult(return_code=1, crash_time=5, output='fake output')
  log = testcase_manager.prepare_log_for_upload(result.get_stacktrace(),
                                                result.return_code)
  log_time = testcase_manager._get_testcase_time(self.testcase_path)
  testcase_manager.upload_log(log, log_time)

  # Date and time below is derived from 1472846341 timestamp value.
  self.mock.write_data.assert_called_once_with(
      b'Component revisions (build r123):\n'
      b'Not available.\n\n'
      b'Bot name: hostname.company.com\n'
      b'Return code: 1\n\nfake output',
      'gs://fake-gcs-logs/fuzzer/job/2016-09-02/19:59:01:017923.log')
def test_bazel_test_args(self):
  """Test bazel test args with a libFuzzer test case"""
  environment.set_value('HELP_FORMAT', 'bazel test %BAZEL_TEST_ARGS%')
  testcase = data_types.Testcase()
  testcase.fuzzer_name = 'libFuzzer'
  testcase.overridden_fuzzer_name = 'libFuzzer_test_project_test_fuzzer'
  testcase.job_type = 'ubsan_job_without_help_format'
  testcase.crash_revision = 1337
  # %TESTCASE% placeholder plus one quoted argument containing a space.
  testcase.minimized_arguments = (
      '%TESTCASE% test_fuzzer -arg1=val1 -arg2="val2 val3"')
  testcase.put()
  testcase.set_metadata(
      'env', {
          'ASAN_OPTIONS': {
              'handle_abort': 1,
              'symbolize': 0,
              'redzone': 512,
          },
          'UBSAN_OPTIONS': {
              'halt_on_error': 1,
              'symbolize': 0,
          },
          'OTHER_OPTIONS': {
              'symbolize': 1
          }
      })
  # NOTE(review): the expected output omits the `symbolize` entries and
  # OTHER_OPTIONS entirely — presumably the help formatter filters them;
  # confirm against get_formatted_reproduction_help.
  self.assertEqual(
      data_handler.get_formatted_reproduction_help(testcase),
      'bazel test '
      '--test_env=ASAN_OPTIONS="handle_abort=1:redzone=512" '
      '--test_env=UBSAN_OPTIONS="halt_on_error=1" '
      '--test_arg=-arg1=val1 '
      '--test_arg=\'-arg2=val2 val3\'')
def setUp(self):
  """Create a data-bundles directory on the fake filesystem and point
  DATA_BUNDLES_DIR at it."""
  test_utils.set_up_pyfakefs(self)
  helpers.patch_environ(self)
  self.data_bundles_dir = '/data-bundles'
  os.mkdir(self.data_bundles_dir)
  environment.set_value('DATA_BUNDLES_DIR', self.data_bundles_dir)
def setUp(self):
  """Initialize the persistent cache on a fake filesystem and create the
  5-entry FIFO on-disk cache under test."""
  test_helpers.patch_environ(self)
  test_utils.set_up_pyfakefs(self)
  # CACHE_DIR must be set before initialize() so the cache lands on the
  # fake filesystem.
  environment.set_value('CACHE_DIR', '/tmp/test-cache')
  persistent_cache.initialize()
  self.cache = memoize.FifoOnDisk(5)
def setUp(self):
  """Point CONFIG_DIR_OVERRIDE at the bundled test configs and load them."""
  test_helpers.patch_environ(self)
  self.configs_directory = os.path.join(
      os.path.dirname(__file__), 'local_config_data')
  environment.set_value('CONFIG_DIR_OVERRIDE', self.configs_directory)
  self.config = local_config.Config()
def test_timeout_overrides(self):
  """Test timeout overrides."""
  environment.set_value('FUZZ_TEST_TIMEOUT_OVERRIDE', 9001)
  environment.set_value('MAX_TESTCASES_OVERRIDE', 42)

  commands.update_environment_for_job(
      'FUZZ_TEST_TIMEOUT = 123\nMAX_TESTCASES = 5\n')

  # The *_OVERRIDE values must win over the job-provided ones.
  self.assertEqual(9001, environment.get_value('FUZZ_TEST_TIMEOUT'))
  self.assertEqual(42, environment.get_value('MAX_TESTCASES'))
def test_selection_for_android_apk(self):
  """Ensure that flags are added for the Android APK format."""
  self.mock.random.return_value = 0.3
  environment.set_value('APP_NAME', 'App_1.apk')

  trials.Trials().setup_additional_args_for_app()

  self.assertEqual(environment.get_value('APP_ARGS'), '-x --a1')
  self.assertEqual(environment.get_value('TRIAL_APP_ARGS'), '--a1')
def test_selection_for_windows_executable(self):
  """Ensure that flags are added when the app name ends in ".exe"."""
  self.mock.random.return_value = 0.3
  environment.set_value('APP_NAME', 'app_1.exe')

  trials.Trials().setup_additional_args_for_app()

  self.assertEqual(environment.get_value('APP_ARGS'), '-x --a1')
  self.assertEqual(environment.get_value('TRIAL_APP_ARGS'), '--a1')
def test_trial_not_selected(self):
  """Ensure no additional flags if a trial was not selected."""
  self.mock.random.return_value = 0.5
  environment.set_value('APP_NAME', 'app_2')

  trials.Trials().setup_additional_args_for_app()

  # App args are untouched and no trial args are recorded.
  self.assertEqual(environment.get_value('APP_ARGS'), '-x')
  self.assertIsNone(environment.get_value('TRIAL_APP_ARGS'))
def test_no_effect_on_no_match(self):
  """Ensure that no additional flags are added if a binary has no trials."""
  self.mock.random.return_value = 0.0
  environment.set_value('APP_NAME', 'app_0')

  trials.Trials().setup_additional_args_for_app()

  # App args are untouched and no trial args are recorded.
  self.assertEqual(environment.get_value('APP_ARGS'), '-x')
  self.assertIsNone(environment.get_value('TRIAL_APP_ARGS'))
def test_trial_selected_one_option(self):
  """Ensure that the expected flags are added if a trial is selected."""
  self.mock.random.return_value = 0.3
  environment.set_value('APP_NAME', 'app_1')

  trials.Trials().setup_additional_args_for_app()

  self.assertEqual(environment.get_value('APP_ARGS'), '-x --a1')
  self.assertEqual(environment.get_value('TRIAL_APP_ARGS'), '--a1')
def _setup_env(self, job_type=None):
  """Set up bot environment for |job_type|; a no-op when it is falsy."""
  if not job_type:
    return

  query = data_types.Job.query(data_types.Job.name == job_type)
  job = query.get()
  environment.set_value('JOB_NAME', job_type)
  commands.update_environment_for_job(job.environment_string)
def test_reset_environment(self):
  """Tests that reset_environment() works as intended."""
  variable = 'NEW_VARIABLE'
  # Sanity-check the precondition before exercising the API.
  self.assertNotIn(variable, os.environ)

  environment.set_value(variable, 1)
  environment.reset_environment()

  # The variable set after the reset point must be gone again.
  self.assertNotIn(variable, os.environ)
def setUp(self):
  """Prepare test data and necessary env variables."""
  test_helpers.patch_environ(self)
  environment.set_value('FAIL_RETRIES', 1)

  self.data_directory = os.path.join(
      os.path.dirname(__file__), 'performance_analyzer_data')
  self.libfuzzer_data_directory = os.path.join(self.data_directory,
                                               'libfuzzer')
  self.analyzer = performance_analyzer.LibFuzzerPerformanceAnalyzer()
def test_set_environment_with_default(self):
  """set_environment() should pull values from the test config while
  leaving variables already present in the environment untouched."""
  environment.set_value('ISSUE_TRACKER', 'test-issue-tracker-override')
  environment.set_value('UPDATE_WEB_TESTS', True)

  self.config.set_environment()

  # PROJECT_NAME comes from the config; the other two keep their overrides.
  self.assertEqual('test-project', environment.get_value('PROJECT_NAME'))
  self.assertEqual('test-issue-tracker-override',
                   environment.get_value('ISSUE_TRACKER'))
  self.assertEqual(True, environment.get_value('UPDATE_WEB_TESTS'))