def _unpack_build(self):
  """Unpack the custom build."""
  if not shell.remove_directory(self.build_dir, recreate=True):
    logs.log_error('Unable to clear custom binary directory.')
    _handle_unrecoverable_error_on_windows()
    return False

  build_local_archive = os.path.join(self.build_dir,
                                     self.custom_binary_filename)
  if not blobs.read_blob_to_disk(self.custom_binary_key, build_local_archive):
    return False

  # If the custom binary is an archive, unpack it.
  if archive.is_archive(self.custom_binary_filename):
    if not _make_space_for_build(build_local_archive, self.base_build_dir):
      # Remove the downloaded archive to free up space; otherwise it won't
      # get deleted until the next job run.
      shell.remove_file(build_local_archive)
      logs.log_fatal_and_exit('Could not make space for build.')

    try:
      archive.unpack(build_local_archive, self.build_dir, trusted=True)
    except:
      logs.log_error(
          'Unable to unpack build archive %s.' % build_local_archive)
      return False

    # Remove the archive.
    shell.remove_file(build_local_archive)

  return True
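# Illustrative only: archive.is_archive above is part of the codebase; a
# minimal extension-based sketch of that kind of check might look like the
# following (assumed behavior, not the actual implementation).
ARCHIVE_EXTENSIONS = ('.zip', '.tar', '.tar.gz', '.tgz', '.tar.bz2')


def is_archive_filename(filename):
  """Return True if |filename| looks like a supported archive."""
  return filename.lower().endswith(ARCHIVE_EXTENSIONS)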
def get_cuttlefish_device_ip():
  """Return the IP address of the cuttlefish device."""
  try:
    return socket.gethostbyname('cuttlefish')
  except socket.gaierror:
    logs.log_fatal_and_exit('Unable to get cvd ip address on cuttlefish host.')
  return None
def _wrapper(*args, **kwargs):
  """Wrapper."""
  # FIXME: Weird exceptions in imports, might be something relating to our
  # reload module. Needs further investigation; try this as a temporary fix.
  import multiprocessing.pool
  import threading

  # Fix for Python < 2.7.2.
  if not hasattr(threading.current_thread(), '_children'):
    # pylint: disable=protected-access
    threading.current_thread()._children = weakref.WeakKeyDictionary()

  global THREAD_POOL
  if THREAD_POOL is None:
    THREAD_POOL = multiprocessing.pool.ThreadPool(processes=3)

  try:
    async_result = THREAD_POOL.apply_async(func, args=args, kwds=kwargs)
    return async_result.get(timeout=duration)
  except multiprocessing.TimeoutError:
    # Sleep for some minutes in order to wait for flushing metrics.
    time.sleep(120)

    # If we don't exit here, threads will pile up and lead to out-of-memory.
    # Safe to just exit here.
    logs.log_fatal_and_exit(
        ('Exception occurred in function {0}: args: {1}, kwargs: {2}'
         ' exception: {3}').format(func, args, kwargs, sys.exc_info()[1]))
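# A minimal, self-contained sketch of the timeout pattern used by _wrapper
# above: submit work to a ThreadPool and bound the wait with
# AsyncResult.get(timeout=...). The names run_with_timeout and slow_task are
# illustrative, not part of the codebase.
import multiprocessing.pool
import time

_EXAMPLE_POOL = multiprocessing.pool.ThreadPool(processes=1)


def run_with_timeout(func, duration, *args, **kwargs):
  """Run |func|, raising multiprocessing.TimeoutError after |duration| secs."""
  async_result = _EXAMPLE_POOL.apply_async(func, args=args, kwds=kwargs)
  return async_result.get(timeout=duration)


def slow_task():
  time.sleep(5)
  return 'done'


try:
  print(run_with_timeout(slow_task, duration=1))
except multiprocessing.TimeoutError:
  # The pool thread keeps running after the timeout; only the waiting caller
  # gives up. That is why the real code exits instead of retrying forever.
  print('timed out')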
def wait_until_fully_booted():
  """Wait until the device is fully booted or the timeout expires."""

  def boot_completed():
    expected = '1'
    result = run_adb_shell_command(
        'getprop sys.boot_completed', log_error=False)
    return result == expected

  def drive_ready():
    expected = '0'
    result = run_adb_shell_command('\'test -d "/"; echo $?\'', log_error=False)
    return result == expected

  def package_manager_ready():
    expected = 'package:/system/framework/framework-res.apk'
    result = run_adb_shell_command('pm path android', log_error=False)
    if not result:
      return False

    # Ignore any extra messages before or after the result we want.
    return expected in result.splitlines()

  # Make sure we are not already recursing inside this function.
  if utils.is_recursive_call():
    return False

  # Wait until the device is online.
  wait_for_device()

  start_time = time.time()

  is_boot_completed = False
  is_drive_ready = False
  is_package_manager_ready = False

  while time.time() - start_time < REBOOT_TIMEOUT:
    # TODO(mbarbella): Investigate potential optimizations.
    # The package manager check should also work for shell restarts.
    if not is_drive_ready:
      is_drive_ready = drive_ready()
    if not is_package_manager_ready:
      is_package_manager_ready = package_manager_ready()
    if not is_boot_completed:
      is_boot_completed = boot_completed()

    if is_drive_ready and is_package_manager_ready and is_boot_completed:
      return True

    time.sleep(BOOT_WAIT_INTERVAL)

  factory_reset()
  logs.log_fatal_and_exit(
      'Device failed to finish boot. Reset to factory settings and exiting.')

  # Not reached.
  return False
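# A generic sketch (names and default values assumed) of the polling loop
# above: probe several independent readiness checks, remember which have
# already passed so they are not re-run, and give up after a fixed deadline.
import time


def wait_for_all(checks, timeout=60, interval=5):
  """Return True once every callable in |checks| has returned True."""
  passed = [False] * len(checks)
  deadline = time.time() + timeout
  while time.time() < deadline:
    for i, check in enumerate(checks):
      if not passed[i]:
        passed[i] = check()  # Only re-run checks that haven't passed yet.
    if all(passed):
      return True
    time.sleep(interval)
  return False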
def _check_system_settings(self):
  """Check system settings required for AFL."""
  kernel_core_pattern_file_path = '/proc/sys/kernel/core_pattern'
  if (os.path.exists(kernel_core_pattern_file_path) and
      open(kernel_core_pattern_file_path).read().strip() != 'core'):
    logs.log_fatal_and_exit('AFL needs core_pattern to be set to core.')

  cpu_scaling_file_path = (
      '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor')
  if (os.path.exists(cpu_scaling_file_path) and
      open(cpu_scaling_file_path).read().strip() != 'performance'):
    logs.log_warn('For optimal AFL performance, '
                  'set on-demand cpu scaling to performance.')
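# For reference, a hedged sketch (not part of the codebase) of how these two
# settings are typically applied on a Linux host; both writes require root.
# This mirrors the setup afl-fuzz itself asks for on startup.
def _apply_afl_system_settings():
  with open('/proc/sys/kernel/core_pattern', 'w') as f:
    f.write('core')  # Keep core dumps local so AFL can detect crashes.
  with open(
      '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor', 'w') as f:
    f.write('performance')  # Avoid frequency scaling skewing exec speed.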
def bad_state_reached():
  """Wait while the device is in a bad state, then exit."""
  persistent_cache.clear_values()
  logs.log_fatal_and_exit(
      'Device in bad state.', wait_before_exit=BAD_STATE_WAIT)
def _unpack_build(base_build_dir, build_dir, build_url, target_weights=None):
  """Unpacks a build from a build url into the build directory."""
  # Track time taken to unpack builds so that it doesn't silently regress.
  start_time = time.time()

  # Free up memory.
  utils.python_gc()

  # Remove the current build.
  logs.log('Removing build directory %s.' % build_dir)
  if not shell.remove_directory(build_dir, recreate=True):
    logs.log_error('Unable to clear build directory %s.' % build_dir)
    _handle_unrecoverable_error_on_windows()
    return False

  # Decide whether to use cached build archives or not.
  use_cache = environment.get_value('CACHE_STORE', False)

  # Download the build archive locally.
  build_local_archive = os.path.join(build_dir, os.path.basename(build_url))

  # Make the disk space necessary for the archive available.
  archive_size = storage.get_download_file_size(
      build_url, build_local_archive, use_cache=True)
  if archive_size is not None and not _make_space(archive_size,
                                                  base_build_dir):
    shell.clear_data_directories()
    logs.log_fatal_and_exit(
        'Failed to make space for download. '
        'Cleared all data directories to free up space, exiting.')

  logs.log('Downloading build from url %s.' % build_url)
  try:
    storage.copy_file_from(build_url, build_local_archive, use_cache=use_cache)
  except:
    logs.log_error('Unable to download build url %s.' % build_url)
    return False

  unpack_everything = environment.get_value(
      'UNPACK_ALL_FUZZ_TARGETS_AND_FILES')
  if not unpack_everything:
    # For fuzzing, pick a random fuzz target so that we only un-archive that
    # particular fuzz target and its dependencies, saving disk space.
    # If we are going to unpack everything in the archive based on
    # |UNPACK_ALL_FUZZ_TARGETS_AND_FILES| in the job definition, then don't
    # set a random fuzz target before we've unpacked the build. It won't
    # actually save us anything in this case and can be really expensive for
    # large builds (such as Chrome OS). Defer setting it until after the
    # build has been unpacked.
    _set_random_fuzz_target_for_fuzzing_if_needed(
        _get_fuzz_targets_from_archive(build_local_archive), target_weights)

  # The actual list of files to unpack can be smaller if we are only
  # un-archiving a particular fuzz target.
  file_match_callback = _get_file_match_callback()
  assert not (unpack_everything and file_match_callback is not None)

  if not _make_space_for_build(build_local_archive, base_build_dir,
                               file_match_callback):
    shell.clear_data_directories()
    logs.log_fatal_and_exit(
        'Failed to make space for build. '
        'Cleared all data directories to free up space, exiting.')

  # Unpack the local build archive.
  logs.log('Unpacking build archive %s.' % build_local_archive)
  trusted = not utils.is_oss_fuzz()
  try:
    archive.unpack(
        build_local_archive,
        build_dir,
        trusted=trusted,
        file_match_callback=file_match_callback)
  except:
    logs.log_error('Unable to unpack build archive %s.' % build_local_archive)
    return False

  if unpack_everything:
    # Set a random fuzz target now that the build has been unpacked, if we
    # didn't set one earlier.
    _set_random_fuzz_target_for_fuzzing_if_needed(
        _get_fuzz_targets_from_dir(build_dir), target_weights)

  # If this is a partial build due to selected build files, mark it as such
  # so that it is not re-used.
  if file_match_callback:
    partial_build_file_path = os.path.join(build_dir, PARTIAL_BUILD_FILE)
    utils.write_data_to_file('', partial_build_file_path)

  # No point in keeping the archive around.
  shell.remove_file(build_local_archive)

  end_time = time.time()
  elapsed_time = end_time - start_time
  log_func = logs.log_warn if elapsed_time > UNPACK_TIME_LIMIT else logs.log
  log_func('Build took %0.02f minutes to unpack.' % (elapsed_time / 60.))

  return True
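# A hypothetical sketch of the kind of free-space check that _make_space and
# _make_space_for_build perform before downloading or unpacking: compare the
# bytes needed against free space on the build volume. has_space_for and the
# safety margin value are illustrative, not the actual implementation.
import shutil


def has_space_for(path, bytes_needed, safety_margin=2 * 1024**3):
  """Return True if |path|'s volume can hold |bytes_needed| plus a margin."""
  free = shutil.disk_usage(path).free
  return free >= bytes_needed + safety_margin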
def check_for_bad_build(job_type, crash_revision):
  """Return true if the build is bad, i.e. crashes on startup."""
  # Check the bad build check flag to see if we want to do this.
  if not environment.get_value('BAD_BUILD_CHECK'):
    return False

  # Create a blank command line with no file to run and no http.
  command = get_command_line_for_application(file_to_run='', needs_http=False)

  # When checking for bad builds, we use the default window size.
  # We don't want to pick a custom size since it can potentially cause a
  # startup crash and cause a build to be detected incorrectly as bad.
  default_window_argument = environment.get_value('WINDOW_ARG', '')
  if default_window_argument:
    command = command.replace(' %s' % default_window_argument, '')

  # TSAN is slow, and boots slow on first startup. Increase the warmup
  # timeout for this case.
  if environment.tool_matches('TSAN', job_type):
    fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
  else:
    fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

  # Initialize helper variables.
  is_bad_build = False
  build_run_console_output = ''
  app_directory = environment.get_value('APP_DIR')

  # Exit all running instances.
  process_handler.terminate_stale_application_instances()

  # Check if the build is bad.
  return_code, crash_time, output = process_handler.run_process(
      command,
      timeout=fast_warmup_timeout,
      current_working_directory=app_directory)
  crash_result = CrashResult(return_code, crash_time, output)

  # 1. Need to account for startup crashes with no crash state. E.g. failed
  #    to load shared library. So, ignore state for comparison.
  # 2. Ignore leaks as they don't block a build from reporting regular
  #    crashes and also don't impact regression range calculations.
  if (crash_result.is_crash(ignore_state=True) and
      not crash_result.should_ignore() and
      not crash_result.get_type() in ['Direct-leak', 'Indirect-leak']):
    is_bad_build = True
    build_run_console_output = utils.get_crash_stacktrace_output(
        command,
        crash_result.get_stacktrace(symbolized=True),
        crash_result.get_stacktrace(symbolized=False))
    logs.log(
        'Bad build for %s detected at r%d.' % (job_type, crash_revision),
        output=build_run_console_output)

  # Exit all running instances.
  process_handler.terminate_stale_application_instances()

  # Any of the conditions below indicate that the bot is in a bad state and
  # it is not caused by the build itself. In that case, just exit.
  build_state = data_handler.get_build_state(job_type, crash_revision)
  if is_bad_build and utils.sub_string_exists_in(BAD_STATE_HINTS, output):
    logs.log_fatal_and_exit(
        'Bad bot environment detected, exiting.',
        output=build_run_console_output,
        snapshot=process_handler.get_runtime_snapshot())

  # If none of the other bots have added information about this build,
  # then add it now.
  if (build_state == data_types.BuildState.UNMARKED and
      not crash_result.should_ignore()):
    data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                    build_run_console_output)

  return is_bad_build
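# A small sketch of the substring-hint scan utils.sub_string_exists_in
# performs over the process output above. The hint list is taken from the
# inline version of this check below; the function body is assumed.
BAD_STATE_HINTS_EXAMPLE = [
    'cannot open display',
    'logging service has stopped',
    'Maximum number of clients reached',
]


def sub_string_exists_in(substring_list, string):
  """Return True if any entry of |substring_list| occurs in |string|."""
  return any(substring in string for substring in substring_list)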
def check_for_bad_build(job_type, crash_revision):
  """Return true if the build is bad, i.e. crashes on startup."""
  # Check the bad build check flag to see if we want to do this.
  if not environment.get_value('BAD_BUILD_CHECK'):
    return False

  # Do not detect leaks while checking for bad builds.
  environment.reset_current_memory_tool_options(leaks=False)

  # Create a blank command line with no file to run and no http.
  command = get_command_line_for_application(file_to_run='', needs_http=False)

  # When checking for bad builds, we use the default window size.
  # We don't want to pick a custom size since it can potentially cause a
  # startup crash and cause a build to be detected incorrectly as bad.
  default_window_argument = environment.get_value('WINDOW_ARG', '')
  if default_window_argument:
    command = command.replace(' %s' % default_window_argument, '')

  # Warmup timeout.
  fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

  # TSAN is slow, and boots slow on first startup. Increase the warmup
  # timeout for this case.
  if environment.tool_matches('TSAN', job_type):
    fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

  # Initialize helper variables.
  is_bad_build = False
  build_run_console_output = ''
  output = ''
  app_directory = environment.get_value('APP_DIR')

  # Check if the build is bad.
  process_handler.terminate_stale_application_instances()
  exit_code, _, output = process_handler.run_process(
      command,
      timeout=fast_warmup_timeout,
      current_working_directory=app_directory)
  output = utils.decode_to_unicode(output)

  if crash_analyzer.is_crash(exit_code, output):
    is_bad_build = True
    build_run_console_output = (
        '%s\n\n%s\n\n%s' % (command,
                            stack_symbolizer.symbolize_stacktrace(output),
                            output))
    logs.log(
        'Bad build for %s detected at r%d.' % (job_type, crash_revision),
        output=build_run_console_output)

  # Exit all running instances.
  process_handler.terminate_stale_application_instances()

  # Any of the conditions below indicate that the bot is in a bad state and
  # it is not caused by the build itself. In that case, just exit.
  build_state = data_handler.get_build_state(job_type, crash_revision)
  if (is_bad_build and
      ('cannot open display' in output or
       'logging service has stopped' in output or
       'Maximum number of clients reached' in output)):
    logs.log_fatal_and_exit(
        'Bad bot environment detected, exiting.',
        output=build_run_console_output)

  # If none of the other bots have added information about this build,
  # then add it now.
  if build_state == data_types.BuildState.UNMARKED:
    data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                    build_run_console_output)

  # Reset memory tool options.
  environment.reset_current_memory_tool_options()

  return is_bad_build
def bad_state_reached():
  """Wait while the device is in a bad state, then exit."""
  time.sleep(BAD_STATE_WAIT)
  persistent_cache.clear_values()
  logs.log_fatal_and_exit('Device in bad state.')