def main(): """Run a cycle of heartbeat checks to ensure Android device is running.""" logs.configure('android_heartbeat') dates.initialize_timezone_from_environment() environment.set_bot_environment() monitor.initialize() if environment.is_android_cuttlefish(): android.adb.set_cuttlefish_device_serial() device_serial = environment.get_value('ANDROID_SERIAL') while True: state = android.adb.get_device_state() if state == android.adb.DEVICE_NOT_FOUND_STRING.format( serial=device_serial): android.adb.connect_to_cuttlefish_device() state = android.adb.get_device_state() logs.log('Android device %s state: %s' % (device_serial, state)) monitoring_metrics.ANDROID_UPTIME.increment_by( int(state == 'device'), { 'serial': device_serial or '', 'platform': environment.get_platform_group() or '', }) time.sleep(data_types.ANDROID_HEARTBEAT_WAIT_INTERVAL) if data_handler.bot_run_timed_out(): break
def main(): """Update the heartbeat if there is bot activity.""" if len(sys.argv) < 2: print('Usage: %s <log file>' % sys.argv[0]) return environment.set_bot_environment() logs.configure('run_heartbeat') log_filename = sys.argv[1] previous_state = None # Get absolute path to heartbeat script and interpreter needed to execute it. startup_scripts_directory = environment.get_startup_scripts_directory() beat_script_path = os.path.join(startup_scripts_directory, BEAT_SCRIPT) beat_interpreter = shell.get_interpreter(beat_script_path) assert beat_interpreter while True: beat_command = [ beat_interpreter, beat_script_path, str(previous_state), log_filename ] try: previous_state = subprocess.check_output(beat_command, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: logs.log_error('Failed to beat.', output=e.output) except Exception: logs.log_error('Failed to beat.') # See if our run timed out, if yes bail out. if data_handler.bot_run_timed_out(): break
def test_configure(self):
  """Test configure."""
  self.mock._is_running_on_app_engine.return_value = False  # pylint: disable=protected-access
  logs._logger = None  # pylint: disable=protected-access
  logger = mock.MagicMock()
  self.mock.getLogger.return_value = logger

  logs.configure('test')
  self.mock.set_logger.assert_called_with(logger)
  self.mock.get_logging_config_dict.assert_called_once_with('test')
  self.mock.getLogger.assert_called_with('test')
  self.mock.dictConfig.assert_called_once_with(
      self.mock.get_logging_config_dict.return_value)
def main():
  logs.configure('heartbeat')
  dates.initialize_timezone_from_environment()
  environment.set_bot_environment()

  if sys.argv[1] == 'None':
    previous_state = None
  else:
    previous_state = sys.argv[1]
  log_filename = sys.argv[2]

  try:
    sys.stdout.write(str(beat(previous_state, log_filename)))
  except Exception:
    logs.log_error('Failed to beat.')

  time.sleep(data_types.HEARTBEAT_WAIT_INTERVAL)
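# A minimal sketch of a beat() helper, NOT the production implementation: it
# assumes the inter-invocation "state" is simply the log file's modification
# time rendered as a string, and that a state change means the bot is alive.
# The real beat() also records the heartbeat persistently; that side effect is
# reduced to a comment here.
import os


def example_beat(previous_state, log_filename):
  """Return the new state string (hypothetical illustration only)."""
  try:
    current_state = str(os.path.getmtime(log_filename))
  except OSError:
    # Log file is missing or unreadable; report the old state unchanged.
    return previous_state

  if current_state != previous_state:
    # The bot wrote to its log since the last check, so treat it as alive.
    # (Production code would update the bot's heartbeat record here.)
    pass

  return current_state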
def main():
  root_directory = environment.get_value('ROOT_DIR')
  if not root_directory:
    print(
        'Please set ROOT_DIR environment variable to the root of the source '
        'checkout before running. Exiting.')
    print('For an example, check init.bash in the local directory.')
    return

  set_start_time()
  environment.set_bot_environment()
  persistent_cache.initialize()
  logs.configure('run')

  # Python buffering can otherwise cause exception logs in the child run_*.py
  # processes to be lost.
  environment.set_value('PYTHONUNBUFFERED', 1)

  # Create command strings to launch bot and heartbeat.
  base_directory = environment.get_startup_scripts_directory()
  log_directory = environment.get_value('LOG_DIR')
  bot_log = os.path.join(log_directory, 'bot.log')

  bot_script_path = os.path.join(base_directory, BOT_SCRIPT)
  bot_interpreter = shell.get_interpreter(bot_script_path)
  assert bot_interpreter
  bot_command = '%s %s' % (bot_interpreter, bot_script_path)

  heartbeat_script_path = os.path.join(base_directory, HEARTBEAT_SCRIPT)
  heartbeat_interpreter = shell.get_interpreter(heartbeat_script_path)
  assert heartbeat_interpreter
  heartbeat_command = '%s %s %s' % (heartbeat_interpreter,
                                    heartbeat_script_path, bot_log)

  run_loop(bot_command, heartbeat_command)

  logs.log('Exit run.py')
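# For illustration only (all paths hypothetical): the two strings handed to
# run_loop() come out roughly as
#
#   bot_command       = '<interpreter> <base_directory>/<BOT_SCRIPT>'
#   heartbeat_command = '<interpreter> <base_directory>/<HEARTBEAT_SCRIPT> <LOG_DIR>/bot.log'
#
# where <interpreter> is whatever shell.get_interpreter() resolves for each
# script.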
def run_testcase_and_return_result_in_queue(crash_queue,
                                            thread_index,
                                            file_path,
                                            gestures,
                                            env_copy,
                                            upload_output=False):
  """Run a single testcase and return crash results in the crash queue."""
  # Since this is running in its own process, initialize the log handler again.
  # This is needed for Windows where instances are not shared across child
  # processes. See:
  # https://stackoverflow.com/questions/34724643/python-logging-with-multiprocessing-root-logger-different-in-windows
  logs.configure('run_testcase', {
      'testcase_path': file_path,
  })

  # Also reinitialize NDB context for the same reason as above.
  with ndb_init.context():
    _do_run_testcase_and_return_result_in_queue(
        crash_queue,
        thread_index,
        file_path,
        gestures,
        env_copy,
        upload_output=upload_output)
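# Hypothetical caller sketch: the function above is designed to run in its own
# process, so a dispatcher would typically hand it a multiprocessing.Queue and
# spawn one process per run. Everything below (names, process count, timeout)
# is illustrative, not the real dispatcher.
import multiprocessing
import os


def example_spawn_testcase_runs(file_path, gestures, process_count=2,
                                join_timeout=60):
  """Fan out run_testcase_and_return_result_in_queue and collect results."""
  crash_queue = multiprocessing.Queue()
  processes = []
  for thread_index in range(process_count):
    process = multiprocessing.Process(
        target=run_testcase_and_return_result_in_queue,
        args=(crash_queue, thread_index, file_path, gestures,
              os.environ.copy()),
        kwargs={'upload_output': False})
    process.start()
    processes.append(process)

  for process in processes:
    process.join(join_timeout)

  # Drain whatever crash results the child processes managed to report.
  results = []
  while not crash_queue.empty():
    results.append(crash_queue.get())
  return results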
    # Check if the build succeeded based on the existence of the
    # local archive file.
    if os.path.exists(archive_path_local):
      # Build success. Now, copy it to google cloud storage and make it
      # public.
      os.system('gsutil cp %s %s' % (archive_path_local, archive_path_remote))
      os.system('gsutil acl set public-read %s' % archive_path_remote)
      logs.log('Build succeeded, created %s.' % archive_filename)
    else:
      LAST_BUILD[tool_and_build_type] = ''
      logs.log_error('Build failed, unable to create %s.' % archive_filename)

  logs.log('Completed cycle, waiting for %d secs.' % wait_time)
  time.sleep(wait_time)


if __name__ == '__main__':
  # Make sure environment is correctly configured.
  logs.configure('run_bot')
  environment.set_bot_environment()

  fail_wait = environment.get_value('FAIL_WAIT')

  # Continue this forever.
  while True:
    try:
      main()
    except Exception:
      logs.log_error('Failed to create build.')

    time.sleep(fail_wait)
def test_configure_appengine(self):
  """Test configure on App Engine."""
  self.mock._is_running_on_app_engine.return_value = True  # pylint: disable=protected-access

  logs.configure('test')
  self.assertEqual(0, self.mock.dictConfig.call_count)
    ('/testcase-detail/update-from-trunk', update_from_trunk.Handler),
    ('/testcase-detail/update-issue', update_issue.Handler),
    ('/testcases', testcase_list.Handler),
    ('/testcases/load', testcase_list.JsonHandler),
    ('/upload-testcase', upload_testcase.Handler),
    ('/upload-testcase/get-url-oauth', upload_testcase.UploadUrlHandlerOAuth),
    ('/upload-testcase/prepare', upload_testcase.PrepareUploadHandler),
    ('/upload-testcase/load', upload_testcase.JsonHandler),
    ('/upload-testcase/upload', upload_testcase.UploadHandler),
    ('/upload-testcase/upload-oauth', upload_testcase.UploadHandlerOAuth),
    ('/update-job', jobs.UpdateJob),
    ('/update-job-template', jobs.UpdateJobTemplate),
    ('/viewer', viewer.Handler),
]

logs.configure('appengine')

config = local_config.GAEConfig()
main_domain = config.get('domains.main')
redirect_domains = config.get('domains.redirects')


def redirect_handler():
  """Redirection handler."""
  if not redirect_domains:
    return None

  if request.host in redirect_domains:
    return redirect('https://' + main_domain + request.full_path)

  return None
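# Assumed wiring, shown for context: Flask runs before_request callbacks prior
# to dispatching a route and short-circuits the request when one returns a
# response, which is how redirect_handler() can bounce traffic from the
# redirect domains to the main domain. Registration would look roughly like:
#
#   app = Flask(__name__)
#   app.before_request(redirect_handler)
#
# Whether the real App Engine entry point registers it exactly this way is an
# assumption.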
def main(): """Prepare the configuration options and start requesting tasks.""" logs.configure('run_bot') root_directory = environment.get_value('ROOT_DIR') if not root_directory: print('Please set ROOT_DIR environment variable to the root of the source ' 'checkout before running. Exiting.') print('For an example, check init.bash in the local directory.') return dates.initialize_timezone_from_environment() environment.set_bot_environment() monitor.initialize() if not profiler.start_if_needed('python_profiler_bot'): sys.exit(-1) fuzzers_init.run() if environment.is_trusted_host(ensure_connected=False): from clusterfuzz._internal.bot.untrusted_runner import host host.init() if environment.is_untrusted_worker(): # Track revision since we won't go into the task_loop. update_task.track_revision() from clusterfuzz._internal.bot.untrusted_runner import \ untrusted as untrusted_worker untrusted_worker.start_server() assert False, 'Unreachable code' while True: # task_loop should be an infinite loop, # unless we run into an exception. error_stacktrace, clean_exit, task_payload = task_loop() # Print the error trace to the console. if not clean_exit: print('Exception occurred while running "%s".' % task_payload) print('-' * 80) print(error_stacktrace) print('-' * 80) should_terminate = ( clean_exit or errors.error_in_list(error_stacktrace, errors.BOT_ERROR_TERMINATION_LIST)) if should_terminate: return logs.log_error( 'Task exited with exception (payload="%s").' % task_payload, error_stacktrace=error_stacktrace) should_hang = errors.error_in_list(error_stacktrace, errors.BOT_ERROR_HANG_LIST) if should_hang: logs.log('Start hanging forever.') while True: # Sleep to avoid consuming 100% of CPU. time.sleep(60) # See if our run timed out, if yes bail out. if data_handler.bot_run_timed_out(): return