def set_initial_testcase_metadata(testcase):
  """Set various testcase metadata fields during testcase initialization.

  Copies build identifiers from the environment onto the testcase, stores a
  filtered copy of the GN args file (when present), and records the current
  platform / platform id. All metadata writes defer persistence via
  update_testcase=False.
  """
  # Mirror build identifiers from the environment into testcase metadata.
  for env_name, metadata_name in (('BUILD_KEY', 'build_key'),
                                  ('BUILD_URL', 'build_url')):
    env_value = environment.get_value(env_name)
    if env_value:
      testcase.set_metadata(metadata_name, env_value, update_testcase=False)

  gn_args_path = environment.get_value('GN_ARGS_PATH', '')
  if gn_args_path and os.path.exists(gn_args_path):
    raw_gn_args = utils.read_data_from_file(
        gn_args_path, eval_data=False, default='')

    # goma_dir is only relevant to the machine that did the build, so strip
    # any line matching it before storing the args.
    kept_lines = [
        gn_args_line for gn_args_line in raw_gn_args.splitlines()
        if not GOMA_DIR_LINE_REGEX.match(gn_args_line)
    ]
    testcase.set_metadata(
        'gn_args', '\n'.join(kept_lines), update_testcase=False)

  testcase.platform = environment.platform().lower()
  testcase.platform_id = environment.get_platform_id()
def process_command(task):
  """Figures out what to do with the given task and executes the command.

  Args:
    task: Task object providing command, argument, job, high_end,
      is_command_override and a payload() accessor.

  Raises:
    errors.BadStateError: If the task's job exists but has no platform set.
  """
  logs.log("Executing command '%s'" % task.payload())
  if not task.payload().strip():
    logs.log_error('Empty task received.')
    return

  # Parse task payload.
  task_name = task.command
  task_argument = task.argument
  job_name = task.job

  # Expose the task details to the rest of the process via the environment.
  environment.set_value('TASK_NAME', task_name)
  environment.set_value('TASK_ARGUMENT', task_argument)
  environment.set_value('JOB_NAME', job_name)
  if job_name != 'none':
    job = data_types.Job.query(data_types.Job.name == job_name).get()
    # Job might be removed. In that case, we don't want an exception
    # raised and causing this task to be retried by another bot.
    if not job:
      logs.log_error("Job '%s' not found." % job_name)
      return

    if not job.platform:
      error_string = "No platform set for job '%s'" % job_name
      logs.log_error(error_string)
      raise errors.BadStateError(error_string)

    # A misconfiguration led to this point. Clean up the job if necessary.
    job_queue_suffix = tasks.queue_suffix_for_platform(job.platform)
    bot_queue_suffix = tasks.default_queue_suffix()

    if job_queue_suffix != bot_queue_suffix:
      # This happens rarely, store this as a hard exception.
      logs.log_error(
          'Wrong platform for job %s: job queue [%s], bot queue [%s].' %
          (job_name, job_queue_suffix, bot_queue_suffix))

      # Try to recreate the job in the correct task queue.
      new_queue = (
          tasks.high_end_queue() if task.high_end else tasks.regular_queue())
      new_queue += job_queue_suffix

      # Command override is continuously run by a bot. If we keep failing
      # and recreating the task, it will just DoS the entire task queue.
      # So, we don't create any new tasks in that case since it needs
      # manual intervention to fix the override anyway.
      if not task.is_command_override:
        tasks.add_task(task_name, task_argument, job_name, new_queue)

      # Add a wait interval to avoid overflowing task creation.
      # NOTE(review): no default is passed to get_value here, so an unset
      # FAIL_WAIT would make time.sleep raise — presumably FAIL_WAIT is
      # always configured; confirm.
      failure_wait_interval = environment.get_value('FAIL_WAIT')
      time.sleep(failure_wait_interval)
      return

    # Get testcase (if any based on task argument).
    testcase = data_handler.get_entity_by_type_and_id(data_types.Testcase,
                                                      task_argument)
    if testcase:
      current_platform_id = environment.get_platform_id()
      testcase_platform_id = testcase.platform_id

      # This indicates we are trying to run this job on the wrong platform.
      # This can happen when you have different type of devices (e.g
      # android) on the same platform group. In this case, we just recreate
      # the task.
      # NOTE(review): the '%d' format below assumes a numeric testcase key
      # id — confirm keys are always integers.
      if (testcase_platform_id and not utils.fields_match(
          testcase_platform_id, current_platform_id)):
        logs.log(
            'Testcase %d platform (%s) does not match with ours (%s), exiting' %
            (testcase.key.id(), testcase_platform_id, current_platform_id))
        tasks.add_task(task_name, task_argument, job_name)
        return

    # Some fuzzers contain additional environment variables that should be
    # set for them. Append these for tests generated by these fuzzers and for
    # the fuzz command itself.
    fuzzer_name = None
    if task_name == 'fuzz':
      fuzzer_name = task_argument
    elif testcase:
      fuzzer_name = testcase.fuzzer_name

    # Get job's environment string.
    environment_string = job.get_environment_string()

    if task_name == 'minimize':
      # Let jobs specify a different job and fuzzer to minimize with.
      job_environment = job.get_environment()
      minimize_job_override = job_environment.get('MINIMIZE_JOB_OVERRIDE')
      if minimize_job_override:
        minimize_job = data_types.Job.query(
            data_types.Job.name == minimize_job_override).get()
        if minimize_job:
          # Switch to the override job's environment, remembering the
          # original job name for later reference.
          environment.set_value('JOB_NAME', minimize_job_override)
          environment_string = minimize_job.get_environment_string()
          environment_string += '\nORIGINAL_JOB_NAME = %s\n' % job_name
          job_name = minimize_job_override
        else:
          logs.log_error(
              'Job for minimization not found: %s.' % minimize_job_override)
          # Fallback to using own job for minimization.

      minimize_fuzzer_override = job_environment.get('MINIMIZE_FUZZER_OVERRIDE')
      fuzzer_name = minimize_fuzzer_override or fuzzer_name

    if fuzzer_name:
      fuzzer = data_types.Fuzzer.query(
          data_types.Fuzzer.name == fuzzer_name).get()
      additional_default_variables = ''
      additional_variables_for_job = ''
      if (fuzzer and hasattr(fuzzer, 'additional_environment_string') and
          fuzzer.additional_environment_string):
        for line in fuzzer.additional_environment_string.splitlines():
          # Job specific values may be defined in fuzzer additional
          # environment variable name strings in the form
          # job_name:VAR_NAME = VALUE.
          if '=' in line and ':' in line.split('=', 1)[0]:
            fuzzer_job_name, environment_definition = line.split(':', 1)
            if fuzzer_job_name == job_name:
              additional_variables_for_job += '\n%s' % environment_definition
            # Job-prefixed lines never contribute to the defaults, even when
            # the prefix names a different job.
            continue

          additional_default_variables += '\n%s' % line

      environment_string += additional_default_variables
      environment_string += additional_variables_for_job

    # Update environment for the job.
    update_environment_for_job(environment_string)

    # Match the cpu architecture with the ones required in the job definition.
    # If they don't match, then bail out and recreate task.
    if not is_supported_cpu_arch_for_job():
      logs.log(
          'Unsupported cpu architecture specified in job definition, exiting.')
      tasks.add_task(task_name, task_argument, job_name)
      return

  # Initial cleanup.
  cleanup_task_state()

  # Start http(s) servers.
  http_server.start()

  try:
    run_command(task_name, task_argument, job_name)
  finally:
    # Final clean up.
    cleanup_task_state()