def parse_args(self, args, is_bootstrap):
    if not args:
        return
    for a in args.split(','):
        if a == 'offline':
            # use cached recipe and stay offline the whole time
            self.offline = True
        elif a[:5] == 'cache':
            if len(a) > 5 and a[5] == '=':
                # cache a different recipe instead
                self.recipe_id = a[6:]
            # remotely retrieve recipe, but stay offline during the run
            if not is_bootstrap:
                self.offline = True
        elif a[:8] == 'quickcmd':
            # require 'quickcmd=<cmd>' (<= 8 also guards against an IndexError
            # when the arg is exactly 'quickcmd')
            if len(a) <= 8 or a[8] != '=':
                raise error.HarnessError("Bad use of 'quickcmd'")
            self.cmd = a[9:]
        else:
            raise error.HarnessError("Unknown beaker harness arg: %s" % a)
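# A minimal standalone sketch (not part of the harness) of how an
# AUTOTEST_HARNESS_ARGS string is interpreted by parse_args() above.
# The argument strings and recipe id below are made up for illustration.
def _sketch_parse_harness_args(args):
    opts = {'offline': False, 'recipe_id': None, 'cmd': None}
    for a in args.split(','):
        if a == 'offline':
            opts['offline'] = True
        elif a.startswith('cache='):
            opts['recipe_id'] = a[len('cache='):]
        elif a.startswith('quickcmd='):
            opts['cmd'] = a[len('quickcmd='):]
    return opts

# e.g. _sketch_parse_harness_args('offline')
#      _sketch_parse_harness_args('cache=1234')
#      _sketch_parse_harness_args('quickcmd=submit_log:-l /tmp/test.log')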
def bootstrap(self, fetchdir):
    '''How to kickstart autotest when you have no control file?
    You download the beaker XML, convert it to a control file and
    pass it back to autotest. Much like bootstrapping.. :-)
    '''
    # hack to sneakily pass results back to beaker without running
    # autotest. Need to avoid calling get_recipe below
    if self.cmd:
        self.parse_quickcmd(self.cmd)
        return None

    recipe = self.init_recipe_from_beaker()

    # remove stale state file
    if os.path.isfile(self.state_file):
        os.remove(self.state_file)
    self.tests = {}

    # sanity check
    if self.recipe_id != recipe.id:
        raise error.HarnessError(
            'Recipe mismatch: machine %s.. != XML %s..' %
            (self.recipe_id, recipe.id))

    # create a unique name
    control_file_name = recipe.job_id + '_' + recipe.id + '.control'
    control_file_path = fetchdir + '/' + control_file_name
    logging.debug('setting up control file - %s' % control_file_path)
    control_file = open(control_file_path, 'w')
    try:
        # convert recipe xml into a control file
        for task in recipe.tasks:
            self.convert_task_to_control(fetchdir, control_file, task)
            # getting the task id later will be hard, so store it in file/memory
            self.write_processed_tests(self.get_test_name(task), task.id)
        control_file.close()
    except HarnessException:
        # hook to bail out on reservesys systems and not run autotest
        return None
    except Exception as ex:
        os.remove(control_file_path)
        raise error.HarnessError(
            'beaker_harness: convert failed with -> %s' % ex)

    # autotest should find this under FETCHDIRTEST because it is unique
    return control_file_path
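# Illustrative only: the control file naming convention used by bootstrap()
# above. The fetch directory and beaker ids below are hypothetical.
_fetchdir = '/tmp/fetchdir'            # hypothetical fetch directory
_job_id, _recipe_id = '1234', '5678'   # hypothetical beaker job/recipe ids
_control_file_path = _fetchdir + '/' + _job_id + '_' + _recipe_id + '.control'
# -> /tmp/fetchdir/1234_5678.control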
def find_recipe(self, recipes_dict):
    if self.hostname in recipes_dict:
        return recipes_dict[self.hostname]
    for h in recipes_dict:
        if self.recipe_id == recipes_dict[h].id:
            return recipes_dict[h]
    raise error.HarnessError('No valid recipe for host %s' % self.hostname)
def test_status_detail(self, code, subdir, operation, status, tag,
                       optional_fields):
    """A test within this job is completing (detail)"""
    logging.debug('test_status_detail %s / %s / %s / %s / %s / %s',
                  code, subdir, operation, status, tag, str(optional_fields))

    if not subdir:
        # recipes - covered by run_start/complete/abort
        return

    """The mapping between beaker tasks and non-beaker tasks is not easy to
    separate.  Therefore we use the START and END markers along with the
    environment variable BEAKER_TASK_ID to help us.

    We keep an on-disk file that stores the tests we have seen (or will run
    [added by the conversion function above]).  If the test is expected, it
    will have a task id associated with it and we can communicate with beaker
    about it.  Otherwise, if no 'id' is found, assume this is a sub-task that
    beaker doesn't care about and keep all the results contained in the beaker
    results directory.
    """
    if code.startswith('START'):
        if subdir in self.tests and self.tests[subdir] != '0':
            # predefined beaker task
            self.bkr_proxy.task_start(self.tests[subdir])
        else:
            # some random sub-task, save for cleanup purposes
            self.write_processed_tests(subdir)
        return
    elif code.startswith('END'):
        if subdir in self.tests and self.tests[subdir] != '0':
            # predefined beaker task
            self.upload_task_files(self.tests[subdir], subdir)
            self.bkr_proxy.task_stop(self.tests[subdir])
            return
    else:
        if subdir in self.tests and self.tests[subdir] != '0':
            # predefined beaker task, will upload on END
            task_id = self.tests[subdir]
            task_upload = False
        else:
            # some random sub-task, save the upload as a task result
            # because there is no beaker task to add them to
            # the task id was not saved in the dictionary, get it from the env
            if 'BEAKER_TASK_ID' not in os.environ:
                raise error.HarnessError("No BEAKER_TASK_ID set")
            task_id = os.environ['BEAKER_TASK_ID']
            task_upload = True

        bkr_status = get_beaker_code(code)
        try:
            resultid = self.bkr_proxy.task_result(task_id, bkr_status,
                                                  subdir, 1, '')
            if task_upload:
                self.upload_result_files(task_id, resultid, subdir)
        except Exception:
            logging.critical('ERROR: Failed to process test results')
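# A small standalone sketch of the routing described in the comment above:
# self.tests maps a result subdir to its beaker task id, with '0' meaning
# "seen, but no beaker task id".  The dict contents here are made up.
_seen_tests = {'kernel_install': '4711', 'random_subtest': '0'}

def _sketch_route(code, subdir):
    """Mirror the branches of test_status_detail() for a known/unknown test."""
    known = subdir in _seen_tests and _seen_tests[subdir] != '0'
    if code.startswith('START'):
        return 'task_start' if known else 'record for cleanup only'
    if code.startswith('END'):
        return 'upload files + task_stop' if known else 'nothing to do'
    # any other status code is reported as a result
    return ('task_result, files uploaded at END' if known
            else 'task_result against BEAKER_TASK_ID, upload immediately')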
def get_recipe_from_LC(self):
    logging.debug('trying to get recipe from LC:')
    try:
        recipe = self.bkr_proxy.get_recipe()
    except Exception as exc:
        raise error.HarnessError('Failed to retrieve xml: %s' % exc)
    return recipe
def init_task_params(self, task):
    logging.debug('PrepareTaskParams')
    if task is None:
        raise error.HarnessError('No valid task')
    for (name, value) in task.params.items():
        logging.debug('adding to os.environ: <%s=%s>', name, value)
        os.environ[name] = value
def __init__(self, job, harness_args):
    logging.debug('harness_beaker __init__')
    super(harness_beaker, self).__init__(job)

    # temporary hack until BEAKER_RECIPE_ID and BEAKER_LAB_CONTROLLER_URL
    # are set up in beaker
    os.environ['BEAKER_RECIPE_ID'] = open('/root/RECIPE.TXT', 'r').read().strip()
    os.environ['BEAKER_LAB_CONTROLLER_URL'] = re.sub(
        "/bkr/", ":8000", os.environ['BEAKER'])

    # control whether the bootstrap environment remotely connects or stays
    # offline - cheap hack to support a flexible debug environment
    # the bootstrap job object is just a stub and won't have the '_state' attribute
    if hasattr(job, '_state'):
        is_bootstrap = False
        recipe_id = os.environ.get('RECIPE_ID') or '0'
    else:
        is_bootstrap = True
        recipe_id = os.environ.get('BEAKER_RECIPE_ID')
    os.environ['RECIPE_ID'] = recipe_id

    self.state_file = os.path.join(os.path.dirname(__file__),
                                   'harness_beaker.state')
    self.recipe_id = recipe_id
    self.labc_url = os.environ.get('BEAKER_LAB_CONTROLLER_URL')
    self.hostname = os.environ.get('HOSTNAME')
    self.tests = self.get_processed_tests()
    self.watchdog_pid = None
    self.offline = False
    self.cmd = None

    # handle legacy rhts scripts called from inside tests
    os.environ['PATH'] = "%s:%s" % ('/var/cache/autotest', os.environ['PATH'])

    if harness_args:
        logging.info('harness_args: %s' % harness_args)
        os.environ['AUTOTEST_HARNESS_ARGS'] = harness_args
    self.args = self.parse_args(harness_args, is_bootstrap)

    logging.debug('harness_beaker: state_file: <%s>', self.state_file)
    logging.debug('harness_beaker: hostname: <%s>', self.hostname)
    logging.debug('harness_beaker: labc_url: <%s>', self.labc_url)

    if not self.hostname:
        raise error.HarnessError('Need valid hostname')

    # hack for flexible debug environment
    labc = not self.offline and self.labc_url or None
    self.bkr_proxy = BkrProxy(self.recipe_id, labc)
    self.setupInitSymlink()
def parse_quickcmd(self, args):
    # hack to allow tests to quickly submit feedback through the harness
    if not args:
        return

    if 'BEAKER_TASK_ID' not in os.environ:
        raise error.HarnessError("No BEAKER_TASK_ID set")
    task_id = os.environ['BEAKER_TASK_ID']

    # Commands are from tests and should be reported as results
    cmd, q_args = args.split(':')
    if cmd == 'submit_log':
        try:
            # rhts_submit_log has as args: -S -T -l
            # we just care about -l
            f = None
            arg_list = q_args.split(' ')
            while arg_list:
                arg = arg_list.pop(0)
                if arg == '-l':
                    f = arg_list.pop(0)
                    break
            if not f:
                # no -l argument found; bail out to the error path below
                raise
            self.bkr_proxy.task_upload_file(task_id, f)
        except Exception:
            logging.critical('ERROR: Failed to process quick cmd %s' % cmd)
    elif cmd == 'submit_result':
        def init_args(testname='Need/a/testname/here', status="None",
                      logfile=None, score="0"):
            return testname, status, logfile, score

        try:
            # report_result has TESTNAME STATUS LOGFILE SCORE
            arg_list = q_args.split(' ')
            testname, status, logfile, score = init_args(*arg_list)
            resultid = self.bkr_proxy.task_result(task_id, status, testname,
                                                  score, '')
            if (logfile and os.path.isfile(logfile) and
                    os.path.getsize(logfile) != 0):
                self.bkr_proxy.result_upload_file(task_id, resultid, logfile)

            # save the dmesg file
            dfile = '/tmp/beaker.dmesg'
            utils.system('dmesg -c > %s' % dfile)
            if os.path.getsize(dfile) != 0:
                self.bkr_proxy.result_upload_file(task_id, resultid, dfile)
            # os.remove(dfile)
        except Exception:
            logging.critical('ERROR: Failed to process quick cmd %s' % cmd)
    elif cmd == 'reboot':
        # we are in a stub job. Can't use self.job.reboot() :-(
        utils.system("sync; sync; reboot")
        self.run_pause()
        raise error.JobContinue("more to come")
    else:
        raise error.HarnessError("Bad sub-quickcmd: %s" % cmd)
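# Illustrative only: the '<cmd>:<args>' strings parse_quickcmd() understands.
# The paths, test name and status below are made up.
_quickcmd_examples = [
    'submit_log:-S server -T task -l /mnt/testarea/test.log',
    'submit_result:/kernel/example/test PASS /tmp/test.log 0',
    'reboot:',
]
for _qc in _quickcmd_examples:
    _cmd, _qargs = _qc.split(':')
    print(_cmd, '->', _qargs.split(' '))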
def bootstrap(self, args, options):
    """
    Bootstrap autotest by fetching the control file first and passing it back

    Currently this relies on a harness to retrieve the file
    """
    def harness_env():
        try:
            return os.environ['AUTOTEST_HARNESS']
        except KeyError:
            return None

    def harness_args_env():
        try:
            return os.environ['AUTOTEST_HARNESS_ARGS']
        except KeyError:
            return None

    class stub_job(object):
        def config_set(self, name, value):
            return

    if not options.harness and not harness_env():
        self.help()

    if options.harness:
        harness_name = options.harness
    elif harness_env():
        harness_name = harness_env()
        options.harness = harness_name

    if options.harness_args:
        harness_args = options.harness_args
    else:
        harness_args = harness_args_env()
        options.harness_args = harness_args

    myjob = stub_job()

    # let the harness initialize itself
    try:
        myharness = harness.select(harness_name, myjob, harness_args)
        if not getattr(myharness, 'bootstrap'):
            raise error.HarnessError("Does not support bootstrapping\n")
    except Exception as error_detail:
        if DEBUG:
            raise
        sys.stderr.write("Harness %s failed to initialize -> %s\n" %
                         (harness_name, error_detail))
        self.help()
        sys.exit(1)

    # get the remote control file and stick it in FETCHDIRTEST
    try:
        control = myharness.bootstrap(FETCHDIRTEST)
    except Exception as ex:
        sys.stderr.write("bootstrap failed -> %s\n" % ex)
        raise SystemExit(1)

    logging.debug("bootstrap passing control file %s to run" % control)
    if not control:
        # nothing to do, politely abort
        # trick to work around various harness quirks
        raise SystemExit(0)

    args.append(control)
    return args