def _parse_all_objects_then_iterate_for_nodes(config, tag, param_dict, operation):
    """
    Wrapper for getting/setting/unsetting/... state/snapshot.

    :param param_dict: additional parameters to overwrite the previous
                       dictionary with
    :type param_dict: {str, str}
    :param str operation: operation description to use when logging

    The rest of the arguments match the public functions.
    """
    # the graph carries a loader (l) for parsing and a runner (r) for execution
    l, r = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Starting %s for %s with job %s and params:\n%s",
                operation, ", ".join(selected_vms),
                os.path.basename(r.job.logdir),
                param.ParsedDict(config["param_dict"]).reportable_form().rstrip("\n"))
    for test_object in l.parse_objects(config["param_dict"], config["vm_strs"]):
        # only vm-type test objects get a manage.unchanged node; skip the rest
        if test_object.key != "vms":
            continue
        vm = test_object
        # parse individual net only for the current vm
        net = l.parse_object_from_objects([vm], param_dict=param_dict)
        setup_dict = config["param_dict"].copy()
        setup_dict.update(param_dict)
        setup_str = param.re_str("all..internal..manage.unchanged")
        test_node = l.parse_node_from_object(net, setup_dict, setup_str,
                                             prefix=tag)
        # run_test_node returns an awaitable; drive it to completion on the
        # current loop, bounded by the job timeout (0/None means unbounded)
        to_run = r.run_test_node(test_node)
        asyncio.get_event_loop().run_until_complete(
            asyncio.wait_for(to_run, r.job.timeout or None))
    LOG_UI.info("Finished %s", operation)
def run(self, config):
    """List the active plugins registered under each extension category."""
    categories = (
        (dispatcher.InitDispatcher(),
         'Plugins that always need to be initialized (init): '),
        (dispatcher.CLICmdDispatcher(),
         'Plugins that add new commands (cli.cmd):'),
        (dispatcher.CLIDispatcher(),
         'Plugins that add new options to commands (cli):'),
        (dispatcher.JobPrePostDispatcher(),
         'Plugins that run before/after the execution of jobs (job.prepost):'),
        (dispatcher.ResultDispatcher(),
         'Plugins that generate job result in different formats (result):'),
        (dispatcher.ResultEventsDispatcher(config),
         ('Plugins that generate job result based on job/test events '
          '(result_events):')),
        (dispatcher.VarianterDispatcher(),
         'Plugins that generate test variants (varianter): '),
        (Resolver(),
         'Plugins that resolve test references (resolver): '),
        (dispatcher.RunnerDispatcher(),
         'Plugins that run test suites on a job (runners): '),
    )
    for plugins_active, heading in categories:
        LOG_UI.info(heading)
        # one (name, description) row per active plugin, sorted by name
        rows = [(ext.name, ext.obj.description)
                for ext in sorted(plugins_active, key=lambda ext: ext.name)]
        if rows:
            for row in astring.iter_tabular_output(rows):
                LOG_UI.debug(row)
        else:
            LOG_UI.debug("(No active plugin)")
        LOG_UI.debug("")
def permubuntu(config, tag=""):
    """
    Perform all extra setup needed for the ubuntu permanent vms.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run
    """
    loader, runner = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Starting permanent vm setup for %s (%s)",
                ", ".join(selected_vms), os.path.basename(runner.job.logdir))
    for vm in loader.parse_objects(config["param_dict"], config["vm_strs"]):
        logging.info("Performing extra setup for the permanent %s", vm.name)

        # consider this as a special kind of ephemeral test which concerns
        # permanent objects (i.e. instead of transition from customize to on
        # root, it is a transition from supposedly "permanentized" vm to the root)
        logging.info("Booting %s for the first permanent on state", vm.name)
        node_params = config["param_dict"].copy()
        node_params["set_state"] = "ready"
        node_str = param.re_str("all..internal..manage.start")
        test_node = loader.parse_node_from_object(vm, node_params, node_str,
                                                  prefix=tag)
        runner.run_test_node(test_node)
    LOG_UI.info("Finished permanent vm setup")
def handle_register(config):
    """
    Register an asset in the local cache by name.

    :param config: configuration holding ``assets.register.*`` values and
                   the cache directory settings
    :returns: exit code - ``AVOCADO_WARNING`` when the asset is already
              registered, ``AVOCADO_ALL_OK`` on successful fetch and
              ``AVOCADO_FAIL`` when fetching fails
    """
    cache_dirs = config.get("datadir.paths.cache_dirs")
    name = config.get("assets.register.name")
    asset_hash = config.get("assets.register.sha1_hash")
    location = config.get("assets.register.url")
    # Adding a twice the location is a small hack due the current logic to
    # return "by_name". This needs to be improved soon.
    asset = Asset(
        name=name,
        asset_hash=asset_hash,
        locations=[location, location],
        cache_dirs=cache_dirs,
    )
    try:
        asset.find_asset_file()
        # an already-registered asset maps to the AVOCADO_WARNING exit code,
        # so report it at warning level rather than as an error
        LOG_UI.warning("Asset with name %s already registered.", name)
        return exit_codes.AVOCADO_WARNING
    except OSError:
        # not cached yet: try to fetch and register it
        try:
            asset.fetch()
            LOG_UI.info("Done. Now you can reference it by name %s", name)
            return exit_codes.AVOCADO_ALL_OK
        except OSError as e:
            LOG_UI.error(e)
            return exit_codes.AVOCADO_FAIL
def run(self, args):
    """
    Display the configuration files read and the Avocado data directories.

    :param args: parsed command line arguments (uses ``args.datadir``)
    """
    LOG_UI.info('Config files read (in order):')
    for cfg_path in settings.config_paths:
        # pass the value as a lazy %-style logging argument instead of
        # pre-formatting with '%' (message built only if the level is enabled)
        LOG_UI.debug(' %s', cfg_path)
    if settings.config_paths_failed:
        LOG_UI.error('\nConfig files that failed to read:')
        for cfg_path in settings.config_paths_failed:
            LOG_UI.error(' %s', cfg_path)
    LOG_UI.debug("")
    if not args.datadir:
        # compute the widest "section.key" so the table aligns
        blength = 0
        for section in settings.config.sections():
            for value in settings.config.items(section):
                clength = len('%s.%s' % (section, value[0]))
                if clength > blength:
                    blength = clength
        format_str = " %-" + str(blength) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for section in settings.config.sections():
            for value in settings.config.items(section):
                config_key = ".".join((section, value[0]))
                LOG_UI.debug(format_str, config_key, value[1])
    else:
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
    LOG_UI.debug('')
    LOG_UI.info('Avocado Data Directories:')
    # lazy %-args instead of eager string concatenation; output is identical
    LOG_UI.debug(' base %s', data_dir.get_base_dir())
    LOG_UI.debug(' tests %s', data_dir.get_test_dir())
    LOG_UI.debug(' data %s', data_dir.get_data_dir())
    LOG_UI.debug(' logs %s', data_dir.get_logs_dir())
    LOG_UI.debug(' cache %s', ", ".join(data_dir.get_cache_dirs()))
def handle_default():
    """Show the configuration files read and the Avocado data directories."""
    LOG_UI.info("Config files read (in order, '*' means the file exists "
                "and had been read):")
    # Getting from settings because is already sorted
    config = settings.as_dict()
    for cfg_path in settings.all_config_paths:
        marker = ' * %s' if cfg_path in settings.config_paths else ' %s'
        LOG_UI.debug(marker, cfg_path)
    LOG_UI.debug("")
    if config.get('config.datadir'):
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
    else:
        # align the table on the longest namespace
        width = max((len(namespace) for namespace in config), default=0)
        format_str = " %-" + str(width) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for namespace, value in config.items():
            LOG_UI.debug(format_str, namespace, value)
    LOG_UI.debug('')
    LOG_UI.info('Avocado Data Directories:')
    LOG_UI.debug(' base %s', data_dir.get_base_dir())
    LOG_UI.debug(' tests %s', data_dir.get_test_dir())
    LOG_UI.debug(' data %s', data_dir.get_data_dir())
    LOG_UI.debug(' logs %s', data_dir.get_logs_dir())
    LOG_UI.debug(' cache %s', ", ".join(data_dir.get_cache_dirs()))
def render(self, result, job):
    """
    Render the HTML report(s) for a finished job.

    :param result: the job result to render
    :param job: the finished job (``job.args`` controls output locations)
    """
    if job.status == "RUNNING":
        return  # Don't create results on unfinished jobs
    if not (hasattr(job.args, 'html_job_result') or
            hasattr(job.args, 'html_output')):
        return
    open_browser = getattr(job.args, 'open_browser', False)
    if getattr(job.args, 'html_job_result', 'off') == 'on':
        html_dir = os.path.join(job.logdir, 'html')
        if os.path.exists(html_dir):  # update the html result if exists
            shutil.rmtree(html_dir)
        os.makedirs(html_dir)
        html_path = os.path.join(html_dir, 'results.html')
        self._render(result, html_path)
        if getattr(job.args, 'stdout_claimed_by', None) is None:
            LOG_UI.info("JOB HTML : %s", html_path)
        if open_browser:
            self._open_browser(html_path)
            open_browser = False
    # BUG FIX: the default used to be the *string* 'None', which always
    # passed the `is not None` check below, so a missing html_output
    # attribute rendered the report to a file literally named "None"
    html_path = getattr(job.args, 'html_output', None)
    if html_path is not None:
        self._render(result, html_path)
        if open_browser:
            self._open_browser(html_path)
def run(self, args):
    """Display configuration files read and the Avocado data directories."""
    LOG_UI.info("Config files read (in order, '*' means the file exists "
                "and had been read):")
    for cfg_path in settings.all_config_paths:
        if cfg_path in settings.config_paths:
            LOG_UI.debug(' * %s', cfg_path)
        else:
            LOG_UI.debug(' %s', cfg_path)
    LOG_UI.debug("")
    if args.datadir:
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
    else:
        # collect ("section.key", value) pairs once, then print aligned
        entries = [('%s.%s' % (section, item[0]), item[1])
                   for section in settings.config.sections()
                   for item in settings.config.items(section)]
        width = max((len(key) for key, _ in entries), default=0)
        format_str = " %-" + str(width) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for config_key, config_value in entries:
            LOG_UI.debug(format_str, config_key, config_value)
    LOG_UI.debug('')
    LOG_UI.info('Avocado Data Directories:')
    LOG_UI.debug(' base %s', data_dir.get_base_dir())
    LOG_UI.debug(' tests %s', data_dir.get_test_dir())
    LOG_UI.debug(' data %s', data_dir.get_data_dir())
    LOG_UI.debug(' logs %s', data_dir.get_logs_dir())
    LOG_UI.debug(' cache %s', ", ".join(data_dir.get_cache_dirs()))
def post(self, job):
    """Emit the content of selected per-test log files after the job ends."""
    statuses = job.config.get('job.output.testlogs.statuses')
    if not statuses:
        return
    try:
        with open(os.path.join(job.logdir, 'results.json')) as json_file:
            results = json.load(json_file)
    except FileNotFoundError:
        return
    logfiles = job.config.get('job.output.testlogs.logfiles')
    # only tests whose final status was requested get their logs dumped
    wanted = [test for test in results['tests']
              if test['status'] in statuses]
    for test in wanted:
        for logfile in logfiles:
            path = os.path.join(test['logdir'], logfile)
            try:
                with open(path) as log:
                    LOG_UI.info('Log file "%s" content for test "%s" (%s):',
                                logfile, test['id'], test['status'])
                    LOG_UI.debug(log.read())
            except (FileNotFoundError, PermissionError) as error:
                LOG_UI.error('Failure to access log file "%s": %s',
                             path, error)
def run(self, args):
    """
    Display the configuration files read and the Avocado data directories.

    :param args: parsed command line arguments (uses ``args.datadir``)
    """
    LOG_UI.info('Config files read (in order):')
    for cfg_path in settings.config_paths:
        # lazy %-style logging argument instead of eager '%' formatting
        LOG_UI.debug(' %s', cfg_path)
    if settings.config_paths_failed:
        LOG_UI.error('\nConfig files that failed to read:')
        for cfg_path in settings.config_paths_failed:
            LOG_UI.error(' %s', cfg_path)
    LOG_UI.debug("")
    if not args.datadir:
        # compute the widest "section.key" so the table aligns
        blength = 0
        for section in settings.config.sections():
            for value in settings.config.items(section):
                clength = len('%s.%s' % (section, value[0]))
                if clength > blength:
                    blength = clength
        format_str = " %-" + str(blength) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for section in settings.config.sections():
            for value in settings.config.items(section):
                config_key = ".".join((section, value[0]))
                LOG_UI.debug(format_str, config_key, value[1])
    else:
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
    LOG_UI.debug('')
    LOG_UI.info('Avocado Data Directories:')
    # lazy %-args instead of eager string concatenation; output is identical
    LOG_UI.debug(' base %s', data_dir.get_base_dir())
    LOG_UI.debug(' tests %s', data_dir.get_test_dir())
    LOG_UI.debug(' data %s', data_dir.get_data_dir())
    LOG_UI.debug(' logs %s', data_dir.get_logs_dir())
def run(self, config):
    """Show configuration files read and the Avocado data directories."""
    LOG_UI.info("Config files read (in order, '*' means the file exists "
                "and had been read):")
    for cfg_path in settings.all_config_paths:
        if cfg_path in settings.config_paths:
            LOG_UI.debug(' * %s', cfg_path)
        else:
            LOG_UI.debug(' %s', cfg_path)
    LOG_UI.debug("")
    if config.get("datadir"):
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
    else:
        # gather the key/value pairs first, then print them aligned
        pairs = [('%s.%s' % (section, item[0]), item[1])
                 for section in settings.config.sections()
                 for item in settings.config.items(section)]
        width = max((len(name) for name, _ in pairs), default=0)
        format_str = " %-" + str(width) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for name, value in pairs:
            LOG_UI.debug(format_str, name, value)
    LOG_UI.debug('')
    LOG_UI.info('Avocado Data Directories:')
    LOG_UI.debug(' base %s', data_dir.get_base_dir())
    LOG_UI.debug(' tests %s', data_dir.get_test_dir())
    LOG_UI.debug(' data %s', data_dir.get_data_dir())
    LOG_UI.debug(' logs %s', data_dir.get_logs_dir())
    LOG_UI.debug(' cache %s', ", ".join(data_dir.get_cache_dirs()))
async def spawn_tasks(self, parallel_tasks):
    """Spawn pending tasks, keeping at most `parallel_tasks` in flight."""
    while True:
        # throttle: wait while too many spawned tasks are still pending
        while len(set(self.status_server.tasks_pending)
                  .intersection(self.spawned_tasks)) >= parallel_tasks:
            await asyncio.sleep(0.1)

        if not self.pending_tasks:
            print("Finished spawning tasks")
            break
        task = self.pending_tasks[0]

        spawn_ok = await self.spawner.spawn_task(task)
        task_id = task.identifier
        self.pending_tasks.remove(task)
        self.spawned_tasks.append(task_id)
        if not spawn_ok:
            LOG_UI.error("ERROR: failed to spawn task: %s", task_id)
            continue

        if self.spawner.is_task_alive(task):
            LOG_UI.info("%s spawned and alive", task_id)
        else:
            LOG_UI.warning("%s is not alive shortly after being spawned",
                           task_id)
def render(self, result, job):
    """
    Report the resultsdb job URL on the UI.

    Skipped when no logs URL is configured or another plugin claimed stdout.

    :param result: job result (unused here, part of the render interface)
    :param job: finished job providing the configuration and log directory
    """
    resultsdb_logs = job.config.get('resultsdb_logs', None)
    if (resultsdb_logs is not None and
            job.config.get('stdout_claimed_by', None) is None):
        # BUG FIX: the format string has two placeholders but only one
        # argument used to be passed, which breaks the log record; supply
        # both the base URL and the job log directory name
        LOG_UI.info("JOB URL : %s/%s", resultsdb_logs,
                    os.path.basename(job.logdir))
def _parse_all_objects_then_iterate_for_nodes(config, tag, param_dict, operation):
    """
    Wrapper for setting state/snapshot, same as :py:func:`set`.

    :param param_dict: additional parameters to overwrite the previous
                       dictionary with
    :type param_dict: {str, str}
    :param str operation: operation description to use when logging
    :param str tag: extra name identifier for the test to be run
    """
    loader, runner = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info(
        "Starting %s for %s with job %s and params:\n%s", operation,
        ", ".join(selected_vms), os.path.basename(runner.job.logdir),
        param.ParsedDict(config["param_dict"]).reportable_form().rstrip("\n"))
    for vm in loader.parse_objects(config["param_dict"], config["vm_strs"]):
        # per-vm parameters: run config overlaid with the extra dictionary
        node_params = config["param_dict"].copy()
        node_params.update(param_dict)
        node_str = param.re_str("all..internal..manage.unchanged")
        test_node = loader.parse_node_from_object(vm, node_params, node_str,
                                                  prefix=tag)
        runner.run_test_node(test_node)
    LOG_UI.info("Finished %s", operation)
def render(self, result, job):
    """
    Render the HTML report(s) for a finished job.

    :param result: the job result to render
    :param job: the finished job (``job.args`` controls output locations)
    """
    if job.status == "RUNNING":
        return  # Don't create results on unfinished jobs
    if not (hasattr(job.args, 'html_job_result') or
            hasattr(job.args, 'html_output')):
        return
    open_browser = getattr(job.args, 'open_browser', False)
    if getattr(job.args, 'html_job_result', 'off') == 'on':
        html_dir = os.path.join(job.logdir, 'html')
        if os.path.exists(html_dir):  # update the html result if exists
            shutil.rmtree(html_dir)
        os.makedirs(html_dir)
        html_path = os.path.join(html_dir, 'results.html')
        self._render(result, html_path)
        if getattr(job.args, 'stdout_claimed_by', None) is None:
            LOG_UI.info("JOB HTML : %s", html_path)
        if open_browser:
            self._open_browser(html_path)
            open_browser = False
    # BUG FIX: the default used to be the *string* 'None', which always
    # passed the `is not None` check below, so a missing html_output
    # attribute rendered the report to a file literally named "None"
    html_path = getattr(job.args, 'html_output', None)
    if html_path is not None:
        # Avoid removing static content in user-provided path as it might
        # also contain other user file/dirs (eg. "/tmp/report.html" would
        # otherwise delete "/tmp")
        self._render(result, html_path)
        if open_browser:
            self._open_browser(html_path)
def internal(config, tag=""):
    """
    Run an internal test node, thus performing a particular automated
    setup on the desired vms.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run
    """
    loader, runner = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Performing internal setup on %s (%s)",
                ", ".join(selected_vms), os.path.basename(runner.job.logdir))
    for vm in loader.parse_objects(config["param_dict"], config["vm_strs"]):
        node_params = config["param_dict"].copy()
        if vm.params.get("stateless", "yes") == "yes":
            # stateless runs neither retrieve nor save any vm state
            node_params["get_state"] = ""
            node_params["set_state"] = ""
            node_params["skip_image_processing"] = "yes"
            node_params["kill_vm"] = "no"
        node_str = param.re_str("all..internal.." + vm.params["node"])
        test_node = loader.parse_node_from_object(vm, node_params, node_str,
                                                  prefix=tag)
        runner.run_test_node(test_node)
    LOG_UI.info("Finished internal setup")
def _parse_one_node_for_all_objects(config, tag, verb):
    """
    Wrapper for setting state/snapshot, same as :py:func:`set`.

    :param verb: verb forms in a tuple (gerund form, variant, test name, present)
    :type verb: (str, str, str, str)
    :param str tag: extra name identifier for the test to be run
    """
    gerund, variant, test_name, present = verb
    loader, runner = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("%s virtual machines %s (%s)", gerund,
                ", ".join(selected_vms), os.path.basename(runner.job.logdir))
    setup_dict = config["param_dict"].copy()
    setup_dict.update({"vms": " ".join(selected_vms),
                       "main_vm": selected_vms[0]})
    setup_str = param.re_str("all..internal..manage.%s" % variant)
    tests, vms = loader.parse_object_nodes(setup_dict, setup_str,
                                           config["vm_strs"], prefix=tag)
    # the manage variant must resolve to a single test node
    assert len(tests) == 1, \
        "There must be exactly one %s test variant from %s" % (test_name,
                                                               tests)
    runner.run_test_node(TestNode(tag, tests[0].config, vms))
    LOG_UI.info("%s complete", present)
def develop(config, tag=""):
    """
    Run manual tests specialized at development speedup.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run

    Current modes that can be supplied from the command line can be found
    in the "develop" test set.

    As with all manual tests, providing setup and making sure that all the
    vms exist is a user's responsibility.
    """
    loader, runner = config["graph"].l, config["graph"].r
    selected_vms = list(config["vm_strs"].keys())
    LOG_UI.info("Developing on virtual machines %s (%s)",
                ", ".join(selected_vms), os.path.basename(runner.job.logdir))
    mode = config["tests_params"].get("devmode", "generator")
    setup_dict = config["param_dict"].copy()
    setup_dict.update({"vms": " ".join(selected_vms),
                       "main_vm": selected_vms[0]})
    tests, vms = loader.parse_object_nodes(
        setup_dict, param.re_str("all..manual..develop.%s" % mode),
        config["vm_strs"], prefix=tag)
    # the chosen develop mode must resolve to a single test node
    assert len(tests) == 1, \
        "There must be exactly one develop test variant from %s" % tests
    runner.run_test_node(TestNode(tag, tests[0].config, vms))
    LOG_UI.info("Development complete")
def render(self, result, job):
    """Report the resultsdb job URL unless stdout is claimed elsewhere."""
    logs_url = job.config.get('plugins.resultsdb.logs_url')
    if logs_url is None:
        return
    if job.config.get('stdout_claimed_by') is not None:
        return
    LOG_UI.info("JOB URL : %s/%s", logs_url, os.path.basename(job.logdir))
def full(config, tag=""):
    """
    Perform all the setup needed to achieve a certain state and save the state.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run

    The state can be achieved all the way from the test object creation. The
    performed setup depends entirely on the state's dependencies which can
    be completely different than the regular create->install->deploy path.

    Only singleton test setup is supported within the full setup path since
    we cannot guarantee other setup involved vms exist.
    """
    # the graph carries a loader (l) for parsing and a runner (r) for execution
    l, r = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Starting full setup for %s (%s)",
                ", ".join(selected_vms), os.path.basename(r.job.logdir))
    for vm_name in selected_vms:
        vm_params = config["vms_params"].object_params(vm_name)
        logging.info("Creating the full state '%s' of %s",
                     vm_params.get("state", "customize"), vm_name)
        # map the user-facing state names to their internal "0"-prefixed forms
        state = vm_params.get("state", "customize")
        state = "0root" if state == "root" else state
        state = "0preinstall" if state == "install" else state
        # in case of permanent vms, support creation and other otherwise dangerous operations
        setup_dict = config["param_dict"].copy()
        setup_dict["create_permanent_vm"] = "yes"
        setup_dict["main_vm"] = vm_name
        # overwrite any existing test objects
        create_graph = l.parse_object_trees(
            setup_dict, param.re_str("all.." + state),
            {vm_name: config["vm_strs"][vm_name]}, prefix=tag)
        # first clear the run flag everywhere, then re-enable it for all
        # parents except the shared root, so only the setup chain is run
        create_graph.flag_parent_intersection(create_graph, flag_type="run",
                                              flag=False)
        create_graph.flag_parent_intersection(create_graph, flag_type="run",
                                              flag=True,
                                              skip_shared_root=True)
        # NOTE: this makes sure that any present states are overwritten and no created
        # states are removed, skipping any state restoring for better performance
        setup_dict = config["param_dict"].copy()
        setup_dict.update({"get_mode": "ia",
                           "set_mode": "ff",
                           "unset_mode": "ra"})
        r.run_traversal(create_graph, setup_dict)
    LOG_UI.info("Finished full setup")
def __init__(self, config=None):  # pylint: disable=W0613
    """Pick up Beaker lab-controller coordinates from the environment."""
    env = os.environ
    baseurl = env.get("BEAKER_LAB_CONTROLLER_URL")
    recipeid = env.get("RSTRNT_RECIPEID")
    taskid = env.get("RSTRNT_TASKID")
    # outside a Beaker restraint environment there is nothing to set up
    if not (baseurl and recipeid and taskid):
        return
    baseurl = baseurl.rstrip("/")
    self.beaker_url = "%s/recipes/%s/tasks/%s" % (baseurl, recipeid, taskid)
    LOG_UI.info("beaker: using API at %s (R:%s T:%s)",
                baseurl, recipeid, taskid)
def noop(config, tag=""):
    """
    Placeholder setup step that invokes the plugin without doing anything.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run
    """
    LOG_UI.info("NOOP")
def report_results(self):
    """Reports a summary, with verbose listing of fail/error tasks."""
    totals = {}
    for status, tasks in self.status_server.result.items():
        totals[status] = len(tasks)
    LOG_UI.info("Tasks result summary: %s", totals)
    # list the individual tasks only for the problematic statuses
    for status, tasks in self.status_server.result.items():
        if status not in ('fail', 'error'):
            continue
        LOG_UI.error("Tasks ended with '%s': %s", status, ", ".join(tasks))
def post_tests(self, job):
    """Print the final results line once a successful job finishes."""
    if not self.owns_stdout or job.status != 'PASS':
        return
    LOG_UI.info("RESULTS : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
                "WARN %d | INTERRUPT %s | CANCEL %s",
                job.result.passed, job.result.errors, job.result.failed,
                job.result.skipped, job.result.warned,
                job.result.interrupted, job.result.cancelled)
def post_tests(self, job):
    """Print the final results line once a successful job finishes."""
    if not self.owns_stdout:
        return
    if job.status != 'PASS':
        return
    result = job.result
    LOG_UI.info(
        "RESULTS : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
        "WARN %d | INTERRUPT %s | CANCEL %s",
        result.passed, result.errors, result.failed, result.skipped,
        result.warned, result.interrupted, result.cancelled)
def send_request(self, req):
    """Issue the prepared request, returning the response or None on failure."""
    LOG_UI.debug("beaker: %s %s ...", req.method, req.full_url)
    try:
        return urllib.request.urlopen(req)  # nosec
    except urllib.error.URLError as err:
        LOG_UI.info("beaker: %s %s failed: %s", req.method, req.full_url, err)
    except Exception as err:  # should not happen
        LOG_UI.info("beaker: Oops: %s", err)
    return None
def deploy(config, tag=""):
    """
    Deploy customized data and utilities to the guest vms,
    to one or to more of their states, either temporary (``stateless=no``)
    or taking a respective 'customize' snapshot.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run
    """
    loader, runner = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Deploying data to %s (%s)",
                ", ".join(selected_vms), os.path.basename(runner.job.logdir))
    for vm in loader.parse_objects(config["param_dict"], config["vm_strs"]):
        states = vm.params.objects("states")
        if states:
            stateless = False
        else:
            # no explicit states: deploy to the current state only, possibly
            # without taking/retrieving any snapshot at all
            states = ["current_state"]
            stateless = vm.params.get("stateless", "yes") == "yes"
        for i, state in enumerate(states):
            node_params = config["param_dict"].copy()
            if state != "current_state":
                node_params.update({"get_state": state,
                                    "set_state": state,
                                    "get_type": "any",
                                    "set_type": "any"})
            node_params.update({
                "skip_image_processing": "yes",
                "kill_vm": "no",
                "redeploy_only": config["vms_params"].get("redeploy_only",
                                                          "yes")
            })
            if stateless:
                node_params["get_state"] = ""
                node_params["set_state"] = ""
            # number the extra state deployments after the first one
            suffix = str(i + 1) if i > 0 else ""
            node_str = param.re_str("all..internal..customize")
            test_node = loader.parse_node_from_object(vm, node_params,
                                                      node_str,
                                                      prefix=tag + suffix)
            runner.run_test_node(test_node)
    LOG_UI.info("Finished data deployment")
def _download_tests(self, tests, destination, job_id, spawner):
    """Fetch output files of the given tests, returning an exit code."""
    for test in tests:
        test_id = test.get('id')
        LOG_UI.info("Downloading files for test %s", test_id)
        try:
            for filename, stream in spawner().stream_output(job_id, test_id):
                self._save_stream_to_file(stream,
                                          os.path.join(destination, filename))
        except SpawnerException as ex:
            LOG_UI.error("Error: Failed to download: %s. Exiting...", ex)
            return exit_codes.AVOCADO_GENERIC_CRASH
    return exit_codes.AVOCADO_ALL_OK
def _display(self, test_matrix, stats):
    """Print the test matrix table, plus per-type stats in verbose mode."""
    if self.args.verbose:
        header = (output.TERM_SUPPORT.header_str('Type'),
                  output.TERM_SUPPORT.header_str('Test'))
    else:
        header = None
    for row in astring.iter_tabular_output(test_matrix, header=header):
        LOG_UI.debug(row)
    if not self.args.verbose:
        return
    LOG_UI.debug("")
    for key in sorted(stats):
        LOG_UI.info("%s: %s", key.upper(), stats[key])
def handle_list_command(jobs_results):
    """Called when 'avocado jobs list' command is executed."""
    for filename in jobs_results.values():
        with open(filename, 'r') as fp:
            job = json.load(fp)
        try:
            # jobs without any test have no start timestamp to show
            started = datetime.fromtimestamp(job['tests'][0]['start'])
        except IndexError:
            continue
        LOG_UI.info("%-40s %-26s %3s (%s/%s/%s/%s)", job['job_id'],
                    str(started), job['total'], job['pass'], job['skip'],
                    job['errors'], job['failures'])
    return exit_codes.AVOCADO_ALL_OK
def handle_show_command(self, config):
    """
    Called when 'avocado jobs show' command is executed.

    :param config: configuration with the ``jobs.show.job_id`` to display
    :returns: ``AVOCADO_ALL_OK`` or ``AVOCADO_GENERIC_CRASH`` when the job
              or its results file cannot be found
    """
    job_id = config.get("jobs.show.job_id")
    results_dir = get_job_results_dir(job_id)
    if results_dir is None:
        LOG_UI.error("Error: Job %s not found", job_id)
        return exit_codes.AVOCADO_GENERIC_CRASH

    results_file = os.path.join(results_dir, "results.json")
    config_file = os.path.join(results_dir, "jobdata/args.json")
    try:
        results_data = self._get_data_from_file(results_file)
    except FileNotFoundError as ex:
        # Results data are important and should exit if not found
        LOG_UI.error(ex)
        return exit_codes.AVOCADO_GENERIC_CRASH

    # BUG FIX: config_data must be defined even when the config file is
    # missing, otherwise the .get() below raises NameError
    config_data = {}
    try:
        config_data = self._get_data_from_file(config_file)
    except FileNotFoundError:
        pass

    data = {
        "JOB ID": job_id,
        "JOB LOG": results_data.get("debuglog"),
        "SPAWNER": config_data.get("nrunner.spawner", "unknown"),
    }
    # We could improve this soon with more data and colors
    self._print_job_details(data)
    LOG_UI.info("")
    self._print_job_tests(results_data.get("tests"))
    results = ("PASS %d | ERROR %d | FAIL %d | SKIP %d |"
               "WARN %d | INTERRUPT %s | CANCEL %s")
    results %= (results_data.get("pass", 0),
                results_data.get("error", 0),
                results_data.get("failures", 0),
                results_data.get("skip", 0),
                results_data.get("warn", 0),
                results_data.get("interrupt", 0),
                results_data.get("cancel", 0))
    self._print_job_details({"RESULTS": results})
    return exit_codes.AVOCADO_ALL_OK
def sigtstp_handler(signum, frame):  # pylint: disable=W0613
    """
    SIGSTOP all test processes on SIGTSTP
    """
    # `proc` is a closure variable: before the test process has started
    # there is nothing to stop or resume
    if not proc:  # Ignore ctrl+z when proc not yet started
        return
    # serialize handling so overlapping ctrl+z presses cannot race on
    # `self.sigstopped` (sigtstp is presumably a lock — TODO confirm)
    with sigtstp:
        msg = "ctrl+z pressed, %%s test (%s)" % proc.pid
        app_log_msg = '\n%s' % msg
        if self.sigstopped:
            # second press: resume the whole previously-stopped process tree
            APP_LOG.info(app_log_msg, "resumming")
            TEST_LOG.info(msg, "resumming")
            process.kill_process_tree(proc.pid, signal.SIGCONT, False)
            self.sigstopped = False
        else:
            # first press: stop the whole process tree of the test
            APP_LOG.info(app_log_msg, "stopping")
            TEST_LOG.info(msg, "stopping")
            process.kill_process_tree(proc.pid, signal.SIGSTOP, False)
            self.sigstopped = True
def post(self, job):
    """Dump the whole log of each test whose final status was requested."""
    statuses = job.config.get('job.output.testlogs.statuses')
    if not statuses:
        return
    try:
        with open(os.path.join(job.logdir, 'results.json')) as json_file:
            results = json.load(json_file)
    except FileNotFoundError:
        return
    for test in results['tests']:
        if test['status'] in statuses:
            LOG_UI.info('Log content for test "%s" (%s)',
                        test['id'], test['status'])
            with open(test['logfile']) as log:
                LOG_UI.debug(log.read())
def render(self, result, job):
    """
    Render the HTML report(s) for a finished job.

    :param result: the job result to render
    :param job: the finished job (``job.args`` controls output locations)
    """
    if job.status == "RUNNING":
        return  # Don't create results on unfinished jobs
    if not (hasattr(job.args, 'html_job_result') or
            hasattr(job.args, 'html_output')):
        return
    open_browser = getattr(job.args, 'open_browser', False)
    if getattr(job.args, 'html_job_result', 'off') == 'on':
        html_path = os.path.join(job.logdir, 'results.html')
        self._render(result, html_path)
        if getattr(job.args, 'stdout_claimed_by', None) is None:
            LOG_UI.info("JOB HTML : %s", html_path)
        if open_browser:
            self._open_browser(html_path)
            open_browser = False
    # BUG FIX: the default used to be the *string* 'None', which always
    # passed the `is not None` check below, so a missing html_output
    # attribute rendered the report to a file literally named "None"
    html_path = getattr(job.args, 'html_output', None)
    if html_path is not None:
        self._render(result, html_path)
        if open_browser:
            self._open_browser(html_path)
def pre_tests(self, job):
    """Print job identification lines before any test starts."""
    if not self.owns_stdout:
        return
    LOG_UI.info("JOB ID : %s", job.unique_id)
    src_job = getattr(job.args, "replay_sourcejob", False)
    if src_job:
        LOG_UI.info("SRC JOB ID : %s", src_job)
    LOG_UI.info("JOB LOG : %s", job.logfile)
def post(self, job):
    """Print the elapsed job time for passed jobs when stdout is ours."""
    if job.status != 'PASS':
        return
    if getattr(job.args, 'stdout_claimed_by', None):
        return
    LOG_UI.info("JOB TIME : %.2f s", job.time_elapsed)
def render(self, result, job):
    """
    Report the resultsdb job URL on the UI.

    Skipped when no logs URL is configured or another plugin claimed stdout.

    :param result: job result (unused here, part of the render interface)
    :param job: finished job providing ``job.args`` and the log directory
    """
    if (getattr(job.args, 'resultsdb_logs', None) is not None and
            getattr(job.args, 'stdout_claimed_by', None) is None):
        # pass the parts as lazy %-style logging arguments instead of
        # pre-formatting with '%'; the rendered message is identical
        LOG_UI.info("JOB URL : %s/%s", job.args.resultsdb_logs,
                    os.path.basename(job.logdir))
def _display(self, test_matrix, stats, tag_stats):
    """Print the test table plus type and tag summaries in verbose mode."""
    if self.args.verbose:
        header = (output.TERM_SUPPORT.header_str('Type'),
                  output.TERM_SUPPORT.header_str('Test'),
                  output.TERM_SUPPORT.header_str('Tag(s)'))
    else:
        header = None
    for row in astring.iter_tabular_output(test_matrix, header=header,
                                           strip=True):
        LOG_UI.debug(row)
    if not self.args.verbose:
        return
    LOG_UI.info("")
    LOG_UI.info("TEST TYPES SUMMARY")
    LOG_UI.info("==================")
    for key in sorted(stats):
        LOG_UI.info("%s: %s", key.upper(), stats[key])
    if tag_stats:
        LOG_UI.info("")
        LOG_UI.info("TEST TAGS SUMMARY")
        LOG_UI.info("=================")
        for key in sorted(tag_stats):
            LOG_UI.info("%s: %s", key, tag_stats[key])
def run(self, args):
    """
    Compare two jobs and report their differences.

    :param args: parsed command line arguments; uses ``jobids``,
                 ``diff_filter``, ``diff_strip_id``, ``create_reports``,
                 ``open_browser`` and ``html``
    """
    def _get_name(test):
        # full test identifier, e.g. "1-passtest.py:PassTest.test"
        return str(test['id'])

    def _get_name_no_id(test):
        # identifier with the leading sequence number stripped
        return str(test['id']).split('-', 1)[1]

    job1_dir, job1_id = self._setup_job(args.jobids[0])
    job2_dir, job2_id = self._setup_job(args.jobids[1])

    job1_data = self._get_job_data(job1_dir)
    job2_data = self._get_job_data(job2_dir)

    # each job gets its own textual report; the diff is taken between them
    report_header = 'Avocado Job Report\n'
    job1_results = [report_header]
    job2_results = [report_header]

    if 'cmdline' in args.diff_filter:
        cmdline1 = self._get_command_line(job1_dir)
        cmdline2 = self._get_command_line(job2_dir)
        if str(cmdline1) != str(cmdline2):
            command_line_header = ['\n', '# COMMAND LINE\n']
            job1_results.extend(command_line_header)
            job1_results.append(cmdline1)
            job2_results.extend(command_line_header)
            job2_results.append(cmdline2)

    if 'time' in args.diff_filter:
        time1 = '%.2f s\n' % job1_data['time']
        time2 = '%.2f s\n' % job2_data['time']
        if str(time1) != str(time2):
            total_time_header = ['\n', '# TOTAL TIME\n']
            job1_results.extend(total_time_header)
            job1_results.append(time1)
            job2_results.extend(total_time_header)
            job2_results.append(time2)

    if 'variants' in args.diff_filter:
        variants1 = self._get_variants(job1_dir)
        variants2 = self._get_variants(job2_dir)
        if str(variants1) != str(variants2):
            variants_header = ['\n', '# VARIANTS\n']
            job1_results.extend(variants_header)
            job1_results.extend(variants1)
            job2_results.extend(variants_header)
            job2_results.extend(variants2)

    if 'results' in args.diff_filter:
        results1 = []
        if args.diff_strip_id:
            get_name = _get_name_no_id
        else:
            get_name = _get_name
        for test in job1_data['tests']:
            test_result = '%s: %s\n' % (get_name(test), str(test['status']))
            results1.append(test_result)
        results2 = []
        for test in job2_data['tests']:
            test_result = '%s: %s\n' % (get_name(test), str(test['status']))
            results2.append(test_result)
        if str(results1) != str(results2):
            test_results_header = ['\n', '# TEST RESULTS\n']
            job1_results.extend(test_results_header)
            job1_results.extend(results1)
            job2_results.extend(test_results_header)
            job2_results.extend(results2)

    if 'config' in args.diff_filter:
        config1 = self._get_config(job1_dir)
        config2 = self._get_config(job2_dir)
        if str(config1) != str(config2):
            config_header = ['\n', '# SETTINGS\n']
            job1_results.extend(config_header)
            job1_results.extend(config1)
            job2_results.extend(config_header)
            job2_results.extend(config2)

    if 'sysinfo' in args.diff_filter:
        sysinfo_pre1 = self._get_sysinfo(job1_dir, 'pre')
        sysinfo_pre2 = self._get_sysinfo(job2_dir, 'pre')
        if str(sysinfo_pre1) != str(sysinfo_pre2):
            sysinfo_header_pre = ['\n', '# SYSINFO PRE\n']
            job1_results.extend(sysinfo_header_pre)
            job1_results.extend(sysinfo_pre1)
            job2_results.extend(sysinfo_header_pre)
            job2_results.extend(sysinfo_pre2)
        sysinfo_post1 = self._get_sysinfo(job1_dir, 'post')
        sysinfo_post2 = self._get_sysinfo(job2_dir, 'post')
        if str(sysinfo_post1) != str(sysinfo_post2):
            sysinfo_header_post = ['\n', '# SYSINFO POST\n']
            job1_results.extend(sysinfo_header_post)
            job1_results.extend(sysinfo_post1)
            job2_results.extend(sysinfo_header_post)
            job2_results.extend(sysinfo_post2)

    if getattr(args, 'create_reports', False):
        # write both per-job reports to temp files instead of stdout
        self.std_diff_output = False
        prefix = 'avocado_diff_%s_' % job1_id[:7]
        tmp_file1 = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                                suffix='.txt', delete=False)
        tmp_file1.writelines(job1_results)
        tmp_file1.close()
        prefix = 'avocado_diff_%s_' % job2_id[:7]
        tmp_file2 = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                                suffix='.txt', delete=False)
        tmp_file2.writelines(job2_results)
        tmp_file2.close()
        LOG_UI.info('%s %s', tmp_file1.name, tmp_file2.name)

    if (getattr(args, 'open_browser', False) and
            getattr(args, 'html', None) is None):
        # browser requested but no html path given: use a temp file
        prefix = 'avocado_diff_%s_%s_' % (job1_id[:7], job2_id[:7])
        tmp_file = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                               suffix='.html', delete=False)
        setattr(args, 'html', tmp_file.name)

    if getattr(args, 'html', None) is not None:
        self.std_diff_output = False
        try:
            html_diff = HtmlDiff()
            html_diff._legend = """
    <table class="diff" summary="Legends">
    <tr> <td>
    <table border="" summary="Colors">
    <tr><th> Colors </th> </tr>
    <tr><td class="diff_add"> Added </td></tr>
    <tr><td class="diff_chg">Changed</td> </tr>
    <tr><td class="diff_sub">Deleted</td> </tr>
    </table></td>
    <td>
    <table border="" summary="Links">
    <tr><th colspan="2"> Links </th> </tr>
    <tr><td>(f)irst change</td> </tr>
    <tr><td>(n)ext change</td> </tr>
    <tr><td>(t)op</td> </tr>
    </table></td>
    </tr>
    </table>"""

            # NOTE(review): job1_results/job2_results hold str lines, and
            # str has no .decode() on Python 3 (only bytes does) — this
            # generator looks Python-2 only; confirm the target version
            job_diff_html = html_diff.make_file(
                (_.decode("utf-8") for _ in job1_results),
                (_.decode("utf-8") for _ in job2_results),
                fromdesc=job1_id,
                todesc=job2_id)
            with open(args.html, 'w') as html_file:
                # NOTE(review): writelines() of encoded bytes into a
                # text-mode file also fails on Python 3 — verify
                html_file.writelines(job_diff_html.encode("utf-8"))
            LOG_UI.info(args.html)
        except IOError as exception:
            LOG_UI.error(exception)
            sys.exit(exit_codes.AVOCADO_FAIL)

    if getattr(args, 'open_browser', False):
        # detach the browser process from our session so it survives us
        setsid = getattr(os, 'setsid', None)
        if not setsid:
            setsid = getattr(os, 'setpgrp', None)
        with open(os.devnull, "r+") as inout:
            cmd = ['xdg-open', args.html]
            subprocess.Popen(cmd, close_fds=True, stdin=inout,
                             stdout=inout, stderr=inout,
                             preexec_fn=setsid)

    if self.std_diff_output:
        if self.term.enabled:
            # colorized diff on capable terminals
            for line in self._cdiff(unified_diff(job1_results,
                                                 job2_results,
                                                 fromfile=job1_id,
                                                 tofile=job2_id)):
                LOG_UI.debug(line.strip())
        else:
            for line in unified_diff(job1_results,
                                     job2_results,
                                     fromfile=job1_id,
                                     tofile=job2_id):
                LOG_UI.debug(line.strip())
def sleep(self, job):
    """
    Sleep for the configured number of seconds, logging a countdown.

    :param job: the job being delayed (unused)
    """
    # BUG FIX: `xrange` does not exist on Python 3; `range` behaves
    # identically for this bounded countdown
    for i in range(1, self.seconds + 1):
        LOG_UI.info("Sleeping %2i/%s", i, self.seconds)
        time.sleep(1)
def sleep(self, job):  # pylint: disable=W0613
    """Block for the configured number of seconds, logging a countdown."""
    elapsed = 0
    while elapsed < self.seconds:
        elapsed += 1
        LOG_UI.info("Sleeping %2i/%s", elapsed, self.seconds)
        time.sleep(1)