def run(self, config):
    name = config.get('distro.distro_def_name')
    version = config.get('distro.distro_def_version')
    release = config.get('distro.distro_def_release')
    arch = config.get('distro.distro_def_arch')
    distro_type = config.get('distro.distro_def_type')
    path = config.get('distro.distro_def_path')
    if config.get('distro.distro_def_create'):
        if not (name and version and arch and distro_type and path):
            LOG_UI.error('Required arguments: name, version, arch, type '
                         'and path')
            sys.exit(exit_codes.AVOCADO_FAIL)
        output_file_name = self._get_output_file_name(name, version,
                                                      arch, release)
        if os.path.exists(output_file_name):
            LOG_UI.error('Output file "%s" already exists, will not '
                         'overwrite it', output_file_name)
        else:
            LOG_UI.debug("Loading distro information from tree... "
                         "Please wait...")
            distro = load_from_tree(name, version, release, arch,
                                    distro_type, path)
            save_distro(distro, output_file_name)
            LOG_UI.debug('Distro information saved to "%s"',
                         output_file_name)
    else:
        detected = utils_distro.detect()
        LOG_UI.debug('Detected distribution: %s (%s) version %s release %s',
                     detected.name, detected.arch, detected.version,
                     detected.release)
def initialize(self, args):
    self.variants = None
    cit_parameter_file = getattr(args, "cit_parameter_file", None)
    if cit_parameter_file is None:
        return
    else:
        cit_parameter_file = os.path.expanduser(cit_parameter_file)
        if not os.access(cit_parameter_file, os.R_OK):
            LOG_UI.error("parameter file '%s' could not be found or "
                         "is not readable", cit_parameter_file)
            self.error_exit(args)
    config = configparser.ConfigParser()
    try:
        config.read(cit_parameter_file)
    except Exception as details:
        LOG_UI.error("Cannot parse parameter file: %s", details)
        self.error_exit(args)
    parameters = [(key, value.split(', '))
                  for key, value in config.items('parameters')]
    order = args.cit_order_of_combinations
    cit = Cit(parameters, order)
    self.headers, self.variants = cit.combine()
def run(self, args):
    if args.distro_def_create:
        if not (args.distro_def_name and args.distro_def_version and
                args.distro_def_arch and args.distro_def_type and
                args.distro_def_path):
            LOG_UI.error('Required arguments: name, version, arch, type '
                         'and path')
            sys.exit(exit_codes.AVOCADO_FAIL)
        output_file_name = self.get_output_file_name(args)
        if os.path.exists(output_file_name):
            LOG_UI.error('Output file "%s" already exists, will not '
                         'overwrite it', output_file_name)
        else:
            LOG_UI.debug("Loading distro information from tree... "
                         "Please wait...")
            distro = load_from_tree(args.distro_def_name,
                                    args.distro_def_version,
                                    args.distro_def_release,
                                    args.distro_def_arch,
                                    args.distro_def_type,
                                    args.distro_def_path)
            save_distro(distro, output_file_name)
            LOG_UI.debug('Distro information saved to "%s"',
                         output_file_name)
    else:
        detected = utils_distro.detect()
        LOG_UI.debug('Detected distribution: %s (%s) version %s release %s',
                     detected.name, detected.arch, detected.version,
                     detected.release)
def _execute_sos_cmd(self):
    """Run the sos command for this test case, and extract it"""
    _cmd = '%s report --batch --tmp-dir %s %s'
    exec_cmd = _cmd % (SOS_BIN, self.tmpdir, self.sos_cmd)
    try:
        self.cmd_output = process.run(exec_cmd, timeout=300)
    except Exception as err:
        if self._exception_expected:
            self.cmd_output = err.result
        else:
            msg = err.result.stderr.decode() or err.result.stdout.decode()
            # a little hacky, but using self.log methods here will not
            # print to console unless we ratchet up the verbosity for the
            # entire test suite, which will become very difficult to read
            LOG_UI.error('ERROR:\n' + msg[:8196])  # don't flood w/ super verbose logs
            if err.result.interrupted:
                raise Exception("Timeout exceeded, see output above")
            else:
                raise Exception("Command failed, see output above: '%s'"
                                % err.command.split('bin/')[1])
    with open(os.path.join(self.tmpdir, 'output'), 'wb') as pfile:
        pickle.dump(self.cmd_output, pfile)
    self.cmd_output.stdout = self.cmd_output.stdout.decode()
    self.cmd_output.stderr = self.cmd_output.stderr.decode()
    self.archive = re.findall('/.*sosreport-.*tar.*', self.cmd_output.stdout)
    if self.archive:
        self.archive = self.archive[-1]
        self._extract_archive(self.archive)
def run(self, config):
    verbose = config.get('core.verbose')
    write_to_json_file = config.get('list.write_to_json_file')
    resolver = config.get('list.resolver')
    runner = 'nrunner' if resolver else 'runner'
    config['run.ignore_missing_references'] = True
    config['run.test_runner'] = runner
    try:
        if not resolver:
            try:
                loader.loader.load_plugins(config)
                loader.loader.get_extra_listing()
            except loader.LoaderError as error:
                LOG_UI.error(error)
                return exit_codes.AVOCADO_FAIL
        suite = TestSuite.from_config(config)
        if runner == 'nrunner':
            matrix = self._get_resolution_matrix(suite)
            self._display(suite, matrix)
            directory = config.get('list.recipes.write_to_directory')
            if directory is not None:
                self.save_recipes(suite, directory, len(matrix))
        else:
            matrix = self._get_test_matrix(suite)
            self._display(suite, matrix)
        if write_to_json_file:
            self._save_to_json(matrix, write_to_json_file, verbose)
    except KeyboardInterrupt:
        LOG_UI.error('Command interrupted by user...')
        return exit_codes.AVOCADO_FAIL
def handle_fetch(config):
    exitcode = exit_codes.AVOCADO_ALL_OK
    # fetch assets from instrumented tests
    for test_file in config.get('assets.fetch.references'):
        if os.path.isfile(test_file) and test_file.endswith('.py'):
            LOG_UI.debug('Fetching assets from %s.', test_file)
            success, fail = fetch_assets(test_file)
            for asset_file in success:
                LOG_UI.debug('  File %s fetched or already on'
                             ' cache.', asset_file)
            for asset_file in fail:
                LOG_UI.error(asset_file)
            if fail:
                exitcode |= exit_codes.AVOCADO_FAIL
        else:
            LOG_UI.warning('No such file or file not supported: %s',
                           test_file)
            exitcode |= exit_codes.AVOCADO_FAIL
    # check if we should ignore the errors
    if config.get('assets.fetch.ignore_errors'):
        return exit_codes.AVOCADO_ALL_OK
    return exitcode
def post(self, job):
    statuses = job.config.get('job.output.testlogs.statuses')
    if not statuses:
        return
    try:
        with open(os.path.join(job.logdir, 'results.json')) as json_file:
            results = json.load(json_file)
    except FileNotFoundError:
        return
    logfiles = job.config.get('job.output.testlogs.logfiles')
    for test in results['tests']:
        if test['status'] not in statuses:
            continue
        for logfile in logfiles:
            path = os.path.join(test['logdir'], logfile)
            try:
                with open(path) as log:
                    LOG_UI.info('Log file "%s" content for test "%s" (%s):',
                                logfile, test['id'], test['status'])
                    LOG_UI.debug(log.read())
            except (FileNotFoundError, PermissionError) as error:
                LOG_UI.error('Failure to access log file "%s": %s',
                             path, error)
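# A hypothetical configuration for the post-job hook above, shown as the
# values the config.get() calls would return. The "job.output.testlogs"
# namespace comes straight from the code; the concrete values are
# illustrative only:
#
#   job.config.get('job.output.testlogs.statuses')  ->  ['FAIL', 'ERROR']
#   job.config.get('job.output.testlogs.logfiles')  ->  ['debug.log']
#
# With those values, only tests that ended in FAIL or ERROR have their
# per-test debug.log dumped to the UI logger at the end of the job.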
def run(self, config):
    """
    Run test modules or simple tests.

    :param config: Configuration received from command line parser and
                   possibly other sources.
    :type config: dict
    """
    unique_job_id = config.get('run.unique_job_id')
    if unique_job_id is not None:
        try:
            int(unique_job_id, 16)
            if len(unique_job_id) != 40:
                raise ValueError
        except ValueError:
            LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
            sys.exit(exit_codes.AVOCADO_FAIL)
    try:
        suite = TestSuite.from_config(config, name='')
        if suite.size == 0:
            msg = ("Suite is empty. There are no tests to run. This usually "
                   "happens when you pass --ignore-missing-references and "
                   "there are no more references to process.")
            LOG_UI.warning(msg)
            sys.exit(exit_codes.AVOCADO_FAIL)
    except TestSuiteError as err:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_JOB_FAIL)
    with job.Job(config, [suite]) as job_instance:
        return job_instance.run()
def run(self, config):
    try:
        nrunner.subcommand_runnable_run(config, LOG_UI.info)
        return exit_codes.AVOCADO_ALL_OK
    except Exception as e:
        LOG_UI.error(e)
        return exit_codes.AVOCADO_FAIL
def error(self, message):
    LOG_UI.debug(self.format_help())
    LOG_UI.error("%s: error: %s", self.prog, message)
    if "unrecognized arguments" in message:
        LOG_UI.warning("Perhaps a plugin is missing; run 'avocado"
                       " plugins' to list the installed ones")
    self.exit(exit_codes.AVOCADO_FAIL)
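# A minimal sketch of how an error() override like the one above is wired
# up: argparse calls error() itself whenever command-line parsing fails, so
# subclassing ArgumentParser is the standard hook. The class name here is
# hypothetical; LOG_UI and exit_codes are assumed to come from the
# surrounding module.
import argparse


class UILoggingArgumentParser(argparse.ArgumentParser):

    def error(self, message):
        # route the help text and error through the UI logger
        # instead of argparse's default stderr output
        LOG_UI.debug(self.format_help())
        LOG_UI.error("%s: error: %s", self.prog, message)
        self.exit(exit_codes.AVOCADO_FAIL)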
def _run_scripts(self, kind, scripts_dir, job):
    config = job.config
    if not os.path.isdir(scripts_dir):
        if config.get("plugins.jobscripts.warn_non_existing_dir"):
            LOG_UI.error("Directory configured to hold %s-job scripts "
                         "has not been found: %s", kind, scripts_dir)
        return
    dir_list = os.listdir(scripts_dir)
    scripts = [os.path.join(scripts_dir, f) for f in dir_list]
    scripts = [f for f in scripts if os.access(f, os.R_OK | os.X_OK)]
    scripts.sort()
    if not scripts:
        return
    env = self._job_to_environment_variables(job)
    non_zero_namespace = "plugins.jobscripts.warn_non_zero_status"
    warn_non_zero_status = config.get(non_zero_namespace)
    for script in scripts:
        result = process.run(script, ignore_status=True, env=env)
        if (result.exit_status != 0) and warn_non_zero_status:
            LOG_UI.error('%s job script "%s" exited with status "%i"',
                         kind.capitalize(), script, result.exit_status)
def run_tests(self):
    """
    The actual test execution phase
    """
    self._log_job_debug_info()
    jobdata.record(self, sys.argv)
    if self.size == 0:
        msg = ('Unable to resolve any reference or "resolver.references" '
               'is empty.')
        LOG_UI.error(msg)
    if not self.test_suites:
        self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
        return self.exitcode
    summary = set()
    for suite in self.test_suites:
        summary |= suite.run(self)
    # If it's all good so far, set job status to 'PASS'
    if self.status == 'RUNNING':
        self.status = 'PASS'
    LOG_JOB.info('Test results available in %s', self.logdir)
    if 'INTERRUPTED' in summary:
        self.exitcode |= exit_codes.AVOCADO_JOB_INTERRUPTED
    if 'FAIL' in summary or 'ERROR' in summary:
        self.exitcode |= exit_codes.AVOCADO_TESTS_FAIL
    return self.exitcode
def run(self, config):
    subcommand = config.get('assets_subcommand')
    # we want to let the command caller know about failures
    exitcode = exit_codes.AVOCADO_ALL_OK
    if subcommand == 'fetch':
        # fetch assets from instrumented tests
        for test_file in config.get('assets.fetch.references'):
            if os.path.isfile(test_file) and test_file.endswith('.py'):
                LOG_UI.debug('Fetching assets from %s.', test_file)
                success, fail = fetch_assets(test_file)
                for asset_file in success:
                    LOG_UI.debug('  File %s fetched or already on'
                                 ' cache.', asset_file)
                for asset_file in fail:
                    LOG_UI.error(asset_file)
                if fail:
                    exitcode |= exit_codes.AVOCADO_FAIL
            else:
                LOG_UI.warning('No such file or file not supported: %s',
                               test_file)
                exitcode |= exit_codes.AVOCADO_FAIL
        # check if we should ignore the errors
        if config.get('assets.fetch.ignore_errors'):
            exitcode = exit_codes.AVOCADO_ALL_OK
    return exitcode
def _execute_sos_cmd(self):
    """Run the sos command for this test case, and extract it"""
    exec_cmd = self._generate_sos_command()
    try:
        self.cmd_output = process.run(exec_cmd, timeout=self.sos_timeout,
                                      env={'SOS_TEST_LOGS': 'keep'})
    except Exception as err:
        if not hasattr(err, 'result'):
            # can't inspect the exception raised, just bail out
            raise
        if self._exception_expected:
            self.cmd_output = err.result
        else:
            msg = err.result.stderr.decode() or err.result.stdout.decode()
            # a little hacky, but using self.log methods here will not
            # print to console unless we ratchet up the verbosity for the
            # entire test suite, which will become very difficult to read
            LOG_UI.error('ERROR:\n' + msg[:8196])  # don't flood w/ super verbose logs
            if err.result.interrupted:
                raise Exception("Timeout exceeded, see output above")
            else:
                raise Exception("Command failed, see output above: '%s'"
                                % err.command.split('bin/')[1])
    with open(os.path.join(self.tmpdir, 'output'), 'wb') as pfile:
        pickle.dump(self.cmd_output, pfile)
    self.cmd_output.stdout = self.cmd_output.stdout.decode()
    self.cmd_output.stderr = self.cmd_output.stderr.decode()
async def spawn_tasks(self, parallel_tasks):
    while True:
        while len(set(self.status_server.tasks_pending).intersection(
                self.spawned_tasks)) >= parallel_tasks:
            await asyncio.sleep(0.1)
        try:
            task = self.pending_tasks[0]
        except IndexError:
            print("Finished spawning tasks")
            break
        spawn_result = await self.spawner.spawn_task(task)
        identifier = task.identifier
        self.pending_tasks.remove(task)
        self.spawned_tasks.append(identifier)
        if not spawn_result:
            LOG_UI.error("ERROR: failed to spawn task: %s", identifier)
            continue
        alive = self.spawner.is_task_alive(task)
        if not alive:
            LOG_UI.warning("%s is not alive shortly after being spawned",
                           identifier)
        else:
            LOG_UI.info("%s spawned and alive", identifier)
def run(self, config):
    try:
        nrunner.subcommand_task_run_recipe(config, LOG_UI.info)
        return exit_codes.AVOCADO_ALL_OK
    except Exception as e:  # pylint: disable=W0703
        LOG_UI.error(e)
        return exit_codes.AVOCADO_FAIL
def run(self, config):
    resolutions = resolver.resolve(config.get('references'))
    tasks = self.resolutions_to_tasks(resolutions, config)
    self.pending_tasks = self.check_tasks_requirements(tasks)  # pylint: disable=W0201
    if not self.pending_tasks:
        LOG_UI.error('No test to be executed, exiting...')
        sys.exit(exit_codes.AVOCADO_JOB_FAIL)
    if not config.get('disable_task_randomization'):
        random.shuffle(self.pending_tasks)
    self.spawned_tasks = []  # pylint: disable=W0201
    try:
        loop = asyncio.get_event_loop()
        self.status_server = nrunner.StatusServer(  # pylint: disable=W0201
            config.get('status_server'),
            [t.identifier for t in self.pending_tasks])
        self.status_server.start()
        loop.run_until_complete(self.spawn_tasks())
        loop.run_until_complete(self.status_server.wait())
        print(self.status_server.status)
        exit_code = exit_codes.AVOCADO_ALL_OK
        if self.status_server.status.get('fail') is not None:
            exit_code |= exit_codes.AVOCADO_TESTS_FAIL
        elif self.status_server.status.get('error') is not None:
            exit_code |= exit_codes.AVOCADO_TESTS_FAIL
        return exit_code
    except Exception as e:
        LOG_UI.error(e)
        return exit_codes.AVOCADO_FAIL
def _get_test_suite(self, paths):
    which_tests = loader.ALL if self.args.verbose else loader.AVAILABLE
    try:
        return loader.loader.discover(paths, which_tests=which_tests)
    except loader.LoaderUnhandledReferenceError as details:
        LOG_UI.error(str(details))
        sys.exit(exit_codes.AVOCADO_FAIL)
def initialize(self, config):
    subcommand = config.get('subcommand')
    data = mux.MuxTreeNode()
    # Merge the multiplex
    multiplex_files = config.get("yaml_to_mux.files")
    if multiplex_files:
        try:
            data.merge(create_from_yaml(multiplex_files))
        except IOError as details:
            error_msg = "%s : %s" % (details.strerror, details.filename)
            LOG_UI.error(error_msg)
            if subcommand == 'run':
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                sys.exit(exit_codes.AVOCADO_FAIL)
    # Extend default multiplex tree with --mux-inject values
    for inject in config.get("yaml_to_mux.inject"):
        entry = inject.split(':', 3)
        if len(entry) < 2:
            raise ValueError("key:entry pairs required, found only %s"
                             % (entry))
        elif len(entry) == 2:  # key, entry
            entry.insert(0, '')  # add path='' (root)
        data.get_node(entry[0], True).value[entry[1]] = entry[2]
    mux_filter_only = config.get('yaml_to_mux.filter_only')
    mux_filter_out = config.get('yaml_to_mux.filter_out')
    data = mux.apply_filters(data, mux_filter_only, mux_filter_out)
    if data != mux.MuxTreeNode():
        paths = config.get("yaml_to_mux.parameter_paths")
        self.initialize_mux(data, paths)
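# A short illustration of the --mux-inject parsing above (the values are
# hypothetical): each entry is split on ':' into path/key/value, and a
# two-part "key:value" entry gets an empty path inserted so it lands at
# the tree root.
#
#   '/run/foo:bar:baz'  ->  ['/run/foo', 'bar', 'baz']  # path, key, value
#   'bar:baz'           ->  ['', 'bar', 'baz']          # root path added
#   'bar'               ->  raises ValueError (key:entry pairs required)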
def run(self, args):
    try:
        nrunner.subcommand_runnable_run_recipe(args, LOG_UI.info)
        return exit_codes.AVOCADO_ALL_OK
    except Exception as e:
        LOG_UI.error(e)
        return exit_codes.AVOCADO_FAIL
def run(self, args):
    LOG_UI.info('Config files read (in order):')
    for cfg_path in settings.config_paths:
        LOG_UI.debug('    %s' % cfg_path)
    if settings.config_paths_failed:
        LOG_UI.error('\nConfig files that failed to read:')
        for cfg_path in settings.config_paths_failed:
            LOG_UI.error('    %s' % cfg_path)
    LOG_UI.debug("")
    if not args.datadir:
        blength = 0
        for section in settings.config.sections():
            for value in settings.config.items(section):
                clength = len('%s.%s' % (section, value[0]))
                if clength > blength:
                    blength = clength
        format_str = "    %-" + str(blength) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for section in settings.config.sections():
            for value in settings.config.items(section):
                config_key = ".".join((section, value[0]))
                LOG_UI.debug(format_str, config_key, value[1])
    else:
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
        LOG_UI.debug('')
        LOG_UI.info('Avocado Data Directories:')
        LOG_UI.debug('    base  ' + data_dir.get_base_dir())
        LOG_UI.debug('    tests ' + data_dir.get_test_dir())
        LOG_UI.debug('    data  ' + data_dir.get_data_dir())
        LOG_UI.debug('    logs  ' + data_dir.get_logs_dir())
def run(self, args):
    try:
        loader.loader.load_plugins(args)
    except loader.LoaderError as details:
        sys.stderr.write(str(details))
        sys.stderr.write('\n')
        sys.exit(exit_codes.AVOCADO_FAIL)
    suite = self.create_test_suite(args.reference)
    self.pending_tasks = self.suite_to_tasks(suite, [args.status_server])
    if not args.disable_task_randomization:
        random.shuffle(self.pending_tasks)
    self.spawned_tasks = []
    try:
        loop = asyncio.get_event_loop()
        self.status_server = nrunner.StatusServer(
            args.status_server,
            [t.identifier for t in self.pending_tasks])
        self.status_server.start()
        loop.run_until_complete(self.spawn_tasks())
        loop.run_until_complete(self.status_server.wait())
        print(self.status_server.status)
        exit_code = exit_codes.AVOCADO_ALL_OK
        if self.status_server.status.get('fail') is not None:
            exit_code |= exit_codes.AVOCADO_TESTS_FAIL
        elif self.status_server.status.get('error') is not None:
            exit_code |= exit_codes.AVOCADO_TESTS_FAIL
        return exit_code
    except Exception as e:
        LOG_UI.error(e)
        return exit_codes.AVOCADO_FAIL
def handle_register(config):
    cache_dirs = config.get("datadir.paths.cache_dirs")
    name = config.get("assets.register.name")
    asset_hash = config.get("assets.register.sha1_hash")
    location = config.get("assets.register.url")
    # Adding the location twice is a small hack due to the current logic
    # to return "by_name". This needs to be improved soon.
    asset = Asset(name=name,
                  asset_hash=asset_hash,
                  locations=[location, location],
                  cache_dirs=cache_dirs)
    try:
        asset.find_asset_file()
        LOG_UI.error("Asset with name %s already registered.", name)
        return exit_codes.AVOCADO_WARNING
    except OSError:
        try:
            asset.fetch()
            LOG_UI.info("Done. Now you can reference it by name %s", name)
            return exit_codes.AVOCADO_ALL_OK
        except OSError as e:
            LOG_UI.error(e)
            return exit_codes.AVOCADO_FAIL
def run(self, config):
    """
    Run test modules or simple tests.

    :param config: Configuration received from command line parser and
                   possibly other sources.
    :type config: dict
    """
    if 'run.output_check_record' in config:
        check_record = config.get('run.output_check_record')
        process.OUTPUT_CHECK_RECORD_MODE = check_record
    unique_job_id = config.get('run.unique_job_id')
    if unique_job_id is not None:
        try:
            int(unique_job_id, 16)
            if len(unique_job_id) != 40:
                raise ValueError
        except ValueError:
            LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
            sys.exit(exit_codes.AVOCADO_FAIL)
    try:
        suite = TestSuite.from_config(config, name='')
        if suite.size == 0:
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
    except TestSuiteError as err:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_JOB_FAIL)
    with job.Job(config, [suite]) as job_instance:
        return job_instance.run()
def handle_output_files_command(self, config):
    """Called when 'avocado jobs get-output-files' command is executed."""
    job_id = config.get('jobs.get.output_files.job_id')
    destination = config.get('jobs.get.output_files.destination')
    results_dir = get_job_results_dir(job_id)
    results_file = os.path.join(results_dir, 'results.json')
    config_file = os.path.join(results_dir, 'jobdata/args.json')
    try:
        config_data = self._get_data_from_file(config_file)
        results_data = self._get_data_from_file(results_file)
    except FileNotFoundError as ex:
        LOG_UI.error("Could not get job information: %s", ex)
        return exit_codes.AVOCADO_GENERIC_CRASH
    spawners = {'process': ProcessSpawner,
                'podman': PodmanSpawner}
    spawner_name = config_data.get('nrunner.spawner')
    spawner = spawners.get(spawner_name)
    if spawner is None:
        msg = ("Could not find the spawner for job %s. This command is "
               "experimental and only supported when the job was executed "
               "with the Spawner architecture.")
        LOG_UI.error(msg, job_id)
        return exit_codes.AVOCADO_GENERIC_CRASH
    return self._download_tests(results_data.get('tests'), destination,
                                job_id, spawner)
def run(self, args):
    LOG_UI.info('Config files read (in order):')
    for cfg_path in settings.config_paths:
        LOG_UI.debug('    %s' % cfg_path)
    if settings.config_paths_failed:
        LOG_UI.error('\nConfig files that failed to read:')
        for cfg_path in settings.config_paths_failed:
            LOG_UI.error('    %s' % cfg_path)
    LOG_UI.debug("")
    if not args.datadir:
        blength = 0
        for section in settings.config.sections():
            for value in settings.config.items(section):
                clength = len('%s.%s' % (section, value[0]))
                if clength > blength:
                    blength = clength
        format_str = "    %-" + str(blength) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for section in settings.config.sections():
            for value in settings.config.items(section):
                config_key = ".".join((section, value[0]))
                LOG_UI.debug(format_str, config_key, value[1])
    else:
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
        LOG_UI.debug('')
        LOG_UI.info('Avocado Data Directories:')
        LOG_UI.debug('    base  ' + data_dir.get_base_dir())
        LOG_UI.debug('    tests ' + data_dir.get_test_dir())
        LOG_UI.debug('    data  ' + data_dir.get_data_dir())
        LOG_UI.debug('    logs  ' + data_dir.get_logs_dir())
        LOG_UI.debug('    cache ' + ", ".join(data_dir.get_cache_dirs()))
def report_results(self):
    """Reports a summary, with verbose listing of fail/error tasks."""
    summary = {status: len(tasks)
               for (status, tasks) in self.status_server.result.items()}
    LOG_UI.info("Tasks result summary: %s", summary)
    for status, tasks in self.status_server.result.items():
        if status in ('fail', 'error'):
            LOG_UI.error("Tasks ended with '%s': %s",
                         status, ", ".join(tasks))
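# A hypothetical status_server.result to illustrate the shape consumed
# above: a mapping of status name to the list of task identifiers. With
# this input, report_results() logs the condensed summary
# {'pass': 2, 'fail': 1} and then lists the failing task ids verbatim.
#
#   {'pass': ['1-/bin/true', '2-/bin/uname'], 'fail': ['3-/bin/false']}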
def discover(self, references, which_tests=DiscoverMode.DEFAULT, force=None):
    """
    Discover (possible) tests from test references.

    :param references: a list of tests references; if [] use plugin defaults
    :type references: builtin.list
    :param which_tests: Limit tests to be displayed
    :type which_tests: :class:`DiscoverMode`
    :param force: don't raise an exception when some test references
                  are not resolved to tests.
    :return: A list of test factories (tuples (TestClass, test_params))
    """
    def handle_exception(plugin, details):
        # FIXME: Introduce avocado.exceptions logger and use here
        stacktrace.log_message((f"Test discovery plugin {plugin} "
                                f"failed: {details}"),
                               LOG_UI.getChild("exceptions"))
        # FIXME: Introduce avocado.traceback logger and use here
        stacktrace.log_exc_info(sys.exc_info(), LOG_UI.getChild("debug"))

    tests = []
    unhandled_references = []
    if not references:
        for loader_plugin in self._initialized_plugins:
            try:
                tests.extend(loader_plugin.discover(None, which_tests))
            except Exception as details:  # pylint: disable=W0703
                handle_exception(loader_plugin, details)
    else:
        for reference in references:
            handled = False
            for loader_plugin in self._initialized_plugins:
                try:
                    _test = loader_plugin.discover(reference, which_tests)
                    if _test:
                        tests.extend(_test)
                        handled = True
                        if which_tests != DiscoverMode.ALL:
                            break  # Don't process other plugins
                except Exception as details:  # pylint: disable=W0703
                    handle_exception(loader_plugin, details)
            if not handled:
                unhandled_references.append(reference)
    if unhandled_references:
        if which_tests == DiscoverMode.ALL:
            tests.extend([(MissingTest, {'name': reference})
                          for reference in unhandled_references])
        else:
            # This is a workaround to avoid changing the method signature
            if force is True or force == 'on':
                LOG_UI.error(LoaderUnhandledReferenceError(
                    unhandled_references, self._initialized_plugins))
            else:
                raise LoaderUnhandledReferenceError(unhandled_references,
                                                    self._initialized_plugins)
    self._update_mappings()
    return tests
def _setup_job(job_id):
    resultsdir = data_dir.get_job_results_dir(job_id)
    if resultsdir is None:
        LOG_UI.error("Can't find job results directory for '%s'", job_id)
        sys.exit(exit_codes.AVOCADO_FAIL)
    with open(os.path.join(resultsdir, 'id'), 'r') as id_file:
        sourcejob = id_file.read().strip()
    return resultsdir, sourcejob
def _get_test_suite(self, paths):
    if self.args.verbose:
        which_tests = loader.DiscoverMode.ALL
    else:
        which_tests = loader.DiscoverMode.AVAILABLE
    try:
        return loader.loader.discover(paths, which_tests=which_tests)
    except loader.LoaderUnhandledReferenceError as details:
        LOG_UI.error(str(details))
        sys.exit(exit_codes.AVOCADO_FAIL)
def _save_stream_to_file(self, stream, filename):
    """Save stream to a file.

    Directory must exist before calling this function.
    """
    dirname = os.path.dirname(filename)
    if not os.path.isdir(dirname):
        LOG_UI.error("%s does not exist. Exiting...", dirname)
        return exit_codes.AVOCADO_GENERIC_CRASH
    with open(filename, 'ab') as output_file:
        output_file.write(stream)
def _download_tests(self, tests, destination, job_id, spawner):
    for test in tests:
        test_id = test.get('id')
        LOG_UI.info("Downloading files for test %s", test_id)
        try:
            files_buffers = spawner().stream_output(job_id, test_id)
            for filename, stream in files_buffers:
                dest = os.path.join(destination, filename)
                self._save_stream_to_file(stream, dest)
        except SpawnerException as ex:
            LOG_UI.error("Error: Failed to download: %s. Exiting...", ex)
            return exit_codes.AVOCADO_GENERIC_CRASH
    return exit_codes.AVOCADO_ALL_OK
def run(self, config):
    """
    Run test modules or simple tests.

    :param config: Configuration received from command line parser and
                   possibly other sources.
    :type config: dict
    """
    if 'output_check_record' in config:
        process.OUTPUT_CHECK_RECORD_MODE = config.get('output_check_record',
                                                      None)
    warnings.warn("The following arguments will be changed to boolean soon: "
                  "sysinfo, output-check, failfast, keep-tmp and "
                  "ignore-missing-references",
                  FutureWarning)
    if config.get('unique_job_id') is not None:
        try:
            int(config.get('unique_job_id'), 16)
            if len(config.get('unique_job_id')) != 40:
                raise ValueError
        except ValueError:
            LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
            sys.exit(exit_codes.AVOCADO_FAIL)
    try:
        config['job_timeout'] = time_to_seconds(config.get('job_timeout'))
    except ValueError as detail:
        LOG_UI.error(detail.args[0])
        sys.exit(exit_codes.AVOCADO_FAIL)
    with job.Job(config) as job_instance:
        pre_post_dispatcher = JobPrePostDispatcher()
        try:
            # Run JobPre plugins
            output.log_plugin_failures(pre_post_dispatcher.load_failures)
            pre_post_dispatcher.map_method('pre', job_instance)
            job_run = job_instance.run()
        finally:
            # Run JobPost plugins
            pre_post_dispatcher.map_method('post', job_instance)
    result_dispatcher = ResultDispatcher()
    if result_dispatcher.extensions:
        result_dispatcher.map_method('render', job_instance.result,
                                     job_instance)
    return job_run
def initialize(self, args):
    load_variants = getattr(args, "json_variants_load", None)
    if load_variants is None:
        self.variants = _NO_VARIANTS
        return
    try:
        with open(load_variants, 'r') as var_file:
            self.variants = varianter.Varianter(state=json.load(var_file))
    except IOError:
        LOG_UI.error("JSON serialized file '%s' could not be found or "
                     "is not readable", load_variants)
        if args.subcommand == 'run':
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        else:
            sys.exit(exit_codes.AVOCADO_FAIL)
def mail(self, job):
    # build proper subject based on job status
    subject = '%s Job %s - Status: %s' % (self.subject, job.unique_id,
                                          job.status)
    msg = MIMEText(subject)
    msg['Subject'] = subject
    msg['From'] = self.sender
    msg['To'] = self.rcpt
    # So many possible failures, let's just tell the user about it
    try:
        smtp = smtplib.SMTP(self.server)
        smtp.sendmail(self.sender, [self.rcpt], msg.as_string())
        smtp.quit()
    except Exception:
        LOG_UI.error("Failure to send email notification: "
                     "please check your mail configuration")
def _check_required_args(args, enable_arg, required_args):
    """
    :return: True when enable_arg enabled and all required args are set
    :raise sys.exit: When missing required argument.
    """
    if (not hasattr(args, enable_arg) or
            not getattr(args, enable_arg)):
        return False
    missing = []
    for arg in required_args:
        if not getattr(args, arg):
            missing.append(arg)
    if missing:
        LOG_UI.error("Use of %s requires %s arguments to be set. Please "
                     "set %s.", enable_arg, ', '.join(required_args),
                     ', '.join(missing))
        return sys.exit(exit_codes.AVOCADO_FAIL)
    return True
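# A hypothetical call to show the contract of _check_required_args: if the
# enabling attribute is set but one of the required attributes is empty,
# the missing option names are reported through LOG_UI.error before the
# process exits (the attribute names below are illustrative only).
#
#   _check_required_args(args, 'remote_hostname',
#                        ('remote_hostname', 'remote_username'))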
def run(self, args):
    """
    Run test modules or simple tests.

    :param args: Command line args received from the run subparser.
    """
    if 'output_check_record' in args:
        process.OUTPUT_CHECK_RECORD_MODE = getattr(args,
                                                   'output_check_record',
                                                   None)
    if args.unique_job_id is not None:
        try:
            int(args.unique_job_id, 16)
            if len(args.unique_job_id) != 40:
                raise ValueError
        except ValueError:
            LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
            sys.exit(exit_codes.AVOCADO_FAIL)
    try:
        args.job_timeout = time_to_seconds(args.job_timeout)
    except ValueError as detail:
        LOG_UI.error(detail.args[0])
        sys.exit(exit_codes.AVOCADO_FAIL)
    with job.Job(args) as job_instance:
        pre_post_dispatcher = JobPrePostDispatcher()
        try:
            # Run JobPre plugins
            output.log_plugin_failures(pre_post_dispatcher.load_failures)
            pre_post_dispatcher.map_method('pre', job_instance)
            job_run = job_instance.run()
        finally:
            # Run JobPost plugins
            pre_post_dispatcher.map_method('post', job_instance)
    result_dispatcher = ResultDispatcher()
    if result_dispatcher.extensions:
        result_dispatcher.map_method('render', job_instance.result,
                                     job_instance)
    return job_run
def _run_scripts(self, kind, scripts_dir, job):
    if not os.path.isdir(scripts_dir):
        if self.warn_non_existing_dir:
            LOG_UI.error("Directory configured to hold %s-job scripts "
                         "has not been found: %s", kind, scripts_dir)
        return
    dir_list = os.listdir(scripts_dir)
    scripts = [os.path.join(scripts_dir, f) for f in dir_list]
    scripts = [f for f in scripts if os.access(f, os.R_OK | os.X_OK)]
    scripts.sort()
    if not scripts:
        return
    env = self._job_to_environment_variables(job)
    for script in scripts:
        result = process.run(script, ignore_status=True, env=env)
        if (result.exit_status != 0) and self.warn_non_zero_status:
            LOG_UI.error('%s job script "%s" exited with status "%i"',
                         kind.capitalize(), script, result.exit_status)
def run(self, args):
    err = None
    if args.tree and args.varianter_debug:
        err = "Option --tree is incompatible with --debug."
    elif not args.tree and args.inherit:
        err = "Option --inherit can only be used with --tree."
    if err:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    varianter = args.avocado_variants
    try:
        varianter.parse(args)
    except (IOError, ValueError) as details:
        LOG_UI.error("Unable to parse varianter: %s", details)
        sys.exit(exit_codes.AVOCADO_FAIL)
    use_utf8 = settings.get_value("runner.output", "utf8",
                                  key_type=bool, default=None)
    summary = args.summary or 0
    variants = args.variants or 0
    # Parse obsolete options (unsafe to combine them with new args)
    if args.tree:
        variants = 0
        summary += 1
        if args.contents:
            summary += 1
        if args.inherit:
            summary += 2
    else:
        if args.contents:
            variants += 2
    # Produce the output
    lines = args.avocado_variants.to_str(summary=summary,
                                         variants=variants,
                                         use_utf8=use_utf8)
    for line in lines.splitlines():
        LOG_UI.debug(line)
    sys.exit(exit_codes.AVOCADO_ALL_OK)
def _setup_job(job_id):
    if os.path.isdir(job_id):
        resultsdir = os.path.expanduser(job_id)
        job_id = ''
    elif os.path.isfile(job_id):
        resultsdir = os.path.dirname(os.path.expanduser(job_id))
        job_id = ''
    else:
        logdir = settings.get_value(section='datadir.paths',
                                    key='logs_dir', key_type='path',
                                    default=None)
        try:
            resultsdir = jobdata.get_resultsdir(logdir, job_id)
        except ValueError as exception:
            LOG_UI.error(exception)
            sys.exit(exit_codes.AVOCADO_FAIL)
        if resultsdir is None:
            LOG_UI.error("Can't find job results directory for '%s' in '%s'",
                         job_id, logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)
    sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'), job_id)
    if sourcejob is None:
        LOG_UI.error("Can't find matching job id '%s' in '%s' directory.",
                     job_id, resultsdir)
        sys.exit(exit_codes.AVOCADO_FAIL)
    return resultsdir, sourcejob
def initialize(self, args):
    debug = getattr(args, "varianter_debug", False)
    if debug:
        data = mux.MuxTreeNodeDebug()
    else:
        data = mux.MuxTreeNode()
    # Merge the multiplex
    multiplex_files = getattr(args, "mux_yaml", None)
    if multiplex_files:
        try:
            data.merge(create_from_yaml(multiplex_files, debug))
        except IOError as details:
            error_msg = "%s : %s" % (details.strerror, details.filename)
            LOG_UI.error(error_msg)
            if args.subcommand == 'run':
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                sys.exit(exit_codes.AVOCADO_FAIL)
    # Extend default multiplex tree with --mux-inject values
    for inject in getattr(args, "mux_inject", []):
        entry = inject.split(':', 3)
        if len(entry) < 2:
            raise ValueError("key:entry pairs required, found only %s"
                             % (entry))
        elif len(entry) == 2:  # key, entry
            entry.insert(0, '')  # add path='' (root)
        data.get_node(entry[0], True).value[entry[1]] = entry[2]
    mux_filter_only = getattr(args, 'mux_filter_only', None)
    mux_filter_out = getattr(args, 'mux_filter_out', None)
    data = mux.apply_filters(data, mux_filter_only, mux_filter_out)
    if data != mux.MuxTreeNode():
        paths = getattr(args, "mux_parameter_paths", ["/run/*"])
        if paths is None:
            paths = ["/run/*"]
        self.initialize_mux(data, paths, debug)
def run(self, args):
    """
    Print libexec path and finish

    :param args: Command line args received from the run subparser.
    """
    if 'VIRTUAL_ENV' in os.environ:
        LOG_UI.debug('libexec')
    elif os.path.exists('/usr/libexec/avocado'):
        LOG_UI.debug('/usr/libexec/avocado')
    elif os.path.exists('/usr/lib/avocado'):
        LOG_UI.debug('/usr/lib/avocado')
    else:
        for path in os.environ.get('PATH').split(':'):
            if (os.path.exists(os.path.join(path, 'avocado')) and
                    os.path.exists(os.path.join(os.path.dirname(path),
                                                'libexec'))):
                LOG_UI.debug(os.path.join(os.path.dirname(path), 'libexec'))
                break
        else:
            LOG_UI.error("Can't locate avocado libexec path")
            sys.exit(exit_codes.AVOCADO_FAIL)
    return sys.exit(exit_codes.AVOCADO_ALL_OK)
def initialize(self, args):
    self.variants = None
    error = False
    pict_parameter_file = getattr(args, "pict_parameter_file", None)
    if pict_parameter_file is None:
        return
    else:
        pict_parameter_file = os.path.expanduser(pict_parameter_file)
        if not os.access(pict_parameter_file, os.R_OK):
            LOG_UI.error("pict parameter file '%s' could not be found or "
                         "is not readable", pict_parameter_file)
            error = True
    pict_binary = getattr(args, "pict_binary", None)
    if pict_binary is None:
        LOG_UI.error("pict binary could not be found in $PATH. Please set "
                     "its location with --pict-binary or put it in your "
                     "$PATH")
        error = True
    else:
        pict_binary = os.path.expanduser(pict_binary)
        if not os.access(pict_binary, os.R_OK | os.X_OK):
            LOG_UI.error("pict binary '%s' can not be executed, please "
                         "check the option given with --pict-binary and/or "
                         "the file permissions", pict_binary)
            error = True
    if error:
        if args.subcommand == 'run':
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)
        else:
            sys.exit(exit_codes.AVOCADO_FAIL)
    self.parameter_path = getattr(args, "pict_parameter_path")
    output = run_pict(pict_binary, pict_parameter_file,
                      getattr(args, "pict_order_of_combinations"))
    self.headers, self.variants = parse_pict_output(output)
def run(self, args):
    if 'html_output' in args and args.html_output == '-':
        LOG_UI.error('HTML to stdout not supported (not all HTML resources'
                     ' can be embedded in a single file)')
        sys.exit(exit_codes.AVOCADO_JOB_FAIL)
def run(self, args):
    if getattr(args, 'replay_jobid', None) is None:
        return
    err = None
    if args.replay_teststatus and 'variants' in args.replay_ignore:
        err = ("Option `--replay-test-status` is incompatible with "
               "`--replay-ignore variants`.")
    elif args.replay_teststatus and args.reference:
        err = ("Option --replay-test-status is incompatible with "
               "test references given on the command line.")
    elif getattr(args, "remote_hostname", False):
        err = "Currently we don't replay jobs in remote hosts."
    if err is not None:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    base_logdir = getattr(args, 'base_logdir', None)
    if base_logdir is None:
        base_logdir = settings.get_value(section='datadir.paths',
                                         key='logs_dir', key_type='path',
                                         default=None)
    try:
        resultsdir = jobdata.get_resultsdir(base_logdir, args.replay_jobid)
    except ValueError as exception:
        LOG_UI.error(exception)
        sys.exit(exit_codes.AVOCADO_FAIL)
    if resultsdir is None:
        LOG_UI.error("Can't find job results directory in '%s'", base_logdir)
        sys.exit(exit_codes.AVOCADO_FAIL)
    sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                               args.replay_jobid)
    if sourcejob is None:
        msg = ("Can't find matching job id '%s' in '%s' directory."
               % (args.replay_jobid, resultsdir))
        LOG_UI.error(msg)
        sys.exit(exit_codes.AVOCADO_FAIL)
    setattr(args, 'replay_sourcejob', sourcejob)
    replay_args = jobdata.retrieve_args(resultsdir)
    whitelist = ['loaders',
                 'external_runner',
                 'external_runner_testdir',
                 'external_runner_chdir',
                 'failfast',
                 'ignore_missing_references',
                 'execution_order']
    if replay_args is None:
        LOG_UI.warn('Source job args data not found. These options will '
                    'not be loaded in this replay job: %s',
                    ', '.join(whitelist))
    else:
        for option in whitelist:
            optvalue = getattr(args, option, None)
            if optvalue is not None:
                LOG_UI.warn("Overriding the replay %s with the --%s value "
                            "given on the command line.",
                            option.replace('_', '-'),
                            option.replace('_', '-'))
            elif option in replay_args:
                setattr(args, option, replay_args[option])
    if getattr(args, 'reference', None):
        LOG_UI.warn('Overriding the replay test references with test '
                    'references given in the command line.')
    else:
        references = jobdata.retrieve_references(resultsdir)
        if references is None:
            LOG_UI.error('Source job test references data not found. '
                         'Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            setattr(args, 'reference', references)
    if 'config' in args.replay_ignore:
        LOG_UI.warn("Ignoring configuration from source job with "
                    "--replay-ignore.")
    else:
        self.load_config(resultsdir)
    if 'variants' in args.replay_ignore:
        LOG_UI.warn("Ignoring variants from source job with "
                    "--replay-ignore.")
    else:
        variants = jobdata.retrieve_variants(resultsdir)
        if variants is None:
            LOG_UI.error('Source job variants data not found. Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            LOG_UI.warning("Using src job Mux data only, use "
                           "`--replay-ignore variants` to override "
                           "them.")
            setattr(args, "avocado_variants", variants)
    # Extend "replay_teststatus" with "INTERRUPTED" when --replay-resume
    # is supplied
    if args.replay_resume:
        if not args.replay_teststatus:
            args.replay_teststatus = ["INTERRUPTED"]
        elif "INTERRUPTED" not in args.replay_teststatus:
            args.replay_teststatus.append("INTERRUPTED")
    if args.replay_teststatus:
        replay_map = self._create_replay_map(resultsdir,
                                             args.replay_teststatus)
        setattr(args, 'replay_map', replay_map)
    # Use the original directory to resolve test references properly
    pwd = jobdata.retrieve_pwd(resultsdir)
    if pwd is not None:
        if os.path.exists(pwd):
            os.chdir(pwd)
        else:
            LOG_UI.warn("Directory used in the replay source job '%s' does"
                        " not exist, using '.' instead", pwd)
def run(self, args):

    def _get_name(test):
        return str(test['id'])

    def _get_name_no_id(test):
        return str(test['id']).split('-', 1)[1]

    job1_dir, job1_id = self._setup_job(args.jobids[0])
    job2_dir, job2_id = self._setup_job(args.jobids[1])

    job1_data = self._get_job_data(job1_dir)
    job2_data = self._get_job_data(job2_dir)

    report_header = 'Avocado Job Report\n'
    job1_results = [report_header]
    job2_results = [report_header]

    if 'cmdline' in args.diff_filter:
        cmdline1 = self._get_command_line(job1_dir)
        cmdline2 = self._get_command_line(job2_dir)
        if str(cmdline1) != str(cmdline2):
            command_line_header = ['\n', '# COMMAND LINE\n']
            job1_results.extend(command_line_header)
            job1_results.append(cmdline1)
            job2_results.extend(command_line_header)
            job2_results.append(cmdline2)

    if 'time' in args.diff_filter:
        time1 = '%.2f s\n' % job1_data['time']
        time2 = '%.2f s\n' % job2_data['time']
        if str(time1) != str(time2):
            total_time_header = ['\n', '# TOTAL TIME\n']
            job1_results.extend(total_time_header)
            job1_results.append(time1)
            job2_results.extend(total_time_header)
            job2_results.append(time2)

    if 'variants' in args.diff_filter:
        variants1 = self._get_variants(job1_dir)
        variants2 = self._get_variants(job2_dir)
        if str(variants1) != str(variants2):
            variants_header = ['\n', '# VARIANTS\n']
            job1_results.extend(variants_header)
            job1_results.extend(variants1)
            job2_results.extend(variants_header)
            job2_results.extend(variants2)

    if 'results' in args.diff_filter:
        results1 = []
        if args.diff_strip_id:
            get_name = _get_name_no_id
        else:
            get_name = _get_name
        for test in job1_data['tests']:
            test_result = '%s: %s\n' % (get_name(test), str(test['status']))
            results1.append(test_result)
        results2 = []
        for test in job2_data['tests']:
            test_result = '%s: %s\n' % (get_name(test), str(test['status']))
            results2.append(test_result)
        if str(results1) != str(results2):
            test_results_header = ['\n', '# TEST RESULTS\n']
            job1_results.extend(test_results_header)
            job1_results.extend(results1)
            job2_results.extend(test_results_header)
            job2_results.extend(results2)

    if 'config' in args.diff_filter:
        config1 = self._get_config(job1_dir)
        config2 = self._get_config(job2_dir)
        if str(config1) != str(config2):
            config_header = ['\n', '# SETTINGS\n']
            job1_results.extend(config_header)
            job1_results.extend(config1)
            job2_results.extend(config_header)
            job2_results.extend(config2)

    if 'sysinfo' in args.diff_filter:
        sysinfo_pre1 = self._get_sysinfo(job1_dir, 'pre')
        sysinfo_pre2 = self._get_sysinfo(job2_dir, 'pre')
        if str(sysinfo_pre1) != str(sysinfo_pre2):
            sysinfo_header_pre = ['\n', '# SYSINFO PRE\n']
            job1_results.extend(sysinfo_header_pre)
            job1_results.extend(sysinfo_pre1)
            job2_results.extend(sysinfo_header_pre)
            job2_results.extend(sysinfo_pre2)
        sysinfo_post1 = self._get_sysinfo(job1_dir, 'post')
        sysinfo_post2 = self._get_sysinfo(job2_dir, 'post')
        if str(sysinfo_post1) != str(sysinfo_post2):
            sysinfo_header_post = ['\n', '# SYSINFO POST\n']
            job1_results.extend(sysinfo_header_post)
            job1_results.extend(sysinfo_post1)
            job2_results.extend(sysinfo_header_post)
            job2_results.extend(sysinfo_post2)

    if getattr(args, 'create_reports', False):
        self.std_diff_output = False
        prefix = 'avocado_diff_%s_' % job1_id[:7]
        tmp_file1 = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                                suffix='.txt', delete=False)
        tmp_file1.writelines(job1_results)
        tmp_file1.close()
        prefix = 'avocado_diff_%s_' % job2_id[:7]
        tmp_file2 = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                                suffix='.txt', delete=False)
        tmp_file2.writelines(job2_results)
        tmp_file2.close()
        LOG_UI.info('%s %s', tmp_file1.name, tmp_file2.name)

    if (getattr(args, 'open_browser', False) and
            getattr(args, 'html', None) is None):
        prefix = 'avocado_diff_%s_%s_' % (job1_id[:7], job2_id[:7])
        tmp_file = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                               suffix='.html', delete=False)
        setattr(args, 'html', tmp_file.name)

    if getattr(args, 'html', None) is not None:
        self.std_diff_output = False
        try:
            html_diff = HtmlDiff()
            html_diff._legend = """
            <table class="diff" summary="Legends">
            <tr> <td>
            <table border="" summary="Colors">
            <tr><th> Colors </th> </tr>
            <tr><td class="diff_add"> Added </td></tr>
            <tr><td class="diff_chg">Changed</td> </tr>
            <tr><td class="diff_sub">Deleted</td> </tr>
            </table></td>
            <td>
            <table border="" summary="Links">
            <tr><th colspan="2"> Links </th> </tr>
            <tr><td>(f)irst change</td> </tr>
            <tr><td>(n)ext change</td> </tr>
            <tr><td>(t)op</td> </tr>
            </table></td>
            </tr>
            </table>"""
            job_diff_html = html_diff.make_file(job1_results,
                                                job2_results,
                                                fromdesc=job1_id,
                                                todesc=job2_id)
            with open(args.html, 'w') as html_file:
                html_file.write(job_diff_html)
            LOG_UI.info(args.html)
        except IOError as exception:
            LOG_UI.error(exception)
            sys.exit(exit_codes.AVOCADO_FAIL)

    if getattr(args, 'open_browser', False):
        setsid = getattr(os, 'setsid', None)
        if not setsid:
            setsid = getattr(os, 'setpgrp', None)
        with open(os.devnull, "r+") as inout:
            cmd = ['xdg-open', args.html]
            subprocess.Popen(cmd, close_fds=True, stdin=inout,
                             stdout=inout, stderr=inout,
                             preexec_fn=setsid)

    if self.std_diff_output:
        if self.term.enabled:
            for line in self._cdiff(unified_diff(job1_results,
                                                 job2_results,
                                                 fromfile=job1_id,
                                                 tofile=job2_id)):
                LOG_UI.debug(line.strip())
        else:
            for line in unified_diff(job1_results,
                                     job2_results,
                                     fromfile=job1_id,
                                     tofile=job2_id):
                LOG_UI.debug(line.strip())
def list(self):
    try:
        self._list()
    except KeyboardInterrupt:
        LOG_UI.error('Command interrupted by user...')
        return exit_codes.AVOCADO_FAIL
def initialize(self, args):
    # Deprecated filters
    only = getattr(args, "filter_only", None)
    if only:
        self._log_deprecation_msg("--filter-only", "--mux-filter-only")
        mux_filter_only = getattr(args, "mux_filter_only")
        if mux_filter_only:
            args.mux_filter_only = mux_filter_only + only
        else:
            args.mux_filter_only = only
    out = getattr(args, "filter_out", None)
    if out:
        self._log_deprecation_msg("--filter-out", "--mux-filter-out")
        mux_filter_out = getattr(args, "mux_filter_out")
        if mux_filter_out:
            args.mux_filter_out = mux_filter_out + out
        else:
            args.mux_filter_out = out
    debug = getattr(args, "mux_debug", False)
    if debug:
        data = mux.MuxTreeNodeDebug()
    else:
        data = mux.MuxTreeNode()
    # Merge the multiplex
    multiplex_files = getattr(args, "mux_yaml", None)
    if multiplex_files:
        try:
            data.merge(create_from_yaml(multiplex_files, debug))
        except IOError as details:
            error_msg = "%s : %s" % (details.strerror, details.filename)
            LOG_UI.error(error_msg)
            if args.subcommand == 'run':
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                sys.exit(exit_codes.AVOCADO_FAIL)
    # Deprecated --multiplex option
    multiplex_files = getattr(args, "multiplex", None)
    if multiplex_files:
        self._log_deprecation_msg("--multiplex", "--mux-yaml")
        try:
            data.merge(create_from_yaml(multiplex_files, debug))
            from_yaml = create_from_yaml(multiplex_files, debug)
            args.avocado_variants.data_merge(from_yaml)
        except IOError as details:
            error_msg = "%s : %s" % (details.strerror, details.filename)
            LOG_UI.error(error_msg)
            if args.subcommand == 'run':
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                sys.exit(exit_codes.AVOCADO_FAIL)
    # Extend default multiplex tree with --mux-inject values
    for inject in getattr(args, "mux_inject", []):
        entry = inject.split(':', 3)
        if len(entry) < 2:
            raise ValueError("key:entry pairs required, found only %s"
                             % (entry))
        elif len(entry) == 2:  # key, entry
            entry.insert(0, '')  # add path='' (root)
        data.get_node(entry[0], True).value[entry[1]] = entry[2]
    mux_filter_only = getattr(args, 'mux_filter_only', None)
    mux_filter_out = getattr(args, 'mux_filter_out', None)
    data = mux.apply_filters(data, mux_filter_only, mux_filter_out)
    if data != mux.MuxTreeNode():
        mux_path = getattr(args, "mux_path", ["/run/*"])
        if mux_path is None:
            mux_path = ["/run/*"]
        self.initialize_mux(data, mux_path, debug)