def error(self, message):
    """Print the full help text and the error, then exit with failure.

    Overrides argparse's default ``error`` so output goes through the
    avocado UI logger instead of plain stderr.

    :param message: the error description produced by argparse
    """
    help_text = self.format_help()
    LOG_UI.debug(help_text)
    LOG_UI.error("%s: error: %s", self.prog, message)
    # An unrecognized option frequently means the plugin providing it
    # is not installed
    if "unrecognized arguments" in message:
        hint = ("Perhaps a plugin is missing; run 'avocado"
                " plugins' to list the installed ones")
        LOG_UI.warning(hint)
    self.exit(exit_codes.AVOCADO_FAIL)
async def spawn_tasks(self, parallel_tasks):
    """Spawn pending tasks, keeping at most ``parallel_tasks`` in flight.

    Loops until there are no pending tasks left, sleeping briefly while
    the number of spawned-but-unfinished tasks is at the limit.

    :param parallel_tasks: maximum number of tasks to keep in flight
    """
    while True:
        # Throttle: spawned tasks still pending on the status server
        # count against the allowed parallelism.
        while len(set(self.status_server.tasks_pending) &
                  set(self.spawned_tasks)) >= parallel_tasks:
            await asyncio.sleep(0.1)
        if not self.pending_tasks:
            print("Finished spawning tasks")
            break
        task = self.pending_tasks[0]
        spawn_ok = await self.spawner.spawn_task(task)
        identifier = task.identifier
        self.pending_tasks.remove(task)
        self.spawned_tasks.append(identifier)
        if not spawn_ok:
            LOG_UI.error("ERROR: failed to spawn task: %s", identifier)
            continue
        if self.spawner.is_task_alive(task):
            LOG_UI.info("%s spawned and alive", identifier)
        else:
            LOG_UI.warning("%s is not alive shortly after being spawned",
                           identifier)
def run(self, config):
    """
    Run test modules or simple tests.

    :param config: Configuration received from command line parser and
                   possibly other sources.
    :type config: dict
    """
    job_id = config.get('run.unique_job_id')
    if job_id is not None:
        # A valid custom job id is exactly 40 hexadecimal digits (the
        # same shape as a SHA1 digest)
        try:
            int(job_id, 16)
            if len(job_id) != 40:
                raise ValueError
        except ValueError:
            LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
            sys.exit(exit_codes.AVOCADO_FAIL)
    try:
        suite = TestSuite.from_config(config, name='')
        if suite.size == 0:
            LOG_UI.warning("Suite is empty. There is no tests to run. This "
                           "usually happens when you pass "
                           "--ignore-missing-references and there is no "
                           "more references to process.")
            sys.exit(exit_codes.AVOCADO_FAIL)
    except TestSuiteError as err:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_JOB_FAIL)
    with job.Job(config, [suite]) as job_instance:
        return job_instance.run()
def run(self, config):
    """Entry point for the assets command.

    Only the 'fetch' subcommand is handled here: each given Python test
    file is scanned for assets, which are then downloaded to the cache.

    :param config: the command configuration
    :type config: dict
    :returns: an exit code suitable for the command line
    """
    subcommand = config.get('assets_subcommand')
    # we want to let the command caller knows about fails
    exitcode = exit_codes.AVOCADO_ALL_OK
    if subcommand == 'fetch':
        # fetch assets from instrumented tests
        for test_file in config.get('assets.fetch.references'):
            supported = (os.path.isfile(test_file) and
                         test_file.endswith('.py'))
            if not supported:
                LOG_UI.warning('No such file or file not supported: %s',
                               test_file)
                exitcode |= exit_codes.AVOCADO_FAIL
                continue
            LOG_UI.debug('Fetching assets from %s.', test_file)
            success, fail = fetch_assets(test_file)
            for fetched in success:
                LOG_UI.debug(' File %s fetched or already on cache.',
                             fetched)
            for failed in fail:
                LOG_UI.error(failed)
            if fail:
                exitcode |= exit_codes.AVOCADO_FAIL
    # check if we should ignore the errors
    if config.get('assets.fetch.ignore_errors'):
        exitcode = exit_codes.AVOCADO_ALL_OK
    return exitcode
def handle_fetch(config):
    """Fetch assets from the given instrumented test references.

    :param config: the command configuration
    :type config: dict
    :returns: an exit code suitable for the command line
    """
    exitcode = exit_codes.AVOCADO_ALL_OK
    # fetch assets from instrumented tests
    for test_file in config.get('assets.fetch.references'):
        if not (os.path.isfile(test_file) and test_file.endswith('.py')):
            LOG_UI.warning('No such file or file not supported: %s',
                           test_file)
            exitcode |= exit_codes.AVOCADO_FAIL
            continue
        LOG_UI.debug('Fetching assets from %s.', test_file)
        success, fail = fetch_assets(test_file)
        for asset in success:
            LOG_UI.debug(' File %s fetched or already on cache.', asset)
        for asset in fail:
            LOG_UI.error(asset)
        if fail:
            exitcode |= exit_codes.AVOCADO_FAIL
    # check if we should ignore the errors
    if config.get('assets.fetch.ignore_errors'):
        return exit_codes.AVOCADO_ALL_OK
    return exitcode
def run(self, config):
    """Entry point for the 'nrun' command.

    Resolves test references into tasks, spawns them (optionally inside
    podman containers) and waits for their results via the status server.

    :param config: configuration received from the command line parser
    :type config: dict
    :returns: an exit code from the exit_codes module
    """
    hint_filepath = '.avocado.hint'
    hint = None
    if os.path.exists(hint_filepath):
        hint = HintParser(hint_filepath)
    resolutions = resolver.resolve(config.get('nrun.references'), hint)
    tasks = job.resolutions_to_tasks(resolutions, config)
    # pylint: disable=W0201
    self.pending_tasks, missing_requirements = nrunner.check_tasks_requirements(
        tasks)
    if missing_requirements:
        missing_tasks_msg = "\n".join(
            [str(t) for t in missing_requirements])
        LOG_UI.warning(
            'Tasks will not be run due to missing requirements: %s',
            missing_tasks_msg)
    if not self.pending_tasks:
        LOG_UI.error('No test to be executed, exiting...')
        sys.exit(exit_codes.AVOCADO_JOB_FAIL)
    # Randomize execution order unless explicitly disabled
    if not config.get('nrun.disable_task_randomization'):
        random.shuffle(self.pending_tasks)
    self.spawned_tasks = []  # pylint: disable=W0201
    try:
        if config.get('nrun.spawners.podman.enabled'):
            # Fail early if the podman binary is not present
            if not os.path.exists(PodmanSpawner.PODMAN_BIN):
                msg = ('Podman Spawner selected, but podman binary "%s" '
                       'is not available on the system. Please install '
                       'podman before attempting to use this feature.')
                msg %= PodmanSpawner.PODMAN_BIN
                LOG_UI.error(msg)
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            self.spawner = PodmanSpawner()  # pylint: disable=W0201
        else:
            self.spawner = ProcessSpawner()  # pylint: disable=W0201
        listen = config.get('nrun.status_server.listen')
        verbose = config.get('core.verbose')
        self.status_server = nrunner.StatusServer(
            listen,  # pylint: disable=W0201
            [t.identifier for t in self.pending_tasks],
            verbose)
        self.status_server.start()
        parallel_tasks = config.get('nrun.parallel_tasks')
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.spawn_tasks(parallel_tasks))
        loop.run_until_complete(self.status_server.wait())
        self.report_results()
        exit_code = exit_codes.AVOCADO_ALL_OK
        # Both failed and errored tests map to the same "tests failed"
        # exit code bit
        if self.status_server.result.get('fail') is not None:
            exit_code |= exit_codes.AVOCADO_TESTS_FAIL
        elif self.status_server.result.get('error') is not None:
            exit_code |= exit_codes.AVOCADO_TESTS_FAIL
        return exit_code
    except Exception as e:  # pylint: disable=W0703
        LOG_UI.error(e)
        return exit_codes.AVOCADO_FAIL
def check_tasks_requirements(self, tasks):
    """Filter tasks down to those with a suitable runner available.

    Tasks without a matching runner are logged as skipped.

    :param tasks: the tasks whose runner requirements will be checked
    :returns: the subset of ``tasks`` that can actually be run
    """
    runnable = []
    for candidate in tasks:
        if self.pick_runner(candidate):
            runnable.append(candidate)
        else:
            LOG_UI.warning('Task will not be run due to missing requirements: %s',
                           candidate)
    return runnable
def check_tasks_requirements(tasks, runners_registry):
    """
    Checks if tasks have runner requirements fulfilled

    :param tasks: the tasks whose runner requirements will be checked
    :type tasks: list of :class:`avocado.core.nrunner.Task`
    :param runners_registry: a registry with previously found (and not
                             found) runners keyed by task kind
    :type runners_registry: dict
    :returns: the tasks for which a runner could be picked
    """
    runnable = []
    for task in tasks:
        if pick_runner(task, runners_registry):
            runnable.append(task)
        else:
            LOG_UI.warning('Task will not be run due to missing requirements: %s',
                           task)
    return runnable
def spawn_tasks(self, parallel_tasks):
    # Coroutine (pre async/await, `yield from` style) that spawns pending
    # tasks while keeping at most `parallel_tasks` of them in flight.
    while True:
        # Throttle: sleep while the number of spawned tasks still pending
        # on the status server is at the allowed parallelism.
        while len(set(self.status_server.tasks_pending).intersection(self.spawned_tasks)) >= parallel_tasks:
            yield from asyncio.sleep(0.1)
        try:
            task = self.pending_tasks[0]
        except IndexError:
            # No pending tasks left; we are done spawning
            print("Finished spawning tasks")
            break
        # NOTE(review): the spawn result is not checked here, so a failed
        # spawn is still recorded in spawned_tasks -- confirm whether that
        # is intended.
        yield from self.spawner.spawn_task(task)
        identifier = task.identifier
        self.pending_tasks.remove(task)
        self.spawned_tasks.append(identifier)
        alive = self.spawner.is_task_alive(task)
        if not alive:
            LOG_UI.warning("%s is not alive shortly after being spawned",
                           identifier)
        else:
            LOG_UI.info("%s spawned and alive", identifier)
def _setup_job_category(self):
    """
    This has to be called after self.logdir has been defined

    It attempts to create a directory one level up from the job results,
    with the given category name.  Then, a symbolic link is created to
    this job results directory.  This should allow a user to look at a
    single directory for all jobs of a given category.
    """
    category = self.config.get("run.job_category")
    if category is None:
        return
    # Refuse category names that would not survive as a directory name
    if category != astring.string_to_safe_path(category):
        msg = (f"Unable to set category in job results: name is not "
               f"filesystem safe: {category}")
        LOG_UI.warning(msg)
        LOG_JOB.warning(msg)
        return
    # we could also get "base_logdir" from config, but I believe this is
    # the best choice because it reduces the dependency surface (depends
    # only on self.logdir)
    category_path = os.path.join(os.path.dirname(self.logdir), category)
    try:
        os.mkdir(category_path)
    except FileExistsError:
        # A previous job already created the category directory
        pass
    link_target = os.path.relpath(self.logdir, category_path)
    link_name = os.path.join(category_path, os.path.basename(self.logdir))
    try:
        os.symlink(link_target, link_name)
    except NotImplementedError:
        msg = f"Unable to link this job to category {category}"
        LOG_UI.warning(msg)
        LOG_JOB.warning(msg)
    except OSError:
        msg = f"Permission denied to link this job to category {category}"
        LOG_UI.warning(msg)
        LOG_JOB.warning(msg)
def run(self, config):
    """Prepare a replay job based on a previous job's recorded data.

    Reads the source job's results directory and merges its recorded
    configuration, test references and variants into ``config``,
    honoring the ``--replay-ignore`` and ``--replay-test-status``
    options.

    :param config: configuration received from the command line parser
    :type config: dict
    """
    job_id = config.get('run.replay.job_id')
    if job_id is None:
        # Not a replay run
        return
    err = None
    replay_ignore = config.get('run.replay.ignore')
    test_status = config.get('run.replay.test_status')
    # Validate mutually exclusive option combinations first
    if test_status and 'variants' in replay_ignore:
        err = ("Option `--replay-test-status` is incompatible with "
               "`--replay-ignore variants`.")
    elif test_status and config.get('run.references'):
        err = ("Option --replay-test-status is incompatible with "
               "test references given on the command line.")
    elif config.get("remote_hostname", False):
        err = "Currently we don't replay jobs in remote hosts."
    if err is not None:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    resultsdir = data_dir.get_job_results_dir(
        job_id, config.get('run.results_dir'))
    if resultsdir is None:
        LOG_UI.error("Can't find job results directory for '%s'", job_id)
        sys.exit(exit_codes.AVOCADO_FAIL)
    with open(os.path.join(resultsdir, 'id'), 'r') as id_file:
        config['replay_sourcejob'] = id_file.read().strip()
    replay_config = jobdata.retrieve_job_config(resultsdir)
    # Options that may be carried over from the source job
    whitelist = ['loaders',
                 'external_runner',
                 'external_runner_testdir',
                 'external_runner_chdir',
                 'failfast',
                 'ignore_missing_references',
                 'execution_order']
    if replay_config is None:
        LOG_UI.warn('Source job config data not found. These options will '
                    'not be loaded in this replay job: %s',
                    ', '.join(whitelist))
    else:
        for option in whitelist:
            optvalue = config.get(option, None)
            # Temporary, this will be removed soon
            if option in ['failfast',
                          'ignore_missing_references',
                          'execution_order',
                          'loaders',
                          'external_runner',
                          'external_runner_chdir',
                          'external_runner_testdir']:
                optvalue = config.get('run.{}'.format(option))
            if optvalue is not None:
                # A command-line value wins over the recorded one
                LOG_UI.warn("Overriding the replay %s with the --%s value "
                            "given on the command line.",
                            option.replace('_', '-'),
                            option.replace('_', '-'))
            elif option in replay_config:
                config[option] = replay_config[option]
    if config.get('run.references'):
        LOG_UI.warn('Overriding the replay test references with test '
                    'references given in the command line.')
    else:
        references = jobdata.retrieve_references(resultsdir)
        if references is None:
            LOG_UI.error('Source job test references data not found. '
                         'Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            config['run.references'] = references
    if 'config' in replay_ignore:
        LOG_UI.warn("Ignoring configuration from source job with "
                    "--replay-ignore.")
    else:
        self.load_config(resultsdir)
    if 'variants' in replay_ignore:
        LOG_UI.warn("Ignoring variants from source job with "
                    "--replay-ignore.")
    else:
        variants = jobdata.retrieve_variants(resultsdir)
        if variants is None:
            LOG_UI.error('Source job variants data not found. Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            LOG_UI.warning("Using src job Mux data only, use "
                           "`--replay-ignore variants` to override "
                           "them.")
            config["avocado_variants"] = variants
    # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
    # supplied.
    if config.get('run.replay.resume'):
        # NOTE(review): this reads 'run.replay.test_status' above but
        # writes the 'replay_teststatus' key here -- confirm both keys
        # are intended during this transition.
        if not test_status:
            config['replay_teststatus'] = ["INTERRUPTED"]
        elif "INTERRUPTED" not in test_status:
            config['replay_teststatus'].append("INTERRUPTED")
    if test_status:
        replay_map = self._create_replay_map(resultsdir, test_status)
        config['replay_map'] = replay_map
    # Use the original directory to resolve test references properly
    pwd = jobdata.retrieve_pwd(resultsdir)
    if pwd is not None:
        if os.path.exists(pwd):
            os.chdir(pwd)
        else:
            LOG_UI.warn("Directory used in the replay source job '%s' does"
                        " not exist, using '.' instead", pwd)
def run(self, args):
    """Prepare a replay job from a previous job's recorded data.

    Mutates ``args`` in place with the source job's recorded options,
    test references and variants, honoring ``--replay-ignore`` and
    ``--replay-test-status``.
    """
    if getattr(args, 'replay_jobid', None) is None:
        # Not a replay run
        return
    err = None
    # Validate mutually exclusive option combinations first
    if args.replay_teststatus and 'variants' in args.replay_ignore:
        err = ("Option `--replay-test-status` is incompatible with "
               "`--replay-ignore variants`.")
    elif args.replay_teststatus and args.reference:
        err = ("Option --replay-test-status is incompatible with "
               "test references given on the command line.")
    elif args.remote_hostname:
        err = "Currently we don't replay jobs in remote hosts."
    if err is not None:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    if getattr(args, 'logdir', None) is not None:
        logdir = args.logdir
    else:
        logdir = settings.get_value(section='datadir.paths',
                                    key='logs_dir', key_type='path',
                                    default=None)
    try:
        resultsdir = jobdata.get_resultsdir(logdir, args.replay_jobid)
    except ValueError as exception:
        LOG_UI.error(exception.message)
        sys.exit(exit_codes.AVOCADO_FAIL)
    if resultsdir is None:
        LOG_UI.error("Can't find job results directory in '%s'", logdir)
        sys.exit(exit_codes.AVOCADO_FAIL)
    sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                               args.replay_jobid)
    if sourcejob is None:
        msg = ("Can't find matching job id '%s' in '%s' directory."
               % (args.replay_jobid, resultsdir))
        LOG_UI.error(msg)
        sys.exit(exit_codes.AVOCADO_FAIL)
    setattr(args, 'replay_sourcejob', sourcejob)
    replay_args = jobdata.retrieve_args(resultsdir)
    # Options that may be carried over from the source job
    whitelist = ['loaders',
                 'external_runner',
                 'external_runner_testdir',
                 'external_runner_chdir',
                 'failfast',
                 'ignore_missing_references']
    if replay_args is None:
        LOG_UI.warn('Source job args data not found. These options will '
                    'not be loaded in this replay job: %s',
                    ', '.join(whitelist))
    else:
        for option in whitelist:
            optvalue = getattr(args, option, None)
            if optvalue is not None:
                # A command-line value wins over the recorded one
                LOG_UI.warn("Overriding the replay %s with the --%s value "
                            "given on the command line.",
                            option.replace('_', '-'),
                            option.replace('_', '-'))
            else:
                # NOTE(review): direct indexing may raise KeyError if the
                # recorded args lack this option -- confirm the recorded
                # data always contains the full whitelist.
                setattr(args, option, replay_args[option])
    # Keeping this for compatibility.
    # TODO: Use replay_args['reference'] at some point in the future.
    if getattr(args, 'reference', None):
        LOG_UI.warn('Overriding the replay test references with test '
                    'references given in the command line.')
    else:
        references = jobdata.retrieve_references(resultsdir)
        if references is None:
            LOG_UI.error('Source job test references data not found. '
                         'Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            setattr(args, 'reference', references)
    if 'config' in args.replay_ignore:
        LOG_UI.warn("Ignoring configuration from source job with "
                    "--replay-ignore.")
    else:
        self.load_config(resultsdir)
    if 'variants' in args.replay_ignore:
        LOG_UI.warn("Ignoring variants from source job with "
                    "--replay-ignore.")
    else:
        variants = jobdata.retrieve_variants(resultsdir)
        if variants is None:
            LOG_UI.error('Source job variants data not found. Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            # Ignore data manipulation. This is necessary, because
            # we replaced the unparsed object with parsed one. There
            # are other plugins running before/after this which might
            # want to alter the variants object.
            if args.avocado_variants.is_parsed():
                LOG_UI.warning("Using src job Mux data only, use "
                               "`--replay-ignore variants` to override "
                               "them.")
            setattr(args, "avocado_variants", variants)
    # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
    # supplied.
    if args.replay_resume:
        if not args.replay_teststatus:
            args.replay_teststatus = ["INTERRUPTED"]
        elif "INTERRUPTED" not in args.replay_teststatus:
            args.replay_teststatus.append("INTERRUPTED")
    if args.replay_teststatus:
        replay_map = self._create_replay_map(resultsdir,
                                             args.replay_teststatus)
        setattr(args, 'replay_map', replay_map)
    # Use the original directory to resolve test references properly
    pwd = jobdata.retrieve_pwd(resultsdir)
    if pwd is not None:
        if os.path.exists(pwd):
            os.chdir(pwd)
        else:
            LOG_UI.warn("Directory used in the replay source job '%s' does"
                        " not exist, using '.' instead", pwd)
def _log_deprecation_msg(deprecated, current):
    """Emit a deprecation warning through the avocado.LOG_UI logger.

    :param deprecated: the name that is deprecated
    :param current: the name that should be used instead
    """
    LOG_UI.warning("The use of '%s' is deprecated, please use '%s' instead",
                   deprecated, current)
def _log_deprecation_msg(deprecated, current):
    """Warn, via the avocado.LOG_UI logger, that a name is deprecated.

    :param deprecated: the name that is deprecated
    :param current: the name that should be used instead
    """
    template = "The use of '%s' is deprecated, please use '%s' instead"
    LOG_UI.warning(template, deprecated, current)
def run(self, args):
    """Warn about the deprecated 'multiplex' command, then delegate.

    Runs the 'variants' implementation via ``super()``.
    """
    deprecation_msg = ("The 'avocado multiplex' command is deprecated by the "
                       "'avocado variants' one. Please start using that one "
                       "instead as this will be removed in Avocado 52.0.")
    LOG_UI.warning(deprecation_msg)
    super(Multiplex, self).run(args)
def run(self, args):
    """Run the 'variants' implementation, warning that 'multiplex' is
    deprecated."""
    msg = ("The 'avocado multiplex' command is deprecated by the "
           "'avocado variants' one. Please start using that one "
           "instead as this will be removed in Avocado 52.0.")
    LOG_UI.warning(msg)
    super(Multiplex, self).run(args)
def run(self, args):
    """Prepare a replay job from a previous job's recorded data.

    Mutates ``args`` in place with the recorded options, test references
    and variants of the source job identified by ``args.replay_jobid``.
    """
    if getattr(args, 'replay_jobid', None) is None:
        # Not a replay run
        return
    err = None
    # Validate mutually exclusive option combinations first
    if args.replay_teststatus and 'variants' in args.replay_ignore:
        err = ("Option `--replay-test-status` is incompatible with "
               "`--replay-ignore variants`.")
    elif args.replay_teststatus and args.reference:
        err = ("Option --replay-test-status is incompatible with "
               "test references given on the command line.")
    elif getattr(args, "remote_hostname", False):
        err = "Currently we don't replay jobs in remote hosts."
    if err is not None:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    base_logdir = getattr(args, 'base_logdir', None)
    if base_logdir is None:
        base_logdir = settings.get_value(section='datadir.paths',
                                         key='logs_dir', key_type='path',
                                         default=None)
    try:
        resultsdir = jobdata.get_resultsdir(base_logdir, args.replay_jobid)
    except ValueError as exception:
        LOG_UI.error(exception.message)
        sys.exit(exit_codes.AVOCADO_FAIL)
    if resultsdir is None:
        LOG_UI.error("Can't find job results directory in '%s'",
                     base_logdir)
        sys.exit(exit_codes.AVOCADO_FAIL)
    sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                               args.replay_jobid)
    if sourcejob is None:
        msg = ("Can't find matching job id '%s' in '%s' directory."
               % (args.replay_jobid, resultsdir))
        LOG_UI.error(msg)
        sys.exit(exit_codes.AVOCADO_FAIL)
    setattr(args, 'replay_sourcejob', sourcejob)
    replay_args = jobdata.retrieve_args(resultsdir)
    # Options that may be carried over from the source job
    whitelist = ['loaders',
                 'external_runner',
                 'external_runner_testdir',
                 'external_runner_chdir',
                 'failfast',
                 'ignore_missing_references',
                 'execution_order']
    if replay_args is None:
        LOG_UI.warn('Source job args data not found. These options will '
                    'not be loaded in this replay job: %s',
                    ', '.join(whitelist))
    else:
        for option in whitelist:
            optvalue = getattr(args, option, None)
            if optvalue is not None:
                # A command-line value wins over the recorded one
                LOG_UI.warn("Overriding the replay %s with the --%s value "
                            "given on the command line.",
                            option.replace('_', '-'),
                            option.replace('_', '-'))
            elif option in replay_args:
                setattr(args, option, replay_args[option])
    if getattr(args, 'reference', None):
        LOG_UI.warn('Overriding the replay test references with test '
                    'references given in the command line.')
    else:
        references = jobdata.retrieve_references(resultsdir)
        if references is None:
            LOG_UI.error('Source job test references data not found. '
                         'Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            setattr(args, 'reference', references)
    if 'config' in args.replay_ignore:
        LOG_UI.warn("Ignoring configuration from source job with "
                    "--replay-ignore.")
    else:
        self.load_config(resultsdir)
    if 'variants' in args.replay_ignore:
        LOG_UI.warn("Ignoring variants from source job with "
                    "--replay-ignore.")
    else:
        variants = jobdata.retrieve_variants(resultsdir)
        if variants is None:
            LOG_UI.error('Source job variants data not found. Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            LOG_UI.warning("Using src job Mux data only, use "
                           "`--replay-ignore variants` to override "
                           "them.")
            setattr(args, "avocado_variants", variants)
    # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
    # supplied.
    if args.replay_resume:
        if not args.replay_teststatus:
            args.replay_teststatus = ["INTERRUPTED"]
        elif "INTERRUPTED" not in args.replay_teststatus:
            args.replay_teststatus.append("INTERRUPTED")
    if args.replay_teststatus:
        replay_map = self._create_replay_map(resultsdir,
                                             args.replay_teststatus)
        setattr(args, 'replay_map', replay_map)
    # Use the original directory to resolve test references properly
    pwd = jobdata.retrieve_pwd(resultsdir)
    if pwd is not None:
        if os.path.exists(pwd):
            os.chdir(pwd)
        else:
            LOG_UI.warn("Directory used in the replay source job '%s' does"
                        " not exist, using '.' instead", pwd)
def run(self, config):
    """Prepare a replay job from a previous job's recorded data.

    Mutates ``config`` in place with the recorded options, test
    references and variants of the source job identified by the
    ``replay_jobid`` key.

    :param config: configuration received from the command line parser
    :type config: dict
    """
    if config.get('replay_jobid', None) is None:
        # Not a replay run
        return
    err = None
    # Validate mutually exclusive option combinations first
    if config.get('replay_teststatus') and 'variants' in config.get(
            'replay_ignore'):
        err = ("Option `--replay-test-status` is incompatible with "
               "`--replay-ignore variants`.")
    elif config.get('replay_teststatus') and config.get('references'):
        err = ("Option --replay-test-status is incompatible with "
               "test references given on the command line.")
    elif config.get("remote_hostname", False):
        err = "Currently we don't replay jobs in remote hosts."
    if err is not None:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    base_logdir = config.get('base_logdir', None)
    if base_logdir is None:
        base_logdir = settings.get_value(section='datadir.paths',
                                         key='logs_dir', key_type='path',
                                         default=None)
    try:
        resultsdir = jobdata.get_resultsdir(base_logdir,
                                            config.get('replay_jobid'))
    except ValueError as exception:
        LOG_UI.error(exception)
        sys.exit(exit_codes.AVOCADO_FAIL)
    if resultsdir is None:
        LOG_UI.error("Can't find job results directory in '%s'",
                     base_logdir)
        sys.exit(exit_codes.AVOCADO_FAIL)
    sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                               config.get('replay_jobid'))
    if sourcejob is None:
        msg = ("Can't find matching job id '%s' in '%s' directory."
               % (config.get('replay_jobid'), resultsdir))
        LOG_UI.error(msg)
        sys.exit(exit_codes.AVOCADO_FAIL)
    config['replay_sourcejob'] = sourcejob
    replay_config = jobdata.retrieve_job_config(resultsdir)
    # Options that may be carried over from the source job
    whitelist = ['loaders',
                 'external_runner',
                 'external_runner_testdir',
                 'external_runner_chdir',
                 'failfast',
                 'ignore_missing_references',
                 'execution_order']
    if replay_config is None:
        LOG_UI.warn('Source job config data not found. These options will '
                    'not be loaded in this replay job: %s',
                    ', '.join(whitelist))
    else:
        for option in whitelist:
            optvalue = config.get(option, None)
            if optvalue is not None:
                # A command-line value wins over the recorded one
                LOG_UI.warn("Overriding the replay %s with the --%s value "
                            "given on the command line.",
                            option.replace('_', '-'),
                            option.replace('_', '-'))
            elif option in replay_config:
                config[option] = replay_config[option]
    if config.get('references', None):
        LOG_UI.warn('Overriding the replay test references with test '
                    'references given in the command line.')
    else:
        references = jobdata.retrieve_references(resultsdir)
        if references is None:
            LOG_UI.error('Source job test references data not found. '
                         'Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            config['references'] = references
    if 'config' in config.get('replay_ignore'):
        LOG_UI.warn("Ignoring configuration from source job with "
                    "--replay-ignore.")
    else:
        self.load_config(resultsdir)
    if 'variants' in config.get('replay_ignore'):
        LOG_UI.warn("Ignoring variants from source job with "
                    "--replay-ignore.")
    else:
        variants = jobdata.retrieve_variants(resultsdir)
        if variants is None:
            LOG_UI.error('Source job variants data not found. Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            LOG_UI.warning("Using src job Mux data only, use "
                           "`--replay-ignore variants` to override "
                           "them.")
            config["avocado_variants"] = variants
    # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
    # supplied.
    if config.get('replay_resume'):
        if not config.get('replay_teststatus'):
            config['replay_teststatus'] = ["INTERRUPTED"]
        elif "INTERRUPTED" not in config.get('replay_teststatus'):
            config['replay_teststatus'].append("INTERRUPTED")
    if config.get('replay_teststatus'):
        replay_map = self._create_replay_map(
            resultsdir, config.get('replay_teststatus'))
        config['replay_map'] = replay_map
    # Use the original directory to resolve test references properly
    pwd = jobdata.retrieve_pwd(resultsdir)
    if pwd is not None:
        if os.path.exists(pwd):
            os.chdir(pwd)
        else:
            LOG_UI.warn(
                "Directory used in the replay source job '%s' does"
                " not exist, using '.' instead", pwd)