def handle_output_files_command(self, config):
    """Called when 'avocado jobs get-output-files' command is executed.

    Downloads the output files produced by the tests of a given job to
    a destination directory, using the spawner the job was run with.

    :param config: job settings; reads 'jobs.get.output_files.job_id'
                   and 'jobs.get.output_files.destination'.
    :returns: an exit code (AVOCADO_GENERIC_CRASH on failure, otherwise
              whatever self._download_tests() returns).
    """
    job_id = config.get('jobs.get.output_files.job_id')
    destination = config.get('jobs.get.output_files.destination')
    results_dir = get_job_results_dir(job_id)
    # Fail early when the job id cannot be resolved; without this guard
    # os.path.join(None, ...) raises TypeError. This mirrors the check
    # done by handle_show_command().
    if results_dir is None:
        LOG_UI.error("Error: Job %s not found", job_id)
        return exit_codes.AVOCADO_GENERIC_CRASH
    results_file = os.path.join(results_dir, 'results.json')
    config_file = os.path.join(results_dir, 'jobdata/args.json')
    try:
        config_data = self._get_data_from_file(config_file)
        results_data = self._get_data_from_file(results_file)
    except FileNotFoundError as ex:
        LOG_UI.error("Could not get job information: %s", ex)
        return exit_codes.AVOCADO_GENERIC_CRASH
    # Map the spawner name recorded in the job config to its class.
    spawners = {'process': ProcessSpawner,
                'podman': PodmanSpawner}
    spawner_name = config_data.get('nrunner.spawner')
    spawner = spawners.get(spawner_name)
    if spawner is None:
        msg = ("Could not find the spawner for job %s. This command is "
               "experimental and only supported when job executed with "
               "the Spawner architecture.")
        LOG_UI.error(msg, job_id)
        return exit_codes.AVOCADO_GENERIC_CRASH
    return self._download_tests(results_data.get('tests'),
                                destination,
                                job_id,
                                spawner)
def _setup_job(job_id):
    """Resolve *job_id* into (results directory, full source job id).

    Exits the process with AVOCADO_FAIL when no results directory
    matches the given job id.
    """
    results_path = data_dir.get_job_results_dir(job_id)
    if results_path is None:
        LOG_UI.error("Can't find job results directory for '%s'", job_id)
        sys.exit(exit_codes.AVOCADO_FAIL)
    # The 'id' file inside the results dir holds the complete job id.
    id_file_path = os.path.join(results_path, 'id')
    with open(id_file_path, 'r') as id_file:
        full_job_id = id_file.read().strip()
    return results_path, full_job_id
def _retrieve_source_job_config(source_job_id):
    """Return the configuration stored by the source job.

    Exits via Replay._exit_fail() when the results directory cannot be
    found or the stored configuration cannot be opened or parsed.
    """
    results_dir = get_job_results_dir(source_job_id)
    if not results_dir:
        Replay._exit_fail(
            f'Could not find the results directory for Job "{source_job_id}"')
    try:
        return retrieve_job_config(results_dir)
    except OSError:
        Replay._exit_fail(
            f"Could not open the {source_job_id} Job configuration")
    except json.decoder.JSONDecodeError:
        Replay._exit_fail(
            f'Could not read a valid configuration of Job "{source_job_id}"')
def _retrieve_source_job_config(source_job_id):
    """Load and return the JSON configuration saved by the source job.

    Reads <results_dir>/jobdata/args.json; exits via Replay._exit_fail()
    when the results directory is missing or the file cannot be opened
    or parsed as JSON.
    """
    results_dir = get_job_results_dir(source_job_id)
    if not results_dir:
        Replay._exit_fail('Could not find the results directory for Job "%s"'
                          % source_job_id)
    config_file_path = os.path.join(results_dir, 'jobdata', 'args.json')
    try:
        with open(config_file_path, 'r') as config_file:
            return json.load(config_file)
    except OSError:
        Replay._exit_fail('Could not open the source Job configuration "%s"'
                          % config_file_path)
    except json.decoder.JSONDecodeError:
        Replay._exit_fail('Could not read a valid configuration from file "%s"'
                          % config_file_path)
def _retrieve_source_job_config(source_job_id):
    """Return the configuration of the source job.

    Exits via Replay._exit_fail() when the results directory cannot be
    found or the stored configuration cannot be opened or parsed.
    """
    results_dir = get_job_results_dir(source_job_id)
    if not results_dir:
        Replay._exit_fail('Could not find the results directory for Job "%s"'
                          % source_job_id)
    try:
        return retrieve_job_config(results_dir)
    except OSError:
        Replay._exit_fail('Could not open the %s Job configuration'
                          % source_job_id)
    except json.decoder.JSONDecodeError:
        Replay._exit_fail('Could not read a valid configuration of Job "%s"'
                          % source_job_id)
def handle_show_command(self, config):
    """Called when 'avocado jobs show' command is executed.

    Prints job details (id, log, spawner), the per-test listing and a
    one-line results summary for the job in 'jobs.show.job_id'.

    :param config: job settings; reads 'jobs.show.job_id'.
    :returns: AVOCADO_ALL_OK on success, AVOCADO_GENERIC_CRASH when the
              job or its results.json cannot be found.
    """
    job_id = config.get("jobs.show.job_id")
    results_dir = get_job_results_dir(job_id)
    if results_dir is None:
        LOG_UI.error("Error: Job %s not found", job_id)
        return exit_codes.AVOCADO_GENERIC_CRASH

    results_file = os.path.join(results_dir, "results.json")
    config_file = os.path.join(results_dir, "jobdata/args.json")
    try:
        results_data = self._get_data_from_file(results_file)
    except FileNotFoundError as ex:
        # Results data are important and should exit if not found
        LOG_UI.error(ex)
        return exit_codes.AVOCADO_GENERIC_CRASH

    # Config data is optional. Initialize to an empty dict so that the
    # .get() lookup below works when jobdata/args.json is missing;
    # without this, config_data would be unbound (UnboundLocalError).
    config_data = {}
    try:
        config_data = self._get_data_from_file(config_file)
    except FileNotFoundError:
        pass

    data = {
        "JOB ID": job_id,
        "JOB LOG": results_data.get("debuglog"),
        "SPAWNER": config_data.get("nrunner.spawner", "unknown"),
    }
    # We could improve this soon with more data and colors
    self._print_job_details(data)
    LOG_UI.info("")
    self._print_job_tests(results_data.get("tests"))
    # Note: added the missing space before WARN (the original adjacent
    # literals produced "SKIP n |WARN").
    results = ("PASS %d | ERROR %d | FAIL %d | SKIP %d | "
               "WARN %d | INTERRUPT %s | CANCEL %s")
    results %= (
        results_data.get("pass", 0),
        results_data.get("error", 0),
        results_data.get("failures", 0),
        results_data.get("skip", 0),
        results_data.get("warn", 0),
        results_data.get("interrupt", 0),
        results_data.get("cancel", 0),
    )
    self._print_job_details({"RESULTS": results})
    return exit_codes.AVOCADO_ALL_OK
def handle_show_command(self, config):
    """Called when 'avocado jobs show' command is executed.

    Prints job details (id, log, spawner), the per-test listing and a
    one-line results summary for the job in 'jobs.show.job_id'.

    :param config: job settings; reads 'jobs.show.job_id'.
    :returns: AVOCADO_ALL_OK on success, AVOCADO_GENERIC_CRASH when the
              job or its results.json cannot be found.
    """
    job_id = config.get('jobs.show.job_id')
    results_dir = get_job_results_dir(job_id)
    if results_dir is None:
        LOG_UI.error("Error: Job %s not found", job_id)
        return exit_codes.AVOCADO_GENERIC_CRASH

    results_file = os.path.join(results_dir, 'results.json')
    config_file = os.path.join(results_dir, 'jobdata/args.json')
    try:
        results_data = self._get_data_from_file(results_file)
    except FileNotFoundError as ex:
        # Results data are important and should exit if not found
        LOG_UI.error(ex)
        return exit_codes.AVOCADO_GENERIC_CRASH

    # Config data is optional. Initialize to an empty dict so that the
    # .get() lookup below works when jobdata/args.json is missing;
    # without this, config_data would be unbound (UnboundLocalError).
    config_data = {}
    try:
        config_data = self._get_data_from_file(config_file)
    except FileNotFoundError:
        pass

    data = {
        'JOB ID': job_id,
        'JOB LOG': results_data.get('debuglog'),
        'SPAWNER': config_data.get('nrunner.spawner', 'unknown')
    }
    # We could improve this soon with more data and colors
    self._print_job_details(data)
    LOG_UI.info("")
    self._print_job_tests(results_data.get('tests'))
    # Note: added the missing space before WARN (the original adjacent
    # literals produced "SKIP n |WARN").
    results = ('PASS %d | ERROR %d | FAIL %d | SKIP %d | '
               'WARN %d | INTERRUPT %s | CANCEL %s')
    results %= (results_data.get('pass', 0),
                results_data.get('error', 0),
                results_data.get('failures', 0),
                results_data.get('skip', 0),
                results_data.get('warn', 0),
                results_data.get('interrupt', 0),
                results_data.get('cancel', 0))
    self._print_job_details({'RESULTS': results})
    return exit_codes.AVOCADO_ALL_OK
def stream_output(job_id, task_id):
    """Returns output files streams in binary mode from a task.

    This method will find for output files generated by a task and will
    return a generator with tuples, each one containing a filename and
    bytes.

    You need to provide in your spawner a `stream_output()` method if
    this one is not suitable for your spawner. i.e: if the spawner is
    trying to access a remote output file.

    :param job_id: identifier of the job the task belongs to.
    :param task_id: identifier of the task whose output is wanted.
    :raises SpawnerException: when the task's data directory is missing.
    """
    results_dir = get_job_results_dir(job_id)
    task_id = string_to_safe_path(task_id)
    # The 'data' file's first line points at the task's output dir.
    data_pointer = '{}/test-results/{}/data'.format(results_dir, task_id)
    # Use a context manager so the pointer file is closed deterministically
    # (the original leaked the handle via a bare open().readline()).
    with open(data_pointer, 'r') as pointer_file:
        src = pointer_file.readline().rstrip()
    try:
        for path in Path(src).expanduser().iterdir():
            # Skip directories and empty files.
            if path.is_file() and path.stat().st_size != 0:
                for stream in SpawnerMixin.bytes_from_file(str(path)):
                    yield (path.name, stream)
    except FileNotFoundError as e:
        # Chain the original error so the root cause stays visible.
        raise SpawnerException("Task not found: {}".format(e)) from e
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2016
# Author: Amador Pahim <*****@*****.**>
#
# Simple script that, given the Job (partial) ID, returns the job
# results directory.
#
# $ python avocado-get-job-results-dir.py <job_id>
#

import sys

from avocado.core import data_dir

if __name__ == '__main__':
    # Guard clause: a job id argument is mandatory.
    if len(sys.argv) < 2:
        sys.stderr.write("Please inform the Job ID.\n")
        sys.exit(-1)
    job_reference = sys.argv[1]
    results_path = data_dir.get_job_results_dir(job_reference)
    if results_path is None:
        sys.stderr.write("Can't find job results directory for '%s'\n"
                         % job_reference)
        sys.exit(-1)
    sys.stdout.write('%s\n' % results_path)
def run(self, config):
    """Prepare *config* to replay a previously executed job.

    Reads the source job from 'run.replay.job_id', validates option
    compatibility, then copies test references, configuration and
    variants from the source job's results directory into *config*
    (subject to 'run.replay.ignore'). Exits with AVOCADO_FAIL on any
    fatal problem.
    """
    job_id = config.get('run.replay.job_id')
    # Nothing to do when no replay was requested.
    if job_id is None:
        return
    err = None
    replay_ignore = config.get('run.replay.ignore')
    test_status = config.get('run.replay.test_status')
    # Reject option combinations that cannot be honored together.
    if test_status and 'variants' in replay_ignore:
        err = ("Option `--replay-test-status` is incompatible with "
               "`--replay-ignore variants`.")
    elif test_status and config.get('run.references'):
        err = ("Option --replay-test-status is incompatible with "
               "test references given on the command line.")
    elif config.get("remote_hostname", False):
        err = "Currently we don't replay jobs in remote hosts."
    if err is not None:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    # Locate the source job results and record its full id.
    resultsdir = data_dir.get_job_results_dir(
        job_id, config.get('run.results_dir'))
    if resultsdir is None:
        LOG_UI.error("Can't find job results directory for '%s'", job_id)
        sys.exit(exit_codes.AVOCADO_FAIL)
    with open(os.path.join(resultsdir, 'id'), 'r') as id_file:
        config['replay_sourcejob'] = id_file.read().strip()
    replay_config = jobdata.retrieve_job_config(resultsdir)
    # Options that may be carried over from the source job's config.
    whitelist = [
        'loaders', 'external_runner', 'external_runner_testdir',
        'external_runner_chdir', 'failfast',
        'ignore_missing_references', 'execution_order'
    ]
    if replay_config is None:
        LOG_UI.warn('Source job config data not found. These options will '
                    'not be loaded in this replay job: %s',
                    ', '.join(whitelist))
    else:
        for option in whitelist:
            optvalue = config.get(option, None)
            # Temporary, this will be removed soon
            if option in [
                    'failfast', 'ignore_missing_references',
                    'execution_order', 'loaders', 'external_runner',
                    'external_runner_chdir', 'external_runner_testdir'
            ]:
                optvalue = config.get('run.{}'.format(option))
            if optvalue is not None:
                # Command-line value wins over the source job's value.
                LOG_UI.warn(
                    "Overriding the replay %s with the --%s value "
                    "given on the command line.",
                    option.replace('_', '-'),
                    option.replace('_', '-'))
            elif option in replay_config:
                config[option] = replay_config[option]
    # Test references: command line overrides the stored ones.
    if config.get('run.references'):
        LOG_UI.warn('Overriding the replay test references with test '
                    'references given in the command line.')
    else:
        references = jobdata.retrieve_references(resultsdir)
        if references is None:
            LOG_UI.error('Source job test references data not found. '
                         'Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            config['run.references'] = references
    # Source job configuration, unless explicitly ignored.
    if 'config' in replay_ignore:
        LOG_UI.warn("Ignoring configuration from source job with "
                    "--replay-ignore.")
    else:
        self.load_config(resultsdir)
    # Source job variants, unless explicitly ignored.
    if 'variants' in replay_ignore:
        LOG_UI.warn("Ignoring variants from source job with "
                    "--replay-ignore.")
    else:
        variants = jobdata.retrieve_variants(resultsdir)
        if variants is None:
            LOG_UI.error('Source job variants data not found. Aborting.')
            sys.exit(exit_codes.AVOCADO_FAIL)
        else:
            LOG_UI.warning("Using src job Mux data only, use "
                           "`--replay-ignore variants` to override "
                           "them.")
            config["avocado_variants"] = variants
    # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
    # supplied.
    if config.get('run.replay.resume'):
        if not test_status:
            config['replay_teststatus'] = ["INTERRUPTED"]
        elif "INTERRUPTED" not in test_status:
            config['replay_teststatus'].append("INTERRUPTED")
    if test_status:
        replay_map = self._create_replay_map(resultsdir, test_status)
        config['replay_map'] = replay_map
    # Use the original directory to resolve test references properly
    pwd = jobdata.retrieve_pwd(resultsdir)
    if pwd is not None:
        if os.path.exists(pwd):
            os.chdir(pwd)
        else:
            LOG_UI.warn(
                "Directory used in the replay source job '%s' does"
                " not exist, using '.' instead", pwd)
def test_get_job_results_dir(self):
    """Exercise the lookup modes of data_dir.get_job_results_dir().

    Covers lookup by directory path (with and without an 'id' file),
    relative path, id-file path, full id, partial id, the 'latest'
    symlink, and the settings-provided default base logs directory.
    """
    from avocado.core import data_dir, job_id

    # First let's mock a jobs results directory
    #
    logs_dir = self.mapping.get('logs_dir')
    self.assertNotEqual(None, logs_dir)
    unique_id = job_id.create_unique_job_id()
    # Expected job results dir
    expected_jrd = data_dir.create_job_logs_dir(logs_dir, unique_id)

    # Now let's test some cases
    #
    # A directory without an 'id' file must not resolve.
    self.assertEqual(None,
                     data_dir.get_job_results_dir(expected_jrd, logs_dir),
                     ("If passing a directory reference, it expects the id"
                      "file"))

    # Create the id file.
    id_file_path = os.path.join(expected_jrd, 'id')
    with open(id_file_path, 'w') as id_file:
        id_file.write("%s\n" % unique_id)
        id_file.flush()
        os.fsync(id_file)

    self.assertEqual(expected_jrd,
                     data_dir.get_job_results_dir(expected_jrd, logs_dir),
                     "It should get from the path to the directory")

    results_dirname = os.path.basename(expected_jrd)
    # The bare directory name is not resolvable from an arbitrary cwd...
    self.assertEqual(
        None,
        data_dir.get_job_results_dir(results_dirname, logs_dir),
        "It should not get from a valid path to the directory")

    # ...but it is resolvable relative to the logs directory.
    pwd = os.getcwd()
    os.chdir(logs_dir)
    self.assertEqual(
        expected_jrd,
        data_dir.get_job_results_dir(results_dirname, logs_dir),
        "It should get from relative path to the directory")
    os.chdir(pwd)

    self.assertEqual(expected_jrd,
                     data_dir.get_job_results_dir(id_file_path, logs_dir),
                     "It should get from the path to the id file")
    self.assertEqual(expected_jrd,
                     data_dir.get_job_results_dir(unique_id, logs_dir),
                     "It should get from the id")

    another_id = job_id.create_unique_job_id()
    self.assertNotEqual(unique_id, another_id)
    self.assertEqual(None,
                     data_dir.get_job_results_dir(another_id, logs_dir),
                     "It should not get from unexisting job")

    # Partial-id lookups (7 chars and fewer) must still match.
    self.assertEqual(expected_jrd,
                     data_dir.get_job_results_dir(unique_id[:7], logs_dir),
                     "It should get from partial id equals to 7 digits")
    self.assertEqual(expected_jrd,
                     data_dir.get_job_results_dir(unique_id[:4], logs_dir),
                     "It should get from partial id less than 7 digits")

    # An id sharing only the first 7 chars must not match.
    almost_id = unique_id[:7] + ('a' * (len(unique_id) - 7))
    self.assertNotEqual(unique_id, almost_id)
    self.assertEqual(None,
                     data_dir.get_job_results_dir(almost_id, logs_dir),
                     ("It should not get if the id is equal on only"
                      "the first 7 characters"))

    # The special 'latest' symlink must resolve to the job dir.
    os.symlink(expected_jrd, os.path.join(logs_dir, 'latest'))
    self.assertEqual(expected_jrd,
                     data_dir.get_job_results_dir('latest', logs_dir),
                     "It should get from the 'latest' id")

    # Finally, with settings patched in, the base logs dir argument can
    # be omitted and the configured default is used.
    stg = settings.Settings()
    with unittest.mock.patch('avocado.core.stgs', stg):
        import avocado.core
        avocado.core.register_core_options()
        stg.process_config_path(self.config_file_path)
        stg.merge_with_configs()
    with unittest.mock.patch('avocado.core.data_dir.settings', stg):
        self.assertEqual(expected_jrd,
                         data_dir.get_job_results_dir(unique_id),
                         "It should use the default base logs directory")