def _save_series_id(self):
    """Save the series id to json file that tracks last series ran by
    user on a per system basis.

    The file lives at ``<working_dir>/users/<username>.json`` and maps
    system name -> last series id. Access is serialized with a lock
    file, and the write happens under a PermissionsManager so the
    configured shared group and umask apply.
    """

    sys_vars = system_variables.get_vars(True)
    sys_name = sys_vars['sys_name']

    json_file = self.pav_cfg.working_dir/'users'
    json_file /= '{}.json'.format(utils.get_login())

    lockfile_path = json_file.with_suffix('.lock')

    with LockFile(lockfile_path):
        data = {}
        # Load any existing mapping. A missing file or one that isn't
        # valid json (e.g. empty) simply means we start fresh.
        try:
            with json_file.open('r') as json_series_file:
                try:
                    data = json.load(json_series_file)
                except json.decoder.JSONDecodeError:
                    # File was empty, therefore json couldn't be loaded.
                    pass
        except FileNotFoundError:
            # File hadn't been created yet.
            pass

        data[sys_name] = self.sid

        # Single write path (the original duplicated this block in both
        # the found and not-found cases).
        with PermissionsManager(json_file, self.pav_cfg['shared_group'],
                                self.pav_cfg['umask']), \
                json_file.open('w') as json_series_file:
            json_series_file.write(json.dumps(data))
def _sys_var_cmd(pav_cfg, args, outfile=sys.stdout):
    """Print a table of every known system variable.

    :param pav_cfg: The pavilion config (unused).
    :param args: Parsed command arguments; ``verbose`` adds the plugin
        path column.
    :param outfile: The stream to draw the table to.
    """

    del pav_cfg  # Unused.

    sys_vars = system_variables.get_vars(defer=True)

    rows = []
    for var_name in sorted(sys_vars.keys()):
        var_value = sys_vars[var_name]
        is_deferred = isinstance(var_value, DeferredVariable)
        rows.append({
            'name': var_name,
            'value': '<deferred>' if is_deferred else var_value,
            'description': sys_vars.help(var_name),
            'path': sys_vars.get_obj(var_name).path,
        })

    fields = ['name', 'value', 'description']
    if args.verbose:
        fields.append('path')

    utils.draw_table(
        outfile,
        field_info={},
        fields=fields,
        rows=rows,
        title="Available System Variables")
def _system_variables_cmd(self, _, args):
    """Display a table of system variables, marking deferred values
    and rendering plugin errors inline in red."""

    sys_vars = system_variables.get_vars(defer=True)

    rows = []
    for var_name in sorted(sys_vars.keys()):
        try:
            var_value = sys_vars[var_name]
            is_deferred = isinstance(var_value, DeferredVariable)
            description = sys_vars.help(var_name)
        except system_variables.SystemPluginError as err:
            # Show the failure in the table rather than aborting the
            # whole listing.
            var_value = output.ANSIString('error', code=output.RED)
            is_deferred = False
            description = output.ANSIString(str(err), code=output.RED)

        rows.append({
            'name': var_name,
            'value': '<deferred>' if is_deferred else var_value,
            'description': description,
            'path': sys_vars.get_obj(var_name).path,
        })

    fields = ['name', 'value', 'description']
    if args.verbose:
        fields.append('path')

    output.draw_table(
        self.outfile,
        fields=fields,
        rows=rows,
        title="Available System Variables")
def test_sys_name_tracker(self):
    """Make sure the expected values are stored in the user.json file."""

    username = utils.get_login()
    sys_name = system_variables.get_vars(True)['sys_name']

    # Kick off a simple run so a series gets recorded.
    args = arguments.get_parser().parse_args(['run', 'hello_world'])
    run_cmd = commands.get_command(args.command_name)
    run_cmd.outfile = io.StringIO()
    run_cmd.errfile = run_cmd.outfile
    run_cmd.run(self.pav_cfg, args)

    series = run_cmd.last_series

    # The tracker file maps system name -> last series id.
    json_file = self.pav_cfg.working_dir / 'users'
    json_file /= '{}.json'.format(username)
    with json_file.open('r') as json_series_file:
        data = json.load(json_series_file)

    self.assertEqual(data[sys_name], series.sid)
def _get_tests(self, pav_cfg, args, mb_tracker, build_only=False,
               local_builds_only=False):
    """Turn the test run arguments into actual TestRun objects.

    :param pav_cfg: The pavilion config object
    :param args: The run command arguments
    :param MultiBuildTracker mb_tracker: The build tracker.
    :param bool build_only: Whether to denote that we're only building
        these tests.
    :param bool local_builds_only: Only include tests that would be
        built locally.
    :return: A dict of scheduler name to TestRun list, or None on error.
    :rtype: {}
    """

    overrides = self._parse_overrides(args.overrides)

    sys_vars = system_variables.get_vars(True)

    try:
        configs_by_sched = self._get_test_configs(
            pav_cfg=pav_cfg,
            host=args.host,
            test_files=args.files,
            tests=args.tests,
            modes=args.modes,
            overrides=overrides,
            sys_vars=sys_vars,
        )

        # Remove non-local builds when doing only local builds.
        if build_only and local_builds_only:
            for sched in configs_by_sched:
                # Keep only configs that build locally. (One-pass
                # comprehension instead of marking entries None and
                # filtering afterwards.)
                configs_by_sched[sched] = [
                    (config, extra)
                    for config, extra in configs_by_sched[sched]
                    if config['build']['on_nodes'].lower() != 'true'
                ]

        tests_by_sched = self._configs_to_tests(
            pav_cfg=pav_cfg,
            configs_by_sched=configs_by_sched,
            mb_tracker=mb_tracker,
            build_only=build_only,
            rebuild=args.rebuild,
        )

    except commands.CommandError as err:
        # Our error messages get escaped to a silly degree
        err = codecs.decode(str(err), 'unicode-escape')
        fprint(err, file=self.errfile, flush=True)
        return None

    return tests_by_sched
def _quick_test(self, cfg=None, name="quick_test",
                build=True, finalize=True,
                sched_vars=None):
    """Create a test run object to work with.

    The default is a simple hello world test with the raw scheduler.

    :param dict cfg: An optional config dict to create the test from.
    :param str name: The name of the test.
    :param bool build: Build this test, while we're at it.
    :param bool finalize: Finalize this test.
    :param dict sched_vars: Add these scheduler variables to our var set.
    :rtype: TestRun
    """

    if cfg is None:
        cfg = self._quick_test_cfg()

    # Normalize/validate a private copy of the config.
    loader = TestConfigLoader()
    cfg = loader.validate(loader.normalize(copy.deepcopy(cfg)))
    cfg['name'] = name

    var_man = VariableSetManager()
    var_man.add_var_set('var', cfg['variables'])
    var_man.add_var_set('sys', system_variables.get_vars(defer=True))
    var_man.add_var_set('pav', self.pav_cfg.pav_vars)
    if sched_vars is not None:
        var_man.add_var_set('sched', sched_vars)
    var_man.resolve_references()

    cfg = resolver.TestConfigResolver.resolve_test_vars(cfg, var_man)

    test = TestRun(
        pav_cfg=self.pav_cfg,
        config=cfg,
        var_man=var_man,
    )

    if build:
        test.build()

    if finalize:
        # Finalization requires a fresh (unique) system variable dict.
        fin_var_man = VariableSetManager()
        fin_var_man.add_var_set(
            'sys', system_variables.SysVarDict(unique=True))
        resolver.TestConfigResolver.finalize(test, fin_var_man)

    return test
def __init__(self, pav_cfg):
    """Set up the base variable manager used for test resolution.

    :param pav_cfg: The pavilion configuration.
    :raises TestConfigError: If the system variable plugins fail to
        load their values.
    """

    self.pav_cfg = pav_cfg

    self.base_var_man = variables.VariableSetManager()

    try:
        self.base_var_man.add_var_set(
            'sys', system_variables.get_vars(defer=True))
    except system_variables.SystemPluginError as err:
        # Chain the original exception so the root cause isn't lost.
        raise TestConfigError(
            "Error in system variables: {}".format(err)) from err

    self.base_var_man.add_var_set('pav', pavilion_variables.PavVars())

    self.logger = logging.getLogger(__file__)
def make_series_filter(
        user: str = None, sys_name: str = None,
        newer_than: dt.datetime = None, older_than: dt.datetime = None,
        complete: bool = False,
        incomplete: bool = False) -> Callable[[Dict[str, Any]], bool]:
    """Generate a filter for using with dir_db functions to filter
    series. This is expected to operate on series.SeriesInfo objects,
    so make sure to pass Series info as the dir_db transform function.

    :param complete: Only accept series for which all tests are complete.
    :param incomplete: Only accept series for which not all tests are
        complete.
    :param newer_than: Only accept series created after this time.
    :param older_than: Only accept series created before this time.
    :param sys_name: Only accept series created on this system.
    :param user: Only accept series created by this user.
    """

    # Resolve the 'local' placeholder once, so the generated filter
    # doesn't repeat the lookup.
    if sys_name == LOCAL_SYS_NAME:
        sys_name = system_variables.get_vars(defer=True)['sys_name']

    def series_filter(series: Dict[str, Any]):
        """Generated series filter function."""

        # Every check can only reject; a series passing all of them is
        # accepted.
        if user is not None and series['user'] != user:
            return False

        created = series.get('created')
        if newer_than and created < newer_than:
            return False
        if older_than and created > older_than:
            return False

        is_complete = series.get('complete')
        if complete and not is_complete:
            return False
        if incomplete and is_complete:
            return False

        if sys_name and series.get('sys_name') != sys_name:
            return False

        return True

    return series_filter
def load_user_series_id(pav_cfg):
    """Load the last series id used by the current user.

    :param pav_cfg: The pavilion configuration.
    :return: The series id string for the current system, or None if it
        can't be determined.
    """

    last_series_fn = pav_cfg.working_dir / 'users'
    last_series_fn /= '{}.json'.format(utils.get_login())

    sys_vars = system_variables.get_vars(True)
    sys_name = sys_vars['sys_name']

    if not last_series_fn.exists():
        return None
    try:
        with last_series_fn.open() as last_series_file:
            sys_name_series_dict = json.load(last_series_file)
            return sys_name_series_dict[sys_name].strip()
    except (IOError, OSError, KeyError,
            json.decoder.JSONDecodeError) as err:
        # A corrupt or empty json file (JSONDecodeError) is treated the
        # same as a missing entry, instead of crashing the caller.
        logger.warning("Failed to read series id file '%s': %s",
                       last_series_fn, err)
        return None
def run(self, pav_cfg, args):
    """Resolve the requested test's configuration(s) and pretty-print
    them, without creating or kicking off any test runs.

    :param pav_cfg: The pavilion configuration.
    :param args: The parsed command line arguments (``test``, ``host``,
        ``modes``, ``overrides``).
    :return: ``errno.EINVAL`` on bad input or resolution failure.
    """

    overrides = {}
    for ovr in args.overrides:
        if '=' not in ovr:
            fprint(
                "Invalid override value. Must be in the form: "
                "<key>=<value>. Ex. -c run.modules=['gcc'] ",
                file=self.errfile)
            return errno.EINVAL

        key, value = ovr.split('=', 1)
        overrides[key] = value

    tests = [args.test]

    self.logger.debug("Finding Configs")

    sys_vars = system_variables.get_vars(True)

    try:
        configs = self._get_test_configs(
            pav_cfg=pav_cfg,
            host=args.host,
            test_files=[],
            tests=tests,
            modes=args.modes,
            overrides=overrides,
            sys_vars=sys_vars,
        )
    except commands.CommandError as err:
        fprint(err, file=self.errfile, color=output.RED)
        return errno.EINVAL

    # Flatten the per-scheduler lists into one list of configs.
    configs = sum(configs.values(), [])

    for config in configs:
        pprint.pprint(config, stream=self.outfile)  # ext-print: ignore
def setUp(self) -> None:
    """Initialize plugins and build the variable manager the tests use."""

    plugins.initialize_plugins(self.pav_cfg)

    # A grab-bag of scalar, list, and struct-shaped test variables.
    test_vars = {
        'int1': "1",
        'int2': "2",
        'float1': '1.1',
        'str1': 'hello',
        'ints': ['0', '1', '2', '3', '4', '5'],
        'floats': ['0.1', '2.3'],
        'more_ints': ['0', '1'],
        'struct': {
            'cpus': '200',
            'flops': '2.1',
            'name': 'earth_chicken',
        },
        'structs': [
            {'type': 'cat', 'bites': '3', 'evil_rating': '5.2'},
            {'type': 'dog', 'bites': '0', 'evil_rating': '0.2'},
            {'type': 'fish', 'bites': '1', 'evil_rating': '9.7'},
        ],
    }

    self.var_man = variables.VariableSetManager()
    self.var_man.add_var_set('var', test_vars)
    self.var_man.add_var_set('sys', system_variables.get_vars(defer=True))
def _get_var_man(test, sched):
    """Get the variable manager for the given test.

    :param TestRun test: The test run object
    :param sched: The scheduler for this test.
    :rtype VariableSetManager
    """

    # Re-add var sets that may have had deferred variables.
    try:
        var_man = VariableSetManager()
        var_man.add_var_set('sys', system_variables.get_vars(defer=False))
        sched_cfg = test.config[test.scheduler]
        var_man.add_var_set('sched', sched.get_vars(sched_cfg))
    except Exception:
        # Record the failure on the test's status before propagating.
        test.status.set(
            STATES.RUN_ERROR,
            "Unknown error getting pavilion variables at "
            "run time.")
        raise

    return var_man
def test_status_command_with_sched(self):
    """Test status command when test is 'SCHEDULED'."""

    cfg = file_format.TestConfigLoader().validate({
        'scheduler': 'raw',
        'run': {
            'env': {'foo': 'bar'},
            'cmds': ['sleep 1'],
        },
    })
    cfg['name'] = 'testytest'

    test = PavTest(self.pav_cfg, cfg, system_variables.get_vars(False))
    test.build()

    schedulers.get_scheduler_plugin(test.scheduler) \
        .schedule_test(self.pav_cfg, test)

    status_cmd = commands.get_command('status')
    status_cmd.outfile = io.StringIO()

    # Tabular output.
    parser = argparse.ArgumentParser()
    status_cmd._setup_arguments(parser)
    args = parser.parse_args([str(test.id)])
    test.status.set(status_file.STATES.SCHEDULED, "faker")
    self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)

    # JSON output.
    parser = argparse.ArgumentParser()
    status_cmd._setup_arguments(parser)
    args = parser.parse_args(['-j', str(test.id)])
    test.status.set(status_file.STATES.SCHEDULED, "faker")
    self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)
def _run(self, pav_cfg, test):
    """Run an already prepped test in the current environment.

    :param pav_cfg: The pavilion configuration.
    :param test: The TestRun object to execute.
    :return: 1 on failure, None on success.
    """

    try:
        sched = schedulers.get_plugin(test.scheduler)
    except Exception:
        test.status.set(STATES.BUILD_ERROR,
                        "Unknown error getting the scheduler. Refer to "
                        "the kickoff log.")
        raise

    # Re-add var sets that may have had deferred variables.
    try:
        var_man = VariableSetManager()
        var_man.add_var_set('sys', system_variables.get_vars(defer=False))
        sched_config = test.config[test.scheduler]
        var_man.add_var_set('sched', sched.get_vars(sched_config))
    except Exception:
        test.status.set(STATES.RUN_ERROR,
                        "Unknown error getting pavilion variables at "
                        "run time.")
        raise

    try:
        test.finalize(var_man)
    except Exception:
        test.status.set(STATES.RUN_ERROR, "Unknown error finalizing test.")
        raise

    try:
        if test.config['build']['on_nodes'] in ['true', 'True']:
            if not test.build():
                # Bug fix: the message was never formatted, so the
                # literal '{t.id}' appeared in the log.
                self.logger.warning(
                    "Test {t.id} failed to build:".format(t=test))
    except Exception:
        test.status.set(STATES.BUILD_ERROR,
                        "Unknown build error. Refer to the kickoff log.")
        raise

    # Optionally wait on other tests running under the same scheduler.
    # This depends on the scheduler and the test configuration.
    lock = sched.lock_concurrency(pav_cfg, test)

    try:
        run_result = test.run()
    except TestRunError as err:
        test.status.set(STATES.RUN_ERROR, err)
        return 1
    except TimeoutError:
        return 1
    except Exception:
        test.status.set(
            STATES.RUN_ERROR,
            "Unknown error while running test. Refer to the kickoff log.")
        raise
    finally:
        sched.unlock_concurrency(lock)

    try:
        rp_errors = []
        # Make sure the result parsers have reasonable arguments.
        # We check here because the parser code itself will likely assume
        # the args are valid form _check_args, but those might not be
        # checkable before kickoff due to deferred variables.
        try:
            result_parsers.check_args(test.config['results'])
        except TestRunError as err:
            rp_errors.append(str(err))

        if rp_errors:
            for msg in rp_errors:
                test.status.set(STATES.RESULTS_ERROR, msg)
            test.set_run_complete()
            return 1

        results = test.gather_results(run_result)
    except result_parsers.ResultParserError as err:
        self.logger.error("Unexpected error gathering results: %s", err)
        test.status.set(STATES.RESULTS_ERROR,
                        "Error parsing results: {}".format(err))
        return 1

    try:
        test.save_results(results)
        result_logger = logging.getLogger('results')
        result_logger.info(output.json_dumps(results))
    except Exception:
        test.status.set(
            STATES.RESULTS_ERROR,
            "Unknown error while saving results. Refer to the kickoff log.")
        raise

    try:
        test.status.set(STATES.COMPLETE,
                        "The test completed with result: {}"
                        .format(results.get('result', '<unknown>')))
    except Exception:
        test.status.set(
            STATES.UNKNOWN,
            "Unknown error while setting test completion. Refer to the "
            "kickoff log.")
        raise
def run(self, pav_cfg, args, out_file=sys.stdout, err_file=sys.stderr):
    """Resolve the test configurations into individual tests and assign
    to schedulers. Have those schedulers kick off jobs to run the
    individual tests themselves.

    :param pav_cfg: The pavilion configuration.
    :param args: The parsed command line argument object.
    :param out_file: The file object to output to (stdout)
    :param err_file: The file object to output errors to (stderr)
    :return: 0 on success, an errno code on failure.
    """
    # 1. Resolve the test configs
    #   - Get sched vars from scheduler.
    #   - Compile variables.

    overrides = {}
    for ovr in args.overrides:
        if '=' not in ovr:
            fprint(
                "Invalid override value. Must be in the form: "
                "<key>=<value>. Ex. -c run.modules=['gcc'] ",
                file=self.errfile)
            return errno.EINVAL

        key, value = ovr.split('=', 1)
        overrides[key] = value

    sys_vars = system_variables.get_vars(True)

    try:
        configs_by_sched = self._get_tests(
            pav_cfg=pav_cfg,
            host=args.host,
            test_files=args.files,
            tests=args.tests,
            modes=args.modes,
            overrides=overrides,
            sys_vars=sys_vars,
        )

        tests_by_sched = self._configs_to_tests(
            pav_cfg=pav_cfg,
            sys_vars=sys_vars,
            configs_by_sched=configs_by_sched,
        )
    except commands.CommandError as err:
        fprint(err, file=self.errfile)
        return errno.EINVAL

    all_tests = sum(tests_by_sched.values(), [])

    if not all_tests:
        fprint("You must specify at least one test.", file=self.errfile)
        return errno.EINVAL

    series = TestSeries(pav_cfg, all_tests)

    # Make sure the result parsers have reasonable arguments.
    rp_errors = []
    for test in all_tests:
        try:
            result_parsers.check_args(test.config['results'])
        except PavTestError as err:
            rp_errors.append(str(err))

    if rp_errors:
        fprint("Result Parser configurations had errors:",
               file=self.errfile, color=utils.RED)
        for msg in rp_errors:
            fprint(msg, bullet=' - ', file=self.errfile)
        return errno.EINVAL

    # Build any tests that specify that they should be built beforehand.
    for test in all_tests:
        if test.config['build']['on_nodes'] not in ['true', 'True']:
            if not test.build():
                # Mark all other tests sharing this build as cancelled.
                for oth_test in all_tests:
                    if oth_test.build_hash != test.build_hash:
                        oth_test.status.set(
                            STATES.BUILD_ERROR,
                            "Build cancelled because build {} failed."
                            .format(test.id))

                fprint("Error building test: ", file=self.errfile,
                       color=utils.RED)
                fprint("status {status.state} - {status.note}".format(
                    status=test.status.current()), file=self.errfile)
                fprint("For more information, run 'pav log build {}'"
                       .format(test.id), file=self.errfile)
                return errno.EINVAL

    for sched_name, tests in tests_by_sched.items():
        sched = schedulers.get_scheduler_plugin(sched_name)

        try:
            sched.schedule_tests(pav_cfg, tests)
        except schedulers.SchedulerPluginError as err:
            fprint('Error scheduling tests:', file=self.errfile,
                   color=utils.RED)
            fprint(err, bullet=' ', file=self.errfile)
            fprint('Cancelling already kicked off tests.',
                   file=self.errfile)
            self._cancel_all(tests_by_sched)
            # Bug fix: we previously fell through after cancelling
            # everything, kept scheduling the remaining schedulers,
            # and reported success.
            return errno.EINVAL

    # Tests should all be scheduled now, and have the SCHEDULED state
    # (at some point, at least). Wait until something isn't scheduled
    # anymore (either running or dead), or our timeout expires.
    wait_result = None
    if args.wait is not None:
        end_time = time.time() + args.wait
        while time.time() < end_time and wait_result is None:
            last_time = time.time()
            for sched_name, tests in tests_by_sched.items():
                sched = schedulers.get_scheduler_plugin(sched_name)
                for test in tests:
                    status = test.status.current()
                    if status == STATES.SCHEDULED:
                        status = sched.job_status(pav_cfg, test)

                    if status != STATES.SCHEDULED:
                        # The test has moved past the scheduled state.
                        # NOTE(review): this assigns None, which never
                        # ends the while loop early - confirm the
                        # intended sentinel value.
                        wait_result = None
                        break
                # NOTE(review): only the first scheduler's tests are
                # ever polled - confirm this break is intentional.
                break

            if wait_result is None:
                # Sleep at most SLEEP INTERVAL seconds, minus the time
                # we spent checking our jobs.
                time.sleep(self.SLEEP_INTERVAL - (time.time() - last_time))

    fprint("{} test{} started as test series {}.".format(
        len(all_tests),
        's' if len(all_tests) > 1 else '',
        series.id),
        file=self.outfile, color=utils.GREEN)

    if args.status:
        tests = list(series.tests.keys())
        tests, _ = test_obj_from_id(pav_cfg, tests)
        return print_from_test_obj(pav_cfg, tests, self.outfile, args.json)

    return 0
def test_status_command(self):
    """Test status command by generating a suite of tests."""

    # (name, run env, run cmds) for each generated test config.
    base_cfgs = [
        ('run_test0', {'foo': 'bar'}, ['echo "I $foo, punks"']),
        ('run_test1', {'too': 'tar'}, ['echo "I $too, punks"']),
        ('run_test2', {'too': 'tar'}, ['sleep 10']),
    ]

    configs = []
    for test_name, env, cmds in base_cfgs:
        cfg = file_format.TestConfigLoader().validate({
            'scheduler': 'raw',
            'run': {
                'env': env,
                'cmds': cmds,
            },
        })
        cfg['name'] = test_name
        configs.append(cfg)

    sys_vars = system_variables.get_vars(False)
    tests = [PavTest(self.pav_cfg, cfg, sys_vars) for cfg in configs]
    for test in tests:
        test.RUN_SILENT_TIMEOUT = 1

    # Make sure this doesn't explode
    suite = TestSeries(self.pav_cfg, tests)
    test_str = " ".join([str(test) for test in suite.tests])

    status_cmd = commands.get_command('status')
    status_cmd.outfile = io.StringIO()

    def run_status(arg_list):
        """Parse the argument list and run the status command,
        asserting it succeeds."""
        parser = argparse.ArgumentParser()
        status_cmd._setup_arguments(parser)
        args = parser.parse_args(arg_list)
        self.assertEqual(status_cmd.run(self.pav_cfg, args), 0)

    # Individual tests with json output.
    for test in suite.tests:
        run_status(['-j', str(test)])

    # Multiple tests with json output.
    run_status(['-j'] + test_str.split())

    # Individual tests with tabular output.
    for test in suite.tests:
        run_status([str(test)])

    # Multiple tests with tabular output.
    run_status(test_str.split())
def test_system_plugins(self):
    """Make sure system values appear as expected. Also that deferred
    variables behave as expected."""

    # Get an empty pavilion config and set some config dirs on it.
    plugins.initialize_plugins(self.pav_cfg)

    self.assertIsNotNone(system_variables._LOADED_PLUGINS)

    host_arch = subprocess.check_output(['uname', '-i'])
    host_arch = host_arch.strip().decode('UTF-8')

    host_name = subprocess.check_output(['hostname', '-s'])
    host_name = host_name.strip().decode('UTF-8')

    with open('/etc/os-release', 'r') as release:
        rlines = release.readlines()
    host_os = {}
    for line in rlines:
        if line[:3] == 'ID=':
            host_os['name'] = line[3:].strip().strip('"')
        elif line[:11] == 'VERSION_ID=':
            host_os['version'] = line[11:].strip().strip('"')

    sys_vars = system_variables.get_vars(defer=False)

    # Each variable is absent until first accessed, then present
    # afterwards (lazy lookup).
    self.assertNotIn('sys_arch', sys_vars)
    self.assertEqual(host_arch, sys_vars['sys_arch'])
    self.assertIn('sys_arch', sys_vars)

    self.assertNotIn('sys_host', sys_vars)
    self.assertEqual(host_name, sys_vars['sys_host'])
    self.assertIn('sys_host', sys_vars)

    self.assertNotIn('sys_os', sys_vars)
    self.assertEqual(host_os['name'], sys_vars['sys_os']['name'])
    self.assertEqual(host_os['version'], sys_vars['sys_os']['version'])
    self.assertIn('sys_os', sys_vars)

    self.assertNotIn('host_arch', sys_vars)
    self.assertEqual(host_arch, sys_vars['host_arch'])
    self.assertIn('host_arch', sys_vars)

    self.assertNotIn('host_name', sys_vars)
    self.assertEqual(host_name, sys_vars['host_name'])
    self.assertIn('host_name', sys_vars)

    self.assertNotIn('host_os', sys_vars)
    self.assertEqual(host_os['name'], sys_vars['host_os']['name'])
    self.assertEqual(host_os['version'], sys_vars['host_os']['version'])
    self.assertIn('host_os', sys_vars)

    # Re-initialize the plugin system.
    plugins._reset_plugins()

    # Make sure these have been wiped.
    self.assertIsNone(system_variables._LOADED_PLUGINS)
    # Make sure these have been wiped.
    self.assertIsNone(system_variables._SYS_VAR_DICT)

    plugins.initialize_plugins(self.pav_cfg)

    # but these are back
    self.assertIsNotNone(system_variables._LOADED_PLUGINS)

    sys_vars = system_variables.get_vars(defer=True)

    # Check that the deferred values are actually deferred.
    self.assertNotIn('host_arch', sys_vars)
    self.assertIsInstance(sys_vars['host_arch'],
                          variables.DeferredVariable)
    self.assertNotIn('host_name', sys_vars)
    self.assertIsInstance(sys_vars['host_name'],
                          variables.DeferredVariable)
    self.assertNotIn('host_os', sys_vars)
    self.assertIsInstance(sys_vars['host_os'],
                          variables.DeferredVariable)

    plugins._reset_plugins()
def make_test_run_filter(complete: bool = False, failed: bool = False,
                         incomplete: bool = False, name: str = None,
                         newer_than: float = None, older_than: float = None,
                         passed: bool = False, result_error: bool = False,
                         show_skipped: bool = False, sys_name: str = None,
                         user: str = None):
    """Generate a filter function for use by dir_db.select and similar
    functions. This operates on TestAttribute objects, so make sure to
    pass the TestAttribute class as the transform to dir_db functions.

    :param complete: Only accept complete tests
    :param failed: Only accept failed tests
    :param incomplete: Only accept incomplete tests
    :param name: Only accept names that match this glob.
    :param newer_than: Only accept tests that are more recent than this
        date.
    :param older_than: Only accept tests older than this date.
    :param passed: Only accept passed tests
    :param result_error: Only accept tests with a result error.
    :param show_skipped: Accept skipped tests.
    :param sys_name: Only accept tests with a matching sys_name.
    :param user: Only accept tests started by this user.
    :return:
    """

    if sys_name == LOCAL_SYS_NAME:
        # Resolve 'local' once, here, so the generated filter doesn't
        # repeat the lookup.
        sys_vars = system_variables.get_vars(defer=True)
        sys_name = sys_vars['sys_name']

    def filter_test_run(test_attrs: dict) -> bool:
        """Determine whether the test run at the given path should be
        included in the set."""

        skipped = test_attrs.get('skipped')
        if show_skipped == 'no' and skipped:
            return False
        if show_skipped == 'only' and not skipped:
            return False

        is_complete = test_attrs.get('complete')
        if complete and not is_complete:
            return False
        if incomplete and is_complete:
            return False

        if user and test_attrs.get('user') != user:
            return False

        if sys_name and sys_name != test_attrs.get('sys_name'):
            return False

        result = test_attrs.get('result')
        if passed and result != TestRun.PASS:
            return False
        if failed and result != TestRun.FAIL:
            return False
        if result_error and result != TestRun.ERROR:
            return False

        created = test_attrs.get('created')
        if older_than is not None and created > older_than:
            return False
        if newer_than is not None and created < newer_than:
            return False

        if name and not fnmatch.fnmatch(test_attrs.get('name'), name):
            return False

        return True

    return filter_test_run
def test_set_status_command(self):
    """Test set status command by generating a suite of tests."""

    # (name, run env, run cmds) for each generated test config.
    base_cfgs = [
        ('run_test0', {'foo': 'bar'}, ['echo "I $foo, punks"']),
        ('run_test1', {'too': 'tar'}, ['echo "I $too, punks"']),
        ('run_test2', {'too': 'tar'}, ['sleep 10']),
    ]

    configs = []
    for test_name, env, cmds in base_cfgs:
        cfg = file_format.TestConfigLoader().validate({
            'scheduler': 'raw',
            'run': {
                'env': env,
                'cmds': cmds,
            },
        })
        cfg['name'] = test_name
        configs.append(cfg)

    sys_vars = system_variables.get_vars(False)
    tests = [PavTest(self.pav_cfg, cfg, sys_vars) for cfg in configs]
    for test in tests:
        test.RUN_SILENT_TIMEOUT = 1

    set_status_cmd = commands.get_command('set_status')
    set_status_cmd.outfile = io.StringIO()

    # Set a new status on each test and verify both the state and the
    # note changed to the requested values.
    for test in tests:
        start_status = test.status.current()

        parser = argparse.ArgumentParser()
        set_status_cmd._setup_arguments(parser)
        args = parser.parse_args([
            '-s', 'RUN_USER',
            '-n', 'tacos are delicious',
            str(test.id),
        ])
        self.assertEqual(set_status_cmd.run(self.pav_cfg, args), 0)

        end_status = test.status.current()
        self.assertNotEqual(end_status.state, start_status.state)
        self.assertNotEqual(end_status.note, start_status.note)
        self.assertEqual(end_status.state, 'RUN_USER')
        self.assertEqual(end_status.note, 'tacos are delicious')
def run(self, pav_cfg, args, out_file=sys.stdout, err_file=sys.stderr):
    """Load and run an already prepped test in the current environment.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed arguments; ``test_id`` names the test to run.
    :param out_file: Output stream (kept for interface compatibility).
    :param err_file: Error stream (kept for interface compatibility).
    :return: 1 on failure, None on success.
    """

    try:
        test = PavTest.load(pav_cfg, args.test_id)
    except PavTestError as err:
        self.logger.error("Error loading test '%s': %s", args.test_id, err)
        raise

    try:
        if test.config['build']['on_nodes'] in ['true', 'True']:
            if not test.build():
                # Bug fix: the message was never formatted, so the
                # literal '{t.id}' appeared in the log.
                self.logger.warning(
                    "Test {t.id} failed to build:".format(t=test))
    except Exception:
        test.status.set(STATES.BUILD_ERROR,
                        "Unknown build error. Refer to the kickoff log.")
        raise

    try:
        sched = schedulers.get_scheduler_plugin(test.scheduler)
    except Exception:
        test.status.set(
            STATES.BUILD_ERROR,
            "Unknown error getting the scheduler. Refer to "
            "the kickoff log.")
        raise

    # Optionally wait on other tests running under the same scheduler.
    # This depends on the scheduler and the test configuration.
    lock = sched.lock_concurrency(pav_cfg, test)

    try:
        run_result = test.run(sched.get_vars(test),
                              system_variables.get_vars(defer=False))
    except PavTestError as err:
        test.status.set(STATES.RUN_ERROR, err)
        test.set_run_complete()
        return 1
    except Exception:
        test.status.set(
            STATES.RUN_ERROR,
            "Unknown error while running test. Refer to the kickoff log.")
        raise
    finally:
        sched.unlock_concurrency(lock)

    # The test.run() method should have already logged the error and
    # set an appropriate status.
    if run_result in (STATES.RUN_ERROR, STATES.RUN_TIMEOUT):
        return 1

    try:
        rp_errors = []
        # Make sure the result parsers have reasonable arguments.
        # We check here because the parser code itself will likely assume
        # the args are valid form _check_args, but those might not be
        # checkable before kickoff due to deferred variables.
        try:
            result_parsers.check_args(test.config['results'])
        except PavTestError as err:
            rp_errors.append(str(err))

        if rp_errors:
            for msg in rp_errors:
                test.status.set(STATES.RESULTS_ERROR, msg)
            test.set_run_complete()
            return 1

        results = test.gather_results(run_result)
    except result_parsers.ResultParserError as err:
        self.logger.error("Unexpected error gathering results: %s", err)
        test.status.set(STATES.RESULTS_ERROR,
                        "Error parsing results: {}".format(err))
        test.set_run_complete()
        return 1

    try:
        test.save_results(results)
        result_logger = logging.getLogger('results')
        result_logger.info(utils.json_dumps(results))
    except Exception:
        test.status.set(
            STATES.RESULTS_ERROR,
            "Unknown error while saving results. Refer to the kickoff log."
        )
        raise

    try:
        test.status.set(
            STATES.COMPLETE,
            "The test completed with result: {}".format(
                results.get('result', '<unknown>')))
        test.set_run_complete()
    except Exception:
        test.status.set(
            STATES.UNKNOWN,
            "Unknown error while setting test completion. Refer to the "
            "kickoff log.")
        raise