def test_clean_wait(self):
    """Run a quick test, give it time to finish, then verify that
    'pav clean' exits successfully."""
    parser = arguments.get_parser()

    # Kick off the test run, capturing its output.
    run_args = parser.parse_args(['run', '-H', 'this', 'clean_test'])
    runner = commands.get_command(run_args.command_name)
    runner.outfile = StringIO()
    runner.run(self.pav_cfg, run_args)

    # Give the tests a chance to finish before cleaning.
    time.sleep(5)

    clean_args = parser.parse_args(['clean'])
    cleaner = commands.get_command(clean_args.command_name)
    cleaner.outfile = StringIO()
    cleaner.errfile = StringIO()
    self.assertEqual(cleaner.run(self.pav_cfg, clean_args), 0)
def test_local_builds_only(self):
    """Check that '--local-builds-only' builds run simultaneously and
    still produce distinct builds."""
    parser = arguments.get_parser()
    args = parser.parse_args(
        ['build', '-H', 'this', '--local-builds-only', 'build_parallel'])
    build_cmd = commands.get_command(args.command_name)  # type: RunCommand

    ret = build_cmd.run(self.pav_cfg, args)
    build_cmd.outfile.seek(0)
    self.assertEqual(ret, 0, msg=build_cmd.outfile.read())

    for test in build_cmd.last_tests:
        test.wait(timeout=10)

    # Make sure we actually got two separate builds.
    unique_builds = {test.builder.name for test in build_cmd.last_tests}
    self.assertEqual(len(unique_builds), 2)

    for test in build_cmd.last_tests:
        self.assertEqual(test.status.current().state, STATES.BUILD_DONE,
                         msg='Test {} status: {}'.format(
                             test.id, test.status.current()))
def test_multi_build(self):
    """Make sure we can run multiple simultaneous builds on both the
    front-end and the nodes."""
    parser = arguments.get_parser()
    args = parser.parse_args(['run', '-H', 'this', 'build_parallel'])
    run_cmd = commands.get_command(args.command_name)  # type: RunCommand

    ret = run_cmd.run(self.pav_cfg, args)
    run_cmd.outfile.seek(0)
    self.assertEqual(ret, 0, msg=run_cmd.outfile.read())

    for test in run_cmd.last_tests:
        test.wait(timeout=4)

    # Make sure we actually got four separate builds.
    unique_builds = {test.builder.name for test in run_cmd.last_tests}
    self.assertEqual(len(unique_builds), 4)

    for test in run_cmd.last_tests:
        self.assertEqual(test.results['result'], 'PASS',
                         msg='Test {} status: {}'
                             .format(test.id, test.status.current()))
def test_multi_build_fail(self):
    """Check that simultaneous builds that fail (front-end and nodes)
    are handled correctly."""
    parser = arguments.get_parser()
    args = parser.parse_args(['run', '-H', 'this', 'build_parallel_fail'])
    run_cmd = commands.get_command(args.command_name)  # type: RunCommand

    self.assertEqual(run_cmd.run(self.pav_cfg, args), 22)

    # Each test should still have gotten its own build.
    unique_builds = {test.builder.name for test in run_cmd.last_tests}
    self.assertEqual(len(unique_builds), 4)

    # A failed build aborts the others.
    states = {test.status.current().state for test in run_cmd.last_tests}
    self.assertEqual(states, {STATES.ABORTED, STATES.BUILD_FAILED})

    # Every test should nonetheless be marked run-complete.
    self.assertTrue(all([test.check_run_complete()
                         for test in run_cmd.last_tests]))
def test_run_timeouts(self):
    """Make sure run timeout files work as expected."""
    run_cmd = commands.get_command('run')
    run_cmd.silence()
    parser = arguments.get_parser()

    # All the tests follow the same pattern, and a run timeout in one
    # doesn't affect the others, so run them all at once.
    args = parser.parse_args(['run', 'timeout_run_tests'])
    self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)

    # Wait long enough for the slowest timeout to trigger.
    time.sleep(35)

    expected = {
        'timeout_run_tests.GoodRun': 'COMPLETE',
        'timeout_run_tests.GoodRun2': 'COMPLETE',
        'timeout_run_tests.GoodRun3': 'COMPLETE',
        'timeout_run_tests.BadRun': 'RUN_TIMEOUT',
        'timeout_run_tests.BadRun2': 'RUN_TIMEOUT',
        'timeout_run_tests.BadRun3': 'RUN_TIMEOUT',
    }

    status_args = parser.parse_args(['status'])
    statuses = get_statuses(self.pav_cfg, status_args.tests, io.StringIO())
    for test_status in statuses:
        self.assertEqual(expected[test_status['name']],
                         test_status['state'])
def test_build_parallel_lots(self):
    """Make sure building works beyond the parallel building limit."""
    parser = arguments.get_parser()
    args = parser.parse_args(['run', '-H', 'this', 'build_parallel_lots'])
    run_cmd = commands.get_command(args.command_name)  # type: RunCommand

    ret = run_cmd.run(self.pav_cfg, args)
    run_cmd.outfile.seek(0)
    self.assertEqual(ret, 0, msg=run_cmd.outfile.read())

    for test in run_cmd.last_tests:
        test.wait(timeout=5)

    # Make sure we actually got eight separate builds.
    unique_builds = {test.builder.name for test in run_cmd.last_tests}
    self.assertEqual(len(unique_builds), 8)

    for test in run_cmd.last_tests:
        self.assertEqual(test.results['result'], 'PASS',
                         msg='Test {} status: {}'.format(
                             test.id, test.status.current()))
def test_cancel_sched_check(self):
    """Cancel a test series and make sure each test was cancelled
    through the scheduler."""
    arg_parser = arguments.get_parser()

    # BUG FIX: the original was missing a comma, so 'this' and
    # 'hello_world2' were implicitly concatenated into the bogus
    # host argument 'thishello_world2'.
    args = arg_parser.parse_args(['run', '-H', 'this', 'hello_world2'])
    run_cmd = commands.get_command(args.command_name)
    run_cmd.outfile = StringIO()
    run_cmd.run(self.pav_cfg, args)

    args = arg_parser.parse_args(['cancel'])
    cancel_cmd = commands.get_command(args.command_name)
    cancel_cmd.outfile = StringIO()
    cancel_cmd.errfile = StringIO()
    # BUG FIX: the cancel command was configured but never actually
    # run, so nothing was cancelled before checking scheduler status.
    self.assertEqual(cancel_cmd.run(self.pav_cfg, args), 0)

    # Collect the tests in the user's most recent series.
    series_id = series.TestSeries.load_user_series_id(self.pav_cfg)
    test_list = list(
        series.TestSeries.from_id(self.pav_cfg, int(series_id[1:])).tests)

    # Any test that didn't complete should show as cancelled in the
    # scheduler itself.
    for test_id in test_list:
        test = PavTest.load(self.pav_cfg, test_id)
        if test.status.current().state != STATES.COMPLETE:
            sched = schedulers.get_scheduler_plugin(test.scheduler)
            sched_status = sched.job_status(self.pav_cfg, test)
            self.assertIn("SCHED_CANCELLED", str(sched_status))
def test_cancel_status_json(self):
    """Test cancel command with the status and json flags."""
    parser = arguments.get_parser()

    run_args = parser.parse_args(
        ['run', '-H', 'this', 'hello_world.world'])
    run_cmd = commands.get_command(run_args.command_name)
    run_cmd.outfile = StringIO()
    run_cmd.run(self.pav_cfg, run_args)

    cancel_args = parser.parse_args(['cancel', '-s', '-j'])
    cancel_cmd = commands.get_command(cancel_args.command_name)
    cancel_cmd.outfile = StringIO()
    cancel_cmd.errfile = StringIO()
    self.assertEqual(cancel_cmd.run(self.pav_cfg, cancel_args), 0)

    # The JSON status is on the last output line; drop the first four
    # bytes of prefix before parsing.
    last_line = cancel_cmd.outfile.getvalue().split('\n')[-1].strip()
    payload = last_line.encode('UTF-8')[4:].decode('UTF-8')
    self.assertNotEqual(len(json.loads(payload)), 0)
def test_cancel_series_test(self):
    """Test cancel command with a mix of series and test ids."""
    parser = arguments.get_parser()

    run_args = parser.parse_args(
        ['run', '-H', 'this', 'hello_world.hello', 'hello_world.world'])
    run_cmd = commands.get_command(run_args.command_name)
    run_cmd.outfile = StringIO()
    run_cmd.run(self.pav_cfg, run_args)

    # Cancel by the series id plus each individual test id.
    series_id = series.TestSeries.load_user_series_id(self.pav_cfg)
    targets = [series_id]
    targets.extend(
        series.TestSeries.from_id(self.pav_cfg, int(series_id[1:])).tests)

    cancel_args = parser.parse_args(
        ['cancel', targets[0], str(targets[1]), str(targets[2])])
    cancel_cmd = commands.get_command(cancel_args.command_name)
    cancel_cmd.outfile = StringIO()
    cancel_cmd.errfile = StringIO()
    self.assertEqual(cancel_cmd.run(self.pav_cfg, cancel_args), 0)
def test_cancel_series_test(self):
    """Test cancelling an entire series via its series id."""
    parser = arguments.get_parser()

    run_args = parser.parse_args(
        ['run', '-H', 'this', 'cancel_test.test1', 'cancel_test.test2'])
    run_cmd = commands.get_command(run_args.command_name)
    run_cmd.outfile = StringIO()
    run_cmd.run(self.pav_cfg, run_args)

    # Cancel the whole series by its id.
    series_id = series.TestSeries.load_user_series_id(self.pav_cfg)

    cancel_args = parser.parse_args(['cancel', series_id])
    cancel_cmd = commands.get_command(cancel_args.command_name)
    cancel_cmd.outfile = cancel_cmd.errfile = StringIO()
    self.assertEqual(cancel_cmd.run(self.pav_cfg, cancel_args), 0)
def test_clean_with_older_than_flag(self):
    """Test the clean command's '--older-than' flag with multiple
    date formats."""
    parser = arguments.get_parser()

    # The same run/clean cycle is exercised once per date format.
    for date_spec in ('5 weeks', 'Jul 3 2019'):
        run_args = parser.parse_args(['run', '-H', 'this', 'clean_test'])
        run_cmd = commands.get_command(run_args.command_name)
        run_cmd.outfile = StringIO()
        run_cmd.run(self.pav_cfg, run_args)

        clean_args = parser.parse_args(
            ['clean', '--older-than', date_spec])
        clean_cmd = commands.get_command(clean_args.command_name)
        clean_cmd.outfile = StringIO()
        clean_cmd.errfile = StringIO()
        self.assertEqual(clean_cmd.run(self.pav_cfg, clean_args), 0)
def test_rebuilds(self):
    """Make sure rebuilding works as expected.

    Builds the 'build_rebuild' suite once, records each test's builder
    name, then builds again with '--rebuild' and checks that every
    build got a new '-2' suffixed name and origin.
    """
    arg_parser = arguments.get_parser()
    args = arg_parser.parse_args([
        'build', '-H', 'this', 'build_rebuild', '--rebuild',
    ])
    build_cmd = commands.get_command(args.command_name)  # type: RunCommand
    self.assertEqual(build_cmd.run(self.pav_cfg, args), 0)

    for test in build_cmd.last_tests:
        test.wait(timeout=3)

    # Make sure we actually built separate builds
    builds = [test.builder for test in build_cmd.last_tests]
    build_names = set([b.name for b in builds])
    self.assertEqual(len(build_names), 4)

    # Acceptable states per test; several tests share a build, so which
    # one does the build (BUILD_DONE) vs. reuses it (BUILD_REUSED) can
    # vary from run to run.
    result_matrix = {
        'local1': [STATES.BUILD_DONE, STATES.BUILD_REUSED],
        'local1a': [STATES.BUILD_REUSED, STATES.BUILD_DONE],
        'nodes1': [STATES.BUILD_REUSED, STATES.BUILD_DONE],
        'nodes1a': [STATES.BUILD_REUSED, STATES.BUILD_DONE],
        'local2': [STATES.BUILD_DONE],
        'nodes3': [STATES.BUILD_DONE],
    }
    # Map test name -> builder name from the first pass, for comparison
    # after the rebuild.
    orig_names = {}
    for test in build_cmd.last_tests:
        tname = test.name.split('.')[1]
        self.assertIn(test.status.current().state, result_matrix[tname],
                      msg='Test {} status: {}'.format(
                          test.name, test.status.current()))
        orig_names[test.name] = test.builder.name

    # Second pass: '--rebuild' should force fresh builds.
    self.assertEqual(build_cmd.run(self.pav_cfg, args), 0)

    for test in build_cmd.last_tests:
        test.wait(timeout=3)

    # Make sure we actually built separate builds
    builds = [test.builder for test in build_cmd.last_tests]
    build_names = set([b.name for b in builds])
    self.assertEqual(len(build_names), 4)

    # Each rebuilt test should use a '-2' suffixed build name, both in
    # its own record and in the resolved build origin symlink.
    for test in build_cmd.last_tests:
        expected_name = orig_names[test.name] + '-2'
        self.assertEqual(test._load_build_name(), expected_name,
                         msg=test.name)
        origin = test.build_origin_path.resolve().name
        self.assertEqual(origin, expected_name, msg=test.name)
def test_no_sched(self):
    """Check that we get a reasonable error for a non-available
    scheduler."""
    parser = arguments.get_parser()
    args = parser.parse_args(['run', 'not_available'])
    run_cmd = commands.get_command(args.command_name)
    # Any non-zero return counts as failing reasonably here.
    self.assertNotEqual(run_cmd.run(self.pav_cfg, args), 0)
def test_series_circle(self):
    """Test that circular series references are detected (and that
    'ordered: True' works as intended)."""
    series_cmd = commands.get_command('series')
    parser = arguments.get_parser()
    series_args = parser.parse_args(['series', 'series_circle1'])
    # The circular reference should surface as a TestSeriesError.
    with self.assertRaises(pavilion.series_util.TestSeriesError):
        series_cmd.run(self.pav_cfg, series_args)
def test_run(self):
    """A straightforward run of two tests should succeed."""
    parser = arguments.get_parser()
    args = parser.parse_args(
        ['run', '-H', 'this', 'hello_world.world', 'hello_world.narf'])
    run_cmd = commands.get_command(args.command_name)
    self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
def __init__(self, *args, **kwargs):
    """Setup the pav_cfg object, and do other initialization required
    by pavilion.

    Opens the default pav config file (found in
    test/data/pav_config_dir/pavilion.yaml), modifies it, saves the
    modified file to a temp location, and reads that file instead.
    """

    with self.PAV_CONFIG_PATH.open() as cfg_file:
        raw_pav_cfg = config.PavilionConfigLoader().load(cfg_file)

    raw_pav_cfg.config_dirs = [self.TEST_DATA_ROOT/'pav_config_dir',
                               self.PAV_LIB_DIR]
    raw_pav_cfg.working_dir = self.PAV_ROOT_DIR/'test'/'working_dir'
    raw_pav_cfg.user_config = False
    raw_pav_cfg.result_log = raw_pav_cfg.working_dir/'results.log'

    if not raw_pav_cfg.working_dir.exists():
        raw_pav_cfg.working_dir.mkdir()

    cfg_dir = raw_pav_cfg.working_dir/'pav_cfgs'
    if not cfg_dir.exists():
        cfg_dir.mkdir()

    # BUG FIX: tempfile.mktemp() is race-prone (the returned name can
    # be claimed by another process before we create the file).
    # NamedTemporaryFile creates it atomically; delete=False keeps it
    # around so it can be re-read below.
    tmp_cfg_file = tempfile.NamedTemporaryFile(
        suffix='.yaml', dir=str(cfg_dir), delete=False)
    cfg_path = Path(tmp_cfg_file.name)
    tmp_cfg_file.close()

    with cfg_path.open('w') as pav_cfg_file:
        config.PavilionConfigLoader().dump(pav_cfg_file, raw_pav_cfg)

    with cfg_path.open() as cfg_file:
        self.pav_cfg = config.PavilionConfigLoader().load(cfg_file)

    self.pav_cfg.pav_cfg_file = cfg_path
    self.pav_cfg.pav_vars = pavilion_variables.PavVars()

    if not self.pav_cfg.working_dir.exists():
        self.pav_cfg.working_dir.mkdir(parents=True)

    # Create the basic directories in the working directory
    for path in self.WORKING_DIRS:
        path = self.pav_cfg.working_dir/path
        if not path.exists():
            path.mkdir()

    self.tmp_dir = tempfile.TemporaryDirectory()

    # We have to get this to set up the base argument parser before
    # plugins can add to it.
    _ = arguments.get_parser()
    super().__init__(*args, **kwargs)
def test_cancel_invalid_test(self):
    """Cancelling a nonexistent test id should fail with EINVAL."""
    parser = arguments.get_parser()
    # sys.maxsize is guaranteed not to be a real test id.
    args = parser.parse_args(['cancel', str(sys.maxsize)])
    cancel_cmd = commands.get_command(args.command_name)
    cancel_cmd.outfile = cancel_cmd.errfile = StringIO()
    self.assertEqual(cancel_cmd.run(self.pav_cfg, args), errno.EINVAL)
def test_version_incompatibility(self):
    """Make sure incompatible versions exit gracefully when attempting
    to run."""
    parser = arguments.get_parser()
    args = parser.parse_args(['run', 'version_incompatible'])
    run_cmd = commands.get_command(args.command_name)
    # Capture stdout and stderr into the same buffer.
    run_cmd.outfile = io.StringIO()
    run_cmd.errfile = run_cmd.outfile
    self.assertEqual(run_cmd.run(self.pav_cfg, args), 22)
def test_run_status(self):
    """Tests run command with status flag."""
    parser = arguments.get_parser()
    args = parser.parse_args(['run', '-s', 'hello_world'])
    run_cmd = commands.get_command(args.command_name)
    self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
def test_run_status(self):
    """Tests run command with status flag."""
    arg_parser = arguments.get_parser()
    args = arg_parser.parse_args(['run', '-s', 'hello_world'])
    run_cmd = commands.get_command(args.command_name)
    run_cmd.outfile = io.StringIO()
    # BUG FIX: outfile was assigned twice; the duplicate assignment was
    # clearly meant to capture stderr too (without it, errors leak to
    # the real console).
    run_cmd.errfile = io.StringIO()
    self.assertEqual(run_cmd.run(self.pav_cfg, args), 0)
def test_show_cmds(self):
    """Smoke-test every 'show' subcommand argument combination."""
    plugins.initialize_plugins(self.pav_cfg)

    arg_lists = [
        ('show', 'config'),
        ('show', 'config', '--template'),
        ('show', 'functions'),
        ('show', 'functions', '--detail', 'int'),
        ('show', 'hosts'),
        ('show', 'hosts', '--verbose'),
        ('show', 'hosts', '--vars', 'this'),
        ('show', 'hosts', '--config', 'this'),
        ('show', 'modes'),
        ('show', 'modes', '--verbose'),
        ('show', 'modes', '--vars', 'defaulted'),
        ('show', 'modes', '--config', 'defaulted'),
        ('show', 'module_wrappers'),
        ('show', 'module_wrappers', '--verbose'),
        ('show', 'pav_vars'),
        ('show', 'result_parsers'),
        ('show', 'result_parsers', '--doc=regex'),
        ('show', 'result_parsers', '--verbose'),
        ('show', 'sched'),
        ('show', 'sched', '--config=slurm'),
        ('show', 'sched', '--vars=slurm'),
        ('show', 'states'),
        ('show', 'suites'),
        ('show', 'suites', '--err'),
        ('show', 'suites', '--supersedes'),
        ('show', 'suites', '--verbose'),
        ('show', 'system_variables'),
        ('show', 'system_variables', '--verbose'),
        ('show', 'test_config'),
        ('show', 'tests'),
        ('show', 'tests', '--err'),
        ('show', 'tests', '--doc', 'hello_world.narf'),
        ('show', 'tests', '--hidden'),
        ('show', 'tests', '--verbose'),
    ]

    parser = arguments.get_parser()
    show_cmd = commands.get_command('show')
    show_cmd.silence()

    # None of these should raise; we only check that they run.
    for arg_list in arg_lists:
        show_cmd.run(self.pav_cfg, parser.parse_args(arg_list))

    plugins._reset_plugins()
def test_clean(self):
    """Test clean command with no arguments."""
    parser = arguments.get_parser()

    run_args = parser.parse_args(['run', '-H', 'this', 'clean_test'])
    runner = commands.get_command(run_args.command_name)
    runner.silence()
    runner.run(self.pav_cfg, run_args)

    clean_args = parser.parse_args(['clean'])
    cleaner = commands.get_command(clean_args.command_name)
    cleaner.silence()
    self.assertEqual(cleaner.run(self.pav_cfg, clean_args), 0)
def test_cat(self):
    """Checking cat command functionality."""
    test = self._quick_test()

    cat_cmd = commands.get_command('cat')
    cat_cmd.outfile = io.StringIO()
    cat_cmd.errfile = io.StringIO()

    parser = arguments.get_parser()
    for arg_set in (['cat', str(test.id), 'build.sh'],):
        cat_cmd.run(self.pav_cfg, parser.parse_args(arg_set))
        # NOTE(review): the StringIO objects themselves are handed to
        # fprint here, not their contents — confirm this is intended
        # (perhaps .getvalue() was meant).
        output.fprint(cat_cmd.outfile)
        output.fprint(cat_cmd.errfile)
def __init__(self, *args, **kwargs):
    """Set up a pav_cfg rooted at /tmp/<user>/pav_tests, based on the
    default pav config file, and create the working directories."""

    with self.PAV_CONFIG_PATH.open() as cfg_file:
        raw_pav_cfg = config.PavilionConfigLoader().load(cfg_file)

    raw_pav_cfg.config_dirs = [
        self.TEST_DATA_ROOT / 'pav_config_dir',
        self.PAV_LIB_DIR
    ]
    raw_pav_cfg.working_dir = Path('/tmp') / get_login() / 'pav_tests'
    raw_pav_cfg.result_log = raw_pav_cfg.working_dir / 'results.log'

    if not raw_pav_cfg.working_dir.exists():
        raw_pav_cfg.working_dir.mkdir()

    cfg_dir = raw_pav_cfg.working_dir / 'pav_cfgs'
    if not cfg_dir.exists():
        cfg_dir.mkdir()

    # BUG FIX: tempfile.mktemp() is race-prone (the returned name can
    # be claimed before the file is created). Create the file
    # atomically instead; delete=False keeps it for the reads below.
    tmp_cfg_file = tempfile.NamedTemporaryFile(
        suffix='.yaml', dir=str(cfg_dir), delete=False)
    cfg_path = Path(tmp_cfg_file.name)
    tmp_cfg_file.close()

    with cfg_path.open('w') as pav_cfg_file:
        config.PavilionConfigLoader().dump(pav_cfg_file, raw_pav_cfg)

    with cfg_path.open() as cfg_file:
        self.pav_cfg = config.PavilionConfigLoader().load(cfg_file)

    self.pav_cfg.pav_cfg_file = cfg_path

    # Create the basic directories in the working directory
    for path in [
            self.pav_cfg.working_dir,
            self.pav_cfg.working_dir / 'builds',
            self.pav_cfg.working_dir / 'tests',
            self.pav_cfg.working_dir / 'series',
            self.pav_cfg.working_dir / 'users',
            self.pav_cfg.working_dir / 'downloads']:
        if not path.exists():
            os.makedirs(str(path), exist_ok=True)

    self.tmp_dir = tempfile.TemporaryDirectory()

    # We have to get this to set up the base argument parser before
    # plugins can add to it.
    _ = arguments.get_parser()
    super().__init__(*args, **kwargs)
def test_pruning(self):
    """Check that we only prune what we expect to, and that the result
    log remains valid."""

    # Back up the result log so we can verify the pruned log against it
    # at the end.
    # BUG FIX: tempfile.mktemp() is race-prone; create the backup file
    # atomically instead.
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        tmp_path = Path(tmp_file.name)
    shutil.copy(self.pav_cfg.result_log.as_posix(), tmp_path.as_posix())

    tests = [self._quick_test() for _ in range(20)]
    for test in tests:
        results = test.gather_results(test.run())
        test.save_results(results)

    # Prune by id for ids divisible by 3, and by uuid for ids
    # divisible by 4.
    prune = [str(test.id) for test in tests if test.id % 3 == 0]
    prune.extend([test.uuid for test in tests if test.id % 4 == 0])

    maint_cmd = commands.get_command('maint')
    maint_cmd.silence()

    parser = arguments.get_parser()
    args = parser.parse_args(['maint', 'prune_results', '--json'] + prune)
    maint_cmd.run(self.pav_cfg, args)
    out, err = maint_cmd.clear_output()
    self.assertEqual(err, '')

    # Everything reported pruned must have been asked for...
    pruned = json.loads(out)
    for presult in pruned:
        self.assertTrue(str(presult['id']) in prune
                        or presult['uuid'] in prune)

    # ...and everything asked for must have been reported pruned.
    pruned_ids = [str(pr['id']) for pr in pruned]
    pruned_uuids = [pr['uuid'] for pr in pruned]
    for prune_id in prune:
        self.assertTrue(
            prune_id in pruned_ids or prune_id in pruned_uuids,
            msg="Missing expected prune_id {} in {} or {}"
            .format(prune_id, pruned_ids, pruned_uuids))

    # Now prune every remaining test by id. (The original comment
    # claimed "multiples of 5 + 1", which did not match the code.)
    prune2 = [str(test.id) for test in tests]
    args2 = parser.parse_args(['maint', 'prune_results'] + prune2)
    maint_cmd.run(self.pav_cfg, args2)
    out, err = maint_cmd.clear_output()
    self.assertEqual(err, '')

    # The pruned log should still match the backed-up original.
    self._cmp_files(tmp_path, self.pav_cfg.result_log)
def test_build_verbosity(self):
    """Make sure that the build verbosity levels at least appear to
    work."""
    parser = arguments.get_parser()
    build_cmd = commands.get_command('build')  # type: BuildCmd

    # One '-b', then two: each verbosity level should build cleanly.
    for arg_set in (
            ['build', '-H', 'this', '-l', '-b', 'build_parallel'],
            ['build', '-H', 'this', '-l', '-b', '-b', 'build_parallel']):
        args = parser.parse_args(arg_set)
        ret = build_cmd.run(self.pav_cfg, args)
        build_cmd.outfile.seek(0)
        self.assertEqual(ret, 0, msg=build_cmd.outfile.read())
def test_show_cmds(self):
    """Smoke-test the 'show' subcommands with their various flags."""
    plugins.initialize_plugins(self.pav_cfg)

    arg_lists = [
        ('show', 'sched'),
        ('show', 'sched', '--config=slurm'),
        ('show', 'sched', '--vars=slurm'),
        ('show', 'result_parsers'),
        ('show', 'result_parsers', '--verbose'),
        ('show', 'result_parsers', '--config=regex'),
        ('show', 'states'),
        ('show', 'config'),
        ('show', 'config', '--template'),
        ('show', 'test_config'),
        ('show', 'module_wrappers'),
        ('show', 'module_wrappers', '--verbose'),
        ('show', 'system_variables'),
        ('show', 'system_variables', '--verbose'),
        ('show', 'pav_vars'),
        ('show', 'suites'),
        ('show', 'suites', '--verbose'),
        ('show', 'suites', '--err'),
        ('show', 'suites', '--supersedes'),
        ('show', 'tests'),
        ('show', 'tests', '--verbose'),
        ('show', 'tests', '--err'),
        ('show', 'tests', '--hidden'),
        ('show', 'hosts'),
        ('show', 'hosts', '--verbose'),
        ('show', 'modes'),
        ('show', 'modes', '--verbose'),
    ]

    parser = arguments.get_parser()
    show_cmd = commands.get_command('show')
    show_cmd.outfile = io.StringIO()
    show_cmd.errfile = io.StringIO()

    # None of these should raise; we only check that they run.
    for arg_list in arg_lists:
        show_cmd.run(self.pav_cfg, parser.parse_args(arg_list))

    plugins._reset_plugins()
def test_cancel(self):
    """Test cancel command with no arguments."""
    parser = arguments.get_parser()

    run_args = parser.parse_args(['run', '-H', 'this', 'cancel_test'])
    run_cmd = commands.get_command(run_args.command_name)
    run_cmd.outfile = run_cmd.errfile = StringIO()
    run_cmd.run(self.pav_cfg, run_args)

    cancel_args = parser.parse_args(['cancel'])
    # NOTE(review): the whole args namespace is passed here, while the
    # timeout test elsewhere passes `status_args.tests` — confirm which
    # argument get_statuses actually expects.
    get_statuses(self.pav_cfg, cancel_args, StringIO())

    cancel_cmd = commands.get_command(cancel_args.command_name)
    cancel_cmd.outfile = cancel_cmd.errfile = StringIO()
    self.assertEqual(cancel_cmd.run(self.pav_cfg, cancel_args), 0)
def test_clean_with_invalid_date(self):
    """Clean with a malformed '--older-than' date should fail with
    EINVAL."""
    parser = arguments.get_parser()

    run_args = parser.parse_args(['run', '-H', 'this', 'clean_test'])
    run_cmd = commands.get_command(run_args.command_name)
    run_cmd.outfile = StringIO()
    run_cmd.run(self.pav_cfg, run_args)

    clean_args = parser.parse_args(
        ['clean', '--older-than', '5 foo invalid'])
    clean_cmd = commands.get_command(clean_args.command_name)
    clean_cmd.outfile = StringIO()
    clean_cmd.errfile = StringIO()
    self.assertEqual(clean_cmd.run(self.pav_cfg, clean_args),
                     errno.EINVAL)
def test_ls(self):
    """Checking ls command functionality."""
    test = self._quick_test()

    ls_cmd = commands.get_command('ls')
    ls_cmd.outfile = io.StringIO()
    ls_cmd.errfile = io.StringIO()

    parser = arguments.get_parser()
    # Exercise the plain, --tree, and --subdir forms.
    for arg_set in (
            ['ls', str(test.id)],
            ['ls', str(test.id), '--tree'],
            ['ls', str(test.id), '--subdir', 'build']):
        ls_cmd.run(self.pav_cfg, parser.parse_args(arg_set))