def _result_parsers_cmd(self, _, args):
    """Show all the result parsers."""

    if args.doc:
        # Show the full documentation for a single, named parser.
        try:
            plugin = parsers.get_plugin(args.doc)
        except pavilion.result.common.ResultError:
            output.fprint("Invalid result parser '{}'.".format(args.doc),
                          color=output.RED)
            return errno.EINVAL

        output.fprint(plugin.doc(), file=self.outfile)
        return

    def _parser_row(name):
        """Build a single table row for the named parser plugin."""
        plugin = parsers.get_plugin(name)
        # Collapse the plugin docstring onto one line.
        return {
            'name': name,
            'description': " ".join(str(plugin.__doc__).split()),
            'path': plugin.path,
        }

    rows = [_parser_row(name) for name in parsers.list_plugins()]

    fields = ['name', 'description'] + (['path'] if args.verbose else [])

    output.draw_table(self.outfile, fields=fields, rows=rows,
                      title="Available Result Parsers")
def _series_cmd(self, pav_cfg, args):
    """List the test series found in the configuration directories.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed command arguments (uses .verbose and .err).
    """

    series_dict = series_config.find_all_series(pav_cfg)

    rows = []
    for series_name in sorted(list(series_dict.keys())):
        series = series_dict[series_name]

        # NOTE(review): only series with a load error are ever appended to
        # rows; successfully loaded series produce no row at all, and the
        # 'tests' field added below has no matching key in any row. This
        # looks like a missing else-branch — confirm against the intended
        # behavior of `pav show series`.
        if series['err']:
            # Mark broken series in red with a wildcard suffix.
            series_name = output.ANSIString('{}.*'
                                            .format(series_name),
                                            output.RED)

            rows.append({
                'name': series_name,
                'summary': 'Loading the series failed. '
                           'For more info, run `pav show series --err`.',
                'path': series['path'],
                'err': series['err']
            })

    fields = ['name']
    if args.verbose or args.err:
        fields.extend(['tests', 'path'])

        if args.err:
            fields.append('err')

    output.draw_table(self.outfile, field_info={}, fields=fields,
                      rows=rows)
def print_status(statuses, outfile, json=False):
    """Print the given test statuses, either as a table or as JSON.

    :param list statuses: List of dictionary objects containing the test
        ID, name, state, time of state update, and note associated with
        that state.
    :param stream outfile: Stream to which the statuses should be printed.
    :param bool json: Whether state should be printed as a JSON object
        or not.
    :return: success (0) or failure (1).
    :rtype: int
    """

    # Succeed as long as at least one status refers to a real test.
    found = any(stat['note'] != "Test not found." for stat in statuses)
    ret_val = 0 if found else 1

    if json:
        output.json_dump({'statuses': statuses}, outfile)
        return ret_val

    output.draw_table(
        outfile=outfile,
        field_info={
            'time': {'transform': output.get_relative_timestamp}
        },
        fields=['test_id', 'name', 'state', 'time', 'note'],
        rows=statuses,
        title='Test statuses')

    return ret_val
def show_configs_table(self, pav_cfg, conf_type, errors=False,
                       verbose=False):
    """Default config table, shows the config name and if it can be
    loaded.

    :param pav_cfg: The pavilion configuration.
    :param str conf_type: The config category to list.
    :param bool errors: Also show the path and error columns.
    :param bool verbose: Also show the path column.
    """

    configs = resolver.TestConfigResolver(pav_cfg).find_all_configs(
        conf_type)

    col_names = ['name', 'summary']
    # Bug fix: 'path' used to be appended separately for 'verbose' and
    # 'errors', producing a duplicate column when both flags were set.
    if verbose or errors:
        col_names.append('path')
    if errors:
        col_names.append('err')

    data = []
    for name in configs:
        data.append({
            'name': name,
            'summary': configs[name]['status'],
            'path': configs[name]['path'],
            'err': configs[name]['error']
        })

    output.draw_table(self.outfile, fields=col_names, rows=data)
def _modes_cmd(self, pav_cfg, args):
    """List all known mode files.

    :param pav_cfg: The pavilion configuration (provides config_dirs).
    :param args: Parsed command arguments (uses .verbose).
    """

    modes = []
    col_names = ['Name']
    if args.verbose:
        col_names.append('Path')

    for conf_dir in pav_cfg.config_dirs:
        path = conf_dir / 'modes'

        # is_dir() is False for missing paths, so no exists() check needed.
        if not path.is_dir():
            continue

        # Use pathlib globbing instead of os.listdir plus manual suffix
        # checks; sort so the listing order is deterministic.
        for file in sorted(path.glob('*.yaml')):
            if file.is_file():
                modes.append({
                    'Name': file.stem,
                    'Path': file,
                })

    output.draw_table(
        self.outfile,
        fields=col_names,
        rows=modes
    )
def _system_variables_cmd(self, _, args):
    """List the system variable plugins, their values, and help text.

    :param _: The pavilion config (unused).
    :param args: Parsed command arguments (uses .verbose).
    """

    rows = []

    # defer=True, so deferred variables aren't resolved here.
    sys_vars = system_variables.get_vars(defer=True)

    for key in sorted(list(sys_vars.keys())):
        try:
            value = sys_vars[key]
            deferred = isinstance(value, DeferredVariable)
            help_str = sys_vars.help(key)

        except system_variables.SystemPluginError as err:
            # The plugin failed; show the error (in red) instead of a value.
            value = output.ANSIString('error', code=output.RED)
            deferred = False
            help_str = output.ANSIString(str(err), code=output.RED)

        rows.append({
            'name': key,
            'value': value if not deferred else '<deferred>',
            'description': help_str,
            'path': sys_vars.get_obj(key).path,
        })

    fields = ['name', 'value', 'description']

    if args.verbose:
        fields.append('path')

    output.draw_table(self.outfile, fields=fields, rows=rows,
                      title="Available System Variables")
def print_status_history(pav_cfg: dict, test_id: str, outfile: TextIO,
                         json: bool = False):
    """Print the status history for a given test object.

    :param pav_cfg: Base pavilion configuration.
    :param test_id: Single test ID.
    :param outfile: Stream to which the status history should be printed.
    :param json: Whether the output should be a JSON object or not
    :return: 0 for success, 1 when every entry reads "Test not found.".
    """

    test = TestRun.load(pav_cfg, int(test_id))
    status_history = status_history_from_test_obj(test)

    # Succeed only if at least one entry belongs to a real test.
    ret_val = 1
    for status in status_history:
        if status['note'] != "Test not found.":
            ret_val = 0

    if json:
        json_data = {'status_history': status_history}
        output.json_dump(json_data, outfile)
    else:
        fields = ['state', 'time', 'note']
        output.draw_table(
            outfile=outfile,
            field_info={'time': {
                'transform': output.get_relative_timestamp
            }},
            fields=fields,
            rows=status_history,
            title='Test {} Status History ({})'.format(test.id, test.name))

    return ret_val
def _config_dirs(self, pav_cfg, _):
    """Show each configuration directory, in priority order."""

    dir_rows = []
    for path in pav_cfg.config_dirs:
        dir_rows.append({'path': path})

    output.draw_table(self.outfile,
                      field_info={},
                      fields=['path'],
                      rows=dir_rows,
                      title="Config directories by priority.")
def _result_base_cmd(self, _, __):
    """Show base result keys."""

    rows = []
    for key, (_unused, doc) in result.BASE_RESULTS.items():
        rows.append({
            'name': key,
            'doc': doc,
        })

    output.draw_table(self.outfile, ['name', 'doc'], rows)
def _config_dirs_cmd(self, pav_cfg, _):
    """List the configuration directories, highest priority first."""

    rows = []
    for path in pav_cfg.config_dirs:
        rows.append({'path': path})

    output.draw_table(self.outfile,
                      fields=['path'],
                      rows=rows,
                      title="Config directories by priority.")
def _tests_cmd(self, pav_cfg, args):
    """List all available tests, grouped by suite.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed arguments (uses .test_name, .err, .hidden,
        and .verbose).
    """

    if args.test_name is not None:
        # A specific test was named; show its docs instead of the table.
        self._test_docs_subcmd(pav_cfg, args)
        return

    resolv = resolver.TestConfigResolver(pav_cfg)
    suites = resolv.find_all_tests()
    rows = []

    for suite_name in sorted(list(suites.keys())):
        suite = suites[suite_name]

        if suite['err']:
            # Mark broken suites in red and add an error summary row.
            suite_name = output.ANSIString(suite_name,
                                           output.RED)

            rows.append({
                'name': '{}.*'.format(suite_name),
                'summary': 'Loading the suite failed. '
                           'For more info, run `pav show tests --err`.',
                'path': suite['path'],
                'err': suite['err']
            })
        elif args.err:
            # Only error rows were requested; skip healthy suites.
            continue

        for test_name in sorted(list(suite['tests'])):
            test = suite['tests'][test_name]

            if test_name.startswith('_') and not args.hidden:
                # Skip any hidden tests.
                continue

            rows.append({
                'name': '{}.{}'.format(suite_name, test_name),
                'summary': test['summary'][:self.SUMMARY_SIZE_LIMIT],
                'path': suite['path'],
                'err': 'None'
            })

    fields = ['name', 'summary']
    if args.verbose or args.err:
        fields.append('path')

        if args.err:
            fields.append('err')

    output.draw_table(self.outfile, fields=fields, rows=rows,
                      title="Available Tests")
def _states_cmd(self, *_):
    """Show all of the states that a test can be in."""

    states = [
        {'name': state, 'description': status_file.STATES.help(state)}
        for state in sorted(status_file.STATES.list())
    ]

    output.draw_table(self.outfile,
                      fields=['name', 'description'],
                      rows=states,
                      title="Pavilion Test States")
def display_history(self, pav_cfg, args):
    """Display_history takes a test_id from the command line arguments
    and formats the status file from the id and displays it for the user
    through draw tables.

    :param pav_cfg: The pavilion config.
    :param argparse namespace args: The test via command line
    :rtype int
    """

    ret_val = 0
    # status_path locates the status file per test_run id.
    status_path = (pav_cfg.working_dir / 'test_runs' /
                   str(args.history).zfill(7) / 'status')

    try:
        test = TestRun.load(pav_cfg, args.history)
        name_final = test.name
        id_final = test.id
        states = []  # dictionary list for table output

        with status_path.open() as file:
            for line in file:
                # Each status line is "<iso-timestamp> <state> <note>";
                # split at most twice so the note keeps its spaces.
                val = line.split(' ', 2)
                states.append({
                    'state': val[1],
                    'time': datetime.strptime(val[0],
                                              '%Y-%m-%dT%H:%M:%S.%f'),
                    'note': val[2]
                })
    except (TestRunError, TestRunNotFoundError):
        output.fprint("The test_id {} does not exist in your "
                      "working directory.".format(args.history),
                      file=self.errfile,
                      color=output.RED)
        return errno.EINVAL

    fields = ['state', 'time', 'note']
    output.draw_table(
        outfile=self.outfile,
        field_info={'time': {
            'transform': output.get_relative_timestamp
        }},
        fields=fields,
        rows=states,
        title='Status history for test {} (id: {})'.format(name_final,
                                                           id_final))

    return ret_val
def _pavilion_variables_cmd(self, pav_cfg, _):
    """List every Pavilion variable with its value and help text."""

    var_rows = [
        {
            'name': key,
            'value': pav_cfg.pav_vars[key],
            'description': pav_cfg.pav_vars.info(key)['help'],
        }
        for key in sorted(list(pav_cfg.pav_vars.keys()))
    ]

    output.draw_table(self.outfile,
                      fields=['name', 'value', 'description'],
                      rows=var_rows,
                      title="Available Pavilion Variables")
def _states_cmd(self, pav_cfg, args):
    """Show every state a test run can be in."""

    # Both arguments are unused; they exist for a uniform subcommand
    # signature.
    del pav_cfg, args

    states = [
        {'name': state, 'description': status_file.STATES.help(state)}
        for state in sorted(status_file.STATES.list())
    ]

    output.draw_table(self.outfile,
                      field_info={},
                      fields=['name', 'description'],
                      rows=states,
                      title="Pavilion Test States")
def _suites_cmd(self, pav_cfg, args):
    """List the test suites and how many tests each contains.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed arguments (uses .supersedes, .verbose, .err).
    """

    suites = find_all_tests(pav_cfg)
    rows = []
    for suite_name in sorted(list(suites.keys())):
        suite = suites[suite_name]

        if suite['err']:
            # Highlight suites that failed to load.
            name = output.ANSIString(suite_name, output.RED)
        else:
            name = suite_name

        rows.append({
            'name': name,
            'path': suite['path'],
            'tests': len(suite['tests']),
            'err': suite['err']
        })

        if args.supersedes and suite['supersedes']:
            # Also list the suite files this one overrides.
            for path in suite['supersedes']:
                rows.append({
                    # Make these rows appear faded.
                    'name': output.ANSIString(suite_name, output.WHITE),
                    'path': output.ANSIString(path, output.WHITE),
                    'tests': '?',
                    'err': ''
                })

    fields = ['name', 'tests']

    if args.verbose or args.err:
        fields.append('path')

        if args.err:
            fields.append('err')

    output.draw_table(self.outfile, field_info={}, fields=fields,
                      rows=rows, title="Available Test Suites")
def run(self, pav_cfg, args):
    """Print the test results in a variety of formats.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed arguments (uses .tests, .full, .json, .key).
    :return: 0 on success.
    """

    test_ids = self._get_tests(pav_cfg, args.tests, args.full)

    tests = []
    for id_ in test_ids:
        try:
            tests.append(TestRun.load(pav_cfg, id_))
        except TestRunError as err:
            self.logger.warning("Could not load test %s - %s", id_, err)
        except TestRunNotFoundError as err:
            self.logger.warning("Could not find test %s - %s", id_, err)

    results = [test.results for test in tests]

    all_keys = set()
    for res in results:
        all_keys = all_keys.union(res.keys())

    all_keys = list(all_keys.difference(['result', 'name', 'id']))
    # Sort the keys by the maximum size of their data across results.
    # Bug fix: the previous key function ignored its argument entirely
    # (max of len(r) over results), so the order was arbitrary. Keys
    # missing from a result count as empty.
    all_keys.sort(key=lambda k: max(
        (len(str(res.get(k, ''))) for res in results), default=0))

    if args.json:
        output.json_dump(results, self.outfile)
        return 0

    if args.full:
        try:
            pprint.pprint(results)  # ext-print: ignore
        except OSError:
            # It's ok if this fails. Generally means we're piping to
            # another command.
            pass
        return 0
    else:
        fields = ['name', 'id', 'result'] + sum(args.key, list())
        output.draw_table(outfile=self.outfile, field_info={},
                          fields=fields, rows=results,
                          title="Test Results")
def run(self, pav_cfg, args):
    """Print the test results as a table, JSON, or full pprint dump.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed arguments (uses .tests, .full, .json, .key).
    :return: 0 on success.
    """

    test_ids = self._get_tests(pav_cfg, args.tests, args.full)

    tests = []
    for id_ in test_ids:
        try:
            tests.append(TestRun.load(pav_cfg, id_))
        except TestRunError as err:
            self.logger.warning("Could not load test %s - %s", id_, err)
        except TestRunNotFoundError as err:
            self.logger.warning("Could not find test %s - %s", id_, err)

    results = []
    for test in tests:
        res = test.load_results()
        if res is None:
            # Tests without results yet still get a placeholder row.
            res = {'name': test.name, 'id': test.id, 'result': ''}
        results.append(res)

    all_keys = set()
    for res in results:
        all_keys = all_keys.union(res.keys())

    all_keys = list(all_keys.difference(['result', 'name', 'id']))
    # Sort the keys by the maximum size of their data across results.
    # Bug fix: the previous key function ignored its argument entirely,
    # making the order arbitrary. Keys missing from a result count as
    # empty.
    all_keys.sort(key=lambda k: max(
        (len(str(res.get(k, ''))) for res in results), default=0))

    if args.json:
        output.json_dump(results, self.outfile)
        return 0

    if args.full:
        try:
            pprint.pprint(results)  # ext-print: ignore
        except OSError:
            # A broken pipe (e.g. piping into `head`) shouldn't produce
            # a traceback; ignore it like the table path does.
            pass
        return 0
    else:
        fields = ['name', 'id', 'result'] + sum(args.key, list())
        output.draw_table(outfile=self.outfile, field_info={},
                          fields=fields, rows=results,
                          title="Test Results")
def _result_parsers_cmd(self, _, args):
    """Show all the result parsers.

    :param _: The pavilion config (unused).
    :param args: Parsed arguments (uses .config and .verbose).
    """

    if args.config:
        # Dump just the named parser's configuration spec.
        try:
            res_plugin = parsers.get_plugin(args.config)
        except pavilion.result.base.ResultError:
            output.fprint(
                "Invalid result parser '{}'.".format(
                    args.config),
                color=output.RED)
            return errno.EINVAL

        config_items = res_plugin.get_config_items()

        class Loader(yaml_config.YamlConfigLoader):
            """Loader for just a result parser's config."""

            # A one-off loader class whose elements are only this
            # parser's config items.
            ELEMENTS = config_items

        Loader().dump(self.outfile)

    else:
        rps = []
        for rp_name in parsers.list_plugins():
            res_plugin = parsers.get_plugin(rp_name)

            # Collapse the plugin docstring onto a single line.
            desc = " ".join(str(res_plugin.__doc__).split())
            rps.append({
                'name': rp_name,
                'description': desc,
                'path': res_plugin.path
            })

        fields = ['name', 'description']
        if args.verbose:
            fields.append('path')

        output.draw_table(self.outfile, field_info={}, fields=fields,
                          rows=rps, title="Available Result Parsers")
def _result_prune_cmd(self, pav_cfg, args):
    """Remove matching results from the results log."""

    try:
        pruned = result.prune_result_log(pav_cfg.result_log, args.ids)
    except pavilion.result.common.ResultError as err:
        output.fprint(err.args[0], file=self.errfile, color=output.RED)
        return errno.EACCES

    if args.json:
        # Dump the pruned records as JSON and stop.
        output.json_dump(obj=pruned, file=self.outfile)
        return

    output.draw_table(
        outfile=self.outfile,
        fields=['id', 'uuid', 'name', 'result', 'created'],
        rows=pruned,
        title="Pruned Results")
def _functions_cmd(self, _, args):
    """List all of the known function plugins."""

    if args.detail:
        # Show full help for a single, named function plugin.
        func = expression_functions.get_plugin(args.detail)

        output.fprint(func.signature, color=output.CYAN,
                      file=self.outfile)
        output.fprint('-' * len(func.signature), file=self.outfile)
        output.fprint(func.long_description, file=self.outfile)
        return

    rows = []
    for func in list_plugins()['function']:
        rows.append({
            'name': func.name,
            'signature': func.signature,
            'description': func.description,
        })

    output.draw_table(self.outfile,
                      field_info={},
                      fields=['name', 'signature', 'description'],
                      rows=rows,
                      title="Available Expression Functions")
def write_output(self, mode: str, rows: List[dict], fields: List[str],
                 header: bool, vsep: str, wrap: bool):
    """Generically produce the output.

    :param mode: The output mode
    :param rows: Output items
    :param fields: List of fields to display.
    :param header: Whether to display a header in long/cvs mode
    :param vsep: Long mode vertical separator
    :param wrap: Wrap columns in long output mode.
    """

    if not rows:
        output.fprint("No matching items found.", file=self.errfile)
        return 0

    if mode in (self.OUTMODE_SPACE, self.OUTMODE_NEWLINE):
        # Bare listing of the first field only, space or newline
        # separated, with a trailing newline.
        sep = ' ' if mode == self.OUTMODE_SPACE else '\n'
        for row in rows:
            output.fprint(row[fields[0]], end=sep, file=self.outfile)
        output.fprint(file=self.outfile)
        return 0

    if mode == self.OUTMODE_LONG:
        # Full table; a huge table_width effectively disables wrapping.
        output.draw_table(
            outfile=self.outfile,
            fields=fields,
            field_info=self.FIELD_INFO,
            rows=rows,
            header=header,
            border_chars={'vsep': vsep},
            table_width=None if wrap else 1024**2
        )
        return 0

    # CSV
    output.output_csv(
        outfile=self.outfile,
        fields=fields,
        rows=rows,
        header=header,
    )

    return 0
def _module_wrappers_cmd(self, _, args):
    """List the various module wrapper plugins."""

    rows = []
    for mod_name in sorted(module_wrapper.list_module_wrappers()):
        wrapper = module_wrapper.get_module_wrapper(mod_name)
        rows.append({
            'name': mod_name,
            'version': wrapper._version,  # pylint: disable=W0212
            'description': wrapper.help_text,
            'path': wrapper.path,
        })

    fields = (['name', 'version', 'description', 'path']
              if args.verbose else
              ['name', 'version', 'description'])

    output.draw_table(self.outfile,
                      fields=fields,
                      rows=rows,
                      title="Available Module Wrapper Plugins")
def _hosts_cmd(self, pav_cfg, args):
    """List the host config files found in each config directory."""

    fields = ['Name', 'Path'] if args.verbose else ['Name']

    hosts = []
    for conf_dir in pav_cfg.config_dirs:
        hosts_dir = conf_dir / 'hosts'

        if not (hosts_dir.exists() and hosts_dir.is_dir()):
            continue

        for entry in os.listdir(hosts_dir.as_posix()):
            entry = hosts_dir / entry
            # Only yaml files count as host configs.
            if entry.suffix == '.yaml' and entry.is_file():
                hosts.append({'Name': entry.stem, 'Path': entry})

    output.draw_table(self.outfile,
                      field_info={},
                      fields=fields,
                      rows=hosts)
def print_summary(self, statuses):
    """Print_summary takes in a list of test statuses. It summarizes
    basic state output and displays the data to the user through
    draw_table.

    :param statuses: state list of current jobs
    :rtype: int
    """

    # Populating table dynamically requires dict
    summary_dict = {}
    passes = 0
    # NOTE(review): ret_val is never set to anything but 0 in this method.
    ret_val = 0
    total_tests = len(statuses)
    rows = []
    fields = ['State', 'Amount', 'Percent']
    fails = 0

    # Shrink statues dict to singular keys with total
    # amount of key as the value
    for test in statuses:
        if test['state'] not in summary_dict.keys():
            summary_dict[test['state']] = 1
        else:
            summary_dict[test['state']] += 1

        # Gathers info on passed tests from completed tests.
        if 'COMPLETE' in test['state'] and 'PASS' in test['note']:
            passes += 1

    # Only show the PASSED/FAILED columns if anything completed.
    if 'COMPLETE' in summary_dict.keys():
        fails = summary_dict['COMPLETE'] - passes
        fields = ['State', 'Amount', 'Percent', 'PASSED', 'FAILED']

    for key, value in summary_dict.items():
        # Build the rows for drawtables.

        # Determine Color.
        if key.endswith('ERROR') or key.endswith('TIMEOUT') or \
                key.endswith('FAILED') or key == 'ABORTED' \
                or key == 'INVALID':
            color = output.RED
        elif key == 'COMPLETE':
            color = output.GREEN
        elif key == 'SKIPPED':
            color = output.YELLOW
        elif key == 'RUNNING' or key == 'SCHEDULED' \
                or key == 'PREPPING_RUN' \
                or key == 'BUILDING' or key == 'BUILD_DONE' \
                or key == 'BUILD_REUSED':
            color = output.CYAN
        else:
            color = output.WHITE  # Not enough to warrant color.

        # Populating rows...
        if key == 'COMPLETE':  # only time we need to populate pass/fail
            rows.append(
                {'State': output.ANSIString(key, color),
                 'Amount': value,
                 'Percent': '{0:.0%}'.format(value / total_tests),
                 'PASSED': '{0:.0%}'.format(passes / value)
                           + ',({}/{})'.format(passes, value),
                 'FAILED': '{0:.0%}'.format(fails / value)
                           + ',({}/{})'.format(fails, value)}
            )
        else:
            rows.append(
                {'State': output.ANSIString(key, color),
                 'Amount': value,
                 'Percent': '{0:.0%}'.format(value / total_tests)}
            )

    # Color the pass/fail percentage columns green/red on output.
    field_info = {
        'PASSED': {
            'transform': lambda t: output.ANSIString(t, output.GREEN)
        },
        'FAILED': {
            'transform': lambda t: output.ANSIString(t, output.RED),
        }}

    output.draw_table(outfile=self.outfile, field_info=field_info,
                      fields=fields, rows=rows, border=True,
                      title='Test Summary')

    return ret_val
def run(self, pav_cfg, args):
    """Print the test results in a variety of formats.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed arguments (uses .tests, .re_run, .json, .full,
        and .key).
    :return: 0 on success, errno.EINVAL when re-running results fails.
    """

    test_ids = self._get_tests(pav_cfg, args.tests)

    tests = []
    for id_ in test_ids:
        try:
            tests.append(TestRun.load(pav_cfg, id_))
        except TestRunError as err:
            self.logger.warning("Could not load test %s - %s", id_, err)
        except TestRunNotFoundError as err:
            self.logger.warning("Could not find test %s - %s", id_, err)

    if args.re_run:
        # Recompute the results before displaying them.
        if not self.update_results(pav_cfg, tests):
            return errno.EINVAL

    if args.json or args.full:
        if len(tests) > 1:
            results = {test.name: test.results for test in tests}
        else:
            # There should always be at least one test
            results = tests[0].results

        width = shutil.get_terminal_size().columns

        try:
            if args.json:
                output.json_dump(results, self.outfile)
            else:
                pprint.pprint(results,  # ext-print: ignore
                              stream=self.outfile, width=width,
                              compact=True)
        except OSError:
            # It's ok if this fails. Generally means we're piping to
            # another command.
            pass

        return 0
    else:
        fields = self.BASE_FIELDS + args.key
        results = [test.results for test in tests]

        def fix_timestamp(ts_str: str) -> str:
            """Read the timestamp text and get a minimized, formatted
            value."""
            try:
                when = datetime.datetime.strptime(
                    ts_str, '%Y-%m-%d %H:%M:%S.%f')
            except ValueError:
                # Unparseable timestamps display as blank.
                return ''

            return output.get_relative_timestamp(when)

        output.draw_table(
            outfile=self.outfile,
            field_info={
                'started': {'transform': fix_timestamp},
                'finished': {'transform': fix_timestamp},
            },
            fields=fields,
            rows=results,
            title="Test Results"
        )

        return 0
def test_draw_table(self):
    """Exercise draw_table in many ways."""

    # We can't actually check that the output looks ok, but we
    # can at make sure it doesn't throw errors by testing a wide
    # combination of things.

    # Dump all the output here.
    dev_null = open('/dev/null', 'w')

    words = [
        word.strip() for word in
        (self.TEST_DATA_ROOT / 'words').open().readlines()
    ]

    # Choose random column header names
    columns = [random.choice(words) for i in range(20)]

    field_info = {
        # Column 0-2 have a max width. We put this one first to test the
        # case where all columns have hit their max width.
        columns[0]: {'max_width': 10},
        columns[1]: {'max_width': 10},
        columns[2]: {'max_width': 10},
        # Column 3 is capitalized (test transforms)
        columns[3]: {'transform': lambda s: s.capitalize()},
        # Column 1 is specially formatted
        columns[4]: {'format': '"{}"'},
        # Column 2 has a title
        columns[5]: {'title': columns[2].capitalize()},
        # Column 4 has a min width.
        columns[6]: {'min_width': 10},
    }

    count = 0
    timer = 0

    for col_count in range(1, 11, 3):
        table_width = 20
        while table_width < 200:
            data_sizes = {
                col: random.randint(1, 100)
                for col in columns[:col_count]
            }

            rows = []
            for i in range(5):
                row = {}
                for col in columns[:col_count]:
                    data = [random.choice(words)
                            for i in range(data_sizes[col])]
                    row[col] = ' '.join(data)
                rows.append(row)

            # Randomly assign a title.
            title = None if random.randint(0, 1) == 0 else 'Title'
            pad = random.randint(0, 1) == 0
            border = random.randint(0, 1) == 0

            args = (dev_null, columns[:col_count], rows)
            kwargs = {
                'field_info': field_info,
                'table_width': table_width,
                'title': title,
                'pad': pad,
                'border': border
            }

            # Bug fix: the try block used to wrap only the table_width
            # increment below (which cannot raise), so draw_table errors
            # escaped without the diagnostic message. Wrap the actual
            # render call, and catch Exception rather than everything.
            try:
                start = time.time()
                output.draw_table(*args, **kwargs)
                timer += time.time() - start
                count += 1
            except Exception:
                import traceback
                traceback.print_exc()
                self.fail("Raised an error while rendering a table. "
                          "args: {}, kwargs: {}".format(args, kwargs))

            table_width += random.randint(10, 50)

    # Don't leak the sink file handle.
    dev_null.close()

    self.assertLess(timer / count, .3,
                    "Per table draw speed exceed 30 ms")
def _scheduler_cmd(self, _, args):
    """Show scheduler plugin variables, config spec, or a plugin listing.

    :param _: The pavilion config (unused).
    :param argparse.Namespace args: Parsed arguments (uses .vars,
        .config, and .verbose).
    """

    sched = None  # type : schedulers.SchedulerPlugin
    sched_name = None
    if args.vars is not None or args.config is not None:
        sched_name = args.vars if args.vars is not None else args.config

        try:
            sched = schedulers.get_plugin(sched_name)
        except schedulers.SchedulerPluginError:
            output.fprint(
                "Invalid scheduler plugin '{}'.".format(sched_name),
                color=output.RED,
            )
            return errno.EINVAL

    if args.vars is not None:
        # Tabulate the scheduler's variables, resolved against an empty
        # test config.
        sched_vars = []
        empty_config = file_format.TestConfigLoader().load_empty()

        svars = sched.get_vars(empty_config[sched_name])

        for key in sorted(list(svars.keys())):
            sched_vars.append(svars.info(key))

        output.draw_table(
            self.outfile,
            fields=['name', 'deferred', 'example', 'help'],
            rows=sched_vars,
            title="Variables for the {} scheduler plugin.".format(
                args.vars))

    elif args.config is not None:
        # Dump the scheduler's config section, filled with defaults.
        sched_config = sched.get_conf()

        class Loader(yaml_config.YamlConfigLoader):
            """Loader for just a scheduler's config."""

            # A one-off loader whose only element is this scheduler's
            # config section.
            ELEMENTS = [sched_config]

        defaults = Loader().load_empty()

        Loader().dump(self.outfile, values=defaults)

    else:
        # Assuming --list was given
        scheds = []
        for sched_name in schedulers.list_plugins():
            sched = schedulers.get_plugin(sched_name)

            scheds.append({
                'name': sched_name,
                'description': sched.description,
                'path': sched.path
            })

        fields = ['name', 'description']
        if args.verbose:
            fields.append('path')

        output.draw_table(self.outfile, fields=fields, rows=scheds,
                          title="Available Scheduler Plugins")
def run(self, pav_cfg, args):
    """Print the test results in a variety of formats.

    :param pav_cfg: The pavilion configuration.
    :param args: Parsed arguments (uses .re_run, .save, .show_log,
        .json, .full, and .key).
    :return: 0 on success, errno.EINVAL on failure.
    """

    test_ids = cmd_utils.arg_filtered_tests(pav_cfg, args,
                                            verbose=self.errfile)

    tests = []
    for id_ in test_ids:
        try:
            tests.append(TestRun.load(pav_cfg, id_))
        except TestRunError as err:
            self.logger.warning("Could not load test %s - %s", id_, err)
        except TestRunNotFoundError as err:
            self.logger.warning("Could not find test %s - %s", id_, err)

    # Capture re-run result logs in memory so they can be shown later.
    log_file = None
    if args.show_log and args.re_run:
        log_file = io.StringIO()

    if args.re_run:
        if not self.update_results(pav_cfg, tests, log_file):
            return errno.EINVAL

    if args.save:
        if not self.update_results(pav_cfg, tests, log_file, save=True):
            return errno.EINVAL

    if args.json or args.full:
        if len(tests) > 1:
            results = {test.name: test.results for test in tests}
        elif len(tests) == 1:
            results = tests[0].results
        else:
            output.fprint("Could not find any matching tests.",
                          color=output.RED, file=self.outfile)
            return errno.EINVAL

        width = shutil.get_terminal_size().columns or 80

        try:
            if args.json:
                output.json_dump(results, self.outfile)
            else:
                pprint.pprint(results,  # ext-print: ignore
                              stream=self.outfile, width=width,
                              compact=True)
        except OSError:
            # It's ok if this fails. Generally means we're piping to
            # another command.
            pass
    else:
        fields = self.BASE_FIELDS + args.key
        results = [test.results for test in tests]

        output.draw_table(
            outfile=self.outfile,
            field_info={
                'started': {
                    'transform': output.get_relative_timestamp
                },
                'finished': {
                    'transform': output.get_relative_timestamp
                },
            },
            fields=fields,
            rows=results,
            title="Test Results")

    if args.show_log:
        if log_file is not None:
            # Captured re-run output.
            output.fprint(log_file.getvalue(), file=self.outfile,
                          color=output.GREY)
        else:
            # Dump each test's on-disk results log.
            for test in tests:
                output.fprint("\nResult logs for test {}\n"
                              .format(test.name),
                              file=self.outfile)
                if test.results_log.exists():
                    with test.results_log.open() as log_file:
                        output.fprint(log_file.read(), color=output.GREY,
                                      file=self.outfile)
                else:
                    output.fprint("<log file missing>", file=self.outfile,
                                  color=output.YELLOW)

    return 0
def show_vars(self, pav_cfg, cfg, conf_type):
    """Show the variables of a config, each variable is displayed as a
    table.

    :param pav_cfg: The pavilion configuration.
    :param cfg: The name of the config file to look up and load.
    :param conf_type: The config category the file belongs to.
    """

    file = resolver.TestConfigResolver(pav_cfg).find_config(conf_type,
                                                            cfg)
    with file.open() as config_file:
        cfg = file_format.TestConfigLoader().load(config_file)

    simple_vars = []
    complex_vars = []
    for var in cfg.get('variables').keys():
        subvar = cfg['variables'][var]
        # A variable is "complex" when it has multiple values or its
        # values are dicts; those each get their own table below.
        if isinstance(subvar, list) and (len(subvar) > 1
                                         or isinstance(subvar[0], dict)):
            complex_vars.append(var)
            continue
        simple_vars.append({'name': var, 'value': cfg['variables'][var]})

    if simple_vars:
        output.draw_table(self.outfile, field_info={},
                          fields=['name', 'value'], rows=simple_vars,
                          title="Simple Variables")

    for var in complex_vars:
        subvar = cfg['variables'][var][0]

        # List of strings.
        if isinstance(subvar, str):
            simple_vars = []
            for idx in range(len(cfg['variables'][var])):
                simple_vars.append({
                    'index': idx,
                    'value': cfg['variables'][var][idx]
                })
            output.draw_table(self.outfile, field_info={},
                              fields=['index', 'value'],
                              rows=simple_vars, title=var)

        # List of dicts.
        elif len(subvar) < 10:
            simple_vars = []
            fields = ['index']
            for idx in range(len(cfg['variables'][var])):
                dict_data = {'index': idx}
                for key, val in cfg['variables'][var][idx].items():
                    if idx == 0:
                        # Derive the column list from the first item's
                        # keys.
                        fields.append(key)
                    dict_data.update({key: val})
                simple_vars.append(dict_data)
            output.draw_table(self.outfile, field_info={},
                              fields=fields, rows=simple_vars,
                              title=var)

        else:
            # Too many keys to tabulate legibly; dump formatted text.
            output.fprint(var, file=self.outfile)
            output.fprint(
                "(Showing as json due to the insane number of "
                "keys)", file=self.outfile)
            output.fprint(pprint.pformat(cfg['variables'][var],
                                         compact=True),
                          file=self.outfile)
            output.fprint("\n", file=self.outfile)