Example No. 1
    def show_configs_table(self,
                           pav_cfg,
                           conf_type,
                           errors=False,
                           verbose=False):
        """Default config table, shows the config name and if it can be
        loaded."""

        configs = resolver.TestConfigResolver(pav_cfg).find_all_configs(
            conf_type)

        data = []
        col_names = ['name', 'summary']

        if verbose:
            col_names.append('path')

        if errors:
            if 'path' not in col_names:
                col_names.append('path')
            col_names.append('err')

        for name in configs:
            data.append({
                'name': name,
                'summary': configs[name]['status'],
                'path': configs[name]['path'],
                'err': configs[name]['error']
            })

        output.draw_table(self.outfile, fields=col_names, rows=data)
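
A minimal standalone sketch of the same pattern - build the column list conditionally, then one row dict per config - using made-up data and a plain print in place of Pavilion's output.draw_table:

# Made-up stand-in for the resolver's find_all_configs() output.
configs = {
    'generic': {'status': 'Loaded.', 'path': '/cfg/hosts/generic.yaml', 'error': ''},
    'broken':  {'status': 'Error.',  'path': '/cfg/hosts/broken.yaml',  'error': 'bad yaml'},
}

verbose, errors = False, True

col_names = ['name', 'summary']
if verbose:
    col_names.append('path')
if errors:
    if 'path' not in col_names:
        col_names.append('path')
    col_names.append('err')

rows = [{'name': name,
         'summary': cfg['status'],
         'path': cfg['path'],
         'err': cfg['error']}
        for name, cfg in configs.items()]

for row in rows:
    print(' | '.join(str(row[col]) for col in col_names))
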
Example No. 2
    def _load_test(self,
                   name: str,
                   host: str = 'this',
                   modes: List[str] = None,
                   build=True,
                   finalize=True) -> List[TestRun]:
        """Load the named test config from file. Returns a list of the
        resulting test runs."""

        if modes is None:
            modes = []

        res = resolver.TestConfigResolver(self.pav_cfg)
        test_cfgs = res.load([name], host, modes)

        tests = []
        for ptest in test_cfgs:
            test = TestRun(self.pav_cfg, ptest.config, var_man=ptest.var_man)

            if build:
                test.build()

            if finalize:
                fin_sys = system_variables.SysVarDict(unique=True)
                fin_var_man = VariableSetManager()
                fin_var_man.add_var_set('sys', fin_sys)
                res.finalize(test, fin_var_man)

            tests.append(test)

        return tests
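
A self-contained sketch of the modes=None default seen above - the usual way to avoid a shared mutable default argument. The loader below is a stand-in, not Pavilion's resolver:

from typing import List


def load_test(name: str, modes: List[str] = None) -> List[str]:
    """Return fake 'resolved configs'; modes defaults to a fresh list per call."""
    if modes is None:
        modes = []
    return ['{} ({})'.format(name, ', '.join(modes) or 'no modes')]


print(load_test('hello_world'))             # ['hello_world (no modes)']
print(load_test('hello_world', ['smoke']))  # ['hello_world (smoke)']
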
Example No. 3
    def _tests_cmd(self, pav_cfg, args):

        if args.test_name is not None:
            self._test_docs_subcmd(pav_cfg, args)
            return

        resolv = resolver.TestConfigResolver(pav_cfg)
        suites = resolv.find_all_tests()
        rows = []

        for suite_name in sorted(list(suites.keys())):
            suite = suites[suite_name]

            if suite['err']:
                suite_name = output.ANSIString(suite_name, output.RED)

                rows.append({
                    'name': '{}.*'.format(suite_name),
                    'summary': 'Loading the suite failed.  '
                    'For more info, run `pav show tests --err`.',
                    'path': suite['path'],
                    'err': suite['err']
                })
            elif args.err:
                continue

            for test_name in sorted(list(suite['tests'])):
                test = suite['tests'][test_name]

                if test_name.startswith('_') and not args.hidden:
                    # Skip any hidden tests.
                    continue

                rows.append({
                    'name': '{}.{}'.format(suite_name, test_name),
                    'summary': test['summary'][:self.SUMMARY_SIZE_LIMIT],
                    'path': suite['path'],
                    'err': 'None'
                })

        fields = ['name', 'summary']
        if args.verbose or args.err:
            fields.append('path')

            if args.err:
                fields.append('err')

        output.draw_table(self.outfile,
                          fields=fields,
                          rows=rows,
                          title="Available Tests")
Example No. 4
    def skip_set(self):
        """Procedure to skip tests. """

        temp_resolver = resolver.TestConfigResolver(self.pav_cfg)
        raw_configs = temp_resolver.load_raw_configs(list(self.tests.keys()),
                                                     [], self.modes)
        for config in raw_configs:
            # Delete conditionals - we're already skipping this test, but for
            # different reasons.
            config.pop('only_if', None)
            config.pop('not_if', None)
            skipped_test = TestRun(self.pav_cfg, config)
            skipped_test.set_skipped('Previous test in series did not PASS.')
            skipped_test.save_attributes()
            self.series_obj.add_tests([skipped_test])
            self.tests[skipped_test.name] = skipped_test

        self.done = True
Example No. 5
    def _test_docs_subcmd(self, pav_cfg, args):
        """Show the documentation for the requested test."""

        resolv = resolver.TestConfigResolver(pav_cfg)
        suites = resolv.find_all_tests()

        parts = args.test_name.split('.')
        if len(parts) != 2:
            output.fprint("You must give a test name as '<suite>.<test>'.",
                          file=self.outfile,
                          color=output.RED)
            return

        suite_name, test_name = parts

        if suite_name not in suites:
            output.fprint("No such suite: '{}'.\n"
                          "Available test suites:\n{}".format(
                              suite_name, "\n".join(sorted(suites.keys()))),
                          file=self.outfile,
                          color=output.RED)
            return
        tests = suites[suite_name]['tests']
        if test_name not in tests:
            output.fprint("No such test '{}' in suite '{}'.\n"
                          "Available tests in suite:\n{}".format(
                              test_name, suite_name,
                              "\n".join(sorted(tests.keys()))))
            return

        test = tests[test_name]

        def pvalue(header, *values):
            """An item header."""
            output.fprint(header,
                          color=output.CYAN,
                          file=self.outfile,
                          end=' ')
            for val in values:
                output.fprint(val, file=self.outfile)

        pvalue("Name:", args.test_name)
        pvalue("Maintainer:", test['maintainer'])
        pvalue("Email:", test['email'])
        pvalue("Summary:", test['summary'])
        pvalue("Documentation:", '\n\n', test['doc'], '\n')
Example No. 6
    def _suites_cmd(self, pav_cfg, args):
        suites = resolver.TestConfigResolver(pav_cfg).find_all_tests()

        rows = []
        for suite_name in sorted(list(suites.keys())):
            suite = suites[suite_name]

            if suite['err']:
                name = output.ANSIString(suite_name,
                                         output.RED)
            else:
                name = suite_name

            rows.append({
                'name':  name,
                'path':  suite['path'],
                'tests': len(suite['tests']),
                'err':   suite['err']
            })

            if args.supersedes and suite['supersedes']:
                for path in suite['supersedes']:
                    rows.append({
                        # Make these rows appear faded.
                        'name':  output.ANSIString(suite_name,
                                                   output.WHITE),
                        'path':  output.ANSIString(path,
                                                   output.WHITE),
                        'tests': '?',
                        'err':   ''
                    })

        fields = ['name', 'tests']

        if args.verbose or args.err:
            fields.append('path')

            if args.err:
                fields.append('err')

        output.draw_table(
            self.outfile,
            fields=fields,
            rows=rows,
            title="Available Test Suites"
        )
Example No. 7
    def show_full_config(self, pav_cfg, cfg_name, conf_type):
        """Show the full config of a given host/mode."""

        file = resolver.TestConfigResolver(pav_cfg).find_config(
            conf_type, cfg_name)
        config_data = None
        if file is not None:
            with file.open() as config_file:
                config_data = file_format.TestConfigLoader()\
                              .load_raw(config_file)

        if config_data is not None:
            output.fprint(pprint.pformat(config_data, compact=True),
                          file=self.outfile)
        else:
            output.fprint("No {} config found for "
                          "{}.".format(conf_type.strip('s'), cfg_name))
            return errno.EINVAL
Example No. 8
    def show_vars(self, pav_cfg, cfg, conf_type):
        """Show the variables of a config, each variable is displayed as a
        table."""

        file = resolver.TestConfigResolver(pav_cfg).find_config(conf_type, cfg)
        with file.open() as config_file:
            cfg = file_format.TestConfigLoader().load(config_file)

        simple_vars = []
        complex_vars = []
        for var in cfg.get('variables', {}).keys():
            subvar = cfg['variables'][var]
            if isinstance(subvar, list) and (len(subvar) > 1
                                             or isinstance(subvar[0], dict)):
                complex_vars.append(var)
                continue
            simple_vars.append({'name': var, 'value': cfg['variables'][var]})
        if simple_vars:
            output.draw_table(self.outfile,
                              field_info={},
                              fields=['name', 'value'],
                              rows=simple_vars,
                              title="Simple Variables")

        for var in complex_vars:
            subvar = cfg['variables'][var][0]
            # List of strings.
            if isinstance(subvar, str):
                simple_vars = []
                for idx in range(len(cfg['variables'][var])):
                    simple_vars.append({
                        'index': idx,
                        'value': cfg['variables'][var][idx]
                    })
                output.draw_table(self.outfile,
                                  field_info={},
                                  fields=['index', 'value'],
                                  rows=simple_vars,
                                  title=var)
            # List of dicts.
            elif len(subvar) < 10:
                simple_vars = []
                fields = ['index']
                for idx in range(len(cfg['variables'][var])):
                    dict_data = {'index': idx}
                    for key, val in cfg['variables'][var][idx].items():
                        if idx == 0:
                            fields.append(key)
                        dict_data.update({key: val})
                    simple_vars.append(dict_data)
                output.draw_table(self.outfile,
                                  field_info={},
                                  fields=fields,
                                  rows=simple_vars,
                                  title=var)
            else:
                output.fprint(var, file=self.outfile)
                output.fprint(
                    "(Showing as json due to the insane number of "
                    "keys)",
                    file=self.outfile)
                output.fprint(pprint.pformat(cfg['variables'][var],
                                             compact=True),
                              file=self.outfile)
            output.fprint("\n", file=self.outfile)
Example No. 9
    def update_results(self,
                       pav_cfg: dict,
                       tests: List[TestRun],
                       log_file: IO[str],
                       save: bool = False) -> bool:
        """Update each of the given tests with the result section from the
        current version of their configs. Then rerun result processing and
        update the results in the test object (but change nothing on disk).

        :param pav_cfg: The pavilion config.
        :param tests: A list of test objects to update.
        :param log_file: The logfile to log results to. May be None.
        :param save: Whether to save the updated results to the test's result
                     log. It will not update the general result log.
        :returns: True if successful, False otherwise. Will handle
            printing of any failure related errors.
        """

        reslvr = resolver.TestConfigResolver(pav_cfg)

        for test in tests:

            # Re-load the raw config using the saved name, host, and modes
            # of the original test.
            try:
                test_name = '.'.join(
                    (test.config['suite'], test.config['name']))

                configs = reslvr.load_raw_configs(
                    tests=[test_name],
                    host=test.config['host'],
                    modes=test.config['modes'],
                )
            except resolver.TestConfigError as err:
                output.fprint("Test '{}' could not be found: {}".format(
                    test.name, err.args[0]),
                              color=output.RED,
                              file=self.errfile)
                return False

            # These conditions guard against unexpected results from
            # load_raw_configs. They may not be possible.
            if not configs:
                output.fprint(
                    "No configs found for test '{}'. Skipping update.".format(
                        test.name),
                    color=output.YELLOW,
                    file=self.errfile)
                continue
            elif len(configs) > 1:
                output.fprint("Test '{}' somehow matched multiple configs."
                              "Skipping update.".format(test.name),
                              color=output.YELLOW,
                              file=self.errfile)
                continue

            cfg = configs[0]
            updates = {}

            for section in 'result_parse', 'result_evaluate':
                # Try to resolve the updated result section of the config using
                # the original variable values.
                try:
                    updates[section] = reslvr.resolve_section_values(
                        component=cfg[section],
                        var_man=test.var_man,
                    )
                except resolver.TestConfigError as err:
                    output.fprint(
                        "Test '{}' had a {} section that could not be "
                        "resolved with it's original variables: {}".format(
                            test.name, section, err.args[0]),
                        file=self.errfile,
                        color=output.RED)
                    return False
                except RuntimeError as err:
                    output.fprint(
                        "Unexpected error updating {} section for test "
                        "'{}': {}".format(section, test.name, err.args[0]),
                        color=output.RED,
                        file=self.errfile)
                    return False

            # Set the test's result section to the newly resolved one.
            test.config['result_parse'] = updates['result_parse']
            test.config['result_evaluate'] = updates['result_evaluate']

            try:
                result.check_config(test.config['result_parse'],
                                    test.config['result_evaluate'])

            except result.ResultError as err:
                output.fprint(
                    "Error found in results configuration: {}".format(
                        err.args[0]),
                    color=output.RED,
                    file=self.errfile)
                return False

            if save:
                test.builder.tracker.update(state=STATES.RESULTS,
                                            note="Re-running results.")

            # The new results will be attached to the test (but not saved).
            results = test.gather_results(test.results.get('return_value', 1),
                                          regather=not save,
                                          log_file=log_file)

            if save:
                test.save_results(results)
                with test.results_log.open('a') as log_file:
                    log_file.write(
                        "Results were re-ran and saved on {}\n".format(
                            datetime.datetime.today().strftime('%m-%d-%Y')))
                    log_file.write("See results.json for updated results.\n")
                test.builder.tracker.update(
                    state=STATES.COMPLETE,
                    note="The test completed with result: {}".format(
                        results["result"]))

        return True
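
A standalone sketch of the two-section update loop above - resolve each result section against the saved variables, then swap both into the config. The resolve function here is a simple stand-in for reslvr.resolve_section_values:

def resolve(section, variables):
    """Stand-in resolver: substitute '{key}' tokens with str.format."""
    return {key: val.format(**variables) for key, val in section.items()}


cfg = {
    'result_parse':    {'regex': r'^Result: {result}$'},
    'result_evaluate': {'passed': 'result == "{result}"'},
}
variables = {'result': 'PASS'}

updates = {}
for section in ('result_parse', 'result_evaluate'):
    updates[section] = resolve(cfg[section], variables)

cfg['result_parse'] = updates['result_parse']
cfg['result_evaluate'] = updates['result_evaluate']
print(cfg['result_parse'])  # {'regex': '^Result: PASS$'}
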
Example No. 10
    def setUp(self):
        """Initialize plugins and setup a resolver."""
        plugins.initialize_plugins(self.pav_cfg)

        self.resolver = resolver.TestConfigResolver(self.pav_cfg)
Example No. 11
    def update_results(self, pav_cfg: dict, tests: List[TestRun]) -> bool:
        """Update each of the given tests with the result section from the
        current version of their configs. Then rerun result processing and
        update the results in the test object (but change nothing on disk).

        :param pav_cfg: The pavilion config.
        :param tests: A list of test objects to update.
        :returns: True if successful, False otherwise. Will handle
            printing of any failure related errors.
        """

        reslvr = resolver.TestConfigResolver(pav_cfg)

        for test in tests:

            # Re-load the raw config using the saved name, host, and modes
            # of the original test.
            try:
                test_name = '.'.join((test.config['suite'],
                                      test.config['name']))

                configs = reslvr.load_raw_configs(
                    tests=[test_name],
                    host=test.config['host'],
                    modes=test.config['modes'],
                )
            except resolver.TestConfigError as err:
                output.fprint(
                    "Test '{}' could not be found: {}"
                    .format(test.name, err.args[0]),
                    color=output.RED, file=self.errfile)
                return False

            # These conditions guard against unexpected results from
            # load_raw_configs. They may not be possible.
            if not configs:
                output.fprint(
                    "No configs found for test '{}'. Skipping update."
                    .format(test.name), color=output.YELLOW, file=self.errfile)
                continue
            elif len(configs) > 1:
                output.fprint(
                    "Test '{}' somehow matched multiple configs."
                    "Skipping update.".format(test.name),
                    color=output.YELLOW, file=self.errfile)
                continue

            cfg = configs[0]
            updates = {}

            for section in 'result_parse', 'result_evaluate':
                # Try to resolve the updated result section of the config using
                # the original variable values.
                try:
                    updates[section] = reslvr.resolve_section_values(
                        component=cfg[section],
                        var_man=test.var_man,
                    )
                except resolver.TestConfigError as err:
                    output.fprint(
                        "Test '{}' had a {} section that could not be "
                        "resolved with it's original variables: {}"
                        .format(test.name, section, err.args[0])
                    )
                    return False
                except RuntimeError as err:
                    output.fprint(
                        "Unexpected error updating {} section for test "
                        "'{}': {}".format(section, test.name, err.args[0]),
                        color=output.RED, file=self.errfile)
                    return False

            # Set the test's result section to the newly resolved one.
            test.config['result_parse'] = updates['result_parse']
            test.config['result_evaluate'] = updates['result_evaluate']

            try:
                check_config(test.config['result_parse'],
                             test.config['result_evaluate'])
            except TestRunError as err:
                output.fprint(
                    "Error found in results configuration: {}"
                    .format(err.args[0]),
                    color=output.RED, file=self.errfile)
                return False

            # The new results will be attached to the test (but not saved).
            test.gather_results(test.results['return_value'], regather=True)

        return True