Example #1
0
    def _get_tests(self, pav_cfg, tests_arg, full_arg):
        """Resolve test/series id arguments into integer test ids.

        :param pav_cfg: The pavilion configuration object.
        :param list tests_arg: Test or series id strings; series ids are
            prefixed with 's' (e.g. 's12'). Mutated in place when empty to
            hold the user's last series id.
        :param bool full_arg: Full results were requested, so only a single
            test should be returned.
        :return: An iterator (``map`` object) of integer test ids.
        """
        if not tests_arg:
            # Get the last series ran by this user.
            series_id = series.TestSeries.load_user_series_id(pav_cfg)
            if series_id is not None:
                tests_arg.append(series_id)

        # With full results requested, only the first argument is considered.
        if len(tests_arg) > 1 and full_arg:
            tests_arg = [tests_arg[0]]

        test_list = []
        for test_id in tests_arg:
            if test_id.startswith('s'):
                # A series id; expand it into its member test ids.
                try:
                    test_list.extend(
                        series.TestSeries.from_id(pav_cfg,
                                                  int(test_id[1:])).tests)
                except series.TestSeriesError as err:
                    self.logger.warning("Suite %s could not be found.\n%s",
                                        test_id[1:], err)
                    continue
            else:
                test_list.append(test_id)

        if full_arg:
            # A single series argument may still have expanded into several
            # tests; warn and keep just the first.
            if len(test_list) > 1:
                output.fprint(
                    "Requested full test results but provided multiple tests. "
                    "Giving results for only the first found.",
                    color=output.YELLOW,
                    file=sys.stdout,
                )
                test_list = [test_list[0]]

        return map(int, test_list)
Example #2
0
    def run(self, pav_cfg, args):
        """Gathers and prints the statuses from the specified test runs and/or
        series."""

        try:
            test_ids = cmd_utils.arg_filtered_tests(
                pav_cfg, args, verbose=self.errfile)
        except ValueError as err:
            output.fprint(err.args[0], color=output.RED, file=self.errfile)
            return errno.EINVAL

        statuses = status_utils.get_statuses(pav_cfg, test_ids)

        # Dispatch on the requested output flavor via guard clauses.
        if args.summary:
            return self.print_summary(statuses)

        if not args.history:
            return status_utils.print_status(statuses, self.outfile, args.json)

        # History mode only makes sense for exactly one test.
        if len(test_ids) != 1:
            output.fprint("'--history' flag requires a single test id, "
                          "got: {}"
                          .format(test_ids),
                          file=self.errfile,
                          color=output.RED)
            return 1

        return status_utils.print_status_history(pav_cfg, test_ids[-1],
                                                 self.outfile, args.json)
Example #3
0
    def get_fields(self, fields_arg: str, mode_arg: str,
                   avail_fields: List[str], default_single_field: str,
                   default_fields: List[str]) -> (List[str], str):
        """Get the fields and updated output mode.

        :param fields_arg: The fields given by the user (if any).
        :param mode_arg: The output mode.
        :param avail_fields: The available fields.
        :param default_single_field: Default field for basic/newline modes.
        :param default_fields: The default fields for long/csv modes.
        :return: The fields list and an updated mode. NOTE(review): on an
            invalid field this returns ``errno.EINVAL`` (a bare int) instead
            of the documented tuple -- callers must handle that case.
        """

        fields = []
        if fields_arg:
            fields = [field.strip() for field in fields_arg.split(',')]
            for field in fields:
                if field not in avail_fields:
                    # Bug fix: the quote around the suggested option was
                    # unterminated in the original message.
                    output.fprint("Invalid output field '{}'. See 'pav list "
                                  "--show-fields'.".format(field))
                    return errno.EINVAL

        # More than one field forces a tabular (long) mode.
        if (len(fields) > 1
                and mode_arg not in (self.OUTMODE_LONG, self.OUTMODE_CSV)):
            mode_arg = self.OUTMODE_LONG

        if not fields:
            if mode_arg in (self.OUTMODE_LONG, self.OUTMODE_CSV):
                fields = default_fields
            else:
                fields = [default_single_field]

        return fields, mode_arg
Example #4
0
    def _result_parsers_cmd(self, _, args):
        """Show all the result parsers."""

        if args.doc:
            # Show the documentation for a single, named parser.
            try:
                doc_plugin = parsers.get_plugin(args.doc)
            except pavilion.result.common.ResultError:
                output.fprint("Invalid result parser '{}'.".format(args.doc),
                              color=output.RED)
                return errno.EINVAL

            output.fprint(doc_plugin.doc(), file=self.outfile)
            return

        # Otherwise, tabulate every known parser.
        rows = []
        for name in parsers.list_plugins():
            plugin = parsers.get_plugin(name)
            rows.append({
                'name': name,
                'description': " ".join(str(plugin.__doc__).split()),
                'path': plugin.path
            })

        columns = ['name', 'description']
        if args.verbose:
            columns.append('path')

        output.draw_table(self.outfile,
                          fields=columns,
                          rows=rows,
                          title="Available Result Parsers")
Example #5
0
    def _get_tests(self,
                   pav_cfg,
                   args,
                   mb_tracker,
                   build_only=False,
                   local_builds_only=False):
        """Turn the test run arguments into actual TestRun objects.

        :param pav_cfg: The pavilion config object
        :param args: The run command arguments
        :param MultiBuildTracker mb_tracker: The build tracker.
        :param bool build_only: Whether to denote that we're only building
            these tests.
        :param bool local_builds_only: Only include tests that would be built
            locally.
        :return: A dict of test lists by scheduler, or None on error.
        :rtype: {}
        """

        overrides = self._parse_overrides(args.overrides)

        sys_vars = system_variables.get_vars(True)

        try:
            configs_by_sched = self._get_test_configs(
                pav_cfg=pav_cfg,
                host=args.host,
                test_files=args.files,
                tests=args.tests,
                modes=args.modes,
                overrides=overrides,
                sys_vars=sys_vars,
            )

            # Remove non-local builds when doing only local builds.
            # (Idiom fix: the original marked entries None and filtered in a
            # second pass; a single comprehension does the same thing.)
            if build_only and local_builds_only:
                for sched, sched_cfgs in configs_by_sched.items():
                    configs_by_sched[sched] = [
                        (config, var_man)
                        for config, var_man in sched_cfgs
                        if config['build']['on_nodes'].lower() != 'true']

            tests_by_sched = self._configs_to_tests(
                pav_cfg=pav_cfg,
                configs_by_sched=configs_by_sched,
                mb_tracker=mb_tracker,
                build_only=build_only,
                rebuild=args.rebuild,
            )

        except commands.CommandError as err:
            # Our error messages get escaped to a silly degree
            err = codecs.decode(str(err), 'unicode-escape')
            fprint(err, file=self.errfile, flush=True)
            return None

        return tests_by_sched
Example #6
0
 def pvalue(header, *values):
     """An item header."""
     # NOTE(review): `self` is a free variable here -- this function only
     # works when defined inside a method/closure that provides
     # `self.outfile`. Confirm the enclosing scope before reuse.
     # Print the header in cyan, then each value on the same stream.
     output.fprint(header,
                   color=output.CYAN,
                   file=self.outfile,
                   end=' ')
     for val in values:
         output.fprint(val, file=self.outfile)
Example #7
0
    def run(self, pav_cfg, args):
        """Gather and print the statuses of the requested tests.

        :param pav_cfg: The pavilion configuration.
        :param args: Parsed arguments; uses ``all`` and ``json``.
        :return: The print_status result, or 1 on a gathering error.
        """
        try:
            if not args.all:
                test_statuses = get_statuses(pav_cfg, args, self.errfile)
            else:
                test_statuses = get_all_tests(pav_cfg, args, self.errfile)
        except commands.CommandError as err:
            # Bug fix: route the error to errfile instead of the default
            # stream, consistent with the rest of this command's output.
            output.fprint("Status Error:", err, color=output.RED,
                          file=self.errfile)
            return 1

        return print_status(test_statuses, self.outfile, args.json)
Example #8
0
    def run(self, pav_cfg, args):
        """Gathers and prints the statuses from the specified test runs and/or
        series.

        :return: The print_status result, or 1 on a gathering error.
        """
        try:
            if not args.all:
                test_statuses = get_statuses(pav_cfg, args, self.errfile)
            else:
                # NOTE(review): unlike the branch above, no errfile is passed
                # here -- confirm get_all_tests' expected signature.
                test_statuses = get_all_tests(pav_cfg, args)
        except commands.CommandError as err:
            # Bug fix: route the error to errfile instead of the default
            # stream.
            output.fprint("Status Error:", err, color=output.RED,
                          file=self.errfile)
            return 1

        return print_status(test_statuses, self.outfile, args.json)
Example #9
0
    def run(self, pav_cfg, args):
        """Print the directory tree of the given test run.

        :param pav_cfg: The pavilion configuration.
        :param args: Parsed arguments; uses ``job_id``.
        :return: 0 on success, an errno code on failure.
        """
        test_dir = pav_cfg.working_dir / 'test_runs'
        job_dir = utils.make_id_path(test_dir, args.job_id)

        # Idiom fix: use pathlib's is_dir() rather than os.path on as_posix().
        if not job_dir.is_dir():
            output.fprint("directory '{}' does not exist."
                          .format(job_dir.as_posix()),
                          file=sys.stderr, color=output.RED)
            # Bug fix: ENOENT ("no such file or directory"), not EEXIST
            # ("file exists"), for a missing directory.
            return errno.ENOENT

        level = 0
        print_directory(level, job_dir)
        return 0
Example #10
0
    def run(self, pav_cfg, args):
        """Run this command.

        :param pav_cfg: The pavilion configuration.
        :param args: Parsed arguments; uses ``job_id`` and ``file``.
        :return: The print_file result, or an errno code on failure.
        """

        test_dir = pav_cfg.working_dir / 'test_runs'
        job_dir = dir_db.make_id_path(test_dir, args.job_id)

        # Idiom fix: use pathlib's is_dir() rather than os.path on as_posix().
        if not job_dir.is_dir():
            output.fprint("directory '{}' does not exist."
                          .format(job_dir.as_posix()),
                          file=sys.stderr, color=output.RED)
            # Bug fix: ENOENT ("no such file or directory"), not EEXIST
            # ("file exists"), for a missing directory.
            return errno.ENOENT

        return self.print_file(job_dir / args.file)
Example #11
0
    def print_file(self, file):
        """Print the file at the given path.

        :param path file: The path to the file to print.
        :return: None on success, an errno code on failure.
        """

        try:
            # Fix: don't shadow the `file` path argument with the open
            # stream -- the except blocks below format the path.
            with file.open() as stream:
                while True:
                    block = stream.read(4096)
                    if not block:
                        break
                    output.fprint(block, width=None, file=self.outfile, end="")

        except FileNotFoundError:
            # Bug fix: sys.stderr was passed positionally, so fprint treated
            # it as a value to print rather than the output stream; it must
            # be the `file` keyword. Also return ENOENT (no such file), not
            # EEXIST (file exists).
            output.fprint("file '{}' does not exist.".format(file),
                          file=sys.stderr,
                          color=output.RED)
            return errno.ENOENT

        except IsADirectoryError:
            output.fprint("{} is a directory.".format(file),
                          file=sys.stderr,
                          color=output.RED)
            return errno.EINVAL

        except (IOError, OSError, PermissionError) as err:
            # Consistency fix: send this error to stderr like the others.
            output.fprint("Error opening file '{}': {}".format(file, err),
                          file=sys.stderr,
                          color=output.RED)
            return errno.EIO
Example #12
0
    def display_history(self, pav_cfg, args):
        """Format and display the status history of a single test run.

        Reads the test's raw status file and renders each state change as a
        table row.

        :param pav_cfg: The pavilion config.
        :param argparse namespace args: ``args.history`` holds the test id.
        :return: 0 on success, errno.EINVAL when the test doesn't exist.
        :rtype: int"""

        ret_val = 0
        # status_path locates the status file per test_run id.
        status_path = (pav_cfg.working_dir / 'test_runs' /
                       str(args.history).zfill(7) / 'status')

        try:
            test = TestRun.load(pav_cfg, args.history)
            name_final = test.name
            id_final = test.id
            states = []  # dictionary list for table output

            with status_path.open() as file:
                for line in file:
                    # Each line is split as '<timestamp> <state> <note...>'.
                    # NOTE(review): assumes exactly this layout with a
                    # '%Y-%m-%dT%H:%M:%S.%f' timestamp -- confirm against
                    # the status file writer.
                    val = line.split(' ', 2)
                    states.append({
                        'state':
                        val[1],
                        'time':
                        datetime.strptime(val[0], '%Y-%m-%dT%H:%M:%S.%f'),
                        'note':
                        val[2]
                    })
        except (TestRunError, TestRunNotFoundError):
            output.fprint("The test_id {} does not exist in your "
                          "working directory.".format(args.history),
                          file=self.errfile,
                          color=output.RED)
            return errno.EINVAL

        fields = ['state', 'time', 'note']
        # Timestamps are shown relative to now (e.g. '5 min ago').
        output.draw_table(
            outfile=self.outfile,
            field_info={'time': {
                'transform': output.get_relative_timestamp
            }},
            fields=fields,
            rows=states,
            title='Status history for test {} (id: {})'.format(
                name_final, id_final))

        return ret_val
Example #13
0
    def _configs_to_tests(pav_cfg,
                          configs_by_sched,
                          mb_tracker=None,
                          build_only=False,
                          rebuild=False,
                          outfile=None):
        """Convert the dictionary of test configs by scheduler into actual
        tests.

        :param pav_cfg: The Pavilion config
        :param dict[str,list] configs_by_sched: A dictionary of lists of test
            configs.
        :param Union[MultiBuildTracker,None] mb_tracker: The build tracker.
        :param bool build_only: Whether to only build these tests.
        :param bool rebuild: After figuring out what build to use, rebuild it.
        :param outfile: Stream for progress messages (None for silence).
        :return: A dict of TestRun lists keyed by scheduler name.
        :raises commands.CommandError: On test creation/config errors.
        """

        tests_by_sched = {}
        progress = 0
        # Total across all schedulers, for the progress percentage.
        # (Idiom fix: generator instead of a throwaway list.)
        tot_tests = sum(len(tests) for tests in configs_by_sched.values())

        # Idiom fix: iterate the (cfg, var_man) pairs directly rather than
        # indexing with range(len(...)).
        for sched_name, sched_configs in configs_by_sched.items():
            sched_tests = tests_by_sched.setdefault(sched_name, [])
            try:
                for cfg, var_man in sched_configs:
                    sched_tests.append(
                        TestRun(
                            pav_cfg=pav_cfg,
                            config=cfg,
                            var_man=var_man,
                            build_tracker=mb_tracker,
                            build_only=build_only,
                            rebuild=rebuild,
                        ))
                    progress += 1.0 / tot_tests
                    if outfile is not None:
                        fprint("Creating Test Runs: {:.0%}".format(progress),
                               file=outfile,
                               end='\r')
            except (TestRunError, TestConfigError) as err:
                # Chain the cause so the original traceback isn't lost.
                raise commands.CommandError(err) from err

        if outfile is not None:
            fprint('', file=outfile)

        return tests_by_sched
Example #14
0
    def test_cat(self):
        """Checking cat command functionality"""
        test = self._quick_test()

        cat_cmd = commands.get_command('cat')
        cat_cmd.outfile = io.StringIO()
        cat_cmd.errfile = io.StringIO()

        arg_parser = arguments.get_parser()
        arg_sets = (['cat', str(test.id), 'build.sh'], )

        for arg_set in arg_sets:
            args = arg_parser.parse_args(arg_set)
            cat_cmd.run(self.pav_cfg, args)
            # Bug fix: print the captured text; passing the StringIO object
            # itself would print its repr, not its contents.
            output.fprint(cat_cmd.outfile.getvalue())
            output.fprint(cat_cmd.errfile.getvalue())
Example #15
0
    def save_series_config(self) -> None:
        """Saves series config to a file.

        Writes to a '.tmp' file first and renames it into place, so the
        on-disk config is never half-written.
        """

        series_config_path = self.path/CONFIG_FN
        try:
            series_config_tmp = series_config_path.with_suffix('.tmp')
            with PermissionsManager(series_config_tmp,
                                    self.pav_cfg['shared_group'],
                                    self.pav_cfg['umask']), \
                    series_config_tmp.open('w') as config_file:
                config_file.write(json.dumps(self.config))

            # Reuse the tmp path computed above instead of rebuilding it.
            series_config_tmp.rename(series_config_path)
        except OSError:
            # Consistency fix: report to errfile like save_dep_graph does.
            fprint("Could not write series config to file. Cancelling.",
                   color=output.RED, file=self.errfile)
Example #16
0
    def run(self, pav_cfg, args):
        """Figure out which log the user wants and print it.

        :param pav_cfg: The pavilion configuration.
        :param args: Parsed arguments; ``log_cmd`` selects the log type,
            ``ts_id`` names the test run or series.
        :return: 0 on success, non-zero on error.
        """

        if args.log_cmd is None:
            self._parser.print_help(self.outfile)
            return errno.EINVAL
        else:
            cmd_name = args.log_cmd

        # 'series' ids load a TestSeries; anything else is a test run id.
        try:
            if cmd_name == 'series':
                test = series.TestSeries.from_id(pav_cfg, args.ts_id)
            else:
                test = test_run.TestRun.load(pav_cfg, int(args.ts_id))
        except test_run.TestRunError as err:
            output.fprint("Error loading test: {}".format(err),
                          color=output.RED,
                          file=self.errfile)
            return 1
        except series_config.SeriesConfigError as err:
            output.fprint("Error loading series: {}".format(err),
                          color=output.RED,
                          file=self.errfile)
            return 1

        # Each log sub-command maps to a path under the test/series dir.
        file_name = test.path / self.LOG_PATHS[cmd_name]

        if not file_name.exists():
            output.fprint("Log file does not exist: {}".format(file_name),
                          color=output.RED,
                          file=self.errfile)
            return 1

        try:
            with file_name.open() as file:
                # width=None disables wrapping; end='' avoids an extra
                # trailing newline.
                output.fprint(file.read(),
                              file=self.outfile,
                              width=None,
                              end='')
        except (IOError, OSError) as err:
            output.fprint("Could not read log file '{}': {}".format(
                file_name, err),
                          color=output.RED,
                          file=self.errfile)
            return 1

        return 0
Example #17
0
    def _get_tests(self, pav_cfg, args, mb_tracker, build_only=False,
                   local_builds_only=False):
        """Turn the test run arguments into actual TestRun objects.
        :param pav_cfg: The pavilion config object
        :param args: The run command arguments
        :param MultiBuildTracker mb_tracker: The build tracker.
        :param bool build_only: Whether to denote that we're only building
            these tests.
        :param bool local_builds_only: Only include tests that would be built
            locally.
        :return: The list of TestRun objects, or None on error.
        :rtype: []
        """

        try:
            test_configs = cmd_utils.get_test_configs(pav_cfg=pav_cfg,
                                                      host=args.host,
                                                      test_files=args.files,
                                                      tests=args.tests,
                                                      modes=args.modes,
                                                      overrides=args.overrides,
                                                      outfile=self.outfile)

            # Remove non-local builds when doing only local builds.
            # (Idiom fix: filter with a comprehension instead of an
            # append loop.)
            if build_only and local_builds_only:
                test_configs = [
                    ptest for ptest in test_configs
                    if ptest.config['build']['on_nodes'].lower() != 'true']

            test_list = cmd_utils.configs_to_tests(
                pav_cfg=pav_cfg,
                proto_tests=test_configs,
                mb_tracker=mb_tracker,
                build_only=build_only,
                rebuild=args.rebuild,
                outfile=self.outfile,
            )

        except commands.CommandError as err:
            fprint(err, file=self.errfile, flush=True)
            return None

        return test_list
Example #18
0
    def _run_sub_command(self, pav_cfg, args):
        """Find and run the subcommand.

        :param pav_cfg: The pavilion configuration.
        :param args: Parsed arguments; ``sub_cmd`` names the subcommand.
        :return: The subcommand's return code (0 when it returns None).
        :raises RuntimeError: If the named subcommand isn't registered.
        """

        cmd_name = args.sub_cmd

        if cmd_name is None:
            # Bug fix: the old message formatted cmd_name (always None on
            # this path) into the text; list the valid choices instead.
            output.fprint(
                "You must provide a sub command: {}."
                .format(', '.join(sorted(self.sub_cmds))),
                color=output.RED, file=self.errfile)
            self._parser.print_help(file=self.errfile)
            return errno.EINVAL

        if cmd_name not in self.sub_cmds:
            raise RuntimeError("Invalid sub-cmd '{}'".format(cmd_name))

        cmd_result = self.sub_cmds[cmd_name](self, pav_cfg, args)
        return 0 if cmd_result is None else cmd_result
Example #19
0
    def _series_cmd(self, pav_cfg, args):
        """Print info on each series."""

        # Map each series attribute name to its documentation string.
        series_attrs = {attr: SeriesInfo.attr_doc(attr)
                        for attr in SeriesInfo.list_attrs()}

        if args.show_fields:
            for attr_name, attr_doc in series_attrs.items():
                output.fprint(attr_name, '-', attr_doc, file=self.outfile)
            return 0

        fields, mode = self.get_fields(
            fields_arg=args.out_fields,
            mode_arg=args.output_mode,
            default_single_field='sid',
            default_fields=self.SERIES_LONG_FIELDS,
            avail_fields=list(series_attrs.keys()),
        )

        matches_filter = filters.make_series_filter(
            complete=args.complete,
            incomplete=args.incomplete,
            newer_than=args.newer_than,
            older_than=args.older_than,
            sys_name=args.sys_name)

        sort_func, sort_ascending = filters.get_sort_opts(
            sort_name=args.sort_by, choices=filters.SERIES_SORT_FUNCS)

        # Renamed from 'series' to avoid shadowing the likely module import.
        found_series = dir_db.select(
            id_dir=pav_cfg.working_dir / 'series',
            filter_func=matches_filter,
            transform=SeriesInfo,
            order_func=sort_func,
            order_asc=sort_ascending,
        )
        self.write_output(
            mode=mode,
            rows=[sinfo.attr_dict() for sinfo in found_series],
            fields=fields,
            header=args.header,
            vsep=args.vsep,
            wrap=args.wrap,
        )
Example #20
0
    def run(self, pav_cfg, args):
        """Gathers and prints the statuses from the specified test runs and/or
        series."""

        try:
            test_ids = cmd_utils.arg_filtered_tests(pav_cfg, args,
                                                    verbose=self.errfile)
        except ValueError as err:
            output.fprint(err.args[0], color=output.RED, file=self.errfile)
            return errno.EINVAL

        statuses = status_utils.get_statuses(pav_cfg, test_ids)

        # Either a condensed summary or the full status listing.
        if args.summary:
            return self.print_summary(statuses)

        return status_utils.print_status(statuses, self.outfile, args.json)
Example #21
0
    def show_full_config(self, pav_cfg, cfg_name, conf_type):
        """Show the full config of a given host/mode.

        :param pav_cfg: The pavilion configuration.
        :param str cfg_name: The host/mode config name to show.
        :param str conf_type: The config category (e.g. 'hosts', 'modes').
        :return: None on success, errno.EINVAL when no config was found.
        """

        file = resolver.TestConfigResolver(pav_cfg).find_config(
            conf_type, cfg_name)
        config_data = None
        if file is not None:
            with file.open() as config_file:
                config_data = file_format.TestConfigLoader()\
                              .load_raw(config_file)

        if config_data is not None:
            output.fprint(pprint.pformat(config_data, compact=True),
                          file=self.outfile)
        else:
            # Bug fix: strip('s') removes *leading* 's' characters too
            # (e.g. 'series' -> 'erie'); rstrip only de-pluralizes the end.
            output.fprint("No {} config found for "
                          "{}.".format(conf_type.rstrip('s'), cfg_name))
            return errno.EINVAL
Example #22
0
    def save_dep_graph(self):
        """Write dependency tree and config in series dir.

        Writes to a '.tmp' file first and renames it into place, so the
        on-disk file is never half-written.
        """

        series_dep_path = self.path/DEPENDENCY_FN
        series_dep_tmp = series_dep_path.with_suffix('.tmp')
        try:
            with PermissionsManager(series_dep_tmp,
                                    self.pav_cfg['shared_group'],
                                    self.pav_cfg['umask']), \
                  series_dep_tmp.open('w') as dep_file:
                dep_file.write(json.dumps(self.dep_graph))

            # Reuse the tmp path computed above instead of rebuilding it.
            series_dep_tmp.rename(series_dep_path)
        except OSError:
            fprint("Could not write dependency tree to file. Cancelling.",
                   color=output.RED, file=self.errfile)
Example #23
0
def configs_to_tests(pav_cfg,
                     proto_tests: List[test_config.ProtoTest],
                     mb_tracker: Union[MultiBuildTracker, None] = None,
                     build_only: bool = False,
                     rebuild: bool = False,
                     outfile: TextIO = None) -> List[TestRun]:
    """Convert configs/var_man tuples into actual
    tests.

    :param pav_cfg: The Pavilion config
    :param proto_tests: A list of test configs.
    :param mb_tracker: The build tracker.
    :param build_only: Whether to only build these tests.
    :param rebuild: After figuring out what build to use, rebuild it.
    :param outfile: Output file for printing messages
    :raises commands.CommandError: On test creation/config errors.
    """

    test_list = []
    progress = 0
    tot_tests = len(proto_tests)

    for ptest in proto_tests:
        try:
            test_list.append(
                TestRun(pav_cfg=pav_cfg,
                        config=ptest.config,
                        var_man=ptest.var_man,
                        build_tracker=mb_tracker,
                        build_only=build_only,
                        rebuild=rebuild))
            progress += 1.0 / tot_tests
            if outfile is not None:
                output.fprint("Creating Test Runs: {:.0%}".format(progress),
                              file=outfile,
                              end='\r')
        except (TestRunError, TestConfigError) as err:
            # Bug fix: chain the cause so the original traceback isn't lost.
            raise commands.CommandError(err.args[0]) from err

    if outfile is not None:
        output.fprint('', file=outfile)

    return test_list
Example #24
0
def get_tests(pav_cfg, args, errfile):
    """Gets the tests depending on arguments.

    :param pav_cfg: The pavilion config
    :param argparse namespace args: The tests via command line args.
    :param errfile: stream to output errors as needed
    :return: List of test objects
    :raises commands.CommandError: If no tests were given and no last
        series exists.
    """

    if not args.tests:
        # Get the last series ran by this user
        series_id = series.TestSeries.load_user_series_id(pav_cfg)
        if series_id is not None:
            args.tests.append(series_id)
        else:
            raise commands.CommandError(
                "No tests specified and no last series was found."
            )

    test_list = []

    for test_id in args.tests:
        # Series
        if test_id.startswith('s'):
            # NOTE(review): the full 's'-prefixed id is passed here, while
            # other call sites pass int(test_id[1:]) -- confirm which form
            # TestSeries.from_id expects.
            try:
                test_list.extend(series.TestSeries.from_id(pav_cfg,
                                                           test_id).tests)
            except series.TestSeriesError as err:
                output.fprint(
                    "Suite {} could not be found.\n{}"
                    .format(test_id, err),
                    file=errfile,
                    color=output.RED
                )
                continue
        # Test
        else:
            test_list.append(test_id)

    # Ids are returned as plain ints.
    test_list = list(map(int, test_list))
    return test_list
Example #25
0
    def _result_parsers_cmd(self, _, args):
        """Show all the result parsers.

        With ``--config``, dump the YAML config spec for one parser;
        otherwise list every parser in a table.
        """

        if args.config:
            try:
                res_plugin = parsers.get_plugin(args.config)
            except pavilion.result.base.ResultError:
                output.fprint("Invalid result parser '{}'.".format(
                    args.config),
                              color=output.RED)
                return errno.EINVAL

            config_items = res_plugin.get_config_items()

            # One-off loader so we can dump just this parser's config
            # elements as YAML.
            class Loader(yaml_config.YamlConfigLoader):
                """Loader for just a result parser's config."""
                ELEMENTS = config_items

            Loader().dump(self.outfile)

        else:

            rps = []
            for rp_name in parsers.list_plugins():
                res_plugin = parsers.get_plugin(rp_name)
                # Collapse the docstring's whitespace onto a single line.
                desc = " ".join(str(res_plugin.__doc__).split())
                rps.append({
                    'name': rp_name,
                    'description': desc,
                    'path': res_plugin.path
                })

            fields = ['name', 'description']

            if args.verbose:
                fields.append('path')

            output.draw_table(self.outfile,
                              field_info={},
                              fields=fields,
                              rows=rps,
                              title="Available Result Parsers")
Example #26
0
    def run(self, pav_cfg, args):
        """Resolve the test configurations into individual tests and assign to
        schedulers. Have those schedulers kick off jobs to run the individual
        tests themselves.

        :param pav_cfg: The pavilion configuration.
        :param args: Parsed arguments; uses ``overrides``, ``test``, ``host``
            and ``modes``.
        :return: errno.EINVAL on bad input/config errors, otherwise None.
        """

        overrides = {}
        for ovr in args.overrides:
            # Overrides must be 'key=value' pairs.
            if '=' not in ovr:
                fprint(
                    "Invalid override value. Must be in the form: "
                    "<key>=<value>. Ex. -c run.modules=['gcc'] ",
                    file=self.errfile)
                return errno.EINVAL

            key, value = ovr.split('=', 1)
            overrides[key] = value

        tests = [args.test]

        self.logger.debug("Finding Configs")

        sys_vars = system_variables.get_vars(True)

        try:
            configs = self._get_test_configs(
                pav_cfg=pav_cfg,
                host=args.host,
                test_files=[],
                tests=tests,
                modes=args.modes,
                overrides=overrides,
                sys_vars=sys_vars,
            )
        except commands.CommandError as err:
            fprint(err, file=self.errfile, color=output.RED)
            return errno.EINVAL

        # Flatten the per-scheduler config lists into one list.
        configs = sum(configs.values(), [])

        for config in configs:
            pprint.pprint(config, stream=self.outfile)  # ext-print: ignore
Example #27
0
    def check_result_parsers(self, tests):
        """Make sure the result parsers for each test are ok."""

        # Collect every bad config first so the user sees all problems
        # at once instead of one per invocation.
        bad_configs = []
        for test in tests:
            try:
                result_parsers.check_args(test.config['results'])
            except TestRunError as err:
                bad_configs.append(str(err))

        if not bad_configs:
            return 0

        fprint("Result Parser configurations had errors:",
               file=self.errfile, color=output.RED)
        for message in bad_configs:
            fprint(message, bullet=' - ', file=self.errfile)
        return errno.EINVAL
Example #28
0
    def _result_prune_cmd(self, pav_cfg, args):
        """Remove matching results from the results log."""

        try:
            pruned = result.prune_result_log(pav_cfg.result_log, args.ids)
        except pavilion.result.common.ResultError as err:
            output.fprint(err.args[0], file=self.errfile, color=output.RED)
            return errno.EACCES

        # Report what was removed, as JSON or a table per the user's flag.
        if args.json:
            output.json_dump(obj=pruned, file=self.outfile)
            return

        output.draw_table(
            outfile=self.outfile,
            fields=['id', 'uuid', 'name', 'result', 'created'],
            rows=pruned,
            title="Pruned Results")
Example #29
0
    def run(self, pav_cfg, args):
        """Set the given state/note on the named test run's status."""

        # Zero is given as the default when running test scripts outside of
        # Pavilion.
        if args.test == 0:
            return 0

        try:
            test_run = TestRun.load(pav_cfg, args.test)
        except (TestRunError, TestRunNotFoundError) as load_err:
            output.fprint("Test {} could not be opened.\n{}"
                          .format(args.test, load_err),
                          color=output.RED,
                          file=self.errfile)
            return errno.EINVAL

        test_run.status.set(args.state, args.note)
        return 0
Example #30
0
    def check_result_format(self, tests):
        """Make sure the result parsers for each test are ok."""

        # Gather (test, message) pairs for every bad result config so all
        # problems are reported together.
        bad_configs = []
        for test in tests:
            try:
                result.check_config(test.config['result_parse'],
                                    test.config['result_evaluate'])
            except result.ResultError as err:
                bad_configs.append((test, str(err)))

        if not bad_configs:
            return 0

        fprint("Result Parser configurations had errors:",
               file=self.errfile, color=output.RED)
        for bad_test, message in bad_configs:
            fprint(bad_test.name, '-', message, file=self.errfile)
        return errno.EINVAL