Example #1
def print_status_history(pav_cfg: dict,
                         test_id: str,
                         outfile: TextIO,
                         json: bool = False):
    """Print the status history for a given test object.

    :param pav_cfg: Base pavilion configuration.
    :param test_id: Single test ID.
    :param outfile: Stream to which the status history should be printed.
    :param json: Whether the output should be a JSON object or not.
    :return: 0 for success.
    """

    test = TestRun.load(pav_cfg, int(test_id))
    status_history = status_history_from_test_obj(test)

    ret_val = 1
    for status in status_history:
        if status['note'] != "Test not found.":
            ret_val = 0
    if json:
        json_data = {'status_history': status_history}
        output.json_dump(json_data, outfile)
    else:
        fields = ['state', 'time', 'note']
        output.draw_table(
            outfile=outfile,
            field_info={'time': {
                'transform': output.get_relative_timestamp
            }},
            fields=fields,
            rows=status_history,
            title='Test {} Status History ({})'.format(test.id, test.name))

    return ret_val
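
A quick, hypothetical way to exercise the JSON branch of print_status_history above; the pav_cfg object and the test ID '42' are assumed to exist, and io.StringIO stands in for the output stream so the dumped JSON can be inspected:

import io

buf = io.StringIO()
# json=True routes the history through output.json_dump instead of draw_table.
ret = print_status_history(pav_cfg, '42', buf, json=True)
print(ret, buf.getvalue())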
Example #2
def print_status(statuses, outfile, json=False):
    """Prints the statuses provided in the statuses parameter.

:param list statuses: list of dictionary objects containing the test
                      ID, name, state, time of state update, and note
                      associated with that state.
:param bool json: Whether state should be printed as a JSON object or
                  not.
:param stream outfile: Stream to which the statuses should be printed.
:return: success or failure.
:rtype: int
"""

    ret_val = 1
    for stat in statuses:
        if stat['note'] != "Test not found.":
            ret_val = 0
    if json:
        json_data = {'statuses': statuses}
        output.json_dump(json_data, outfile)
    else:
        fields = ['test_id', 'name', 'state', 'time', 'note']
        output.draw_table(
            outfile=outfile,
            field_info={
                'time': {'transform': output.get_relative_timestamp}
            },
            fields=fields,
            rows=statuses,
            title='Test statuses')

    return ret_val
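
A small sketch of driving print_status directly, with made-up status dictionaries shaped like the table fields ('test_id', 'name', 'state', 'time', 'note'):

import io
import time

statuses = [
    {'test_id': 1, 'name': 'example.test', 'state': 'COMPLETE',
     'time': time.time(), 'note': 'Finished.'},
]
buf = io.StringIO()
# Returns 0 because no entry carries the "Test not found." note.
print(print_status(statuses, buf, json=True))
print(buf.getvalue())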
Example #3
    def run(self, pav_cfg, args):
        """Print the test results in a variety of formats."""

        test_ids = self._get_tests(pav_cfg, args.tests, args.full)

        tests = []
        for id_ in test_ids:
            try:
                tests.append(TestRun.load(pav_cfg, id_))
            except TestRunError as err:
                self.logger.warning("Could not load test %s - %s", id_, err)
            except TestRunNotFoundError as err:
                self.logger.warning("Could not find test %s - %s", id_, err)

        results = [test.results for test in tests]

        all_keys = set()
        for res in results:
            all_keys = all_keys.union(res.keys())

        all_keys = list(all_keys.difference(['result', 'name', 'id']))
        # Sort the keys by the length of their values across all results.
        all_keys.sort(
            key=lambda k: max(len(str(res.get(k, ''))) for res in results))

        if args.json:
            output.json_dump(results, self.outfile)
            return 0

        if args.full:
            try:
                pprint.pprint(results)  # ext-print: ignore
            except OSError:
                # It's ok if this fails. Generally means we're piping to
                # another command.
                pass
            return 0
        else:
            fields = ['name', 'id', 'result'] + sum(args.key, list())

        output.draw_table(outfile=self.outfile,
                          field_info={},
                          fields=fields,
                          rows=results,
                          title="Test Results")
Example #4
    def run(self, pav_cfg, args):

        test_ids = self._get_tests(pav_cfg, args.tests, args.full)

        tests = []
        for id_ in test_ids:
            try:
                tests.append(TestRun.load(pav_cfg, id_))
            except TestRunError as err:
                self.logger.warning("Could not load test %s - %s", id_, err)
            except TestRunNotFoundError as err:
                self.logger.warning("Could not find test %s - %s", id_, err)

        results = []
        for test in tests:
            res = test.load_results()
            if res is None:
                res = {'name': test.name, 'id': test.id, 'result': ''}

            results.append(res)

        all_keys = set()
        for res in results:
            all_keys = all_keys.union(res.keys())

        all_keys = list(all_keys.difference(['result', 'name', 'id']))
        # Sort the keys by the length of their values across all results.
        all_keys.sort(
            key=lambda k: max(len(str(res.get(k, ''))) for res in results))

        if args.json:
            output.json_dump(results, self.outfile)
            return 0

        if args.full:
            pprint.pprint(results)  # ext-print: ignore
            return 0
        else:
            fields = ['name', 'id', 'result'] + sum(args.key, list())

        output.draw_table(outfile=self.outfile,
                          field_info={},
                          fields=fields,
                          rows=results,
                          title="Test Results")
Example #5
    def _result_prune_cmd(self, pav_cfg, args):
        """Remove matching results from the results log."""

        try:
            pruned = result.prune_result_log(pav_cfg.result_log, args.ids)
        except pavilion.result.common.ResultError as err:
            output.fprint(err.args[0], file=self.errfile, color=output.RED)
            return errno.EACCES

        if args.json:
            output.json_dump(
                obj=pruned,
                file=self.outfile,
            )
        else:
            output.draw_table(
                outfile=self.outfile,
                fields=['id', 'uuid', 'name', 'result', 'created'],
                rows=pruned,
                title="Pruned Results")
Example #6
    def _save_config(self):
        """Save the configuration for this test to the test config file."""

        config_path = self.path / 'config'

        # make lock
        lock_path = self.path / 'config.lockfile'
        config_lock = lockfile.LockFile(lock_path,
                                        group=self._pav_cfg.shared_group)

        try:
            config_lock.lock()
            with config_path.open('w') as json_file:
                output.json_dump(self.config, json_file)
        except (OSError, IOError) as err:
            raise TestRunError(
                "Could not save TestRun ({}) config at {}: {}".format(
                    self.name, self.path, err))
        except TypeError as err:
            raise TestRunError("Invalid type in config for ({}): {}".format(
                self.name, err))
        finally:
            config_lock.unlock()
Example #7
    def _save_config(self):
        """Save the configuration for this test to the test config file."""

        config_path = self.path / 'config'

        # make lock
        tmp_path = config_path.with_suffix('.tmp')

        try:
            with PermissionsManager(config_path, self.group, self.umask), \
                    tmp_path.open('w') as json_file:
                output.json_dump(self.config, json_file)
                try:
                    config_path.unlink()
                except OSError:
                    pass
                tmp_path.rename(config_path)
        except (OSError, IOError) as err:
            raise TestRunError(
                "Could not save TestRun ({}) config at {}: {}".format(
                    self.name, self.path, err))
        except TypeError as err:
            raise TestRunError("Invalid type in config for ({}): {}".format(
                self.name, err))
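
The two _save_config variants differ mainly in how they guard the write: Example #6 serializes writers with a lockfile, while Example #7 writes the JSON to a '.tmp' file and renames it over the real config path, so readers never see a partially written file. A generic sketch of that write-then-rename pattern using only the standard library (the path and data here are placeholders):

import json
from pathlib import Path

def atomic_json_write(path: Path, data: dict):
    """Write data to a temporary sibling file, then rename it into place."""
    tmp = path.with_suffix('.tmp')
    with tmp.open('w') as tmp_file:
        json.dump(data, tmp_file)
    # Path.replace() overwrites the destination atomically on POSIX systems.
    tmp.replace(path)

atomic_json_write(Path('config'), {'name': 'example'})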
Example #8
    def run(self, pav_cfg, args):
        """Print the test results in a variety of formats."""

        test_ids = cmd_utils.arg_filtered_tests(pav_cfg,
                                                args,
                                                verbose=self.errfile)

        tests = []
        for id_ in test_ids:
            try:
                tests.append(TestRun.load(pav_cfg, id_))
            except TestRunError as err:
                self.logger.warning("Could not load test %s - %s", id_, err)
            except TestRunNotFoundError as err:
                self.logger.warning("Could not find test %s - %s", id_, err)

        log_file = None
        if args.show_log and args.re_run:
            log_file = io.StringIO()

        if args.re_run:
            if not self.update_results(pav_cfg, tests, log_file):
                return errno.EINVAL

        if args.save:
            if not self.update_results(pav_cfg, tests, log_file, save=True):
                return errno.EINVAL

        if args.json or args.full:
            if len(tests) > 1:
                results = {test.name: test.results for test in tests}
            elif len(tests) == 1:
                results = tests[0].results
            else:
                output.fprint("Could not find any matching tests.",
                              color=output.RED,
                              file=self.outfile)
                return errno.EINVAL

            width = shutil.get_terminal_size().columns or 80

            try:
                if args.json:
                    output.json_dump(results, self.outfile)
                else:
                    pprint.pprint(
                        results,  # ext-print: ignore
                        stream=self.outfile,
                        width=width,
                        compact=True)
            except OSError:
                # It's ok if this fails. Generally means we're piping to
                # another command.
                pass

        else:
            fields = self.BASE_FIELDS + args.key
            results = [test.results for test in tests]

            output.draw_table(outfile=self.outfile,
                              field_info={
                                  'started': {
                                      'transform':
                                      output.get_relative_timestamp
                                  },
                                  'finished': {
                                      'transform':
                                      output.get_relative_timestamp
                                  },
                              },
                              fields=fields,
                              rows=results,
                              title="Test Results")

        if args.show_log:
            if log_file is not None:
                output.fprint(log_file.getvalue(),
                              file=self.outfile,
                              color=output.GREY)
            else:
                for test in tests:
                    output.fprint("\nResult logs for test {}\n".format(
                        test.name),
                                  file=self.outfile)
                    if test.results_log.exists():
                        with test.results_log.open() as log_file:
                            output.fprint(log_file.read(),
                                          color=output.GREY,
                                          file=self.outfile)
                    else:
                        output.fprint("<log file missing>",
                                      file=self.outfile,
                                      color=output.YELLOW)

        return 0
Example #9
    def run(self, pav_cfg, args):
        """Print the test results in a variety of formats."""

        test_ids = self._get_tests(pav_cfg, args.tests)

        tests = []
        for id_ in test_ids:
            try:
                tests.append(TestRun.load(pav_cfg, id_))
            except TestRunError as err:
                self.logger.warning("Could not load test %s - %s", id_, err)
            except TestRunNotFoundError as err:
                self.logger.warning("Could not find test %s - %s", id_, err)

        if args.re_run:
            if not self.update_results(pav_cfg, tests):
                return errno.EINVAL

        if args.json or args.full:
            if len(tests) > 1:
                results = {test.name: test.results for test in tests}
            else:
                # There should always be at least one test
                results = tests[0].results

            width = shutil.get_terminal_size().columns

            try:
                if args.json:
                    output.json_dump(results, self.outfile)
                else:
                    pprint.pprint(results,  # ext-print: ignore
                                  stream=self.outfile, width=width,
                                  compact=True)
            except OSError:
                # It's ok if this fails. Generally means we're piping to
                # another command.
                pass

            return 0

        else:
            fields = self.BASE_FIELDS + args.key
            results = [test.results for test in tests]

        def fix_timestamp(ts_str: str) -> str:
            """Read the timestamp text and get a minimized, formatted value."""
            try:
                when = datetime.datetime.strptime(ts_str,
                                                  '%Y-%m-%d %H:%M:%S.%f')
            except ValueError:
                return ''

            return output.get_relative_timestamp(when)

        output.draw_table(
            outfile=self.outfile,
            field_info={
                'started': {'transform': fix_timestamp},
                'finished': {'transform': fix_timestamp},
            },
            fields=fields,
            rows=results,
            title="Test Results"
        )

        return 0
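
The fix_timestamp helper in Example #9 only reformats values that match the stored '%Y-%m-%d %H:%M:%S.%f' layout and falls back to an empty string otherwise. A quick, self-contained check of that parsing step with a made-up timestamp:

import datetime

ts_str = '2021-03-01 12:30:45.123456'
when = datetime.datetime.strptime(ts_str, '%Y-%m-%d %H:%M:%S.%f')
print(when.isoformat())

try:
    datetime.datetime.strptime('not a timestamp', '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
    # fix_timestamp returns '' for values it cannot parse.
    print('')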