Example 1
    def test_forced_parser_defaults(self):
        """Make sure we honor the result parser's FORCED_DEFAULTS."""

        cfg = self._quick_test_cfg()
        cfg['result_parse'] = {
            'constant': {
                'foo': {
                    'const': 'bar',
                    'preceded_by': 'unsettable',
                }
            }
        }

        with self.assertRaises(pavilion.result.common.ResultError):
            result.check_config(cfg['result_parse'], {})

        test = self._quick_test(cfg, 'split_test')
        test.run()
        results = test.gather_results(0)

        self.assertNotIn('foo', results)
        self.assertTrue(results[result.RESULT_ERRORS][0].endswith(
            "This parser requires that you not set the 'preceded_by' key, as "
            "the default value is the only valid option."
        ))
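
The test above checks that check_config() rejects a config that explicitly sets a key the parser forces to its default. As a rough illustration of that rule (the ResultError stand-in, the ConstParser class, and the FORCED_DEFAULTS layout below are assumptions for the sketch, not Pavilion's actual definitions), a validator could compare the supplied keys against the parser's forced defaults:

class ResultError(ValueError):
    """Raised for invalid result configurations (illustrative stand-in)."""


class ConstParser:
    # Keys the user may not override; only the default value is valid.
    FORCED_DEFAULTS = {'preceded_by': []}


def check_forced_defaults(parser, parser_cfg):
    """Reject any forced-default key that was set to a non-default value."""
    for key, default in parser.FORCED_DEFAULTS.items():
        if parser_cfg.get(key, default) != default:
            raise ResultError(
                "This parser requires that you not set the '{}' key, as "
                "the default value is the only valid option.".format(key))


# Mirrors the failing config from the test: 'preceded_by' is user-set.
try:
    check_forced_defaults(ConstParser(),
                          {'const': 'bar', 'preceded_by': 'unsettable'})
except ResultError as err:
    print(err)
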
Example 2
    def check_result_format(self, tests):
        """Make sure the result parsers for each test are ok."""

        rp_errors = []
        for test in tests:

            # Make sure the result parsers have reasonable arguments.
            try:
                result.check_config(test.config['result_parse'],
                                    test.config['result_evaluate'])
            except result.ResultError as err:
                rp_errors.append((test, str(err)))

        if rp_errors:
            fprint("Result Parser configurations had errors:",
                   file=self.errfile, color=output.RED)
            for test, msg in rp_errors:
                fprint(test.name, '-', msg, file=self.errfile)
            return errno.EINVAL

        return 0
Example 3
    def update_results(self,
                       pav_cfg: dict,
                       tests: List[TestRun],
                       log_file: IO[str],
                       save: bool = False) -> bool:
        """Update each of the given tests with the result section from the
        current version of their configs. Then rerun result processing and
        update the results in the test object (but change nothing on disk).

        :param pav_cfg: The pavilion config.
        :param tests: A list of test objects to update.
        :param log_file: The logfile to log results to. May be None.
        :param save: Whether to save the updated results to the test's result
                     log. It will not update the general result log.
        :returns: True if successful, False otherwise. Will handle
            printing of any failure-related errors.
        """

        reslvr = resolver.TestConfigResolver(pav_cfg)

        for test in tests:

            # Re-load the raw config using the saved name, host, and modes
            # of the original test.
            try:
                test_name = '.'.join(
                    (test.config['suite'], test.config['name']))

                configs = reslvr.load_raw_configs(
                    tests=[test_name],
                    host=test.config['host'],
                    modes=test.config['modes'],
                )
            except resolver.TestConfigError as err:
                output.fprint("Test '{}' could not be found: {}".format(
                    test.name, err.args[0]),
                              color=output.RED,
                              file=self.errfile)
                return False

            # These conditions guard against unexpected results from
            # load_raw_configs. They may not be possible.
            if not configs:
                output.fprint(
                    "No configs found for test '{}'. Skipping update.".format(
                        test.name),
                    color=output.YELLOW,
                    file=self.errfile)
                continue
            elif len(configs) > 1:
                output.fprint("Test '{}' somehow matched multiple configs."
                              "Skipping update.".format(test.name),
                              color=output.YELLOW,
                              file=self.errfile)
                continue

            cfg = configs[0]
            updates = {}

            for section in 'result_parse', 'result_evaluate':
                # Try to resolve the updated result section of the config using
                # the original variable values.
                try:
                    updates[section] = reslvr.resolve_section_values(
                        component=cfg[section],
                        var_man=test.var_man,
                    )
                except resolver.TestConfigError as err:
                    output.fprint(
                        "Test '{}' had a {} section that could not be "
                        "resolved with it's original variables: {}".format(
                            test.name, section, err.args[0]),
                        file=self.errfile,
                        color=output.RED)
                    return False
                except RuntimeError as err:
                    output.fprint(
                        "Unexpected error updating {} section for test "
                        "'{}': {}".format(section, test.name, err.args[0]),
                        color=output.RED,
                        file=self.errfile)
                    return False

            # Set the test's result section to the newly resolved one.
            test.config['result_parse'] = updates['result_parse']
            test.config['result_evaluate'] = updates['result_evaluate']

            try:
                result.check_config(test.config['result_parse'],
                                    test.config['result_evaluate'])

            except result.ResultError as err:
                output.fprint(
                    "Error found in results configuration: {}".format(
                        err.args[0]),
                    color=output.RED,
                    file=self.errfile)
                return False

            if save:
                test.builder.tracker.update(state=STATES.RESULTS,
                                            note="Re-running results.")

            # Gather the new results; they'll be attached to the test and
            # only written to disk below when 'save' is set.
            results = test.gather_results(test.results.get('return_value', 1),
                                          regather=not save,
                                          log_file=log_file)

            if save:
                test.save_results(results)
                with test.results_log.open('a') as log_file:
                    log_file.write(
                        "Results were re-ran and saved on {}\n".format(
                            datetime.datetime.today().strftime('%m-%d-%Y')))
                    log_file.write("See results.json for updated results.\n")
                test.builder.tracker.update(
                    state=STATES.COMPLETE,
                    note="The test completed with result: {}".format(
                        results["result"]))

        return True
Example 4
    def _run(self, pav_cfg, test, sched):
        """Run an already prepped test in the current environment.
        :param pav_cfg: The pavilion configuration object.
        :param TestRun test: The test to run
        :param sched: The scheduler we're running under.
        :return:
        """

        # Optionally wait on other tests running under the same scheduler.
        # This depends on the scheduler and the test configuration.
        lock = sched.lock_concurrency(pav_cfg, test)

        try:
            run_result = test.run()
        except TestRunError as err:
            test.status.set(STATES.RUN_ERROR, err)
            return 1
        except TimeoutError:
            return 1
        except Exception:
            test.status.set(
                STATES.RUN_ERROR,
                "Unknown error while running test. Refer to the kickoff log.")
            raise
        finally:
            sched.unlock_concurrency(lock)

        try:
            # Make sure the result parsers have reasonable arguments.
            # We check here because the parser code itself will likely assume
            # the args are valid from _check_args, but those might not be
            # check-able before kickoff due to deferred variables.
            try:
                result.check_config(test.config['result_parse'],
                                    test.config['result_evaluate'])
            except result.ResultError as err:
                test.status.set(
                    STATES.RESULTS_ERROR,
                    "Error checking result parser configs: {}".format(
                        err.args[0]))
                return 1

            with PermissionsManager(test.results_log,
                                    group=test.group, umask=test.umask), \
                    test.results_log.open('w') as log_file:
                results = test.gather_results(run_result, log_file=log_file)

        except Exception as err:
            self.logger.error("Unexpected error gathering results: \n%s",
                              traceback.format_exc())
            test.status.set(
                STATES.RESULTS_ERROR,
                "Unexpected error parsing results: {}. (This is a "
                "bug, you should report it.)"
                "See 'pav log kickoff {}' for the full error.".format(
                    err, test.id))
            raise

        try:
            test.save_results(results)
        except Exception:
            test.status.set(
                STATES.RESULTS_ERROR,
                "Unknown error while saving results. Refer to the kickoff log."
            )
            raise

        try:
            test.status.set(
                STATES.COMPLETE, "The test completed with result: {}".format(
                    results.get('result', '<unknown>')))
        except Exception:
            test.status.set(
                STATES.UNKNOWN,
                "Unknown error while setting test completion. Refer to the "
                "kickoff log.")
            raise
Example 5
    def update_results(self, pav_cfg: dict, tests: List[TestRun]) -> bool:
        """Update each of the given tests with the result section from the
        current version of their configs. Then rerun result processing and
        update the results in the test object (but change nothing on disk).

        :param pav_cfg: The pavilion config.
        :param tests: A list of test objects to update.
        :returns: True if successful, False otherwise. Will handle
            printing of any failure-related errors.
        """

        reslvr = resolver.TestConfigResolver(pav_cfg)

        for test in tests:

            # Re-load the raw config using the saved name, host, and modes
            # of the original test.
            try:
                test_name = '.'.join((test.config['suite'],
                                      test.config['name']))

                configs = reslvr.load_raw_configs(
                    tests=[test_name],
                    host=test.config['host'],
                    modes=test.config['modes'],
                )
            except resolver.TestConfigError as err:
                output.fprint(
                    "Test '{}' could not be found: {}"
                    .format(test.name, err.args[0]),
                    color=output.RED, file=self.errfile)
                return False

            # These conditions guard against unexpected results from
            # load_raw_configs. They may not be possible.
            if not configs:
                output.fprint(
                    "No configs found for test '{}'. Skipping update."
                    .format(test.name), color=output.YELLOW, file=self.errfile)
                continue
            elif len(configs) > 1:
                output.fprint(
                    "Test '{}' somehow matched multiple configs."
                    "Skipping update.".format(test.name),
                    color=output.YELLOW, file=self.errfile)
                continue

            cfg = configs[0]
            updates = {}

            for section in 'result_parse', 'result_evaluate':
                # Try to resolve the updated result section of the config using
                # the original variable values.
                try:
                    updates[section] = reslvr.resolve_section_values(
                        component=cfg[section],
                        var_man=test.var_man,
                    )
                except resolver.TestConfigError as err:
                    output.fprint(
                        "Test '{}' had a {} section that could not be "
                        "resolved with it's original variables: {}"
                        .format(test.name, section, err.args[0])
                    )
                    return False
                except RuntimeError as err:
                    output.fprint(
                        "Unexpected error updating {} section for test "
                        "'{}': {}".format(section, test.name, err.args[0]),
                        color=output.RED, file=self.errfile)
                    return False

            # Set the test's result section to the newly resolved one.
            test.config['result_parse'] = updates['result_parse']
            test.config['result_evaluate'] = updates['result_evaluate']

            try:
                check_config(test.config['result_parse'],
                             test.config['result_evaluate'])
            except TestRunError as err:
                output.fprint(
                    "Error found in results configuration: {}"
                    .format(err.args[0]))
                return False

            # The new results will be attached to the test (but not saved).
            test.gather_results(test.results['return_value'], regather=True)

        return True
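
Across all of these examples the flow around the results machinery is the same: validate the 'result_parse' and 'result_evaluate' sections with result.check_config(), and only gather (and optionally save) results once that check passes. A condensed sketch of that pattern follows, assuming 'result' is the pavilion.result package used above; the process_results() function and its error reporting are illustrative assumptions, not code from Pavilion itself.

from pavilion import result


def process_results(test, run_return_value, log_file=None):
    """Validate the result config, then gather and save results (sketch)."""
    try:
        # Both result sections are validated in a single call, as in the
        # examples above.
        result.check_config(test.config['result_parse'],
                            test.config['result_evaluate'])
    except result.ResultError as err:
        # Report configuration problems before attempting any parsing.
        print("Invalid result config for '{}': {}"
              .format(test.name, err.args[0]))
        return None

    results = test.gather_results(run_return_value, log_file=log_file)
    test.save_results(results)
    return results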