Example 1
    def check_result_parsers(self, tests):
        """Make sure the result parsers for each test are ok."""

        rp_errors = []
        for test in tests:

            # Make sure the result parsers have reasonable arguments.
            try:
                result_parsers.check_args(test.config['results'])
            except TestRunError as err:
                rp_errors.append(str(err))

        if rp_errors:
            fprint("Result Parser configurations had errors:",
                   file=self.errfile, color=output.RED)
            for msg in rp_errors:
                fprint(msg, bullet=' - ', file=self.errfile)
            return errno.EINVAL

        return 0
Example 2
    def test_check_args(self):
        plugins.initialize_plugins(self.pav_cfg)

        # Make sure we can check arguments.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'ok', 'regex': r'foo'},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        result_parsers.check_args(test.config['results'])

        # Make sure duplicate keys aren't allowed.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'repeated', 'regex': r'foo'},
                    {'key': 'repeated', 'regex': r'foo'},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

        # Make sure we handle bad key names.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': '#!@123948aa', 'regex': r'foo'},
                ]
            }
        }
        with self.assertRaises(ValueError):
            self._quick_test(test_cfg, 'check_args_test')

        # Make sure we handle missing the 'key' attribute as expected.
        test_cfg = {
            'results': {
                'regex': [
                    {'regex': r'foo'},
                ]
            }
        }
        with self.assertRaises(ValueError):
            self._quick_test(test_cfg, 'check_args_test')

        # Make sure reserved keys aren't allowed.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'started', 'regex': r'foo'},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

        # Make sure a missing required argument for the parser plugin
        # ('regex' here) is caught.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'nope'},
                ]
            }
        }
        with self.assertRaises(yc.RequiredError):
            self._quick_test(test_cfg, 'check_args_test')

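        # Make sure invalid regular expressions are rejected.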
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'test', 'regex': '[[['},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

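        # An 'expected' range given in reverse order (low > high) should fail.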
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['12:11']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

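        # The same goes for a reversed range of negative values.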
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['-11:-12']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

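        # An 'expected' range with too many parts should fail.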
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['11:12:13']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

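        # Non-numeric 'expected' range bounds should fail.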
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['11:words']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

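        # Negative 'threshold' values should be rejected.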
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': 'res',
                        'threshold': '-5',
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

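        # Non-numeric 'threshold' values should be rejected.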
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': 'res',
                        'threshold': 'A',
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

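        # 'threshold' plus 'expected' (with a bad threshold) should also fail.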
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': 'res',
                        'threshold': 'A',
                        'expected': '10:12',
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])

        plugins._reset_plugins()
Example 3
    def _run(self, pav_cfg, test):
        """Run an already prepped test in the current environment.
        """

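        # Load the scheduler plugin named in the test's config.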
        try:
            sched = schedulers.get_plugin(test.scheduler)
        except Exception:
            test.status.set(STATES.BUILD_ERROR,
                            "Unknown error getting the scheduler. Refer to "
                            "the kickoff log.")
            raise

        # Re-add var sets that may have had deferred variables.
        try:
            var_man = VariableSetManager()
            var_man.add_var_set('sys', system_variables.get_vars(defer=False))
            sched_config = test.config[test.scheduler]
            var_man.add_var_set('sched', sched.get_vars(sched_config))
        except Exception:
            test.status.set(STATES.RUN_ERROR,
                            "Unknown error getting pavilion variables at "
                            "run time.")
            raise

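        # Finalize the test now that formerly deferred variables are resolved.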
        try:
            test.finalize(var_man)
        except Exception:
            test.status.set(STATES.RUN_ERROR,
                            "Unknown error finalizing test.")
            raise

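        # If the test is set to build on the allocated nodes, build it now.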
        try:
            if test.config['build']['on_nodes'] in ['true', 'True']:
                if not test.build():
                    self.logger.warning(
                        "Test {t.id} failed to build.".format(t=test))
        except Exception:
            test.status.set(STATES.BUILD_ERROR,
                            "Unknown build error. Refer to the kickoff log.")
            raise

        # Optionally wait on other tests running under the same scheduler.
        # This depends on the scheduler and the test configuration.
        lock = sched.lock_concurrency(pav_cfg, test)

        try:
            run_result = test.run()
        except TestRunError as err:
            test.status.set(STATES.RUN_ERROR, err)
            return 1
        except TimeoutError:
            return 1
        except Exception:
            test.status.set(
                STATES.RUN_ERROR,
                "Unknown error while running test. Refer to the kickoff log.")
            raise
        finally:
            sched.unlock_concurrency(lock)

        try:
            rp_errors = []
            # Make sure the result parsers have reasonable arguments.
            # We check here because the parser code itself will likely assume
            # the args were already validated by _check_args, but they might
            # not be checkable before kickoff due to deferred variables.
            try:
                result_parsers.check_args(test.config['results'])
            except TestRunError as err:
                rp_errors.append(str(err))

            if rp_errors:
                for msg in rp_errors:
                    test.status.set(STATES.RESULTS_ERROR, msg)
                test.set_run_complete()
                return 1

            results = test.gather_results(run_result)
        except result_parsers.ResultParserError as err:
            self.logger.error("Unexpected error gathering results: %s", err)
            test.status.set(STATES.RESULTS_ERROR,
                            "Error parsing results: {}".format(err))
            return 1

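        # Save the results, and record them in the results log.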
        try:
            test.save_results(results)

            result_logger = logging.getLogger('results')
            result_logger.info(output.json_dumps(results))
        except Exception:
            test.status.set(
                STATES.RESULTS_ERROR,
                "Unknown error while saving results. Refer to the kickoff log.")
            raise

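        # Mark the test as complete, noting its overall result.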
        try:
            test.status.set(STATES.COMPLETE,
                            "The test completed with result: {}"
                            .format(results.get('result', '<unknown>')))
        except Exception:
            test.status.set(
                STATES.UNKNOWN,
                "Unknown error while setting test completion. Refer to the "
                "kickoff log.")
            raise
Example 4
    def run(self, pav_cfg, args, out_file=sys.stdout, err_file=sys.stderr):
        """Resolve the test configurations into individual tests and assign to
        schedulers. Have those schedulers kick off jobs to run the individual
        tests themselves.
        :param pav_cfg: The pavilion configuration.
        :param args: The parsed command line argument object.
        :param out_file: The file object to output to (stdout)
        :param err_file: The file object to output errors to (stderr)
        """

        # 1. Resolve the test configs
        #   - Get sched vars from scheduler.
        #   - Compile variables.
        #

        overrides = {}
        for ovr in args.overrides:
            if '=' not in ovr:
                fprint(
                    "Invalid override value. Must be in the form: "
                    "<key>=<value>. Ex. -c run.modules=['gcc'] ",
                    file=self.errfile)
                return errno.EINVAL

            key, value = ovr.split('=', 1)
            overrides[key] = value

        sys_vars = system_variables.get_vars(True)

        try:
            configs_by_sched = self._get_tests(
                pav_cfg=pav_cfg,
                host=args.host,
                test_files=args.files,
                tests=args.tests,
                modes=args.modes,
                overrides=overrides,
                sys_vars=sys_vars,
            )

            tests_by_sched = self._configs_to_tests(
                pav_cfg=pav_cfg,
                sys_vars=sys_vars,
                configs_by_sched=configs_by_sched,
            )

        except commands.CommandError as err:
            fprint(err, file=self.errfile)
            return errno.EINVAL

        all_tests = sum(tests_by_sched.values(), [])

        if not all_tests:
            fprint("You must specify at least one test.", file=self.errfile)
            return errno.EINVAL

        series = TestSeries(pav_cfg, all_tests)

        rp_errors = []
        for test in all_tests:
            # Make sure the result parsers have reasonable arguments.
            try:
                result_parsers.check_args(test.config['results'])
            except PavTestError as err:
                rp_errors.append(str(err))

        if rp_errors:
            fprint("Result Parser configurations had errors:",
                   file=self.errfile,
                   color=utils.RED)
            for msg in rp_errors:
                fprint(msg, bullet=' - ', file=self.errfile)
            return errno.EINVAL

        # Build any tests that specify they should be built before kickoff
        # (rather than on the allocated nodes).
        for test in all_tests:
            if test.config['build']['on_nodes'] not in ['true', 'True']:
                if not test.build():
                    for oth_test in all_tests:
                        if oth_test.build_hash == test.build_hash:
                            oth_test.status.set(
                                STATES.BUILD_ERROR,
                                "Build cancelled because build {} failed.".
                                format(test.id))
                    fprint("Error building test: ",
                           file=self.errfile,
                           color=utils.RED)
                    fprint("status {status.state} - {status.note}".format(
                        status=test.status.current()),
                           file=self.errfile)
                    fprint(
                        "For more information, run 'pav log build {}'".format(
                            test.id),
                        file=self.errfile)
                    return errno.EINVAL

        for sched_name, tests in tests_by_sched.items():
            sched = schedulers.get_scheduler_plugin(sched_name)

            try:
                sched.schedule_tests(pav_cfg, tests)
            except schedulers.SchedulerPluginError as err:
                fprint('Error scheduling tests:',
                       file=self.errfile,
                       color=utils.RED)
                fprint(err, bullet='  ', file=self.errfile)
                fprint('Cancelling already kicked off tests.',
                       file=self.errfile)
                self._cancel_all(tests_by_sched)
                return errno.EINVAL

        # Tests should all be scheduled now, and have the SCHEDULED state
        # (at some point, at least). Wait until something isn't scheduled
        # anymore (either running or dead), or our timeout expires.
        wait_result = None
        if args.wait is not None:
            end_time = time.time() + args.wait
            while time.time() < end_time and wait_result is None:
                last_time = time.time()
                for sched_name, tests in tests_by_sched.items():
                    sched = schedulers.get_scheduler_plugin(sched_name)
                    for test in tests:
                        status = test.status.current()
                        if status == STATES.SCHEDULED:
                            status = sched.job_status(pav_cfg, test)

                        if status != STATES.SCHEDULED:
                            # The test has moved past the scheduled state,
                            # so we can stop waiting.
                            wait_result = status
                            break

                if wait_result is None:
                    # Sleep at most SLEEP_INTERVAL seconds, minus the time
                    # we spent checking our jobs.
                    time.sleep(max(0, self.SLEEP_INTERVAL -
                                   (time.time() - last_time)))

        fprint("{} test{} started as test series {}.".format(
            len(all_tests), 's' if len(all_tests) > 1 else '', series.id),
               file=self.outfile,
               color=utils.GREEN)

        if args.status:
            tests = list(series.tests.keys())
            tests, _ = test_obj_from_id(pav_cfg, tests)
            return print_from_test_obj(pav_cfg, tests, self.outfile, args.json)

        return 0
Example 5
    def run(self, pav_cfg, args, out_file=sys.stdout, err_file=sys.stderr):
        """Load and run an already prepped test in the current environment.
        :param out_file: The file object to write output to (stdout).
        :param err_file: The file object to write errors to (stderr).
        """

        try:
            test = PavTest.load(pav_cfg, args.test_id)
        except PavTestError as err:
            self.logger.error("Error loading test '%s': %s", args.test_id, err)
            raise

        try:
            if test.config['build']['on_nodes'] in ['true', 'True']:
                if not test.build():
                    self.logger.warning(
                        "Test {t.id} failed to build.".format(t=test))
        except Exception:
            test.status.set(STATES.BUILD_ERROR,
                            "Unknown build error. Refer to the kickoff log.")
            raise

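        # Load the scheduler plugin the test was configured to use.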
        try:
            sched = schedulers.get_scheduler_plugin(test.scheduler)
        except Exception:
            test.status.set(
                STATES.BUILD_ERROR,
                "Unknown error getting the scheduler. Refer to "
                "the kickoff log.")
            raise

        # Optionally wait on other tests running under the same scheduler.
        # This depends on the scheduler and the test configuration.
        lock = sched.lock_concurrency(pav_cfg, test)

        try:
            run_result = test.run(sched.get_vars(test),
                                  system_variables.get_vars(defer=False))
        except PavTestError as err:
            test.status.set(STATES.RUN_ERROR, err)
            test.set_run_complete()
            return 1
        except Exception:
            test.status.set(
                STATES.RUN_ERROR,
                "Unknown error while running test. Refer to the kickoff log.")
            raise
        finally:
            sched.unlock_concurrency(lock)

        # The test.run() method should have already logged the error and
        # set an appropriate status.
        if run_result in (STATES.RUN_ERROR, STATES.RUN_TIMEOUT):
            return 1

        try:
            rp_errors = []
            # Make sure the result parsers have reasonable arguments.
            # We check here because the parser code itself will likely assume
            # the args were already validated by _check_args, but they might
            # not be checkable before kickoff due to deferred variables.
            try:
                result_parsers.check_args(test.config['results'])
            except PavTestError as err:
                rp_errors.append(str(err))

            if rp_errors:
                for msg in rp_errors:
                    test.status.set(STATES.RESULTS_ERROR, msg)
                test.set_run_complete()
                return 1

            results = test.gather_results(run_result)
        except result_parsers.ResultParserError as err:
            self.logger.error("Unexpected error gathering results: %s", err)
            test.status.set(STATES.RESULTS_ERROR,
                            "Error parsing results: {}".format(err))
            test.set_run_complete()
            return 1

        try:
            test.save_results(results)

            result_logger = logging.getLogger('results')
            result_logger.info(utils.json_dumps(results))
        except Exception:
            test.status.set(
                STATES.RESULTS_ERROR,
                "Unknown error while saving results. Refer to the kickoff log."
            )
            raise

        try:
            test.status.set(
                STATES.COMPLETE, "The test completed with result: {}".format(
                    results.get('result', '<unknown>')))
            test.set_run_complete()
        except Exception:
            test.status.set(
                STATES.UNKNOWN,
                "Unknown error while setting test completion. Refer to the "
                "kickoff log.")
            raise
Example 6
    def _run(self, pav_cfg, test, sched):
        """Run an already prepped test in the current environment.
        :param pav_cfg: The pavilion configuration object.
        :param TestRun test: The test to run
        :param sched: The scheduler we're running under.
        :return:
        """

        # Optionally wait on other tests running under the same scheduler.
        # This depends on the scheduler and the test configuration.
        lock = sched.lock_concurrency(pav_cfg, test)

        try:
            run_result = test.run()
        except TestRunError as err:
            test.status.set(STATES.RUN_ERROR, err)
            return 1
        except TimeoutError:
            return 1
        except Exception:
            test.status.set(
                STATES.RUN_ERROR,
                "Unknown error while running test. Refer to the kickoff log.")
            raise
        finally:
            sched.unlock_concurrency(lock)

        try:
            rp_errors = []
            # Make sure the result parsers have reasonable arguments.
            # We check here because the parser code itself will likely assume
            # the args were already validated by _check_args, but they might
            # not be checkable before kickoff due to deferred variables.
            try:
                result_parsers.check_args(test.config['results'])
            except TestRunError as err:
                rp_errors.append(str(err))

            if rp_errors:
                for msg in rp_errors:
                    test.status.set(STATES.RESULTS_ERROR, msg)
                test.set_run_complete()
                return 1

            results = test.gather_results(run_result)
        except result_parsers.ResultParserError as err:
            self.logger.error("Unexpected error gathering results: %s", err)
            test.status.set(STATES.RESULTS_ERROR,
                            "Error parsing results: {}".format(err))
            return 1

        try:
            test.save_results(results)

            result_logger = logging.getLogger('results')
            result_logger.info(output.json_dumps(results))
        except Exception:
            test.status.set(
                STATES.RESULTS_ERROR,
                "Unknown error while saving results. Refer to the kickoff log."
            )
            raise

        try:
            test.status.set(
                STATES.COMPLETE, "The test completed with result: {}".format(
                    results.get('result', '<unknown>')))
        except Exception:
            test.status.set(
                STATES.UNKNOWN,
                "Unknown error while setting test completion. Refer to the "
                "kickoff log.")
            raise