Example #1
class TestDefinitionAction(TestAction):

    def __init__(self):
        """
        The TestDefinitionAction installs each test definition into
        the overlay. It does not execute the scripts in the test
        definition; that is the job of the TestAction class.
        One TestDefinitionAction handles all test definitions for
        the current job.
        In addition, a TestOverlayAction is added to the pipeline
        to handle parts of the overlay which are test definition dependent.
        """
        super(TestDefinitionAction, self).__init__()
        self.name = "test-definition"
        self.description = "load test definitions into image"
        self.summary = "loading test definitions"
        self.test_list = None

    def populate(self, parameters):
        """
        Each time a test definition is processed by a handler, a new set of
        overlay files are needed, based on that test definition. Basic overlay
        files are created by TestOverlayAction. More complex scripts like the
        install:deps script and the main run script have custom Actions.
        """
        index = {}
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.test_list = identify_test_definitions(self.job.parameters)
        if not self.test_list:
            return
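        # test_list holds groups of testdef dicts, so iterate over each group
        # and then over every definition within it.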
        for testdefs in self.test_list:
            for testdef in testdefs:
                # FIXME: only run the tests defined for this test action, not all the jobs for this deployment/job
                # This includes only running the install steps for the relevant deployment as the next deployment
                # could be a different OS.
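                # RepoAction.select() picks the handler class matching the 'from' field; the trailing () instantiates it.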
                handler = RepoAction.select(testdef['from'])()

                # set the full set of job YAML parameters for this handler as handler parameters.
                handler.job = self.job
                handler.parameters = testdef
                # store the correct test_name before incrementing the local index dict
                handler.parameters['test_name'] = "%s_%s" % (len(index), handler.parameters['name'])

                # copy details into the overlay, one per handler but the same class each time.
                overlay = TestOverlayAction()
                overlay.job = self.job
                overlay.parameters = testdef
                overlay.parameters['test_name'] = handler.parameters['test_name']
                overlay.test_uuid = handler.uuid

                # add install handler - uses job parameters
                installer = TestInstallAction()
                installer.job = self.job
                installer.parameters = testdef
                installer.parameters['test_name'] = handler.parameters['test_name']
                installer.test_uuid = handler.uuid

                # add runsh handler - uses job parameters
                runsh = TestRunnerAction()
                runsh.job = self.job
                runsh.parameters = testdef
                runsh.parameters['test_name'] = handler.parameters['test_name']
                runsh.test_uuid = handler.uuid

                index[len(index)] = handler.parameters['name']
                self.internal_pipeline.add_action(handler)

                # add overlay handlers to the pipeline
                self.internal_pipeline.add_action(overlay)
                self.internal_pipeline.add_action(installer)
                self.internal_pipeline.add_action(runsh)

    def validate(self):
        """
        TestDefinitionAction is part of the overlay and therefore part of the deployment -
        the internal pipeline then looks inside the job definition for details of the tests to deploy.
        Jobs with no test actions defined (empty test_list) are explicitly allowed.
        """
        super(TestDefinitionAction, self).validate()
        if not self.job:
            self.errors = "missing job object"
            return
        if 'actions' not in self.job.parameters:
            self.errors = "No actions defined in job parameters"
            return
        if not self.test_list:
            return
        for testdefs in self.test_list:
            for testdef in testdefs:
                if 'from' not in testdef:
                    self.errors = "missing 'from' field in test definition %s" % testdef
        self.internal_pipeline.validate_actions()

    def run(self, connection, args=None):
        """
        Creates the list of test definitions for this Test

        :param connection: Connection object, if any.
        :param args: Not used.
        :return: the received Connection.
        """
        if 'location' not in self.data['lava-overlay']:
            raise RuntimeError("Missing lava overlay location")
        self.logger.info("Loading test definitions")

        # overlay_path is the location of the files before boot
        self.data[self.name]['overlay_dir'] = os.path.abspath(
            "%s/%s" % (self.data['lava-overlay']['location'], self.data['lava_test_results_dir']))

        connection = super(TestDefinitionAction, self).run(connection, args)

        self.logger.debug("lava-test-runner.conf")
        with open('%s/lava-test-runner.conf' % self.data['test-definition']['overlay_dir'], 'a') as runner_conf:
            for handler in self.internal_pipeline.actions:
                if isinstance(handler, RepoAction):
                    runner_conf.write(handler.runner)

        return connection
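A minimal sketch of the test_name scheme which populate() applies, using a
hypothetical pair of test definitions; the 'from', 'repository' and 'name'
keys mirror the ones the handlers above read, the values are illustrative only:

test_list = [[
    {'from': 'git', 'repository': 'https://example.com/tests.git', 'name': 'smoke-tests'},
    {'from': 'git', 'repository': 'https://example.com/tests.git', 'name': 'network'},
]]

index = {}
for testdefs in test_list:
    for testdef in testdefs:
        # a zero-based position prefix keeps repeated definition names unique
        testdef['test_name'] = "%s_%s" % (len(index), testdef['name'])
        index[len(index)] = testdef['name']

print([testdef['test_name'] for testdef in test_list[0]])  # ['0_smoke-tests', '1_network']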
Example #2
class TestDefinitionAction(TestAction):
    def __init__(self):
        """
        The TestDefinitionAction installs each test definition into
        the overlay. It does not execute the scripts in the test
        definition; that is the job of the TestAction class.
        One TestDefinitionAction handles all test definitions for
        the current job.
        In addition, a TestOverlayAction is added to the pipeline
        to handle parts of the overlay which are test definition dependent.
        """
        super(TestDefinitionAction, self).__init__()
        self.name = "test-definition"
        self.description = "load test definitions into image"
        self.summary = "loading test definitions"

    def populate(self):
        """
        validate() allows this to be a lot simpler; there is no need
        to check whether each key exists every time.
        """
        index = {}
        self.internal_pipeline = Pipeline(parent=self, job=self.job)
        for testdef in self.parameters['test']['definitions']:
            if testdef['from'] == 'git':
                handler = GitRepoAction()
            elif testdef['from'] == 'bzr':
                handler = BzrRepoAction()
            elif testdef['from'] == 'tar':
                handler = TarRepoAction()
            elif testdef['from'] == 'url':
                handler = UrlRepoAction()
            else:
                self.errors = "unsupported handler"
                raise JobError("unsupported testdef handler: %s %s" %
                               (testdef, testdef['from']))
            # set the full set of job YAML parameters for this handler as handler parameters.
            handler.parameters = testdef
            # store the correct test_name before incrementing the local index dict
            handler.parameters['test_name'] = "%s_%s" % (len(index), handler.parameters['name'])
            index[len(index)] = handler.parameters['name']
            self.internal_pipeline.add_action(handler)
            # FIXME: the outer pipeline may add unwanted data to the parameters['test']
        self.internal_pipeline.add_action(TestOverlayAction())

    def validate(self):
        if not self.job:
            self.errors = "missing job object"
        if 'test' not in self.parameters:
            self.errors = "testaction without test parameters"
            # runtimeerror?
        elif 'definitions' not in self.parameters['test']:
            self.errors = "test action without definition"
        else:
            for testdef in self.parameters['test']['definitions']:
                if 'from' not in testdef:
                    self.errors = "missing 'from' field in test definition %s" % testdef
                elif testdef['from'] == 'git':
                    repository = str(testdef['repository'])
                    if not repository.endswith('.git'):
                        self.errors = "git specified but repository does not look like git"

        self.internal_pipeline.validate_actions()
        if self.errors:  # FIXME: call from the base class
            self._log("Validation failed")
            raise JobError("Invalid job data: %s\n" % '\n'.join(self.errors))

    def _inject_testdef_parameters(self, fout):  # FIXME: needs a separate action
        # inject default parameters that were defined in yaml first
        fout.write('###default parameters from yaml###\n')
        if 'params' in self.testdef:
            for def_param_name, def_param_value in self.testdef['params'].items():
                fout.write("%s='%s'\n" % (def_param_name, def_param_value))
        fout.write('######\n')
        # inject the parameters that were set in json
        fout.write('###test parameters from json###\n')
        if self._sw_sources and 'test_params' in self._sw_sources[0] and self._sw_sources[0]['test_params'] != '':
            # test_params arrives as a string; eval() turns it back into a dict of name/value pairs
            _test_params_temp = eval(self._sw_sources[0]['test_params'])
            for param_name, param_value in _test_params_temp.items():
                fout.write("%s='%s'\n" % (param_name, param_value))
        fout.write('######\n')

    def copy_test(self, hostdir, targetdir):  # FIXME: needs porting to the new classes
        """Copy the files needed to run this test to the device.

        :param hostdir: The location on the host filesystem to copy to.
        :param targetdir: The location `hostdir` will have when the device
            boots.
        """

        with open('%s/testdef_metadata' % hostdir, 'w') as f:
            f.write(yaml.safe_dump(self.testdef_metadata))

        if self.skip_install != "all":
            if 'install' in self.testdef:
                if self.skip_install != 'repos':
                    self._create_repos(hostdir)
                self._create_target_install(hostdir, targetdir)

    def run(self, connection, args=None):
        """
        Puts the requested test definition files into the overlay

        :param connection: Connection object, if any.
        :param args: Not used.
        :return: the received Connection.
        """
        self._log("Loading test definitions")
        # developer hack - if the image hasn't been downloaded this time, it may already contain old files
        # should really be an rmtree but it is only here to save developer time on downloads...
        runner_path = '%s/lava-test-runner.conf' % self.data['mount_action']['mntdir']
        if os.path.exists(runner_path):
            os.unlink(runner_path)
        connection = self.internal_pipeline.run_actions(connection)
        with open(runner_path, 'a') as runner_conf:
            for handler in self.internal_pipeline.actions:
                if isinstance(handler, (RepoAction, UrlRepoAction)):
                    runner_conf.write(handler.runner)
                if isinstance(handler, TestAction):  # FIXME: separate actions for copy & inject
                    # run copy_test
                    hostdir = self.data['mount_action']['mntdir']
                    targetdir = ''
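                    # copy_test(hostdir, targetdir) is not actually invoked here yet; see the FIXME above.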
        return connection
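For reference, the older populate() above walks self.parameters['test']['definitions']
directly; a hypothetical parameters dict carrying the fields that validate() checks
might look like this (the repository URL and names are illustrative, not taken from a
real job):

parameters = {
    'test': {
        'definitions': [
            {
                'from': 'git',  # selects GitRepoAction; 'bzr', 'tar' and 'url' are also handled
                'repository': 'https://example.com/lava-tests.git',  # should end in .git when 'from' is git
                'name': 'smoke-tests',
            },
        ],
    },
}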