Exemple #1
0
    def test_compatibility(self):
        """
        Test compatibility support.

        The class to use in the comparison will change according to which class
        is related to the change which caused the compatibility to be modified.
        """
        factory = Factory()
        job = factory.create_kvm_job("sample_jobs/kvm.yaml")
        self.assertIsNotNone(job)
        pipe = job.describe()
        self.assertEqual(pipe["compatibility"], DeployImages.compatibility)
        self.assertEqual(job.compatibility, DeployImages.compatibility)
        kvm_yaml = os.path.join(os.path.dirname(__file__),
                                "sample_jobs/kvm.yaml")
        # Use a distinct name for the handle so it does not shadow the path.
        with open(kvm_yaml, "r") as sample_job:
            job_def = yaml_safe_load(sample_job)
        # An exact compatibility match must parse successfully.
        job_def["compatibility"] = job.compatibility
        parser = JobParser()
        (rendered, data) = factory.create_device("kvm01.jinja2")
        device = yaml_safe_load(rendered)
        job = parser.parse(yaml_safe_dump(job_def), device, 4212, None, "")
        self.assertIsNotNone(job)
        # A compatibility above what the code supports must raise JobError.
        job_def["compatibility"] = job.compatibility + 1
        self.assertRaises(JobError, parser.parse, yaml_safe_dump(job_def),
                          device, 4212, None, "")
        # A compatibility of 0 is accepted by the parser.
        job_def["compatibility"] = 0
        job = parser.parse(yaml_safe_dump(job_def), device, 4212, None, "")
        self.assertIsNotNone(job)
Exemple #2
0
 def pipeline_reference(cls, filename, job=None):
     """Load the stored pipeline reference for *filename*, regenerating
     the reference file first when cls.update_ref is set."""
     ref_path = os.path.join(
         os.path.dirname(__file__), "pipeline_refs", filename)
     if cls.update_ref:
         sys.stderr.write("WARNING: modifying pipeline references!")
         with open(ref_path, "w") as ref_out:
             yaml_safe_dump(
                 job.pipeline.describe(False),
                 ref_out,
                 default_flow_style=None,
             )
     with open(ref_path, "r") as ref_in:
         return yaml_safe_load(ref_in)
Exemple #3
0
 def test_image(self, which_mock):
     """A kernel of type 'image' must be converted with mkimage and
     booted via bootm."""

     def first_action(actions, wanted):
         # Return the first pipeline action carrying the requested name.
         return [item for item in actions if item.name == wanted][0]

     self.deploy_block["kernel"]["type"] = "image"
     job = self.parser.parse(
         yaml_safe_dump(self.base_data), self.device, 4212, None, ""
     )
     job.logger = DummyLogger()
     job.validate()
     deploy = first_action(job.pipeline.actions, "tftp-deploy")
     overlay = first_action(deploy.internal_pipeline.actions,
                            "prepare-tftp-overlay")
     prepare = first_action(overlay.internal_pipeline.actions,
                            "prepare-kernel")
     uboot_prepare = first_action(prepare.internal_pipeline.actions,
                                  "uboot-prepare-kernel")
     self.assertEqual("image", uboot_prepare.kernel_type)
     # bbb-01.yaml does not contain booti parameters, try to convert to a uImage
     self.assertEqual("bootm", uboot_prepare.bootcommand)
     self.assertTrue(uboot_prepare.mkimage_conversion)
Exemple #4
0
 def test_job_no_tags(self, which_mock):
     """A vland job whose interfaces carry no tags must still validate."""
     with open(self.filename) as yaml_data:
         alpha_data = yaml_safe_load(yaml_data)
     vland = alpha_data["protocols"][VlandProtocol.name]
     for vlan_key in list(vland):
         vland[vlan_key] = {"tags": []}
     # removed tags from original job to simulate job where any interface tags will be acceptable
     self.assertEqual(vland, {"vlan_one": {"tags": []}})
     parser = JobParser()
     job = parser.parse(yaml_safe_dump(alpha_data), self.device, 4212, None, "")
     job.logger = DummyLogger()
     job.validate()
     vprotocol = next(
         proto for proto in job.protocols if proto.name == VlandProtocol.name
     )
     self.assertTrue(vprotocol.valid)
     self.assertEqual(vprotocol.names, {"vlan_one": "4212vlanone"})
     # check_timeout: wrong request is False, non-dict or short timeout raises.
     self.assertFalse(vprotocol.check_timeout(120, {"request": "no call"}))
     self.assertRaises(JobError, vprotocol.check_timeout, 60, "deploy_vlans")
     self.assertRaises(
         JobError, vprotocol.check_timeout, 60, {"request": "deploy_vlans"}
     )
     self.assertTrue(vprotocol.check_timeout(120, {"request": "deploy_vlans"}))
     for vlan_name in job.parameters["protocols"][VlandProtocol.name]:
         self.assertIn(vlan_name, vprotocol.params)
         self.assertIn("switch", vprotocol.params[vlan_name])
         self.assertIn("port", vprotocol.params[vlan_name])
Exemple #5
0
 def test_uimage(self, which_mock):
     """A kernel already of type 'uimage' boots with bootm and needs
     no mkimage conversion."""

     def grab(actions, wanted):
         # First action in the given pipeline with the requested name.
         return next(a for a in actions if a.name == wanted)

     self.deploy_block["kernel"]["type"] = "uimage"
     job = self.parser.parse(yaml_safe_dump(self.base_data), self.device,
                             4212, None, "")
     job.logger = DummyLogger()
     job.validate()
     deploy = grab(job.pipeline.actions, "tftp-deploy")
     overlay = grab(deploy.pipeline.actions, "prepare-tftp-overlay")
     prepare = grab(overlay.pipeline.actions, "prepare-kernel")
     uboot_prepare = grab(prepare.pipeline.actions, "uboot-prepare-kernel")
     self.assertEqual("uimage", uboot_prepare.kernel_type)
     self.assertEqual("bootm", uboot_prepare.bootcommand)
     self.assertFalse(uboot_prepare.mkimage_conversion)
Exemple #6
0
 def test_zimage_nobootz(self, which_mock):
     """Without bootz support on the device, a zImage must be converted
     and booted via bootm."""
     print(which_mock)

     def grab(actions, wanted):
         # First action in the given pipeline with the requested name.
         return next(a for a in actions if a.name == wanted)

     # drop bootz from the device for this part of the test
     del self.device["parameters"]["bootz"]
     self.deploy_block["kernel"]["type"] = "zimage"
     job = self.parser.parse(yaml_safe_dump(self.base_data), self.device,
                             4212, None, "")
     job.logger = DummyLogger()
     job.validate()
     deploy = grab(job.pipeline.actions, "tftp-deploy")
     overlay = grab(deploy.pipeline.actions, "prepare-tftp-overlay")
     prepare = grab(overlay.pipeline.actions, "prepare-kernel")
     uboot_prepare = grab(prepare.pipeline.actions, "uboot-prepare-kernel")
     self.assertEqual("zimage", uboot_prepare.kernel_type)
     self.assertEqual("bootm", uboot_prepare.bootcommand)
     self.assertTrue(uboot_prepare.mkimage_conversion)
Exemple #7
0
 def create_custom_job(self,
                       template,
                       job_data,
                       job_ctx=None,
                       validate=True):
     """
     Parse a job definition against the given device template.

     :param template: jinja2 device template name
     :param job_data: job definition as a dict
     :param job_ctx: optional job context; falls back to the job's own context
     :param validate: when True, validate the submission schema first
     :return: the parsed job with a DummyLogger attached
     :raises ConfigurationError: when the rendered device is invalid
     """
     if validate:
         validate_job(job_data, strict=False)
     if job_ctx:
         job_data["context"] = job_ctx
     else:
         job_ctx = job_data.get("context")
     (data, device_dict) = self.create_device(template, job_ctx)
     device = NewDevice(yaml_safe_load(data))
     print("####### Device configuration #######")
     print(data)
     print("#######")
     try:
         parser = JobParser()
         job = parser.parse(yaml_safe_dump(job_data), device, 4999, None,
                            "")
     except (ConfigurationError, TypeError) as exc:
         print("####### Parser exception ########")
         print(device)
         print("#######")
         # Chain the original exception so the root cause stays visible.
         raise ConfigurationError("Invalid device: %s" % exc) from exc
     job.logger = DummyLogger()
     return job
Exemple #8
0
 def create_kvm_job(self, filename, validate=False):
     """
     Custom function to allow for extra exception handling.

     :param filename: job YAML path relative to this test file
     :param validate: when True, validate the submission schema first
     :return: the parsed job, or None when parsing raises a LAVAError
     """
     job_ctx = {
         "arch": "amd64",
         "no_kvm": True,
     }  # override to allow unit tests on all types of systems
     (data, device_dict) = self.create_device("kvm01.jinja2", job_ctx)
     device = NewDevice(yaml_safe_load(data))
     print("####### Device configuration #######")
     print(data)
     print("#######")
     self.validate_data("hi6220-hikey-01", device_dict)
     kvm_yaml = os.path.join(os.path.dirname(__file__), filename)
     parser = JobParser()
     with open(kvm_yaml) as sample_job_data:
         job_data = yaml_safe_load(sample_job_data.read())
     print("########## Test Job Submission validation #######")
     if validate:
         validate_job(job_data, strict=False)
     try:
         job = parser.parse(yaml_safe_dump(job_data), device, 4212, None,
                            "")
         job.logger = DummyLogger()
     except LAVAError as exc:
         print(exc)
         return None
     return job
Exemple #9
0
 def test_lxc_without_lxctest(self):
     """LXC deploy without its own test action: only the tftp namespace
     carries test definitions, and the namespaces stay distinct."""
     lxc_yaml = os.path.join(os.path.dirname(__file__),
                             "sample_jobs/bbb-lxc-notest.yaml")
     with open(lxc_yaml) as sample_job_data:
         data = yaml_safe_load(sample_job_data)
     parser = JobParser()
     (rendered, _) = self.factory.create_device("bbb-01.jinja2")
     device = NewDevice(yaml_safe_load(rendered))
     job = parser.parse(yaml_safe_dump(data), device, 4577, None, "")
     job.logger = DummyLogger()
     job.validate()
     # The lxc deploy must not prepare a tftp overlay.
     lxc_deploy = [
         action for action in job.pipeline.actions
         if action.name == "lxc-deploy"
     ][0]
     names = [action.name for action in lxc_deploy.pipeline.actions]
     self.assertNotIn("prepare-tftp-overlay", names)
     namespace1 = lxc_deploy.parameters.get("namespace")
     # Walk down to the test-definition action inside the tftp deploy.
     tftp_deploy = [
         action for action in job.pipeline.actions
         if action.name == "tftp-deploy"
     ][0]
     prepare = [
         action for action in tftp_deploy.pipeline.actions
         if action.name == "prepare-tftp-overlay"
     ][0]
     overlay = [
         action for action in prepare.pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     test_def = [
         action for action in overlay.pipeline.actions
         if action.name == "test-definition"
     ][0]
     # The lxc and tftp actions must use different namespaces.
     namespace = test_def.parameters.get("namespace")
     self.assertIsNotNone(namespace)
     self.assertIsNotNone(namespace1)
     self.assertNotEqual(namespace, namespace1)
     self.assertNotEqual(self.job.pipeline.describe(False),
                         job.pipeline.describe(False))
     # Every test action in the job must belong to the tftp namespace.
     test_actions = [
         action for action in job.parameters["actions"] if "test" in action
     ]
     for action in test_actions:
         if "namespace" in action["test"]:
             if action["test"]["namespace"] == namespace:
                 self.assertEqual(action["test"]["definitions"][0]["name"],
                                  "smoke-tests-bbb")
         else:
             self.fail("Found a test action not from the tftp boot")
     namespace_tests = [
         action["test"]["definitions"] for action in test_actions
         if "namespace" in action["test"]
         and action["test"]["namespace"] == namespace
     ]
     self.assertEqual(len(namespace_tests), 1)
     self.assertEqual(len(test_actions), 1)
     # Finally compare the whole pipeline against the stored reference.
     description_ref = self.pipeline_reference("bbb-lxc-notest.yaml",
                                               job=job)
     self.assertEqual(description_ref, job.pipeline.describe(False))
Exemple #10
0
    def run(self, connection, max_end_time):
        """
        Extract the inlined test definition and dump it onto the target image
        """
        # use the base class to populate the runner_path and overlay_path data into the context
        connection = super().run(connection, max_end_time)

        # NOTE: the runner_path dir must remain empty until after the VCS clone, so let the VCS clone create the final dir
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        # Grab the inline test definition
        testdef = self.parameters["repository"]
        # NOTE(review): sha1 is updated below but never read in this block —
        # confirm whether the digest is meant to be stored anywhere.
        sha1 = hashlib.sha1()  # nosec - not used for cryptography

        # Dump the test definition and compute the sha1
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        yaml_dirname = os.path.dirname(yaml_file)
        # NOTE(review): yaml_dirname already includes runner_path, so joining
        # them again is only a no-op when runner_path is absolute — confirm.
        if yaml_dirname != "":
            os.makedirs(os.path.join(runner_path, yaml_dirname))
        with open(yaml_file, "w") as test_file:
            data = yaml_safe_dump(testdef)
            sha1.update(data.encode("utf-8"))
            test_file.write(data)

        # set testdef metadata in base class
        self.store_testdef(self.parameters["repository"], "inline")
        return connection
Exemple #11
0
 def test_match_devices_without_map(self):
     """
     Without a map, there is no support for knowing which interfaces to
     put onto a VLAN, so these devices cannot be assigned to a VLAN testjob
     See http://localhost/static/docs/v2/vland.html#vland-and-interface-tags-in-lava
     """
     self.bbb3 = self.factory.make_device(self.factory.bbb_type,
                                          hostname="bbb-03")
     self.cubie2 = self.factory.make_device(self.factory.cubie_type,
                                            hostname="cubie2")
     self.factory.ensure_tag("usb-eth")
     self.factory.ensure_tag("sata")
     self.factory.bbb1.tags.set(Tag.objects.filter(name="usb-eth"))
     self.factory.cubie1.tags.set(Tag.objects.filter(name="sata"))
     user = self.factory.make_user()
     sample_job_file = os.path.join(os.path.dirname(__file__),
                                    "sample_jobs",
                                    "bbb-cubie-vlan-group.yaml")
     with open(sample_job_file, "r") as test_support:
         data = yaml_safe_load(test_support)
     vlan_job = TestJob.from_yaml_and_user(yaml_safe_dump(data), user)
     # Neither map-less device may match any sub-job's vlan interfaces.
     for job in vlan_job:
         self.assertFalse(
             match_vlan_interface(self.bbb3,
                                  yaml_safe_load(job.definition)))
         self.assertFalse(
             match_vlan_interface(self.cubie2,
                                  yaml_safe_load(job.definition)))
Exemple #12
0
 def test_primary_interface(self, which_mock):
     """Interfaces whose tags rendered as an empty list are normalised to
     None and the vland overlay still validates."""
     with open(self.filename) as yaml_data:
         alpha_data = yaml_safe_load(yaml_data)
     interfaces = self.device["parameters"]["interfaces"]
     # jinja2 processing of tags: [] results in tags:
     for interface in interfaces:
         if interfaces[interface]["tags"] == []:
             interfaces[interface]["tags"] = None
     parser = JobParser()
     job = parser.parse(yaml_safe_dump(alpha_data), self.device, 4212, None, "")

     def pick(actions, label):
         # First action in the pipeline carrying the requested name.
         return next(a for a in actions if a.name == label)

     deploy = pick(job.pipeline.actions, "tftp-deploy")
     prepare = pick(deploy.pipeline.actions, "prepare-tftp-overlay")
     overlay = pick(prepare.pipeline.actions, "lava-overlay")
     vland_overlay = pick(overlay.pipeline.actions, "lava-vland-overlay")
     vland_overlay.validate()
     job.logger = DummyLogger()
     job.validate()
Exemple #13
0
    def run(self, connection, max_end_time):
        """
        Read the test definition back from the overlay, duplicate it for
        lava-test-runner and record the per-definition files and results.

        :raises JobError: when the test definition file cannot be opened
        """
        connection = super().run(connection, max_end_time)
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        try:
            with open(yaml_file, "r") as test_file:
                testdef = yaml_safe_load(test_file)
        except OSError as exc:
            # Chain the OSError so the root cause stays in the traceback.
            raise JobError(
                "Unable to open test definition '%s': %s"
                % (self.parameters["path"], str(exc))
            ) from exc

        # FIXME: change lava-test-runner to accept a variable instead of duplicating the YAML?
        with open("%s/testdef.yaml" % runner_path, "w") as run_file:
            yaml_safe_dump(testdef, run_file)

        # write out the UUID of each test definition.
        # FIXME: is this necessary any longer?
        with open("%s/uuid" % runner_path, "w") as uuid:
            uuid.write(self.test_uuid)

        # FIXME: does this match old-world test-shell & is it needed?
        with open("%s/testdef_metadata" % runner_path, "w") as metadata:
            content = self.get_namespace_data(
                action="test", label=self.test_uuid, key="testdef_metadata"
            )
            metadata.write(yaml_safe_dump(content))

        # Need actions for the run.sh script (calling parameter support in base class)
        # and install script (also calling parameter support here.)
        # this run then only does the incidental files.

        self.results = {
            "uuid": self.test_uuid,
            "name": self.parameters["name"],
            "path": self.parameters["path"],
            "from": self.parameters["from"],
        }
        if self.parameters["from"] != "inline":
            self.results["repository"] = self.parameters["repository"]
        return connection
Exemple #14
0
    def test_prompt_from_job(self, which_mock):
        """
        Support setting the prompt after login via the job

        Loads a known YAML, adds a prompt to the dict and re-parses the job.
        Checks that the prompt is available in the expect_shell_connection action.
        """
        job = self.factory.create_job("x86-01.jinja2", "sample_jobs/ipxe-ramdisk.yaml")
        job.validate()
        # Walk bootloader-action -> bootloader-retry -> expect-shell-connection.
        bootloader = [
            action
            for action in job.pipeline.actions
            if action.name == "bootloader-action"
        ][0]
        retry = [
            action
            for action in bootloader.pipeline.actions
            if action.name == "bootloader-retry"
        ][0]
        expect = [
            action
            for action in retry.pipeline.actions
            if action.name == "expect-shell-connection"
        ][0]
        # NOTE(review): 'check' is never used after this assignment — confirm
        # whether an assertion on these parameters was intended here.
        check = expect.parameters
        (rendered, _) = self.factory.create_device("x86-01.jinja2")
        device = NewDevice(yaml_safe_load(rendered))
        extra_yaml = os.path.join(os.path.dirname(__file__), "sample_jobs/ipxe.yaml")
        with open(extra_yaml) as data:
            sample_job_string = data.read()
        parser = JobParser()
        sample_job_data = yaml_safe_load(sample_job_string)
        # The sample job must contain at least one boot action.
        boot = [item["boot"] for item in sample_job_data["actions"] if "boot" in item][
            0
        ]
        self.assertIsNotNone(boot)
        sample_job_string = yaml_safe_dump(sample_job_data)
        job = parser.parse(sample_job_string, device, 4212, None, "")
        job.logger = DummyLogger()
        job.validate()
        # Re-select the same action chain after re-parsing; the [0] indexing
        # itself fails the test if any of the actions are missing.
        bootloader = [
            action
            for action in job.pipeline.actions
            if action.name == "bootloader-action"
        ][0]
        retry = [
            action
            for action in bootloader.pipeline.actions
            if action.name == "bootloader-retry"
        ][0]
        expect = [
            action
            for action in retry.pipeline.actions
            if action.name == "expect-shell-connection"
        ][0]
Exemple #15
0
    def run(self, connection, max_end_time):
        """
        Build the placeholder substitution map for the device flash commands
        and run each configured command with the placeholders filled in.
        """
        connection = super().run(connection, max_end_time)
        # Substitute in the device commands
        substitutions = {}
        for key in self.parameters["images"].keys():
            filename = self.get_namespace_data(action="download-action",
                                               label=key,
                                               key="file")
            # Strip self.path plus the separator so the name is relative.
            filename = filename[len(self.path) + 1:]
            substitutions["{%s}" % key.upper()] = filename

        # Add power commands
        substitutions["{HARD_RESET_COMMAND}"] = str(
            self.job.device.hard_reset_command)
        substitutions["{SOFT_RESET_COMMAND}"] = str(
            self.job.device.soft_reset_command)
        substitutions["{PRE_OS_COMMAND}"] = str(self.job.device.pre_os_command)
        # str(None) would leak the literal "None" into the command line.
        if self.job.device.pre_os_command is None:
            substitutions["{PRE_OS_COMMAND}"] = ""
        substitutions["{PRE_POWER_COMMAND}"] = str(
            self.job.device.pre_power_command)
        if self.job.device.pre_power_command is None:
            substitutions["{PRE_POWER_COMMAND}"] = ""
        substitutions["{POWER_ON_COMMAND}"] = str(
            self.job.device.power_command)
        substitutions["{POWER_OFF_COMMAND}"] = str(
            self.job.device.get("commands", {}).get("power_off", ""))

        # Add some device configuration, serialised as YAML strings
        substitutions["{DEVICE_INFO}"] = yaml_safe_dump(
            self.job.device.get("device_info", []))
        substitutions["{STATIC_INFO}"] = yaml_safe_dump(
            self.job.device.get("static_info", []))

        # Run the commands with the substitutions applied, from self.path
        for cmd in self.commands:
            cmds = substitute([cmd], substitutions)
            self.run_cmd(cmds[0],
                         error_msg="Unable to flash the device",
                         cwd=self.path)

        return connection
Exemple #16
0
 def test_job_protocols(self):
     """A client job split out of a multinode vland job keeps both the
     vland and multinode protocol sections."""
     self.factory.ensure_tag("usb-eth")
     self.factory.ensure_tag("sata")
     self.factory.bbb1.tags.set(Tag.objects.filter(name="usb-eth"))
     self.factory.cubie1.tags.set(Tag.objects.filter(name="sata"))
     target_group = "unit-test-only"
     job_dict = split_multinode_yaml(self.factory.make_vland_job(),
                                     target_group)
     client_job = job_dict["client"][0]
     client_handle, client_file_name = tempfile.mkstemp()
     # Use a context manager so the write handle is not leaked.
     with open(client_file_name, "w") as client_file:
         yaml_safe_dump(client_job, client_file)
     # YAML device file, as required by lava-dispatch --target
     data = "{% extends 'beaglebone-black.jinja2' %}"
     device_yaml_file = prepare_jinja_template("bbb-01", data, raw=False)
     parser = JobParser()
     bbb_device = NewDevice(device_yaml_file)
     with open(client_file_name) as sample_job_data:
         bbb_job = parser.parse(sample_job_data, bbb_device, 4212, None, "")
     os.close(client_handle)
     os.unlink(client_file_name)
     self.assertIn("protocols", bbb_job.parameters)
     self.assertIn(VlandProtocol.name, bbb_job.parameters["protocols"])
     self.assertIn(MultinodeProtocol.name, bbb_job.parameters["protocols"])
Exemple #17
0
 def test_host_role(self):
     """A multinode submission with host role count above one is rejected."""
     # need a full job to properly test the multinode YAML split; the
     # second device lets the submission reach the multinode YAML test.
     for hostname in ("fakeqemu3", "fakeqemu4"):
         self.factory.make_device(self.device_type, hostname)
     data = yaml_safe_load(self.factory.make_job_yaml())
     data["protocols"]["lava-multinode"]["roles"]["host"]["count"] = 2
     self.assertRaises(
         SubmissionException,
         TestJob.from_yaml_and_user,
         yaml_safe_dump(data),
         self.factory.make_user(),
     )
Exemple #18
0
 def test_job_protocols(self):
     """A client job split out of a multinode vland job keeps both the
     vland and multinode protocol sections."""
     self.factory.ensure_tag("usb-eth")
     self.factory.ensure_tag("sata")
     self.factory.bbb1.tags.set(Tag.objects.filter(name="usb-eth"))
     self.factory.cubie1.tags.set(Tag.objects.filter(name="sata"))
     target_group = "unit-test-only"
     job_dict = split_multinode_yaml(self.factory.make_vland_job(),
                                     target_group)
     client_job = job_dict["client"][0]
     client_handle, client_file_name = tempfile.mkstemp()
     # Use a context manager so the write handle is not leaked.
     with open(client_file_name, "w") as client_file:
         yaml_safe_dump(client_job, client_file)
     # YAML device file, as required by lava-dispatch --target
     device_yaml_file = os.path.realpath(
         os.path.join(os.path.dirname(__file__), "devices", "bbb-01.yaml"))
     self.assertTrue(os.path.exists(device_yaml_file))
     parser = JobParser()
     bbb_device = NewDevice(device_yaml_file)
     with open(client_file_name) as sample_job_data:
         bbb_job = parser.parse(sample_job_data, bbb_device, 4212, None, "")
     os.close(client_handle)
     os.unlink(client_file_name)
     self.assertIn("protocols", bbb_job.parameters)
     self.assertIn(VlandProtocol.name, bbb_job.parameters["protocols"])
     self.assertIn(MultinodeProtocol.name, bbb_job.parameters["protocols"])
Exemple #19
0
 def test_name(self):
     """A test definition name containing spaces must fail validation."""
     deploy = [
         action for action in self.job.pipeline.actions
         if action.name == "deployimages"
     ][0]
     overlay = [
         action for action in deploy.pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     testdef = [
         action for action in overlay.pipeline.actions
         if action.name == "test-definition"
     ][0]
     # The unmodified job validates without errors.
     testdef.validate()
     self.assertEqual([], testdef.errors)
     (rendered, _) = self.factory.create_device("kvm01.jinja2")
     device = yaml_safe_load(rendered)
     kvm_yaml = os.path.join(os.path.dirname(__file__),
                             "sample_jobs/kvm.yaml")
     parser = JobParser()
     with open(kvm_yaml, "r") as sample_job_data:
         content = yaml_safe_load(sample_job_data)
     data = [
         block["test"] for block in content["actions"] if "test" in block
     ][0]
     definitions = [
         block for block in data["definitions"] if "path" in block
     ][0]
     # Inject a name with a space, which is an invalid character.
     definitions["name"] = "smoke tests"
     job = parser.parse(yaml_safe_dump(content), device, 4212, None, "")
     deploy = [
         action for action in job.pipeline.actions
         if action.name == "deployimages"
     ][0]
     overlay = [
         action for action in deploy.pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     testdef = [
         action for action in overlay.pipeline.actions
         if action.name == "test-definition"
     ][0]
     # Validation must now record the invalid-name error on the pipeline.
     testdef.validate()
     self.assertNotEqual([], testdef.errors)
     self.assertIn(
         "Invalid characters found in test definition name: smoke tests",
         job.pipeline.errors,
     )
Exemple #20
0
 def test_job_bad_tags(self):
     """An unsupported interface tag must make job validation fail."""
     with open(self.filename) as yaml_data:
         alpha_data = yaml_safe_load(yaml_data)
     vland = alpha_data["protocols"][VlandProtocol.name]
     # replaced tags from original job to simulate job where an unsupported tag is specified
     for vlan_key in list(vland):
         vland[vlan_key] = {
             "tags": ["spurious"]
         }
     self.assertEqual(vland, {"vlan_one": {"tags": ["spurious"]}})
     parser = JobParser()
     job = parser.parse(yaml_safe_dump(alpha_data), self.device, 4212, None, "")
     job.logger = DummyLogger()
     self.assertRaises(JobError, job.validate)
Exemple #21
0
 def test_extra_options(self):
     """Integer entries in the qemu 'extra' parameters must end up as
     strings in the final qemu command line."""
     (rendered, _) = self.factory.create_device("kvm01.jinja2")
     device = NewDevice(yaml_safe_load(rendered))
     kvm_yaml = os.path.join(
         os.path.dirname(__file__), "sample_jobs/kvm-inline.yaml"
     )
     with open(kvm_yaml) as sample_job_data:
         job_data = yaml_safe_load(sample_job_data)
     # Inject extra qemu arguments; the bare 1 deliberately loads as an int.
     device["actions"]["boot"]["methods"]["qemu"]["parameters"][
         "extra"
     ] = yaml_safe_load(
         """
               - -smp
               - 1
               - -global
               - virtio-blk-device.scsi=off
               - -device virtio-scsi-device,id=scsi
               - --append "console=ttyAMA0 root=/dev/vda rw"
               """
     )
     self.assertIsInstance(
         device["actions"]["boot"]["methods"]["qemu"]["parameters"]["extra"][1], int
     )
     parser = JobParser()
     job = parser.parse(yaml_safe_dump(job_data), device, 4212, None, "")
     job.logger = DummyLogger()
     job.validate()
     # Walk boot-image-retry -> boot-qemu-image -> execute-qemu.
     boot_image = [
         action
         for action in job.pipeline.actions
         if action.name == "boot-image-retry"
     ][0]
     boot_qemu = [
         action
         for action in boot_image.pipeline.actions
         if action.name == "boot-qemu-image"
     ][0]
     qemu = [
         action
         for action in boot_qemu.pipeline.actions
         if action.name == "execute-qemu"
     ][0]
     # Every element of sub_command is a string: "1" present, int 1 absent.
     self.assertIsInstance(qemu.sub_command, list)
     [self.assertIsInstance(item, str) for item in qemu.sub_command]
     self.assertIn("virtio-blk-device.scsi=off", qemu.sub_command)
     self.assertIn("1", qemu.sub_command)
     self.assertNotIn(1, qemu.sub_command)
Exemple #22
0
 def test_missing_handler(self):
     """An unknown 'from' handler in a test definition must raise JobError."""
     (rendered, _) = self.factory.create_device("kvm01.jinja2")
     device = NewDevice(yaml_safe_load(rendered))
     kvm_yaml = os.path.join(os.path.dirname(__file__),
                             "sample_jobs/kvm.yaml")
     parser = JobParser()
     with open(kvm_yaml) as sample_job_data:
         data = yaml_safe_load(sample_job_data)
     definitions = data["actions"][2]["test"]["definitions"]
     definitions[0]["from"] = "unusable-handler"
     raised = False
     try:
         job = parser.parse(yaml_safe_dump(data), device, 4212, None, "")
         job.logger = DummyLogger()
     except JobError:
         raised = True
     except Exception as exc:
         # Any other exception type is a test failure in its own right.
         self.fail(exc)
     if not raised:
         self.fail("JobError not raised")
Exemple #23
0
 def test_multidefinition(self):
     """Two test definitions sharing one name must be reported as an error."""
     block = [
         testblock["test"]
         for testblock in self.job_data["actions"]
         if "test" in testblock
     ][0]
     self.assertIn("definitions", block)
     # Duplicate the first definition so two entries share the same name.
     block["definitions"][1] = block["definitions"][0]
     self.assertEqual(len(block["definitions"]), 2)
     self.assertEqual(block["definitions"][1], block["definitions"][0])
     parser = JobParser()
     job = parser.parse(yaml_safe_dump(self.job_data), self.device, 4212, None, "")
     self.assertIsNotNone(job)
     # Walk down to the test-runscript-overlay action.
     deploy = [
         action for action in job.pipeline.actions if action.name == "tftp-deploy"
     ][0]
     tftp = [
         action
         for action in deploy.internal_pipeline.actions
         if action.name == "prepare-tftp-overlay"
     ][0]
     overlay = [
         action
         for action in tftp.internal_pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     testdef = [
         action
         for action in overlay.internal_pipeline.actions
         if action.name == "test-definition"
     ][0]
     runscript = [
         action
         for action in testdef.internal_pipeline.actions
         if action.name == "test-runscript-overlay"
     ][0]
     # One index entry per definition, duplicates included.
     testdef_index = runscript.get_namespace_data(
         action="test-definition", label="test-definition", key="testdef_index"
     )
     self.assertEqual(len(block["definitions"]), len(testdef_index))
     runscript.validate()
     self.assertIsNotNone(runscript.errors)
     self.assertIn("Test definition names need to be unique.", runscript.errors)
Exemple #24
0
def transition_multinode_jobs(logger):
    """
    Transition multinode jobs that are ready to be scheduled.
    A multinode is ready when all sub jobs are in STATE_SCHEDULING.

    :param logger: logger used for scheduling debug output.
    :return: list of ids of the sub jobs that were moved to
        STATE_SCHEDULED.
    """
    jobs = TestJob.objects.filter(state=TestJob.STATE_SCHEDULING)
    # Ordering by target_group is mandatory for distinct to work
    jobs = jobs.order_by("target_group", "id")
    jobs = jobs.distinct("target_group")

    new_jobs = []
    for job in jobs:
        sub_jobs = job.sub_jobs_list
        # Dynamic connections never get a device, so they do not need to
        # reach STATE_SCHEDULING on their own: count them as ready.
        if not all(
            j.state == TestJob.STATE_SCHEDULING or j.dynamic_connection
            for j in sub_jobs
        ):
            continue

        logger.debug("-> multinode [%d] scheduled", job.id)
        # Inject the actual group hostnames into the roles for the dispatcher
        # to populate in the overlay.
        devices = {}
        for sub_job in sub_jobs:
            # build a list of all devices in this group
            if sub_job.dynamic_connection:
                continue
            definition = yaml_safe_load(sub_job.definition)
            devices[str(sub_job.id
                        )] = definition["protocols"]["lava-multinode"]["role"]

        for sub_job in sub_jobs:
            # apply the complete list to all jobs in this group
            definition = yaml_safe_load(sub_job.definition)
            definition["protocols"]["lava-multinode"]["roles"] = devices
            sub_job.definition = yaml_safe_dump(definition)
            # transition the job and device
            sub_job.go_state_scheduled()
            sub_job.save()
            new_jobs.append(sub_job.id)
            # Bug fix: log the sub job being transitioned, not the parent
            # job's sub_id repeated on every iteration.
            logger.debug("--> %d", sub_job.id)
    return new_jobs
Exemple #25
0
    def test_panda_lxc_template(self):
        data = """{% extends 'panda.jinja2' %}
{% set power_off_command = '/usr/local/lab-scripts/snmp_pdu_control --hostname pdu15 --command off --port 07' %}
{% set hard_reset_command = '/usr/local/lab-scripts/snmp_pdu_control --hostname pdu15 --command reboot --port 07' %}
{% set connection_command = 'telnet serial4 7010' %}
{% set power_on_command = '/usr/local/lab-scripts/snmp_pdu_control --hostname pdu15 --command on --port 07' %}"""
        self.assertTrue(self.validate_data("staging-panda-01", data))
        template_dict = prepare_jinja_template("staging-panda-01",
                                               data,
                                               raw=False)
        fdesc, device_yaml = tempfile.mkstemp()
        os.write(fdesc, yaml_safe_dump(template_dict).encode())
        panda = NewDevice(device_yaml)
        lxc_yaml = os.path.join(os.path.dirname(__file__), "sample_jobs",
                                "panda-lxc-aep.yaml")
        with open(lxc_yaml) as sample_job_data:
            parser = JobParser()
            job = parser.parse(sample_job_data, panda, 4577, None, "")
        os.close(fdesc)
        job.logger = DummyLogger()
        job.validate()
Exemple #26
0
 def test_vland_overlay(self, which_mock):
     """The vland overlay scripts are generated even when every vlan in
     the job carries an empty tag list."""
     with open(self.filename) as yaml_data:
         alpha_data = yaml_safe_load(yaml_data)
     protocol_block = alpha_data["protocols"][VlandProtocol.name]
     for vlan_key in protocol_block:
         protocol_block[vlan_key] = {"tags": []}
     # removed tags from original job to simulate job where any interface tags will be acceptable
     self.assertEqual(protocol_block, {"vlan_one": {"tags": []}})
     parser = JobParser()
     job = parser.parse(yaml_safe_dump(alpha_data), self.device, 4212, None,
                        "")
     job.logger = DummyLogger()
     job.validate()

     def named(actions, name):
         # First pipeline action carrying the given name.
         return [entry for entry in actions if entry.name == name][0]

     tftp_deploy = named(job.pipeline.actions, "tftp-deploy")
     prepare = named(tftp_deploy.internal_pipeline.actions,
                     "prepare-tftp-overlay")
     overlay = named(prepare.internal_pipeline.actions, "lava-overlay")
     vland = named(overlay.internal_pipeline.actions, "lava-vland-overlay")
     # The overlay directory must exist and contain the three scripts.
     self.assertTrue(os.path.exists(vland.lava_vland_test_dir))
     vland_files = os.listdir(vland.lava_vland_test_dir)
     for script in ("lava-vland-names", "lava-vland-tags", "lava-vland-self"):
         self.assertIn(script, vland_files)
Exemple #27
0
    def test_job_go_state_finished_multinode(self):
        """go_state_finished on a multinode master: only an INCOMPLETE
        finish of an *essential* role cancels the other sub jobs; every
        other combination leaves them running."""

        def make_definition(role, essential):
            # Minimal multinode protocol block for one role.
            return yaml_safe_dump({
                "protocols": {
                    "lava-multinode": {
                        "role": role,
                        "essential": essential
                    }
                }
            })

        def set_all_running():
            # Put the master job, its device and both sub jobs back into
            # the running state with unknown health.
            self.job.state = TestJob.STATE_RUNNING
            self.job.actual_device = self.device
            self.job.actual_device.state = Device.STATE_RUNNING
            self.job.actual_device.save()
            self.job.health = TestJob.HEALTH_UNKNOWN
            self.job.save()
            for sub_job in (self.sub_job1, self.sub_job2):
                sub_job.state = TestJob.STATE_RUNNING
                sub_job.actual_device.state = Device.STATE_RUNNING
                sub_job.save()
                sub_job.health = TestJob.HEALTH_UNKNOWN
                sub_job.save()

        def check_roles(master_essential):
            # The master's essential flag tracks its definition; the
            # workers are never essential.
            self.assertTrue(self.job.is_multinode)
            if master_essential:
                self.assertTrue(self.job.essential_role)
            else:
                self.assertFalse(self.job.essential_role)
            for sub_job in (self.sub_job1, self.sub_job2):
                self.assertTrue(sub_job.is_multinode)
                self.assertFalse(sub_job.essential_role)

        def finish_and_check(health, expected_sub_state):
            self.job.go_state_finished(health)
            self.assertEqual(self.job.state, TestJob.STATE_FINISHED)
            for sub_job in (self.sub_job1, self.sub_job2):
                sub_job.refresh_from_db()
                self.assertEqual(sub_job.state, expected_sub_state)

        # 1/ Essential role
        self.device2 = Device.objects.create(hostname="device-02",
                                             device_type=self.device_type)
        self.device3 = Device.objects.create(hostname="device-03",
                                             device_type=self.device_type)

        self.job.definition = make_definition("master", True)
        self.job.target_group = "target_group"
        self.job.save()
        self.sub_job1 = TestJob.objects.create(
            requested_device_type=self.device_type,
            submitter=self.user,
            target_group="target_group",
        )
        self.sub_job1.definition = make_definition("worker", False)
        self.sub_job1.actual_device = self.device2
        self.sub_job1.save()
        self.sub_job2 = TestJob.objects.create(
            requested_device_type=self.device_type,
            submitter=self.user,
            target_group="target_group",
        )
        self.sub_job2.definition = make_definition("worker", False)
        self.sub_job2.actual_device = self.device3
        self.sub_job2.save()

        # 1.1/ Success: the workers keep running.
        set_all_running()
        check_roles(master_essential=True)
        finish_and_check(TestJob.HEALTH_COMPLETE, TestJob.STATE_RUNNING)

        # 1.2/ Failure: an essential master failing cancels the group.
        set_all_running()
        check_roles(master_essential=True)
        finish_and_check(TestJob.HEALTH_INCOMPLETE, TestJob.STATE_CANCELING)

        # 2/ Non-essential role
        # 2.1/ Success
        self.job.definition = make_definition("master", False)
        set_all_running()
        check_roles(master_essential=False)
        finish_and_check(TestJob.HEALTH_COMPLETE, TestJob.STATE_RUNNING)

        # 2.2/ Failure: the workers keep running regardless.
        set_all_running()
        check_roles(master_essential=False)
        finish_and_check(TestJob.HEALTH_INCOMPLETE, TestJob.STATE_RUNNING)
Exemple #28
0
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses>.

from django.contrib.auth.models import User
from django.test import TestCase

from lava_common.compat import yaml_safe_dump
from lava_scheduler_app.models import Device, DeviceType, TestJob, Worker

# Minimal job definition used by the state-machine tests. The text is
# already YAML, so it must NOT be passed through yaml_safe_dump():
# dumping a str produces a YAML *scalar*, and loading that scalar yields
# the string itself instead of the expected job dictionary.
minimal_valid_job = """
job_name: minimal valid job
visibility: public
timeouts:
  job:
    minutes: 10
  action:
    minutes: 5
actions: []
"""


class TestTestJobStateMachine(TestCase):
    def setUp(self):
        self.worker = Worker.objects.create(hostname="worker-01",
                                            state=Worker.STATE_ONLINE)
        self.device_type = DeviceType.objects.create(name="dt-01")
        self.device = Device.objects.create(
            hostname="device-01",
            device_type=self.device_type,
            worker_host=self.worker,
Exemple #29
0
 def make_job_yaml(self, **kw):
     """Build the job data with the given overrides and return it as a
     YAML string."""
     job_data = self.make_job_data(**kw)
     return yaml_safe_dump(job_data)
Exemple #30
0
    def run(self, connection, max_end_time):
        """
        Common run function for subclasses which define custom patterns

        Resolves the connection for this (or the configured) namespace,
        primes the shell environment on the device, then drives
        lava-test-runner while periodically draining feedback from the
        connections of other namespaces.

        :param connection: connection passed in by the pipeline; replaced
            by the connection stored in the namespace data.
        :param max_end_time: forwarded to super().run().
        :return: the connection used for the test run.
        :raises LAVABug: if no connection is found in the namespace data.
        :raises JobError: if no lava_test_results_dir was set by a deploy.
        :raises TestError: if errors were recorded during the run.
        """
        super().run(connection, max_end_time)

        # Get the connection, specific to this namespace
        connection_namespace = self.parameters.get("connection-namespace")
        parameters = None
        if self.timeout.can_skip(self.parameters):
            self.logger.info(
                "The timeout has 'skip' enabled. "
                "If this test action block times out, the job will continue at the next action block."
            )

        # An explicit 'connection-namespace' lets this test block reuse a
        # connection created under another namespace; otherwise the
        # block's own namespace (default "common") is used.
        if connection_namespace:
            self.logger.debug("Using connection namespace: %s",
                              connection_namespace)
            parameters = {"namespace": connection_namespace}
        else:
            parameters = {
                "namespace": self.parameters.get("namespace", "common")
            }
            self.logger.debug("Using namespace: %s", parameters["namespace"])
        connection = self.get_namespace_data(
            action="shared",
            label="shared",
            key="connection",
            deepcopy=False,
            parameters=parameters,
        )

        if not connection:
            raise LAVABug("No connection retrieved from namespace data")

        self.signal_director.connection = connection

        pattern_dict = {self.pattern.name: self.pattern}
        # pattern dictionary is the lookup from the STARTRUN to the parse pattern.
        self.set_namespace_data(
            action=self.name,
            label=self.name,
            key="pattern_dictionary",
            value=pattern_dict,
        )
        if self.character_delay > 0:
            self.logger.debug("Using a character delay of %i (ms)",
                              self.character_delay)

        # Fall back to the device's default shell prompt if nothing set
        # one earlier in the pipeline.
        if not connection.prompt_str:
            connection.prompt_str = [
                self.job.device.get_constant("default-shell-prompt")
            ]
            # FIXME: This should be logged whenever prompt_str is changed, by the connection object.
            self.logger.debug("Setting default test shell prompt %s",
                              connection.prompt_str)
        connection.timeout = self.connection_timeout
        # force an initial prompt - not all shells will respond without an excuse.
        connection.sendline(connection.check_char)
        self.wait(connection)

        # use the string instead of self.name so that inheriting classes (like multinode)
        # still pick up the correct command.
        running = self.parameters["stage"]
        pre_command_list = self.get_namespace_data(action="test",
                                                   label="lava-test-shell",
                                                   key="pre-command-list")
        lava_test_results_dir = self.get_namespace_data(
            action="test", label="results", key="lava_test_results_dir")
        lava_test_sh_cmd = self.get_namespace_data(action="test",
                                                   label="shared",
                                                   key="lava_test_sh_cmd")

        # Any errors arising from this command are not checked.
        # If the result of the command means that lava-test-runner cannot be found,
        # this will cause the job to time out as Incomplete.
        if pre_command_list:
            for command in pre_command_list:
                connection.sendline(command, delay=self.character_delay)
                connection.wait()

        if lava_test_results_dir is None:
            raise JobError(
                "Nothing to run. Maybe the 'deploy' stage is missing, "
                "otherwise this is a bug which should be reported.")

        self.logger.debug("Using %s" % lava_test_results_dir)
        if lava_test_sh_cmd:
            connection.sendline("export SHELL=%s" % lava_test_sh_cmd,
                                delay=self.character_delay)
            connection.wait()

        # source the environment file containing device-specific shell variables
        connection.sendline(". %s/environment" % lava_test_results_dir,
                            delay=self.character_delay)
        connection.wait()

        try:
            # Collect the connections of every *other* namespace so their
            # pending output can be drained while the runner executes.
            feedbacks = []
            for feedback_ns in self.data.keys():
                feedback_connection = self.get_namespace_data(
                    action="shared",
                    label="shared",
                    key="connection",
                    deepcopy=False,
                    parameters={"namespace": feedback_ns},
                )
                if feedback_connection == connection:
                    continue
                if feedback_connection:
                    self.logger.debug(
                        "Will listen to feedbacks from '%s' for 1 second",
                        feedback_ns)
                    feedbacks.append((feedback_ns, feedback_connection))

            with connection.test_connection() as test_connection:
                # the structure of lava-test-runner means that there is just one TestAction and it must run all definitions
                test_connection.sendline(
                    "%s/bin/lava-test-runner %s/%s" %
                    (lava_test_results_dir, lava_test_results_dir, running),
                    delay=self.character_delay,
                )

                test_connection.timeout = min(self.timeout.duration,
                                              self.connection_timeout.duration)
                self.logger.info(
                    "Test shell timeout: %ds (minimum of the action and connection timeout)",
                    test_connection.timeout,
                )

                # Because of the feedbacks, we use a small value for the
                # timeout.  This allows to grab feedback regularly.
                last_check = time.time()
                while self._keep_running(test_connection,
                                         test_connection.timeout,
                                         connection.check_char):
                    # Only grab the feedbacks every test_connection.timeout
                    if feedbacks and time.time(
                    ) - last_check > test_connection.timeout:
                        for feedback in feedbacks:
                            # The timeout is really small because the goal is only
                            # to clean the buffer of the feedback connections:
                            # the characters are already in the buffer.
                            # With an higher timeout, this can have a big impact on
                            # the performances of the overall loop.
                            bytes_read = feedback[1].listen_feedback(timeout=1)
                            # NOTE(review): threshold is > 1 byte, not > 0 —
                            # presumably to ignore a lone echo character;
                            # confirm against listen_feedback's contract.
                            if bytes_read > 1:
                                self.logger.debug(
                                    "Listened to connection for namespace '%s' done",
                                    feedback[0],
                                )
                        last_check = time.time()
        finally:
            # A non-None current_run means the loop ended before the run
            # finished (timeout or exception): record it as failed.
            if self.current_run is not None:
                self.logger.error("Marking unfinished test run as failed")
                self.current_run["duration"] = "%.02f" % (time.time() -
                                                          self.start)
                self.logger.results(self.current_run)
                self.current_run = None

        # Only print if the report is not empty
        if self.report:
            self.logger.debug(
                yaml_safe_dump(self.report, default_flow_style=False))
        if self.errors:
            raise TestError(self.errors)
        return connection