Example No. 1
 def test_writer_deployment(self):
     (rendered, _) = self.factory.create_device("cubie1.jinja2")
     cubie = NewDevice(yaml_safe_load(rendered))
     self._check_deployment(cubie, "cubietruck-removable-with-writer.yaml")
Example No. 2
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        try:
            with open(yaml_file, "r") as test_file:
                testdef = yaml_safe_load(test_file)
        except OSError as exc:
            raise JobError(
                "Unable to open test definition '%s': %s"
                % (self.parameters["path"], str(exc))
            )

        self.logger.debug("runner path: %s test_uuid %s", runner_path, self.test_uuid)
        filename = "%s/run.sh" % runner_path
        content = self.handle_parameters(testdef)

        # the 'lava' testdef name is reserved
        if self.parameters["name"] == "lava":
            raise TestError('The "lava" test definition name is reserved.')

        lava_signal = self.parameters.get("lava-signal", "stdout")

        testdef_levels = self.get_namespace_data(
            action=self.name, label=self.name, key="testdef_levels"
        )
        with open(filename, "a") as runsh:
            for line in content:
                runsh.write(line)
            runsh.write("set -e\n")
            runsh.write("set -x\n")
            # use the testdef_index value for the testrun name to handle repeats at source
            runsh.write("export TESTRUN_ID=%s\n" % testdef_levels[self.level])
            runsh.write(
                "cd %s\n"
                % self.get_namespace_data(
                    action="uuid", label="runner_path", key=self.parameters["test_name"]
                )
            )
            runsh.write("UUID=`cat uuid`\n")
            runsh.write("set +x\n")
            if lava_signal == "kmsg":
                runsh.write("export KMSG=true\n")
                runsh.write(
                    'echo "<0><LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>" > /dev/kmsg\n'
                )
            else:
                runsh.write('echo "<LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>"\n')
            runsh.write("set -x\n")
            steps = testdef.get("run", {}).get("steps", [])
            for cmd in steps:
                if "--cmd" in cmd or "--shell" in cmd:
                    cmd = re.sub(r"\$(\d+)\b", r"\\$\1", cmd)
                runsh.write("%s\n" % cmd)
            runsh.write("set +x\n")
            if lava_signal == "kmsg":
                runsh.write("unset KMSG\n")
                runsh.write(
                    'echo "<0><LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>" > /dev/kmsg\n'
                )
            else:
                runsh.write('echo "<LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>"\n')

        self.results = {
            "uuid": self.test_uuid,
            "filename": filename,
            "name": self.parameters["name"],
            "path": self.parameters["path"],
            "from": self.parameters["from"],
        }
        if self.parameters["from"] != "inline":
            self.results["repository"] = self.parameters["repository"]
        return connection
Example No. 3
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        try:
            with open(yaml_file, "r") as test_file:
                testdef = yaml_safe_load(test_file)
        except OSError as exc:
            raise JobError(
                "Unable to open test definition '%s': %s"
                % (self.parameters["path"], str(exc))
            )

        if "install" not in testdef:
            self.results = {"skipped %s" % self.name: self.test_uuid}
            return connection

        filename = "%s/install.sh" % runner_path
        content = self.handle_parameters(testdef)

        # TODO: once the migration is complete, design a better way to do skip_install support.
        with open(filename, "w") as install_file:
            for line in content:
                install_file.write(line)
            if "keys" not in self.skip_options:
                sources = testdef["install"].get("keys", [])
                for src in sources:
                    install_file.write("lava-add-keys %s" % src)
                    install_file.write("\n")

            if "sources" not in self.skip_options:
                sources = testdef["install"].get("sources", [])
                for src in sources:
                    install_file.write("lava-add-sources %s" % src)
                    install_file.write("\n")

            if "deps" not in self.skip_options:
                # generic dependencies - must be named the same across all distros
                # supported by the testdef
                deps = testdef["install"].get("deps", [])

                # distro-specific dependencies
                if "distro" in self.parameters["deployment_data"]:
                    deps = deps + testdef["install"].get(
                        "deps-" + self.parameters["deployment_data"]["distro"], []
                    )

                if deps:
                    install_file.write("lava-install-packages ")
                    for dep in deps:
                        install_file.write("%s " % dep)
                    install_file.write("\n")

            if "steps" not in self.skip_options:
                steps = testdef["install"].get("steps", [])
                if steps:
                    # Allow install steps to use the git-repo directly
                    # fake up the directory as it will be after the overlay is applied
                    # os.path.join refuses if the directory does not exist on the dispatcher
                    base = len(DISPATCHER_DOWNLOAD_DIR.split("/")) + 2
                    # skip job_id/action-tmpdir/ as well
                    install_dir = "/" + "/".join(runner_path.split("/")[base:])
                    install_file.write("cd %s\n" % install_dir)
                    install_file.write("pwd\n")
                    for cmd in steps:
                        install_file.write("%s\n" % cmd)

            if "git-repos" not in self.skip_options:
                self.install_git_repos(testdef, runner_path)

        self.results = {"uuid": self.test_uuid}
        return connection
Example No. 4
 def test_secondary_media(self, which_mock):
     """
     Test UBootSecondaryMedia validation
     """
     job_parser = JobParser()
     (rendered, _) = self.factory.create_device("cubie1.jinja2")
     cubie = NewDevice(yaml_safe_load(rendered))
     sample_job_file = os.path.join(
         os.path.dirname(__file__), "sample_jobs/cubietruck-removable.yaml"
     )
     sample_job_data = open(sample_job_file)
     job = job_parser.parse(sample_job_data, cubie, 4212, None, "")
     job.logger = DummyLogger()
     job.validate()
     sample_job_data.close()
     uboot_action = [
         action
         for action in job.pipeline.actions
         if action.name == "uboot-action"
         and action.parameters["namespace"] == "boot2"
     ][0]
     u_boot_media = [
         action
         for action in uboot_action.pipeline.actions
         if action.name == "uboot-from-media"
         and action.parameters["namespace"] == "boot2"
     ][0]
     self.assertIsInstance(u_boot_media, UBootSecondaryMedia)
     self.assertEqual([], u_boot_media.errors)
     self.assertEqual(
         u_boot_media.parameters["kernel"], "/boot/vmlinuz-3.16.0-4-armmp-lpae"
     )
     self.assertEqual(
         u_boot_media.parameters["kernel"],
         u_boot_media.get_namespace_data(
             action="download-action", label="file", key="kernel"
         ),
     )
     self.assertEqual(
         u_boot_media.parameters["ramdisk"],
         u_boot_media.get_namespace_data(
             action="compress-ramdisk", label="file", key="ramdisk"
         ),
     )
     self.assertEqual(
         u_boot_media.parameters["dtb"],
         u_boot_media.get_namespace_data(
             action="download-action", label="file", key="dtb"
         ),
     )
     # use the base class name so that uboot-from-media can pick up the value reliably.
     self.assertEqual(
         u_boot_media.parameters["root_uuid"],
         u_boot_media.get_namespace_data(
             action="bootloader-from-media", label="uuid", key="root"
         ),
     )
     device = u_boot_media.get_namespace_data(
         action="storage-deploy", label="u-boot", key="device"
     )
     self.assertIsNotNone(device)
     part_reference = "%s:%s" % (
         job.device["parameters"]["media"]["usb"][device]["device_id"],
         u_boot_media.parameters["boot_part"],
     )
     self.assertEqual(
         part_reference,
         u_boot_media.get_namespace_data(
             action=u_boot_media.name, label="uuid", key="boot_part"
         ),
     )
     self.assertEqual(part_reference, "0:1")
Example No. 5
    def start_job(self, job):
        # Load job definition to get the variables for template
        # rendering
        job_def = yaml_safe_load(job.definition)
        job_def["compatibility"] = job.pipeline_compatibility
        job_def_str = yaml.dump(job_def, Dumper=yaml.CDumper)
        job_ctx = job_def.get("context", {})

        device = job.actual_device
        worker = device.worker_host

        # TODO: check that device_cfg is not None!
        device_cfg_str = device.load_configuration(job_ctx,
                                                   output_format="yaml")

        # Try to load the dispatcher specific files and then fallback to the
        # default configuration files.
        env_str = load_optional_yaml_file(
            os.path.join(DISPATCHERS_PATH, worker.hostname, "env.yaml"),
            ENV_PATH)
        env_dut_str = load_optional_yaml_file(
            os.path.join(DISPATCHERS_PATH, worker.hostname, "env.dut.yaml"),
            ENV_DUT_PATH,
        )
        dispatcher_cfg = load_optional_yaml_file(
            os.path.join(DISPATCHERS_PATH, worker.hostname, "dispatcher.yaml"),
            os.path.join(DISPATCHERS_PATH, "%s.yaml" % worker.hostname),
        )

        self.save_job_config(job, job_def_str, device_cfg_str, env_str,
                             env_dut_str, dispatcher_cfg)
        self.logger.info("[%d] START => %s (%s)", job.id, worker.hostname,
                         device.hostname)
        send_multipart_u(
            self.controler,
            [
                worker.hostname,
                "START",
                str(job.id),
                job_def_str,
                device_cfg_str,
                dispatcher_cfg,
                env_str,
                env_dut_str,
            ],
        )

        if not job.is_multinode:
            return

        device_cfg = yaml_safe_load(device_cfg_str)
        # For multinode jobs, start the dynamic connections
        for sub_job in job.sub_jobs_list:
            if sub_job == job or not sub_job.dynamic_connection:
                continue

            # Render the sub job definition
            sub_job_def = yaml_safe_load(sub_job.definition)
            sub_job_def["compatibility"] = sub_job.pipeline_compatibility
            sub_job_def_str = yaml.dump(sub_job_def, Dumper=yaml.CDumper)

            # inherit only enough configuration for dynamic_connection operation
            self.logger.info(
                "[%d] Trimming dynamic connection device configuration.",
                sub_job.id)
            min_device_cfg = job.actual_device.minimise_configuration(
                device_cfg)
            min_device_cfg_str = yaml.dump(min_device_cfg, Dumper=yaml.CDumper)

            self.save_job_config(
                sub_job,
                sub_job_def_str,
                min_device_cfg_str,
                env_str,
                env_dut_str,
                dispatcher_cfg,
            )
            self.logger.info("[%d] START => %s (connection)", sub_job.id,
                             worker.hostname)
            send_multipart_u(
                self.controler,
                [
                    worker.hostname,
                    "START",
                    str(sub_job.id),
                    sub_job_def_str,
                    min_device_cfg_str,
                    dispatcher_cfg,
                    env_str,
                    env_dut_str,
                ],
            )
Example No. 6
 def result(self) -> Dict[str, Any]:
     with contextlib.suppress(OSError, yaml.YAMLError):
         data = yaml_safe_load((self.base_dir / "result.yaml").read_bytes())
         if isinstance(data, dict):
             return data
     return {}
Example No. 7
    def test_pipeline(self):
        description_ref = self.pipeline_reference("kvm-inline.yaml", job=self.job)
        self.assertEqual(description_ref, self.job.pipeline.describe(False))

        self.assertEqual(len(self.job.pipeline.describe()), 4)
        inline_repo = None
        for action in self.job.pipeline.actions:
            if isinstance(action, DeployAction):
                self.assertIsNotNone(action.pipeline.actions[1])
                overlay = action.pipeline.actions[1]
                self.assertIsNotNone(overlay.pipeline.actions[1])
                testdef = overlay.pipeline.actions[2]
                self.assertIsNotNone(testdef.pipeline.actions[0])
                inline_repo = testdef.pipeline.actions[0]
                break
        # Test the InlineRepoAction directly
        self.assertIsNotNone(inline_repo)
        location = mkdtemp()
        # other actions have not been run, so fake up
        inline_repo.set_namespace_data(
            action="test", label="results", key="lava_test_results_dir", value=location
        )
        inline_repo.set_namespace_data(
            action="test", label="test-definition", key="overlay_dir", value=location
        )
        inline_repo.set_namespace_data(
            action="test", label="shared", key="location", value=location
        )
        inline_repo.set_namespace_data(
            action="test", label="test-definiton", key="overlay_dir", value=location
        )

        inline_repo.run(None, None)
        yaml_file = os.path.join(
            location, "0/tests/0_smoke-tests-inline/inline/smoke-tests-basic.yaml"
        )
        self.assertTrue(os.path.exists(yaml_file))
        with open(yaml_file, "r") as f_in:
            testdef = yaml_safe_load(f_in)
        expected_testdef = {
            "metadata": {
                "description": "Basic system test command for Linaro Ubuntu images",
                "devices": [
                    "panda",
                    "panda-es",
                    "arndale",
                    "vexpress-a9",
                    "vexpress-tc2",
                ],
                "format": "Lava-Test Test Definition 1.0",
                "name": "smoke-tests-basic",
                "os": ["ubuntu"],
                "scope": ["functional"],
            },
            "run": {
                "steps": [
                    "lava-test-case linux-INLINE-pwd --shell pwd",
                    "lava-test-case linux-INLINE-uname --shell uname -a",
                    "lava-test-case linux-INLINE-vmstat --shell vmstat",
                    "lava-test-case linux-INLINE-ifconfig --shell ifconfig -a",
                    "lava-test-case linux-INLINE-lscpu --shell lscpu",
                    "lava-test-case linux-INLINE-lsusb --shell lsusb",
                    "lava-test-case linux-INLINE-lsb_release --shell lsb_release -a",
                ]
            },
        }
        self.assertEqual(set(testdef), set(expected_testdef))
Example No. 8
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        parameters = {
            "device_type": "d02",
            "job_name": "grub-standard-ramdisk",
            "job_timeout": "15m",
            "action_timeout": "5m",
            "priority": "medium",
            "actions": {
                "boot": {
                    "method": "grub",
                    "commands": "ramdisk",
                    "prompts": ["linaro-test", "root@debian:~#"],
                },
                "deploy": {
                    "ramdisk": "initrd.gz",
                    "kernel": "zImage",
                    "dtb": "broken.dtb",
                },
            },
        }
        (rendered, _) = self.factory.create_device("d02-01.jinja2")
        device = NewDevice(yaml_safe_load(rendered))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters["actions"]["boot"])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        parsed = []
        kernel = parameters["actions"]["deploy"]["kernel"]
        ramdisk = parameters["actions"]["deploy"]["ramdisk"]
        dtb = parameters["actions"]["deploy"]["dtb"]

        substitution_dictionary = {
            "{SERVER_IP}": ip_addr,
            # the addresses need to be hexadecimal
            "{RAMDISK}": ramdisk,
            "{KERNEL}": kernel,
            "{DTB}": dtb,
        }
        params = device["actions"]["boot"]["methods"]
        commands = params["grub"]["ramdisk"]["commands"]
        self.assertIn("net_bootp", commands)
        self.assertIn(
            "linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp",
            commands,
        )
        self.assertIn("initrd (tftp,{SERVER_IP})/{RAMDISK}", commands)
        self.assertIn("devicetree (tftp,{SERVER_IP})/{DTB}", commands)

        params["grub"]["ramdisk"]["commands"] = substitute(
            params["grub"]["ramdisk"]["commands"], substitution_dictionary)
        substituted_commands = params["grub"]["ramdisk"]["commands"]
        self.assertIs(type(substituted_commands), list)
        self.assertIn("net_bootp", substituted_commands)
        self.assertNotIn(
            "linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp",
            substituted_commands,
        )
        self.assertIn(
            "linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp"
            % (ip_addr, kernel),
            substituted_commands,
        )
        self.assertNotIn("initrd (tftp,{SERVER_IP})/{RAMDISK}", parsed)
        self.assertNotIn("devicetree (tftp,{SERVER_IP})/{DTB}", parsed)
Example No. 9
    def validate_pipeline_devices(self, name=None):
        """
        Name
        ----
        `validate_pipeline_devices` [`name`]

        Description
        -----------
        Validate that the device dictionary and device-type template
        together create a valid YAML file which matches the pipeline
        device schema.
        Retired devices are ignored.

        See also get_pipeline_device_config

        Arguments
        ---------
        `name`: string
            Can be device hostname or device type name.
        If name is specified, method will search for either a matching device
        hostname or matching device type name in which case it will only
        validate that(those) device(s).
        If not specified, this method will validate all non-retired devices
        in the system.

        Return value
        ------------
        This function returns an XML-RPC structure of results with the
        following fields.

        `device_hostname`: {'Valid': null}
        or
        `device_hostname`: {'Invalid': message}

        """
        if not name:
            devices = Device.objects.exclude(health=Device.HEALTH_RETIRED)
        else:
            devices = Device.objects.exclude(
                health=Device.HEALTH_RETIRED).filter(device_type__name=name)
            if not devices:
                devices = Device.objects.exclude(
                    health=Device.HEALTH_RETIRED).filter(hostname=name)

        devices = devices.visible_by_user(self.user)

        if not devices and name:
            raise xmlrpc.client.Fault(
                404,
                "No devices found with hostname or device type name %s, visible by the user"
                % name,
            )
        if not devices and not name:
            raise xmlrpc.client.Fault(
                404,
                "No pipeline devices visible by user were found on this instance."
            )
        results = {}
        for device in devices:
            key = str(device.hostname)
            config = device.load_configuration(output_format="yaml")
            if config is None:
                results[key] = {"Invalid": "Missing device dictionary"}
                continue
            try:
                # validate against the device schema
                validate_device(yaml_safe_load(config))
            except SubmissionException as exc:
                results[key] = {"Invalid": exc}
                continue
            results[key] = {"Valid": None}
        return xmlrpc.client.Binary(yaml_safe_dump(results).encode("UTF-8"))
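For reference, a minimal client-side sketch of consuming the Binary/YAML return value described in the docstring above. The instance URL is hypothetical and the XML-RPC method path (scheduler.validate_pipeline_devices) is an assumption; the real entry point may be exposed differently.

import xmlrpc.client

import yaml

server = xmlrpc.client.ServerProxy("https://lava.example.com/RPC2")  # hypothetical URL
binary = server.scheduler.validate_pipeline_devices("qemu")  # assumed method path
results = yaml.safe_load(binary.data.decode("utf-8"))
for hostname, status in results.items():
    # status is either {"Valid": None} or {"Invalid": "<message>"}
    print(hostname, status)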
Example No. 10
    def handle(self, *_, **options):
        dicts = File("device").list("*.jinja2")
        synced_devices = []
        self.stdout.write("Scanning devices:")
        for name in dicts:
            hostname = name.rsplit(".", 1)[0]

            # Get value of 'sync_to_lava' variable from template.
            sync_dict, exc = self._get_sync_to_lava(hostname)

            if exc:
                self.stdout.write(f"* {hostname} [SKIP]")
                self.stdout.write(f"  -> invalid jinja2 template")
                self.stdout.write(f"  -> {exc}")
                continue

            if sync_dict is None:
                self.stdout.write(f"* {hostname} [SKIP]")
                self.stdout.write(f"  -> missing '{self.SYNC_KEY}'")
                continue

            # Convert it to a dictionary.
            sync_dict = self._parse_sync_dict(sync_dict)

            try:
                template = environment.devices().get_template(name)
                device_template = yaml_safe_load(template.render())
            except jinja2.TemplateError as exc:
                self.stdout.write(f"* {hostname} [SKIP]")
                self.stdout.write(f"  -> invalid jinja2 template")
                self.stdout.write(f"  -> {exc}")
                continue
            except yaml.YAMLError as exc:
                self.stdout.write(f"* {hostname} [SKIP]")
                self.stdout.write(f"  -> invalid yaml")
                self.stdout.write(f"  -> {exc}")
                continue

            # Check whether this device was already created manually in the db.
            with contextlib.suppress(Device.DoesNotExist):
                device = Device.objects.get(hostname=hostname)
                if not device.is_synced:
                    self.stdout.write(f"* {hostname} [SKIP]")
                    self.stdout.write(f"  -> created manually")
                    continue

            # Check keys
            if "device_type" not in sync_dict:
                self.stdout.write(f"* {hostname} [SKIP]")
                self.stdout.write(f"  -> 'device_type' is mandatory")
                continue

            # Add to managed devices list.
            self.stdout.write(f"* {hostname}")
            synced_devices.append(hostname)

            # Create the device type if it does not already exist.
            device_type, created = DeviceType.objects.get_or_create(
                name=sync_dict["device_type"])
            if created:
                self.stdout.write(
                    f"  -> create device type: {device_type.name}")

            worker = None
            if "worker" in sync_dict:
                worker, created = Worker.objects.get_or_create(
                    hostname=sync_dict["worker"])
                if created:
                    self.stdout.write(
                        f"  -> create worker: {sync_dict['worker']}")

            # Create/update device.
            defaults = {
                "device_type": device_type,
                "description": "Created automatically by LAVA.",
                "worker_host": worker,
                "is_synced": True,
            }
            device, created = Device.objects.update_or_create(
                defaults, hostname=hostname)
            if created:
                Device.objects.filter(hostname=hostname).update(
                    health=Device.HEALTH_UNKNOWN)

            # Create aliases.
            for alias_name in sync_dict.get("aliases", []):
                Alias.objects.get_or_create(name=alias_name,
                                            device_type=device_type)
                self.stdout.write(f"  -> alias: {alias_name}")

            # Remove all tag relations first.
            device.tags.clear()
            # Create tags.
            for tag_name in sync_dict.get("tags", []):
                tag, _ = Tag.objects.get_or_create(name=tag_name)
                device.tags.add(tag)
                self.stdout.write(f"  -> tag: {tag_name}")

            # Link physical owner
            specified_owner = sync_dict.get("physical_owner", "")
            try:
                physical_owner = User.objects.get(username=specified_owner)
                device.physical_owner = physical_owner
                self.stdout.write(f"  -> user: {specified_owner}")
            except User.DoesNotExist:
                device.physical_owner = None
                if specified_owner:
                    self.stdout.write(
                        f"  -> user '{specified_owner}' does not exist")
            finally:
                device.save()

            # Link physical group
            specified_group = sync_dict.get("physical_group", "")
            try:
                physical_group = Group.objects.get(name=specified_group)
                device.physical_group = physical_group
                self.stdout.write(f"  -> group: {specified_group}")
            except Group.DoesNotExist:
                device.physical_group = None
                if specified_group:
                    self.stdout.write(
                        f"  -> group '{specified_group}' does not exist")
            finally:
                device.save()

            # Assign permission
            specified_permissions = sync_dict.get("group_device_permissions",
                                                  [])
            for permission in specified_permissions:
                perm = permission[0]
                group = permission[1]

                try:
                    permission_group = Group.objects.get(name=group)
                    try:
                        GroupDevicePermission.objects.assign_perm(
                            perm, permission_group, device)
                        self.stdout.write(
                            f"  -> add group permission: ({perm}, {group})")
                    except PermissionNameError:
                        self.stdout.write(
                            f"  -> permission '{perm}' does not exist")
                except Group.DoesNotExist:
                    self.stdout.write(f"  -> group '{group}' does not exist")

            # Delete unused permission
            kwargs = {"device": device}
            obj_perm = GroupDevicePermission.objects.filter(**kwargs)
            for perm in obj_perm:
                if [
                        perm.permission.codename,
                        perm.group.name,
                ] not in specified_permissions:
                    GroupDevicePermission.objects.remove_perm(
                        perm.permission.codename, perm.group, perm.device)
                    self.stdout.write(
                        f"  -> delete group permission: ({perm.permission.codename}, {perm.group.name})"
                    )

        # Retire synced devices whose device dictionary no longer exists.
        Device.objects.filter(is_synced=True).exclude(
            hostname__in=synced_devices).update(health=Device.HEALTH_RETIRED)

        # Device types which have all the devices synced and all of them retired
        # should become invisible.
        synced_retired_queryset = DeviceType.objects.annotate(
            not_synced_retired_count=Count(
                Case(
                    When(
                        Q(device__is_synced=False)
                        | ~Q(device__health=Device.HEALTH_RETIRED),
                        then=1,
                    ),
                    output_field=IntegerField(),
                )))
        synced_retired_queryset.filter(not_synced_retired_count=0).update(
            display=False)

        # Device types which have all the devices synced and some of them not
        # retired should become visible.
        synced_not_retired_queryset = DeviceType.objects.annotate(
            not_synced=Count(
                Case(
                    When(Q(device__is_synced=False), then=1),
                    output_field=IntegerField(),
                )),
            not_retired=Count(
                Case(
                    When(~Q(device__health=Device.HEALTH_RETIRED), then=1),
                    output_field=IntegerField(),
                )),
        )
        synced_not_retired_queryset.filter(not_synced=0).filter(
            not_retired__gt=0).update(display=True)
Example No. 11
    def start_job(self, job):
        # Load job definition to get the variables for template
        # rendering
        job_def = yaml_safe_load(job.definition)
        job_def["compatibility"] = job.pipeline_compatibility
        job_def_str = yaml_safe_dump(job_def)
        job_ctx = job_def.get("context", {})

        device = job.actual_device
        worker = device.worker_host

        # TODO: check that device_cfg is not None!
        device_cfg_str = device.load_configuration(job_ctx,
                                                   output_format="yaml")

        env_str = config("env", worker)
        env_dut_str = config("env-dut", worker)
        dispatcher_cfg = config("dispatcher", worker)

        self.save_job_config(job, job_def_str, device_cfg_str, env_str,
                             env_dut_str, dispatcher_cfg)
        self.logger.info("[%d] START => %s (%s)", job.id, worker.hostname,
                         device.hostname)
        send_multipart_u(
            self.controler,
            [
                worker.hostname,
                "START",
                str(job.id),
                job_def_str,
                device_cfg_str,
                dispatcher_cfg,
                env_str,
                env_dut_str,
            ],
        )

        if not job.is_multinode:
            return

        device_cfg = yaml_safe_load(device_cfg_str)
        # For multinode jobs, start the dynamic connections
        for sub_job in job.sub_jobs_list:
            if sub_job == job or not sub_job.dynamic_connection:
                continue

            # Render the sub job definition
            sub_job_def = yaml_safe_load(sub_job.definition)
            sub_job_def["compatibility"] = sub_job.pipeline_compatibility
            sub_job_def_str = yaml_safe_dump(sub_job_def)

            # inherit only enough configuration for dynamic_connection operation
            self.logger.info(
                "[%d] Trimming dynamic connection device configuration.",
                sub_job.id)
            min_device_cfg = job.actual_device.minimise_configuration(
                device_cfg)
            min_device_cfg_str = yaml_safe_dump(min_device_cfg)

            self.save_job_config(
                sub_job,
                sub_job_def_str,
                min_device_cfg_str,
                env_str,
                env_dut_str,
                dispatcher_cfg,
            )
            self.logger.info("[%d] START => %s (connection)", sub_job.id,
                             worker.hostname)
            send_multipart_u(
                self.controler,
                [
                    worker.hostname,
                    "START",
                    str(sub_job.id),
                    sub_job_def_str,
                    min_device_cfg_str,
                    dispatcher_cfg,
                    env_str,
                    env_dut_str,
                ],
            )
Example No. 12
    def test_imx8m_template(self):
        fastboot_cmd_order = [
            "update",
            "ptable",
            "partition",
            "hyp",
            "modem",
            "rpm",
            "sbl1",
            "sbl2",
            "sec",
            "tz",
            "aboot",
            "boot",
            "rootfs",
            "vendor",
            "system",
            "cache",
            "userdata",
        ]

        rendered = self.render_device_dictionary_file("imx8m-01.jinja2")
        template_dict = yaml_safe_load(rendered)
        self.assertIsNotNone(template_dict)
        self.assertIn("error-messages", template_dict["constants"]["u-boot"])
        self.assertEqual(
            "u-boot=>",
            template_dict["actions"]["boot"]["methods"]["u-boot"]["parameters"]
            ["bootloader_prompt"],
        )

        context = {"bootloader_prompt": "imx8m=>"}
        rendered = self.render_device_dictionary_file("imx8m-01.jinja2",
                                                      context)
        template_dict = yaml_safe_load(rendered)
        self.assertIsNotNone(template_dict)
        self.assertIn("error-messages", template_dict["constants"]["u-boot"])
        self.assertEqual(
            "imx8m=>",
            template_dict["actions"]["boot"]["methods"]["u-boot"]["parameters"]
            ["bootloader_prompt"],
        )

        for cmd in template_dict["flash_cmds_order"]:
            idx = template_dict["flash_cmds_order"].index(cmd)
            self.assertEqual(cmd, fastboot_cmd_order[idx])
        # test overwriting kernel args
        checked = False
        context = {"console_device": "ttyUSB1"}
        rendered = self.render_device_dictionary_file("imx8m-01.jinja2",
                                                      context)
        template_dict = yaml_safe_load(rendered)
        commands = template_dict["actions"]["boot"]["methods"]["u-boot"][
            "ramdisk"]["commands"]
        self.assertIsNotNone(commands)
        self.assertIsInstance(commands, list)
        for line in commands:
            if "setenv bootargs" in line:
                self.assertIn("console=ttyUSB1", line)
                checked = True
        self.assertTrue(checked)
Example No. 13
    def test_juno_vexpress_template(self):
        data = """{% extends 'juno.jinja2' %}
    {% set connection_command = 'telnet serial4 7001' %}
    {% set hard_reset_command = '/usr/local/lab-scripts/snmp_pdu_control --hostname pdu18 --command reboot --port 10 --delay 10' %}
    {% set power_off_command = '/usr/local/lab-scripts/snmp_pdu_control --hostname pdu18 --command off --port 10 --delay 10' %}
    {% set power_on_command = '/usr/local/lab-scripts/snmp_pdu_control --hostname pdu18 --command on --port 10 --delay 10' %}
    {% set usb_label = 'SanDiskCruzerBlade' %}
    {% set usb_uuid = 'usb-SanDisk_Cruzer_Blade_20060266531DA442AD42-0:0' %}
    {% set usb_device_id = 0 %}"""
        self.assertTrue(self.validate_data("staging-juno-01", data))
        test_template = prepare_jinja_template("staging-juno-01",
                                               data,
                                               raw=True)
        rendered = test_template.render()
        template_dict = yaml_safe_load(rendered)
        self.assertIsNotNone(template_dict)
        self.assertEqual({"boot": 30}, template_dict["character_delays"])
        self.assertIn("error-messages", template_dict["constants"]["u-boot"])
        self.assertEqual(
            "juno#",
            template_dict["actions"]["boot"]["methods"]["u-boot"]["parameters"]
            ["bootloader_prompt"],
        )
        self.assertEqual(
            "Shell>",
            template_dict["actions"]["boot"]["methods"]["uefi"]["parameters"]
            ["bootloader_prompt"],
        )
        self.assertEqual(
            "Start:",
            template_dict["actions"]["boot"]["methods"]["uefi-menu"]
            ["parameters"]["bootloader_prompt"],
        )

        rendered = test_template.render(bootloader_prompt="vexpress>")
        template_dict = yaml_safe_load(rendered)
        self.assertIsNotNone(template_dict)
        self.assertEqual({"boot": 30}, template_dict["character_delays"])
        self.assertIn("error-messages", template_dict["constants"]["u-boot"])
        self.assertEqual(
            "vexpress>",
            template_dict["actions"]["boot"]["methods"]["u-boot"]["parameters"]
            ["bootloader_prompt"],
        )
        self.assertEqual(
            "Shell>",
            template_dict["actions"]["boot"]["methods"]["uefi"]["parameters"]
            ["bootloader_prompt"],
        )
        self.assertEqual(
            "Start:",
            template_dict["actions"]["boot"]["methods"]["uefi-menu"]
            ["parameters"]["bootloader_prompt"],
        )
        self.assertIn("nfs",
                      template_dict["actions"]["boot"]["methods"]["u-boot"])
        self.assertIn(
            "commands",
            template_dict["actions"]["boot"]["methods"]["u-boot"]["nfs"])
        commands = template_dict["actions"]["boot"]["methods"]["u-boot"][
            "nfs"]["commands"]
        check = 0
        for line in commands:
            if line.startswith("setenv bootargs console"):
                check = 1
                self.assertIn(",vers=3 ", line)
        if not check:
            self.fail("Unable to find setenv nfsargs")
Example No. 14
    def test_substitutions(self):
        """
        Test substitution of secondary media values into u-boot commands

        Unlike most u-boot calls, removable knows in advance all the values it needs to substitute
        into the boot commands for the secondary deployment as these are fixed by the device config
        and the image details from the job submission.
        """
        job_parser = JobParser()
        (rendered, _) = self.factory.create_device("cubie1.jinja2")
        cubie = NewDevice(yaml_safe_load(rendered))
        sample_job_file = os.path.join(
            os.path.dirname(__file__), "sample_jobs/cubietruck-removable.yaml")
        with open(sample_job_file) as sample_job_data:
            job = job_parser.parse(sample_job_data, cubie, 4212, None, "")
        job.logger = DummyLogger()
        job.validate()
        boot_params = [
            methods for methods in job.parameters["actions"]
            if "boot" in methods.keys()
        ][1]["boot"]
        self.assertIn("ramdisk", boot_params)
        self.assertIn("kernel", boot_params)
        self.assertIn("dtb", boot_params)
        self.assertIn("root_uuid", boot_params)
        self.assertIn("boot_part", boot_params)
        self.assertNotIn("type", boot_params)
        self.assertGreater(len(job.pipeline.actions), 1)
        self.assertIsNotNone(job.pipeline.actions[1].internal_pipeline)
        u_boot_action = [
            action for action in job.pipeline.actions
            if action.name == "uboot-action"
        ][1]
        overlay = [
            action for action in u_boot_action.internal_pipeline.actions
            if action.name == "bootloader-overlay"
        ][0]
        self.assertIsNotNone(
            overlay.get_namespace_data(action="storage-deploy",
                                       label="u-boot",
                                       key="device"))

        methods = cubie["actions"]["boot"]["methods"]
        self.assertIn("u-boot", methods)
        self.assertIn("usb", methods["u-boot"])
        self.assertIn("commands", methods["u-boot"]["usb"])
        commands_list = methods["u-boot"]["usb"]["commands"]
        device_id = u_boot_action.get_namespace_data(action="storage-deploy",
                                                     label="u-boot",
                                                     key="device")
        self.assertIsNotNone(device_id)
        kernel_type = u_boot_action.parameters["kernel_type"]
        bootcommand = map_kernel_uboot(kernel_type,
                                       device_params=cubie.get("parameters"))
        substitutions = {
            "{BOOTX}":
            "%s %s %s %s" % (
                bootcommand,
                cubie["parameters"][bootcommand]["kernel"],
                cubie["parameters"][bootcommand]["ramdisk"],
                cubie["parameters"][bootcommand]["dtb"],
            ),
            "{RAMDISK}":
            boot_params["ramdisk"],
            "{KERNEL}":
            boot_params["kernel"],
            "{DTB}":
            boot_params["dtb"],
            "{ROOT}":
            boot_params["root_uuid"],
            "{ROOT_PART}":
            "%s:%s" % (
                cubie["parameters"]["media"]["usb"][device_id]["device_id"],
                u_boot_action.parameters["boot_part"],
            ),
        }
        self.assertEqual("bootz 0x42000000 0x43300000 0x43000000",
                         substitutions["{BOOTX}"])
        self.assertEqual("/boot/initrd.img-3.16.0-4-armmp-lpae.u-boot",
                         substitutions["{RAMDISK}"])
        commands = substitute(commands_list, substitutions)
        print(commands)
        self.assertEqual(
            commands,
            [
                "usb start",
                "setenv autoload no",
                "load usb 0:0:1 {KERNEL_ADDR} /boot/vmlinuz-3.16.0-4-armmp-lpae",
                "load usb 0:0:1 {RAMDISK_ADDR} /boot/initrd.img-3.16.0-4-armmp-lpae.u-boot",
                "setenv initrd_size ${filesize}",
                "load usb 0:0:1 {DTB_ADDR} /boot/dtb-3.16.0-4-armmp-lpae",
                "console=ttyS0,115200n8 root=UUID=159d17cc-697c-4125-95a0-a3775e1deabe  ip=dhcp",
                "bootz 0x42000000 0x43300000 0x43000000",
            ],
        )
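As an aside, a minimal sketch of the placeholder substitution this test relies on, assuming substitute() simply replaces every provided "{KEY}" marker in each command string and leaves unknown markers untouched (the real helper may behave differently, e.g. support dropping commands):

from typing import Dict, List


def substitute_sketch(commands: List[str], mapping: Dict[str, str]) -> List[str]:
    # Replace each provided "{KEY}" marker; markers without a mapping survive,
    # which is why "{KERNEL_ADDR}" is still present in the expected commands above.
    substituted = []
    for command in commands:
        for marker, value in mapping.items():
            command = command.replace(marker, value)
        substituted.append(command)
    return substituted


print(substitute_sketch(
    ["load usb 0:0:1 {KERNEL_ADDR} {KERNEL}"],
    {"{KERNEL}": "/boot/vmlinuz-3.16.0-4-armmp-lpae"},
))
# ['load usb 0:0:1 {KERNEL_ADDR} /boot/vmlinuz-3.16.0-4-armmp-lpae']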
Example No. 15
 def __init__(self):
     data = yaml_safe_load(Factory().create_device("bbb-01.jinja2")[0])
     super().__init__(data)
Example No. 16
 def test_ssh_job_data(self):
     data = yaml_safe_load(self.factory.make_job_yaml())
     self.assertNotIn("context", data)
     self.assertNotIn("timeout", data)
     self.assertIn("timeouts", data)
     self.assertIn("job", data["timeouts"])
Example No. 17
def start_job(
    url: str,
    token: str,
    job_id: int,
    definition: str,
    device: str,
    dispatcher: str,
    env_str: str,
    env_dut: str,
    job_log_interval: int,
) -> Optional[int]:
    """
    Start the lava-run process and return the pid
    """
    # Create the base directory
    dispatcher_cfg = yaml_safe_load(dispatcher)
    base_dir = tmp_dir / f"{get_prefix(dispatcher_cfg)}{job_id}"
    base_dir.mkdir(mode=0o755, exist_ok=True, parents=True)

    # Write back the job, device and dispatcher configuration
    (base_dir / "job.yaml").write_text(definition, encoding="utf-8")
    (base_dir / "device.yaml").write_text(device, encoding="utf-8")
    (base_dir / "dispatcher.yaml").write_text(dispatcher, encoding="utf-8")

    # Dump the environment variables in the tmp file.
    if env_dut:
        (base_dir / "env-dut.yaml").write_text(env_dut, encoding="utf-8")

    try:
        if debug:
            out_file = sys.stdout
            err_file = sys.stderr
        else:
            out_file = (base_dir / "stdout").open("w")
            err_file = (base_dir / "stderr").open("w")
        env = create_environ(env_str)
        args = [
            # Run lava-run under nice so every sub-command is niced as well
            "nice",
            "lava-run",
            f"--device={base_dir / 'device.yaml'}",
            f"--dispatcher={base_dir / 'dispatcher.yaml'}",
            f"--output-dir={base_dir}",
            f"--job-id={job_id}",
            f"--url={url}",
            f"--token={token}",
            f"--job-log-interval={job_log_interval}",
        ]
        if debug:
            args.append("--debug")
        args.append(str(base_dir / "job.yaml"))

        if env_dut:
            args.append("--env-dut=%s" % (base_dir / "env-dut.yaml"))

        proc = subprocess.Popen(args,
                                stdout=out_file,
                                stderr=err_file,
                                env=env,
                                preexec_fn=os.setpgrp)
        return proc.pid
    except Exception as exc:  # pylint: disable=broad-except
        LOG.error("[%d] Unable to start: %s", job_id, args)
        # daemon must always continue running even if the job crashes
        if hasattr(exc, "child_traceback"):
            LOG.exception("[%d] %s", job_id, exc.child_traceback)
        else:
            LOG.exception("[%d] %s", job_id, exc)
            err_file.write("%s\n%s\n" % (exc, traceback.format_exc()))
        # The process has not started
        # The END message will be sent the next time
        # check_job_status is run
        return None
Example No. 18
    def test_make_ssh_guest_yaml(self):
        hostname = "fakeqemu3"
        device = self.factory.make_device(self.device_type, hostname)
        try:
            jobs = TestJob.from_yaml_and_user(self.factory.make_job_yaml(),
                                              self.factory.make_user())
        except DevicesUnavailableException as exc:
            self.fail(exc)

        sub_id = []
        group_size = 0
        path = os.path.join(os.path.dirname(os.path.join(__file__)),
                            "sample_jobs")
        host_role = []
        for job in jobs:
            data = yaml_safe_load(job.definition)
            params = data["protocols"]["lava-multinode"]
            params["target_group"] = "replaced"
            if not group_size:
                group_size = params["group_size"]
            if job.device_role == "host":
                self.assertFalse(job.dynamic_connection)
                self.assertEqual(job.requested_device_type.name,
                                 device.device_type.name)
                self.assertIn(params["sub_id"], [0, 1, 2])
                sub_id.append(params["sub_id"])
                comparison = yaml_safe_load(
                    open(os.path.join(path, "qemu-ssh-parent.yaml"),
                         "r").read())
                self.assertIn("protocols", data)
                self.assertIn("lava-multinode", data["protocols"])
                self.assertIn("sub_id", data["protocols"]["lava-multinode"])
                del comparison["protocols"]["lava-multinode"]["sub_id"]
                del data["protocols"]["lava-multinode"]["sub_id"]
                self.assertEqual(data, comparison)
                self.assertEqual(job.device_role, "host")
                host_role.append(job.device_role)
            else:
                self.assertTrue(job.dynamic_connection)
                self.assertNotIn(params["sub_id"], sub_id)
                sub_id.append(params["sub_id"])
                self.assertIsNone(job.requested_device_type)
                deploy = [
                    action for action in data["actions"] if "deploy" in action
                ][0]
                self.assertEqual(deploy["deploy"]["connection"], "ssh")
                # validate each job
                del data["protocols"]["lava-multinode"]["sub_id"]
                self.assertEqual(
                    data,
                    yaml_safe_load(
                        open(os.path.join(path, "qemu-ssh-guest-1.yaml"),
                             "r").read()),
                )
                self.assertIsNone(job.requested_device_type)
                self.assertIsNone(job.actual_device)
                host_role.append(data["host_role"])

        self.assertFalse(any(role for role in host_role if role != "host"))
        self.assertEqual(len(sub_id), group_size)
        self.assertEqual(sub_id, list(range(group_size)))
Example No. 19
def start(url: str, jobs: JobsDB, job_id: int, token: str,
          job_log_interval: int) -> None:
    LOG.info("[%d] server => START", job_id)
    # Was the job already started?
    job = jobs.get(job_id)

    # Start the job
    if job is None:
        ret = requests_get(f"{url}{URL_JOBS}{job_id}/", token)
        if ret.status_code != 200:
            LOG.error("[%d] -> server error: code %d", job_id, ret.status_code)
            LOG.debug("[%d] --> %s", job_id, ret.text)
            return

        try:
            data = ret.json()
            definition = data["definition"]
            device = data["device"]
            dispatcher = data["dispatcher"]
            env = data["env"]
            env_dut = data["env-dut"]
        except (KeyError, ValueError) as exc:
            LOG.error("[%d] -> invalid response: %r", job_id, str(exc))
            return

        LOG.info("[%d] Starting job", job_id)
        LOG.debug("[%d]         : %s", job_id, yaml_safe_load(definition))
        LOG.debug("[%d] device  : %s", job_id, yaml_safe_load(device))
        LOG.debug("[%d] dispatch: %s", job_id, yaml_safe_load(dispatcher))
        LOG.debug("[%d] env     : %s", job_id, yaml_safe_load(env))
        LOG.debug("[%d] env-dut : %s", job_id, yaml_safe_load(env_dut))

        # Start the job, grab the pid and create it in the database
        pid = start_job(
            url,
            token,
            job_id,
            definition,
            device,
            dispatcher,
            env,
            env_dut,
            job_log_interval,
        )
        job = jobs.create(
            job_id,
            0 if pid is None else pid,
            Job.FINISHED if pid is None else Job.RUNNING,
            yaml_safe_load(dispatcher),
            token,
        )
    else:
        LOG.info("[%d] -> already running", job_id)

    # Update the server state
    LOG.info("[%d] RUNNING => server", job_id)
    ret = requests_post(f"{url}{URL_JOBS}{job_id}/",
                        token,
                        data={"state": "RUNNING"})
    if ret.status_code != 200:
        LOG.error("[%d] -> server error: code %d", job_id, ret.status_code)
        LOG.debug("[%d] --> %s", job_id, ret.text)
Example No. 20
File: api.py Project: ivoire/lava
    def master_config(self):
        """
        Name
        ----
        `master_config` ()

        Description
        -----------
        Return a dictionary containing the master and logger ZMQ
        socket addresses for this instance.

        Arguments
        ---------
        None

        Return value
        ------------
        Returns a dictionary containing the following keys:
        {
          "MASTER_URL": "tcp://<lava-master-dns>:5556",
          "LOGGING_URL": "tcp://<lava-master-dns>:5555",
          "ENCRYPT": False,
          "IPv6": False,
          "EVENT_SOCKET": "tcp://*:5500",
          "EVENT_TOPIC": "org.linaro.validation",
          "EVENT_NOTIFICATION": True,
          "LOG_SIZE_LIMIT": 10,
        }

        If ENCRYPT is True, clients MUST already have a usable
        client certificate installed on the master AND the current
        master certificate installed on the client, before a
        connection can be made.
        """
        data = {
            "master_socket": "tcp://<lava-master-dns>:5556",
            "socket": "tcp://<lava-master-dns>:5555",
            "encrypt": False,
            "ipv6": False,
        }

        master = {"ERROR": "invalid master config"}
        filename = os.path.join(settings.MEDIA_ROOT, "lava-master-config.yaml")
        if os.path.exists(filename):
            try:
                with open(filename, "r") as output:
                    master = yaml_safe_load(output)
            except yaml.YAMLError as exc:
                return master
        if master:
            data.update(master)

        log_config = {"ERROR": "invalid logging config"}
        filename = os.path.join(settings.MEDIA_ROOT, "lava-logs-config.yaml")
        if os.path.exists(filename):
            try:
                with open(filename, "r") as output:
                    log_config = yaml_safe_load(output)
            except yaml.YAMLError as exc:
                return log_config
        if log_config:
            data.update(log_config)
        return {
            "MASTER_URL": data["master_socket"],
            "LOGGING_URL": data["socket"],
            "IPv6": data["ipv6"],
            "ENCRYPT": data.get("encrypt", False),
            "EVENT_TOPIC": settings.EVENT_TOPIC,
            "EVENT_SOCKET": settings.EVENT_SOCKET,
            "EVENT_NOTIFICATION": settings.EVENT_NOTIFICATION,
            "LOG_SIZE_LIMIT": settings.LOG_SIZE_LIMIT,
        }
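A minimal client-side sketch of querying this entry point; the instance URL is hypothetical and the method path (scheduler.master_config) is an assumption:

import xmlrpc.client

server = xmlrpc.client.ServerProxy("https://lava.example.com/RPC2")  # hypothetical URL
config = server.scheduler.master_config()  # assumed method path
print(config["MASTER_URL"], config["LOGGING_URL"], config["ENCRYPT"])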
Example No. 21
    def test_overlay_notee(self, which_mock):
        parameters = {
            "dispatcher": {},  # fake dispatcher parameter. Normally added by parser
            "device_type": "beaglebone-black",
            "job_name": "uboot-pipeline",
            "job_timeout": "15m",
            "action_timeout": "5m",
            "priority": "medium",
            "actions": {
                "boot": {
                    "namespace": "common",
                    "method": "u-boot",
                    "commands": "ramdisk",
                    "prompts": ["linaro-test", "root@debian:~#"],
                },
                "deploy": {
                    "namespace": "common",
                    "ramdisk": {"url": "initrd.gz", "compression": "gz"},
                    "kernel": {"url": "zImage", "type": "zimage"},
                    "dtb": {"url": "broken.dtb"},
                    "tee": {"url": ""},
                },
            },
        }
        data = yaml_safe_load(Factory().create_device("bbb-01.jinja2")[0])
        device = NewDevice(data)
        ip_addr = dispatcher_ip(None)
        parsed = []
        kernel_addr = "0x83000000"
        ramdisk_addr = "0x83000000"
        dtb_addr = "0x88000000"
        tee_adr = "0x83000000"
        kernel = parameters["actions"]["deploy"]["kernel"]["url"]
        ramdisk = parameters["actions"]["deploy"]["ramdisk"]["url"]
        dtb = parameters["actions"]["deploy"]["dtb"]["url"]
        tee = parameters["actions"]["deploy"]["tee"]["url"]

        substitution_dictionary = {
            "{SERVER_IP}": ip_addr,
            # the addresses need to be hexadecimal
            "{KERNEL_ADDR}": kernel_addr,
            "{DTB_ADDR}": dtb_addr,
            "{RAMDISK_ADDR}": ramdisk_addr,
            "{BOOTX}": "%s %s %s %s" % ("bootz", kernel_addr, ramdisk_addr, dtb_addr),
            "{RAMDISK}": ramdisk,
            "{KERNEL}": kernel,
            "{DTB}": dtb,
            "{TEE}": tee,
        }
        params = device["actions"]["boot"]["methods"]
        params["u-boot"]["ramdisk"]["commands"] = substitute(
            params["u-boot"]["ramdisk"]["commands"], substitution_dictionary, drop=True
        )
        commands = params["u-boot"]["ramdisk"]["commands"]
        self.assertIs(type(commands), list)
        self.assertIn("tftp 0x83000000 zImage", commands)
        self.assertNotIn("tftp 0x83000000 {TEE}", commands)
        self.assertNotIn("tftp 0x83000000 ", commands)
        self.assertIn("setenv initrd_size ${filesize}", commands)
        self.assertIn("tftp 0x88000000 broken.dtb", commands)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)
Example No. 22
 def test_lxc_with_device(self):
     self.assertIsNotNone(self.job)
     # validate with two test actions, lxc and device
     self.job.validate()
     lxc_yaml = os.path.join(os.path.dirname(__file__),
                             "sample_jobs/bbb-lxc.yaml")
     with open(lxc_yaml) as sample_job_data:
         data = yaml_safe_load(sample_job_data)
     lxc_deploy = [
         action for action in self.job.pipeline.actions
         if action.name == "lxc-deploy"
     ][0]
     overlay = [
         action for action in lxc_deploy.internal_pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     test_def = [
         action for action in overlay.internal_pipeline.actions
         if action.name == "test-definition"
     ][0]
     self.assertIsNotNone(test_def.level, test_def.test_list)
     runner = [
         action for action in test_def.internal_pipeline.actions
         if action.name == "test-runscript-overlay"
     ][0]
     self.assertIsNotNone(runner.testdef_levels)
     tftp_deploy = [
         action for action in self.job.pipeline.actions
         if action.name == "tftp-deploy"
     ][0]
     prepare = [
         action for action in tftp_deploy.internal_pipeline.actions
         if action.name == "prepare-tftp-overlay"
     ][0]
     overlay = [
         action for action in prepare.internal_pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     test_def = [
         action for action in overlay.internal_pipeline.actions
         if action.name == "test-definition"
     ][0]
     namespace = test_def.parameters.get("namespace")
     self.assertIsNotNone(namespace)
     test_actions = [
         action for action in self.job.parameters["actions"]
         if "test" in action
     ]
     for action in test_actions:
         if "namespace" in action["test"]:
             if action["test"]["namespace"] == namespace:
                 self.assertEqual(action["test"]["definitions"][0]["name"],
                                  "smoke-tests-bbb")
     namespace_tests = [
         action["test"]["definitions"] for action in test_actions
         if "namespace" in action["test"]
         and action["test"]["namespace"] == namespace
     ]
     self.assertEqual(len(namespace_tests), 1)
     self.assertEqual(len(test_actions), 2)
     self.assertEqual("smoke-tests-bbb", namespace_tests[0][0]["name"])
     self.assertEqual("smoke-tests-bbb", test_def.test_list[0][0]["name"])
     self.assertIsNotNone(test_def.level, test_def.test_list)
     runner = [
         action for action in test_def.internal_pipeline.actions
         if action.name == "test-runscript-overlay"
     ][0]
     self.assertIsNotNone(runner.testdef_levels)
     # remove the second test action
     data["actions"].pop()
     test_actions = [
         action for action in data["actions"] if "test" in action
     ]
     self.assertEqual(len(test_actions), 1)
     self.assertEqual(test_actions[0]["test"]["namespace"], "probe")
     parser = JobParser()
     (rendered, _) = self.factory.create_device("bbb-01.jinja2")
     device = NewDevice(yaml_safe_load(rendered))
     job = parser.parse(yaml_safe_dump(data), device, 4577, None, "")
     job.logger = DummyLogger()
     job.validate()
     lxc_deploy = [
         action for action in job.pipeline.actions
         if action.name == "lxc-deploy"
     ][0]
     overlay = [
         action for action in lxc_deploy.internal_pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     test_def = [
         action for action in overlay.internal_pipeline.actions
         if action.name == "test-definition"
     ][0]
     self.assertIsNotNone(test_def.level, test_def.test_list)
     runner = [
         action for action in test_def.internal_pipeline.actions
         if action.name == "test-runscript-overlay"
     ][0]
     self.assertIsNotNone(runner.testdef_levels)
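The repeated [action for action in pipeline.actions if action.name == name][0] lookups in this test could be captured by a small helper; the sketch below is hypothetical and not part of the LAVA test suite.

# Hypothetical convenience helper (illustration only).
def first_action(pipeline, name):
    """Return the first action in a pipeline with the given name, or raise."""
    matches = [action for action in pipeline.actions if action.name == name]
    if not matches:
        raise LookupError("no action named %r in pipeline" % name)
    return matches[0]

# possible usage: lxc_deploy = first_action(self.job.pipeline, "lxc-deploy")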
Example No. 23
    def parse(self,
              content,
              device,
              job_id,
              logger,
              dispatcher_config,
              env_dut=None):
        data = yaml_safe_load(content)
        job = Job(job_id, data, logger)
        test_counts = {}
        job.device = device
        job.parameters["env_dut"] = env_dut
        # Load the dispatcher config
        job.parameters["dispatcher"] = {}
        if dispatcher_config is not None:
            config = yaml_safe_load(dispatcher_config)
            if isinstance(config, dict):
                job.parameters["dispatcher"] = config

        level_tuple = Protocol.select_all(job.parameters)
        # sort the list of protocol objects by the protocol class level.
        job.protocols = [
            item[0](job.parameters, job_id)
            for item in sorted(level_tuple,
                               key=lambda level_tuple: level_tuple[1])
        ]
        pipeline = Pipeline(job=job)
        self._timeouts(data, job)

        # deploy and boot classes can populate the pipeline differently depending
        # on the test action type they are linked with (via namespacing).
        # This code builds an information dict for each namespace which is then
        # passed as a parameter to each Action class to use.
        test_actions = [
            action for action in data["actions"] if "test" in action
        ]
        for test_action in test_actions:
            test_parameters = test_action["test"]
            test_type = LavaTest.select(device, test_parameters)
            namespace = test_parameters.get("namespace", "common")
            connection_namespace = test_parameters.get("connection-namespace",
                                                       namespace)
            if namespace in job.test_info:
                job.test_info[namespace].append({
                    "class": test_type,
                    "parameters": test_parameters
                })
            else:
                job.test_info.update({
                    namespace: [{
                        "class": test_type,
                        "parameters": test_parameters
                    }]
                })
            if namespace != connection_namespace:
                job.test_info.update({
                    connection_namespace: [{
                        "class": test_type,
                        "parameters": test_parameters
                    }]
                })

        # FIXME: also read permissible overrides from device config and set from job data
        # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
        for action_data in data["actions"]:
            for name in action_data:
                # Set a default namespace if needed
                namespace = action_data[name].setdefault("namespace", "common")
                test_counts.setdefault(namespace, 1)

                if name in ["deploy", "boot", "test"]:
                    action = parse_action(
                        action_data,
                        name,
                        device,
                        pipeline,
                        job.test_info,
                        test_counts[namespace],
                    )
                    if name == "test" and action.needs_overlay():
                        test_counts[namespace] += 1
                elif name == "command":
                    action = CommandAction()
                    action.parameters = action_data[name]
                    pipeline.add_action(action)

                else:
                    raise JobError("Unknown action name '%s'" % name)

        # a FinalizeAction is always required at the end of the pipeline
        finalize = FinalizeAction()
        pipeline.add_action(finalize)
        finalize.populate(None)
        job.pipeline = pipeline
        if "compatibility" in data:
            try:
                job_c = int(job.compatibility)
                data_c = int(data["compatibility"])
            except ValueError as exc:
                raise JobError("invalid compatibility value: %s" % exc)
            if job_c < data_c:
                raise JobError(
                    "Dispatcher unable to meet job compatibility requirement. %d < %d"
                    % (job_c, data_c))
        return job
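The per-namespace bookkeeping described in the comments inside parse() ends up in job.test_info, keyed by namespace. A rough, illustrative picture of the resulting structure is shown below; in the real object the "class" values are the selected LavaTest subclasses (shown here as strings) and the parameters are abbreviated.

# Illustrative shape of job.test_info after parsing a two-namespace job.
example_test_info = {
    "common": [
        {"class": "TestShell", "parameters": {"namespace": "common"}},
    ],
    "probe": [
        {"class": "TestShell", "parameters": {"namespace": "probe"}},
    ],
}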
Example No. 24
 def test_lxc_without_lxctest(self):
     lxc_yaml = os.path.join(os.path.dirname(__file__),
                             "sample_jobs/bbb-lxc-notest.yaml")
     with open(lxc_yaml) as sample_job_data:
         data = yaml_safe_load(sample_job_data)
     parser = JobParser()
     (rendered, _) = self.factory.create_device("bbb-01.jinja2")
     device = NewDevice(yaml_safe_load(rendered))
     job = parser.parse(yaml_safe_dump(data), device, 4577, None, "")
     job.logger = DummyLogger()
     job.validate()
     lxc_deploy = [
         action for action in job.pipeline.actions
         if action.name == "lxc-deploy"
     ][0]
     names = [
         action.name for action in lxc_deploy.internal_pipeline.actions
     ]
     self.assertNotIn("prepare-tftp-overlay", names)
     namespace1 = lxc_deploy.parameters.get("namespace")
     tftp_deploy = [
         action for action in job.pipeline.actions
         if action.name == "tftp-deploy"
     ][0]
     prepare = [
         action for action in tftp_deploy.internal_pipeline.actions
         if action.name == "prepare-tftp-overlay"
     ][0]
     overlay = [
         action for action in prepare.internal_pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     test_def = [
         action for action in overlay.internal_pipeline.actions
         if action.name == "test-definition"
     ][0]
     namespace = test_def.parameters.get("namespace")
     self.assertIsNotNone(namespace)
     self.assertIsNotNone(namespace1)
     self.assertNotEqual(namespace, namespace1)
     self.assertNotEqual(self.job.pipeline.describe(False),
                         job.pipeline.describe(False))
     test_actions = [
         action for action in job.parameters["actions"] if "test" in action
     ]
     for action in test_actions:
         if "namespace" in action["test"]:
             if action["test"]["namespace"] == namespace:
                 self.assertEqual(action["test"]["definitions"][0]["name"],
                                  "smoke-tests-bbb")
         else:
             self.fail("Found a test action not from the tftp boot")
     namespace_tests = [
         action["test"]["definitions"] for action in test_actions
         if "namespace" in action["test"]
         and action["test"]["namespace"] == namespace
     ]
     self.assertEqual(len(namespace_tests), 1)
     self.assertEqual(len(test_actions), 1)
     description_ref = self.pipeline_reference("bbb-lxc-notest.yaml",
                                               job=job)
     self.assertEqual(description_ref, job.pipeline.describe(False))
Example No. 25
 def create_job(self, template, filename, job_ctx=None, validate=True):
     y_file = os.path.join(os.path.dirname(__file__), filename)
     with open(y_file) as sample_job_data:
         job_data = yaml_safe_load(sample_job_data.read())
     return self.create_custom_job(template, job_data, job_ctx, validate)
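A typical call pairs a device template with a sample job file; the file names below are illustrative only, and validate=False is assumed to skip validation while developing a new template.

# Hypothetical usage inside a test case (names and keyword are examples only):
# job = self.create_job("bbb-01.jinja2", "sample_jobs/bbb-lxc.yaml", validate=False)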
Example No. 26
def update(values):
    # Add values to the local context
    ADMINS = values.get("ADMINS")
    AUTH_LDAP_GROUP_SEARCH = values.get("AUTH_LDAP_GROUP_SEARCH")
    AUTH_LDAP_GROUP_TYPE = values.get("AUTH_LDAP_GROUP_TYPE")
    AUTH_LDAP_SERVER_URI = values.get("AUTH_LDAP_SERVER_URI")
    AUTH_LDAP_USER_SEARCH = values.get("AUTH_LDAP_USER_SEARCH")
    AUTH_DEBIAN_SSO = values.get("AUTH_DEBIAN_SSO")
    AUTH_SOCIALACCOUNT = values.get("AUTH_SOCIALACCOUNT")
    AUTH_GITLAB_URL = values.get("AUTH_GITLAB_URL")
    AUTH_GITLAB_SCOPE = values.get("AUTH_GITLAB_SCOPE")
    AUTHENTICATION_BACKENDS = values.get("AUTHENTICATION_BACKENDS")
    DISALLOWED_USER_AGENTS = values.get("DISALLOWED_USER_AGENTS")
    DJANGO_LOGFILE = values.get("DJANGO_LOGFILE")
    EVENT_NOTIFICATION = values.get("EVENT_NOTIFICATION")
    INSTALLED_APPS = values.get("INSTALLED_APPS")
    INTERNAL_IPS = values.get("INTERNAL_IPS")
    LOGGING = values.get("LOGGING")
    MANAGERS = values.get("MANAGERS")
    MIDDLEWARE = values.get("MIDDLEWARE")
    MOUNT_POINT = values.get("MOUNT_POINT")
    SENTRY_DSN = values.get("SENTRY_DSN")
    STATEMENT_TIMEOUT = values.get("STATEMENT_TIMEOUT")
    USE_DEBUG_TOOLBAR = values.get("USE_DEBUG_TOOLBAR")

    # Fix mount point
    # Remove the leading slash and keep only one trailing slash
    MOUNT_POINT = (MOUNT_POINT.rstrip("/") + "/").lstrip("/")

    # Set the session cookie path according to the mount point so that cookies from
    # two LAVA servers hosted on the same domain name but under different paths do
    # not override each other.
    # Keep in mind that the mount point is empty by default; in that case the session
    # cookie path should be "/" (it must never be empty).
    SESSION_COOKIE_PATH = "/" + MOUNT_POINT.lstrip("/")

    # Fix the ADMINS and MANAGERS variables.
    # In Django >= 1.9 these are lists of tuples; see
    # https://docs.djangoproject.com/en/1.9/ref/settings/#admins
    ADMINS = [tuple(v) for v in ADMINS]
    MANAGERS = [tuple(v) for v in MANAGERS]

    # EVENT_NOTIFICATION is a boolean
    EVENT_NOTIFICATION = bool(EVENT_NOTIFICATION)

    # Social accounts authentication config
    if AUTH_SOCIALACCOUNT or AUTH_GITLAB_URL:
        auth_socialaccount = {}
        if AUTH_SOCIALACCOUNT:
            try:
                auth_socialaccount = yaml_safe_load(AUTH_SOCIALACCOUNT)
                if not isinstance(auth_socialaccount, dict):
                    auth_socialaccount = {}
            except YAMLError:
                raise ImproperlyConfigured(
                    "Failed to load social account configuration.")

        # the former GitLab authentication config takes precedence over the new approach
        if AUTH_GITLAB_URL:
            auth_socialaccount["gitlab"] = {
                "GITLAB_URL": AUTH_GITLAB_URL,
                "SCOPE": AUTH_GITLAB_SCOPE,
            }

        if auth_socialaccount:
            INSTALLED_APPS.append("allauth")
            INSTALLED_APPS.append("allauth.account")
            INSTALLED_APPS.append("allauth.socialaccount")

            for provider in auth_socialaccount.keys():
                INSTALLED_APPS.append(
                    f"allauth.socialaccount.providers.{provider}")

            AUTHENTICATION_BACKENDS.append(
                "allauth.account.auth_backends.AuthenticationBackend")
            SOCIALACCOUNT_PROVIDERS = auth_socialaccount

    # LDAP authentication config
    if AUTH_LDAP_SERVER_URI:
        INSTALLED_APPS.append("ldap")
        INSTALLED_APPS.append("django_auth_ldap")
        import ldap
        from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion

        def get_ldap_group_types():
            """Return a list of all LDAP group types supported by django_auth_ldap module"""
            import django_auth_ldap.config
            import inspect

            types = []
            for name, obj in inspect.getmembers(django_auth_ldap.config):
                if inspect.isclass(obj) and name.endswith("Type"):
                    types.append(name)

            return types

        AUTHENTICATION_BACKENDS.append("django_auth_ldap.backend.LDAPBackend")

        # Available variables: AUTH_LDAP_BIND_DN, AUTH_LDAP_BIND_PASSWORD,
        # AUTH_LDAP_USER_DN_TEMPLATE AUTH_LDAP_USER_ATTR_MAP
        if AUTH_LDAP_USER_SEARCH:
            AUTH_LDAP_USER_SEARCH = eval(AUTH_LDAP_USER_SEARCH.encode("utf-8"))
            # AUTH_LDAP_USER_SEARCH and AUTH_LDAP_USER_DN_TEMPLATE are mutually
            # exclusive, hence,
            AUTH_LDAP_USER_DN_TEMPLATE = None

        if AUTH_LDAP_GROUP_SEARCH:
            AUTH_LDAP_GROUP_SEARCH = eval(
                AUTH_LDAP_GROUP_SEARCH.encode("utf-8"))

        if AUTH_LDAP_GROUP_TYPE:
            group_type = AUTH_LDAP_GROUP_TYPE
            # strip params from group type to get the class name
            group_class = group_type.split("(", 1)[0]
            group_types = get_ldap_group_types()
            if group_class in group_types:
                exec("from django_auth_ldap.config import " + group_class)
                AUTH_LDAP_GROUP_TYPE = eval(group_type)

    elif AUTH_DEBIAN_SSO:
        MIDDLEWARE.append("lava_server.debian_sso.DebianSsoUserMiddleware")
        AUTHENTICATION_BACKENDS.append(
            "lava_server.debian_sso.DebianSsoUserBackend")

    if USE_DEBUG_TOOLBAR:
        INSTALLED_APPS.append("debug_toolbar")
        MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"
                      ] + MIDDLEWARE
        INTERNAL_IPS.extend(["127.0.0.1", "::1"])

    # List of compiled regular expression objects representing User-Agent strings
    # that are not allowed to visit any page, systemwide. Use this for bad
    # robots/crawlers
    DISALLOWED_USER_AGENTS = [
        re.compile(r"%s" % reg, re.IGNORECASE)
        for reg in DISALLOWED_USER_AGENTS
    ]

    if LOGGING is None:
        LOGGING = {
            "version": 1,
            "disable_existing_loggers": False,
            "filters": {
                "require_debug_false": {
                    "()": "django.utils.log.RequireDebugFalse"
                }
            },
            "formatters": {
                "lava": {
                    "format":
                    "%(levelname)s %(asctime)s %(module)s %(message)s"
                }
            },
            "handlers": {
                "console": {
                    "level": "DEBUG",
                    "class": "logging.StreamHandler",
                    "formatter": "lava",
                },
                "logfile": {
                    "class": "logging.handlers.WatchedFileHandler",
                    "filename": DJANGO_LOGFILE,
                    "formatter": "lava",
                },
            },
            "loggers": {
                "django": {
                    "handlers": ["logfile"],
                    # DEBUG outputs all SQL statements
                    "level": "ERROR",
                    "propagate": True,
                },
                "django_auth_ldap": {
                    "handlers": ["logfile"],
                    "level": "INFO",
                    "propagate": True,
                },
                "lava_results_app": {
                    "handlers": ["logfile"],
                    "level": "INFO",
                    "propagate": True,
                },
                "lava_scheduler_app": {
                    "handlers": ["logfile"],
                    "level": "INFO",
                    "propagate": True,
                },
            },
        }

    if SENTRY_DSN:
        import sentry_sdk
        from sentry_sdk.integrations.django import DjangoIntegration

        sentry_sdk.init(
            dsn=SENTRY_DSN,
            integrations=[DjangoIntegration()],
            release=f"lava@{__version__}",
        )

    # Return settings
    return {k: v for (k, v) in locals().items() if k.isupper()}
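The mount-point normalisation above is easiest to follow with concrete values; the standalone loop below simply re-applies the same two expressions and prints the results (illustration only).

# Worked examples of the mount-point normalisation applied above.
for raw in ("", "/", "lava", "/lava/", "//lava//"):
    mount_point = (raw.rstrip("/") + "/").lstrip("/")
    session_cookie_path = "/" + mount_point.lstrip("/")
    print(repr(raw), "->", repr(mount_point), repr(session_cookie_path))
# ''         -> ''       '/'
# '/'        -> ''       '/'
# 'lava'     -> 'lava/'  '/lava/'
# '/lava/'   -> 'lava/'  '/lava/'
# '//lava//' -> 'lava/'  '/lava/'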
Example No. 27
    def run(self, connection, max_end_time):
        """
        Clones the git repo into a directory name constructed from the mount_path,
        lava-$hostname prefix, tests, $index_$test_name elements. e.g.
        /tmp/tmp.234Ga213/lava-kvm01/tests/3_smoke-tests-basic
        Also updates some basic metadata about the test definition.
        """
        # use the base class to populate the runner_path and overlay_path data into the context
        connection = super().run(connection, max_end_time)

        # NOTE: the runner_path dir must remain empty until after the VCS clone, so let the VCS clone create the final dir
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        if os.path.exists(runner_path) and os.listdir(runner_path) != []:
            raise LAVABug(
                "Directory already exists and is not empty - duplicate Action?"
            )

        # Clear the data
        if os.path.exists(runner_path):
            shutil.rmtree(runner_path)

        self.logger.info("Fetching tests from %s", self.parameters["repository"])

        # Get the branch if specified.
        branch = self.parameters.get("branch")

        # Set shallow to False if revision is specified.
        # Otherwise default to True if not specified as a parameter.
        revision = self.parameters.get("revision")
        shallow = False
        if not revision:
            shallow = self.parameters.get("shallow", True)

        commit_id = self.vcs.clone(
            runner_path,
            shallow=shallow,
            revision=revision,
            branch=branch,
            history=self.parameters.get("history", True),
        )
        if commit_id is None:
            raise InfrastructureError(
                "Unable to get test definition from %s (%s)"
                % (self.vcs.binary, self.parameters)
            )
        self.results = {
            "commit": commit_id,
            "repository": self.parameters["repository"],
            "path": self.parameters["path"],
        }

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        self.logger.debug("Tests stored (tmp) in %s", yaml_file)
        try:
            with open(yaml_file, "r") as test_file:
                testdef = yaml_safe_load(test_file)
        except OSError as exc:
            raise JobError(
                "Unable to open test definition '%s': %s"
                % (self.parameters["path"], str(exc))
            )

        # set testdef metadata in base class
        self.store_testdef(testdef, "git", commit_id)

        return connection
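The shallow/revision handling above boils down to a small decision rule; the standalone restatement below is an illustration, not the LAVA API itself.

# Standalone restatement of the shallow-clone decision above (illustrative).
def want_shallow(parameters):
    # A pinned revision always forces a full clone; otherwise honour the
    # optional "shallow" parameter, defaulting to True.
    if parameters.get("revision"):
        return False
    return parameters.get("shallow", True)

assert want_shallow({}) is True
assert want_shallow({"shallow": False}) is False
assert want_shallow({"revision": "abc123"}) is False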
Example No. 28
def map_metadata(description, job):
    """
    Generate metadata from the combination of the pipeline definition
    file (after any parsing for protocols) and the pipeline description
    into static metadata (TestData) related to this specific job
    The description itself remains outside the database - it will need
    to be made available as a download link.
    :param description: the pipeline description output
    :param job: the TestJob to associate
    :return: True on success, False on error
    """
    logger = logging.getLogger("lava-master")
    try:
        submission_data = yaml_safe_load(job.definition)
        description_data = yaml_load(description)
    except yaml.YAMLError as exc:
        logger.exception("[%s] %s", job.id, exc)
        return False
    try:
        testdata, created = TestData.objects.get_or_create(testjob=job)
    except MultipleObjectsReturned:
        # only happens for a small number of jobs affected by the original bug.
        logger.info("[%s] skipping alteration of duplicated TestData", job.id)
        return False
    if not created:
        # prevent updates of existing TestData
        logger.debug("[%s] skipping alteration of existing TestData", job.id)
        return False

    # get job-action metadata
    if description is None:
        logger.warning("[%s] skipping empty description", job.id)
        return False
    if not description_data:
        logger.warning("[%s] skipping invalid description data", job.id)
        return False
    if "job" not in description_data:
        logger.warning("[%s] skipping description without a job.", job.id)
        return False
    action_values = _get_action_metadata(description_data["job"]["actions"])
    for key, value in action_values.items():
        if not key or not value:
            logger.warning("[%s] Missing element in job. %s: %s", job.id, key,
                           value)
            continue
        testdata.attributes.create(name=key, value=value)

    # get common job metadata
    job_metadata = _get_job_metadata(job)
    for key, value in job_metadata.items():
        testdata.attributes.create(name=key, value=value)

    # get metadata from device
    device_values = {}
    device_values["target.device_type"] = job.requested_device_type
    for key, value in device_values.items():
        if not key or not value:
            logger.warning("[%s] Missing element in device. %s: %s", job.id,
                           key, value)
            continue
        testdata.attributes.create(name=key, value=value)

    # Add metadata from job submission data.
    if "metadata" in submission_data:
        for key in submission_data["metadata"]:
            value = submission_data["metadata"][key]
            if not key or not value:
                logger.warning("[%s] Missing element in job. %s: %s", job.id,
                               key, value)
                continue
            testdata.attributes.create(name=key, value=value)

    walk_actions(description_data["pipeline"], testdata, submission_data)
    return True
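The metadata block consumed by the loop above is a flat key/value mapping taken from the job submission; the fragment below is illustrative only and the key names are examples, not a fixed schema.

# Illustrative submission fragment, as a Python dict after yaml_safe_load().
submission_data = {
    "metadata": {
        "git.commit": "deadbeef",
        "build.url": "https://ci.example.com/build/42",
    }
}
for key, value in submission_data["metadata"].items():
    if not key or not value:
        continue  # mirrors the guard used when creating TestData attributes
    print(key, "=", value)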
Example No. 29
    def test_overlay_action(self, which_mock):
        parameters = {
            "dispatcher":
            {},  # fake dispatcher parameter. Normally added by parser
            "device_type": "beaglebone-black",
            "job_name": "uboot-pipeline",
            "job_timeout": "15m",
            "action_timeout": "5m",
            "priority": "medium",
            "actions": {
                "boot": {
                    "namespace": "common",
                    "method": "u-boot",
                    "commands": "ramdisk",
                    "prompts": ["linaro-test", "root@debian:~#"],
                },
                "deploy": {
                    "namespace": "common",
                    "ramdisk": {
                        "url": "initrd.gz",
                        "compression": "gz"
                    },
                    "kernel": {
                        "url": "zImage",
                        "type": "zimage"
                    },
                    "dtb": {
                        "url": "broken.dtb"
                    },
                },
            },
        }
        data = yaml_safe_load(Factory().create_device("bbb-01.jinja2")[0])
        device = NewDevice(data)
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters["actions"]["boot"])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        connection = MagicMock()
        connection.timeout = MagicMock()
        pipeline.add_action(overlay)
        overlay.set_namespace_data(
            action="uboot-prepare-kernel",
            label="bootcommand",
            key="bootcommand",
            value="bootz",
        )
        overlay.validate()
        overlay.run(connection, 100)
        ip_addr = dispatcher_ip(None)
        parsed = []
        kernel_addr = job.device["parameters"][overlay.bootcommand]["ramdisk"]
        ramdisk_addr = job.device["parameters"][overlay.bootcommand]["ramdisk"]
        dtb_addr = job.device["parameters"][overlay.bootcommand]["dtb"]
        kernel = parameters["actions"]["deploy"]["kernel"]["url"]
        ramdisk = parameters["actions"]["deploy"]["ramdisk"]["url"]
        dtb = parameters["actions"]["deploy"]["dtb"]["url"]

        substitution_dictionary = {
            "{SERVER_IP}": ip_addr,
            # the addresses need to be hexadecimal
            "{KERNEL_ADDR}": kernel_addr,
            "{DTB_ADDR}": dtb_addr,
            "{RAMDISK_ADDR}": ramdisk_addr,
            "{BOOTX}": "%s %s %s %s"
            % (overlay.bootcommand, kernel_addr, ramdisk_addr, dtb_addr),
            "{RAMDISK}": ramdisk,
            "{KERNEL}": kernel,
            "{DTB}": dtb,
        }
        params = device["actions"]["boot"]["methods"]
        params["u-boot"]["ramdisk"]["commands"] = substitute(
            params["u-boot"]["ramdisk"]["commands"], substitution_dictionary)

        commands = params["u-boot"]["ramdisk"]["commands"]
        self.assertIs(type(commands), list)
        self.assertIn("tftp 0x83000000 zImage", commands)
        self.assertIn("tftp 0x83000000 initrd.gz", commands)
        self.assertIn("setenv initrd_size ${filesize}", commands)
        self.assertIn("tftp 0x88000000 broken.dtb", commands)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)

        for line in params["u-boot"]["ramdisk"]["commands"]:
            line = line.replace("{SERVER_IP}", ip_addr)
            # the addresses need to be hexadecimal
            line = line.replace("{KERNEL_ADDR}", kernel_addr)
            line = line.replace("{DTB_ADDR}", dtb_addr)
            line = line.replace("{RAMDISK_ADDR}", ramdisk_addr)
            line = line.replace(
                "{BOOTX}",
                "%s %s %s %s" %
                (overlay.bootcommand, kernel_addr, ramdisk_addr, dtb_addr),
            )
            line = line.replace("{RAMDISK}", ramdisk)
            line = line.replace("{KERNEL}", kernel)
            line = line.replace("{DTB}", dtb)
            parsed.append(line)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
Example No. 30
 def test_definition_lists(self):  # pylint: disable=too-many-locals
     self.job.validate()
     tftp_deploy = [
         action for action in self.job.pipeline.actions
         if action.name == "tftp-deploy"
     ][0]
     prepare = [
         action for action in tftp_deploy.internal_pipeline.actions
         if action.name == "prepare-tftp-overlay"
     ][0]
     overlay = [
         action for action in prepare.internal_pipeline.actions
         if action.name == "lava-overlay"
     ][0]
     apply_o = [
         action for action in prepare.internal_pipeline.actions
         if action.name == "apply-overlay-tftp"
     ][0]
     self.assertIsInstance(apply_o.parameters.get("persistent_nfs"), dict)
     self.assertIsInstance(
         apply_o.parameters["persistent_nfs"].get("address"), str)
     definition = [
         action for action in overlay.internal_pipeline.actions
         if action.name == "test-definition"
     ][0]
     git_repos = [
         action for action in definition.internal_pipeline.actions
         if action.name == "git-repo-action"
     ]
     self.assertIn("common", self.job.context)
     self.assertIn("test-definition", self.job.context["common"])
     self.assertIsNotNone(
         definition.get_namespace_data(action=definition.name,
                                       label="test-definition",
                                       key="testdef_index"))
     self.assertEqual(
         definition.get_namespace_data(action=definition.name,
                                       label="test-definition",
                                       key="testdef_index"),
         ["smoke-tests", "singlenode-advanced"],
     )
     self.assertEqual(
         git_repos[0].get_namespace_data(
             action="test-runscript-overlay",
             label="test-runscript-overlay",
             key="testdef_levels",
         ),
         {
             "1.3.2.4.4": "0_smoke-tests",
             "1.3.2.4.8": "1_singlenode-advanced"
         },
     )
     self.assertEqual({repo.uuid
                       for repo in git_repos},
                      {"4999_1.3.2.4.1", "4999_1.3.2.4.5"})
     self.assertEqual(
         set(git_repos[0].get_namespace_data(
             action="test-runscript-overlay",
             label="test-runscript-overlay",
             key="testdef_levels",
         ).values()),
         {"1_singlenode-advanced", "0_smoke-tests"},
     )
     # fake up a run step
     with open(self.testdef, "r") as par:
         params = yaml_safe_load(par)
     self.assertEqual(
         r"(?P<test_case_id>.*-*):\s+(?P<result>(pass|fail))",
         params["parse"]["pattern"],
     )
     self.job.context.setdefault("test", {})
     for git_repo in git_repos:
         self.job.context["test"].setdefault(git_repo.uuid, {})
         self.job.context["test"][git_repo.uuid]["testdef_pattern"] = {
             "pattern": params["parse"]["pattern"]
         }
     self.assertEqual(
         self.job.context["test"],
         {
             "4999_1.3.2.4.5": {
                 "testdef_pattern": {
                     "pattern":
                     "(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))"
                 }
             },
             "4999_1.3.2.4.1": {
                 "testdef_pattern": {
                     "pattern":
                     "(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))"
                 }
             },
         },
     )
     testdef_index = self.job.context["common"]["test-definition"][
         "test-definition"]["testdef_index"]
     start_run = "0_smoke-tests"
     uuid_list = definition.get_namespace_data(action="repo-action",
                                               label="repo-action",
                                               key="uuid-list")
     self.assertIsNotNone(uuid_list)
     for key, value in enumerate(testdef_index):
         if start_run == "%s_%s" % (key, value):
             self.assertEqual("4999_1.3.2.4.1", uuid_list[key])
             self.assertEqual(
                 self.job.context["test"][uuid_list[key]]["testdef_pattern"]
                 ["pattern"],
                 "(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))",
             )