Example #1
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        overlay_file = self.get_namespace_data(action="compress-overlay",
                                               label="output",
                                               key="file")
        if overlay_file is None:
            self.logger.debug("skipped %s", self.name)
            return connection
        lxc_name = self.get_namespace_data(action="lxc-create-action",
                                           label="lxc",
                                           key="name")
        lxc_default_path = lxc_path(self.job.parameters["dispatcher"])
        lxc_rootfs_path = os.path.join(lxc_default_path, lxc_name, "rootfs")
        if not os.path.exists(lxc_rootfs_path):
            raise LAVABug("Lxc container rootfs not found")
        tar_cmd = [
            "tar",
            "--warning",
            "no-timestamp",
            "-C",
            lxc_rootfs_path,
            "-xaf",
            overlay_file,
        ]
        command_output = self.run_command(tar_cmd)
        if command_output:
            raise JobError("Unable to untar overlay: %s" % command_output)

        # FIXME: Avoid copying this special 'lava-test-runner' which does not
        #        have 'sync' in cleanup. This should be handled during the
        #        creation of the overlay instead. Make a special case to copy
        #        lxc specific scripts, with distro specific versions.
        fname = os.path.join(self.lava_test_dir, "lava-test-runner")
        output_file = "%s/bin/%s" % (lxc_rootfs_path, os.path.basename(fname))
        self.logger.debug("Copying %s", output_file)
        try:
            shutil.copy(fname, output_file)
        except OSError:
            raise InfrastructureError("Unable to copy: %s" % output_file)

        return connection
Example #2
 def run(self, connection, max_end_time):
     connection = super().run(connection, max_end_time)
     if not connection:
         raise LAVABug("Cannot transfer overlay, no connection available.")
     ip_addr = dispatcher_ip(self.job.parameters['dispatcher'])
     overlay_full_path = self.get_namespace_data(action='compress-overlay',
                                                 label='output',
                                                 key='file')
     if not overlay_full_path:
         raise JobError("No overlay file identified for the transfer.")
     if not overlay_full_path.startswith(DISPATCHER_DOWNLOAD_DIR):
         raise ConfigurationError(
             "overlay should already be in DISPATCHER_DOWNLOAD_DIR")
     overlay_path = overlay_full_path[len(DISPATCHER_DOWNLOAD_DIR) + 1:]
     overlay = os.path.basename(overlay_path)
     dwnld = self.parameters['transfer_overlay']['download_command']
     dwnld += " http://%s/tmp/%s" % (ip_addr, overlay_path)
     unpack = self.parameters['transfer_overlay']['unpack_command']
     unpack += ' ' + overlay
     connection.sendline("rm %s; %s && %s" % (overlay, dwnld, unpack))
     return connection
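A minimal sketch of the shell line this run() composes, assuming illustrative
test-writer values for 'download_command' and 'unpack_command' (both come from
the job definition, not from this action):

    # Hypothetical values mirroring the composition above.
    ip_addr = "10.0.0.1"                     # dispatcher IP (assumed)
    overlay_path = "5/overlay-1.8.4.tar.gz"  # path below DISPATCHER_DOWNLOAD_DIR
    overlay = "overlay-1.8.4.tar.gz"
    dwnld = "wget" + " http://%s/tmp/%s" % (ip_addr, overlay_path)
    unpack = "tar -C / -xzf" + " " + overlay
    # The device is asked to run:
    # rm overlay-1.8.4.tar.gz; wget http://10.0.0.1/tmp/5/overlay-1.8.4.tar.gz && tar -C / -xzf overlay-1.8.4.tar.gz
    print("rm %s; %s && %s" % (overlay, dwnld, unpack))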
Example #3
def decompress_file(infile, compression):
    if not compression:
        return infile
    if compression not in decompress_command_map:
        raise JobError("Cannot find shell command to decompress: %s" % compression)

    # Check that the decompression command exists
    which(decompress_command_map[compression][0])

    with chdir(os.path.dirname(infile)):
        # local copy for idempotency
        cmd = decompress_command_map[compression][:]
        cmd.append(infile)
        outfile = infile
        # Strip the trailing '.<compression>' suffix, if present
        if infile.endswith("." + compression):
            outfile = infile[:-(len(compression) + 1)]
        try:
            subprocess.check_output(cmd)  # nosec - internal use.
            return outfile
        except (OSError, subprocess.CalledProcessError) as exc:
            raise InfrastructureError('unable to decompress file %s: %s' % (infile, exc))
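decompress_file() leans on helpers defined elsewhere in the project. A hedged
sketch of plausible shapes for decompress_command_map and the chdir() context
manager (the real definitions may differ; which() is assumed to raise when the
binary is missing):

    import contextlib
    import os

    # Hypothetical contents; each value is a command list whose first element
    # is the binary checked by which().
    decompress_command_map = {
        "gz": ["gunzip", "-f"],
        "bz2": ["bunzip2", "-f"],
        "xz": ["unxz", "-f"],
    }

    @contextlib.contextmanager
    def chdir(path):
        """Temporarily switch the working directory."""
        pwd = os.getcwd()
        try:
            os.chdir(path)
            yield
        finally:
            os.chdir(pwd)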
Example #4
 def run(self, connection, max_end_time):
     """
     # cp ./iso/install.amd/vmlinuz vmlinuz
     # cp ./iso/install.amd/initrd.gz initrd.gz
     """
     # need download location
     iso_download = self.get_namespace_data(
         action="download-action", label="iso", key="file"
     )
     if not iso_download:
         raise JobError("installer image path is not present in the namespace.")
     destination = os.path.dirname(iso_download)
     copy_out_files(iso_download, list(self.files.values()), destination)
     for key, value in self.files.items():
         filename = os.path.join(destination, os.path.basename(value))
         self.logger.info("filename: %s size: %s", filename, os.stat(filename)[6])
         self.set_namespace_data(
             action=self.name, label=self.name, key=key, value=filename
         )
     self.results = {"success": list(self.files.values())}
     return connection
Example #5
 def run(self, connection, max_end_time):
     if not self.parameters.get("modules"):  # idempotency
         return connection
     connection = super().run(connection, max_end_time)
     modules = self.get_namespace_data(
         action="download-action", label="modules", key="file"
     )
     if not self.parameters.get("ramdisk"):
         if not self.parameters.get("nfsrootfs"):
             raise JobError("Unable to identify a location for the unpacked modules")
     # if both NFS and ramdisk are specified, apply modules to both
     # as the kernel may need some modules to raise the network and
     # will need other modules to support operations within the NFS
     if self.parameters.get("nfsrootfs"):
         if not self.parameters["nfsrootfs"].get("install_modules", True):
             self.logger.info("Skipping applying overlay to NFS")
             return connection
         root = self.get_namespace_data(
             action="extract-rootfs", label="file", key="nfsroot"
         )
         self.logger.info("extracting modules file %s to %s", modules, root)
         untar_file(modules, root)
     if self.parameters.get("ramdisk"):
         if not self.parameters["ramdisk"].get("install_modules", True):
             self.logger.info("Not adding modules to the ramdisk.")
             return connection
         root = self.get_namespace_data(
             action="extract-overlay-ramdisk",
             label="extracted_ramdisk",
             key="directory",
         )
         self.logger.info("extracting modules file %s to %s", modules, root)
         untar_file(modules, root)
     try:
         os.unlink(modules)
     except OSError as exc:
         raise InfrastructureError(
             "Unable to remove tarball: '%s' - %s" % (modules, exc)
         )
     return connection
Example #6
    def run(self, connection, max_end_time):
        retries = 0
        has_failed = False
        self.call_protocols()
        while retries < self.max_retries:
            try:
                connection = self.internal_pipeline.run_actions(connection, max_end_time)
                if 'repeat' not in self.parameters:
                    # failure_retry returns on first success. repeat returns only at max_retries.
                    return connection
            # Do not retry for LAVABug (as it's a bug in LAVA)
            except (InfrastructureError, JobError, TestError) as exc:
                has_failed = True
                # Restart max_end_time or the retry on a timeout fails with duration < 0
                max_end_time = self.timeout.duration + time.time()
                self.timeout.start = time.time()
                # Print the error message
                retries += 1
                msg = "%s failed: %d of %d attempts. '%s'" % (self.name, retries,
                                                              self.max_retries, exc)
                self.logger.error(msg)
                # Cleanup the action to allow for a safe restart
                self.cleanup(connection)

                # re-raise if this is the last loop
                if retries == self.max_retries:
                    self.errors = "%s retries failed for %s" % (retries, self.name)
                    self.set_namespace_data(action='shared', label='shared',
                                            key='connection', value=connection)
                    raise

                # Wait some time before retrying
                time.sleep(self.sleep)
                self.logger.warning("Retrying: %s %s (%s sec)", self.level, self.name, max_end_time)

        # If we are repeating, check that all repeat were a success.
        if has_failed:
            # tried and failed
            raise JobError("%s retries failed for %s" % (retries, self.name))
        return connection
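The subtle point above is resetting max_end_time and timeout.start on each
failure: without that, a retry after a timeout would start with a negative
remaining duration. The same pattern, reduced to a standalone sketch (names
are illustrative, not the LAVA API):

    import time

    def run_with_retries(action, max_retries=3, sleep=1, duration=60):
        for attempt in range(1, max_retries + 1):
            max_end_time = time.time() + duration  # restart the deadline
            try:
                return action(max_end_time)
            except RuntimeError:
                if attempt == max_retries:
                    raise
                time.sleep(sleep)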
Example #7
def copy_overlay_to_lxc(lxc_name, src, dispatcher_config, namespace):
    """Copies given overlay tar file in SRC to LAVA_LXC_HOME with the provided
    LXC_NAME and configured lxc_path

    For example,

    SRC such as:
    '/var/lib/lava/dispatcher/slave/tmp/523/overlay-1.8.4.tar.gz'

    will get copied to:
    '/var/lib/lxc/db410c-523/rootfs/lava-lxc/overlays/${namespace}/overlay.tar.gz'

    where,
    '/var/lib/lxc' is the lxc_path
    'db410c-523' is the LXC_NAME
    ${namespace} is the given NAMESPACE

    Returns the destination path. For example,
    '/var/lib/lxc/db410c-523/rootfs/lava-lxc/overlays/${namespace}/overlay.tar.gz'

    Raises JobError if the copy failed.
    """
    dst = os.path.join(
        lava_lxc_home(lxc_name, dispatcher_config),
        "overlays",
        namespace,
        "overlay.tar.gz",
    )
    logger = logging.getLogger("dispatcher")
    logger.debug("Copying %s to %s", os.path.basename(src), dst)
    try:
        shutil.copy(src, dst)
    except OSError as exc:
        # ENOENT(2): No such file or directory
        if exc.errno != errno.ENOENT:
            raise JobError("Unable to copy image: %s" % src)
        # try creating parent directories
        os.makedirs(os.path.dirname(dst), 0o755)
        shutil.copy(src, dst)
    return dst
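lava_lxc_home() is not shown here; a hypothetical reconstruction from the
docstring's example paths, reusing the lxc_path() helper seen in Example #1
(the real implementation may differ):

    import os

    def lava_lxc_home(lxc_name, dispatcher_config):
        # e.g. /var/lib/lxc/db410c-523/rootfs/lava-lxc
        return os.path.join(lxc_path(dispatcher_config), lxc_name,
                            "rootfs", "lava-lxc")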
Example #8
    def validate(self):
        super().validate()
        self.container = "lava-%s-%s" % (self.job.job_id, self.level)

        options = self.job.device["actions"]["boot"]["methods"]["docker"]["options"]

        docker_image = self.get_namespace_data(
            action="deploy-docker", label="image", key="name"
        )
        if docker_image is None:
            raise JobError("Missing deploy action before boot")

        if options["cpus"]:
            self.extra_options += " --cpus %s" % options["cpus"]
        if options["memory"]:
            self.extra_options += " --memory %s" % options["memory"]
        if options["privileged"]:
            self.extra_options += " --privileged"
        for device in options["devices"]:
            self.extra_options += " --device %s" % device
        for volume in options["volumes"]:
            self.extra_options += " --volume %s" % volume
Example #9
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)

        boot_img = self.get_namespace_data(
            action="download-action", label="boot", key="file"
        )
        boot_img = self.maybe_copy_to_container(boot_img)

        command_output = self.get_fastboot_output(["boot", boot_img], allow_fail=True)
        if command_output and "booting" not in command_output.lower():
            raise JobError("Unable to boot with fastboot: %s" % command_output)
        else:
            lines = [
                status
                for status in command_output.split("\n")
                if "finished" in status.lower()
            ]
            if lines:
                self.results = {"status": lines[0].strip()}
            else:
                self.results = {"fail": self.name}
        return connection
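How the parsing above behaves on typical output, using an illustrative string
(not captured from a real device):

    command_output = "downloading 'boot.img'...\nOKAY\nbooting...\nfinished. total time: 0.903s\n"
    lines = [s for s in command_output.split("\n") if "finished" in s.lower()]
    assert lines[0].strip() == "finished. total time: 0.903s"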
Example #10
    def update_guestfs(self):
        image = self.get_namespace_data(action="download-action",
                                        label=self.key,
                                        key="file")
        partition = self.params.get("partition", None)
        self.logger.info("Modifying %r", image)
        guest = guestfs.GuestFS(python_return_dict=True)
        guest.add_drive(image)
        try:
            guest.launch()
            if partition is not None:
                device = guest.list_partitions()[partition]
            else:
                device = guest.list_devices()[0]
            guest.mount(device, "/")
        except RuntimeError as exc:
            self.logger.exception(str(exc))
            raise JobError("Unable to update image %s: %r" %
                           (self.key, str(exc)))

        self.logger.debug("Overlays:")
        for overlay in self.params["overlays"]:
            label = "%s.%s" % (self.key, overlay)
            overlay_image = None
            if overlay == "lava":
                overlay_image = self.get_namespace_data(
                    action="compress-overlay", label="output", key="file")
                path = "/"
                compress = "gzip"
            else:
                overlay_image = self.get_namespace_data(
                    action="download-action", label=label, key="file")
                path = self.params["overlays"][overlay]["path"]
                compress = None
            self.logger.debug("- %s: %r to %r", label, overlay_image, path)
            guest.mkdir_p(path)
            guest.tar_in(overlay_image, path, compress=compress)
        guest.umount(device)
        guest.shutdown()
Example #11
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        fname = self.get_namespace_data(action='download-action',
                                        label=self.key,
                                        key='file')
        origin = fname
        # Remove the '.qcow2' extension and add '.img'
        if fname.endswith('.qcow2'):
            fname = fname[:-6]
        fname += ".img"

        self.logger.debug("Converting downloaded image from qcow2 to raw")
        try:
            subprocess.check_output(
                [
                    'qemu-img',
                    'convert',  # nosec - checked.
                    '-f',
                    'qcow2',
                    '-O',
                    'raw',
                    origin,
                    fname
                ],
                stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            self.logger.error("Unable to convert the qcow2 image")
            self.logger.error(exc.output)
            raise JobError(exc.output)

        self.set_namespace_data(action=self.name,
                                label=self.key,
                                key='file',
                                value=fname)
        self.set_namespace_data(action=self.name,
                                label='file',
                                key=self.key,
                                value=fname)
        return connection
Example #12
    def run(self, connection, max_end_time):
        # In downloads://foo/bar.ext, "foo" is the "netloc", "/bar.ext" is
        # the path. But in downloads://foo.ext, "foo.ext" is the netloc, and
        # the path is empty.
        # In downloads:///foo/bar.ext, netloc is empty while path is /foo/bar.ext.
        filename = self.url.netloc if self.url.netloc else ""
        if self.url.path:
            filename += self.url.path

        namespace = self.parameters["namespace"]
        top = pathlib.Path(self.job.tmp_dir) / "downloads" / namespace
        src = top / filename
        if not src.exists():
            existing = [
                str(f).replace(str(top) + "/", "") for f in top.rglob("*")
                if not f.is_dir()
            ]
            if existing:
                available = "available: " + ", ".join(existing)
            else:
                available = "not files at all available"
            raise JobError(f"Resource unavailable: {filename} ({available})")

        dest = pathlib.Path(self.path) / filename
        dest.parent.mkdir(parents=True, exist_ok=True)
        self.logger.debug("Linking %s -> %s", src, dest)
        try:
            os.link(src, dest)
        except FileExistsError:
            self.logger.warning("-> destination already exists, skipping")

        self.set_namespace_data(action="download-action",
                                label=self.key,
                                key="file",
                                value=str(dest))
        if "lava-xnbd" in self.parameters and str(self.key) == "nbdroot":
            self.parameters["lava-xnbd"]["nbdroot"] = str(dest)

        return connection
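The netloc/path behaviour described in the comment can be checked directly
with urllib.parse, assuming self.url comes from urlsplit(); note urlsplit()
returns an empty string rather than None for a missing netloc, hence the
fallback to "":

    from urllib.parse import urlsplit

    u = urlsplit("downloads://foo/bar.ext")
    assert (u.netloc, u.path) == ("foo", "/bar.ext")
    u = urlsplit("downloads://foo.ext")
    assert (u.netloc, u.path) == ("foo.ext", "")
    u = urlsplit("downloads:///foo/bar.ext")
    assert (u.netloc, u.path) == ("", "/foo/bar.ext")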
Example #13
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        self.logger.debug("%s: starting nbd-server", self.name)
        # pull from parameters - as previously set
        self.nbd_root = self.parameters["lava-xnbd"]["nbdroot"]
        self.nbd_server_port = self.get_namespace_data(action="nbd-deploy",
                                                       label="nbd",
                                                       key="nbd_server_port")
        self.nbd_server_ip = self.get_namespace_data(action="nbd-deploy",
                                                     label="nbd",
                                                     key="nbd_server_ip")
        if self.nbd_server_port is None:
            self.errors = "NBD server port is unset"
            return connection
        self.logger.debug(
            "NBD-IP: %s, NBD-PORT: %s, NBD-ROOT: %s",
            self.nbd_server_ip,
            self.nbd_server_port,
            self.nbd_root,
        )
        if re.search(filesystem.tftpd_dir(), self.nbd_root):
            fullpath_nbdroot = self.nbd_root
        else:
            fullpath_nbdroot = "%s/%s" % (
                os.path.realpath(filesystem.tftpd_dir()),
                self.nbd_root,
            )
        nbd_cmd = [
            "nbd-server",
            "%s" % self.nbd_server_port,
            fullpath_nbdroot,
        ]
        command_output = self.run_command(nbd_cmd, allow_fail=False)

        if command_output and "error" in command_output:
            raise JobError("nbd-server: %s" % command_output)
        else:
            self.logger.debug("%s: starting nbd-server done", self.name)
        return connection
Example #14
    def run(self, connection, max_end_time):
        """
        Extracts the provided encoded tar archive into tmpdir.
        """
        connection = super().run(connection, max_end_time)
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_dir", key=self.parameters["test_name"]
        )
        temp_tar = os.path.join(runner_path, "..", "..", "tar-repo.tar")

        try:
            if not os.path.isdir(runner_path):
                self.logger.debug("Creating directory to extract the tar archive into.")
                os.makedirs(runner_path)

            # base64.decode() needs binary file objects, so io.StringIO would
            # fail here; decode the string parameter directly instead and
            # write the result out as bytes.
            decoded_tar = base64.b64decode(self.parameters["repository"])

            # This could also stay entirely in memory using io.BytesIO.
            # At the moment the tar file sent is not big, but that can change.
            with open(temp_tar, "wb") as write_tar:
                write_tar.write(decoded_tar)

            with tarfile.open(temp_tar) as tar:
                tar.extractall(path=runner_path)
        except OSError as exc:
            raise InfrastructureError(
                "Unable to extract the tar archive: %s" % str(exc)
            )
        except tarfile.TarError as ex:
            raise JobError("Error extracting the tar archive: %s" % str(ex))
        finally:
            # Remove the temporary created tar file after it has been extracted.
            if os.path.isfile(temp_tar):
                os.unlink(temp_tar)
        return connection
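For reference, a "repository" parameter this action can unpack could be built
as follows (a sketch of the submitting side, which is not part of this code;
assumes a testdef.yaml file exists in the working directory):

    import base64
    import io
    import tarfile

    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tar:
        tar.add("testdef.yaml")
    repository = base64.b64encode(buf.getvalue()).decode("ascii")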
Example #15
 def run(self, connection, max_end_time):
     connection = super().run(connection, max_end_time)
     serial_number = self.job.device["fastboot_serial_number"]
     fastboot_opts = self.job.device["fastboot_options"]
     fastboot_cmd = (
         lxc_cmd_prefix(self.job)
         + ["fastboot", "-s", serial_number, "reboot"]
         + fastboot_opts
     )
     command_output = self.parsed_command(fastboot_cmd, allow_fail=True)
     if command_output and "rebooting" not in command_output.lower():
         raise JobError("Unable to fastboot reboot: %s" % command_output)
     else:
         lines = [
             status
             for status in command_output.split("\n")
             if "finished" in status.lower()
         ]
         if lines:
             self.results = {"status": lines[0].strip()}
         else:
             self.results = {"fail": self.name}
     return connection
Example #16
    def select(cls, device, parameters):
        candidates = cls.__subclasses__()  # pylint: disable=no-member
        replies = {}
        willing = []
        for c in candidates:
            res = c.accepts(device, parameters)
            if not isinstance(res, tuple):
                raise LAVABug('class %s accept function did not return a tuple' % c.__name__)
            if res[0]:
                willing.append(c)
            else:
                class_name = c.name if hasattr(c, 'name') else c.__name__
                replies[class_name] = res[1]

        if len(willing) == 0:
            replies_string = ""
            for name, reply in replies.items():
                replies_string += ("%s: %s\n" % (name, reply))
            raise JobError(
                "None of the test strategies accepted your test parameters, reasons given:\n%s" % replies_string)

        willing.sort(key=lambda x: x.priority, reverse=True)
        return willing[0]
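Each candidate is expected to provide accepts() returning a (bool, reason)
tuple plus a priority attribute. A hypothetical subclass illustrating the
contract (not an actual LAVA strategy):

    class ExampleStrategy:  # would subclass the base class exposing select()
        priority = 1
        name = "example"

        @classmethod
        def accepts(cls, device, parameters):
            if parameters.get("method") != "example":
                return False, "'method' was not 'example'"
            return True, "accepted"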
Example #17
File: fvp.py Project: slawr/lava
    def run(self, connection, max_end_time):
        fvp_arguments = " ".join(self.parameters.get("arguments"))

        # Build the command line
        # The docker image is safe to be included in the command line
        cmd = self.construct_docker_fvp_command(self.docker_image, fvp_arguments)

        self.logger.debug("Boot command: %s", cmd)
        shell = ShellCommand(cmd, self.timeout, logger=self.logger)

        shell_connection = ShellSession(self.job, shell)
        shell_connection = super().run(shell_connection, max_end_time)

        # Wait for the console string
        shell_connection.prompt_str = self.fvp_console_string
        self.wait(shell_connection)
        # We should now have the matched output
        if "PORT" not in shell_connection.raw_connection.match.groupdict():
            raise JobError(
                "'console_string' should contain a regular expression section, such as '(?P<PORT>\\d+)' to extract the serial port of the FVP. Group name must be 'PORT'"
            )

        serial_port = shell_connection.raw_connection.match.groupdict()["PORT"]
        self.set_namespace_data(
            action=StartFVPAction.name,
            label="fvp",
            key="serial_port",
            value=serial_port,
        )
        self.logger.info("Found FVP port %s", serial_port)
        self.set_namespace_data(
            action=StartFVPAction.name,
            label="fvp",
            key="container",
            value=self.container,
        )
        return shell_connection
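The extraction of the serial port relies on a named PORT group in the console
string. A sketch with an illustrative console line (not guaranteed FVP
output):

    import re

    fvp_console_string = r"Listening for serial connection on port (?P<PORT>\d+)"
    line = "terminal_0: Listening for serial connection on port 5003"
    match = re.search(fvp_console_string, line)
    assert match and match.groupdict()["PORT"] == "5003"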
Example #18
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        overlay_file = self.get_namespace_data(action='compress-overlay',
                                               label='output',
                                               key='file')
        if overlay_file is None:
            self.logger.debug("skipped %s", self.name)
            return connection
        lxc_name = self.get_namespace_data(action='lxc-create-action',
                                           label='lxc',
                                           key='name')
        lxc_default_path = lxc_path(self.job.parameters['dispatcher'])
        lxc_rootfs_path = os.path.join(lxc_default_path, lxc_name, 'rootfs')
        if not os.path.exists(lxc_rootfs_path):
            raise LAVABug("Lxc container rootfs not found")
        tar_cmd = [
            'tar', '--warning', 'no-timestamp', '-C', lxc_rootfs_path, '-xaf',
            overlay_file
        ]
        command_output = self.run_command(tar_cmd)
        if command_output:
            raise JobError("Unable to untar overlay: %s" % command_output)

        # FIXME: Avoid copying this special 'lava-test-runner' which does not
        #        have 'sync' in cleanup. This should be handled during the
        #        creation of the overlay instead. Make a special case to copy
        #        lxc specific scripts, with distro specific versions.
        fname = os.path.join(self.lava_test_dir, 'lava-test-runner')
        output_file = '%s/bin/%s' % (lxc_rootfs_path, os.path.basename(fname))
        self.logger.debug("Copying %s", output_file)
        try:
            shutil.copy(fname, output_file)
        except IOError:
            raise InfrastructureError("Unable to copy: %s" % output_file)

        return connection
Example #19
    def run(self, connection, max_end_time):
        overlay_file = self.get_namespace_data(action="compress-overlay",
                                               label="output",
                                               key="file")
        if overlay_file:
            self.logger.debug("Overlay: %s", overlay_file)
            decompressed_image = self.get_namespace_data(
                action="download-action", label=self.image_key, key="file")
            self.logger.debug("Image: %s", decompressed_image)
            root_partition = None

            if self.use_root_partition:
                root_partition = self.parameters[self.image_key].get(
                    "root_partition")
                if root_partition is None:
                    raise JobError(
                        "Unable to apply the overlay image without 'root_partition'"
                    )
                self.logger.debug("root_partition: %s", root_partition)

            copy_in_overlay(decompressed_image, root_partition, overlay_file)
        else:
            self.logger.debug("No overlay to deploy")
        return connection
Example #20
 def run(self, connection, max_end_time):
     connection = super().run(connection, max_end_time)
     serial_number = self.job.device['fastboot_serial_number']
     fastboot_opts = self.job.device['fastboot_options']
     fastboot_cmd = lxc_cmd_prefix(self.job) + [
         'fastboot', '-s', serial_number, 'reboot'
     ] + fastboot_opts
     command_output = self.parsed_command(fastboot_cmd, allow_fail=True)
     if command_output and 'rebooting' not in command_output.lower():
         raise JobError("Unable to fastboot reboot: %s" % command_output)
     else:
         lines = [
             status for status in command_output.split('\n')
             if 'finished' in status.lower()
         ]
         if lines:
             self.results = {'status': lines[0].strip()}
         else:
             self.results = {'fail': self.name}
     self.set_namespace_data(action='shared',
                             label='shared',
                             key='connection',
                             value=connection)
     return connection
Example #21
    def run(self, connection, max_end_time):
        """
        Clone the bazaar (bzr) repository into a directory
        """
        connection = super().run(connection, max_end_time)

        # NOTE: the runner_path dir must remain empty until after the VCS clone, so let the VCS clone create the final dir
        runner_path = self.get_namespace_data(action="uuid",
                                              label="overlay_path",
                                              key=self.parameters["test_name"])

        commit_id = self.vcs.clone(runner_path,
                                   revision=self.parameters.get("revision"))
        if commit_id is None:
            raise InfrastructureError(
                "Unable to get test definition from %s (%s)" %
                (self.vcs.binary, self.parameters))
        self.results = {
            "commit": commit_id,
            "repository": self.parameters["repository"],
            "path": self.parameters["path"],
        }

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        try:
            with open(yaml_file, "r") as test_file:
                self.testdef = yaml.safe_load(test_file)
        except OSError as exc:
            raise JobError("Unable to open test definition '%s': %s" %
                           (self.parameters["path"], str(exc)))

        # set testdef metadata in base class
        self.store_testdef(self.testdef, "bzr", commit_id)

        return connection
Example #22
 def deploy_vlans(self):
     """
     Calls vland to create a vlan. Passes -1 to get the next available vlan tag
     Always passes False to is_base_vlan
     friendly_name is the name specified by the test writer and is not sent to vland.
     self.names maps the friendly names to unique names for the VLANs, usable on the switches themselves.
     Some switches have limits on the allowed characters and length of the name, so this
     string is controlled by the protocol and differs from the friendly name supplied by the
     test writer. Each VLAN also has an ID which is used to identify the VLAN to vland, this
     ID is stored in self.vlans for each friendly_name for use with vland.
     The vlan tag is also stored but not used by the protocol itself.
     """
     # FIXME implement a fake daemon to test the calls
     # create vlans by iterating and appending to self.base_group for the vlan name
     # run_admin_command --create_vlan test30 -1 false
     if self.sub_id != 0:
         for friendly_name, _ in self.names.items():
             self.logger.info("Waiting for vlan %s : %s to be deployed.", friendly_name, self.names[friendly_name])
             self.vlans[friendly_name], tag = self._wait_on_create(friendly_name)
             self.logger.debug("vlan name: %s vlan tag: %s", self.vlans[friendly_name], tag)
     else:
         for friendly_name, _ in self.names.items():
             self.logger.info("Deploying vlan %s : %s", friendly_name, self.names[friendly_name])
             self.vlans[friendly_name], tag = self._create_vlan(friendly_name)
             self.logger.debug("vlan name: %s vlan tag: %s", self.vlans[friendly_name], tag)
             if not tag:  # error state from create_vlan
                 raise JobError("Unable to create vlan %s" % friendly_name)
             self._declare_created(friendly_name, tag)
     for friendly_name, _ in self.names.items():
         params = self.params[friendly_name]
         switch_id = self._lookup_switch_id(params['switch'])
         port_id = self._lookup_port_id(switch_id, params['port'])
         self.logger.info("Setting switch %s port %s to vlan %s on %s",
                          params['switch'], params['port'], friendly_name, params['iface'])
         self._set_port_onto_vlan(self.vlans[friendly_name], port_id)
         self.ports.append(port_id)
Example #23
    def _check_data(self, data):
        try:
            json_data = json.loads(data)
        except (ValueError, TypeError) as exc:
            raise JobError(
                "Invalid data for %s protocol: %s %s" % (self.name, data, exc)
            )
        if not isinstance(json_data, dict):
            raise JobError("Invalid data type %s for protocol %s" % (data, self.name))
        if not json_data:
            raise JobError("No data to be sent over protocol %s" % self.name)
        if "request" not in json_data:
            raise JobError("Bad API call over protocol - missing request")
        if json_data["request"] == "aggregate":
            raise JobError("Pipeline submission has not been implemented.")
        if "poll_delay" in json_data:
            self.settings["poll_delay"] = int(json_data["poll_delay"])
        if "timeout" in json_data:
            if isinstance(json_data["timeout"], dict):
                self.poll_timeout.duration = Timeout.parse(json_data["timeout"])
            elif isinstance(json_data["timeout"], (int, float)):
                self.poll_timeout.duration = json_data["timeout"]
            else:
                self.logger.debug(json_data["timeout"])
                raise JobError("Invalid timeout request")
            self.logger.debug(
                "Setting poll timeout of %s seconds", int(self.poll_timeout.duration)
            )
        if "messageID" not in json_data:
            raise JobError("Missing messageID")
        # handle conversion of api calls to internal functions
        json_data["request"] = json_data["request"].replace("-", "_")

        return json_data
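A minimal message that passes all of these checks might look like the
following (field values are illustrative):

    import json

    data = json.dumps({
        "request": "lava-send",  # '-' is rewritten to '_' at the end
        "messageID": "42",
        "timeout": 30,           # optional: int, float, or a Timeout dict
    })
    # _check_data(data) returns the dict with request == "lava_send"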
Example #24
    def validate(self):
        super().validate()
        # Check that we have "overlays" dict
        if "overlays" not in self.params:
            raise JobError("Missing 'overlays' dictionary")
        if not isinstance(self.params["overlays"], dict):
            raise JobError("'overlays' is not a dictionary")
        for overlay, params in self.params["overlays"].items():
            if overlay == "lava":
                continue
            if params.get("format") not in self.OVERLAY_FORMATS:
                raise JobError("Invalid 'format' (%r) for 'overlays.%s'" %
                               (params.get("format", ""), overlay))
            path = params.get("path")
            if path is None:
                raise JobError("Missing 'path' for 'overlays.%s'" % overlay)
            if not path.startswith("/") or ".." in path:
                raise JobError("Invalid 'path': %r" % path)

        # Check the image format
        if self.params.get("format") not in self.IMAGE_FORMATS:
            raise JobError("Unsupported image format %r" %
                           self.params.get("format"))
Example #25
    def run(self, connection, max_end_time):  # pylint: disable=too-many-locals
        """
        Common run function for subclasses which define custom patterns
        """
        super().run(connection, max_end_time)

        # Nexell extension
        '''
        if 'nexell_ext' in self.parameters:
            self.logger.debug("[SEOJI] nexell_ext exist! - pass connection check.")
        else:
        '''
        # Get the connection, specific to this namespace
        connection_namespace = self.parameters.get('connection-namespace')
        self.logger.debug("[SEOJI] connection_namespace:" +
                          connection_namespace)
        self.logger.debug("[SEOJI] check yaml param self.parameters: " +
                          str(self.parameters))
        parameters = None
        if self.timeout.can_skip(self.parameters):
            self.logger.info(
                "The timeout has 'skip' enabled. "
                "If this test action block times out, the job will continue at the next action block."
            )

        if connection_namespace:
            self.logger.debug("Using connection namespace: %s",
                              connection_namespace)
            parameters = {"namespace": connection_namespace}
        else:
            parameters = {
                'namespace': self.parameters.get('namespace', 'common')
            }
            self.logger.debug("Using namespace: %s", parameters['namespace'])
        connection = self.get_namespace_data(action='shared',
                                             label='shared',
                                             key='connection',
                                             deepcopy=False,
                                             parameters=parameters)

        if not connection:
            raise LAVABug("No connection retrieved from namespace data")

        self.signal_director.connection = connection

        pattern_dict = {self.pattern.name: self.pattern}
        self.logger.debug("[SEOJI] pattern_dict: " + str(pattern_dict))
        # pattern dictionary is the lookup from the STARTRUN to the parse pattern.
        self.set_namespace_data(action=self.name,
                                label=self.name,
                                key='pattern_dictionary',
                                value=pattern_dict)
        if self.character_delay > 0:
            self.logger.debug("Using a character delay of %i (ms)",
                              self.character_delay)

        if not connection.prompt_str:
            connection.prompt_str = [
                self.job.device.get_constant('default-shell-prompt')
            ]
            # FIXME: This should be logged whenever prompt_str is changed, by the connection object.
            self.logger.debug("Setting default test shell prompt %s",
                              connection.prompt_str)
        connection.timeout = self.connection_timeout
        # force an initial prompt - not all shells will respond without an excuse.
        self.logger.debug("[SEOJI] send check_char " +
                          str(connection.check_char))
        connection.sendline(connection.check_char)
        self.wait(connection)

        # use the string instead of self.name so that inheriting classes (like multinode)
        # still pick up the correct command.
        running = self.parameters['stage']
        pre_command_list = self.get_namespace_data(action='test',
                                                   label="lava-test-shell",
                                                   key='pre-command-list')
        self.logger.debug("[SEOJI] lava_test_result_dir")
        self.logger.debug("[SEOJI] self.data: %s", self.data)
        self.logger.debug("[SEOJI] self.parameters: %s", self.parameters)
        lava_test_results_dir = self.get_namespace_data(
            action='test', label='results', key='lava_test_results_dir')
        lava_test_sh_cmd = self.get_namespace_data(action='test',
                                                   label='shared',
                                                   key='lava_test_sh_cmd')

        # Any errors arising from this command are not checked.
        # If the result of the command means that lava-test-runner cannot be found,
        # this will cause the job to time out as Incomplete.
        if pre_command_list:
            for command in pre_command_list:
                connection.sendline(command, delay=self.character_delay)

        if lava_test_results_dir is None:
            # Nexell extension
            if 'nexell_ext' in self.parameters:
                self.logger.debug(
                    "[SEOJI] nexell_ext exist! - set lava_test_results_dir.")

            else:
                raise JobError(
                    "Nothing to run. Maybe the 'deploy' stage is missing, "
                    "otherwise this is a bug which should be reported.")

        self.logger.debug("Using %s" % lava_test_results_dir)
        if lava_test_sh_cmd:
            connection.sendline('export SHELL=%s' % lava_test_sh_cmd,
                                delay=self.character_delay)

        try:
            feedbacks = []
            for feedback_ns in self.data.keys():  # pylint: disable=no-member
                self.logger.debug("[SEOJI] feedback_ns:" + str(feedback_ns))
                self.logger.debug("[SEOJI] self.parameters.get('namespace'):" +
                                  str(self.parameters.get('namespace')))
                if feedback_ns == self.parameters.get('namespace'):
                    continue
                feedback_connection = self.get_namespace_data(
                    action='shared',
                    label='shared',
                    key='connection',
                    deepcopy=False,
                    parameters={"namespace": feedback_ns})
                if feedback_connection:
                    self.logger.debug("[SEOJI] feedback_connection: " +
                                      str(feedback_connection))
                    self.logger.debug(
                        "Will listen to feedbacks from '%s' for 1 second",
                        feedback_ns)
                    feedbacks.append((feedback_ns, feedback_connection))

            with connection.test_connection() as test_connection:
                # the structure of lava-test-runner means that there is just one TestAction and it must run all definitions
                self.logger.debug("[SEOJI] connection.test_connection(): " +
                                  str(connection.test_connection))
                self.logger.debug("[SEOJI] test_connection.sendline()")

                # Nexell extension
                self.logger.debug("[SEOJI] params: " +
                                  str(self.parameters.get('params')))
                definition_list = list(self.parameters['definitions'])
                self.logger.debug("[SEOJI] definition_list: " +
                                  str(definition_list))

                root_cmd = False

                for item in definition_list:
                    self.logger.debug("[SEOJI] item:" + str(item))
                    if 'params' in item:
                        self.logger.debug(
                            "[SEOJI] yes 'params' exist in item!")
                        if 'ROOT' in item['params']:
                            self.logger.debug(
                                "[SEOJI] yes 'ROOT' exist in item!")
                            if str(item['params']['ROOT']) == 'True':
                                self.logger.debug("[SEOJI] yes ROOT is True")
                                root_cmd = True

                # s5p4418-navi-ref-ubuntu needs root permission
                if root_cmd:
                    self.logger.debug("[SEOJI] test param includes 'ROOT'")
                    self.logger.debug(
                        "[SEOJI] sendline 'sudo %s/bin/lava-test-runner %s/%s'"
                        % (lava_test_results_dir, lava_test_results_dir,
                           running))
                    test_connection.sendline(
                        "sudo %s/bin/lava-test-runner %s/%s" %
                        (lava_test_results_dir, lava_test_results_dir,
                         running),
                        delay=self.character_delay)
                    test_connection.sendline(
                        self.parameters['nexell_ext']['password'])

                    self.logger.debug("[SEOJI] self.parameters: " +
                                      str(self.parameters))
                    if 'nexell_ext' in self.parameters:
                        self.logger.debug(
                            "[SEOJI] auto_login exist in self.parameters")
                        if 'password' in self.parameters['nexell_ext']:
                            self.logger.debug(
                                "[SEOJI] password in self.parameters['nexell_ext']"
                            )
                            nexell_param = self.parameters
                            nexell_cmd = [
                                'echo', nexell_param['nexell_ext']['password'],
                                '>', nexell_param['nexell_ext']['device_path']
                            ]
                            for i in range(5):
                                self.logger.debug("[SEOJI] nexell_cmd: " +
                                                  str(nexell_cmd))
                                command_output = self.run_command(nexell_cmd)
                else:
                    self.logger.debug(
                        "[SEOJI] sendline '%s/bin/lava-test-runner %s/%s'" %
                        (lava_test_results_dir, lava_test_results_dir,
                         running))
                    test_connection.sendline("%s/bin/lava-test-runner %s/%s" %
                                             (lava_test_results_dir,
                                              lava_test_results_dir, running),
                                             delay=self.character_delay)

                test_connection.timeout = min(self.timeout.duration,
                                              self.connection_timeout.duration)
                self.logger.info(
                    "Test shell timeout: %ds (minimum of the action and connection timeout)",
                    test_connection.timeout)

                # Because of the feedbacks, we use a small value for the
                # timeout.  This allows to grab feedback regularly.
                last_check = time.time()
                while self._keep_running(test_connection,
                                         test_connection.timeout,
                                         connection.check_char):
                    # Only grab the feedbacks every test_connection.timeout
                    if (feedbacks and
                            time.time() - last_check > test_connection.timeout):
                        for feedback in feedbacks:
                            self.logger.debug("[SEOJI] feedback:" +
                                              str(feedback))
                            # The timeout is really small because the goal is only
                            # to clean the buffer of the feedback connections:
                            # the characters are already in the buffer.
                            # With a higher timeout, this can have a big impact on
                            # the performances of the overall loop.
                            bytes_read = feedback[1].listen_feedback(timeout=1)
                            if bytes_read > 1:
                                self.logger.debug(
                                    "Listened to connection for namespace '%s' done",
                                    feedback[0])
                        last_check = time.time()
        finally:
            if self.current_run is not None:
                self.logger.error("Marking unfinished test run as failed")
                self.current_run["duration"] = "%.02f" % (time.time() -
                                                          self.start)
                self.logger.results(self.current_run)  # pylint: disable=no-member
                self.current_run = None

        # Only print if the report is not empty
        if self.report:
            self.logger.debug(yaml.dump(self.report, default_flow_style=False))
        if self.errors:
            raise TestError(self.errors)
        return connection
Example #26
File: ssh.py Project: ivoire/lava
    def run(self, connection, max_end_time):
        path = self.get_namespace_data(action="prepare-scp-overlay",
                                       label="scp-deploy",
                                       key=self.key)
        if not path:
            error_msg = "%s: could not find details of '%s'" % (self.name,
                                                                self.key)
            self.logger.error(error_msg)
            raise JobError(error_msg)

        overrides = self.get_namespace_data(action="prepare-scp-overlay",
                                            label="prepare-scp-overlay",
                                            key=self.key)
        if self.primary:
            host_address = self.job.device["actions"]["deploy"]["methods"][
                "ssh"]["host"]
        else:
            self.logger.info(
                "Retrieving common data for prepare-scp-overlay using %s",
                ",".join(overrides),
            )
            host_address = str(
                self.get_namespace_data(
                    action="prepare-scp-overlay",
                    label="prepare-scp-overlay",
                    key=overrides[0],
                ))
            self.logger.debug("Using common data for host: %s", host_address)
        if not host_address:
            error_msg = "%s: could not find host for deployment using %s" % (
                self.name,
                self.key,
            )
            self.logger.error(error_msg)
            raise JobError(error_msg)

        destination = self.get_namespace_data(action="test",
                                              label="results",
                                              key="lava_test_results_dir")
        destination = "%s-%s" % (destination, os.path.basename(path))
        command = self.scp[:]  # local copy
        # add the argument for setting the port (-P port)
        command.extend(self.scp_port)
        connection = super().run(connection, max_end_time)
        if self.identity_file:
            command.extend(["-i", self.identity_file])
        # add arguments to ignore host key checking of the host device
        command.extend([
            "-o", "UserKnownHostsFile=/dev/null", "-o",
            "StrictHostKeyChecking=no"
        ])
        # add the local file as source
        command.append(path)
        command_str = " ".join(str(item) for item in command)
        self.logger.info("Copying %s using %s to %s", self.key, command_str,
                         host_address)
        # add the remote as destination, with :/ top level directory
        command.extend(
            ["%s@%s:%s" % (self.ssh_user, host_address, destination)])
        self.run_cmd(command, error_msg="Unable to copy %s" % self.key)
        connection = super().run(connection, max_end_time)
        self.results = {"success": "ssh deployment"}
        self.set_namespace_data(
            action=self.name,
            label="scp-overlay-unpack",
            key="overlay",
            value=destination,
        )
        return connection
Example #27
    def run(self, connection, max_end_time):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        def progress_unknown_total(downloaded_sz, last_val):
            """Compute progress when the size is unknown"""
            condition = downloaded_sz >= last_val + 25 * 1024 * 1024
            msg = ("progress %dMB" % int(downloaded_sz / (1024 * 1024))) if condition else ""
            return (condition, downloaded_sz, msg)

        def progress_known_total(downloaded_sz, last_val):
            """Compute progress when the size is known"""
            percent = math.floor(downloaded_sz / float(self.size) * 100)
            condition = percent >= last_val + 5
            msg = ("progress %3d%% (%dMB)" % (percent, int(downloaded_sz / (1024 * 1024)))) if condition else ""
            return (condition, percent, msg)

        connection = super().run(connection, max_end_time)
        # self.cookies = self.job.context.config.lava_cookies  # FIXME: work out how to restore
        md5 = hashlib.md5()  # nosec - not being used for cryptography.
        sha256 = hashlib.sha256()

        # Create a fresh directory if the old one has been removed by a previous cleanup
        # (when retrying inside a RetryAction)
        try:
            os.makedirs(self.path, 0o755)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise InfrastructureError("Unable to create %s: %s" %
                                          (self.path, str(exc)))

        if 'images' in self.parameters and self.key in self.parameters['images']:
            remote = self.parameters['images'][self.key]
            compression = self.parameters['images'][self.key].get('compression', False)
        else:
            remote = self.parameters[self.key]
            if self.key == 'ramdisk':
                compression = False
                self.logger.debug(
                    "Not decompressing ramdisk as can be used compressed.")
            else:
                compression = self.parameters[self.key].get(
                    'compression', False)

        md5sum = remote.get('md5sum')
        sha256sum = remote.get('sha256sum')

        fname, _ = self._url_to_fname_suffix(self.path, compression)
        if os.path.isdir(fname):
            raise JobError("Download '%s' is a directory, not a file" % fname)
        if os.path.exists(fname):
            os.remove(fname)

        self.logger.info("downloading %s", remote['url'])
        self.logger.debug("saving as %s", fname)

        downloaded_size = 0
        beginning = time.time()
        # Choose the progress bar (is the size known?)
        if self.size == -1:
            self.logger.debug("total size: unknown")
            last_value = -25 * 1024 * 1024
            progress = progress_unknown_total
        else:
            self.logger.debug("total size: %d (%dMB)" %
                              (self.size, int(self.size / (1024 * 1024))))
            last_value = -5
            progress = progress_known_total

        decompress_command = None
        if compression:
            if compression in self.decompress_command_map:
                decompress_command = self.decompress_command_map[compression]
                self.logger.info("Using %s to decompress %s",
                                 decompress_command, compression)
            else:
                self.logger.info(
                    "Compression %s specified but not decompressing during download",
                    compression)
        else:
            self.logger.debug("No compression specified")

        def update_progress():
            nonlocal downloaded_size, last_value, md5, sha256
            downloaded_size += len(buff)
            (printing, new_value, msg) = progress(downloaded_size, last_value)
            if printing:
                last_value = new_value
                self.logger.debug(msg)
            md5.update(buff)
            sha256.update(buff)

        if compression and decompress_command:
            try:
                with open(fname, 'wb') as dwnld_file:
                    proc = subprocess.Popen(
                        [decompress_command],  # nosec - internal.
                        stdin=subprocess.PIPE,
                        stdout=dwnld_file)
            except OSError as exc:
                msg = "Unable to open %s: %s" % (fname, exc.strerror)
                self.logger.error(msg)
                raise InfrastructureError(msg)

            with proc.stdin as pipe:
                for buff in self.reader():
                    update_progress()
                    try:
                        pipe.write(buff)
                    except BrokenPipeError as exc:
                        error_message = str(exc)
                        self.logger.exception(error_message)
                        msg = "Make sure the 'compression' is corresponding " \
                              "to the image file type."
                        self.logger.error(msg)
                        raise JobError(error_message)
            proc.wait()
        else:
            with open(fname, 'wb') as dwnld_file:
                for buff in self.reader():
                    update_progress()
                    dwnld_file.write(buff)

        # Log the download speed
        ending = time.time()
        self.logger.info(
            "%dMB downloaded in %0.2fs (%0.2fMB/s)" %
            (downloaded_size / (1024 * 1024), round(ending - beginning, 2),
             round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2)))

        # If the remote server uses "Content-Encoding: gzip", this calculation will be wrong
        # because requests will decompress the file on the fly, creating a larger file than
        # LAVA expects.
        # self.size is -1 when the total size is unknown, so only compare
        # against a known Content-Length.
        if self.size > 0 and self.size != downloaded_size:
            raise InfrastructureError(
                "Download finished (%i bytes) but was not expected size (%i bytes), check your networking."
                % (downloaded_size, self.size))

        # set the dynamic data into the context
        self.set_namespace_data(action='download-action',
                                label=self.key,
                                key='file',
                                value=fname)
        self.set_namespace_data(action='download-action',
                                label=self.key,
                                key='md5',
                                value=md5.hexdigest())
        self.set_namespace_data(action='download-action',
                                label=self.key,
                                key='sha256',
                                value=sha256.hexdigest())

        # handle archive files
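        # 'images' deployments nest per-image settings under
        # parameters['images'][key]; other deployments keep them at the top
        # level of parameters[key].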
        if 'images' in self.parameters and self.key in self.parameters['images']:
            archive = self.parameters['images'][self.key].get('archive', False)
        else:
            archive = self.parameters[self.key].get('archive', False)
        if archive:
            origin = fname
            # str.rstrip() strips characters, not a suffix, so remove the
            # archive extension explicitly.
            suffix = '.' + archive
            target_fname = os.path.basename(origin)
            if target_fname.endswith(suffix):
                target_fname = target_fname[:-len(suffix)]
            target_fname_path = os.path.join(os.path.dirname(origin),
                                             target_fname)
            if os.path.exists(target_fname_path):
                os.remove(target_fname_path)

            if archive == 'tar':
                untar_file(origin,
                           None,
                           member=target_fname,
                           outfile=target_fname_path)
                self.set_namespace_data(action='download-action',
                                        label=self.key,
                                        key='file',
                                        value=target_fname_path)
                self.set_namespace_data(action='download-action',
                                        label='file',
                                        key=self.key,
                                        value=target_fname)
            self.logger.debug("Using %s archive" % archive)

        if md5sum is not None:
            chk_md5sum = self.get_namespace_data(action='download-action',
                                                 label=self.key,
                                                 key='md5')
            if md5sum != chk_md5sum:
                self.logger.error("md5sum of downloaded content: %s" %
                                  chk_md5sum)
                self.logger.info(
                    "sha256sum of downloaded content: %s" %
                    (self.get_namespace_data(action='download-action',
                                             label=self.key,
                                             key='sha256')))
                self.results = {
                    'fail': {
                        'md5': md5sum,
                        'download': chk_md5sum
                    }
                }
                raise JobError("MD5 checksum for '%s' does not match." % fname)
            self.results = {'success': {'md5': md5sum}}

        if sha256sum is not None:
            chk_sha256sum = self.get_namespace_data(action='download-action',
                                                    label=self.key,
                                                    key='sha256')
            if sha256sum != chk_sha256sum:
                self.logger.info(
                    "md5sum of downloaded content: %s" %
                    (self.get_namespace_data(
                        action='download-action', label=self.key, key='md5')))
                self.logger.error("sha256sum of downloaded content: %s" %
                                  chk_sha256sum)
                self.results = {
                    'fail': {
                        'sha256': sha256sum,
                        'download': chk_sha256sum
                    }
                }
                raise JobError("SHA256 checksum for '%s' does not match." %
                               fname)
            self.results = {'success': {'sha256': sha256sum}}

        # certain deployments need prefixes set
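        # tftp/nbd and iso-installer serve files from a per-job
        # subdirectory, so the stored path is prefixed with that suffix.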
        if self.parameters['to'] in ('tftp', 'nbd'):
            suffix = self.get_namespace_data(action='tftp-deploy',
                                             label='tftp',
                                             key='suffix')
            self.set_namespace_data(action='download-action',
                                    label='file',
                                    key=self.key,
                                    value=os.path.join(
                                        suffix, self.key,
                                        os.path.basename(fname)))
        elif self.parameters['to'] == 'iso-installer':
            suffix = self.get_namespace_data(action='deploy-iso-installer',
                                             label='iso',
                                             key='suffix')
            self.set_namespace_data(action='download-action',
                                    label='file',
                                    key=self.key,
                                    value=os.path.join(
                                        suffix, self.key,
                                        os.path.basename(fname)))
        else:
            self.set_namespace_data(action='download-action',
                                    label='file',
                                    key=self.key,
                                    value=fname)

        # xnbd protocol needs to know the location
        nbdroot = self.get_namespace_data(action='download-action',
                                          label='file',
                                          key='nbdroot')
        if 'lava-xnbd' in self.parameters and nbdroot:
            self.parameters['lava-xnbd']['nbdroot'] = nbdroot

        self.results = {
            'label': self.key,
            'size': downloaded_size,
            'md5sum': str(self.get_namespace_data(
                action='download-action', label=self.key, key='md5')),
            'sha256sum': str(self.get_namespace_data(
                action='download-action', label=self.key, key='sha256')),
        }
        return connection
Example #28
0
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        try:
            with open(yaml_file, "r") as test_file:
                testdef = yaml_safe_load(test_file)
        except OSError as exc:
            raise JobError(
                "Unable to open test definition '%s': %s"
                % (self.parameters["path"], str(exc))
            )

        if "install" not in testdef:
            self.results = {"skipped %s" % self.name: self.test_uuid}
            return connection

        filename = "%s/install.sh" % runner_path
        content = self.handle_parameters(testdef)

        # TODO: once the migration is complete, design a better way to do skip_install support.
        with open(filename, "w") as install_file:
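            # Write the generated shell preamble first, then emit each
            # install stage unless it is listed in skip_options.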
            for line in content:
                install_file.write(line)
            if "keys" not in self.skip_options:
                sources = testdef["install"].get("keys", [])
                for src in sources:
                    install_file.write("lava-add-keys %s" % src)
                    install_file.write("\n")

            if "sources" not in self.skip_options:
                sources = testdef["install"].get("sources", [])
                for src in sources:
                    install_file.write("lava-add-sources %s" % src)
                    install_file.write("\n")

            if "deps" not in self.skip_options:
                # generic dependencies - must be named the same across all distros
                # supported by the testdef
                deps = testdef["install"].get("deps", [])

                # distro-specific dependencies
                if "distro" in self.parameters["deployment_data"]:
                    deps = deps + testdef["install"].get(
                        "deps-" + self.parameters["deployment_data"]["distro"], []
                    )

                if deps:
                    install_file.write("lava-install-packages ")
                    for dep in deps:
                        install_file.write("%s " % dep)
                    install_file.write("\n")

            if "steps" not in self.skip_options:
                steps = testdef["install"].get("steps", [])
                if steps:
                    # Allow install steps to use the git-repo directly
                    # fake up the directory as it will be after the overlay is applied
                    # os.path.join refuses if the directory does not exist on the dispatcher
                    base = len(DISPATCHER_DOWNLOAD_DIR.split("/")) + 2
                    # skip job_id/action-tmpdir/ as well
                    install_dir = "/" + "/".join(runner_path.split("/")[base:])
                    install_file.write("cd %s\n" % install_dir)
                    install_file.write("pwd\n")
                    for cmd in steps:
                        install_file.write("%s\n" % cmd)

            if "git-repos" not in self.skip_options:
                self.install_git_repos(testdef, runner_path)

        self.results = {"uuid": self.test_uuid}
        return connection
Example #29
0
    def run(self, connection, max_end_time):
        connection = super().run(connection, max_end_time)
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        try:
            with open(yaml_file, "r") as test_file:
                testdef = yaml_safe_load(test_file)
        except OSError as exc:
            raise JobError(
                "Unable to open test definition '%s': %s"
                % (self.parameters["path"], str(exc))
            )

        self.logger.debug("runner path: %s test_uuid %s", runner_path, self.test_uuid)
        filename = "%s/run.sh" % runner_path
        content = self.handle_parameters(testdef)

        # the 'lava' testdef name is reserved
        if self.parameters["name"] == "lava":
            raise TestError('The "lava" test definition name is reserved.')

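        # Signals are echoed on stdout by default; 'kmsg' routes them
        # through /dev/kmsg so they end up in the kernel log instead.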
        lava_signal = self.parameters.get("lava-signal", "stdout")

        testdef_levels = self.get_namespace_data(
            action=self.name, label=self.name, key="testdef_levels"
        )
        with open(filename, "a") as runsh:
            for line in content:
                runsh.write(line)
            runsh.write("set -e\n")
            runsh.write("set -x\n")
            # use the testdef_index value for the testrun name to handle repeats at source
            runsh.write("export TESTRUN_ID=%s\n" % testdef_levels[self.level])
            runsh.write(
                "cd %s\n"
                % self.get_namespace_data(
                    action="uuid", label="runner_path", key=self.parameters["test_name"]
                )
            )
            runsh.write("UUID=`cat uuid`\n")
            runsh.write("set +x\n")
            if lava_signal == "kmsg":
                runsh.write("export KMSG=true\n")
                runsh.write(
                    'echo "<0><LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>" > /dev/kmsg\n'
                )
            else:
                runsh.write('echo "<LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>"\n')
            runsh.write("set -x\n")
            steps = testdef.get("run", {}).get("steps", [])
            for cmd in steps:
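                # Escape $1, $2 ... in --cmd/--shell steps so the wrapping
                # shell does not expand them as positional parameters.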
                if "--cmd" in cmd or "--shell" in cmd:
                    cmd = re.sub(r"\$(\d+)\b", r"\\$\1", cmd)
                runsh.write("%s\n" % cmd)
            runsh.write("set +x\n")
            if lava_signal == "kmsg":
                runsh.write("unset KMSG\n")
                runsh.write(
                    'echo "<0><LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>" > /dev/kmsg\n'
                )
            else:
                runsh.write('echo "<LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>"\n')

        self.results = {
            "uuid": self.test_uuid,
            "filename": filename,
            "name": self.parameters["name"],
            "path": self.parameters["path"],
            "from": self.parameters["from"],
        }
        if self.parameters["from"] != "inline":
            self.results["repository"] = self.parameters["repository"]
        return connection
Example #30
0
    def run(self, connection, max_end_time):
        """
        Clones the git repo into a directory name constructed from the mount_path,
        lava-$hostname prefix, tests, $index_$test_name elements. e.g.
        /tmp/tmp.234Ga213/lava-kvm01/tests/3_smoke-tests-basic
        Also updates some basic metadata about the test definition.
        """
        # use the base class to populate the runner_path and overlay_path data into the context
        connection = super().run(connection, max_end_time)

        # NOTE: the runner_path dir must remain empty until after the VCS clone, so let the VCS clone create the final dir
        runner_path = self.get_namespace_data(
            action="uuid", label="overlay_path", key=self.parameters["test_name"]
        )

        if os.path.exists(runner_path) and os.listdir(runner_path):
            raise LAVABug(
                "Directory already exists and is not empty - duplicate Action?"
            )

        # Clear the data
        if os.path.exists(runner_path):
            shutil.rmtree(runner_path)

        self.logger.info("Fetching tests from %s", self.parameters["repository"])

        # Get the branch if specified.
        branch = self.parameters.get("branch")

        # Set shallow to False if revision is specified.
        # Otherwise default to True if not specified as a parameter.
        revision = self.parameters.get("revision")
        shallow = False
        if not revision:
            shallow = self.parameters.get("shallow", True)

        commit_id = self.vcs.clone(
            runner_path,
            shallow=shallow,
            revision=revision,
            branch=branch,
            history=self.parameters.get("history", True),
        )
        if commit_id is None:
            raise InfrastructureError(
                "Unable to get test definition from %s (%s)"
                % (self.vcs.binary, self.parameters)
            )
        self.results = {
            "commit": commit_id,
            "repository": self.parameters["repository"],
            "path": self.parameters["path"],
        }

        # now read the YAML to create a testdef dict to retrieve metadata
        yaml_file = os.path.join(runner_path, self.parameters["path"])
        self.logger.debug("Tests stored (tmp) in %s", yaml_file)
        try:
            with open(yaml_file, "r") as test_file:
                testdef = yaml_safe_load(test_file)
        except OSError as exc:
            raise JobError(
                "Unable to open test definition '%s': %s"
                % (self.parameters["path"], str(exc))
            )

        # set testdef metadata in base class
        self.store_testdef(testdef, "git", commit_id)

        return connection