Exemple #1
0
class MountAction(DeployAction):
    """
    Depending on the type of deployment, this needs to perform
    an OffsetAction, LoopCheckAction, LoopMountAction
    """

    name = "mount-action"
    description = "mount with offset"
    summary = "mount loop"

    def __init__(self, key):
        """Remember which parameter key identifies the image to mount."""
        super(MountAction, self).__init__()
        self.key = key

    def populate(self, parameters):
        """
        Needs to take account of the deployment type / image type etc.
        to determine which actions need to be added to the internal pipeline
        as part of the deployment selection step.
        """
        if not self.job:
            raise LAVABug("No job object supplied to action")
        # FIXME: not all mount operations will need these actions
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # FIXME: LoopCheckAction and LoopMountAction should be in only one Action
        for action_class in (OffsetAction, LoopCheckAction, LoopMountAction):
            self.internal_pipeline.add_action(action_class(self.key))
Exemple #2
0
class BootCMSISRetry(RetryAction):
    """Retryable boot of a CMSIS-DAP USB image: reset, flash, reconnect."""

    name = "boot-cmsis-retry"
    description = "boot cmsis usb image with retry"
    summary = "boot cmsis usb image with retry"

    def validate(self):
        """Require the usb_mass_device parameter of the cmsis-dap method."""
        super(BootCMSISRetry, self).validate()
        params = self.job.device['actions']['boot']['methods']['cmsis-dap']['parameters']
        if not params.get('usb_mass_device', None):
            self.errors = "usb_mass_device unset"

    def populate(self, parameters):
        """Build the flash/reconnect sequence for one boot attempt."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        add = self.internal_pipeline.add_action
        params = self.job.device['actions']['boot']['methods']['cmsis-dap']['parameters']
        mass_device = params.get('usb_mass_device', None)
        if self.job.device.hard_reset_command:
            add(ResetDevice())
            add(WaitDevicePathAction(mass_device))
        add(FlashCMSISAction())
        if params.get('resets_after_flash', True):
            add(WaitUSBSerialDeviceAction())
        add(ConnectDevice())
 def _make_pipeline(self, params):
     """Return a new pipeline containing one internal AutoLoginAction."""
     login = AutoLoginAction()
     login.section = "internal"
     login.parameters = params
     result = Pipeline()
     result.add_action(login)
     return result
Exemple #4
0
class BootCMSIS(BootAction):
    """Boot a CMSIS-DAP USB image by delegating to the retryable action."""

    name = "boot-cmsis"
    description = "boot cmsis usb image"
    summary = "boot cmsis usb image"

    def populate(self, parameters):
        """Build the internal pipeline: a single retryable CMSIS boot."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(BootCMSISRetry())
Exemple #5
0
class BootQemuRetry(RetryAction):
    """Retry wrapper around the QEMU command-line boot."""

    name = 'boot-qemu-image'
    description = "boot image using QEMU command line"
    summary = "boot QEMU image"

    def populate(self, parameters):
        """The whole boot is performed by CallQemuAction."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(CallQemuAction())
Exemple #6
0
class BootDFU(BootAction):
    """Boot a DFU image by delegating to the retryable action."""

    name = 'boot-dfu-image'
    description = "boot dfu image with retry"
    summary = "boot dfu image with retry"

    def populate(self, parameters):
        """Build the internal pipeline: a single retryable DFU boot."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(BootDFURetry())
Exemple #7
0
class BootDockerRetry(RetryAction):
    """Retry wrapper around starting the docker container."""

    name = 'boot-docker-retry'
    description = "boot docker image with retry"
    summary = "boot docker image"

    def populate(self, parameters):
        """The actual container start is performed by CallDockerAction."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(CallDockerAction())
Exemple #8
0
class TestShellRetry(RetryAction):
    """Retry wrapper around the lava test shell."""

    name = "lava-test-retry"
    description = "Retry wrapper for lava-test-shell"
    summary = "Retry support for Lava Test Shell"

    def populate(self, parameters):
        """Each retry attempt runs one TestShellAction."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        add = self.internal_pipeline.add_action
        add(TestShellAction())
Exemple #9
0
class UnmountAction(RetryAction):
    """Retry wrapper around a single umount operation."""

    name = "umount-retry"
    description = "retry support for umount"
    summary = "retry umount"

    def populate(self, parameters):
        """Each retry attempt runs one Unmount action."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(Unmount())
    class InternalRetryAction(RetryAction):
        """Minimal retry action used only by the dispatcher unit tests."""

        section = 'internal'
        name = "internal-retry-action"
        description = "internal, do not use outside unit tests"
        summary = "internal retry action for unit tests"

        def populate(self, parameters):
            # Note: parameters are handed to add_action, not to the Pipeline.
            pipeline = Pipeline(parent=self, job=self.job)
            self.internal_pipeline = pipeline
            pipeline.add_action(TestAction.FakeAction(), parameters)
Exemple #11
0
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        """Check grub ramdisk command substitution by BootloaderCommandOverlay.

        Verifies that the {SERVER_IP}/{KERNEL}/{RAMDISK}/{DTB} placeholders are
        present in the device's grub ramdisk commands before substitution, and
        that substitute() replaces them with the concrete job values.
        """
        parameters = {
            'device_type': 'd02',
            'job_name': 'grub-standard-ramdisk',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'actions': {
                'boot': {
                    'method': 'grub',
                    'commands': 'ramdisk',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                    'dtb': 'broken.dtb'
                }
            }
        }
        # Build a minimal job/pipeline around the overlay action under test.
        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        parsed = []
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']
        dtb = parameters['actions']['deploy']['dtb']

        substitution_dictionary = {
            '{SERVER_IP}': ip_addr,
            # the addresses need to be hexadecimal
            '{RAMDISK}': ramdisk,
            '{KERNEL}': kernel,
            '{DTB}': dtb
        }
        params = device['actions']['boot']['methods']
        commands = params['grub']['ramdisk']['commands']
        # Before substitution, the raw placeholders must still be present.
        self.assertIn('net_bootp', commands)
        self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
        self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
        self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)

        # Substitute in place and re-check against the updated command list.
        params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
        substituted_commands = params['grub']['ramdisk']['commands']
        self.assertIs(type(substituted_commands), list)
        self.assertIn('net_bootp', substituted_commands)
        self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
        self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
        # NOTE(review): 'parsed' is never populated, so these two assertNotIn
        # checks are vacuously true — confirm whether they should target
        # substituted_commands instead.
        self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', parsed)
        self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', parsed)
Exemple #12
0
class BootQEMUImageAction(BootAction):
    """Boot a QEMU image, then optionally log in and prepare a test shell."""

    name = 'boot-image-retry'
    description = "boot image with retry"
    summary = "boot with retry"

    def populate(self, parameters):
        """Boot first; login/shell actions only when prompts are configured."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(BootQemuRetry())
        if not self.has_prompts(parameters):
            return
        pipeline.add_action(AutoLoginAction())
        if self.test_has_shell(parameters):
            pipeline.add_action(ExpectShellSession())
            if 'transfer_overlay' in parameters:
                pipeline.add_action(OverlayUnpack())
            pipeline.add_action(ExportDeviceEnvironment())
Exemple #13
0
class ResetDevice(Action):
    """
    Used within a RetryAction - first tries 'reboot' then
    tries PDU.
    """

    name = "reset-device"
    description = "reboot or power-cycle the device"
    summary = "reboot the device"

    def populate(self, parameters):
        """Prefer a PDU power-cycle when a hard reset command exists."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        reset_action = (PDUReboot() if self.job.device.hard_reset_command
                        else SendRebootCommands())
        self.internal_pipeline.add_action(reset_action)
Exemple #14
0
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        """Check ipxe ramdisk command substitution by BootloaderCommandOverlay.

        Substitutes {SERVER_IP}/{RAMDISK}/{KERNEL}/{LAVA_MAC} into the device's
        ipxe ramdisk commands and verifies the resulting command list.
        """
        parameters = {
            'device_type': 'x86',
            'job_name': 'ipxe-pipeline',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'actions': {
                'boot': {
                    'method': 'ipxe',
                    'commands': 'ramdisk',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                }
            }
        }
        # Build a minimal job/pipeline around the overlay action under test.
        device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']

        substitution_dictionary = {
            '{SERVER_IP}': ip_addr,
            '{RAMDISK}': ramdisk,
            '{KERNEL}': kernel,
            '{LAVA_MAC}': "00:00:00:00:00:00"
        }
        # Substitute in place; the device dict carries the command templates.
        params = device['actions']['boot']['methods']
        params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)

        commands = params['ipxe']['ramdisk']['commands']
        self.assertIs(type(commands), list)
        self.assertIn("dhcp net0", commands)
        self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
        self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
        # The raw placeholder forms must no longer be present after substitution.
        self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
        self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
        self.assertIn("boot", commands)
Exemple #15
0
class DockerAction(DeployAction):
    """Deploy step for docker images: validate the image name and pull it."""

    name = "deploy-docker"
    description = "deploy docker images"
    summary = "deploy docker"

    def validate(self):
        """Check docker is installed and the image name is shell-safe."""
        super(DockerAction, self).validate()
        which("docker")

        # Print docker version
        try:
            version = subprocess.check_output(["docker", "version", "-f", "{{.Server.Version}}"])
            version = version.decode("utf-8", errors="replace").strip("\n")
            self.logger.debug("docker server, installed at version: %s", version)
            version = subprocess.check_output(["docker", "version", "-f", "{{.Client.Version}}"])
            version = version.decode("utf-8", errors="replace").strip("\n")
            self.logger.debug("docker client, installed at version: %s", version)
        except subprocess.CalledProcessError as exc:
            raise InfrastructureError("Unable to call '%s': %s" % (exc.cmd, exc.output))
        except OSError:
            raise InfrastructureError("Command 'docker' does not exist")

        # check docker image name
        # The string should be safe for command line inclusion
        image_name = self.parameters["image"]
        if re.compile("^[a-z0-9._:/-]+$").match(image_name) is None:
            self.errors = "image_name '%s' is invalid" % image_name
        self.set_namespace_data(action=self.name, label='image',
                                key='name', value=image_name)

    def populate(self, parameters):
        """Optionally add environment deployment and overlay creation."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        add = self.internal_pipeline.add_action
        if self.test_needs_deployment(parameters):
            add(DeployDeviceEnvironment())
        if self.test_needs_overlay(parameters):
            add(OverlayAction())

    def run(self, connection, max_end_time, args=None):
        """Pull the docker image, then hand over to the base class run."""
        image = self.parameters["image"]
        if not self.run_command(["docker", "pull", image],
                                allow_fail=False, allow_silent=False):
            raise JobError("Unable to pull docker image '%s'" % image)
        return super(DockerAction, self).run(connection, max_end_time, args)
Exemple #16
0
class PrepareKernelAction(Action):
    """
    Populate the pipeline with a kernel conversion action, if needed
    """

    name = "prepare-kernel"
    description = "populates the pipeline with a kernel conversion action"
    summary = "add a kernel conversion"

    def populate(self, parameters):
        """Add the conversion matching the device's boot method, if any."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # the logic here can be upgraded in future if needed with more parameters to the deploy.
        boot_methods = self.job.device['actions']['boot']['methods']
        conversion = None
        if 'u-boot' in boot_methods:
            conversion = UBootPrepareKernelAction()
        elif 'depthcharge' in boot_methods:
            conversion = PrepareFITAction()
        if conversion is not None:
            self.internal_pipeline.add_action(conversion)
Exemple #17
0
class SecondaryShellAction(BootAction):
    """Open an extra shell connection to the device, with optional login."""

    name = "secondary-shell-action"
    description = "Connect to a secondary shell on specified hardware"
    summary = "connect to a specified second shell"

    def populate(self, parameters):
        """Connect to the named shell; login/shell actions need prompts."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(ConnectShell(name=parameters['connection']))
        if not self.has_prompts(parameters):
            return
        pipeline.add_action(AutoLoginAction())
        if self.test_has_shell(parameters):
            pipeline.add_action(ExpectShellSession())
            if 'transfer_overlay' in parameters:
                pipeline.add_action(OverlayUnpack())
            pipeline.add_action(ExportDeviceEnvironment())
Exemple #18
0
class BootLxcAction(BootAction):
    """
    Provide for auto_login parameters in this boot stanza and re-establish the
    connection after boot.
    """
    name = "lxc-boot"
    description = "lxc boot into the system"
    summary = "lxc boot"

    def populate(self, parameters):
        """Start the container, attach devices, connect and get a shell."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        for action in (LxcStartAction(), LxcAddStaticDevices(), ConnectLxc()):
            pipeline.add_action(action)
        # Skip AutoLoginAction unconditionally as this action tries to parse kernel message
        pipeline.add_action(ExpectShellSession())
        pipeline.add_action(ExportDeviceEnvironment())
Exemple #19
0
class FinalizeAction(Action):
    """
    Final action of every job: power off, read any remaining feedback,
    close the connection and finalise all protocols.
    """

    section = "finalize"
    name = "finalize"
    description = "finish the process and cleanup"
    summary = "finalize the job"

    def __init__(self):
        """
        The FinalizeAction is always added as the last Action in the top level pipeline by the parser.
        The tasks include finalising the connection (whatever is the last connection in the pipeline)
        and writing out the final pipeline structure containing the results as a logfile.
        """
        super(FinalizeAction, self).__init__()
        # Set by run(); checked by cleanup() to avoid running twice.
        self.ran = False

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(job=self.job, parent=self, parameters=parameters)
        self.internal_pipeline.add_action(PowerOff())
        self.internal_pipeline.add_action(ReadFeedback(finalize=True, repeat=True))

    def run(self, connection, max_end_time, args=None):
        """
        The pexpect.spawn here is the ShellCommand not the ShellSession connection object.
        So call the finalise() function of the connection which knows about the raw_connection inside.
        The internal_pipeline of FinalizeAction is special - it needs to run even in the case of error / cancel.
        """
        self.ran = True
        try:
            connection = super(FinalizeAction, self).run(connection, max_end_time, args)
            if connection:
                connection.finalise()
        except Exception as exc:  # pylint: disable=broad-except
            # Finalize must never fail the job at this point, but silently
            # swallowing the error hides real cleanup problems - log it.
            self.logger.error("Failed to finalise the connection: %s", exc)
        finally:
            # Protocols are finalised even if closing the connection failed.
            for protocol in self.job.protocols:
                protocol.finalise_protocol(self.job.device)
        return connection

    def cleanup(self, connection):
        # avoid running Finalize in validate or unit tests
        if not self.ran and self.job.started:
            self.run(connection, None, None)
Exemple #20
0
class BootDFURetry(RetryAction):
    """One retryable attempt to flash a DFU image over the CLI."""

    name = 'boot-dfu-retry'
    description = "boot dfu image using the command line interface"
    summary = "boot dfu image"

    def populate(self, parameters):
        """Connect, reset, wait for the DFU device, then flash it."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        for action in (ConnectDevice(), ResetDevice(),
                       WaitDFUDeviceAction(), FlashDFUAction()):
            self.internal_pipeline.add_action(action)
    class FakeAction(Action):
        """
        Isolated Action which can be used to generate artificial exceptions.
        """

        name = "fake-action"
        description = "fake, do not use outside unit tests"
        summary = "fake action for unit tests"

        def populate(self, parameters):
            # Nest another FakeAction so the internal pipeline is not empty.
            pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
            self.internal_pipeline = pipeline
            pipeline.add_action(TestAction.FakeAction())

        def run(self, connection, max_end_time, args=None):
            """Pause briefly and report failure; reject a real connection."""
            if connection:
                raise LAVABug("Fake action not meant to have a real connection")
            time.sleep(3)
            self.results = dict(status="failed")
            return connection
Exemple #22
0
class BootPyOCDRetry(RetryAction):
    """One retryable attempt to flash an image using pyOCD."""

    name = 'boot-pyocd-image'
    description = "boot pyocd image using the command line interface"
    summary = "boot pyocd image"

    def populate(self, parameters):
        """Optionally reset and wait for the board, then flash and connect."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        add = self.internal_pipeline.add_action
        if self.job.device.hard_reset_command:
            add(ResetDevice())
            add(WaitDeviceBoardID(self.job.device.get('board_id', None)))
        add(FlashPyOCDAction())
        add(ConnectDevice())
Exemple #23
0
class DownloaderAction(RetryAction):
    """
    The retry pipeline for downloads.
    To allow any deploy action to work with multinode, each call *must* set a unique path.
    """

    name = "download-retry"
    description = "download with retry"
    summary = "download-retry"

    def __init__(self, key, path, uniquify=True):
        super(DownloaderAction, self).__init__()
        self.max_retries = 3
        self.key = key  # the key in the parameters of what to download
        self.path = path  # where to download
        self.uniquify = uniquify

    def populate(self, parameters):
        """Pick the download action matching the url scheme for self.key."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)

        # Find the right action according to the url
        if 'images' in parameters and self.key in parameters['images']:
            url = parameters['images'][self.key].get('url')
        else:
            url = parameters[self.key].get('url')
        if url is None:
            raise JobError("Invalid deploy action: 'url' is missing for '%s'" % self.key)

        url = lavaurl.urlparse(url)
        scheme = url.scheme
        if scheme == 'scp':
            download = ScpDownloadAction(self.key, self.path, url, self.uniquify)
        elif scheme in ('http', 'https'):
            download = HttpDownloadAction(self.key, self.path, url, self.uniquify)
        elif scheme == 'file':
            download = FileDownloadAction(self.key, self.path, url, self.uniquify)
        elif scheme == 'lxc':
            # lxc urls carry no uniquify option
            download = LxcDownloadAction(self.key, self.path, url)
        else:
            raise JobError("Unsupported url protocol scheme: %s" % scheme)
        self.internal_pipeline.add_action(download)
Exemple #24
0
class UBootUMSAction(DeployAction):  # pylint:disable=too-many-instance-attributes
    """Deploy an image over u-boot USB mass storage emulation."""

    name = "uboot-ums-deploy"
    description = "download image and deploy using uboot mass storage emulation"
    summary = "uboot-ums deployment"

    def populate(self, parameters):
        """Download the image; add overlay/environment steps when tests need them."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        add = self.internal_pipeline.add_action
        add(DownloaderAction('image', path=self.mkdtemp()))
        if not self.test_needs_overlay(parameters):
            return
        add(OverlayAction())
        add(ApplyOverlayImage())
        if self.test_needs_deployment(parameters):
            add(DeployDeviceEnvironment())
Exemple #25
0
class BootKexecAction(BootAction):
    """
    Provide for auto_login parameters in this boot stanza and re-establish the connection after boot
    """

    name = "kexec-boot"
    summary = "kexec a new kernel"
    description = "replace current kernel using kexec"

    def populate(self, parameters):
        """Kexec into the new kernel, then log in and restore the shell."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(KexecAction())
        # Add AutoLoginAction unconditionally as this action does nothing if
        # the configuration does not contain 'auto_login'
        for follow_up in (AutoLoginAction(), ExpectShellSession(),
                          ExportDeviceEnvironment()):
            pipeline.add_action(follow_up)
Exemple #26
0
class BootDockerAction(BootAction):
    """Boot a docker image, optionally attaching a test shell."""

    name = 'boot-docker'
    description = "boot docker image"
    summary = "boot docker image"

    def populate(self, parameters):
        """Start the container; shell actions need both prompts and a test."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(BootDockerRetry())
        if self.has_prompts(parameters) and self.test_has_shell(parameters):
            pipeline.add_action(ExpectShellSession())
            pipeline.add_action(ExportDeviceEnvironment())
Exemple #27
0
class DepthchargeAction(BootAction):
    """
    Wraps the Retry Action to allow for actions which precede the reset,
    e.g. Connect.
    """

    name = "depthcharge-action"
    description = "interactive Depthcharge action"
    summary = "sets up boot with Depthcharge"

    def populate(self, parameters):
        """Overlay the commands, connect, then run the retryable boot."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        for action in (DepthchargeCommandOverlay(), ConnectDevice(),
                       DepthchargeRetry()):
            pipeline.add_action(action)
Exemple #28
0
class BootloaderAction(BootAction):
    """
    Wraps the Retry Action to allow for actions which precede
    the reset, e.g. Connect.
    """

    name = "bootloader-action"
    description = "interactive bootloader action"
    summary = "pass boot commands"

    def populate(self, parameters):
        """Customize the device configuration, connect, then retry the boot."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        for action in (BootloaderCommandOverlay(), ConnectDevice(),
                       BootloaderRetry()):
            pipeline.add_action(action)
Exemple #29
0
class UBootAction(BootAction):
    """
    Wraps the Retry Action to allow for actions which precede
    the reset, e.g. Connect.
    """

    name = "uboot-action"
    description = "interactive uboot action"
    summary = "pass uboot commands"

    def validate(self):
        """Warn about the deprecated 'type' parameter in the boot action."""
        super(UBootAction, self).validate()
        if 'type' in self.parameters:
            self.logger.warning("Specifying a type in the boot action is deprecated. "
                                "Please specify the kernel type in the deploy parameters.")

    def populate(self, parameters):
        """Prepare media and commands, connect, then retry the u-boot boot."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        # customize the device configuration for this job
        for action in (UBootSecondaryMedia(), BootloaderCommandOverlay(),
                       ConnectDevice(), UBootRetry()):
            pipeline.add_action(action)
Exemple #30
0
class ScpOverlay(DeployAction):
    """
    Prepares the overlay and copies it to the target
    """

    section = 'deploy'
    name = "scp-overlay"
    description = "prepare overlay and scp to device"
    summary = "copy overlay to device"

    def __init__(self):
        super(ScpOverlay, self).__init__()
        # parameter keys which may need to be downloaded before the scp
        self.items = []

    def validate(self):
        """An scp overlay is only useful when a test shell will run."""
        super(ScpOverlay, self).validate()
        self.items = [
            'firmware', 'kernel', 'dtb', 'rootfs', 'modules'
        ]
        if not self.test_has_shell(self.parameters):
            self.errors = "Scp overlay needs a test action."
            return

    def populate(self, parameters):
        """Download requested items, prepare the overlay and device environment."""
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # dict.get replaces the '.keys() membership + conditional' idiom;
        # behaviour is identical: '' when 'tar_flags' is absent.
        tar_flags = parameters['deployment_data'].get('tar_flags', '')
        self.set_namespace_data(action=self.name, label=self.name, key='tar_flags', value=tar_flags, parameters=parameters)
        self.internal_pipeline.add_action(OverlayAction())
        for item in self.items:
            if item in parameters:
                self.internal_pipeline.add_action(DownloaderAction(item, path=self.mkdtemp()),
                                                  parameters)
                self.set_namespace_data(action=self.name, label='scp', key=item, value=True, parameters=parameters)
        # we might not have anything to download, just the overlay to push
        self.internal_pipeline.add_action(PrepareOverlayScp())
        # prepare the device environment settings in common data for enabling in the boot step
        self.internal_pipeline.add_action(DeployDeviceEnvironment())
Exemple #31
0
class UBootEnterFastbootAction(RetryAction):
    """Drive u-boot into fastboot mode using the device's configured commands."""

    name = "uboot-enter-fastboot"
    description = "interactive uboot enter fastboot action"
    summary = "uboot commands to enter fastboot mode"

    def __init__(self):
        super().__init__()
        # u-boot method parameters, cached by validate() for use in run()
        self.params = {}

    def populate(self, parameters):
        self.pipeline = Pipeline(parent=self,
                                 job=self.job,
                                 parameters=parameters)
        # establish a new connection before trying the reset
        self.pipeline.add_action(ResetDevice())
        # need to look for Hit any key to stop autoboot
        self.pipeline.add_action(BootloaderInterruptAction())
        self.pipeline.add_action(ConnectLxc())

    def validate(self):
        """Require the u-boot deploy method and its fastboot commands."""
        super().validate()
        deploy_methods = self.job.device["actions"]["deploy"]["methods"]
        if "u-boot" not in deploy_methods:
            self.errors = "uboot method missing"

        self.params = deploy_methods["u-boot"]["parameters"]
        if "commands" not in deploy_methods["u-boot"]["parameters"]["fastboot"]:
            self.errors = "uboot command missing"

    def run(self, connection, max_end_time):
        """Wait for the bootloader prompt, then send each fastboot command."""
        connection = super().run(connection, max_end_time)
        connection.prompt_str = self.params["bootloader_prompt"]
        self.logger.debug("Changing prompt to %s", connection.prompt_str)
        self.wait(connection)
        commands = self.job.device["actions"]["deploy"]["methods"]["u-boot"][
            "parameters"]["fastboot"]["commands"]

        # wait for the prompt between commands, but not after the last one
        last = len(commands) - 1
        for index, line in enumerate(commands):
            connection.sendline(line, delay=self.character_delay)
            if index < last:
                self.wait(connection)

        return connection
Exemple #32
0
class BootloaderAction(RetryAction):
    """
    Wraps the Retry Action to allow for actions which precede
    the reset, e.g. Connect.
    """

    name = "bootloader-action"
    description = "interactive bootloader action"
    summary = "pass boot commands"

    def populate(self, parameters):
        """Customize the device configuration, connect, then retry the boot."""
        self.pipeline = Pipeline(parent=self,
                                 job=self.job,
                                 parameters=parameters)
        add = self.pipeline.add_action
        add(BootloaderCommandOverlay())
        add(ConnectDevice())
        add(BootloaderRetry())
Exemple #33
0
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        """Check grub ramdisk command substitution by BootloaderCommandOverlay.

        Same scenario as the yaml-based variant, but the device is rendered
        from the d02-01 jinja2 template via the test factory.
        """
        parameters = {
            'device_type': 'd02',
            'job_name': 'grub-standard-ramdisk',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'actions': {
                'boot': {
                    'method': 'grub',
                    'commands': 'ramdisk',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                    'dtb': 'broken.dtb'
                }
            }
        }
        # Render the device from its jinja2 template and build a minimal job.
        (rendered, _) = self.factory.create_device('d02-01.jinja2')
        device = NewDevice(yaml.safe_load(rendered))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        parsed = []
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']
        dtb = parameters['actions']['deploy']['dtb']

        substitution_dictionary = {
            '{SERVER_IP}': ip_addr,
            # the addresses need to be hexadecimal
            '{RAMDISK}': ramdisk,
            '{KERNEL}': kernel,
            '{DTB}': dtb
        }
        params = device['actions']['boot']['methods']
        commands = params['grub']['ramdisk']['commands']
        # Before substitution, the raw placeholders must still be present.
        self.assertIn('net_bootp', commands)
        self.assertIn(
            "linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp",
            commands)
        self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
        self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)

        # Substitute in place and re-check against the updated command list.
        params['grub']['ramdisk']['commands'] = substitute(
            params['grub']['ramdisk']['commands'], substitution_dictionary)
        substituted_commands = params['grub']['ramdisk']['commands']
        self.assertIs(type(substituted_commands), list)
        self.assertIn('net_bootp', substituted_commands)
        self.assertNotIn(
            "linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp",
            substituted_commands)
        self.assertIn(
            "linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp"
            % (ip_addr, kernel), substituted_commands)
        # NOTE(review): 'parsed' is never populated, so these two assertNotIn
        # checks are vacuously true — confirm whether they should target
        # substituted_commands instead.
        self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', parsed)
        self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', parsed)
Exemple #34
0
class SshAction(RetryAction):
    """
    Simple action to wrap AutoLoginAction and ExpectShellSession
    """

    section = "boot"
    name = "login-ssh"
    description = "connect over ssh and ensure a shell is found"
    summary = "login over ssh"

    def populate(self, parameters):
        """Build the internal pipeline: copy the overlay across, prepare and
        open the ssh connection, log in, confirm a shell and unpack the
        overlay on the target."""
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        for sub_action in (
                Scp("overlay"),
                PrepareSsh(),
                ConnectSsh(),
                AutoLoginAction(booting=False),
                ExpectShellSession(),
                ExportDeviceEnvironment(),
                ScpOverlayUnpack(),
        ):
            self.internal_pipeline.add_action(sub_action)
Exemple #35
0
class OverlayAction(DeployAction):
    """
    Creates a temporary location into which the lava test shell scripts are installed.
    The location remains available for the testdef actions to populate
    Multinode and LMP actions also populate the one location.
    CreateOverlay then creates a tarball of that location in the output directory
    of the job and removes the temporary location.
    ApplyOverlay extracts that tarball onto the image.

    Deployments which are for a job containing a 'test' action will have
    a TestDefinitionAction added to the job pipeline by this Action.

    The resulting overlay needs to be applied separately and custom classes
    exist for particular deployments, so that the overlay can be applied
    whilst the image is still mounted etc.

    This class handles parts of the overlay which are independent
    of the content of the test definitions themselves. Other
    overlays are handled by TestDefinitionAction.
    """

    name = "lava-overlay"
    description = "add lava scripts during deployment for test shell use"
    summary = "overlay the lava support scripts"

    def __init__(self):
        super().__init__()
        # Directory shipping the generic lava-* helper scripts,
        # located relative to this module's own file.
        self.lava_test_dir = os.path.realpath('%s/../../lava_test_shell' %
                                              os.path.dirname(__file__))
        self.scripts_to_copy = []
        # 755 file permissions
        self.xmod = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH
        # Network/probe details of the device under test; filled in by
        # validate() from the device dictionary when present.
        self.target_mac = ''
        self.target_ip = ''
        self.probe_ip = ''
        self.probe_channel = ''

    def validate(self):
        """
        Collect the helper scripts to install and publish shared namespace
        data (results directory and shell command) for later actions.
        """
        super().validate()
        self.scripts_to_copy = sorted(
            glob.glob(os.path.join(self.lava_test_dir, 'lava-*')))
        # Distro-specific scripts override the generic ones
        if not self.test_needs_overlay(self.parameters):
            return
        lava_test_results_dir = self.get_constant('lava_test_results_dir',
                                                  'posix')
        # The constant carries a %s placeholder for the job id.
        lava_test_results_dir = lava_test_results_dir % self.job.job_id
        self.set_namespace_data(action='test',
                                label='results',
                                key='lava_test_results_dir',
                                value=lava_test_results_dir)
        lava_test_sh_cmd = self.get_constant('lava_test_sh_cmd', 'posix')
        self.set_namespace_data(action='test',
                                label='shared',
                                key='lava_test_sh_cmd',
                                value=lava_test_sh_cmd)

        # Add distro support scripts - only if deployment_data is set
        distro = self.parameters['deployment_data'].get('distro')
        if distro:
            distro_support_dir = '%s/distro/%s' % (self.lava_test_dir, distro)
            self.scripts_to_copy += sorted(
                glob.glob(os.path.join(distro_support_dir, 'lava-*')))

        if not self.scripts_to_copy:
            # Not an error: deployments without support scripts are allowed.
            self.logger.debug("Skipping lava_test_shell support scripts.")
        if 'parameters' in self.job.device:
            if 'interfaces' in self.job.device['parameters']:
                if 'target' in self.job.device['parameters']['interfaces']:
                    self.target_mac = self.job.device['parameters'][
                        'interfaces']['target'].get('mac', '')
                    self.target_ip = self.job.device['parameters'][
                        'interfaces']['target'].get('ip', '')
        # Use the first static_info entry carrying both probe values.
        for device in self.job.device.get('static_info', []):
            if 'probe_channel' in device and 'probe_ip' in device:
                self.probe_channel = device['probe_channel']
                self.probe_ip = device['probe_ip']
                break

    def populate(self, parameters):
        """Build the internal pipeline of overlay sub-actions."""
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        if self.test_needs_overlay(parameters):
            if any('ssh' in data for data in self.job.device['actions']
                   ['deploy']['methods']):
                # only devices supporting ssh deployments add this action.
                self.internal_pipeline.add_action(SshAuthorize())
            self.internal_pipeline.add_action(VlandOverlayAction())
            self.internal_pipeline.add_action(MultinodeOverlayAction())
            self.internal_pipeline.add_action(TestDefinitionAction())
            self.internal_pipeline.add_action(CompressOverlay())
            self.internal_pipeline.add_action(
                PersistentNFSOverlay())  # idempotent

    def run(self, connection, max_end_time):  # pylint: disable=too-many-locals
        """
        Check if a lava-test-shell has been requested, implement the overlay
        * create test runner directories beneath the temporary location
        * copy runners into test runner directories
        """
        tmp_dir = self.mkdtemp()
        namespace = self.parameters.get('namespace')
        if namespace:
            # Skip entirely when this namespace runs no test action.
            if namespace not in get_test_action_namespaces(
                    self.job.parameters):
                self.logger.info("[%s] skipped %s - no test action.",
                                 namespace, self.name)
                return connection
        self.set_namespace_data(action='test',
                                label='shared',
                                key='location',
                                value=tmp_dir)
        lava_test_results_dir = self.get_namespace_data(
            action='test', label='results', key='lava_test_results_dir')
        if not lava_test_results_dir:
            raise LAVABug("Unable to identify top level lava test directory")
        shell = self.get_namespace_data(action='test',
                                        label='shared',
                                        key='lava_test_sh_cmd')
        self.logger.debug("[%s] Preparing overlay tarball in %s", namespace,
                          tmp_dir)
        lava_path = os.path.abspath("%s/%s" % (tmp_dir, lava_test_results_dir))
        for runner_dir in ['bin', 'tests', 'results']:
            # avoid os.path.join as lava_test_results_dir startswith / so location is *dropped* by join.
            path = os.path.abspath("%s/%s" % (lava_path, runner_dir))
            if not os.path.exists(path):
                os.makedirs(path, 0o755)
                self.logger.debug("makedir: %s", path)
        for fname in self.scripts_to_copy:
            with open(fname, 'r') as fin:
                foutname = os.path.basename(fname)
                output_file = '%s/bin/%s' % (lava_path, foutname)
                if "distro" in fname:
                    distribution = os.path.basename(os.path.dirname(fname))
                    self.logger.debug("Updating %s (%s)", output_file,
                                      distribution)
                else:
                    self.logger.debug("Creating %s", output_file)
                with open(output_file, 'w') as fout:
                    # Every installed script starts with a shebang using the
                    # shell command published by validate().
                    fout.write("#!%s\n\n" % shell)
                    if foutname == 'lava-target-mac':
                        fout.write("TARGET_DEVICE_MAC='%s'\n" %
                                   self.target_mac)
                    if foutname == 'lava-target-ip':
                        fout.write("TARGET_DEVICE_IP='%s'\n" % self.target_ip)
                    if foutname == 'lava-probe-ip':
                        fout.write("PROBE_DEVICE_IP='%s'\n" % self.probe_ip)
                    if foutname == 'lava-probe-channel':
                        fout.write("PROBE_DEVICE_CHANNEL='%s'\n" %
                                   self.probe_channel)
                    if foutname == 'lava-target-storage':
                        fout.write('LAVA_STORAGE="\n')
                        for method in self.job.device.get(
                                'storage_info', [{}]):
                            for key, value in method.items():
                                if key == 'yaml_line':
                                    continue
                                self.logger.debug("storage methods:\t%s\t%s",
                                                  key, value)
                                # raw string: literal \t and \n markers are
                                # written into the script, not real tabs.
                                fout.write(r"\t%s\t%s\n" % (key, value))
                        fout.write('"\n')
                    fout.write(fin.read())
                    os.fchmod(fout.fileno(), self.xmod)

        # Generate the file containing the secrets
        if 'secrets' in self.job.parameters:
            self.logger.debug("Creating %s/secrets", lava_path)
            with open(os.path.join(lava_path, 'secrets'), 'w') as fout:
                for key, value in self.job.parameters['secrets'].items():
                    if key == 'yaml_line':
                        continue
                    fout.write("%s=%s\n" % (key, value))

        connection = super().run(connection, max_end_time)
        return connection
Exemple #36
0
class FastbootAction(DeployAction):  # pylint:disable=too-many-instance-attributes
    """
    Download the requested images and deploy them to the device with
    fastboot, optionally applying the LAVA overlay first.
    """

    name = "fastboot-deploy"
    description = "download files and deploy using fastboot"
    summary = "fastboot deployment"

    def __init__(self):
        # Zero-argument super() for consistency with the other actions
        # in this file (the old two-argument form was Python 2 style).
        super().__init__()
        self.force_prompt = False

    def validate(self):
        """Require an LXC protocol when a deployment is actually needed."""
        super().validate()
        if not self.test_needs_deployment(self.parameters):
            return
        protocol = [
            protocol for protocol in self.job.protocols
            if protocol.name == LxcProtocol.name
        ]
        if not protocol:
            self.errors = "No LXC device requested"

    def populate(self, parameters):
        """Build the internal pipeline: overlay creation, reset/entry into
        fastboot mode, one download (plus optional overlay application) per
        image, then the flash ordering action."""
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        if self.test_needs_overlay(parameters):
            self.internal_pipeline.add_action(OverlayAction())
        # Check if the device has a power command such as HiKey, Dragonboard,
        # etc. against device that doesn't like Nexus, etc.
        if self.job.device.get('fastboot_via_uboot', False):
            self.internal_pipeline.add_action(ConnectDevice())
            self.internal_pipeline.add_action(UBootEnterFastbootAction())
        elif self.job.device.hard_reset_command:
            self.force_prompt = True
            self.internal_pipeline.add_action(ConnectDevice())
            self.internal_pipeline.add_action(ResetDevice())
        else:
            self.internal_pipeline.add_action(EnterFastbootAction())

        fastboot_dir = self.mkdtemp()
        image_keys = sorted(parameters['images'].keys())
        for image in image_keys:
            # 'yaml_line' is parser metadata, not a real image entry.
            if image != 'yaml_line':
                self.internal_pipeline.add_action(
                    DownloaderAction(image, fastboot_dir))
                if parameters['images'][image].get('apply-overlay', False):
                    if self.test_needs_overlay(parameters):
                        # sparse images need a dedicated overlay action
                        if parameters['images'][image].get('sparse', True):
                            self.internal_pipeline.add_action(
                                ApplyOverlaySparseImage(image))
                        else:
                            self.internal_pipeline.add_action(
                                ApplyOverlayImage(image,
                                                  use_root_partition=False))
                if self.test_needs_overlay(parameters) and \
                   self.test_needs_deployment(parameters):
                    self.internal_pipeline.add_action(
                        DeployDeviceEnvironment())
        self.internal_pipeline.add_action(FastbootFlashOrderAction())
Exemple #37
0
class BareboxRetry(BootHasMixin, RetryAction):
    """Boot by driving interactive barebox commands, with retries."""

    name = "barebox-retry"
    description = "interactive barebox retry action"
    summary = "barebox commands with retry"

    def populate(self, parameters):
        """Build the boot pipeline; guard clauses trim the tail actions
        when no prompts or no shell are configured."""
        self.pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # establish a new connection before trying the reset
        self.pipeline.add_action(ResetDevice())
        self.pipeline.add_action(BootloaderInterruptAction())
        self.pipeline.add_action(BootloaderCommandsAction())
        if not self.has_prompts(parameters):
            return
        self.pipeline.add_action(AutoLoginAction())
        if not self.test_has_shell(parameters):
            return
        self.pipeline.add_action(ExpectShellSession())
        if "transfer_overlay" in parameters:
            self.pipeline.add_action(OverlayUnpack())
        self.pipeline.add_action(ExportDeviceEnvironment())

    def validate(self):
        """Publish the barebox bootloader prompt from the device dict."""
        super().validate()
        barebox_parameters = self.job.device["actions"]["boot"]["methods"][
            "barebox"]["parameters"]
        self.set_namespace_data(
            action=self.name,
            label="bootloader_prompt",
            key="prompt",
            value=barebox_parameters["bootloader_prompt"],
        )
Exemple #38
0
    def test_kvm_simulation(self):  # pylint: disable=too-many-statements
        """
        Build a pipeline which simulates a KVM LAVA job
        without using the formal objects (to avoid validating
        data known to be broken). The details are entirely
        arbitrary.
        """
        factory = Factory()
        job = factory.create_kvm_job("sample_jobs/kvm.yaml")
        pipe = Pipeline()
        action = Action()
        action.name = "deploy_linaro_image"
        action.description = "deploy action using preset subactions in an internal pipe"
        action.summary = "deploy_linaro_image"
        action.job = job
        # deliberately unlikely location
        # a successful validation would need to use the cwd
        action.parameters = {
            "image": "file:///none/images/bad-kvm-debian-wheezy.img"
        }
        pipe.add_action(action)
        # levels record the order in which actions are added
        self.assertEqual(action.level, "1")
        deploy_pipe = Pipeline(action)
        action = Action()
        action.name = "downloader"
        action.description = "download image wrapper, including an internal retry pipe"
        action.summary = "downloader"
        action.job = job
        deploy_pipe.add_action(action)
        # a child pipeline extends the parent's level with a dotted suffix
        self.assertEqual(action.level, "1.1")
        # a formal RetryAction would contain a pre-built pipeline which can be inserted directly
        retry_pipe = Pipeline(action)
        action = Action()
        action.name = "wget"
        action.description = "do the download with retries"
        action.summary = "wget"
        action.job = job
        retry_pipe.add_action(action)
        self.assertEqual(action.level, "1.1.1")
        action = Action()
        action.name = "checksum"
        action.description = "checksum the downloaded file"
        action.summary = "md5sum"
        action.job = job
        deploy_pipe.add_action(action)
        self.assertEqual(action.level, "1.2")
        action = Action()
        action.name = "overlay"
        action.description = "apply lava overlay"
        action.summary = "overlay"
        action.job = job
        deploy_pipe.add_action(action)
        self.assertEqual(action.level, "1.3")
        action = Action()
        action.name = "boot"
        action.description = "boot image"
        action.summary = "qemu"
        action.job = job
        # cmd_line built from device configuration
        # NOTE(review): adjacent string literals concatenate, so
        # "-hda" "%s" % "tbd" yields the single argument "-hdatbd" and
        # "nic,model=virtio" "-net user" also fuse into one argument.
        # Harmless here because the command is never executed, but
        # confirm the concatenation is intentional.
        action.parameters = {
            "cmd_line": [
                "qemu-system-x86_64",
                "-machine accel=kvm:tcg",
                "-hda"
                "%s" % "tbd",
                "-nographic",
                "-net",
                "nic,model=virtio"
                "-net user",
            ]
        }
        pipe.add_action(action)
        self.assertEqual(action.level, "2")

        action = Action()
        action.name = "simulated"
        action.description = "lava test shell"
        action.summary = "simulated"
        action.job = job
        # a formal lava test shell action would include an internal pipe
        # which would handle the run.sh
        pipe.add_action(action)
        self.assertEqual(action.level, "3")
        # just a fake action
        action = Action()
        action.name = "fake"
        action.description = "faking results"
        action.summary = "fake action"
        action.job = job
        pipe.add_action(action)
        self.assertEqual(action.level, "4")
        # describe() lists only the four top-level actions
        self.assertEqual(len(pipe.describe()), 4)
Exemple #39
0
    def test_keep_connection(self):
        """A KeepConnection action must hand back the very same
        connection object it was given."""
        pipeline = Pipeline()
        pipeline.add_action(TestFakeActions.KeepConnection())
        connection = object()
        self.assertIs(connection, pipeline.run_actions(connection, None))
Exemple #40
0
 def test_list_of_subcommands(self):
     """The pipeline must keep sub-actions in insertion order."""
     pipeline = Pipeline()
     for sub_action in (self.sub0, self.sub1):
         pipeline.add_action(sub_action)
     self.assertIs(pipeline.actions[0], self.sub0)
     self.assertIs(pipeline.actions[1], self.sub1)
Exemple #41
0
    def test_complex_pipeline(self):  # pylint: disable=too-many-statements
        """Level numbers must follow insertion order through nested
        pipelines, with children taking dotted suffixes."""
        action = Action()
        action.name = "starter_action"
        action.description = "test action only"
        action.summary = "starter"
        pipe = Pipeline()
        pipe.add_action(action)
        self.assertEqual(action.level, "1")
        action = Action()
        action.name = "pipe_action"
        action.description = "action implementing an internal pipe"
        action.summary = "child"
        pipe.add_action(action)
        self.assertEqual(action.level, "2")
        # a formal RetryAction would contain a pre-built pipeline which can be inserted directly
        retry_pipe = Pipeline(action)
        action = Action()
        action.name = "child_action"
        action.description = "action inside the internal pipe"
        action.summary = "child"
        retry_pipe.add_action(action)
        # children of the level "2" action are numbered 2.1, 2.2, ...
        self.assertEqual(action.level, "2.1")
        action = Action()
        action.name = "second-child-action"
        action.description = "second action inside the internal pipe"
        action.summary = "child2"
        retry_pipe.add_action(action)
        self.assertEqual(action.level, "2.2")
        action = Action()
        action.name = "baby_action"
        action.description = "action implementing an internal pipe"
        action.summary = "baby"
        retry_pipe.add_action(action)
        self.assertEqual(action.level, "2.3")
        inner_pipe = Pipeline(action)
        action = Action()
        action.name = "single_action"
        action.description = "single line action"
        action.summary = "single"
        inner_pipe.add_action(action)
        # a second nesting level extends the dotted level again
        self.assertEqual(action.level, "2.3.1")

        action = Action()
        action.name = "step_out"
        action.description = "step out of inner pipe"
        action.summary = "brother"
        retry_pipe.add_action(action)
        self.assertEqual(action.level, "2.4")
        action = Action()
        action.name = "top-level"
        action.description = "top level"
        action.summary = "action"
        pipe.add_action(action)
        self.assertEqual(action.level, "3")
        # describe() covers only the three top-level actions
        self.assertEqual(len(pipe.describe()), 3)
Exemple #42
0
class BootDFURetry(RetryAction):
    """Boot a dfu image via the command line interface, with retries."""

    name = "boot-dfu-retry"
    description = "boot dfu image using the command line interface"
    summary = "boot dfu image"

    def populate(self, parameters):
        """Assemble the dfu boot pipeline from the device dictionary."""
        dfu = self.job.device["actions"]["boot"]["methods"]["dfu"]
        # The method-level parameters deliberately replace the ones passed
        # in: the pipeline is configured from the device dictionary.
        parameters = dfu["parameters"]

        self.pipeline = Pipeline(parent=self,
                                 job=self.job,
                                 parameters=parameters)
        boot_actions = [ConnectDevice(), ResetDevice()]
        if dfu.get("implementation") == "u-boot":
            # u-boot must be interrupted before DFU mode can be entered
            boot_actions.append(BootloaderInterruptAction(method="u-boot"))
            boot_actions.append(EnterDFU())
        boot_actions.append(WaitDFUDeviceAction())
        boot_actions.append(FlashDFUAction())
        for boot_action in boot_actions:
            self.pipeline.add_action(boot_action)
Exemple #43
0
class CreateOverlay(Action):
    """
    Creates a temporary location into which the lava test shell scripts are installed.
    The location remains available for the testdef actions to populate
    Multinode and LMP actions also populate the one location.
    CreateOverlay then creates a tarball of that location in the output directory
    of the job and removes the temporary location.
    ApplyOverlay extracts that tarball onto the image.

    Deployments which are for a job containing a 'test' action will have
    a TestDefinitionAction added to the job pipeline by this Action.

    The resulting overlay needs to be applied separately and custom classes
    exist for particular deployments, so that the overlay can be applied
    whilst the image is still mounted etc.

    This class handles parts of the overlay which are independent
    of the content of the test definitions themselves. Other
    overlays are handled by TestDefinitionAction.
    """

    name = "lava-create-overlay"
    description = "add lava scripts during deployment for test shell use"
    summary = "overlay the lava support scripts"

    def __init__(self):
        super().__init__()
        # Directory shipping the generic lava-* helper scripts,
        # located relative to this module's own file.
        self.lava_test_dir = os.path.realpath("%s/../../lava_test_shell" %
                                              os.path.dirname(__file__))
        self.scripts_to_copy = []
        # 755 file permissions
        self.xmod = (stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH
                     | stat.S_IROTH)
        # Network/probe details of the device under test; filled in by
        # validate() from the device dictionary when present.
        self.target_mac = ""
        self.target_ip = ""
        self.probe_ip = ""
        self.probe_channel = ""

    def validate(self):
        """Collect the helper scripts to install and publish shared
        namespace data (results directory, shell command) for later
        actions."""
        super().validate()
        self.scripts_to_copy = sorted(
            glob.glob(os.path.join(self.lava_test_dir, "lava-*")))

        lava_test_results_dir = self.get_constant("lava_test_results_dir",
                                                  "posix")
        # The constant carries a %s placeholder for the job id.
        lava_test_results_dir = lava_test_results_dir % self.job.job_id
        self.set_namespace_data(
            action="test",
            label="results",
            key="lava_test_results_dir",
            value=lava_test_results_dir,
        )
        lava_test_sh_cmd = self.get_constant("lava_test_sh_cmd", "posix")
        self.set_namespace_data(
            action="test",
            label="shared",
            key="lava_test_sh_cmd",
            value=lava_test_sh_cmd,
        )

        # Add distro support scripts - only if deployment_data is set
        distro = self.parameters.get("deployment_data", {}).get("distro")
        if distro:
            distro_support_dir = "%s/distro/%s" % (self.lava_test_dir, distro)
            self.scripts_to_copy += sorted(
                glob.glob(os.path.join(distro_support_dir, "lava-*")))

        if not self.scripts_to_copy:
            # Not an error: deployments without support scripts are allowed.
            self.logger.debug("Skipping lava_test_shell support scripts.")
        if "parameters" in self.job.device:
            if "interfaces" in self.job.device["parameters"]:
                if "target" in self.job.device["parameters"]["interfaces"]:
                    self.target_mac = self.job.device["parameters"][
                        "interfaces"]["target"].get("mac", "")
                    self.target_ip = self.job.device["parameters"][
                        "interfaces"]["target"].get("ip", "")
        # Use the first static_info entry carrying both probe values.
        for device in self.job.device.get("static_info", []):
            if "probe_channel" in device and "probe_ip" in device:
                self.probe_channel = device["probe_channel"]
                self.probe_ip = device["probe_ip"]
                break

    def populate(self, parameters):
        """Build the pipeline of overlay sub-actions."""
        self.pipeline = Pipeline(parent=self,
                                 job=self.job,
                                 parameters=parameters)
        if any("ssh" in data
               for data in self.job.device["actions"]["deploy"]["methods"]):
            # only devices supporting ssh deployments add this action.
            self.pipeline.add_action(SshAuthorize())
        self.pipeline.add_action(VlandOverlayAction())
        self.pipeline.add_action(MultinodeOverlayAction())
        self.pipeline.add_action(TestDefinitionAction())
        self.pipeline.add_action(CompressOverlay())
        self.pipeline.add_action(PersistentNFSOverlay())  # idempotent

    def _export_data(self, fout, data, prefix):
        """Recursively write 'export PREFIX=value' shell lines for *data*.

        Nested dicts and lists/tuples extend the prefix with '_key' or
        '_index'. Booleans become 1/0, ints are written unquoted and every
        other scalar is single-quoted.
        """
        if isinstance(data, dict):
            if prefix:
                prefix += "_"
            for key, value in data.items():
                self._export_data(fout, value, "%s%s" % (prefix, key))
        elif isinstance(data, (list, tuple)):
            if prefix:
                prefix += "_"
            for index, value in enumerate(data):
                self._export_data(fout, value, "%s%s" % (prefix, index))
        else:
            if isinstance(data, bool):
                # bool is a subclass of int, so it must be tested first
                data = "1" if data else "0"
            elif not isinstance(data, int):
                # the original had a no-op `data = data` branch for ints;
                # ints simply stay unquoted, everything else is quoted
                data = "'%s'" % data
            self.logger.debug("- %s=%s", prefix, data)
            fout.write("export %s=%s\n" % (prefix, data))

    def run(self, connection, max_end_time):
        """Create the overlay directory tree, install the helper scripts
        and write the environment and secrets files."""
        tmp_dir = self.mkdtemp()
        self.set_namespace_data(action="test",
                                label="shared",
                                key="location",
                                value=tmp_dir)
        lava_test_results_dir = self.get_namespace_data(
            action="test", label="results", key="lava_test_results_dir")
        if not lava_test_results_dir:
            raise LAVABug("Unable to identify top level lava test directory")
        shell = self.get_namespace_data(action="test",
                                        label="shared",
                                        key="lava_test_sh_cmd")
        namespace = self.parameters.get("namespace")
        self.logger.debug("[%s] Preparing overlay tarball in %s", namespace,
                          tmp_dir)
        lava_path = os.path.abspath("%s/%s" % (tmp_dir, lava_test_results_dir))
        for runner_dir in ["bin", "tests", "results"]:
            # avoid os.path.join as lava_test_results_dir startswith / so location is *dropped* by join.
            path = os.path.abspath("%s/%s" % (lava_path, runner_dir))
            if not os.path.exists(path):
                os.makedirs(path, 0o755)
                self.logger.debug("makedir: %s", path)
        for fname in self.scripts_to_copy:
            with open(fname, "r") as fin:
                foutname = os.path.basename(fname)
                output_file = "%s/bin/%s" % (lava_path, foutname)
                if "distro" in fname:
                    distribution = os.path.basename(os.path.dirname(fname))
                    self.logger.debug("Updating %s (%s)", output_file,
                                      distribution)
                else:
                    self.logger.debug("Creating %s", output_file)
                with open(output_file, "w") as fout:
                    # Every installed script starts with a shebang using the
                    # shell command published by validate().
                    fout.write("#!%s\n\n" % shell)
                    if foutname == "lava-target-mac":
                        fout.write("TARGET_DEVICE_MAC='%s'\n" %
                                   self.target_mac)
                    if foutname == "lava-target-ip":
                        fout.write("TARGET_DEVICE_IP='%s'\n" % self.target_ip)
                    if foutname == "lava-probe-ip":
                        fout.write("PROBE_DEVICE_IP='%s'\n" % self.probe_ip)
                    if foutname == "lava-probe-channel":
                        fout.write("PROBE_DEVICE_CHANNEL='%s'\n" %
                                   self.probe_channel)
                    if foutname == "lava-target-storage":
                        fout.write('LAVA_STORAGE="\n')
                        for method in self.job.device.get(
                                "storage_info", [{}]):
                            for key, value in method.items():
                                self.logger.debug("storage methods:\t%s\t%s",
                                                  key, value)
                                # raw string: literal \t and \n markers are
                                # written into the script, not real tabs.
                                fout.write(r"\t%s\t%s\n" % (key, value))
                        fout.write('"\n')
                    fout.write(fin.read())
                    os.fchmod(fout.fileno(), self.xmod)

        # Generate environment file
        self.logger.debug("Creating %s/environment", lava_path)
        with open(os.path.join(lava_path, "environment"), "w") as fout:
            sources = [
                ("environment", ""),
                ("device_info", "LAVA_DEVICE_INFO"),
                ("static_info", "LAVA_STATIC_INFO"),
                ("storage_info", "LAVA_STORAGE_INFO"),
            ]
            for source, prefix in sources:
                data = self.job.device.get(source, {})
                if data:
                    self.logger.debug("%s:", source)
                    self._export_data(fout, data, prefix)
            data = None
            # the multinode protocol environment takes precedence over the
            # plain job environment
            if ("protocols" in self.job.parameters
                    and "lava-multinode" in self.job.parameters["protocols"]
                    and "environment"
                    in self.job.parameters["protocols"]["lava-multinode"]):
                data = self.job.parameters["protocols"]["lava-multinode"][
                    "environment"]
            elif "environment" in self.job.parameters:
                data = self.job.parameters["environment"]
            if data:
                self.logger.debug("job environment:")
                self._export_data(fout, data, "")

        # Generate the file containing the secrets
        if "secrets" in self.job.parameters:
            self.logger.debug("Creating %s/secrets", lava_path)
            with open(os.path.join(lava_path, "secrets"), "w") as fout:
                for key, value in self.job.parameters["secrets"].items():
                    fout.write("%s=%s\n" % (key, value))

        connection = super().run(connection, max_end_time)
        return connection
Exemple #44
0
class VExpressMsdAction(DeployAction):
    """
    Deploys a board recovery image to a Versatile Express board
    via its firmware mass-storage device.
    """

    name = "vexpress-fw-deploy"
    description = "deploy vexpress board recovery image"
    summary = "VExpress FW deployment"

    def validate(self):
        """A recovery_image parameter is mandatory for this deployment."""
        super().validate()
        if "recovery_image" not in self.parameters:
            self.errors = "recovery_image is required"

    def populate(self, parameters):
        """Build the deployment pipeline: download the recovery image,
        reset the board, expose its mass storage and copy the image on."""
        download_dir = self.mkdtemp()
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        self.internal_pipeline.add_action(
            DownloaderAction("recovery_image", path=download_dir))
        self.internal_pipeline.add_action(LxcCreateUdevRuleAction())
        self.force_prompt = True
        for sub_action in (
                ConnectDevice(),
                ResetDevice(),
                ExtractVExpressRecoveryImage(),
                EnterVExpressMCC(),
                EnableVExpressMassStorage(),
                WaitUSBMassStorageDeviceAction(),
                MountVExpressMassStorageDevice(),
                DeployVExpressRecoveryImage(),
                UnmountVExpressMassStorageDevice(),
                VExpressFlashErase(),
        ):
            self.internal_pipeline.add_action(sub_action)
Exemple #45
0
class LxcAction(DeployAction):  # pylint:disable=too-many-instance-attributes
    """Download files and deploy them using an LXC container.

    Supports an optional Nexell extension ('nexell_ext' parameter)
    which replaces the standard container create/overlay sequence.
    """

    name = "lxc-deploy"
    description = "download files and deploy using lxc"
    summary = "lxc deployment"

    def __init__(self):
        super().__init__()
        # container metadata collected during deployment
        self.lxc_data = {}

    def validate(self):
        """Log the installed lxc (or mocker) version, require the LXC
        protocol in the job and check that lxc-create is on PATH."""
        super().validate()
        lxc_version = debian_package_version(pkg='lxc', split=False)
        # FIX: use equality, not identity - 'is not' against a str literal
        # is unreliable and raises SyntaxWarning on Python >= 3.8.
        if lxc_version != '':
            self.logger.info("lxc, installed at version: %s", lxc_version)
        else:
            self.logger.info(
                "lava-lxc-mocker, installed at version: %s",
                debian_package_version(pkg='lava-lxc-mocker', split=False))
        protocols = [protocol.name for protocol in self.job.protocols]
        if LxcProtocol.name not in protocols:
            self.logger.debug("Missing protocol '%s' in %s", LxcProtocol.name,
                              protocols)
            self.errors = "Missing protocol '%s'" % LxcProtocol.name
        which('lxc-create')

    def populate(self, parameters):
        """Build the container pipeline; the Nexell path takes precedence
        when 'nexell_ext' is present and truthy."""
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        # FIX: use .get() - direct indexing raised KeyError for jobs that
        # do not define the optional 'nexell_ext' parameter (both in the
        # debug log line and in the branch condition below).
        nexell_ext = parameters.get('nexell_ext')
        # lazy %-style args avoid building the strings when DEBUG is off
        self.logger.debug("[SEOJI] deploy/lxc.py parameters: %s",
                          str(parameters))
        self.logger.debug("[SEOJI] deploy/lxc.py parameters['nexell_ext]: %s",
                          str(nexell_ext))
        if nexell_ext:
            self.internal_pipeline.add_action(NexellCreateAction())
            self.internal_pipeline.add_action(OverlayAction())
            self.internal_pipeline.add_action(
                ApplyNexellLxcOverlay(nexell_ext))
        else:
            self.internal_pipeline.add_action(LxcCreateAction())
            self.internal_pipeline.add_action(LxcCreateUdevRuleAction())
            if 'packages' in parameters:
                self.internal_pipeline.add_action(LxcStartAction())
                self.internal_pipeline.add_action(LxcAptUpdateAction())
                self.internal_pipeline.add_action(LxcAptInstallAction())
                self.internal_pipeline.add_action(LxcStopAction())
            if self.test_needs_deployment(parameters):
                self.internal_pipeline.add_action(DeployDeviceEnvironment())
            if self.test_needs_overlay(parameters):
                self.internal_pipeline.add_action(OverlayAction())
                self.internal_pipeline.add_action(ApplyLxcOverlay())
        '''
Exemple #46
0
    def test_change_connection(self):
        """A MakeNewConnection action must hand back a different
        connection object than the one passed into run_actions."""
        pipeline = Pipeline()
        pipeline.add_action(TestFakeActions.MakeNewConnection())
        original = object()
        # the pipeline is expected to swap in the new connection
        self.assertIsNot(original, pipeline.run_actions(original, None))
Exemple #47
0
class BootloaderRetry(BootHasMixin, RetryAction):
    """Interactive bootloader boot (iPXE method) with retry support."""

    name = "bootloader-retry"
    description = "interactive uboot retry action"
    summary = "uboot commands with retry"

    def __init__(self):
        super().__init__()
        # key of the boot method in the device dictionary
        self.type = "ipxe"
        self.force_prompt = False

    def populate(self, parameters):
        """Reset, interrupt the bootloader, run its commands and
        optionally auto-login into a shell."""
        self.pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # establish a new connection before trying the reset
        self.pipeline.add_action(ResetDevice())
        self.pipeline.add_action(BootloaderInterruptAction())
        # need to look for Hit any key to stop autoboot
        self.pipeline.add_action(BootloaderCommandsAction())
        if self.has_prompts(parameters):
            self.pipeline.add_action(AutoLoginAction())
            if self.test_has_shell(parameters):
                self.pipeline.add_action(ExpectShellSession())
                if "transfer_overlay" in parameters:
                    self.pipeline.add_action(OverlayUnpack())
                self.pipeline.add_action(ExportDeviceEnvironment())

    def validate(self):
        """Require a bootloader prompt in the device config and publish it."""
        super().validate()
        method_params = self.job.device["actions"]["boot"]["methods"][
            self.type]["parameters"]
        if "bootloader_prompt" not in method_params:
            self.errors = "Missing bootloader prompt for device"
            # FIX: return early - the lookup below would raise KeyError on
            # the very key that was just reported as missing.
            return
        self.set_namespace_data(
            action=self.name,
            label="bootloader_prompt",
            key="prompt",
            value=method_params["bootloader_prompt"],
        )
Exemple #48
0
class MassStorage(Action):
    """Deploy one or more images to the device's mass storage."""

    name = "storage-deploy"
    description = "Deploy image to mass storage"
    summary = "write image to storage"

    def __init__(self):
        super().__init__()
        self.suffix = None        # reserved for a path suffix, set elsewhere
        self.image_path = None    # temp dir the images are downloaded into

    def validate(self):
        """Require a target device and publish it under the u-boot label."""
        super().validate()
        if "device" not in self.parameters:
            self.errors = "No device specified for mass storage deployment"
        if not self.valid:
            return
        self.set_namespace_data(
            action=self.name,
            label="u-boot",
            key="device",
            value=self.parameters["device"],
        )

    def populate(self, parameters):
        """
        The dispatcher does the first download as the first deployment is not guaranteed to
        have DNS resolution fully working, so we can use the IP address of the dispatcher
        to get it (with the advantage that the dispatcher decompresses it so that the ramdisk
        can pipe the raw image directly from wget to dd.
        This also allows the use of local file:// locations which are visible to the dispatcher
        but not the device.
        """
        self.image_path = self.mkdtemp()
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.pipeline = pipeline
        if self.test_needs_overlay(parameters):
            # idempotent, includes testdef
            pipeline.add_action(OverlayAction())
        uniquify = parameters.get("uniquify", True)
        if "images" in parameters:
            # download every named image in a stable (sorted) order
            for key in sorted(parameters["images"]):
                image_params = parameters["images"][key]
                pipeline.add_action(DownloaderAction(
                    key,
                    path=self.image_path,
                    uniquify=uniquify,
                    params=image_params,
                ))
                if image_params.get("apply-overlay", False):
                    if self.test_needs_overlay(parameters):
                        pipeline.add_action(ApplyOverlayImage())
            pipeline.add_action(DDAction())
        elif "image" in parameters:
            pipeline.add_action(DownloaderAction(
                "image",
                path=self.image_path,
                uniquify=uniquify,
                params=parameters["image"],
            ))
            if self.test_needs_overlay(parameters):
                pipeline.add_action(ApplyOverlayImage())
            pipeline.add_action(DDAction())

        # FIXME: could support tarballs too
        if self.test_needs_deployment(parameters):
            pipeline.add_action(DeployDeviceEnvironment())
Exemple #49
0
class UUUBootRetryAction(RetryAction):
    """
    Wraps the Retry Action to allow for actions which precede
    the reset, e.g. Connect.
    """

    name = "uuu-boot-retry"
    description = "Boot the board using uboot and perform uuu commands"
    summary = "Pass uuu commands"

    def populate(self, parameters):
        """Chain reset, serial-download check, boot-media corruption,
        a second reset, the uuu commands and the serial connection."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.pipeline = pipeline
        pipeline.add_action(ResetDevice())
        pipeline.add_action(CheckSerialDownloadMode())
        pipeline.add_action(BootBootloaderCorruptBootMediaAction())
        pipeline.add_action(ResetDevice())
        # the uuu action receives the full boot parameters explicitly
        pipeline.add_action(UUUBootAction(), parameters=parameters)
        pipeline.add_action(ConnectDevice())
Exemple #50
0
class MinimalBoot(BootAction):
    """Boot method that only connects to and resets the device."""

    name = 'minimal-boot'
    description = "connect and reset device"
    summary = "connect and reset device"

    def populate(self, parameters):
        """Connect and reset; optionally auto-login and set up a shell."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(ConnectDevice())
        pipeline.add_action(ResetDevice())
        if not self.has_prompts(parameters):
            return
        pipeline.add_action(AutoLoginAction())
        if self.test_has_shell(parameters):
            pipeline.add_action(ExpectShellSession())
            if 'transfer_overlay' in parameters:
                pipeline.add_action(OverlayUnpack())
            pipeline.add_action(ExportDeviceEnvironment())

    def run(self, connection, max_end_time):
        """Reuse the shared connection, run the pipeline, store it back."""
        connection = self.get_namespace_data(
            action='shared', label='shared', key='connection', deepcopy=False)
        connection = super().run(connection, max_end_time)
        self.set_namespace_data(
            action='shared', label='shared', key='connection', value=connection)
        return connection
Exemple #51
0
class FastbootFlashOrderAction(DeployAction):
    """
    Fastboot flash image.

    Orders the per-image flash operations according to the device's
    'flash_cmds_order' and inserts the requested reboot action (with a
    feedback read) after any image that asks for one.
    """

    name = "fastboot-flash-order-action"
    description = "Determine support for each flash operation"
    summary = "Handle reset and options for each flash url."

    def __init__(self):
        super().__init__()
        self.retries = 3
        self.sleep = 10
        self.interrupt_prompt = None
        self.interrupt_string = None
        # reboot mode of the last flashed image (read back in validate())
        self.reboot = None

    def populate(self, parameters):
        """Add one FastbootFlashAction per requested image, in device order."""
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        flash_cmds_order = self.job.device['flash_cmds_order']
        userlist = list(parameters['images'].keys())
        # FIX: 'yaml_line' is loader metadata and may be absent; an
        # unconditional list.remove() raised ValueError in that case.
        if 'yaml_line' in userlist:
            userlist.remove('yaml_line')
        # device-ordered commands first, then any extra user-supplied images
        extra_cmds = set(userlist).difference(set(flash_cmds_order))
        flash_cmds = flash_cmds_order + list(extra_cmds)
        # map each per-image 'reboot' option to the action implementing it
        reboot_actions = {
            'fastboot-reboot': FastbootReboot,
            'fastboot-reboot-bootloader': FastbootRebootBootloader,
            'hard-reset': PDUReboot,
        }
        self.internal_pipeline.add_action(ReadFeedback(repeat=True))
        for flash_cmd in flash_cmds:
            if flash_cmd not in parameters['images']:
                continue
            self.internal_pipeline.add_action(
                FastbootFlashAction(cmd=flash_cmd))
            self.reboot = parameters['images'][flash_cmd].get('reboot', None)
            reboot_cls = reboot_actions.get(self.reboot)
            if reboot_cls is not None:
                self.internal_pipeline.add_action(reboot_cls())
                self.internal_pipeline.add_action(ReadFeedback(repeat=True))

    def validate(self):
        """Check the serial number, flash order and fastboot options."""
        super().validate()
        self.set_namespace_data(action=FastbootFlashAction.name,
                                label='interrupt',
                                key='reboot',
                                value=self.reboot)
        if 'fastboot_serial_number' not in self.job.device:
            self.errors = "device fastboot serial number missing"
        elif self.job.device['fastboot_serial_number'] == '0000000000':
            self.errors = "device fastboot serial number unset"
        if 'flash_cmds_order' not in self.job.device:
            self.errors = "device flash commands order missing"
        if 'fastboot_options' not in self.job.device:
            self.errors = "device fastboot options missing"
        elif not isinstance(self.job.device['fastboot_options'], list):
            self.errors = "device fastboot options is not a list"
Exemple #52
0
    def test_overlay_action(self, which_mock):
        """Check BootloaderCommandOverlay placeholder substitution for a
        beaglebone-black u-boot ramdisk job.

        ``which_mock`` is the patched binary lookup injected by the
        surrounding decorator; it is not used directly here.
        """
        # Minimal job definition: a u-boot ramdisk boot plus the deploy
        # artefacts whose URLs get substituted into the boot commands.
        parameters = {
            "dispatcher":
            {},  # fake dispatcher parameter. Normally added by parser
            "device_type": "beaglebone-black",
            "job_name": "uboot-pipeline",
            "job_timeout": "15m",
            "action_timeout": "5m",
            "priority": "medium",
            "actions": {
                "boot": {
                    "namespace": "common",
                    "method": "u-boot",
                    "commands": "ramdisk",
                    "prompts": ["linaro-test", "root@debian:~#"],
                },
                "deploy": {
                    "namespace": "common",
                    "ramdisk": {
                        "url": "initrd.gz",
                        "compression": "gz"
                    },
                    "kernel": {
                        "url": "zImage",
                        "type": "zimage"
                    },
                    "dtb": {
                        "url": "broken.dtb"
                    },
                },
            },
        }
        data = yaml_safe_load(Factory().create_device("bbb-01.jinja2")[0])
        device = NewDevice(data)
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters["actions"]["boot"])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        connection = MagicMock()
        connection.timeout = MagicMock()
        pipeline.add_action(overlay)
        # seed the boot command chosen by the (absent) prepare-kernel action
        overlay.set_namespace_data(
            action="uboot-prepare-kernel",
            label="bootcommand",
            key="bootcommand",
            value="bootz",
        )
        overlay.validate()
        overlay.run(connection, 100)
        ip_addr = dispatcher_ip(None)
        parsed = []
        # NOTE(review): kernel_addr is read from the 'ramdisk' key, so it is
        # identical to ramdisk_addr. The assertions below (zImage fetched at
        # 0x83000000) rely on that value, so it appears deliberate for this
        # fixture - confirm against the bbb-01 device dictionary.
        kernel_addr = job.device["parameters"][overlay.bootcommand]["ramdisk"]
        ramdisk_addr = job.device["parameters"][overlay.bootcommand]["ramdisk"]
        dtb_addr = job.device["parameters"][overlay.bootcommand]["dtb"]
        kernel = parameters["actions"]["deploy"]["kernel"]["url"]
        ramdisk = parameters["actions"]["deploy"]["ramdisk"]["url"]
        dtb = parameters["actions"]["deploy"]["dtb"]["url"]

        substitution_dictionary = {
            "{SERVER_IP}":
            ip_addr,
            # the addresses need to be hexadecimal
            "{KERNEL_ADDR}":
            kernel_addr,
            "{DTB_ADDR}":
            dtb_addr,
            "{RAMDISK_ADDR}":
            ramdisk_addr,
            "{BOOTX}":
            "%s %s %s %s" %
            (overlay.bootcommand, kernel_addr, ramdisk_addr, dtb_addr),
            "{RAMDISK}":
            ramdisk,
            "{KERNEL}":
            kernel,
            "{DTB}":
            dtb,
        }
        params = device["actions"]["boot"]["methods"]
        params["u-boot"]["ramdisk"]["commands"] = substitute(
            params["u-boot"]["ramdisk"]["commands"], substitution_dictionary)

        # after substitution no placeholder may remain in the command list
        commands = params["u-boot"]["ramdisk"]["commands"]
        self.assertIs(type(commands), list)
        self.assertIn("tftp 0x83000000 zImage", commands)
        self.assertIn("tftp 0x83000000 initrd.gz", commands)
        self.assertIn("setenv initrd_size ${filesize}", commands)
        self.assertIn("tftp 0x88000000 broken.dtb", commands)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)

        # manual replacement pass must agree with substitute() above
        for line in params["u-boot"]["ramdisk"]["commands"]:
            line = line.replace("{SERVER_IP}", ip_addr)
            # the addresses need to be hexadecimal
            line = line.replace("{KERNEL_ADDR}", kernel_addr)
            line = line.replace("{DTB_ADDR}", dtb_addr)
            line = line.replace("{RAMDISK_ADDR}", ramdisk_addr)
            line = line.replace(
                "{BOOTX}",
                "%s %s %s %s" %
                (overlay.bootcommand, kernel_addr, ramdisk_addr, dtb_addr),
            )
            line = line.replace("{RAMDISK}", ramdisk)
            line = line.replace("{KERNEL}", kernel)
            line = line.replace("{DTB}", dtb)
            parsed.append(line)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
Exemple #53
0
class DeployIsoAction(DeployAction):
    """
    Prepare an empty image, pull the specified kernel and initrd
    out of the iso using loopback and then start QEMU with the
    ISO as a cdrom and empty image as the destination.
    """

    name = "deploy-iso-installer"
    description = "setup deployment for emulated installer"
    summary = "pull kernel and initrd out of iso"

    def __init__(self):
        """
        Uses the tftp directory for easier cleanup and for parity
        with the non-QEMU Debian Installer support.
        """
        super().__init__()
        self.preseed_path = None

    def validate(self):
        """Publish the trailing two components of the preseed path and
        check the tftp daemon binary is available."""
        super().validate()
        suffix = os.path.join(*self.preseed_path.split("/")[-2:])
        self.set_namespace_data(
            action=self.name, label="iso", key="suffix", value=suffix)
        which("in.tftpd")

    def populate(self, parameters):
        """Download preseed and iso, extract the installer pieces and
        prepare the QEMU command line plus guest overlay."""
        self.preseed_path = self.mkdtemp(override=filesystem.tftpd_dir())
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.pipeline = pipeline
        pipeline.add_action(IsoEmptyImage())
        # the preseed file needs to go into the dispatcher apache tmp directory.
        pipeline.add_action(DownloaderAction(
            "preseed",
            self.preseed_path,
            params=parameters["images"]["preseed"]))
        pipeline.add_action(DownloaderAction(
            "iso",
            self.mkdtemp(),
            params=parameters["images"]["iso"]))
        pipeline.add_action(IsoPullInstaller())
        pipeline.add_action(QemuCommandLine())
        # prepare overlay at this stage - make it available after installation.
        pipeline.add_action(OverlayAction())  # idempotent, includes testdef
        pipeline.add_action(ApplyOverlayGuest())
        pipeline.add_action(DeployDeviceEnvironment())
Exemple #54
0
class PrepareOverlayTftp(Action):
    """
    Extracts the ramdisk or nfsrootfs in preparation for the lava overlay
    """

    name = "prepare-tftp-overlay"
    description = "extract ramdisk or nfsrootfs in preparation for lava overlay"
    summary = "extract ramdisk or nfsrootfs"
    timeout_exception = InfrastructureError

    def populate(self, parameters):
        """Chain the extraction/overlay/compression steps; most child
        actions are idempotent and check for their own parameter."""
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline = pipeline
        pipeline.add_action(ExtractNfsRootfs())   # checks for nfsrootfs parameter
        pipeline.add_action(OverlayAction())      # includes testdef
        pipeline.add_action(ExtractRamdisk())     # checks for a ramdisk parameter
        pipeline.add_action(ExtractModules())     # checks for a modules parameter
        pipeline.add_action(ApplyOverlayTftp())
        if "kernel" in parameters and "type" in parameters["kernel"]:
            pipeline.add_action(PrepareKernelAction())
        pipeline.add_action(ConfigurePreseedFile())  # checks for a preseed parameter
        pipeline.add_action(CompressRamdisk())       # checks for a ramdisk parameter
        if "depthcharge" in self.job.device["actions"]["boot"]["methods"]:
            pipeline.add_action(PrepareKernelAction())

    def run(self, connection, max_end_time):
        """Run the child actions; the ramdisk lookup is informational only
        and the connection is returned unchanged either way."""
        connection = super().run(connection, max_end_time)
        ramdisk = self.get_namespace_data(
            action="download-action", label="file", key="ramdisk")
        if ramdisk:  # nothing else to do
            return connection
        return connection
Exemple #55
0
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        """Check BootloaderCommandOverlay placeholder substitution for a
        beaglebone-black u-boot ramdisk job (legacy device dict variant)."""
        # Minimal job definition with the deploy artefacts to substitute.
        parameters = {
            'device_type': 'beaglebone-black',
            'job_name': 'uboot-pipeline',
            'job_timeout': '15m',
            'action_timeout': '5m',
            'priority': 'medium',
            'actions': {
                'boot': {
                    'method': 'u-boot',
                    'commands': 'ramdisk',
                    'type': 'bootz',
                    'prompts': ['linaro-test', 'root@debian:~#']
                },
                'deploy': {
                    'ramdisk': 'initrd.gz',
                    'kernel': 'zImage',
                    'dtb': 'broken.dtb'
                }
            }
        }
        device = NewDevice(
            os.path.join(os.path.dirname(__file__), '../devices/bbb-01.yaml'))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        parsed = []
        # NOTE(review): kernel_addr is read from the 'ramdisk' key and is
        # therefore identical to ramdisk_addr. The assertions below use the
        # ${kernel_addr_r} setenv form so they do not depend on the value,
        # but confirm this against the bbb-01 device dictionary.
        kernel_addr = job.device['parameters'][
            overlay.parameters['type']]['ramdisk']
        ramdisk_addr = job.device['parameters'][
            overlay.parameters['type']]['ramdisk']
        dtb_addr = job.device['parameters'][overlay.parameters['type']]['dtb']
        kernel = parameters['actions']['deploy']['kernel']
        ramdisk = parameters['actions']['deploy']['ramdisk']
        dtb = parameters['actions']['deploy']['dtb']

        substitution_dictionary = {
            '{SERVER_IP}':
            ip_addr,
            # the addresses need to be hexadecimal
            '{KERNEL_ADDR}':
            kernel_addr,
            '{DTB_ADDR}':
            dtb_addr,
            '{RAMDISK_ADDR}':
            ramdisk_addr,
            '{BOOTX}':
            "%s %s %s %s" %
            (overlay.parameters['type'], kernel_addr, ramdisk_addr, dtb_addr),
            '{RAMDISK}':
            ramdisk,
            '{KERNEL}':
            kernel,
            '{DTB}':
            dtb
        }
        params = device['actions']['boot']['methods']
        params['u-boot']['ramdisk']['commands'] = substitute(
            params['u-boot']['ramdisk']['commands'], substitution_dictionary)

        # after substitution no placeholder may remain in the command list
        commands = params['u-boot']['ramdisk']['commands']
        self.assertIs(type(commands), list)
        self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'",
                      commands)
        self.assertIn(
            "setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'",
            commands)
        self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'",
                      commands)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)

        # manual replacement pass must agree with substitute() above
        for line in params['u-boot']['ramdisk']['commands']:
            line = line.replace('{SERVER_IP}', ip_addr)
            # the addresses need to be hexadecimal
            line = line.replace('{KERNEL_ADDR}', kernel_addr)
            line = line.replace('{DTB_ADDR}', dtb_addr)
            line = line.replace('{RAMDISK_ADDR}', ramdisk_addr)
            line = line.replace(
                '{BOOTX}',
                "%s %s %s %s" % (overlay.parameters['type'], kernel_addr,
                                 ramdisk_addr, dtb_addr))
            line = line.replace('{RAMDISK}', ramdisk)
            line = line.replace('{KERNEL}', kernel)
            line = line.replace('{DTB}', dtb)
            parsed.append(line)
        self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'",
                      parsed)
        self.assertIn(
            "setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'",
            parsed)
        self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", parsed)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
Exemple #56
0
class TestDefinitionAction(Action):
    """Install every test definition of the job into the overlay."""

    name = "test-definition"
    description = "load test definitions into image"
    summary = "loading test definitions"

    def __init__(self):
        """
        The TestDefinitionAction installs each test definition into
        the overlay. It does not execute the scripts in the test
        definition, that is the job of the TestAction class.
        One TestDefinitionAction handles all test definitions for
        the current job.
        In addition, a TestOverlayAction is added to the pipeline
        to handle parts of the overlay which are test definition dependent.
        """
        super().__init__()
        # nested list of test definitions, grouped per stage
        self.test_list = None
        # number of stages (one per group in test_list)
        self.stages = 0
        # maps test definition name -> stage index it runs in
        self.run_levels = {}

    def populate(self, parameters):
        """
        Each time a test definition is processed by a handler, a new set of
        overlay files are needed, based on that test definition. Basic overlay
        files are created by TestOverlayAction. More complex scripts like the
        install:deps script and the main run script have custom Actions.
        """
        index = []
        self.pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.test_list = identify_test_definitions(
            self.job.test_info, parameters["namespace"]
        )
        if self.test_list:
            self.set_namespace_data(
                action=self.name,
                label=self.name,
                key="test_list",
                value=self.test_list,
                parameters=parameters,
            )
        for testdefs in self.test_list:
            for testdef in testdefs:
                # namespace support allows only running the install steps for the relevant
                # deployment as the next deployment could be a different OS.
                handler = RepoAction.select(testdef["from"])()

                # set the full set of job YAML parameters for this handler as handler parameters.
                handler.job = self.job
                handler.parameters = testdef
                # store the correct test_name before appending to the local index
                handler.parameters["test_name"] = "%s_%s" % (
                    len(index),
                    handler.parameters["name"],
                )
                self.pipeline.add_action(handler)
                # a genuinely unique ID based on the *database* JobID and
                # pipeline level for reproducibility and tracking -
                # {DB-JobID}_{PipelineLevel}, e.g. 15432.0_3.5.4
                handler.uuid = "%s_%s" % (self.job.job_id, handler.level)
                handler.stage = self.stages
                self.run_levels[testdef["name"]] = self.stages

                # copy details into the overlay, one per handler but the same class each time.
                overlay = TestOverlayAction()
                overlay.job = self.job
                overlay.parameters = testdef
                overlay.parameters["test_name"] = handler.parameters["test_name"]
                overlay.test_uuid = handler.uuid

                # add install handler - uses job parameters
                installer = TestInstallAction()
                installer.job = self.job
                installer.parameters = testdef
                installer.parameters["test_name"] = handler.parameters["test_name"]
                installer.test_uuid = handler.uuid

                # add runsh handler - uses job parameters
                runsh = TestRunnerAction()
                runsh.job = self.job
                runsh.parameters = testdef
                runsh.parameters["test_name"] = handler.parameters["test_name"]
                runsh.test_uuid = handler.uuid

                index.append(handler.parameters["name"])

                # add overlay handlers to the pipeline
                self.pipeline.add_action(overlay)
                self.pipeline.add_action(installer)
                self.pipeline.add_action(runsh)
                # publish the running index so later actions can map
                # test_name prefixes back to definitions
                self.set_namespace_data(
                    action="test-definition",
                    label="test-definition",
                    key="testdef_index",
                    value=index,
                    parameters=parameters,
                )
            self.stages += 1

    def validate(self):
        """
        TestDefinitionAction is part of the overlay and therefore part of the deployment -
        the internal pipeline then looks inside the job definition for details of the tests to deploy.
        Jobs with no test actions defined (empty test_list) are explicitly allowed.
        """
        if not self.job:
            self.errors = "missing job object"
            return
        if "actions" not in self.job.parameters:
            self.errors = "No actions defined in job parameters"
            return
        if not self.test_list:
            return

        # each definition needs a 'from' source, a well-formed name and
        # (optionally) a dict of parameters
        exp = re.compile(DEFAULT_TESTDEF_NAME_CLASS)
        for testdefs in self.test_list:
            for testdef in testdefs:
                if "parameters" in testdef:  # optional
                    if not isinstance(testdef["parameters"], dict):
                        self.errors = "Invalid test definition parameters"
                if "from" not in testdef:
                    self.errors = "missing 'from' field in test definition %s" % testdef
                if "name" not in testdef:
                    self.errors = "missing 'name' field in test definition %s" % testdef
                else:
                    res = exp.match(testdef["name"])
                    if not res:
                        self.errors = (
                            "Invalid characters found in test definition name: %s"
                            % testdef["name"]
                        )
        super().validate()
        # confirm a handler exists for every 'from' source
        for testdefs in self.test_list:
            for testdef in testdefs:
                try:
                    RepoAction.select(testdef["from"])()
                except JobError as exc:
                    self.errors = str(exc)

    def run(self, connection, max_end_time):
        """
        Creates the list of test definitions for this Test

        :param connection: Connection object, if any.
        :param max_end_time: remaining time before block timeout.
        :return: the received Connection.
        """
        location = self.get_namespace_data(
            action="test", label="shared", key="location"
        )
        lava_test_results_dir = self.get_namespace_data(
            action="test", label="results", key="lava_test_results_dir"
        )
        if not location:
            raise LAVABug("Missing lava overlay location")
        if not os.path.exists(location):
            raise LAVABug("Unable to find overlay location")
        self.logger.info("Loading test definitions")

        # overlay_path is the location of the files before boot
        overlay_base = os.path.abspath("%s/%s" % (location, lava_test_results_dir))
        self.set_namespace_data(
            action="test",
            label="test-definition",
            key="overlay_dir",
            value=overlay_base,
        )

        connection = super().run(connection, max_end_time)

        # one lava-test-runner.conf per stage, listing its handlers in order
        self.logger.info("Creating lava-test-runner.conf files")
        for stage in range(self.stages):
            path = "%s/%s" % (overlay_base, stage)
            self.logger.debug(
                "Using lava-test-runner path: %s for stage %d", path, stage
            )
            with open(
                "%s/%s/lava-test-runner.conf" % (overlay_base, stage), "a"
            ) as runner_conf:
                for handler in self.pipeline.actions:
                    if isinstance(handler, RepoAction) and handler.stage == stage:
                        self.logger.debug("- %s", handler.parameters["test_name"])
                        runner_conf.write(handler.runner)

        return connection
Exemple #57
0
class BootGDBRetry(RetryAction):
    """
    Boot the device under gdb, optionally running gdb inside a docker
    container, with the whole sequence retried on failure.
    """

    name = "boot-gdb-retry"
    description = "boot with gdb with retry and optional docker support"
    summary = "boot with gdb with retry"

    def __init__(self):
        super().__init__()
        self.gdb = None  # path to the gdb binary, from the device configuration
        self.gdb_connection = None  # ShellSession wrapping the running gdb process
        self.commands = []  # gdb commands sent after start-up
        self.arguments = []  # extra command-line arguments for the gdb invocation
        self.wait_before_continue = 0  # seconds to sleep before sending "continue"
        self.container = None  # docker container image, when docker is used
        self.devices = []  # host device nodes passed through to the container

    def validate(self):
        """
        Check the device configuration and job parameters.

        Problems are recorded in self.errors rather than raised.
        """
        super().validate()
        method = self.job.device["actions"]["boot"]["methods"]["gdb"]
        if "parameters" not in method:
            self.errors = '"parameters" not defined in device configuration'
            return
        if "command" not in method["parameters"]:
            self.errors = (
                '"command" not defined under "parameters" in device configuration'
            )
            return
        self.gdb = method["parameters"]["command"]
        # presumably which() records an error when the binary is missing
        # from the dispatcher PATH -- confirm against its definition
        which(self.gdb)

        # The job selects one named command set from the device method
        commands = self.parameters["commands"]
        if commands not in method:
            self.errors = "'%s' not available" % commands
            return
        self.commands = method[commands].get("commands")
        if not isinstance(self.commands, list):
            self.errors = "'commands' should be a list"

        self.arguments = method[commands].get("arguments")
        if not isinstance(self.arguments, list):
            self.errors = "'arguments' should be a list"
        self.wait_before_continue = method["parameters"].get(
            "wait_before_continue", 0)

        # If this is defined, we have to use docker
        if method[commands].get("docker", {}).get("use", False):
            which("docker")
            # the job-level "container" parameter overrides the device default
            self.container = method[commands]["docker"].get("container")
            self.container = self.parameters.get("container", self.container)
            if self.container is None:
                self.errors = "a docker container should be defined"
            self.devices = method[commands]["docker"].get("devices", [])
        elif self.parameters.get("container"):
            self.errors = (
                "Requesting a docker container while docker is not used for this device"
            )

    def populate(self, parameters):
        # Reset the board (when supported), wait for its USB serial device
        # and connect to the serial console before gdb is started in run().
        self.pipeline = Pipeline(parent=self,
                                 job=self.job,
                                 parameters=parameters)
        if self.job.device.hard_reset_command:
            self.pipeline.add_action(ResetDevice())
        self.pipeline.add_action(WaitUSBSerialDeviceAction())
        self.pipeline.add_action(ConnectDevice())

    def run(self, connection, max_end_time):
        """
        Build the gdb (or docker-wrapped gdb) command line, start it, send
        the configured commands, then send "continue" last.

        :param connection: current Connection, passed through unchanged.
        :param max_end_time: deadline handed to the parent run().
        :return: the received Connection.
        """
        connection = super().run(connection, max_end_time)

        # Build the substitutions dictionary
        # e.g. "{KERNEL}" -> path of the corresponding downloaded file
        substitutions = {}
        paths = set()
        for action in self.get_namespace_keys("download-action"):
            filename = self.get_namespace_data(action="download-action",
                                               label=action,
                                               key="file")
            if filename is None:
                self.logger.warning(
                    "Empty value for action='download-action' label='%s' key='file'",
                    action,
                )
                continue
            substitutions["{%s}" % action.upper()] = filename
            # remember the directories so they can be bind-mounted into docker
            paths.add(os.path.dirname(filename))

        # If needed, prepend with docker
        if self.container is None:
            cmd = self.gdb
        else:
            # Name the container after the job id and action level so that
            # cleanup() can stop exactly this container.
            cmd = "docker run --rm -it --name lava-%s-%s" % (
                self.job.job_id,
                self.level,
            )
            for path in paths:
                cmd += " --volume %s:%s" % (path, path)
            for device in self.devices:
                cmd += " --device %s:%s:rw" % (device, device)
            cmd += " %s %s" % (self.container, self.gdb)

        for arg in substitute(self.arguments, substitutions):
            cmd += " " + arg

        # Start gdb
        self.logger.info("Starting gdb: %s", cmd)
        shell = ShellCommand(cmd, self.timeout, logger=self.logger)
        gdb = ShellSession(self.job, shell)
        # wait for the interactive "(gdb) " prompt before sending anything
        gdb.prompt_str = "\\(gdb\\) "
        self.gdb_connection = gdb
        self.gdb_connection.wait()

        # Send all gdb commands
        for cmd in substitute(self.commands, substitutions):
            self.gdb_connection.sendline(cmd)
            self.gdb_connection.wait()

        # "continue" is send last
        if self.wait_before_continue:
            self.logger.debug("Sleeping %ss before sending 'continue'",
                              self.wait_before_continue)
            time.sleep(self.wait_before_continue)
        self.gdb_connection.sendline("continue")

        return connection

    def cleanup(self, connection):
        """
        Quit gdb cleanly if it is still alive; fall back to killing the
        process (or stopping the docker container) when that fails.
        """
        if self.gdb_connection is None:
            return
        if self.gdb_connection.raw_connection.isalive():
            self.logger.info("Stopping gdb cleanly")
            try:
                # "set confirm no" stops gdb prompting before "quit"
                self.gdb_connection.wait(max_end_time=time.time() + 1)
                self.gdb_connection.sendline("set confirm no")
                self.gdb_connection.wait(max_end_time=time.time() + 1)
                self.gdb_connection.sendline("quit")
            except JobError:
                self.logger.warning("Unable to quit gdb, killing the process")
            finally:
                # Do not call finalise when using docker or this will kill
                # docker itself and not the underlying gdb.
                if self.container is None:
                    self.gdb_connection.finalise()
                else:
                    name = "lava-%s-%s" % (self.job.job_id, self.level)
                    self.logger.debug("Stopping container %s", name)
                    self.run_command(["docker", "stop", name], allow_fail=True)
Exemple #58
0
class PrepareOverlayTftp(Action):
    """
    Extracts the ramdisk or nfsrootfs in preparation for the lava overlay
    """

    name = "prepare-tftp-overlay"
    description = "extract ramdisk or nfsrootfs in preparation for lava overlay"
    summary = "extract ramdisk or nfsrootfs"

    def populate(self, parameters):
        """Build the internal pipeline of extraction and overlay actions."""
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        self.internal_pipeline.add_action(
            ExtractNfsRootfs())  # idempotent, checks for nfsrootfs parameter
        self.internal_pipeline.add_action(
            OverlayAction())  # idempotent, includes testdef
        self.internal_pipeline.add_action(
            ExtractRamdisk())  # idempotent, checks for a ramdisk parameter
        self.internal_pipeline.add_action(
            ExtractModules())  # idempotent, checks for a modules parameter
        self.internal_pipeline.add_action(ApplyOverlayTftp())
        if 'kernel' in parameters and 'type' in parameters['kernel']:
            self.internal_pipeline.add_action(PrepareKernelAction())
        self.internal_pipeline.add_action(ConfigurePreseedFile(
        ))  # idempotent, checks for a preseed parameter
        self.internal_pipeline.add_action(
            CompressRamdisk())  # idempotent, checks for a ramdisk parameter
        # NOTE(review): when both a typed kernel and a depthcharge boot method
        # are configured, PrepareKernelAction is added twice -- confirm this
        # duplication is intended.
        if 'depthcharge' in self.job.device['actions']['boot']['methods']:
            self.internal_pipeline.add_action(PrepareKernelAction())

    def run(self, connection, max_end_time, args=None):
        """
        Run the internal pipeline and return the connection.

        The previous implementation looked up the downloaded ramdisk and then
        returned the connection from both branches of a conditional; the
        unused lookup and the dead branch have been removed (no behaviour
        change).
        """
        connection = super(PrepareOverlayTftp,
                           self).run(connection, max_end_time, args)
        return connection
Exemple #59
0
    def test_overlay_action(self):  # pylint: disable=too-many-locals
        """
        Check that BootloaderCommandOverlay substitutions populate the
        u-boot ramdisk command list with real values, both via substitute()
        and via manual string replacement.

        Bug fix: kernel_addr previously read the "ramdisk" address from the
        device parameters (copy-paste error); it now reads "kernel".
        """
        parameters = {
            "device_type": "beaglebone-black",
            "job_name": "uboot-pipeline",
            "job_timeout": "15m",
            "action_timeout": "5m",
            "priority": "medium",
            "actions": {
                "boot": {
                    "method": "u-boot",
                    "commands": "ramdisk",
                    "type": "bootz",
                    "prompts": ["linaro-test", "root@debian:~#"],
                },
                "deploy": {
                    "ramdisk": "initrd.gz",
                    "kernel": "zImage",
                    "dtb": "broken.dtb",
                },
            },
        }
        device = NewDevice(
            os.path.join(os.path.dirname(__file__), "devices/bbb-01.yaml"))
        job = Job(4212, parameters, None)
        job.device = device
        pipeline = Pipeline(job=job, parameters=parameters["actions"]["boot"])
        job.pipeline = pipeline
        overlay = BootloaderCommandOverlay()
        pipeline.add_action(overlay)
        ip_addr = dispatcher_ip(None)
        parsed = []
        # read each load address from its own key in the device parameters
        kernel_addr = job.device["parameters"][
            overlay.parameters["type"]]["kernel"]
        ramdisk_addr = job.device["parameters"][
            overlay.parameters["type"]]["ramdisk"]
        dtb_addr = job.device["parameters"][overlay.parameters["type"]]["dtb"]
        kernel = parameters["actions"]["deploy"]["kernel"]
        ramdisk = parameters["actions"]["deploy"]["ramdisk"]
        dtb = parameters["actions"]["deploy"]["dtb"]

        substitution_dictionary = {
            "{SERVER_IP}":
            ip_addr,
            # the addresses need to be hexadecimal
            "{KERNEL_ADDR}":
            kernel_addr,
            "{DTB_ADDR}":
            dtb_addr,
            "{RAMDISK_ADDR}":
            ramdisk_addr,
            "{BOOTX}":
            "%s %s %s %s" %
            (overlay.parameters["type"], kernel_addr, ramdisk_addr, dtb_addr),
            "{RAMDISK}":
            ramdisk,
            "{KERNEL}":
            kernel,
            "{DTB}":
            dtb,
        }
        params = device["actions"]["boot"]["methods"]
        params["u-boot"]["ramdisk"]["commands"] = substitute(
            params["u-boot"]["ramdisk"]["commands"], substitution_dictionary)

        commands = params["u-boot"]["ramdisk"]["commands"]
        self.assertIs(type(commands), list)
        self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'",
                      commands)
        self.assertIn(
            "setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'",
            commands,
        )
        self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'",
                      commands)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", commands)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", commands)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", commands)

        # repeat the substitution by hand to cross-check substitute()
        for line in params["u-boot"]["ramdisk"]["commands"]:
            line = line.replace("{SERVER_IP}", ip_addr)
            # the addresses need to be hexadecimal
            line = line.replace("{KERNEL_ADDR}", kernel_addr)
            line = line.replace("{DTB_ADDR}", dtb_addr)
            line = line.replace("{RAMDISK_ADDR}", ramdisk_addr)
            line = line.replace(
                "{BOOTX}",
                "%s %s %s %s" % (overlay.parameters["type"], kernel_addr,
                                 ramdisk_addr, dtb_addr),
            )
            line = line.replace("{RAMDISK}", ramdisk)
            line = line.replace("{KERNEL}", kernel)
            line = line.replace("{DTB}", dtb)
            parsed.append(line)
        self.assertIn("setenv loadkernel 'tftp ${kernel_addr_r} zImage'",
                      parsed)
        self.assertIn(
            "setenv loadinitrd 'tftp ${initrd_addr_r} initrd.gz; setenv initrd_size ${filesize}'",
            parsed,
        )
        self.assertIn("setenv loadfdt 'tftp ${fdt_addr_r} broken.dtb'", parsed)
        self.assertNotIn("setenv kernel_addr_r '{KERNEL_ADDR}'", parsed)
        self.assertNotIn("setenv initrd_addr_r '{RAMDISK_ADDR}'", parsed)
        self.assertNotIn("setenv fdt_addr_r '{DTB_ADDR}'", parsed)
Exemple #60
0
class DownloadAction(DeployAction):  # pylint:disable=too-many-instance-attributes
    """
    Download the deploy images and copy them to the LXC if available.
    """

    name = "download-deploy"
    description = "download files and copy to LXC if available"
    summary = "download deployment"

    def __init__(self):
        super().__init__()
        # Temporary directory receiving the downloads; created in populate().
        self.download_dir = None

    def validate(self):
        """Publish the download directory into the namespace data."""
        super().validate()
        self.set_namespace_data(action=self.name,
                                label='download-dir',
                                key='dir',
                                value=self.download_dir)

    def populate(self, parameters):
        """
        Build the internal pipeline: power on the device using the
        appropriate strategy, then download every image and apply the
        overlay before copying to the LXC.
        """
        self.internal_pipeline = Pipeline(parent=self,
                                          job=self.job,
                                          parameters=parameters)
        # Check if the device has a power command such as HiKey, Dragonboard,
        # etc. against device that doesn't like Nexus, etc.
        # This is required in order to power on the device so that when the
        # test job writer wants to perform some operation using a
        # lava-test-shell action that follows, this becomes mandatory. Think of
        # issuing any fastboot commands on the powered on device.
        #
        # NOTE: Add more power on strategies, if required for specific devices.
        if self.job.device.get('fastboot_via_uboot', False):
            self.internal_pipeline.add_action(ConnectDevice())
            self.internal_pipeline.add_action(UBootEnterFastbootAction())
        elif self.job.device.hard_reset_command:
            self.force_prompt = True
            self.internal_pipeline.add_action(ConnectDevice())
            self.internal_pipeline.add_action(ResetDevice())
        else:
            self.internal_pipeline.add_action(EnterFastbootAction())

        self.download_dir = self.mkdtemp()
        # one DownloaderAction per image, in a stable (sorted) order;
        # 'yaml_line' is parser metadata, not an image entry
        image_keys = sorted(parameters['images'].keys())
        for image in image_keys:
            if image != 'yaml_line':
                self.internal_pipeline.add_action(
                    DownloaderAction(image, self.download_dir))
        if self.test_needs_overlay(parameters):
            self.internal_pipeline.add_action(OverlayAction())
        self.internal_pipeline.add_action(CopyToLxcAction())