def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(FastbootBootAction())
    self.internal_pipeline.add_action(ConnectLxc())
    self.internal_pipeline.add_action(WaitForAdbDevice())

class LxcAction(DeployAction):  # pylint: disable=too-many-instance-attributes

    def __init__(self):
        super(LxcAction, self).__init__()
        self.name = "lxc-deploy"
        self.description = "download files and deploy using lxc"
        self.summary = "lxc deployment"

    def validate(self):
        super(LxcAction, self).validate()
        if LxcProtocol.name not in [protocol.name for protocol in self.job.protocols]:
            self.errors = "Invalid job - missing protocol"
        self.errors = infrastructure_error('lxc-create')
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.protocols = [protocol for protocol in self.job.protocols
                          if protocol.name == LxcProtocol.name]
        self.set_common_data('lxc', 'name', self.protocols[0].lxc_name)
        self.set_common_data('lxc', 'distribution', self.protocols[0].lxc_dist)
        self.set_common_data('lxc', 'release', self.protocols[0].lxc_release)
        self.set_common_data('lxc', 'arch', self.protocols[0].lxc_arch)
        self.internal_pipeline.add_action(OverlayAction())
        self.internal_pipeline.add_action(LxcCreateAction())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    # the logic here can be upgraded in future if needed with more parameters to the deploy.
    if 'u-boot' in self.job.device['actions']['boot']['methods']:
        self.internal_pipeline.add_action(UBootPrepareKernelAction())

class PrepareOverlayScp(Action):
    """
    Copy the overlay to the device using scp and then unpack remotely.
    Needs the device to be ready for SSH connection.
    """

    def __init__(self):
        super(PrepareOverlayScp, self).__init__()
        self.name = "prepare-scp-overlay"
        self.summary = "scp the overlay to the remote device"
        self.description = "copy the overlay over an existing ssh connection"

    def validate(self):
        super(PrepareOverlayScp, self).validate()
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
        environment = self.get_common_data('environment', 'env_dict')
        if not environment:
            environment = {}
        environment.update({"LC_ALL": "C.UTF-8", "LANG": "C"})
        self.set_common_data('environment', 'env_dict', environment)

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(ExtractRootfs())  # idempotent, checks for nfsrootfs parameter
        self.internal_pipeline.add_action(ExtractModules())  # idempotent, checks for a modules parameter

    def run(self, connection, args=None):
        connection = super(PrepareOverlayScp, self).run(connection, args)
        self.logger.info("Preparing to copy: %s" % os.path.basename(self.data['compress-overlay'].get('output')))
        self.set_common_data('scp-deploy', 'overlay', self.data['compress-overlay'].get('output'))
        return connection

class DownloaderAction(RetryAction):
    """
    The retry pipeline for downloads.
    To allow any deploy action to work with multinode, each call *must* set a unique path.
    """

    def __init__(self, key, path):
        super(DownloaderAction, self).__init__()
        self.name = "download_retry"
        self.description = "download with retry"
        self.summary = "download-retry"
        self.key = key  # the key in the parameters of what to download
        self.path = path  # where to download

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Find the right action according to the url
        url = urlparse.urlparse(parameters[self.key])
        if url.scheme == "scp":
            action = ScpDownloadAction(self.key, self.path, url)
        elif url.scheme == "http" or url.scheme == "https":
            action = HttpDownloadAction(self.key, self.path, url)
        elif url.scheme == "file":
            action = FileDownloadAction(self.key, self.path, url)
        else:
            raise JobError("Unsupported url protocol scheme: %s" % url.scheme)
        self.internal_pipeline.add_action(action)

class DownloaderAction(RetryAction):
    """
    The retry pipeline for downloads.
    To allow any deploy action to work with multinode, each call *must* set a unique path.
    """

    def __init__(self, key, path):
        super(DownloaderAction, self).__init__()
        self.name = "download_retry"
        self.description = "download with retry"
        self.summary = "download-retry"
        self.key = key  # the key in the parameters of what to download
        self.path = path  # where to download

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Find the right action according to the url
        if 'images' in parameters and self.key in parameters['images']:
            url = lavaurl.urlparse(parameters['images'][self.key]['url'])
        else:
            url = lavaurl.urlparse(parameters[self.key]['url'])
        if url.scheme == 'scp':
            action = ScpDownloadAction(self.key, self.path, url)
        elif url.scheme == 'http' or url.scheme == 'https':
            action = HttpDownloadAction(self.key, self.path, url)  # pylint: disable=redefined-variable-type
        elif url.scheme == 'file':
            action = FileDownloadAction(self.key, self.path, url)  # pylint: disable=redefined-variable-type
        else:
            raise JobError("Unsupported url protocol scheme: %s" % url.scheme)
        self.internal_pipeline.add_action(action)

class MountAction(DeployAction):
    """
    Depending on the type of deployment, this needs to perform
    an OffsetAction, LoopCheckAction, LoopMountAction
    """

    def __init__(self):
        super(MountAction, self).__init__()
        self.name = "mount_action"
        self.description = "mount with offset"
        self.summary = "mount loop"

    def validate(self):
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        self.internal_pipeline.validate_actions()

    def populate(self, parameters):
        """
        Needs to take account of the deployment type / image type etc.
        to determine which actions need to be added to the internal pipeline
        as part of the deployment selection step.
        """
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        # FIXME: not all mount operations will need these actions
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(OffsetAction('image'))
        # FIXME: LoopCheckAction and LoopMountAction should be in only one Action
        self.internal_pipeline.add_action(LoopCheckAction())
        self.internal_pipeline.add_action(LoopMountAction())

class DownloaderAction(RetryAction):
    """
    The retry pipeline for downloads.
    To allow any deploy action to work with multinode, each call *must* set a unique path.
    """

    def __init__(self, key, path):
        super(DownloaderAction, self).__init__()
        self.name = "download-retry"
        self.description = "download with retry"
        self.summary = "download-retry"
        self.key = key  # the key in the parameters of what to download
        self.path = path  # where to download

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Find the right action according to the url
        if 'images' in parameters and self.key in parameters['images']:
            url = lavaurl.urlparse(parameters['images'][self.key]['url'])
        else:
            url = lavaurl.urlparse(parameters[self.key]['url'])
        if url.scheme == 'scp':
            action = ScpDownloadAction(self.key, self.path, url)
        elif url.scheme == 'http' or url.scheme == 'https':
            action = HttpDownloadAction(self.key, self.path, url)  # pylint: disable=redefined-variable-type
        elif url.scheme == 'file':
            action = FileDownloadAction(self.key, self.path, url)  # pylint: disable=redefined-variable-type
        else:
            raise JobError("Unsupported url protocol scheme: %s" % url.scheme)
        self.internal_pipeline.add_action(action)

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    tar_flags = parameters['deployment_data']['tar_flags'] \
        if 'tar_flags' in parameters['deployment_data'].keys() else ''
    self.set_namespace_data(action=self.name, label=self.name, key='tar_flags',
                            value=tar_flags, parameters=parameters)
    self.internal_pipeline.add_action(OverlayAction())
    for item in self.items:
        if item in parameters:
            self.internal_pipeline.add_action(
                DownloaderAction(item, path=self.mkdtemp()), parameters)
            self.set_namespace_data(action=self.name, label='scp', key=item,
                                    value=True, parameters=parameters)
    # we might not have anything to download, just the overlay to push
    self.internal_pipeline.add_action(PrepareOverlayScp())
    # prepare the device environment settings in common data for enabling in the boot step
    self.internal_pipeline.add_action(DeployDeviceEnvironment())

def populate(self, parameters):
    self.expect_shell = parameters.get('expect_shell', True)
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(BootloaderCommandOverlay())
    self.internal_pipeline.add_action(ConnectDevice())
    self.internal_pipeline.add_action(ResetDevice())
    if parameters['method'] == 'grub-efi':
        self.internal_pipeline.add_action(UEFIMenuInterrupt())
        self.internal_pipeline.add_action(GrubMenuSelector())
    self.internal_pipeline.add_action(BootloaderInterrupt())
    self.internal_pipeline.add_action(BootloaderCommandsAction())
    if self.has_prompts(parameters):
        self.internal_pipeline.add_action(AutoLoginAction())
        if self.test_has_shell(parameters):
            self.internal_pipeline.add_action(ExpectShellSession())
            if 'transfer_overlay' in parameters:
                self.internal_pipeline.add_action(OverlayUnpack())
            self.internal_pipeline.add_action(ExportDeviceEnvironment())
    else:
        if self.has_boot_finished(parameters):
            self.logger.debug("Doing a boot without a shell (installer)")
            self.internal_pipeline.add_action(InstallerWait())
            self.internal_pipeline.add_action(PowerOff())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(OverlayAction())
    if hasattr(self.job.device, 'power_state'):
        if self.job.device.power_state in ['on', 'off']:
            self.internal_pipeline.add_action(ConnectDevice())
            self.internal_pipeline.add_action(PowerOn())
    self.internal_pipeline.add_action(EnterFastbootAction())
    self.internal_pipeline.add_action(LxcAddDeviceAction())
    image_keys = list(parameters['images'].keys())
    fastboot_dir = self.mkdtemp()
    # Add the required actions
    checks = [('image', FastbootUpdateAction),
              ('ptable', ApplyPtableAction),
              ('boot', ApplyBootAction),
              ('cache', ApplyCacheAction),
              ('userdata', ApplyUserdataAction),
              ('system', ApplySystemAction),
              ('vendor', ApplyVendorAction)]
    for (key, cls) in checks:
        if key in image_keys:
            download = DownloaderAction(key, fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(cls())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    # customize the device configuration for this job
    self.internal_pipeline.add_action(UBootSecondaryMedia())
    self.internal_pipeline.add_action(BootloaderCommandOverlay())
    self.internal_pipeline.add_action(ConnectDevice())
    self.internal_pipeline.add_action(UBootRetry())

def populate(self, parameters):
    """
    Each time a test definition is processed by a handler, a new set of
    overlay files are needed, based on that test definition. Basic overlay
    files are created by TestOverlayAction. More complex scripts like the
    install:deps script and the main run script have custom Actions.
    """
    index = OrderedDict()
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.test_list = identify_test_definitions(self.job.parameters)
    if not self.test_list:
        return
    self.set_common_data(self.name, 'test_list', self.test_list[0])
    for testdefs in self.test_list:
        for testdef in testdefs:
            # namespace support allows only running the install steps for the relevant
            # deployment as the next deployment could be a different OS.
            handler = RepoAction.select(testdef['from'])()
            # set the full set of job YAML parameters for this handler as handler parameters.
            handler.job = self.job
            handler.parameters = testdef
            # store the correct test_name before incrementing the local index dict
            handler.parameters['test_name'] = "%s_%s" % (len(list(index.keys())),
                                                         handler.parameters['name'])
            self.internal_pipeline.add_action(handler)
            handler.uuid = "%s_%s" % (self.job.job_id, handler.level)

            # copy details into the overlay, one per handler but the same class each time.
            overlay = TestOverlayAction()
            overlay.job = self.job
            overlay.parameters = testdef
            overlay.parameters['test_name'] = handler.parameters['test_name']
            overlay.test_uuid = handler.uuid

            # add install handler - uses job parameters
            installer = TestInstallAction()
            installer.job = self.job
            installer.parameters = testdef
            installer.parameters['test_name'] = handler.parameters['test_name']
            installer.test_uuid = handler.uuid

            # add runsh handler - uses job parameters
            runsh = TestRunnerAction()
            runsh.job = self.job
            runsh.parameters = testdef
            runsh.parameters['test_name'] = handler.parameters['test_name']
            runsh.test_uuid = handler.uuid

            index[len(list(index.keys()))] = handler.parameters['name']

            # add overlay handlers to the pipeline
            self.internal_pipeline.add_action(overlay)
            self.internal_pipeline.add_action(installer)
            self.internal_pipeline.add_action(runsh)
    self.set_common_data(self.name, 'testdef_index', index)

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    path = self.mkdtemp()
    if 'uefi' in parameters:
        uefi_path = self.mkdtemp()
        download = DownloaderAction('uefi', uefi_path)
        download.max_retries = 3
        self.internal_pipeline.add_action(download)
        # uefi option of QEMU needs a directory, not the filename
        self.set_common_data('image', 'uefi_dir', uefi_path)  # just the path, not the filename
        # alternatively use the -bios option and standard image args
    for image in parameters['images'].keys():
        if image != 'yaml_line':
            download = DownloaderAction(image, path)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            if parameters['images'][image].get('format', '') == 'qcow2':
                self.internal_pipeline.add_action(QCowConversionAction(image))
    self.internal_pipeline.add_action(CustomisationAction())
    self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
    self.internal_pipeline.add_action(ApplyOverlayGuest())
    self.internal_pipeline.add_action(DeployDeviceEnvironment())

class BootCMSISRetry(RetryAction):

    def __init__(self):
        super(BootCMSISRetry, self).__init__()
        self.name = 'boot-cmsis-retry'
        self.description = "boot cmsis usb image with retry"
        self.summary = "boot cmsis usb image with retry"

    def validate(self):
        super(BootCMSISRetry, self).validate()
        method_params = self.job.device['actions']['boot']['methods']['cmsis-dap']['parameters']
        usb_mass_device = method_params.get('usb_mass_device', None)
        if not usb_mass_device:
            self.errors = "usb_mass_device unset"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        method_params = self.job.device['actions']['boot']['methods']['cmsis-dap']['parameters']
        usb_mass_device = method_params.get('usb_mass_device', None)
        resets_after_flash = method_params.get('resets_after_flash', True)
        if self.job.device.hard_reset_command:
            self.internal_pipeline.add_action(ResetDevice())
            self.internal_pipeline.add_action(WaitDevicePathAction(usb_mass_device))
        self.internal_pipeline.add_action(FlashCMSISAction())
        if resets_after_flash:
            self.internal_pipeline.add_action(WaitUSBSerialDeviceAction())
        self.internal_pipeline.add_action(ConnectDevice())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    # customize the device configuration for this job
    self.internal_pipeline.add_action(ConnectDevice())
    self.internal_pipeline.add_action(BootloaderDefaultsRetry())

class BootQemuRetry(RetryAction):

    def __init__(self):
        super(BootQemuRetry, self).__init__()
        self.name = 'boot_qemu_image'
        self.description = "boot image using QEMU command line"
        self.summary = "boot QEMU image"

    def validate(self):
        super(BootQemuRetry, self).validate()
        try:
            # FIXME: need a schema and do this inside the NewDevice with a QemuDevice class? (just for parsing)
            boot = self.job.device['actions']['boot']['methods']['qemu']
            qemu_binary = which(boot['parameters']['command'])
            command = [qemu_binary]
            command.extend(boot['parameters'].get('options', []))
            self.set_common_data('qemu-command', 'command', command)
        # FIXME: AttributeError is an InfrastructureError in fact
        except (KeyError, TypeError, AttributeError):
            self.errors = "Invalid parameters"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(CallQemuAction())

def test_add_action_to_pipeline(self):
    action = Action()
    action.name = "test-action"
    action.description = "test action only"
    action.summary = "starter"
    self.assertEqual(action.description, "test action only")
    self.assertEqual(action.summary, "starter")
    # action needs to be added to a top level pipe first
    with self.assertRaises(LAVABug):
        Pipeline(action)
    pipe = Pipeline()
    with self.assertRaises(LAVABug):
        pipe.add_action(None)
    with self.assertRaises(LAVABug):
        pipe.add_action(pipe)
    pipe.add_action(action)
    self.assertEqual(pipe.actions, [action])
    self.assertEqual(action.level, "1")
    try:
        description = pipe.describe()
    except Exception as exc:  # pylint: disable=broad-except
        self.fail(exc)
    self.assertIsNotNone(description)
    self.assertIsInstance(description, list)
    self.assertIn('description', description[0])
    self.assertIn('level', description[0])
    self.assertIn('summary', description[0])
    self.assertIn('max_retries', description[0])
    self.assertIn('timeout', description[0])

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    if 'ramdisk' in parameters:
        download = DownloaderAction('ramdisk', path=self.tftp_dir)
        download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
        self.internal_pipeline.add_action(download)
        self.set_common_data('tftp', 'ramdisk', True)
    if 'kernel' in parameters:
        download = DownloaderAction('kernel', path=self.tftp_dir)
        download.max_retries = 3
        self.internal_pipeline.add_action(download)
    if 'dtb' in parameters:
        download = DownloaderAction('dtb', path=self.tftp_dir)
        download.max_retries = 3
        self.internal_pipeline.add_action(download)
    if 'nfsrootfs' in parameters:
        download = DownloaderAction('nfsrootfs', path=self.download_dir)
        download.max_retries = 3
        self.internal_pipeline.add_action(download)
    if 'modules' in parameters:
        download = DownloaderAction('modules', path=self.tftp_dir)
        download.max_retries = 3
        self.internal_pipeline.add_action(download)
    # TftpAction is a deployment, so once the files are in place, just do the overlay
    self.internal_pipeline.add_action(PrepareOverlayTftp())
    self.internal_pipeline.add_action(DeployDeviceEnvironment())

class FakeAction(Action):
    """
    Isolated Action which can be used to generate artificial exceptions.
    """

    def __init__(self):
        super(TestAdjuvant.FakeAction, self).__init__()
        self.count = 1
        self.name = "fake-action"
        self.summary = "fake action for unit tests"
        self.description = "fake, do not use outside unit tests"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestAdjuvant.FakeAdjuvant())

    def run(self, connection, args=None):
        if connection:
            raise RuntimeError("Fake action not meant to have a real connection")
        connection = TestAdjuvant.FakeConnection()
        self.count += 1
        self.results = {'status': "failed"}
        self.data[TestAdjuvant.FakeAdjuvant.key()] = True
        return connection

class BootFastbootAction(BootAction):
    """
    Provide for auto_login parameters in this boot stanza and re-establish the
    connection after boot.
    """

    def __init__(self):
        super(BootFastbootAction, self).__init__()
        self.name = "fastboot-boot"
        self.summary = "fastboot boot"
        self.description = "fastboot boot into the system"

    def validate(self):
        super(BootFastbootAction, self).validate()
        sequences = self.job.device['actions']['boot']['methods'].get('fastboot', [])
        for sequence in sequences:
            if not _fastboot_sequence_map(sequence):
                self.errors = "Unknown boot sequence '%s'" % sequence

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        sequences = self.job.device['actions']['boot']['methods'].get('fastboot', [])
        for sequence in sequences:
            mapped = _fastboot_sequence_map(sequence)
            if mapped[1]:
                self.internal_pipeline.add_action(mapped[0](device_actions=mapped[1]))
            elif mapped[0]:
                self.internal_pipeline.add_action(mapped[0]())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    path = self.mkdtemp()
    if 'uefi' in parameters:
        uefi_path = self.mkdtemp()
        download = DownloaderAction('uefi', uefi_path)
        download.max_retries = 3
        self.internal_pipeline.add_action(download)
        # uefi option of QEMU needs a directory, not the filename
        self.set_namespace_data(action=self.name, label='image', key='uefi_dir',
                                value=uefi_path, parameters=parameters)
        # alternatively use the -bios option and standard image args
    for image in parameters['images'].keys():
        if image != 'yaml_line':
            download = DownloaderAction(image, path)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            if parameters['images'][image].get('format', '') == 'qcow2':
                self.internal_pipeline.add_action(QCowConversionAction(image))
    self.internal_pipeline.add_action(ExtractNfsAction())
    self.internal_pipeline.add_action(OverlayAction())
    self.internal_pipeline.add_action(ApplyOverlayTftp())
    self.internal_pipeline.add_action(DeployDeviceEnvironment())

class MountAction(DeployAction):
    """
    Depending on the type of deployment, this needs to perform
    an OffsetAction, LoopCheckAction, LoopMountAction
    """

    def __init__(self, key):
        super(MountAction, self).__init__()
        self.name = "mount_action"
        self.description = "mount with offset"
        self.summary = "mount loop"
        self.key = key

    def validate(self):
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        self.internal_pipeline.validate_actions()

    def populate(self, parameters):
        """
        Needs to take account of the deployment type / image type etc.
        to determine which actions need to be added to the internal pipeline
        as part of the deployment selection step.
        """
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        # FIXME: not all mount operations will need these actions
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(OffsetAction(self.key))
        # FIXME: LoopCheckAction and LoopMountAction should be in only one Action
        self.internal_pipeline.add_action(LoopCheckAction(self.key))
        self.internal_pipeline.add_action(LoopMountAction(self.key))

def populate(self):
    """
    validate allows this to be a lot simpler, no need to check if the key exists
    each time.
    """
    index = {}
    self.internal_pipeline = Pipeline(parent=self, job=self.job)
    for testdef in self.parameters['test']['definitions']:
        if testdef['from'] == 'git':
            handler = GitRepoAction()
        elif testdef['from'] == 'bzr':
            handler = BzrRepoAction()
        elif testdef['from'] == 'tar':
            handler = TarRepoAction()
        elif testdef['from'] == 'url':
            handler = UrlRepoAction()
        else:
            self.errors = "unsupported handler"
            raise JobError("unsupported testdef handler: %s %s" % (testdef, testdef['from']))
        # set the full set of job YAML parameters for this handler as handler parameters.
        handler.parameters = testdef
        # store the correct test_name before incrementing the local index dict
        handler.parameters['test_name'] = "%s_%s" % (len(index.keys()), handler.parameters['name'])
        index[len(index.keys())] = handler.parameters['name']
        self.internal_pipeline.add_action(handler)
    # FIXME: the outer pipeline may add unwanted data to the parameters['test']
    self.internal_pipeline.add_action(TestOverlayAction())

def _make_pipeline(self, params):
    pipeline = Pipeline()
    auto_login = AutoLoginAction()
    auto_login.section = "internal"
    auto_login.parameters = params
    pipeline.add_action(auto_login)
    return pipeline

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(ConnectSsh())
    self.internal_pipeline.add_action(AutoLoginAction())
    self.internal_pipeline.add_action(ExpectShellSession())
    self.internal_pipeline.add_action(ExportDeviceEnvironment())
    self.internal_pipeline.add_action(ScpOverlayUnpack())

class PrepareOverlayTftp(Action):
    """
    Extracts the ramdisk or nfsrootfs in preparation for the lava overlay
    """

    def __init__(self):
        super(PrepareOverlayTftp, self).__init__()
        self.name = "prepare-tftp-overlay"
        self.summary = "extract ramdisk or nfsrootfs"
        self.description = "extract ramdisk or nfsrootfs in preparation for lava overlay"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(ExtractNfsRootfs())  # idempotent, checks for nfsrootfs parameter
        self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
        self.internal_pipeline.add_action(ExtractRamdisk())  # idempotent, checks for a ramdisk parameter
        self.internal_pipeline.add_action(ExtractModules())  # idempotent, checks for a modules parameter
        self.internal_pipeline.add_action(ApplyOverlayTftp())
        self.internal_pipeline.add_action(CompressRamdisk())  # idempotent, checks for a ramdisk parameter

    def run(self, connection, args=None):
        connection = super(PrepareOverlayTftp, self).run(connection, args)
        ramdisk = self.get_common_data('file', 'ramdisk')
        if ramdisk:  # nothing else to do
            return connection
        return connection

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(OverlayAction())
    if hasattr(self.job.device, 'power_state'):
        if self.job.device.power_state in ['on', 'off']:
            self.internal_pipeline.add_action(ConnectDevice())
            self.internal_pipeline.add_action(ResetDevice())
    image_keys = list(parameters['images'].keys())
    # Nexell extension
    if 'nexell_ext' in image_keys:
        self.logger.debug("SUKER: parameters in deploy/fastboot.py : " + str(parameters))
        self.internal_pipeline.add_action(EnterNexellFastbootAction(
            parameters, 'deploy_script', 'deploy_command1', 'dir_name'))
        self.internal_pipeline.add_action(ApplyNexellDeployAction(
            parameters, 'deploy_script', 'deploy_command2', 'dir_name'))
    else:
        self.internal_pipeline.add_action(EnterFastbootAction())
        self.internal_pipeline.add_action(LxcAddDeviceAction())
        if 'image' in image_keys:
            download = DownloaderAction('image', self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(FastbootUpdateAction())
        if 'ptable' in image_keys:
            download = DownloaderAction('ptable', self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(ApplyPtableAction())
        if 'boot' in image_keys:
            download = DownloaderAction('boot', self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(ApplyBootAction())
        if 'cache' in image_keys:
            download = DownloaderAction('cache', self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(ApplyCacheAction())
        if 'userdata' in image_keys:
            download = DownloaderAction('userdata', self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(ApplyUserdataAction())
        if 'system' in image_keys:
            download = DownloaderAction('system', self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(ApplySystemAction())
        if 'vendor' in image_keys:
            download = DownloaderAction('vendor', self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            self.internal_pipeline.add_action(ApplyVendorAction())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    if self.test_needs_overlay(parameters):
        self.internal_pipeline.add_action(CustomisationAction())
        self.internal_pipeline.add_action(OverlayAction())
    # Check if the device has a power command such as HiKey, Dragonboard,
    # etc. against device that doesn't like Nexus, etc.
    if self.job.device.get('fastboot_via_uboot', False):
        self.internal_pipeline.add_action(ConnectDevice())
        self.internal_pipeline.add_action(UBootEnterFastbootAction())
    elif self.job.device.power_command:
        self.force_prompt = True
        self.internal_pipeline.add_action(ConnectDevice())
        self.internal_pipeline.add_action(ResetDevice())
    else:
        self.internal_pipeline.add_action(EnterFastbootAction())
    self.internal_pipeline.add_action(WaitUSBDeviceAction(device_actions=['add']))
    fastboot_dir = self.mkdtemp()
    image_keys = list(parameters['images'].keys())
    image_keys.sort()
    for image in image_keys:
        if image != 'yaml_line':
            self.internal_pipeline.add_action(DownloaderAction(image, fastboot_dir))
            if parameters['images'][image].get('apply-overlay', False):
                if self.test_needs_overlay(parameters):
                    self.internal_pipeline.add_action(ApplyOverlaySparseImage(image))
            if self.test_needs_overlay(parameters) and \
                    self.test_needs_deployment(parameters):
                self.internal_pipeline.add_action(DeployDeviceEnvironment())
    self.internal_pipeline.add_action(LxcAddDeviceAction())
    self.internal_pipeline.add_action(FastbootFlashAction())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    path = self.mkdtemp()
    if 'uefi' in parameters:
        uefi_path = self.mkdtemp()
        self.internal_pipeline.add_action(DownloaderAction('uefi', uefi_path))
        # uefi option of QEMU needs a directory, not the filename
        self.set_namespace_data(action=self.name, label='image', key='uefi_dir',
                                value=uefi_path, parameters=parameters)
        # alternatively use the -bios option and standard image args
    for image in parameters['images'].keys():
        if image != 'yaml_line':
            self.internal_pipeline.add_action(DownloaderAction(image, path))
            if parameters['images'][image].get('format', '') == 'qcow2':
                self.internal_pipeline.add_action(QCowConversionAction(image))
    if self.test_needs_overlay(parameters):
        self.internal_pipeline.add_action(CustomisationAction())
        self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
        self.internal_pipeline.add_action(ApplyOverlayGuest())
    if self.test_needs_deployment(parameters):
        self.internal_pipeline.add_action(DeployDeviceEnvironment())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(LxcCreateAction())
    self.internal_pipeline.add_action(OverlayAction())
    self.internal_pipeline.add_action(ApplyLxcOverlay())

def populate(self, parameters):
    super(GrubSequenceAction, self).populate(parameters)
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    sequences = self.job.device['actions']['boot']['methods']['grub'].get('sequence', [])
    for sequence in sequences:
        mapped = _grub_sequence_map(sequence)
        if mapped[1]:
            self.internal_pipeline.add_action(mapped[0](type=mapped[1]))
        elif mapped[0]:
            self.internal_pipeline.add_action(mapped[0]())
    if self.has_prompts(parameters):
        self.internal_pipeline.add_action(AutoLoginAction())
        if self.test_has_shell(parameters):
            self.internal_pipeline.add_action(ExpectShellSession())
            if 'transfer_overlay' in parameters:
                self.internal_pipeline.add_action(OverlayUnpack())
            self.internal_pipeline.add_action(ExportDeviceEnvironment())
    else:
        if self.has_boot_finished(parameters):
            self.logger.debug("Doing a boot without a shell (installer)")
            self.internal_pipeline.add_action(InstallerWait())
            self.internal_pipeline.add_action(PowerOff())

def populate(self, parameters):
    self.expect_shell = parameters.get('expect_shell', True)
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(BootloaderSecondaryMedia())
    self.internal_pipeline.add_action(BootloaderCommandOverlay())
    self.internal_pipeline.add_action(ConnectDevice())
    # FIXME: reset_device is a hikey hack due to fastboot/OTG issues
    # remove as part of LAVA-940 - convert to use fastboot-sequence
    reset_device = self.job.device['actions']['boot']['methods'].get(
        'grub-efi', {}).get('reset_device', True)
    if parameters['method'] == 'grub-efi' and reset_device:
        # added unless the device specifies not to reset the device in grub.
        self.internal_pipeline.add_action(ResetDevice())
    elif parameters['method'] == 'grub':
        self.internal_pipeline.add_action(ResetDevice())
    if parameters['method'] == 'grub-efi':
        self.internal_pipeline.add_action(UEFIMenuInterrupt())
        self.internal_pipeline.add_action(GrubMenuSelector())
    self.internal_pipeline.add_action(BootloaderInterrupt())
    self.internal_pipeline.add_action(BootloaderCommandsAction())
    if self.has_prompts(parameters):
        self.internal_pipeline.add_action(AutoLoginAction())
        if self.test_has_shell(parameters):
            self.internal_pipeline.add_action(ExpectShellSession())
            if 'transfer_overlay' in parameters:
                self.internal_pipeline.add_action(OverlayUnpack())
            self.internal_pipeline.add_action(ExportDeviceEnvironment())
    else:
        if self.has_boot_finished(parameters):
            self.logger.debug("Doing a boot without a shell (installer)")
            self.internal_pipeline.add_action(InstallerWait())
            self.internal_pipeline.add_action(PowerOff())

def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'd02',
        'job_name': 'grub-standard-ramdisk',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'grub',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
    job = Job(4212, None, None, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.set_pipeline(pipeline)
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    parsed = []
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    commands = params['grub']['ramdisk']['commands']
    self.assertIn('net_bootp', commands)
    self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
    self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
    self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)
    params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
    substituted_commands = params['grub']['ramdisk']['commands']
    self.assertIs(type(substituted_commands), list)
    self.assertIn('net_bootp', substituted_commands)
    self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
    self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
    self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', parsed)
    self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', parsed)

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(ConnectDevice())
    self.internal_pipeline.add_action(PowerOn())
    self.internal_pipeline.add_action(WaitDFUDeviceAction())
    self.internal_pipeline.add_action(FlashDFUAction())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(LxcStartAction())
    self.internal_pipeline.add_action(ConnectLxc())
    # Skip AutoLoginAction unconditionally as this action tries to parse kernel message
    # self.internal_pipeline.add_action(AutoLoginAction())
    self.internal_pipeline.add_action(ExpectShellSession())
    self.internal_pipeline.add_action(ExportDeviceEnvironment())

def test_runs_subaction(self):
    pipe = Pipeline()
    pipe.add_action(self.sub0)
    pipe.add_action(self.sub1)
    pipe.run_actions(None)
    self.assertTrue(self.sub0.ran)
    self.assertTrue(self.sub1.ran)
    self.assertNotEqual(self.sub0.elapsed_time, 0)
    self.assertNotEqual(self.sub1.elapsed_time, 0)

class TestShellRetry(RetryAction):

    def __init__(self):
        super(TestShellRetry, self).__init__()
        self.description = "Retry wrapper for lava-test-shell"
        self.summary = "Retry support for Lava Test Shell"
        self.name = "lava-test-retry"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestShellAction())

class BootQemuRetry(RetryAction):

    def __init__(self):
        super(BootQemuRetry, self).__init__()
        self.name = "boot_qemu_image"
        self.description = "boot image using QEMU command line"
        self.summary = "boot QEMU image"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(CallQemuAction())

class UnmountAction(RetryAction):

    def __init__(self):
        super(UnmountAction, self).__init__()
        self.name = "umount-retry"
        self.description = "retry support for umount"
        self.summary = "retry umount"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(Unmount())

class BootMonitoredQemu(BootAction):

    def __init__(self):
        super(BootMonitoredQemu, self).__init__()
        self.name = 'boot_image_monitor'
        self.description = "boot monitored image with retry"
        self.summary = "boot monitor with retry"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(BootQemuRetry())

class InternalRetryAction(RetryAction):

    def __init__(self):
        super(TestAction.InternalRetryAction, self).__init__()
        self.name = "internal-retry-action"
        self.section = 'internal'
        self.summary = "internal retry action for unit tests"
        self.description = "internal, do not use outside unit tests"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job)
        self.internal_pipeline.add_action(TestAction.FakeAction(), parameters)

class BootMonitoredPyOCDRetry(RetryAction):

    def __init__(self):
        super(BootMonitoredPyOCDRetry, self).__init__()
        self.name = 'boot_pyocd_image'
        self.description = "boot pyocd image using the command line interface"
        self.summary = "boot pyocd image"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(MonitorPyOCDAction())
        self.internal_pipeline.add_action(ConnectDevice())

def test_overlay_action(self):  # pylint: disable=too-many-locals
    parameters = {
        'device_type': 'x86',
        'job_name': 'ipxe-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'ipxe',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
    job = Job(4212, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.set_pipeline(pipeline)
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    parsed = []
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{LAVA_MAC}': "00:00:00:00:00:00"
    }
    params = device['actions']['boot']['methods']
    params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)
    commands = params['ipxe']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    self.assertIn("dhcp net0", commands)
    self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
    self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
    self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
    self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
    self.assertIn("boot", commands)

def test_composite_action_aggregates_errors_from_sub_actions(self):  # pylint: disable=invalid-name
    # Unable to call Action.validate() as there is no job in this unit test
    sub1 = Action()
    sub1.__errors__ = [1]
    sub2 = Action()
    sub2.name = "sub2"
    sub2.__errors__ = [2]
    pipe = Pipeline()
    sub1.name = "sub1"
    pipe.add_action(sub1)
    pipe.add_action(sub2)
    self.assertEqual([1, 2], pipe.errors)

class BootVMAction(BootAction):

    def __init__(self):
        super(BootVMAction, self).__init__()
        self.name = "boot-vm"
        self.summary = "boot a VM on a host"
        self.description = "Execute commands to boot a VM"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(ConnectDynamicSsh())

    def validate(self):
        super(BootVMAction, self).validate()
        print "###### FIXME ########", self.parameters["commands"]

def test_create_internal_pipeline(self):
    action = Action()
    action.name = "internal_pipe"
    action.description = "test action only"
    action.summary = "starter"
    pipe = Pipeline()
    pipe.add_action(action)
    self.assertEqual(len(pipe.children[pipe]), 1)
    self.assertEqual(action.level, "1")
    action = Action()
    action.name = "child_action"
    action.summary = "child"
    action.description = "action implementing an internal pipe"
    with self.assertRaises(RuntimeError):
        Pipeline(action)
    pipe.add_action(action)
    self.assertEqual(action.level, "2")
    self.assertEqual(len(pipe.children[pipe]), 2)
    # a formal RetryAction would contain a pre-built pipeline which can be inserted directly
    retry_pipe = Pipeline(action)
    action = Action()
    action.name = "inside_action"
    action.description = "action inside the internal pipe"
    action.summary = "child"
    retry_pipe.add_action(action)
    self.assertEqual(len(retry_pipe.children[retry_pipe]), 1)
    self.assertEqual(action.level, "2.1")

class ScpOverlay(DeployAction):
    """
    Prepares the overlay and copies it to the target
    """

    def __init__(self):
        super(ScpOverlay, self).__init__()
        self.name = "scp-overlay"
        self.summary = "copy overlay to device"
        self.description = "prepare overlay and scp to device"
        self.section = 'deploy'
        self.items = []

    def validate(self):
        super(ScpOverlay, self).validate()
        self.items = [
            'firmware', 'kernel', 'dtb', 'rootfs', 'modules'
        ]
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        tar_flags = parameters['deployment_data']['tar_flags'] \
            if 'tar_flags' in parameters['deployment_data'].keys() else ''
        self.set_common_data(self.name, 'tar_flags', tar_flags)
        self.internal_pipeline.add_action(OverlayAction())
        for item in self.items:
            if item in parameters:
                download = DownloaderAction(item, path=self.mkdtemp())
                download.max_retries = 3
                self.internal_pipeline.add_action(download, parameters)
                self.set_common_data('scp', item, True)
        # we might not have anything to download, just the overlay to push
        self.internal_pipeline.add_action(PrepareOverlayScp())
        # prepare the device environment settings in common data for enabling in the boot step
        self.internal_pipeline.add_action(DeployDeviceEnvironment())

class DeployMonitoredAction(DeployAction):

    def __init__(self):
        super(DeployMonitoredAction, self).__init__()
        self.name = 'deploy-monitor'
        self.description = "deploy images without POSIX"
        self.summary = "deploy without requiring POSIX"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        path = self.mkdtemp()
        for image in parameters['images'].keys():
            if image != 'yaml_line':
                download = DownloaderAction(image, path)
                download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
                self.internal_pipeline.add_action(download)

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    # customize the device configuration for this job
    self.internal_pipeline.add_action(UBootSecondaryMedia())
    self.internal_pipeline.add_action(UBootCommandOverlay())
    self.internal_pipeline.add_action(ConnectDevice())
    self.internal_pipeline.add_action(UBootRetry())

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(OverlayAction())
    if hasattr(self.job.device, 'power_state'):
        if self.job.device.power_state in ['on', 'off']:
            self.internal_pipeline.add_action(ConnectDevice())
            self.internal_pipeline.add_action(ResetDevice())
    self.internal_pipeline.add_action(EnterFastbootAction())
    for image in parameters['images'].keys():
        if image != 'yaml_line':
            download = DownloaderAction(image, self.fastboot_dir)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
            if image == 'image':
                self.internal_pipeline.add_action(FastbootUpdateAction())
            if image == 'ptable':
                self.internal_pipeline.add_action(ApplyPtableAction())
            if image == 'boot':
                self.internal_pipeline.add_action(ApplyBootAction())
            if image == 'cache':
                self.internal_pipeline.add_action(ApplyCacheAction())
            if image == 'userdata':
                self.internal_pipeline.add_action(ApplyUserdataAction())
            if image == 'system':
                self.internal_pipeline.add_action(ApplySystemAction())

class BootloaderDefaultsAction(BootAction):
    """
    Wraps the Retry Action to allow for actions which precede
    the reset, e.g. Connect.
    """

    def __init__(self):
        super(BootloaderDefaultsAction, self).__init__()
        self.name = "bootloader-defaults-action"
        self.description = "Autorun precanned bootloader entry"
        self.summary = "allow bootloader to run"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # customize the device configuration for this job
        self.internal_pipeline.add_action(ConnectDevice())
        self.internal_pipeline.add_action(BootloaderDefaultsRetry())

def parse(self, content, device, output_dir=None):
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    job = Job(data)
    job.device = device
    job.parameters['output_dir'] = output_dir
    pipeline = Pipeline(job=job)
    for action_data in data['actions']:
        line = action_data.pop('yaml_line', None)
        for name in action_data:
            if name == "deploy":
                # allow the classmethod to check the parameters
                deploy = Deployment.select(device, action_data[name])(pipeline)
                deploy.action.parameters = action_data[name]  # still need to pass the parameters to the instance
                if 'test' in data['actions']:
                    deploy.action.parameters = action_data['test']
                deploy.action.yaml_line = line
                device.deployment_data = deployment_data.get(deploy.action.parameters['os'])
                deploy.action.parameters = {'deployment_data': device.deployment_data}
            else:
                action_class = Action.find(name)
                # select the specific action of this class for this job
                action = action_class()
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                pipeline.add_action(action)
                # uncomment for debug
                # print action.parameters
    # the only parameters sent to the job are job parameters
    # like job_name, logging_level or target_group.
    data.pop('actions')
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(ExtractNfsRootfs())  # idempotent, checks for nfsrootfs parameter
    self.internal_pipeline.add_action(OverlayAction())  # idempotent, includes testdef
    self.internal_pipeline.add_action(ExtractRamdisk())  # idempotent, checks for a ramdisk parameter
    self.internal_pipeline.add_action(ExtractModules())  # idempotent, checks for a modules parameter
    self.internal_pipeline.add_action(ApplyOverlayTftp())
    self.internal_pipeline.add_action(CompressRamdisk())  # idempotent, checks for a ramdisk parameter

def populate(self, parameters):
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    self.internal_pipeline.add_action(BootQemuRetry())
    # Add AutoLoginAction unconditionally as this action does nothing if
    # the configuration does not contain 'auto_login'
    self.internal_pipeline.add_action(AutoLoginAction())
    self.internal_pipeline.add_action(ExpectShellSession())
    self.internal_pipeline.add_action(ExportDeviceEnvironment())