class LxcAction(DeployAction):  # pylint:disable=too-many-instance-attributes
    """Deploy action which downloads files and deploys them using LXC."""

    def __init__(self):
        super(LxcAction, self).__init__()
        self.name = "lxc-deploy"
        self.description = "download files and deploy using lxc"
        self.summary = "lxc deployment"

    def validate(self):
        super(LxcAction, self).validate()
        # NOTE(review): self.errors appears to be an accumulating property
        # (each assignment records another error rather than overwriting) —
        # both checks below are therefore retained; confirm against Action.
        if LxcProtocol.name not in [protocol.name for protocol in self.job.protocols]:
            self.errors = "Invalid job - missing protocol"
        # infrastructure_error() returns an error string when the
        # 'lxc-create' binary is unavailable.
        self.errors = infrastructure_error('lxc-create')
        # lava_test_results_dir carries a '%s' placeholder for the job id.
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.protocols = [protocol for protocol in self.job.protocols
                          if protocol.name == LxcProtocol.name]
        # Publish the container settings taken from the first LXC protocol
        # so later actions can retrieve them from common data.
        self.set_common_data('lxc', 'name', self.protocols[0].lxc_name)
        self.set_common_data('lxc', 'distribution', self.protocols[0].lxc_dist)
        self.set_common_data('lxc', 'release', self.protocols[0].lxc_release)
        self.set_common_data('lxc', 'arch', self.protocols[0].lxc_arch)
        self.internal_pipeline.add_action(OverlayAction())
        self.internal_pipeline.add_action(LxcCreateAction())
class DownloaderAction(RetryAction):
    """
    Retry wrapper around a single download.

    To work with multinode, every deploy action must hand each
    download a unique destination path.
    """

    def __init__(self, key, path):
        super(DownloaderAction, self).__init__()
        self.name = "download_retry"
        self.description = "download with retry"
        self.summary = "download-retry"
        self.key = key    # parameter key naming what to download
        self.path = path  # destination directory

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Select the concrete download action from the URL scheme.
        url = urlparse.urlparse(parameters[self.key])
        scheme = url.scheme
        if scheme == "scp":
            download = ScpDownloadAction(self.key, self.path, url)
        elif scheme in ("http", "https"):
            download = HttpDownloadAction(self.key, self.path, url)
        elif scheme == "file":
            download = FileDownloadAction(self.key, self.path, url)
        else:
            raise JobError("Unsupported url protocol scheme: %s" % scheme)
        self.internal_pipeline.add_action(download)
class BootCMSISRetry(RetryAction):
    """Flash and boot a CMSIS-DAP USB image, with retry support."""

    def __init__(self):
        super(BootCMSISRetry, self).__init__()
        self.name = 'boot-cmsis-retry'
        self.summary = "boot cmsis usb image with retry"
        self.description = "boot cmsis usb image with retry"

    def validate(self):
        super(BootCMSISRetry, self).validate()
        params = self.job.device['actions']['boot']['methods'][
            'cmsis-dap']['parameters']
        # The mass-storage device path is mandatory for CMSIS-DAP flashing.
        if not params.get('usb_mass_device', None):
            self.errors = "usb_mass_device unset"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        params = self.job.device['actions']['boot']['methods'][
            'cmsis-dap']['parameters']
        usb_mass_device = params.get('usb_mass_device', None)
        resets_after_flash = params.get('resets_after_flash', True)
        # Only reset (and wait for the storage path to reappear) when the
        # device provides a hard reset command.
        if self.job.device.hard_reset_command:
            self.internal_pipeline.add_action(ResetDevice())
            self.internal_pipeline.add_action(WaitDevicePathAction(usb_mass_device))
        self.internal_pipeline.add_action(FlashCMSISAction())
        if resets_after_flash:
            self.internal_pipeline.add_action(WaitUSBSerialDeviceAction())
        self.internal_pipeline.add_action(ConnectDevice())
class DownloaderAction(RetryAction):
    """
    The retry pipeline for downloads.

    To allow any deploy action to work with multinode, each call *must*
    set a unique path.
    """

    def __init__(self, key, path):
        super(DownloaderAction, self).__init__()
        self.name = "download_retry"
        self.description = "download with retry"
        self.summary = "download-retry"
        self.key = key    # the key in the parameters of what to download
        self.path = path  # where to download

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # The URL may live under an 'images' sub-dictionary or directly
        # under the download key.
        if 'images' in parameters and self.key in parameters['images']:
            url = lavaurl.urlparse(parameters['images'][self.key]['url'])
        else:
            url = lavaurl.urlparse(parameters[self.key]['url'])
        # Dispatch on the scheme to the matching download action.
        handlers = {
            'scp': ScpDownloadAction,
            'http': HttpDownloadAction,
            'https': HttpDownloadAction,
            'file': FileDownloadAction,
        }
        if url.scheme not in handlers:
            raise JobError("Unsupported url protocol scheme: %s" % url.scheme)
        self.internal_pipeline.add_action(handlers[url.scheme](self.key, self.path, url))
class BootFastbootAction(BootAction):
    """
    Provide for auto_login parameters in this boot stanza and re-establish
    the connection after boot.
    """

    def __init__(self):
        super(BootFastbootAction, self).__init__()
        self.name = "fastboot-boot"
        self.description = "fastboot boot into the system"
        self.summary = "fastboot boot"

    def validate(self):
        super(BootFastbootAction, self).validate()
        sequences = self.job.device['actions']['boot']['methods'].get(
            'fastboot', [])
        # Every configured boot sequence must map to a known action.
        for sequence in sequences:
            if not _fastboot_sequence_map(sequence):
                self.errors = "Unknown boot sequence '%s'" % sequence

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        sequences = self.job.device['actions']['boot']['methods'].get(
            'fastboot', [])
        for sequence in sequences:
            # mapped is (action class, optional device_actions argument).
            mapped = _fastboot_sequence_map(sequence)
            if mapped[1]:
                self.internal_pipeline.add_action(mapped[0](device_actions=mapped[1]))
            elif mapped[0]:
                self.internal_pipeline.add_action(mapped[0]())
class FakeAction(Action):
    """Isolated Action which can be used to generate artificial exceptions."""

    def __init__(self):
        super(TestAdjuvant.FakeAction, self).__init__()
        self.count = 1
        self.name = "fake-action"
        self.description = "fake, do not use outside unit tests"
        self.summary = "fake action for unit tests"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestAdjuvant.FakeAdjuvant())

    def run(self, connection, args=None):
        # A real connection must never reach this fake action.
        if connection:
            raise RuntimeError("Fake action not meant to have a real connection")
        connection = TestAdjuvant.FakeConnection()
        self.count += 1
        self.results = {'status': "failed"}
        # Mark the adjuvant key so the retry machinery reacts to it.
        self.data[TestAdjuvant.FakeAdjuvant.key()] = True
        return connection
class MountAction(DeployAction):
    """
    Depending on the type of deployment, this needs to perform
    an OffsetAction, LoopCheckAction, LoopMountAction
    """

    def __init__(self, key):
        super(MountAction, self).__init__()
        self.name = "mount_action"
        self.description = "mount with offset"
        self.summary = "mount loop"
        self.key = key  # parameter key identifying which image to mount

    def validate(self):
        # Validation is delegated to the internal pipeline, so populate()
        # must have run before validate() is called.
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        self.internal_pipeline.validate_actions()

    def populate(self, parameters):
        """
        Needs to take account of the deployment type / image type etc.
        to determine which actions need to be added to the internal pipeline
        as part of the deployment selection step.
        """
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        # FIXME: not all mount operations will need these actions
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(OffsetAction(self.key))
        # FIXME: LoopCheckAction and LoopMountAction should be in only one Action
        self.internal_pipeline.add_action(LoopCheckAction(self.key))
        self.internal_pipeline.add_action(LoopMountAction(self.key))
class MountAction(DeployAction):
    """
    Depending on the type of deployment, this needs to perform
    an OffsetAction, LoopCheckAction, LoopMountAction
    """

    def __init__(self):
        super(MountAction, self).__init__()
        self.name = "mount_action"
        self.description = "mount with offset"
        self.summary = "mount loop"

    def validate(self):
        # Delegates to the internal pipeline, which must already be
        # populated when validate() runs.
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        self.internal_pipeline.validate_actions()

    def populate(self, parameters):
        """
        Needs to take account of the deployment type / image type etc.
        to determine which actions need to be added to the internal pipeline
        as part of the deployment selection step.
        """
        if not self.job:
            raise RuntimeError("No job object supplied to action")
        # FIXME: not all mount operations will need these actions
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # This variant is hard-wired to the 'image' parameter key.
        self.internal_pipeline.add_action(OffsetAction('image'))
        # FIXME: LoopCheckAction and LoopMountAction should be in only one Action
        self.internal_pipeline.add_action(LoopCheckAction())
        self.internal_pipeline.add_action(LoopMountAction())
class DownloaderAction(RetryAction):
    """
    Retry pipeline wrapping one download.

    Each call *must* set a unique path so that any deploy action works
    with multinode.
    """

    def __init__(self, key, path):
        super(DownloaderAction, self).__init__()
        self.name = "download-retry"
        self.summary = "download-retry"
        self.description = "download with retry"
        self.key = key    # parameters key naming what to download
        self.path = path  # download destination

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # The URL may sit under an 'images' sub-dictionary or directly
        # under the key.
        if 'images' in parameters and self.key in parameters['images']:
            source = parameters['images'][self.key]['url']
        else:
            source = parameters[self.key]['url']
        url = lavaurl.urlparse(source)
        scheme = url.scheme
        if scheme == 'scp':
            download = ScpDownloadAction(self.key, self.path, url)
        elif scheme in ('http', 'https'):
            download = HttpDownloadAction(self.key, self.path, url)  # pylint: disable=redefined-variable-type
        elif scheme == 'file':
            download = FileDownloadAction(self.key, self.path, url)  # pylint: disable=redefined-variable-type
        else:
            raise JobError("Unsupported url protocol scheme: %s" % scheme)
        self.internal_pipeline.add_action(download)
class PrepareOverlayScp(Action):
    """
    Copy the overlay to the device using scp and then unpack remotely.
    Needs the device to be ready for SSH connection.
    """

    def __init__(self):
        super(PrepareOverlayScp, self).__init__()
        self.name = "prepare-scp-overlay"
        self.description = "copy the overlay over an existing ssh connection"
        self.summary = "scp the overlay to the remote device"

    def validate(self):
        super(PrepareOverlayScp, self).validate()
        # lava_test_results_dir carries a '%s' placeholder for the job id.
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
        # Ensure a predictable locale for the remote commands.
        environment = self.get_common_data('environment', 'env_dict') or {}
        environment.update({"LC_ALL": "C.UTF-8", "LANG": "C"})
        self.set_common_data('environment', 'env_dict', environment)

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(ExtractRootfs())   # idempotent, checks for nfsrootfs parameter
        self.internal_pipeline.add_action(ExtractModules())  # idempotent, checks for a modules parameter

    def run(self, connection, args=None):
        connection = super(PrepareOverlayScp, self).run(connection, args)
        overlay = self.data['compress-overlay'].get('output')
        self.logger.info("Preparing to copy: %s" % os.path.basename(overlay))
        self.set_common_data('scp-deploy', 'overlay', overlay)
        return connection
def _make_pipeline(self, params):
    """Build a one-action pipeline holding a configured AutoLoginAction."""
    login = AutoLoginAction()
    login.section = "internal"
    login.parameters = params
    pipeline = Pipeline()
    pipeline.add_action(login)
    return pipeline
class BootQemuRetry(RetryAction):
    """Boot a QEMU image from the command line, with retry support."""

    def __init__(self):
        super(BootQemuRetry, self).__init__()
        self.name = 'boot_qemu_image'
        self.summary = "boot QEMU image"
        self.description = "boot image using QEMU command line"

    def validate(self):
        super(BootQemuRetry, self).validate()
        try:
            # FIXME: need a schema and do this inside the NewDevice with a QemuDevice class? (just for parsing)
            boot_params = self.job.device['actions']['boot']['methods']['qemu']['parameters']
            command = [which(boot_params['command'])] + list(boot_params.get('options', []))
            self.set_common_data('qemu-command', 'command', command)
        # FIXME: AttributeError is an InfrastructureError in fact
        except (KeyError, TypeError, AttributeError):
            self.errors = "Invalid parameters"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(CallQemuAction())
class PrepareOverlayTftp(Action):
    """
    Extracts the ramdisk or nfsrootfs in preparation for the lava overlay
    """

    def __init__(self):
        super(PrepareOverlayTftp, self).__init__()
        self.name = "prepare-tftp-overlay"
        self.summary = "extract ramdisk or nfsrootfs"
        self.description = "extract ramdisk or nfsrootfs in preparation for lava overlay"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Each sub-action is idempotent and checks its own parameters.
        sub_actions = [
            ExtractNfsRootfs(),  # checks for nfsrootfs parameter
            OverlayAction(),     # includes testdef
            ExtractRamdisk(),    # checks for a ramdisk parameter
            ExtractModules(),    # checks for a modules parameter
            ApplyOverlayTftp(),
            CompressRamdisk(),   # checks for a ramdisk parameter
        ]
        for sub_action in sub_actions:
            self.internal_pipeline.add_action(sub_action)

    def run(self, connection, args=None):
        connection = super(PrepareOverlayTftp, self).run(connection, args)
        ramdisk = self.get_common_data('file', 'ramdisk')
        if ramdisk:  # nothing else to do
            return connection
        return connection
class PrepareOverlayScp(Action):
    """
    Copy the overlay to the device using scp and then unpack remotely.
    Needs the device to be ready for SSH connection.
    """

    def __init__(self):
        super(PrepareOverlayScp, self).__init__()
        self.name = "prepare-scp-overlay"
        self.summary = "scp the overlay to the remote device"
        self.description = "copy the overlay over an existing ssh connection"

    def validate(self):
        super(PrepareOverlayScp, self).validate()
        # lava_test_results_dir contains a '%s' placeholder for the job id.
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id
        # Force a predictable locale in the shared environment dictionary.
        environment = self.get_common_data('environment', 'env_dict')
        if not environment:
            environment = {}
        environment.update({"LC_ALL": "C.UTF-8", "LANG": "C"})
        self.set_common_data('environment', 'env_dict', environment)

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(ExtractRootfs())  # idempotent, checks for nfsrootfs parameter
        self.internal_pipeline.add_action(ExtractModules())  # idempotent, checks for a modules parameter

    def run(self, connection, args=None):
        connection = super(PrepareOverlayScp, self).run(connection, args)
        # Publish the compressed overlay location for the scp deploy step.
        self.logger.info("Preparing to copy: %s" % os.path.basename(self.data['compress-overlay'].get('output')))
        self.set_common_data('scp-deploy', 'overlay', self.data['compress-overlay'].get('output'))
        return connection
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check substitution of grub ramdisk command placeholders for d02."""
    parameters = {
        'device_type': 'd02',
        'job_name': 'grub-standard-ramdisk',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'grub',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
    job = Job(4212, None, None, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.set_pipeline(pipeline)
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    parsed = []
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    commands = params['grub']['ramdisk']['commands']
    # The raw device configuration still contains the placeholders.
    self.assertIn('net_bootp', commands)
    self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
    self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
    self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)
    params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
    substituted_commands = params['grub']['ramdisk']['commands']
    self.assertIs(type(substituted_commands), list)
    self.assertIn('net_bootp', substituted_commands)
    # After substitution the placeholder forms must be gone ...
    self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp",
                     substituted_commands)
    # ... replaced by the concrete server ip and kernel name.
    self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
    self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', parsed)
    self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', parsed)
def test_runs_subaction(self):
    """Both sub-actions run and record a non-zero elapsed time."""
    pipe = Pipeline()
    for sub in (self.sub0, self.sub1):
        pipe.add_action(sub)
    pipe.run_actions(None, None)
    for sub in (self.sub0, self.sub1):
        self.assertTrue(sub.ran)
        self.assertNotEqual(sub.timeout.elapsed_time, 0)
def test_runs_subaction(self):
    """Running the pipeline executes each sub-action and times it."""
    pipe = Pipeline()
    pipe.add_action(self.sub0)
    pipe.add_action(self.sub1)
    pipe.run_actions(None)
    # Each sub-action must have run and accumulated some elapsed time.
    for sub in (self.sub0, self.sub1):
        self.assertTrue(sub.ran)
        self.assertNotEqual(sub.elapsed_time, 0)
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check substitution of grub ramdisk command placeholders for d02."""
    parameters = {
        'device_type': 'd02',
        'job_name': 'grub-standard-ramdisk',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'grub',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
                'dtb': 'broken.dtb'
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/d02-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    parsed = []
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    dtb = parameters['actions']['deploy']['dtb']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        # the addresses need to be hexadecimal
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{DTB}': dtb
    }
    params = device['actions']['boot']['methods']
    commands = params['grub']['ramdisk']['commands']
    # The raw device configuration still contains the placeholders.
    self.assertIn('net_bootp', commands)
    self.assertIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", commands)
    self.assertIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', commands)
    self.assertIn('devicetree (tftp,{SERVER_IP})/{DTB}', commands)
    params['grub']['ramdisk']['commands'] = substitute(params['grub']['ramdisk']['commands'], substitution_dictionary)
    substituted_commands = params['grub']['ramdisk']['commands']
    self.assertIs(type(substituted_commands), list)
    self.assertIn('net_bootp', substituted_commands)
    # After substitution the placeholder forms must be gone ...
    self.assertNotIn("linux (tftp,{SERVER_IP})/{KERNEL} console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp", substituted_commands)
    # ... replaced by the concrete server ip and kernel name.
    self.assertIn("linux (tftp,%s)/%s console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 root=/dev/ram0 ip=dhcp" % (ip_addr, kernel), substituted_commands)
    self.assertNotIn('initrd (tftp,{SERVER_IP})/{RAMDISK}', parsed)
    self.assertNotIn('devicetree (tftp,{SERVER_IP})/{DTB}', parsed)
class BootQemuRetry(RetryAction):
    """Retry wrapper which delegates the actual boot to CallQemuAction."""

    def __init__(self):
        super(BootQemuRetry, self).__init__()
        self.name = "boot_qemu_image"
        self.summary = "boot QEMU image"
        self.description = "boot image using QEMU command line"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(CallQemuAction())
def __init__(self, parent):
    """Attach a BootQEMUImageAction (plus login handling) to *parent*."""
    super(BootKVM, self).__init__(parent)
    self.action = BootQEMUImageAction()
    self.action.job = self.job
    parent.add_action(self.action)
    pipeline = Pipeline(parent=self.action, job=self.job)
    # Only add login handling when the job asks for it.
    if 'auto_login' in self.action.parameters:
        pipeline.add_action(AutoLoginAction())
    pipeline.add_action(ExpectShellSession())
class UnmountAction(RetryAction):
    """Retry wrapper around the Unmount action."""

    def __init__(self):
        super(UnmountAction, self).__init__()
        self.name = "umount-retry"
        self.summary = "retry umount"
        self.description = "retry support for umount"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(Unmount())
class TestShellRetry(RetryAction):
    """Retry wrapper around lava-test-shell execution."""

    def __init__(self):
        super(TestShellRetry, self).__init__()
        self.name = "lava-test-retry"
        self.description = "Retry wrapper for lava-test-shell"
        self.summary = "Retry support for Lava Test Shell"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestShellAction())
class BootQemuRetry(RetryAction):
    """Boot a QEMU image with retry, delegating to CallQemuAction."""

    def __init__(self):
        super(BootQemuRetry, self).__init__()
        self.name = 'boot-qemu-image'
        self.summary = "boot QEMU image"
        self.description = "boot image using QEMU command line"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(CallQemuAction())
class TestMonitorRetry(RetryAction):
    """Retry wrapper around lava-test-monitor execution."""

    def __init__(self):
        super(TestMonitorRetry, self).__init__()
        self.name = "lava-test-monitor-retry"
        self.description = "Retry wrapper for lava-test-monitor"
        self.summary = "Retry support for Lava Test Monitoring"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestMonitorAction())
class BootMonitoredQemu(BootAction):
    """Boot a monitored QEMU image via the BootQemuRetry pipeline."""

    def __init__(self):
        super(BootMonitoredQemu, self).__init__()
        self.name = 'boot_image_monitor'
        self.summary = "boot monitor with retry"
        self.description = "boot monitored image with retry"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(BootQemuRetry())
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check substitution of ipxe ramdisk command placeholders for x86."""
    parameters = {
        'device_type': 'x86',
        'job_name': 'ipxe-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'ipxe',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
            }
        }
    }
    device = NewDevice(
        os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
    job = Job(4212, parameters, None)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.pipeline = pipeline
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    ip_addr = dispatcher_ip(None)
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{LAVA_MAC}': "00:00:00:00:00:00"
    }
    params = device['actions']['boot']['methods']
    params['ipxe']['ramdisk']['commands'] = substitute(
        params['ipxe']['ramdisk']['commands'], substitution_dictionary)
    commands = params['ipxe']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    # Substituted values are present, placeholder forms are not.
    self.assertIn("dhcp net0", commands)
    self.assertIn(
        "set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00",
        commands)
    self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
    self.assertNotIn(
        "kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}",
        commands)
    self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
    self.assertIn("boot", commands)
class UnmountAction(RetryAction):
    """Provides retry support around a single Unmount action."""

    def __init__(self):
        super(UnmountAction, self).__init__()
        self.name = "umount-retry"
        self.description = "retry support for umount"
        self.summary = "retry umount"

    def populate(self, parameters):
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        pipeline.add_action(Unmount())
        self.internal_pipeline = pipeline
class InternalRetryAction(RetryAction):
    """Internal retry action for unit tests only."""

    def __init__(self):
        super(TestAction.InternalRetryAction, self).__init__()
        self.name = "internal-retry-action"
        self.section = 'internal'
        self.description = "internal, do not use outside unit tests"
        self.summary = "internal retry action for unit tests"

    def populate(self, parameters):
        # Note: parameters are passed to add_action, not to the Pipeline.
        self.internal_pipeline = Pipeline(parent=self, job=self.job)
        self.internal_pipeline.add_action(TestAction.FakeAction(), parameters)
class InternalRetryAction(RetryAction):
    """Retry action used internally by the unit tests."""

    def __init__(self):
        super(TestAction.InternalRetryAction, self).__init__()
        self.section = 'internal'
        self.name = "internal-retry-action"
        self.summary = "internal retry action for unit tests"
        self.description = "internal, do not use outside unit tests"

    def populate(self, parameters):
        # The pipeline is built without parameters; they go to add_action.
        self.internal_pipeline = Pipeline(parent=self, job=self.job)
        self.internal_pipeline.add_action(TestAction.FakeAction(), parameters)
class BootPyOCD(BootAction):
    """Boot a pyocd image, delegating the work to BootPyOCDRetry."""

    def __init__(self):
        super(BootPyOCD, self).__init__()
        self.name = 'boot-pyocd-image'
        self.summary = "boot pyocd image with retry"
        self.description = "boot pyocd image with retry"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(BootPyOCDRetry())
class BootCMSIS(BootAction):
    """Boot a cmsis usb image, delegating the work to BootCMSISRetry."""

    def __init__(self):
        super(BootCMSIS, self).__init__()
        self.name = 'boot-cmsis'
        self.summary = "boot cmsis usb image"
        self.description = "boot cmsis usb image"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(BootCMSISRetry())
class BootMonitoredQemu(BootAction):
    """Monitored QEMU boot, wrapping BootQemuRetry in its own pipeline."""

    def __init__(self):
        super(BootMonitoredQemu, self).__init__()
        self.name = 'boot_image_monitor'
        self.description = "boot monitored image with retry"
        self.summary = "boot monitor with retry"

    def populate(self, parameters):
        pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        pipeline.add_action(BootQemuRetry())
        self.internal_pipeline = pipeline
class BootMonitoredPyOCDRetry(RetryAction):
    """Monitored pyocd boot with retry support."""

    def __init__(self):
        super(BootMonitoredPyOCDRetry, self).__init__()
        self.name = 'boot_pyocd_image'
        self.summary = "boot pyocd image"
        self.description = "boot pyocd image using the command line interface"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Flash/monitor first, then connect to the device.
        self.internal_pipeline.add_action(MonitorPyOCDAction())
        self.internal_pipeline.add_action(ConnectDevice())
def test_overlay_action(self):  # pylint: disable=too-many-locals
    """Check substitution of ipxe ramdisk command placeholders for x86."""
    parameters = {
        'device_type': 'x86',
        'job_name': 'ipxe-pipeline',
        'job_timeout': '15m',
        'action_timeout': '5m',
        'priority': 'medium',
        'output_dir': mkdtemp(),
        'actions': {
            'boot': {
                'method': 'ipxe',
                'commands': 'ramdisk',
                'prompts': ['linaro-test', 'root@debian:~#']
            },
            'deploy': {
                'ramdisk': 'initrd.gz',
                'kernel': 'zImage',
            }
        }
    }
    device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/x86-01.yaml'))
    job = Job(4212, None, parameters)
    job.device = device
    pipeline = Pipeline(job=job, parameters=parameters['actions']['boot'])
    job.set_pipeline(pipeline)
    overlay = BootloaderCommandOverlay()
    pipeline.add_action(overlay)
    try:
        ip_addr = dispatcher_ip()
    except InfrastructureError as exc:
        raise RuntimeError("Unable to get dispatcher IP address: %s" % exc)
    parsed = []
    kernel = parameters['actions']['deploy']['kernel']
    ramdisk = parameters['actions']['deploy']['ramdisk']
    substitution_dictionary = {
        '{SERVER_IP}': ip_addr,
        '{RAMDISK}': ramdisk,
        '{KERNEL}': kernel,
        '{LAVA_MAC}': "00:00:00:00:00:00"
    }
    params = device['actions']['boot']['methods']
    params['ipxe']['ramdisk']['commands'] = substitute(params['ipxe']['ramdisk']['commands'], substitution_dictionary)
    commands = params['ipxe']['ramdisk']['commands']
    self.assertIs(type(commands), list)
    # Substituted values are present, placeholder forms are not.
    self.assertIn("dhcp net0", commands)
    self.assertIn("set console console=ttyS0,115200n8 lava_mac=00:00:00:00:00:00", commands)
    self.assertIn("set extraargs init=/sbin/init ip=dhcp", commands)
    self.assertNotIn("kernel tftp://{SERVER_IP}/{KERNEL} ${extraargs} ${console}", commands)
    self.assertNotIn("initrd tftp://{SERVER_IP}/{RAMDISK}", commands)
    self.assertIn("boot", commands)
def test_composite_action_aggregates_errors_from_sub_actions(self):  # pylint: disable=invalid-name
    """Errors set on each sub-action must surface on the pipeline."""
    # Unable to call Action.validate() as there is no job in this unit test
    sub1 = Action()
    sub1.name = "sub1"
    sub1.__errors__ = [1]
    sub2 = Action()
    sub2.name = "sub2"
    sub2.__errors__ = [2]
    pipe = Pipeline()
    pipe.add_action(sub1)
    pipe.add_action(sub2)
    self.assertEqual([1, 2], pipe.errors)
class BootPyOCDRetry(RetryAction):
    """Flash via pyocd and connect to the device, with retry support."""

    def __init__(self):
        super(BootPyOCDRetry, self).__init__()
        self.name = 'boot-pyocd-image'
        self.summary = "boot pyocd image"
        self.description = "boot pyocd image using the command line interface"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Flash first, then connect to the freshly-flashed device.
        self.internal_pipeline.add_action(FlashPyOCDAction())
        self.internal_pipeline.add_action(ConnectDevice())
def test_composite_action_aggregates_errors_from_sub_actions(self):  # pylint: disable=invalid-name
    """A pipeline reports the concatenated errors of its sub-actions."""
    # Unable to call Action.validate() as there is no job in this unit test
    first = Action()
    first.name = "sub1"
    first.__errors__ = [1]
    second = Action()
    second.name = "sub2"
    second.__errors__ = [2]
    pipe = Pipeline()
    pipe.add_action(first)
    pipe.add_action(second)
    self.assertEqual([1, 2], pipe.errors)
class BootVMAction(BootAction):
    """Boot a VM on a host over a dynamically-created SSH connection."""

    def __init__(self):
        super(BootVMAction, self).__init__()
        self.name = "boot-vm"
        self.summary = "boot a VM on a host"
        self.description = "Execute commands to boot a VM"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(ConnectDynamicSsh())

    def validate(self):
        super(BootVMAction, self).validate()
        # FIX: this was a Python 2 `print` *statement* (a syntax error on
        # Python 3) left in as debug output during validation. Route the
        # same information through the action logger instead.
        # FIXME: decide how self.parameters["commands"] should actually be
        # validated/used here.
        self.logger.debug("###### FIXME ######## %s", self.parameters["commands"])
class ScpOverlay(DeployAction):
    """
    Prepares the overlay and copies it to the target
    """

    def __init__(self):
        super(ScpOverlay, self).__init__()
        self.name = "scp-overlay"
        self.summary = "copy overlay to device"
        self.description = "prepare overlay and scp to device"
        self.section = 'deploy'
        self.items = []  # filled in during validate()

    def validate(self):
        super(ScpOverlay, self).validate()
        # Parameter keys which may each carry a file to download.
        self.items = [
            'firmware', 'kernel', 'dtb', 'rootfs', 'modules'
        ]
        # lava_test_results_dir carries a '%s' placeholder for the job id.
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # tar_flags default to empty when the deployment data omits them.
        tar_flags = parameters['deployment_data']['tar_flags'] if 'tar_flags' in parameters['deployment_data'].keys() else ''
        self.set_common_data(self.name, 'tar_flags', tar_flags)
        self.internal_pipeline.add_action(OverlayAction())
        # Queue a retrying download for each item present in the parameters.
        for item in self.items:
            if item in parameters:
                download = DownloaderAction(item, path=self.mkdtemp())
                download.max_retries = 3
                self.internal_pipeline.add_action(download, parameters)
                self.set_common_data('scp', item, True)
        # we might not have anything to download, just the overlay to push
        self.internal_pipeline.add_action(PrepareOverlayScp())
        # prepare the device environment settings in common data for enabling in the boot step
        self.internal_pipeline.add_action(DeployDeviceEnvironment())
class ScpOverlay(DeployAction):
    """
    Prepares the overlay and copies it to the target
    """
    def __init__(self):
        super(ScpOverlay, self).__init__()
        self.name = "scp-overlay"
        self.summary = "copy overlay to device"
        self.description = "prepare overlay and scp to device"
        self.section = 'deploy'
        self.items = []  # set in validate(): the artifact keys we know how to copy

    def validate(self):
        super(ScpOverlay, self).validate()
        self.items = [
            'firmware', 'kernel', 'dtb', 'rootfs', 'modules'
        ]
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        deployment_data = parameters['deployment_data']
        # optional tar flags, empty when the deployment data has none
        tar_flags = deployment_data['tar_flags'] if 'tar_flags' in deployment_data.keys() else ''
        self.set_common_data(self.name, 'tar_flags', tar_flags)
        self.internal_pipeline.add_action(OverlayAction())
        for item in [entry for entry in self.items if entry in parameters]:
            download = DownloaderAction(item, path=self.mkdtemp())
            download.max_retries = 3
            self.internal_pipeline.add_action(download, parameters)
            self.set_common_data('scp', item, True)
        # we might not have anything to download, just the overlay to push
        self.internal_pipeline.add_action(PrepareOverlayScp())
        # prepare the device environment settings in common data for enabling in the boot step
        self.internal_pipeline.add_action(DeployDeviceEnvironment())
class BootloaderDefaultsRetry(BootAction):
    """Retry action which lets the bootloader autoboot, then handles login."""

    def __init__(self):
        super(BootloaderDefaultsRetry, self).__init__()
        self.name = "uboot-retry"
        self.description = "interactive uboot retry action"
        self.summary = "uboot commands with retry"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # establish a new connection before trying the reset
        self.internal_pipeline.add_action(ResetDevice())
        self.internal_pipeline.add_action(MonitorBootloaderAutoBoot())  # wait
        # and set prompt to the uboot prompt
        # Add AutoLoginAction unconditionally as this action does nothing if
        # the configuration does not contain 'auto_login'
        self.internal_pipeline.add_action(AutoLoginAction())
        self.internal_pipeline.add_action(ExpectShellSession())  # wait
        self.internal_pipeline.add_action(ExportDeviceEnvironment())

    def validate(self):
        super(BootloaderDefaultsRetry, self).validate()
        # publish the bootloader prompt from the device configuration so that
        # other actions can wait on it
        self.set_namespace_data(
            action=self.name,
            label='bootloader_prompt',
            key='prompt',
            value=self.job.device['actions']['boot']['methods']
            ['bootloader-defaults']['parameters']['bootloader_prompt'])

    def run(self, connection, max_end_time, args=None):
        """Wait for the shell prompt and record the boot result in namespace data."""
        connection = super(BootloaderDefaultsRetry, self).run(connection, max_end_time, args)
        self.logger.debug("Setting default test shell prompt")
        if not connection.prompt_str:
            connection.prompt_str = self.parameters['prompts']
        self.logger.debug(connection.prompt_str)
        connection.timeout = self.connection_timeout
        self.wait(connection)
        if self.errors:
            # Fix: only log at error level when something actually failed;
            # the original unconditionally logged an empty errors list at
            # error severity on every successful boot.
            self.logger.error(self.errors)
        res = 'failed' if self.errors else 'success'
        self.set_namespace_data(action='boot', label='shared', key='boot-result', value=res)
        self.set_namespace_data(action='shared', label='shared', key='connection', value=connection)
        return connection
class UBootAction(BootAction):
    """
    Wraps the Retry Action to allow for actions which precede
    the reset, e.g. Connect.
    """
    def __init__(self):
        super(UBootAction, self).__init__()
        self.name = "uboot-action"
        self.description = "interactive uboot action"
        self.summary = "pass uboot commands"

    def validate(self):
        super(UBootAction, self).validate()
        if 'type' not in self.parameters:
            return
        self.logger.warning(
            "Specifying a type in the boot action is deprecated. "
            "Please specify the kernel type in the deploy parameters.")

    def populate(self, parameters):
        # customize the device configuration for this job, then connect and retry
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        for sub_action in [UBootSecondaryMedia(),
                           BootloaderCommandOverlay(),
                           ConnectDevice(),
                           UBootRetry()]:
            self.internal_pipeline.add_action(sub_action)
class FinalizeAction(Action):

    def __init__(self):
        """
        The FinalizeAction is always added as the last Action in the top level pipeline by the parser.
        The tasks include finalising the connection (whatever is the last connection in the pipeline)
        and writing out the final pipeline structure containing the results as a logfile.
        """
        super(FinalizeAction, self).__init__()
        self.name = "finalize"
        self.section = 'finalize'
        self.summary = "finalize the job"
        self.description = "finish the process and cleanup"
        self.ran = False  # guards against a second run from cleanup()
        self.parameters['namespace'] = 'common'

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(job=self.job, parent=self, parameters=parameters)
        self.internal_pipeline.add_action(PowerOff())

    def run(self, connection, max_end_time, args=None):
        """
        The pexpect.spawn here is the ShellCommand not the ShellSession connection object.
        So call the finalise() function of the connection which knows about the raw_connection inside.
        The internal_pipeline of FinalizeAction is special - it needs to run even in the
        case of error / cancel.
        """
        self.ran = True
        connection = super(FinalizeAction, self).run(connection, max_end_time, args)
        if connection:
            connection.finalise()
        # Finalize all connections associated with each namespace.
        shared_connection = self.get_namespace_data(
            action='shared', label='shared', key='connection', deepcopy=False)
        if shared_connection:
            shared_connection.finalise()
        for protocol in self.job.protocols:
            protocol.finalise_protocol(self.job.device)

    def cleanup(self, connection):
        # avoid running Finalize in validate or unit tests
        if not self.ran and self.job.started:
            self.run(connection, None, None)
class BootVMAction(BootAction):
    """Boot a VM on a host by establishing a dynamic ssh connection."""

    def __init__(self):
        super(BootVMAction, self).__init__()
        self.name = "boot-vm"
        self.summary = "boot a VM on a host"
        self.description = "Execute commands to boot a VM"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(ConnectDynamicSsh())

    def validate(self):
        super(BootVMAction, self).validate()
        # Fix: the original used a bare debug print statement which writes to
        # stdout (corrupting dispatcher output) and raises KeyError when
        # 'commands' is absent. Validate the parameter and log instead.
        if 'commands' not in self.parameters:
            self.errors = "%s: missing boot commands" % self.name
        else:
            # FIXME: the boot commands are not yet acted upon by this action.
            self.logger.debug("FIXME: unused boot commands: %s", self.parameters['commands'])
class BootloaderDefaultsAction(BootAction):
    """
    Wraps the Retry Action to allow for actions which precede
    the reset, e.g. Connect.
    """
    def __init__(self):
        super(BootloaderDefaultsAction, self).__init__()
        self.name = "bootloader-defaults-action"
        self.description = "Autorun precanned bootloader entry"
        self.summary = "allow bootloader to run"

    def populate(self, parameters):
        # customize the device configuration for this job: connect first,
        # then let the retried autoboot sequence take over.
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        for sub_action in [ConnectDevice(), BootloaderDefaultsRetry()]:
            self.internal_pipeline.add_action(sub_action)
class DeployMonitoredAction(DeployAction):
    """Deploy images for devices which are monitored rather than POSIX shells."""

    def __init__(self):
        super(DeployMonitoredAction, self).__init__()
        self.name = 'deploy-monitor'
        self.description = "deploy images without POSIX"
        self.summary = "deploy without requiring POSIX"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        path = self.mkdtemp()
        # iterate the dict directly instead of materialising .keys()
        for image in parameters['images']:
            if image == 'yaml_line':
                # parser bookkeeping entry, not a downloadable image
                continue
            download = DownloaderAction(image, path)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
def parse(self, content, device, output_dir=None):
    """
    Build a Job with a populated Pipeline from the YAML job definition.

    :param content: YAML job definition text/stream, parsed with a customised loader
    :param device: the target device; also receives deployment_data as a side effect
    :param output_dir: directory recorded in the job parameters for result output
    :return: the constructed Job with its pipeline set
    """
    # customised loader hooks (compose_node / construct_mapping) are presumably
    # used to record YAML line numbers on actions -- TODO confirm against the
    # class definition, which is outside this view.
    self.loader = yaml.Loader(content)
    self.loader.compose_node = self.compose_node
    self.loader.construct_mapping = self.construct_mapping
    data = self.loader.get_single_data()
    job = Job(data)
    job.device = device
    job.parameters['output_dir'] = output_dir
    pipeline = Pipeline(job=job)
    for action_data in data['actions']:
        # remove the loader bookkeeping key before dispatching on action names
        line = action_data.pop('yaml_line', None)
        for name in action_data:
            if name == "deploy":
                # allow the classmethod to check the parameters
                deploy = Deployment.select(device, action_data[name])(pipeline)
                deploy.action.parameters = action_data[name]  # still need to pass the parameters to the instance
                # NOTE(review): parameters is assigned three times below; this
                # only makes sense if the Action.parameters setter merges into
                # an existing dict rather than replacing it -- TODO confirm.
                if 'test' in data['actions']:
                    deploy.action.parameters = action_data['test']
                deploy.action.yaml_line = line
                device.deployment_data = deployment_data.get(deploy.action.parameters['os'])
                deploy.action.parameters = {'deployment_data': device.deployment_data}
            else:
                action_class = Action.find(name)
                # select the specific action of this class for this job
                action = action_class()
                # put parameters (like rootfs_type, results_dir) into the actions.
                if type(action_data[name]) == dict:
                    action.parameters = action_data[name]
                elif name == "commands":
                    # FIXME
                    pass
                elif type(action_data[name]) == list:
                    # each list entry is applied in turn; later entries
                    # presumably merge with earlier ones -- TODO confirm
                    for param in action_data[name]:
                        action.parameters = param
                action.summary = name
                pipeline.add_action(action)
                # uncomment for debug
                # print action.parameters
    # the only parameters sent to the job are job parameters
    # like job_name, logging_level or target_group.
    data.pop('actions')
    data['output_dir'] = output_dir
    job.set_pipeline(pipeline)
    return job
class BootQEMUImageAction(BootAction):
    """Retry wrapper around the QEMU boot, followed by login and environment setup."""

    def __init__(self):
        super(BootQEMUImageAction, self).__init__()
        self.name = 'boot_image_retry'
        self.description = "boot image with retry"
        self.summary = "boot with retry"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Add AutoLoginAction unconditionally as this action does nothing if
        # the configuration does not contain 'auto_login'
        for sub_action in [BootQemuRetry(),
                           LinuxKernelMessages(),
                           AutoLoginAction(),
                           ExpectShellSession(),
                           ExportDeviceEnvironment()]:
            self.internal_pipeline.add_action(sub_action)
class PrepareKernelAction(Action):
    """
    Populate the pipeline with a kernel conversion action, if needed
    """
    def __init__(self):
        super(PrepareKernelAction, self).__init__()
        self.name = "prepare-kernel"
        self.summary = "add a kernel conversion"
        self.description = "populates the pipeline with a kernel conversion action"

    def populate(self, parameters):
        # the logic here can be upgraded in future if needed with more parameters to the deploy.
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        boot_methods = self.job.device['actions']['boot']['methods']
        if 'u-boot' in boot_methods:
            self.internal_pipeline.add_action(UBootPrepareKernelAction())
class DeployMonitoredAction(DeployAction):
    """Deploy images for monitored (non-POSIX) devices by downloading each image."""

    def __init__(self):
        super(DeployMonitoredAction, self).__init__()
        self.name = 'deploy-monitor'
        self.description = "deploy images without POSIX"
        self.summary = "deploy without requiring POSIX"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        path = self.mkdtemp()
        # iterate the dict directly instead of materialising .keys()
        for image in parameters['images']:
            if image == 'yaml_line':
                # parser bookkeeping entry, not a downloadable image
                continue
            download = DownloaderAction(image, path)
            download.max_retries = 3  # overridden by failure_retry in the parameters, if set.
            self.internal_pipeline.add_action(download)
class FinalizeAction(Action):
    def __init__(self):
        """
        The FinalizeAction is always added as the last Action in the top level pipeline by the parser.
        The tasks include finalising the connection (whatever is the last connection in the pipeline)
        and writing out the final pipeline structure containing the results as a logfile.
        """
        super(FinalizeAction, self).__init__()
        self.name = "finalize"
        self.section = 'finalize'
        self.summary = "finalize the job"
        self.description = "finish the process and cleanup"

    def populate(self, parameters):
        # PowerOff must run even after failure, hence it lives in this
        # special internal pipeline (see run() docstring below).
        self.internal_pipeline = Pipeline(job=self.job, parent=self, parameters=parameters)
        self.internal_pipeline.add_action(PowerOff())

    def run(self, connection, args=None):
        """
        The pexpect.spawn here is the ShellCommand not the ShellSession connection object.
        So call the finalise() function of the connection which knows about the raw_connection inside.
        The internal_pipeline of FinalizeAction is special - it needs to run even in the
        case of error / cancel.
        """
        connection = super(FinalizeAction, self).run(connection, args)
        if connection:
            connection.finalise()
        # give each protocol the chance to shut down cleanly
        for protocol in self.job.protocols:
            protocol.finalise_protocol()
        # Determine the overall job status: this action's own errors take
        # precedence, then any errors accumulated across the whole pipeline.
        if self.errors:
            self.results = {'status': self.errors}
            self.logger.debug('status: %s' % self.errors)
        elif self.job.pipeline.errors:
            self.results = {'status': "Incomplete"}
            self.errors = "Incomplete"
            self.logger.error({
                'Status': 'Incomplete',
                'Errors': self.job.pipeline.errors
            })
        else:
            self.results = {'status': "Complete"}
            self.logger.debug("Status: Complete")
        # persist the full pipeline description (including results) as YAML
        with open("%s/results.yaml" % self.job.parameters['output_dir'], 'w') as results:
            results.write(yaml.dump(self.job.pipeline.describe()))
def test_multi_deploy(self):
    """
    Check that multiple deploy actions in one job each get their own
    parameters and that per-deploy data is reset between deployments.
    """
    self.assertIsNotNone(self.parsed_data)
    job = Job(4212, None, self.parsed_data)
    pipeline = Pipeline(job=job)
    device = TestMultiDeploy.FakeDevice()
    self.assertIsNotNone(device)
    job.device = device
    job.parameters['output_dir'] = mkdtemp()
    job.set_pipeline(pipeline)
    counts = {}
    # build the pipeline by hand: interleave a ResetContext before every
    # deploy action after the first one
    for action_data in self.parsed_data['actions']:
        for name in action_data:
            counts.setdefault(name, 1)
            if counts[name] >= 2:
                reset_context = ResetContext()
                reset_context.section = 'deploy'
                pipeline.add_action(reset_context)
            parameters = action_data[name]
            test_deploy = TestMultiDeploy.TestDeploy(pipeline, parameters, job)
            # freshly created deploy actions start with empty common data
            self.assertEqual(
                {'common': {}},
                test_deploy.action.data
            )
            counts[name] += 1
    # check that only one action has the example set
    self.assertEqual(
        ['nowhere'],
        [detail['deploy']['example'] for detail in self.parsed_data['actions'] if 'example' in detail['deploy']]
    )
    self.assertEqual(
        ['faked', 'valid'],
        [detail['deploy']['parameters'] for detail in self.parsed_data['actions'] if 'parameters' in detail['deploy']]
    )
    # expected interleaving: deploy, reset, deploy, reset, deploy
    self.assertIsInstance(pipeline.actions[0], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[1], ResetContext)
    self.assertIsInstance(pipeline.actions[2], TestMultiDeploy.TestDeployAction)
    self.assertIsInstance(pipeline.actions[3], ResetContext)
    self.assertIsInstance(pipeline.actions[4], TestMultiDeploy.TestDeployAction)
    job.validate()
    self.assertEqual([], job.pipeline.errors)
    job.run()
    # earlier deploy actions must NOT retain their own parameters after
    # later deployments have reset the context
    self.assertNotEqual(pipeline.actions[0].data, {'common': {}, 'fake_deploy': pipeline.actions[0].parameters})
    self.assertNotEqual(pipeline.actions[1].data, {'common': {}, 'fake_deploy': pipeline.actions[1].parameters})
    self.assertEqual(pipeline.actions[2].data, {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
    # check that values from previous DeployAction run actions have been cleared
    self.assertEqual(pipeline.actions[4].data, {'common': {}, 'fake_deploy': pipeline.actions[4].parameters})
class SshAction(RetryAction):
    """
    Simple action to wrap AutoLoginAction and ExpectShellSession
    """
    def __init__(self):
        super(SshAction, self).__init__()
        self.name = "login-ssh"
        self.summary = "login over ssh"
        self.description = "connect over ssh and ensure a shell is found"
        self.section = 'boot'

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # connect, log in, obtain a shell, export environment, then unpack the overlay
        for step in [ConnectSsh(),
                     AutoLoginAction(),
                     ExpectShellSession(),
                     ExportDeviceEnvironment(),
                     ScpOverlayUnpack()]:
            self.internal_pipeline.add_action(step)
class SshAction(RetryAction):
    """
    Simple action to wrap AutoLoginAction and ExpectShellSession
    """
    def __init__(self):
        super(SshAction, self).__init__()
        self.name = "login-ssh"
        self.summary = "login over ssh"
        self.description = "connect over ssh and ensure a shell is found"
        self.section = 'boot'

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # build the login sequence in order: connection, login, shell,
        # environment export and overlay unpack
        steps = [
            ConnectSsh(),
            AutoLoginAction(),
            ExpectShellSession(),
            ExportDeviceEnvironment(),
            ScpOverlayUnpack(),
        ]
        for step in steps:
            self.internal_pipeline.add_action(step)
class FinalizeAction(Action):
    def __init__(self):
        """
        The FinalizeAction is always added as the last Action in the top level pipeline by the parser.
        The tasks include finalising the connection (whatever is the last connection in the pipeline)
        and writing out the final pipeline structure containing the results as a logfile.
        """
        super(FinalizeAction, self).__init__()
        self.name = "finalize"
        self.section = 'finalize'
        self.summary = "finalize the job"
        self.description = "finish the process and cleanup"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(job=self.job, parent=self, parameters=parameters)
        self.internal_pipeline.add_action(PowerOff())

    def run(self, connection, args=None):
        """
        The pexpect.spawn here is the ShellCommand not the ShellSession connection object.
        So call the finalise() function of the connection which knows about the raw_connection inside.
        The internal_pipeline of FinalizeAction is special - it needs to run even in the
        case of error / cancel.
        """
        connection = super(FinalizeAction, self).run(connection, args)
        if connection:
            connection.finalise()
        for protocol in self.job.protocols:
            protocol.finalise_protocol()
        # decide the final job status: this action's errors win, then any
        # pipeline-level errors, otherwise the job completed cleanly
        if self.errors:
            self.logger.debug('status: %s' % self.errors)
            self.results = {'status': self.errors}
        elif self.job.pipeline.errors:
            self.errors = "Incomplete"
            self.results = {'status': "Incomplete"}
            self.logger.error({
                'Status': 'Incomplete',
                'Errors': self.job.pipeline.errors})
        else:
            self.logger.debug("Status: Complete")
            self.results = {'status': "Complete"}
        # persist the described pipeline (with results) to the output directory
        results_path = "%s/results.yaml" % self.job.parameters['output_dir']
        with open(results_path, 'w') as results_file:
            results_file.write(yaml.dump(self.job.pipeline.describe()))
class BootLxcAction(BootAction):
    """
    Provide for auto_login parameters in this boot stanza and re-establish the
    connection after boot.
    """
    def __init__(self):
        super(BootLxcAction, self).__init__()
        self.name = "lxc_boot"
        self.summary = "lxc boot"
        self.description = "lxc boot into the system"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        # Add AutoLoginAction unconditionally as this action does nothing if
        # the configuration does not contain 'auto_login'
        for step in [LxcStartAction(),
                     ConnectLxc(),
                     AutoLoginAction(),
                     ExpectShellSession(),
                     ExportDeviceEnvironment()]:
            self.internal_pipeline.add_action(step)
class ScpOverlay(DeployAction):
    """
    Prepares the overlay and copies it to the target
    """
    def __init__(self):
        super(ScpOverlay, self).__init__()
        self.name = "scp-overlay"
        self.summary = "copy overlay to device"
        self.description = "prepare overlay and scp to device"
        self.section = 'deploy'
        self.items = []
        # Fix: initialise scp_dir before the try block. Previously, when
        # mkdtemp() raised OSError the attribute was never assigned, so
        # populate() crashed with AttributeError instead of degrading.
        self.scp_dir = None
        try:
            self.scp_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
        except OSError:
            # allows for unit tests to operate as normal user.
            self.suffix = '/'

    def validate(self):
        super(ScpOverlay, self).validate()
        self.items = [
            'firmware', 'kernel', 'dtb', 'rootfs', 'modules'
        ]
        lava_test_results_dir = self.parameters['deployment_data']['lava_test_results_dir']
        # FIXME: apply job_id to other overlay classes when settings lava_test_results_dir
        self.data['lava_test_results_dir'] = lava_test_results_dir % self.job.job_id

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(OverlayAction())
        for item in self.items:
            if item in parameters:
                download = DownloaderAction(item, path=self.scp_dir)
                download.max_retries = 3
                self.internal_pipeline.add_action(download, parameters)
                self.set_common_data('scp', item, True)
        # we might not have anything to download, just the overlay to push
        self.internal_pipeline.add_action(PrepareOverlayScp())
        # prepare the device environment settings in common data for enabling in the boot step
        self.internal_pipeline.add_action(DeployDeviceEnvironment())
        scp = Scp('overlay')
        self.internal_pipeline.add_action(scp)
class SafeAction(Action):
    """
    Isolated test action which does not trigger the adjuvant
    """
    def __init__(self):
        super(TestAdjuvant.SafeAction, self).__init__()
        self.name = "passing-action"
        self.summary = "fake action without adjuvant"
        self.description = "fake action runs without calling adjuvant"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestAdjuvant.FakeAdjuvant())

    def run(self, connection, args=None):
        # a unit-test action must never be handed a live connection
        if connection:
            raise RuntimeError("Fake action not meant to have a real connection")
        fake_connection = TestAdjuvant.FakeConnection()
        # a passing result marks the adjuvant key False so the adjuvant is skipped
        self.results = {'status': "passed"}
        self.data[TestAdjuvant.FakeAdjuvant.key()] = False
        return fake_connection
class FakeAction(Action):
    """
    Isolated Action which can be used to generate artificial exceptions.
    """
    def __init__(self):
        super(TestTimeout.FakeAction, self).__init__()
        self.name = "fake-action"
        self.summary = "fake action for unit tests"
        self.description = "fake, do not use outside unit tests"

    def populate(self, parameters):
        self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
        self.internal_pipeline.add_action(TestAdjuvant.FakeAdjuvant())

    def run(self, connection, args=None):
        # a unit-test action must never be handed a live connection
        if connection:
            raise RuntimeError("Fake action not meant to have a real connection")
        # deliberately slow so that timeout tests can trip over this action
        time.sleep(3)
        self.results = {'status': "failed"}
        return connection
def test_create_internal_pipeline(self):
    """Levels are assigned on add; an internal pipe needs an added parent action."""
    starter = Action()
    starter.name = "internal_pipe"
    starter.description = "test action only"
    starter.summary = "starter"
    pipe = Pipeline()
    pipe.add_action(starter)
    self.assertEqual(len(pipe.children[pipe]), 1)
    self.assertEqual(starter.level, "1")
    child = Action()
    child.name = "child_action"
    child.summary = "child"
    child.description = "action implementing an internal pipe"
    # an internal pipeline cannot be built around an action which has not
    # yet been added to a parent pipeline
    with self.assertRaises(RuntimeError):
        Pipeline(child)
    pipe.add_action(child)
    self.assertEqual(child.level, "2")
    self.assertEqual(len(pipe.children[pipe]), 2)
    # a formal RetryAction would contain a pre-built pipeline which can be inserted directly
    retry_pipe = Pipeline(child)
    inner = Action()
    inner.name = "inside_action"
    inner.description = "action inside the internal pipe"
    inner.summary = "child"
    retry_pipe.add_action(inner)
    self.assertEqual(len(retry_pipe.children[retry_pipe]), 1)
    self.assertEqual(inner.level, "2.1")