def __init__(self, command, lava_timeout, logger=None, cwd=None):
    """
    Spawn ``command`` under pexpect with LAVA logging and timeout applied.

    :param command: shell command string to spawn.
    :param lava_timeout: a ``Timeout`` instance supplying the duration.
    :param logger: logger, wrapped in ``ShellLogger`` to capture output.
    :param cwd: optional working directory for the spawned process.
    :raises LAVABug: if ``lava_timeout`` is not a Timeout or logger is missing.
    """
    if not lava_timeout or not isinstance(lava_timeout, Timeout):
        raise LAVABug(
            "ShellCommand needs a timeout set by the calling Action")
    if not logger:
        raise LAVABug("ShellCommand needs a logger")
    # Build the spawn arguments once instead of duplicating the whole
    # pexpect.spawn.__init__ call per interpreter version; only the
    # python3 pexpect API accepts the 'encoding' keyword.
    spawn_kwargs = {
        'timeout': lava_timeout.duration,
        'cwd': cwd,
        'logfile': ShellLogger(logger),
    }
    if sys.version_info[0] == 3:
        spawn_kwargs['encoding'] = 'utf-8'
    pexpect.spawn.__init__(self, command, **spawn_kwargs)
    self.name = "ShellCommand"
    self.logger = logger
    # set a default newline character, but allow actions to override as necessary
    self.linesep = LINE_SEPARATOR
    self.lava_timeout = lava_timeout
def run(self, connection, max_end_time, args=None):
    """
    Compress the prepared overlay directory into a .tar.gz archive.

    The archive path is published via namespace data
    (action=self.name, label='output', key='file') for later actions.
    :raises LAVABug: if the overlay location is missing or absent on disk.
    :raises InfrastructureError: if the tarball cannot be created.
    """
    tarball = os.path.join(self.mkdtemp(), "overlay-%s.tar.gz" % self.level)
    overlay_dir = self.get_namespace_data(action='test', label='shared', key='location')
    results_dir = self.get_namespace_data(action='test', label='results', key='lava_test_results_dir')
    self.set_namespace_data(action='test', label='shared', key='output', value=tarball)
    if not overlay_dir:
        raise LAVABug("Missing lava overlay location")
    if not os.path.exists(overlay_dir):
        raise LAVABug("Unable to find overlay location")
    if not self.valid:
        self.logger.error(self.errors)
        return connection
    connection = super(CompressOverlay, self).run(connection, max_end_time, args)
    with chdir(overlay_dir):
        try:
            with tarfile.open(tarball, "w:gz") as tar:
                tar.add(".%s" % results_dir)
                # ssh authorization support
                if os.path.exists('./root/'):
                    tar.add(".%s" % '/root/')
        except tarfile.TarError as exc:
            raise InfrastructureError("Unable to create lava overlay tarball: %s" % exc)
    self.set_namespace_data(action=self.name, label='output', key='file', value=tarball)
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Ship the configured SSH identity in the overlay and authorize its key.

    When no identity file is configured this is a no-op. When the action
    is not active (secondary connections) the key files are copied into
    the overlay but /root/.ssh/authorized_keys is left untouched.
    :raises LAVABug: if the overlay location is missing or absent on disk.
    """
    connection = super(SshAuthorize, self).run(connection, max_end_time, args)
    if not self.identity_file:
        # idempotency
        self.logger.debug("No authorisation required.")
        return connection
    # add the authorization keys to the overlay
    overlay_root = self.get_namespace_data(action='test', label='shared', key='location')
    results_dir = self.get_namespace_data(action='test', label='results', key='lava_test_results_dir')
    if not overlay_root:
        raise LAVABug("Missing lava overlay location")
    if not os.path.exists(overlay_root):
        raise LAVABug("Unable to find overlay location")
    results_path = os.path.abspath("%s/%s" % (overlay_root, results_dir))
    identity_dest = '%s/%s' % (results_path, os.path.basename(self.identity_file))
    pub_key = "%s.pub" % self.identity_file
    shutil.copyfile(self.identity_file, identity_dest)
    shutil.copyfile(pub_key, "%s.pub" % identity_dest)
    if not self.active:
        # secondary connections only
        return connection
    self.logger.info("Adding SSH authorisation for %s.pub", os.path.basename(identity_dest))
    ssh_dir = os.path.join(overlay_root, 'root', '.ssh')
    if not os.path.exists(ssh_dir):
        os.makedirs(ssh_dir, 0o755)
    # if /root/.ssh/authorized_keys exists in the test image it will be overwritten
    # the key exists in the lava_test_results_dir to allow test writers to work around this
    # after logging in via the identity_file set here
    authorized_keys_path = os.path.join(ssh_dir, 'authorized_keys')
    self.logger.debug("Copying %s to %s", pub_key, authorized_keys_path)
    shutil.copyfile(pub_key, authorized_keys_path)
    os.chmod(authorized_keys_path, 0o600)
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Writes out file contents from lists, across multiple lines
    VAR="VAL1\n\
    VAL2\n\
    "
    The \n and \ are used to avoid unwanted whitespace, so are escaped.
    \n becomes \\n, \ becomes \\, which itself then needs \n to output:
    VAL1
    VAL2
    """
    if not self.params:
        # nothing configured for vland in this job - skip quietly
        self.logger.debug("skipped %s", self.name)
        return connection
    # shared overlay data published by earlier actions
    location = self.get_namespace_data(action='test', label='shared', key='location')
    lava_test_results_dir = self.get_namespace_data(action='test', label='results', key='lava_test_results_dir')
    shell = self.get_namespace_data(action='test', label='shared', key='lava_test_sh_cmd')
    if not location:
        raise LAVABug("Missing lava overlay location")
    if not os.path.exists(location):
        raise LAVABug("Unable to find overlay location")
    lava_path = os.path.abspath("%s/%s" % (location, lava_test_results_dir))
    # generic lava-vland-* helper scripts shipped with the dispatcher
    scripts_to_copy = glob.glob(os.path.join(self.lava_vland_test_dir, 'lava-*'))
    self.logger.debug(self.lava_vland_test_dir)
    self.logger.debug({"lava_path": lava_path, "scripts": scripts_to_copy})
    for fname in scripts_to_copy:
        with open(fname, 'r') as fin:
            foutname = os.path.basename(fname)
            output_file = '%s/bin/%s' % (lava_path, foutname)
            self.logger.debug("Creating %s", output_file)
            with open(output_file, 'w') as fout:
                # every generated script starts with the test shell shebang
                fout.write("#!%s\n\n" % shell)
                # Target-specific scripts (add ENV to the generic ones)
                # NOTE: raw strings below write a literal backslash-n so the
                # escaping survives into the generated shell script (see docstring)
                if foutname == 'lava-vland-self':
                    fout.write(r'LAVA_VLAND_SELF="')
                    for line in self.sysfs:
                        fout.write(r"%s\n" % line)
                elif foutname == 'lava-vland-names':
                    fout.write(r'LAVA_VLAND_NAMES="')
                    for line in self.names:
                        fout.write(r"%s\n" % line)
                elif foutname == 'lava-vland-tags':
                    fout.write(r'LAVA_VLAND_TAGS="')
                    if not self.tags:
                        fout.write(r"\n")
                    else:
                        for line in self.tags:
                            fout.write(r"%s\n" % line)
                # close the quoted variable, then append the generic script body
                fout.write('"\n\n')
                fout.write(fin.read())
                # mark the generated script executable
                os.fchmod(fout.fileno(), self.xmod)
    self.call_protocols()
    return connection
def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    """
    Generate the multinode helper scripts inside the overlay.

    Copies each generic lava-* script into the overlay bin directory,
    prefixing the target-specific environment (group roles, target role,
    hostname or cache/debug settings) before the generic body.
    """
    # multinode scripts are only relevant when this node has a role assigned
    if self.role is None:
        self.logger.debug("skipped %s", self.name)
        return connection
    lava_test_results_dir = self.get_namespace_data(action='test', label='results', key='lava_test_results_dir')
    shell = self.get_namespace_data(action='test', label='shared', key='lava_test_sh_cmd')
    location = self.get_namespace_data(action='test', label='shared', key='location')
    if not location:
        raise LAVABug("Missing lava overlay location")
    if not os.path.exists(location):
        raise LAVABug("Unable to find overlay location")
    # the roles list can only be populated after the devices have been assigned
    # therefore, cannot be checked in validate which is executed at submission.
    if 'roles' not in self.job.parameters['protocols'][self.protocol]:
        raise LAVABug("multinode definition without complete list of roles after assignment")
    # Generic scripts
    lava_path = os.path.abspath("%s/%s" % (location, lava_test_results_dir))
    scripts_to_copy = glob.glob(os.path.join(self.lava_multi_node_test_dir, 'lava-*'))
    self.logger.debug(self.lava_multi_node_test_dir)
    self.logger.debug("lava_path: %s", lava_path)
    self.logger.debug("scripts to copy %s", scripts_to_copy)
    for fname in scripts_to_copy:
        with open(fname, 'r') as fin:
            foutname = os.path.basename(fname)
            output_file = '%s/bin/%s' % (lava_path, foutname)
            self.logger.debug("Creating %s", output_file)
            with open(output_file, 'w') as fout:
                # every generated script starts with the test shell shebang
                fout.write("#!%s\n\n" % shell)
                # Target-specific scripts (add ENV to the generic ones)
                if foutname == 'lava-group':
                    fout.write('LAVA_GROUP="\n')
                    for client_name in self.job.parameters['protocols'][self.protocol]['roles']:
                        # 'yaml_line' is bookkeeping data, not a client role
                        if client_name == 'yaml_line':
                            continue
                        role_line = self.job.parameters['protocols'][self.protocol]['roles'][client_name]
                        self.logger.debug("group roles:\t%s\t%s", client_name, role_line)
                        # raw string: writes literal \t and \n into the script
                        fout.write(r"\t%s\t%s\n" % (client_name, role_line))
                    fout.write('"\n')
                elif foutname == 'lava-role':
                    fout.write("TARGET_ROLE='%s'\n" % self.job.parameters['protocols'][self.protocol]['role'])
                elif foutname == 'lava-self':
                    fout.write("LAVA_HOSTNAME='%s'\n" % self.job.job_id)
                else:
                    fout.write("LAVA_TEST_BIN='%s/bin'\n" % lava_test_results_dir)
                    fout.write("LAVA_MULTI_NODE_CACHE='%s'\n" % self.lava_multi_node_cache_file)
                    # always write out full debug logs
                    fout.write("LAVA_MULTI_NODE_DEBUG='yes'\n")
                fout.write(fin.read())
                # mark the generated script executable
                os.fchmod(fout.fileno(), self.xmod)
    self.call_protocols()
    return connection
def _run(self):
    """
    Run the pipeline under the run() wrapper that will catch the exceptions

    Sets up every protocol first; a protocol failure aborts the job before
    any action is executed.
    """
    self.started = True
    # Setup the protocols
    for protocol in self.protocols:
        try:
            protocol.set_up()
        except LAVAError:
            raise
        except Exception as err:
            self.logger.error("Unable to setup the protocols")
            self.logger.exception(traceback.format_exc())
            raise LAVABug(err)
        if protocol.valid:
            continue
        error_msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
        self.logger.exception(error_msg)
        raise JobError(error_msg)
    # Run the pipeline and wait for exceptions
    with self.timeout() as max_end_time:
        self.pipeline.run_actions(self.connection, max_end_time)
def run(self, connection, max_end_time, args=None):
    """Fake action for tests: sleep briefly, then record a failed result."""
    if connection:
        raise LAVABug("Fake action not meant to have a real connection")
    # simulate some work taking place
    time.sleep(3)
    self.results = {'status': "failed"}
    return connection
def validate(self, simulate=False):
    """
    Public wrapper for the pipeline validation.
    Send a "fail" results if needed.

    :param simulate: when True, the pipeline description is printed
        by the underlying _validate call.
    :raises LAVABug: for unexpected (non-LAVAError) validation failures.
    """
    label = "lava-dispatcher, installed at version: %s" % debian_package_version(
        split=False)
    self.logger.info(label)
    self.logger.info("start: 0 validate")
    start = time.time()
    success = False
    try:
        self._validate(simulate)
    except LAVAError:
        # LAVAError subclasses are already user-meaningful; re-raise as-is.
        # (the previous 'as exc' binding here was never used)
        raise
    except Exception as exc:
        # provide useful info on command line, e.g. failed unit tests.
        self.logger.exception(traceback.format_exc())
        raise LAVABug(exc)
    else:
        success = True
    finally:
        # always clean up on failure and always report a validate result
        if not success:
            self.cleanup(connection=None)
        self.logger.info("validate duration: %.02f", time.time() - start)
        self.logger.results({
            "definition": "lava",
            "case": "validate",
            "result": "pass" if success else "fail"})
def run(self, connection, max_end_time, args=None):
    """
    Build a guest filesystem image holding the compressed overlay.

    Publishes the guest image filename and its blkid/UUID via namespace
    data for later deploy/boot actions.
    :raises LAVABug: when no compressed overlay is available.
    """
    overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
    if not overlay_file:
        raise LAVABug("Unable to find the overlay")
    self.logger.debug("Overlay: %s", overlay_file)
    guest_file = os.path.join(self.mkdtemp(), self.guest_filename)
    self.set_namespace_data(action=self.name, label='guest', key='filename', value=guest_file)
    results_dir = self.get_namespace_data(
        action='test', label='results', key='lava_test_results_dir')
    # guest size comes from the device deploy method configuration
    guest_params = self.job.device['actions']['deploy']['methods']['image']['parameters']['guest']
    blkid = prepare_guestfs(guest_file, overlay_file, guest_params['size'], results_dir)
    self.results = {'success': blkid}
    self.set_namespace_data(action=self.name, label='guest', key='UUID', value=blkid)
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Uncompress and unpack the downloaded ramdisk into a scratch directory.

    A job without a ramdisk is a no-op. When self.skip is set, the original
    (unextracted) ramdisk file is recorded under the tftp suffix for later
    use instead of being unpacked.
    :raises LAVABug: if the u-boot header cannot be stripped.
    :raises JobError: if the cpio archive cannot be extracted.
    """
    if not self.parameters.get('ramdisk', None):  # idempotency
        return connection
    ramdisk = self.get_namespace_data(action='download-action', label='ramdisk', key='file')
    if self.skip:
        self.logger.info("Not extracting ramdisk.")
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        filename = os.path.join(suffix, "ramdisk", os.path.basename(ramdisk))
        # declare the original ramdisk as the name to be used later.
        self.set_namespace_data(action='compress-ramdisk', label='file', key='ramdisk', value=filename)
        return connection
    connection = super(ExtractRamdisk, self).run(connection, max_end_time, args)
    ramdisk_dir = self.mkdtemp()
    extracted_ramdisk = os.path.join(ramdisk_dir, 'ramdisk')
    os.mkdir(extracted_ramdisk, 0o755)
    compression = self.parameters['ramdisk'].get('compression', None)
    suffix = ""
    if compression:
        suffix = ".%s" % compression
    ramdisk_compressed_data = os.path.join(ramdisk_dir, RAMDISK_FNAME + suffix)
    if self.parameters['ramdisk'].get('header', None) == 'u-boot':
        # strip the u-boot legacy image header (fixed length) with dd
        cmd = ('dd if=%s of=%s ibs=%s skip=1' % (
            ramdisk, ramdisk_compressed_data,
            UBOOT_DEFAULT_HEADER_LENGTH)).split(' ')
        try:
            self.run_command(cmd)
        except Exception:
            raise LAVABug('Unable to remove uboot header: %s' % ramdisk)
    else:
        # give the file a predictable name
        shutil.move(ramdisk, ramdisk_compressed_data)
    ramdisk_data = decompress_file(ramdisk_compressed_data, compression)
    # unpack the cpio archive inside the scratch directory
    with chdir(extracted_ramdisk):
        cmd = ('cpio -iud -F %s' % ramdisk_data).split(' ')
        if not self.run_command(cmd):
            raise JobError(
                'Unable to extract cpio archive: %s - missing header definition (i.e. u-boot)?' % ramdisk_data)
    # tell other actions where the unpacked ramdisk can be found
    self.set_namespace_data(action=self.name, label='extracted_ramdisk', key='directory', value=extracted_ramdisk)
    self.set_namespace_data(action=self.name, label='ramdisk_file', key='file', value=ramdisk_data)
    return connection
def select(cls, device, parameters):
    """
    Return the highest-priority subclass willing to accept the device
    and parameters.

    :raises LAVABug: if a subclass accepts() does not return a tuple.
    :raises JobError: if no subclass is willing, with the collected reasons.
    """
    replies = {}
    willing = []
    for candidate in cls.__subclasses__():  # pylint: disable=no-member
        accepted = candidate.accepts(device, parameters)
        if not isinstance(accepted, tuple):
            raise LAVABug(
                'class %s accept function did not return a tuple' % candidate.__name__)
        if accepted[0]:
            willing.append(candidate)
            continue
        # record the refusal reason against the class name
        label = candidate.name if hasattr(candidate, 'name') else candidate.__name__
        replies[label] = accepted[1]
    if not willing:
        reasons = "".join("%s: %s\n" % (name, reply) for name, reply in replies.items())
        raise JobError(
            "None of the test strategies accepted your test parameters, reasons given:\n%s" % reasons)
    # highest priority wins; max() keeps the first of equal priorities,
    # matching the previous stable sort + [0] behaviour
    return max(willing, key=lambda strategy: strategy.priority)
def run(self, connection, max_end_time, args=None):
    """
    Reset the menu connection: require an existing connection and prime
    it with a newline to catch the first prompt.

    :raises LAVABug: when no connection is available.
    """
    connection = super(MenuReset, self).run(connection, max_end_time, args)
    if not connection:
        # bug fix: the original left the '%s' placeholder unformatted,
        # producing the literal message "%s needs a Connection"
        raise LAVABug("%s needs a Connection" % self.name)
    connection.check_char = '\n'
    connection.sendline('\n')  # to catch the first prompt (remove for PDU?)
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Connect to the menu: prime the connection with a newline, set the
    expected prompts and wait if the device power state is indeterminate.

    :raises LAVABug: when no connection is available.
    """
    connection = super(MenuConnect, self).run(connection, max_end_time, args)
    if not connection:
        # bug fix: the original left the '%s' placeholder unformatted,
        # producing the literal message "%s needs a Connection"
        raise LAVABug("%s needs a Connection" % self.name)
    connection.check_char = '\n'
    connection.sendline('\n')  # to catch the first prompt (remove for PDU?)
    connection.prompt_str = self.parameters['prompts']
    # only wait when the device reports an indeterminate power state
    if hasattr(self.job.device, 'power_state') and self.job.device.power_state not in ['on', 'off']:
        self.wait(connection)
    return connection
def validate(self):
    """
    The reasoning here is that the RetryAction should be in charge of an
    internal pipeline so that the retry logic only occurs once and applies
    equally to the entire pipeline of the retry.
    """
    super(RetryAction, self).validate()
    if self.internal_pipeline:
        return
    raise LAVABug(
        "Retry action %s needs to implement an internal pipeline" % self.name)
def rmtree(directory):
    """
    Remove *directory* and everything below it, converting any OSError
    into a LAVABug. shutil.rmtree refuses to operate on a symbolic link,
    so calling this on a symlink also raises LAVABug.
    """
    # TODO: consider how to handle problems if the directory has already been removed -
    # coding bugs may trigger this Runtime exception - implement before moving to production.
    try:
        shutil.rmtree(directory)
    except OSError as exc:
        raise LAVABug("Error when trying to remove '%s': %s" % (directory, exc))
def _api_select(self, data, action=None):
    """
    Handle protocol API requests issued by an action.

    Supports 'pre-os-command' and 'pre-power-command' requests, running
    the corresponding device command; anything else is a JobError.
    :raises TestError: when called without data.
    :raises LAVABug: when called outside an action or with malformed data.
    :raises InfrastructureError: when the device command fails.
    """
    if not data:
        raise TestError("[%s] Protocol called without any data." % self.name)
    if not action:
        raise LAVABug('LXC protocol needs to be called from an action.')
    for item in data:
        if 'request' not in item:
            raise LAVABug("[%s] Malformed protocol request data." % self.name)
        # resolve the device command for the request, then run it once
        if 'pre-os-command' in item['request']:
            action.logger.info("[%s] Running pre OS command via protocol.", self.name)
            command = action.job.device.pre_os_command
        elif 'pre-power-command' in item['request']:
            action.logger.info("[%s] Running pre-power-command via protocol.", self.name)
            command = action.job.device.pre_power_command
        else:
            raise JobError("[%s] Unrecognised protocol request: %s" % (self.name, item))
        if not action.run_command(command.split(' '), allow_silent=True):
            raise InfrastructureError("%s failed" % command)
def run(self, connection, max_end_time, args=None):
    """
    Send the MCC command enabling USB mass storage and wait for the
    MCC prompt to return.

    :raises LAVABug: when no connection is already in use.
    """
    if not connection:
        raise LAVABug("%s started without a connection already in use" % self.name)
    connection = super(EnableVExpressMassStorage, self).run(connection, max_end_time, args)
    # Issue command and check that you are returned to the prompt again
    connection.sendline('%s\n' % self.mcc_cmd)
    connection.prompt_str = self.mcc_prompt
    self.logger.debug("Changing prompt to '%s'", self.mcc_prompt)
    self.wait(connection)
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Wait for the configured prompt, then send the interrupt string to
    drop the device out of fastboot.

    :raises LAVABug: when no connection is already in use.
    """
    if not connection:
        raise LAVABug("%s started without a connection already in use" % self.name)
    connection = super(WaitFastBootInterrupt, self).run(connection, max_end_time, args)
    # device is to be put into a reset state, either by issuing 'reboot' or power-cycle
    connection.prompt_str = self.prompt
    self.logger.debug("Changing prompt to '%s'", connection.prompt_str)
    self.wait(connection)
    self.logger.debug("Sending '%s' to interrupt fastboot.", self.string)
    connection.sendline(self.string)
    return connection
def wait_udev_event(action='add', match_dict=None, subsystem=None, devtype=None, devicepath=None):
    """
    Block until a udev event matching the given criteria is observed.

    :param action: udev action to wait for: 'add', 'remove' or 'change'.
    :param match_dict: dict of udev properties which must all match.
    :param devicepath: substring matched against DEVLINKS/DEVNAME; may be
        used instead of (or in addition to) match_dict.
    :param subsystem: optional udev subsystem filter.
    :param devtype: optional device type filter (requires subsystem).
    :raises LAVABug: on invalid arguments.
    """
    if action not in ['add', 'remove', 'change']:
        # bug fix: the message previously omitted 'change' even though it
        # has always been accepted by the check above
        raise LAVABug(
            "Invalid action for udev to wait for: %s, expected 'add', 'remove' or 'change'" % action)
    if match_dict:
        if not isinstance(match_dict, dict):
            raise LAVABug("match_dict was not a dict")
    else:
        if devicepath:
            if not isinstance(devicepath, str):
                raise LAVABug("devicepath was not a string")
            match_dict = {}
        else:
            raise LAVABug("Neither match_dict nor devicepath were set")
    if devtype and not subsystem:
        raise LAVABug("Cannot filter udev by devtype without a subsystem")
    match_dict['ACTION'] = action
    context = pyudev.Context()
    monitor = pyudev.Monitor.from_netlink(context)
    # narrow the kernel-side filter as far as the arguments allow
    if devtype and subsystem:
        monitor.filter_by(subsystem, devtype)
    elif subsystem:
        monitor.filter_by(subsystem)
    for device in iter(monitor.poll, None):
        same = _dict_compare(dict(device), match_dict)
        if same == set(match_dict.keys()):
            if devicepath:
                # also require the path to appear in DEVLINKS or DEVNAME
                if devicepath in dict(device).get('DEVLINKS', '') or \
                        devicepath in dict(device).get('DEVNAME', ''):
                    break
            else:
                break
def disconnect(self, reason):
    """
    Close this connection using the escape sequence appropriate to its
    type (telnet, ssh or lxc), then close the underlying raw connection.

    :param reason: free-text reason, only used for logging.
    :raises LAVABug: when the connection has no tags or an unsupported type.
    """
    logger = logging.getLogger('dispatcher')
    if not self.tags:
        raise LAVABug("'disconnect' not implemented")
    try:
        if 'telnet' in self.tags:
            logger.info("Disconnecting from telnet: %s", reason)
            # ^] drops telnet into command mode, then quit
            self.sendcontrol(']')
            self.sendline('quit', disconnecting=True)
        elif 'ssh' in self.tags:
            logger.info("Disconnecting from ssh: %s", reason)
            # newline followed by ~. is the ssh escape to terminate
            self.sendline('', disconnecting=True)
            self.sendline('~.', disconnecting=True)
        elif self.name == "LxcSession":
            logger.info("Disconnecting from lxc: %s", reason)
            self.sendline('', disconnecting=True)
            self.sendline('exit', disconnecting=True)
        else:
            raise LAVABug("'disconnect' not supported for %s" % self.tags)
    except ValueError:  # protection against file descriptor == -1
        logger.debug("Already disconnected")
    # always mark closed and drop the raw connection, even if the
    # polite escape sequence could not be sent
    self.connected = False
    self.raw_connection.close(force=True)
    self.raw_connection = None
def populate(self, parameters):
    """
    Needs to take account of the deployment type / image type etc.
    to determine which actions need to be added to the internal pipeline
    as part of the deployment selection step.
    """
    if not self.job:
        raise LAVABug("No job object supplied to action")
    # FIXME: not all mount operations will need these actions
    self.internal_pipeline = Pipeline(parent=self, job=self.job, parameters=parameters)
    # FIXME: LoopCheckAction and LoopMountAction should be in only one Action
    for action_class in (OffsetAction, LoopCheckAction, LoopMountAction):
        self.internal_pipeline.add_action(action_class(self.key))
def wait(self, max_end_time=None):
    """
    Simple wait without sending blank lines as that causes the menu
    to advance without data which can cause blank entries and can cause
    the menu to exit to an unrecognised prompt.
    """
    if not max_end_time:
        timeout = self.timeout.duration
    else:
        # remaining time until the deadline; already-expired is a caller bug
        timeout = max_end_time - time.time()
        if timeout < 0:
            raise LAVABug("Invalid max_end_time value passed to wait()")
    try:
        return self.raw_connection.expect(self.prompt_str, timeout=timeout)
    except (TestError, pexpect.TIMEOUT):
        raise JobError("wait for prompt timed out")
def _validate(self, simulate):
    """
    Validate the pipeline and raise an exception (that inherit from
    LAVAError) if it fails.
    If simulate is True, then print the pipeline description.
    """
    self.logger.info("Start time: %s (UTC)", pytz.utc.localize(datetime.datetime.utcnow()))
    # configure every protocol first; a configuration failure aborts validation
    for protocol in self.protocols:
        try:
            protocol.configure(self.device, self)
        except LAVAError:
            self.logger.error("Configuration failed for protocol %s", protocol.name)
            raise
        except Exception as exc:
            # unexpected failures become LAVABug after logging the traceback
            self.logger.error("Configuration failed for protocol %s", protocol.name)
            self.logger.exception(traceback.format_exc())
            raise LAVABug(exc)
        if not protocol.valid:
            msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
            self.logger.exception(msg)
            raise JobError(msg)
    if simulate:
        # output the content and then any validation errors (python3 compatible)
        print(yaml.dump(self.describe()))  # pylint: disable=superfluous-parens
    # Check that namespaces are used in all actions or none
    namespaces = set()
    for action in self.parameters["actions"]:
        # each action entry is a single-key mapping: {action_name: {...}}
        action_name = list(action.keys())[0]
        namespaces.add(action[action_name]["namespace"])
    # 'common' is a reserved namespace that should not be present with
    # other namespaces.
    if len(namespaces) > 1 and 'common' in namespaces:
        msg = "'common' is a reserved namespace that should not be present with other namespaces"
        self.logger.error(msg)
        self.logger.debug("Namespaces: %s", ", ".join(namespaces))
        raise JobError(msg)
    # validate the pipeline
    self.pipeline.validate_actions()
def run(self, connection, max_end_time, args=None):
    """
    Wait for the bootloader and interrupt it when required, then record
    that the bootloader prompt has been reached.

    :raises LAVABug: when no connection is already in use.
    """
    if not connection:
        raise LAVABug("%s started without a connection already in use" % self.name)
    connection = super(BootloaderInterruptAction, self).run(connection, max_end_time, args)
    if not self.needs_interrupt:
        self.logger.info("Not interrupting bootloader, waiting for bootloader prompt")
        connection.prompt_str = self.bootloader_prompt
        self.wait(connection)
        self.set_namespace_data(action='interrupt', label='interrupt', key='at_bootloader_prompt', value=True)
        return connection
    # wait for the interrupt prompt, then send the configured interrupt
    connection.prompt_str = self.interrupt_prompt
    self.wait(connection)
    if self.interrupt_control_chars:
        for char in self.interrupt_control_chars:
            connection.sendcontrol(char)
    else:
        connection.sendline(self.interrupt_char)
    self.set_namespace_data(action='interrupt', label='interrupt', key='at_bootloader_prompt', value=True)
    return connection
def run(self, connection, max_end_time):
    """
    Unpack the overlay tarball already copied onto the target via scp
    and publish the connection for the following actions.

    :raises LAVABug: when no connection is available.
    """
    connection = super().run(connection, max_end_time)
    if not connection:
        raise LAVABug("Cannot unpack, no connection available.")
    overlay = self.get_namespace_data(action='scp-deploy', label='scp-overlay-unpack', key='overlay')
    tar_flags = self.get_namespace_data(action='scp-overlay', label='scp-overlay', key='tar_flags')
    connection.sendline("tar %s -C / -xzf /%s" % (tar_flags, overlay))
    self.wait(connection)
    self.set_namespace_data(action='shared', label='shared', key='connection', value=connection)
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Download the overlay onto the booted device over HTTP and unpack it,
    using the transfer_overlay commands supplied by the job.

    :raises LAVABug: when no connection is available.
    :raises JobError: when no overlay file has been produced.
    :raises ConfigurationError: when the overlay is outside the download dir.
    """
    connection = super(OverlayUnpack, self).run(connection, max_end_time, args)
    if not connection:
        raise LAVABug("Cannot transfer overlay, no connection available.")
    ip_addr = dispatcher_ip(self.job.parameters['dispatcher'])
    overlay_full_path = self.get_namespace_data(action='compress-overlay', label='output', key='file')
    if not overlay_full_path:
        raise JobError("No overlay file identified for the transfer.")
    if not overlay_full_path.startswith(DISPATCHER_DOWNLOAD_DIR):
        raise ConfigurationError("overlay should already be in DISPATCHER_DOWNLOAD_DIR")
    # path relative to the download dir, as served over http
    overlay_path = overlay_full_path[len(DISPATCHER_DOWNLOAD_DIR) + 1:]
    overlay = os.path.basename(overlay_path)
    transfer_params = self.parameters['transfer_overlay']
    dwnld = "%s http://%s/tmp/%s" % (transfer_params['download_command'], ip_addr, overlay_path)
    unpack = "%s %s" % (transfer_params['unpack_command'], overlay)
    # remove any stale copy, then download and unpack in one shell line
    connection.sendline("rm %s; %s && %s" % (overlay, dwnld, unpack))
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Check that a free loopback device should be available before mounting.

    :raises LAVABug: when the available loop count was never recorded.
    :raises InfrastructureError: when all loop devices appear to be in use.
    """
    connection = super(LoopCheckAction, self).run(connection, max_end_time, args)
    # fetch once and reuse - previously this value was looked up twice
    available_loops = self.get_namespace_data(action=self.name, label=self.key, key='available_loops')
    if not available_loops:
        raise LAVABug("Unable to check available loop devices")
    # renamed from 'args' to avoid shadowing the method parameter
    cmd = ['/sbin/losetup', '-a']
    pro = self.run_command(cmd)
    mounted_loops = len(pro.strip().split("\n")) if pro else 0
    # FIXME: we should retry as this can happen and be fixed automatically
    # when one is unmounted
    if mounted_loops >= available_loops:
        raise InfrastructureError("Insufficient loopback devices?")
    self.logger.debug("available loops: %s", available_loops)
    self.logger.debug("mounted_loops: %s", mounted_loops)
    return connection
def run(self, connection, max_end_time, args=None):
    """
    Reach the V-Express MCC prompt, interrupting autorun when it is
    enabled; otherwise the MCC prompt is already available.

    :raises LAVABug: when no connection is already in use.
    """
    if not connection:
        raise LAVABug("%s started without a connection already in use" % self.name)
    connection = super(EnterVExpressMCC, self).run(connection, max_end_time, args)
    # Get possible prompts from device config
    connection.prompt_str = [self.autorun_prompt, self.mcc_prompt]
    self.logger.debug("Changing prompt to '%s'", connection.prompt_str)
    matched = self.wait(connection)
    if connection.prompt_str[matched] == self.mcc_prompt:
        self.logger.debug(
            'Already at MCC prompt: autorun looks to be disabled')
        return connection
    self.logger.debug('Autorun enabled: interrupting..')
    connection.sendline('%s\n' % self.interrupt_char)
    connection.prompt_str = self.mcc_prompt
    self.wait(connection)
    return connection
def listen_feedback(self, timeout):
    """
    Listen to output and log as feedback
    Returns the number of characters read.

    :param timeout: how long to listen for, in seconds; must be >= 0.
    :raises LAVABug: when timeout is negative.
    """
    index = 0
    if not self.raw_connection:
        # connection has already been closed.
        return index
    if timeout < 0:
        raise LAVABug("Invalid timeout value passed to listen_feedback()")
    try:
        # flag the logger so anything captured is recorded as feedback
        self.raw_connection.logfile.is_feedback = True
        index = self.raw_connection.expect(
            [pexpect.EOF, pexpect.TIMEOUT], timeout=timeout)
    finally:
        # always clear the feedback flag, even if expect raised
        self.raw_connection.logfile.is_feedback = False
    if index == 1:
        # TIMEOUT matched: everything read before the timeout is feedback
        return len(self.raw_connection.before)
    # EOF matched: index is 0, meaning no characters counted
    return index
def copy_out_files(image, filenames, destination):
    """
    Copies a list of files out of the image to the specified
    destination which must exist. Launching the guestfs is expensive,
    so copy out all files in one operation. The filenames list must
    contain unique filenames even if the source files exist in
    separate directories.

    :param image: path of the disk image to open read-only.
    :param filenames: list of absolute paths inside the image.
    :param destination: existing host directory to copy files into.
    :raises LAVABug: when filenames is not a list.
    :raises InfrastructureError: when the image layout is unexpected.
    """
    if not isinstance(filenames, list):
        raise LAVABug('filenames must be a list')
    guest = guestfs.GuestFS(python_return_dict=True)
    guest.add_drive_ro(image)
    guest.launch()
    try:
        devices = guest.list_devices()
        if len(devices) != 1:
            raise InfrastructureError("Unable to prepare guestfs")
        guest.mount_ro(devices[0], '/')
        for filename in filenames:
            file_buf = guest.read_file(filename)
            with open(os.path.join(destination, os.path.basename(filename)), 'wb') as out:
                out.write(file_buf)
    finally:
        # bug fix: previously shutdown() was skipped when list_devices,
        # mount_ro or read_file raised, leaking the guestfs appliance
        guest.shutdown()