def run(self, connection, max_end_time, args=None):
    if not self.parameters.get('modules', None):  # idempotency
        return connection
    connection = super(ExtractModules, self).run(connection, max_end_time, args)
    modules = self.get_namespace_data(action='download-action', label='modules', key='file')
    if not self.parameters.get('ramdisk', None):
        if not self.parameters.get('nfsrootfs', None):
            raise JobError("Unable to identify a location for the unpacked modules")
    # If both NFS and ramdisk are specified, apply modules to both, as the
    # kernel may need some modules to raise the network and will need other
    # modules to support operations within the NFS.
    if self.parameters.get('nfsrootfs', None):
        if not self.parameters['nfsrootfs'].get('install_modules', True):
            self.logger.info("Skipping applying overlay to NFS")
            return connection
        root = self.get_namespace_data(action='extract-rootfs', label='file', key='nfsroot')
        self.logger.info("extracting modules file %s to %s", modules, root)
        untar_file(modules, root)
    if self.parameters.get('ramdisk', None):
        if not self.parameters['ramdisk'].get('install_modules', True):
            self.logger.info("Not adding modules to the ramdisk.")
            return connection
        root = self.get_namespace_data(action='extract-overlay-ramdisk', label='extracted_ramdisk', key='directory')
        self.logger.info("extracting modules file %s to %s", modules, root)
        untar_file(modules, root)
    try:
        os.unlink(modules)
    except OSError as exc:
        raise InfrastructureError("Unable to remove tarball: '%s' - %s" % (modules, exc))
    return connection
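
# untar_file() above is a LAVA utility, not shown here. As a rough,
# hypothetical sketch of the behaviour these actions depend on (unpack a
# tarball into a directory, or pull out a single member), assuming it is a
# thin wrapper over the standard tarfile module:
import tarfile

def untar_file_sketch(infile, outdir, member=None, outfile=None):
    """Illustrative stand-in for the real untar_file() helper."""
    with tarfile.open(infile) as tar:  # mode 'r:*' autodetects gz/bz2/xz
        if member is not None:
            # extract one named member to an explicit output path
            with tar.extractfile(member) as src, open(outfile, 'wb') as dst:
                dst.write(src.read())
        else:
            tar.extractall(path=outdir)
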
def run(self, connection, max_end_time, args=None):
    if not self.parameters['images'].get(self.param_key, None):  # idempotency
        return connection
    connection = super(ExtractNfsAction, self).run(connection, max_end_time, args)
    root = self.get_namespace_data(action='download-action', label=self.param_key, key='file')
    root_dir = self.mkdtemp()
    untar_file(root, root_dir)
    self.set_namespace_data(action='extract-rootfs', label='file', key=self.file_key, value=root_dir)
    self.logger.debug("Extracted %s to %s", self.file_key, root_dir)

    if 'prefix' in self.parameters['images'][self.param_key]:
        prefix = self.parameters['images'][self.param_key]['prefix']
        self.logger.warning("Adding '%s' prefix, any other content will not be visible.", prefix)

        # Grab the path already defined in super().run() and add the prefix
        root_dir = self.get_namespace_data(action='extract-rootfs', label='file', key=self.file_key)
        root_dir = os.path.join(root_dir, prefix)

        # sets the directory into which the overlay is unpacked and which
        # is used in the substitutions into the bootloader command string.
        self.set_namespace_data(action='extract-rootfs', label='file', key=self.file_key, value=root_dir)
    return connection
def run(self, connection, max_end_time, args=None):
    if not self.parameters.get(self.param_key, None):  # idempotency
        return connection
    connection = super(ExtractRootfs, self).run(connection, max_end_time, args)
    root = self.get_namespace_data(action='download-action', label=self.param_key, key='file')
    root_dir = self.mkdtemp()
    untar_file(root, root_dir)
    self.set_namespace_data(action='extract-rootfs', label='file', key=self.file_key, value=root_dir)
    self.logger.debug("Extracted %s to %s", self.file_key, root_dir)
    return connection
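
# get_namespace_data()/set_namespace_data() shuttle dynamic results between
# pipeline actions: the download action publishes the file path, and the
# extract actions republish the unpacked directory for later substitutions.
# A toy model of that contract (the real store is job-wide and
# namespace-aware; this is only to show the lookup shape):
class NamespaceStoreSketch:
    def __init__(self):
        self._data = {}

    def set_namespace_data(self, action, label, key, value):
        self._data[(action, label, key)] = value

    def get_namespace_data(self, action, label, key):
        return self._data.get((action, label, key))

# store.set_namespace_data(action='extract-rootfs', label='file', key='nfsroot', value='/tmp/root')
# store.get_namespace_data(action='extract-rootfs', label='file', key='nfsroot')  # -> '/tmp/root'
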
def run(self, connection, max_end_time, args=None):
    connection = super(ExtractVExpressRecoveryImage, self).run(connection, max_end_time, args)

    # copy recovery image to a temporary directory and unpack
    recovery_image = self.get_namespace_data(action='download-action', label=self.param_key, key='file')
    recovery_image_dir = self.mkdtemp()
    shutil.copy(recovery_image, recovery_image_dir)
    tmp_recovery_image = os.path.join(recovery_image_dir, os.path.basename(recovery_image))

    if os.path.isfile(tmp_recovery_image):
        if self.compression == "zip":
            decompress_file(tmp_recovery_image, self.compression)
        elif self.compression == "gz":
            untar_file(tmp_recovery_image, recovery_image_dir)
        else:
            raise InfrastructureError("Unsupported compression for VExpress recovery: %s" % self.compression)
        os.remove(tmp_recovery_image)
        self.set_namespace_data(action='extract-vexpress-recovery-image', label='file', key=self.file_key, value=recovery_image_dir)
        self.logger.debug("Extracted %s to %s", self.file_key, recovery_image_dir)
    else:
        raise InfrastructureError("Unable to decompress recovery image")
    return connection
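
# decompress_file() is likewise a LAVA helper. For the "zip" branch above, a
# hypothetical standard-library equivalent (the function name and exact
# semantics are assumptions, not the real implementation):
import os
import zipfile

def unzip_in_place_sketch(path):
    """Unpack a zip archive next to itself, as the recovery image needs."""
    with zipfile.ZipFile(path) as archive:
        archive.extractall(os.path.dirname(path))
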
def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-branches
    connection = super(ApplyOverlayTftp, self).run(connection, max_end_time, args)
    directory = None
    nfs_address = None
    overlay_file = None
    namespace = self.parameters.get('namespace', None)
    if self.parameters.get('nfsrootfs', None) is not None:
        if not self.parameters['nfsrootfs'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='extract-rootfs', label='file', key='nfsroot')
        if overlay_file:
            self.logger.info("[%s] Applying overlay to NFS", namespace)
    elif self.parameters.get('images', {}).get('nfsrootfs', None) is not None:
        if not self.parameters['images']['nfsrootfs'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='extract-rootfs', label='file', key='nfsroot')
        if overlay_file:
            self.logger.info("[%s] Applying overlay to NFS", namespace)
    elif self.parameters.get('persistent_nfs', None) is not None:
        if not self.parameters['persistent_nfs'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to persistent NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        nfs_address = self.parameters['persistent_nfs'].get('address')
        if overlay_file:
            self.logger.info("[%s] Applying overlay to persistent NFS address %s", namespace, nfs_address)
        # Need to mount the persistent NFS here.
        # We can't use self.mkdtemp() here because this directory should
        # not be removed if the umount fails.
        directory = mkdtemp(autoremove=False)
        try:
            subprocess.check_output(['mount', '-t', 'nfs', nfs_address, directory])
        except subprocess.CalledProcessError as exc:
            raise JobError(exc)
    elif self.parameters.get('ramdisk', None) is not None:
        if not self.parameters['ramdisk'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to ramdisk", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='extract-overlay-ramdisk', label='extracted_ramdisk', key='directory')
        if overlay_file:
            self.logger.info("[%s] Applying overlay %s to ramdisk", namespace, overlay_file)
    elif self.parameters.get('rootfs', None) is not None:
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='apply-overlay', label='file', key='root')
    else:
        self.logger.debug("[%s] No overlay directory", namespace)
        self.logger.debug(self.parameters)
    if self.parameters.get('os', None) == "centos_installer":
        # The centos installer ramdisk doesn't like having anything other
        # than the kickstart config being inserted. Instead, make the
        # overlay accessible through tftp. Yuck.
        tftp_dir = os.path.dirname(self.get_namespace_data(action='download-action', label='ramdisk', key='file'))
        shutil.copy(overlay_file, tftp_dir)
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        if not suffix:
            suffix = ''
        self.set_namespace_data(action=self.name, label='file', key='overlay',
                                value=os.path.join(suffix, "ramdisk", os.path.basename(overlay_file)))
    if overlay_file:
        self.logger.debug("[%s] Applying overlay %s to directory %s", namespace, overlay_file, directory)
        untar_file(overlay_file, directory)
        if nfs_address:
            subprocess.check_output(['umount', directory])
            os.rmdir(directory)  # fails if the umount fails
    return connection
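
# The persistent_nfs branch mounts a live NFS export, untars the overlay into
# it, then unmounts; the mount point is deliberately not auto-removed so that
# a directory backing a stale mount is never deleted. A self-contained sketch
# of the same pattern (the export address and tarball path are made up):
import os
import subprocess
import tarfile
import tempfile

def apply_overlay_to_persistent_nfs(nfs_address, overlay_tarball):
    mountpoint = tempfile.mkdtemp()  # kept on failure, matching the action
    subprocess.check_output(['mount', '-t', 'nfs', nfs_address, mountpoint])
    with tarfile.open(overlay_tarball) as tar:
        tar.extractall(path=mountpoint)
    subprocess.check_output(['umount', mountpoint])
    os.rmdir(mountpoint)  # raises OSError if the umount silently failed

# e.g. apply_overlay_to_persistent_nfs('server:/exports/rootfs', '/tmp/overlay.tar.gz')
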
def run(self, connection, max_end_time):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    def progress_unknown_total(downloaded_sz, last_val):
        """ Compute progress when the size is unknown """
        condition = downloaded_sz >= last_val + 25 * 1024 * 1024
        return (condition, downloaded_sz,
                "progress %dMB" % (int(downloaded_sz / (1024 * 1024))) if condition else "")

    def progress_known_total(downloaded_sz, last_val):
        """ Compute progress when the size is known """
        percent = math.floor(downloaded_sz / float(self.size) * 100)
        condition = percent >= last_val + 5
        return (condition, percent,
                "progress %3d%% (%dMB)" % (percent, int(downloaded_sz / (1024 * 1024))) if condition else "")

    connection = super().run(connection, max_end_time)
    # self.cookies = self.job.context.config.lava_cookies  # FIXME: work out how to restore
    md5 = hashlib.md5()
    sha256 = hashlib.sha256()

    # Create a fresh directory if the old one has been removed by a previous cleanup
    # (when retrying inside a RetryAction)
    try:
        os.makedirs(self.path, 0o755)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise InfrastructureError("Unable to create %s: %s" % (self.path, str(exc)))

    if 'images' in self.parameters and self.key in self.parameters['images']:
        remote = self.parameters['images'][self.key]
        compression = self.parameters['images'][self.key].get('compression', False)
    else:
        remote = self.parameters[self.key]
        if self.key == 'ramdisk':
            compression = False
            self.logger.debug("Not decompressing ramdisk as it can be used compressed.")
        else:
            compression = self.parameters[self.key].get('compression', False)

    md5sum = remote.get('md5sum')
    sha256sum = remote.get('sha256sum')

    fname, _ = self._url_to_fname_suffix(self.path, compression)
    if os.path.isdir(fname):
        raise JobError("Download '%s' is a directory, not a file" % fname)
    if os.path.exists(fname):
        os.remove(fname)

    self.logger.info("downloading %s", remote['url'])
    self.logger.debug("saving as %s", fname)

    downloaded_size = 0
    beginning = time.time()
    # Choose the progress bar (is the size known?)
    if self.size == -1:
        self.logger.debug("total size: unknown")
        last_value = -25 * 1024 * 1024
        progress = progress_unknown_total
    else:
        self.logger.debug("total size: %d (%dMB)" % (self.size, int(self.size / (1024 * 1024))))
        last_value = -5
        progress = progress_known_total

    decompress_command = None
    if compression:
        if compression in self.decompress_command_map:
            decompress_command = self.decompress_command_map[compression]
            self.logger.info("Using %s to decompress %s", decompress_command, compression)
        else:
            self.logger.info("Compression %s specified but not decompressing during download", compression)
    else:
        self.logger.debug("No compression specified")

    def update_progress():
        nonlocal downloaded_size, last_value, md5, sha256
        downloaded_size += len(buff)
        (printing, new_value, msg) = progress(downloaded_size, last_value)
        if printing:
            last_value = new_value
            self.logger.debug(msg)
        md5.update(buff)
        sha256.update(buff)

    if compression and decompress_command:
        try:
            with open(fname, 'wb') as dwnld_file:
                proc = subprocess.Popen([decompress_command], stdin=subprocess.PIPE, stdout=dwnld_file)
        except (IOError, OSError) as exc:
            msg = "Unable to open %s: %s" % (fname, exc.strerror)
            self.logger.error(msg)
            raise InfrastructureError(msg)
        with proc.stdin as pipe:
            for buff in self.reader():
                update_progress()
                try:
                    pipe.write(buff)
                except BrokenPipeError as exc:
                    error_message = str(exc)
                    self.logger.exception(error_message)
                    msg = "Make sure the 'compression' corresponds to the image file type."
                    self.logger.error(msg)
                    raise JobError(error_message)
        proc.wait()
    else:
        with open(fname, 'wb') as dwnld_file:
            for buff in self.reader():
                update_progress()
                dwnld_file.write(buff)

    # Log the download speed
    ending = time.time()
    self.logger.info("%dMB downloaded in %0.2fs (%0.2fMB/s)" %
                     (downloaded_size / (1024 * 1024), round(ending - beginning, 2),
                      round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2)))

    # set the dynamic data into the context
    self.set_namespace_data(action='download-action', label=self.key, key='file', value=fname)
    self.set_namespace_data(action='download-action', label=self.key, key='md5', value=md5.hexdigest())
    self.set_namespace_data(action='download-action', label=self.key, key='sha256', value=sha256.hexdigest())

    # handle archive files
    if 'images' in self.parameters and self.key in self.parameters['images']:
        archive = self.parameters['images'][self.key].get('archive', False)
    else:
        archive = self.parameters[self.key].get('archive')
    if archive:
        origin = fname
        # Trim the archive suffix explicitly; str.rstrip() strips a *set of
        # characters*, not a suffix.
        target_fname = os.path.basename(origin)
        if target_fname.endswith('.' + archive):
            target_fname = target_fname[:-len('.' + archive)]
        target_fname_path = os.path.join(os.path.dirname(origin), target_fname)
        if os.path.exists(target_fname_path):
            os.remove(target_fname_path)
        if archive == 'tar':
            untar_file(origin, None, member=target_fname, outfile=target_fname_path)
        self.set_namespace_data(action='download-action', label=self.key, key='file', value=target_fname_path)
        self.set_namespace_data(action='download-action', label='file', key=self.key, value=target_fname)
        self.logger.debug("Using %s archive" % archive)

    if md5sum is not None:
        chk_md5sum = self.get_namespace_data(action='download-action', label=self.key, key='md5')
        if md5sum != chk_md5sum:
            self.logger.error("md5sum of downloaded content: %s" % chk_md5sum)
            self.logger.info("sha256sum of downloaded content: %s" %
                             (self.get_namespace_data(action='download-action', label=self.key, key='sha256')))
            self.results = {'fail': {'md5': md5sum, 'download': chk_md5sum}}
            raise JobError("MD5 checksum for '%s' does not match." % fname)
        self.results = {'success': {'md5': md5sum}}

    if sha256sum is not None:
        chk_sha256sum = self.get_namespace_data(action='download-action', label=self.key, key='sha256')
        if sha256sum != chk_sha256sum:
            self.logger.info("md5sum of downloaded content: %s" %
                             (self.get_namespace_data(action='download-action', label=self.key, key='md5')))
            self.logger.error("sha256sum of downloaded content: %s" % chk_sha256sum)
            self.results = {'fail': {'sha256': sha256sum, 'download': chk_sha256sum}}
            raise JobError("SHA256 checksum for '%s' does not match." % fname)
        self.results = {'success': {'sha256': sha256sum}}

    # certain deployments need prefixes set
    if self.parameters['to'] == 'tftp' or self.parameters['to'] == 'nbd':
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    elif self.parameters['to'] == 'iso-installer':
        suffix = self.get_namespace_data(action='deploy-iso-installer', label='iso', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    else:
        self.set_namespace_data(action='download-action', label='file', key=self.key, value=fname)

    # the xnbd protocol needs to know the location
    nbdroot = self.get_namespace_data(action='download-action', label='file', key='nbdroot')
    if 'lava-xnbd' in self.parameters and nbdroot:
        self.parameters['lava-xnbd']['nbdroot'] = nbdroot

    self.results = {
        'label': self.key,
        'size': downloaded_size,
        'md5sum': str(self.get_namespace_data(action='download-action', label=self.key, key='md5')),
        'sha256sum': str(self.get_namespace_data(action='download-action', label=self.key, key='sha256'))
    }
    return connection
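
# The nested progress_* helpers throttle logging: one line per 25MB when the
# total size is unknown, one line per 5 percentage points when it is known.
# A standalone demonstration of the known-size variant (total and chunk sizes
# are arbitrary):
import math

def demo_progress(total, chunk):
    last_value = -5  # matches the initial value chosen in run()
    downloaded = 0
    while downloaded < total:
        downloaded += chunk
        percent = math.floor(downloaded / float(total) * 100)
        if percent >= last_value + 5:
            last_value = percent
            print("progress %3d%% (%dMB)" % (percent, downloaded // (1024 * 1024)))

# demo_progress(100 * 1024 * 1024, 3 * 1024 * 1024) prints roughly every 5%
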
def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    def progress_unknown_total(downloaded_sz, last_val):
        """ Compute progress when the size is unknown """
        condition = downloaded_sz >= last_val + 25 * 1024 * 1024
        return (condition, downloaded_sz,
                "progress %dMB" % (int(downloaded_sz / (1024 * 1024))) if condition else "")

    def progress_known_total(downloaded_sz, last_val):
        """ Compute progress when the size is known """
        percent = math.floor(downloaded_sz / float(self.size) * 100)
        condition = percent >= last_val + 5
        return (condition, percent,
                "progress %3d%% (%dMB)" % (percent, int(downloaded_sz / (1024 * 1024))) if condition else "")

    connection = super(DownloadHandler, self).run(connection, max_end_time, args)
    # self.cookies = self.job.context.config.lava_cookies  # FIXME: work out how to restore
    md5 = hashlib.md5()
    sha256 = hashlib.sha256()

    # Create a fresh directory if the old one has been removed by a previous cleanup
    # (when retrying inside a RetryAction)
    try:
        os.makedirs(self.path, 0o755)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise InfrastructureError("Unable to create %s: %s" % (self.path, str(exc)))

    if 'images' in self.parameters and self.key in self.parameters['images']:
        remote = self.parameters['images'][self.key]
        compression = self.parameters['images'][self.key].get('compression', False)
    else:
        remote = self.parameters[self.key]
        if self.key == 'ramdisk':
            compression = False
            self.logger.debug("Not decompressing ramdisk as it can be used compressed.")
        else:
            compression = self.parameters[self.key].get('compression', False)

    md5sum = remote.get('md5sum', None)
    sha256sum = remote.get('sha256sum', None)

    fname, _ = self._url_to_fname_suffix(self.path, compression)
    if os.path.isdir(fname):
        raise JobError("Download '%s' is a directory, not a file" % fname)
    if os.path.exists(fname):
        os.remove(fname)

    self.logger.info("downloading %s", remote['url'])
    self.logger.debug("saving as %s", fname)

    downloaded_size = 0
    beginning = time.time()
    # Choose the progress bar (is the size known?)
    if self.size == -1:
        self.logger.debug("total size: unknown")
        last_value = -25 * 1024 * 1024
        progress = progress_unknown_total
    else:
        self.logger.debug("total size: %d (%dMB)" % (self.size, int(self.size / (1024 * 1024))))
        last_value = -5
        progress = progress_known_total

    decompress_command = None
    if compression:
        if compression == 'gz':
            decompress_command = 'gunzip'
        elif compression == 'bz2':
            decompress_command = 'bunzip2'
        elif compression == 'zip':
            decompress_command = 'unzip'
        else:
            decompress_command = 'unxz'
        self.logger.debug("Using %s decompression" % compression)
    else:
        self.logger.debug("No compression specified.")

    def update_progress():
        nonlocal downloaded_size, last_value, md5, sha256
        downloaded_size += len(buff)
        (printing, new_value, msg) = progress(downloaded_size, last_value)
        if printing:
            last_value = new_value
            self.logger.debug(msg)
        md5.update(buff)
        sha256.update(buff)

    if compression:
        try:
            with open(fname, 'wb') as dwnld_file:
                proc = subprocess.Popen([decompress_command], stdin=subprocess.PIPE, stdout=dwnld_file)
        except (IOError, OSError) as exc:
            msg = "Unable to open %s: %s" % (fname, exc.strerror)
            self.logger.error(msg)
            raise InfrastructureError(msg)
        with proc.stdin as pipe:
            for buff in self.reader():
                update_progress()
                try:
                    pipe.write(buff)
                except BrokenPipeError as exc:
                    error_message = str(exc)
                    self.logger.exception(error_message)
                    msg = "Make sure the 'compression' corresponds to the image file type."
                    self.logger.error(msg)
                    raise JobError(error_message)
        proc.wait()
    else:
        with open(fname, 'wb') as dwnld_file:
            for buff in self.reader():
                update_progress()
                dwnld_file.write(buff)

    # Log the download speed
    ending = time.time()
    self.logger.info("%dMB downloaded in %0.2fs (%0.2fMB/s)" %
                     (downloaded_size / (1024 * 1024), round(ending - beginning, 2),
                      round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2)))

    # set the dynamic data into the context
    self.set_namespace_data(action='download-action', label=self.key, key='file', value=fname)
    self.set_namespace_data(action='download-action', label=self.key, key='md5', value=md5.hexdigest())
    self.set_namespace_data(action='download-action', label=self.key, key='sha256', value=sha256.hexdigest())

    # handle archive files
    if 'images' in self.parameters and self.key in self.parameters['images']:
        archive = self.parameters['images'][self.key].get('archive', False)
    else:
        archive = self.parameters[self.key].get('archive', None)
    if archive:
        origin = fname
        # Trim the archive suffix explicitly; str.rstrip() strips a *set of
        # characters*, not a suffix.
        target_fname = os.path.basename(origin)
        if target_fname.endswith('.' + archive):
            target_fname = target_fname[:-len('.' + archive)]
        target_fname_path = os.path.join(os.path.dirname(origin), target_fname)
        if os.path.exists(target_fname_path):
            os.remove(target_fname_path)
        if archive == 'tar':
            untar_file(origin, None, member=target_fname, outfile=target_fname_path)
        self.set_namespace_data(action='download-action', label=self.key, key='file', value=target_fname_path)
        self.set_namespace_data(action='download-action', label='file', key=self.key, value=target_fname)
        self.logger.debug("Using %s archive" % archive)

    if md5sum is not None:
        chk_md5sum = self.get_namespace_data(action='download-action', label=self.key, key='md5')
        if md5sum != chk_md5sum:
            self.logger.error("md5sum of downloaded content: %s" % chk_md5sum)
            self.logger.info("sha256sum of downloaded content: %s" % (
                self.get_namespace_data(action='download-action', label=self.key, key='sha256')))
            self.results = {'fail': {'md5': md5sum, 'download': chk_md5sum}}
            raise JobError("MD5 checksum for '%s' does not match." % fname)
        self.results = {'success': {'md5': md5sum}}

    if sha256sum is not None:
        chk_sha256sum = self.get_namespace_data(action='download-action', label=self.key, key='sha256')
        if sha256sum != chk_sha256sum:
            self.logger.info("md5sum of downloaded content: %s" % (
                self.get_namespace_data(action='download-action', label=self.key, key='md5')))
            self.logger.error("sha256sum of downloaded content: %s" % chk_sha256sum)
            self.results = {'fail': {'sha256': sha256sum, 'download': chk_sha256sum}}
            raise JobError("SHA256 checksum for '%s' does not match." % fname)
        self.results = {'success': {'sha256': sha256sum}}

    # certain deployments need prefixes set
    if self.parameters['to'] == 'tftp' or self.parameters['to'] == 'nbd':
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    elif self.parameters['to'] == 'iso-installer':
        suffix = self.get_namespace_data(action='deploy-iso-installer', label='iso', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    else:
        self.set_namespace_data(action='download-action', label='file', key=self.key, value=fname)

    # the xnbd protocol needs to know the location
    nbdroot = self.get_namespace_data(action='download-action', label='file', key='nbdroot')
    if 'lava-xnbd' in self.parameters and nbdroot:
        self.parameters['lava-xnbd']['nbdroot'] = nbdroot

    self.results = {
        'label': self.key,
        'md5sum': str(self.get_namespace_data(action='download-action', label=self.key, key='md5')),
        'sha256sum': str(self.get_namespace_data(action='download-action', label=self.key, key='sha256'))
    }
    return connection
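
# Both DownloadHandler variants above stream each downloaded buffer through an
# external decompressor (gunzip/bunzip2/unxz) while writing the output to
# disk, so large images never need a second decompression pass. A minimal
# sketch of that Popen pipe pattern (the reader and file name are
# illustrative):
import subprocess

def download_decompressing(reader, out_path, decompress_command='gunzip'):
    with open(out_path, 'wb') as out:
        proc = subprocess.Popen([decompress_command], stdin=subprocess.PIPE, stdout=out)
        with proc.stdin as pipe:
            for buff in reader:
                # a BrokenPipeError here usually means the declared
                # 'compression' does not match the actual file type
                pipe.write(buff)
        proc.wait()
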
def run(self, connection, max_end_time):  # pylint: disable=too-many-branches
    connection = super().run(connection, max_end_time)
    directory = None
    nfs_address = None
    overlay_file = None
    namespace = self.parameters.get("namespace")
    if self.parameters.get("nfsrootfs") is not None:
        if not self.parameters["nfsrootfs"].get("install_overlay", True):
            self.logger.info("[%s] Skipping applying overlay to NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action="compress-overlay", label="output", key="file")
        directory = self.get_namespace_data(action="extract-rootfs", label="file", key="nfsroot")
        if overlay_file:
            self.logger.info("[%s] Applying overlay to NFS", namespace)
    elif self.parameters.get("images", {}).get("nfsrootfs") is not None:
        if not self.parameters["images"]["nfsrootfs"].get("install_overlay", True):
            self.logger.info("[%s] Skipping applying overlay to NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action="compress-overlay", label="output", key="file")
        directory = self.get_namespace_data(action="extract-rootfs", label="file", key="nfsroot")
        if overlay_file:
            self.logger.info("[%s] Applying overlay to NFS", namespace)
    elif self.parameters.get("persistent_nfs") is not None:
        if not self.parameters["persistent_nfs"].get("install_overlay", True):
            self.logger.info("[%s] Skipping applying overlay to persistent NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action="compress-overlay", label="output", key="file")
        nfs_address = self.parameters["persistent_nfs"].get("address")
        if overlay_file:
            self.logger.info("[%s] Applying overlay to persistent NFS address %s", namespace, nfs_address)
        # Need to mount the persistent NFS here.
        # We can't use self.mkdtemp() here because this directory should
        # not be removed if the umount fails.
        directory = mkdtemp(autoremove=False)
        try:
            subprocess.check_output(  # nosec - internal.
                ["mount", "-t", "nfs", nfs_address, directory])
        except subprocess.CalledProcessError as exc:
            raise JobError(exc)
    elif self.parameters.get("ramdisk") is not None:
        if not self.parameters["ramdisk"].get("install_overlay", True):
            self.logger.info("[%s] Skipping applying overlay to ramdisk", namespace)
            return connection
        overlay_file = self.get_namespace_data(action="compress-overlay", label="output", key="file")
        directory = self.get_namespace_data(action="extract-overlay-ramdisk", label="extracted_ramdisk", key="directory")
        if overlay_file:
            self.logger.info("[%s] Applying overlay %s to ramdisk", namespace, overlay_file)
    elif self.parameters.get("rootfs") is not None:
        overlay_file = self.get_namespace_data(action="compress-overlay", label="output", key="file")
        directory = self.get_namespace_data(action="apply-overlay", label="file", key="root")
    else:
        self.logger.debug("[%s] No overlay directory", namespace)
        self.logger.debug(self.parameters)
    if self.parameters.get("os") == "centos_installer":
        # The centos installer ramdisk doesn't like having anything other
        # than the kickstart config being inserted. Instead, make the
        # overlay accessible through tftp. Yuck.
        tftp_dir = os.path.dirname(self.get_namespace_data(action="download-action", label="ramdisk", key="file"))
        shutil.copy(overlay_file, tftp_dir)
        suffix = self.get_namespace_data(action="tftp-deploy", label="tftp", key="suffix")
        if not suffix:
            suffix = ""
        self.set_namespace_data(
            action=self.name,
            label="file",
            key="overlay",
            value=os.path.join(suffix, "ramdisk", os.path.basename(overlay_file)),
        )
    if overlay_file:
        self.logger.debug("[%s] Applying overlay %s to directory %s", namespace, overlay_file, directory)
        untar_file(overlay_file, directory)
        if nfs_address:
            subprocess.check_output(["umount", directory])  # nosec - internal.
            os.rmdir(directory)  # fails if the umount fails
    return connection
def run(self, connection, max_end_time):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    def progress_unknown_total(downloaded_sz, last_val):
        """ Compute progress when the size is unknown """
        condition = downloaded_sz >= last_val + 25 * 1024 * 1024
        return (
            condition,
            downloaded_sz,
            "progress %dMB" % (int(downloaded_sz / (1024 * 1024))) if condition else "",
        )

    def progress_known_total(downloaded_sz, last_val):
        """ Compute progress when the size is known """
        percent = math.floor(downloaded_sz / float(self.size) * 100)
        condition = percent >= last_val + 5
        return (
            condition,
            percent,
            "progress %3d%% (%dMB)" % (percent, int(downloaded_sz / (1024 * 1024))) if condition else "",
        )

    connection = super().run(connection, max_end_time)
    # self.cookies = self.job.context.config.lava_cookies  # FIXME: work out how to restore
    md5 = hashlib.md5()  # nosec - not being used for cryptography.
    sha256 = hashlib.sha256()

    # Create a fresh directory if the old one has been removed by a previous cleanup
    # (when retrying inside a RetryAction)
    try:
        os.makedirs(self.path, 0o755)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise InfrastructureError("Unable to create %s: %s" % (self.path, str(exc)))

    if "images" in self.parameters and self.key in self.parameters["images"]:
        remote = self.parameters["images"][self.key]
        compression = self.parameters["images"][self.key].get("compression", False)
    else:
        remote = self.parameters[self.key]
        if self.key == "ramdisk":
            compression = False
            self.logger.debug("Not decompressing ramdisk as it can be used compressed.")
        else:
            compression = self.parameters[self.key].get("compression", False)

    md5sum = remote.get("md5sum")
    sha256sum = remote.get("sha256sum")

    fname, _ = self._url_to_fname_suffix(self.path, compression)
    if os.path.isdir(fname):
        raise JobError("Download '%s' is a directory, not a file" % fname)
    if os.path.exists(fname):
        os.remove(fname)

    self.logger.info("downloading %s", remote["url"])
    self.logger.debug("saving as %s", fname)

    downloaded_size = 0
    beginning = time.time()
    # Choose the progress bar (is the size known?)
    if self.size == -1:
        self.logger.debug("total size: unknown")
        last_value = -25 * 1024 * 1024
        progress = progress_unknown_total
    else:
        self.logger.debug("total size: %d (%dMB)" % (self.size, int(self.size / (1024 * 1024))))
        last_value = -5
        progress = progress_known_total

    decompress_command = None
    if compression:
        if compression in self.decompress_command_map:
            decompress_command = self.decompress_command_map[compression]
            self.logger.info("Using %s to decompress %s", decompress_command, compression)
        else:
            self.logger.info(
                "Compression %s specified but not decompressing during download",
                compression,
            )
    else:
        self.logger.debug("No compression specified")

    def update_progress():
        nonlocal downloaded_size, last_value, md5, sha256
        downloaded_size += len(buff)
        (printing, new_value, msg) = progress(downloaded_size, last_value)
        if printing:
            last_value = new_value
            self.logger.debug(msg)
        md5.update(buff)
        sha256.update(buff)

    if compression and decompress_command:
        try:
            with open(fname, "wb") as dwnld_file:
                proc = subprocess.Popen(  # nosec - internal.
                    [decompress_command], stdin=subprocess.PIPE, stdout=dwnld_file)
        except OSError as exc:
            msg = "Unable to open %s: %s" % (fname, exc.strerror)
            self.logger.error(msg)
            raise InfrastructureError(msg)
        with proc.stdin as pipe:
            for buff in self.reader():
                update_progress()
                try:
                    pipe.write(buff)
                except BrokenPipeError as exc:
                    error_message = str(exc)
                    self.logger.exception(error_message)
                    msg = "Make sure the 'compression' corresponds to the image file type."
                    self.logger.error(msg)
                    raise JobError(error_message)
        proc.wait()
    else:
        with open(fname, "wb") as dwnld_file:
            for buff in self.reader():
                update_progress()
                dwnld_file.write(buff)

    # Log the download speed
    ending = time.time()
    self.logger.info("%dMB downloaded in %0.2fs (%0.2fMB/s)" % (
        downloaded_size / (1024 * 1024),
        round(ending - beginning, 2),
        round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2),
    ))

    # If the remote server uses "Content-Encoding: gzip", this calculation will be wrong
    # because requests will decompress the file on the fly, creating a larger file than
    # LAVA expects.
    if self.size > 0:
        if self.size != downloaded_size:
            raise InfrastructureError(
                "Download finished (%i bytes) but was not expected size (%i bytes), check your networking."
                % (downloaded_size, self.size))

    # set the dynamic data into the context
    self.set_namespace_data(action="download-action", label=self.key, key="file", value=fname)
    self.set_namespace_data(action="download-action", label=self.key, key="md5", value=md5.hexdigest())
    self.set_namespace_data(action="download-action", label=self.key, key="sha256", value=sha256.hexdigest())

    # handle archive files
    if "images" in self.parameters and self.key in self.parameters["images"]:
        archive = self.parameters["images"][self.key].get("archive", False)
    else:
        archive = self.parameters[self.key].get("archive")
    if archive:
        origin = fname
        # Trim the archive suffix explicitly; str.rstrip() strips a *set of
        # characters*, not a suffix.
        target_fname = os.path.basename(origin)
        if target_fname.endswith("." + archive):
            target_fname = target_fname[:-len("." + archive)]
        target_fname_path = os.path.join(os.path.dirname(origin), target_fname)
        if os.path.exists(target_fname_path):
            os.remove(target_fname_path)
        if archive == "tar":
            untar_file(origin, None, member=target_fname, outfile=target_fname_path)
        self.set_namespace_data(action="download-action", label=self.key, key="file", value=target_fname_path)
        self.set_namespace_data(action="download-action", label="file", key=self.key, value=target_fname)
        self.logger.debug("Using %s archive" % archive)

    if md5sum is not None:
        chk_md5sum = self.get_namespace_data(action="download-action", label=self.key, key="md5")
        if md5sum != chk_md5sum:
            self.logger.error("md5sum of downloaded content: %s" % chk_md5sum)
            self.logger.info("sha256sum of downloaded content: %s" %
                             (self.get_namespace_data(action="download-action", label=self.key, key="sha256")))
            self.results = {"fail": {"md5": md5sum, "download": chk_md5sum}}
            raise JobError("MD5 checksum for '%s' does not match." % fname)
        self.results = {"success": {"md5": md5sum}}

    if sha256sum is not None:
        chk_sha256sum = self.get_namespace_data(action="download-action", label=self.key, key="sha256")
        if sha256sum != chk_sha256sum:
            self.logger.info("md5sum of downloaded content: %s" %
                             (self.get_namespace_data(action="download-action", label=self.key, key="md5")))
            self.logger.error("sha256sum of downloaded content: %s" % chk_sha256sum)
            self.results = {"fail": {"sha256": sha256sum, "download": chk_sha256sum}}
            raise JobError("SHA256 checksum for '%s' does not match." % fname)
        self.results = {"success": {"sha256": sha256sum}}

    # certain deployments need prefixes set
    if self.parameters["to"] == "tftp" or self.parameters["to"] == "nbd":
        suffix = self.get_namespace_data(action="tftp-deploy", label="tftp", key="suffix")
        self.set_namespace_data(
            action="download-action",
            label="file",
            key=self.key,
            value=os.path.join(suffix, self.key, os.path.basename(fname)),
        )
    elif self.parameters["to"] == "iso-installer":
        suffix = self.get_namespace_data(action="deploy-iso-installer", label="iso", key="suffix")
        self.set_namespace_data(
            action="download-action",
            label="file",
            key=self.key,
            value=os.path.join(suffix, self.key, os.path.basename(fname)),
        )
    else:
        self.set_namespace_data(action="download-action", label="file", key=self.key, value=fname)

    # the xnbd protocol needs to know the location
    nbdroot = self.get_namespace_data(action="download-action", label="file", key="nbdroot")
    if "lava-xnbd" in self.parameters and nbdroot:
        self.parameters["lava-xnbd"]["nbdroot"] = nbdroot

    self.results = {
        "label": self.key,
        "size": downloaded_size,
        "md5sum": str(self.get_namespace_data(action="download-action", label=self.key, key="md5")),
        "sha256sum": str(self.get_namespace_data(action="download-action", label=self.key, key="sha256")),
    }
    return connection
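
# The size check above notes that "Content-Encoding: gzip" breaks the byte
# count because requests decompresses the body transparently. One way a
# reader could sidestep that is to consume the raw socket stream instead
# (a sketch, not the actual self.reader() implementation):
import requests

def raw_reader(url, chunk_size=32 * 1024):
    with requests.get(url, stream=True) as resp:
        resp.raise_for_status()
        while True:
            buff = resp.raw.read(chunk_size)  # bypasses transparent decoding
            if not buff:
                break
            yield buff
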
def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    def progress_unknown_total(downloaded_sz, last_val):
        """ Compute progress when the size is unknown """
        condition = downloaded_sz >= last_val + 25 * 1024 * 1024
        return (condition, downloaded_sz,
                "progress %dMB" % (int(downloaded_sz / (1024 * 1024))) if condition else "")

    def progress_known_total(downloaded_sz, last_val):
        """ Compute progress when the size is known """
        percent = math.floor(downloaded_sz / float(self.size) * 100)
        condition = percent >= last_val + 5
        return (condition, percent,
                "progress %3d%% (%dMB)" % (percent, int(downloaded_sz / (1024 * 1024))) if condition else "")

    connection = super(DownloadHandler, self).run(connection, max_end_time, args)
    # self.cookies = self.job.context.config.lava_cookies  # FIXME: work out how to restore
    md5 = hashlib.md5()
    sha256 = hashlib.sha256()

    # Create a fresh directory if the old one has been removed by a previous cleanup
    # (when retrying inside a RetryAction)
    try:
        os.makedirs(self.path, 0o755)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise InfrastructureError("Unable to create %s: %s" % (self.path, str(exc)))

    # Download the file
    with self._decompressor_stream() as (writer, fname):
        if 'images' in self.parameters and self.key in self.parameters['images']:
            remote = self.parameters['images'][self.key]
        else:
            remote = self.parameters[self.key]
        md5sum = remote.get('md5sum', None)
        sha256sum = remote.get('sha256sum', None)

        self.logger.info("downloading %s", remote['url'])
        self.logger.debug("saving as %s", fname)

        downloaded_size = 0
        beginning = time.time()
        # Choose the progress bar (is the size known?)
        if self.size == -1:
            self.logger.debug("total size: unknown")
            last_value = -25 * 1024 * 1024
            progress = progress_unknown_total
        else:
            self.logger.debug("total size: %d (%dMB)" % (self.size, int(self.size / (1024 * 1024))))
            last_value = -5
            progress = progress_known_total

        # Download the file and log the progress
        for buff in self.reader():
            downloaded_size += len(buff)
            (printing, new_value, msg) = progress(downloaded_size, last_value)
            if printing:
                last_value = new_value
                self.logger.debug(msg)
            md5.update(buff)
            sha256.update(buff)
            writer(buff)

        # Log the download speed
        ending = time.time()
        self.logger.info("%dMB downloaded in %0.2fs (%0.2fMB/s)" %
                         (downloaded_size / (1024 * 1024), round(ending - beginning, 2),
                          round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2)))

    # set the dynamic data into the context
    self.set_namespace_data(action='download-action', label=self.key, key='file', value=fname)
    self.set_namespace_data(action='download-action', label=self.key, key='md5', value=md5.hexdigest())
    self.set_namespace_data(action='download-action', label=self.key, key='sha256', value=sha256.hexdigest())

    # handle archive files
    if 'images' in self.parameters and self.key in self.parameters['images']:
        archive = self.parameters['images'][self.key].get('archive', False)
    else:
        archive = self.parameters[self.key].get('archive', None)
    if archive:
        origin = fname
        # Trim the archive suffix explicitly; str.rstrip() strips a *set of
        # characters*, not a suffix.
        target_fname = os.path.basename(origin)
        if target_fname.endswith('.' + archive):
            target_fname = target_fname[:-len('.' + archive)]
        target_fname_path = os.path.join(os.path.dirname(origin), target_fname)
        if os.path.exists(target_fname_path):
            os.remove(target_fname_path)
        if archive == 'tar':
            untar_file(origin, None, member=target_fname, outfile=target_fname_path)
        self.set_namespace_data(action='download-action', label=self.key, key='file', value=target_fname_path)
        self.set_namespace_data(action='download-action', label='file', key=self.key, value=target_fname)
        self.logger.debug("Using %s archive" % archive)

    if md5sum is not None:
        chk_md5sum = self.get_namespace_data(action='download-action', label=self.key, key='md5')
        if md5sum != chk_md5sum:
            self.logger.error("md5sum of downloaded content: %s" % chk_md5sum)
            self.logger.info("sha256sum of downloaded content: %s" %
                             (self.get_namespace_data(action='download-action', label=self.key, key='sha256')))
            self.results = {'fail': {'md5': md5sum, 'download': chk_md5sum}}
            raise JobError("MD5 checksum for '%s' does not match." % fname)
        self.results = {'success': {'md5': md5sum}}

    if sha256sum is not None:
        chk_sha256sum = self.get_namespace_data(action='download-action', label=self.key, key='sha256')
        if sha256sum != chk_sha256sum:
            self.logger.info("md5sum of downloaded content: %s" %
                             (self.get_namespace_data(action='download-action', label=self.key, key='md5')))
            self.logger.error("sha256sum of downloaded content: %s" % chk_sha256sum)
            self.results = {'fail': {'sha256': sha256sum, 'download': chk_sha256sum}}
            raise JobError("SHA256 checksum for '%s' does not match." % fname)
        self.results = {'success': {'sha256': sha256sum}}

    # certain deployments need prefixes set
    if self.parameters['to'] == 'tftp' or self.parameters['to'] == 'nbd':
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    elif self.parameters['to'] == 'iso-installer':
        suffix = self.get_namespace_data(action='deploy-iso-installer', label='iso', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    else:
        self.set_namespace_data(action='download-action', label='file', key=self.key, value=fname)

    # the xnbd protocol needs to know the location
    nbdroot = self.get_namespace_data(action='download-action', label='file', key='nbdroot')
    if 'lava-xnbd' in self.parameters and nbdroot:
        self.parameters['lava-xnbd']['nbdroot'] = nbdroot

    self.results = {
        'label': self.key,
        'md5sum': str(self.get_namespace_data(action='download-action', label=self.key, key='md5')),
        'sha256sum': str(self.get_namespace_data(action='download-action', label=self.key, key='sha256'))
    }
    return connection
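
# The oldest variant above delegates to a _decompressor_stream() context
# manager that yields a writer callable plus the final file name. A
# hypothetical minimal version handling only gzip, assuming in-process
# decompression with zlib (the real helper also covers other formats and
# derives the output name from the URL):
import contextlib
import zlib

@contextlib.contextmanager
def decompressor_stream_sketch(fname, compression):
    # 16 + MAX_WBITS tells zlib to expect a gzip header
    decomp = zlib.decompressobj(16 + zlib.MAX_WBITS) if compression == 'gz' else None
    with open(fname, 'wb') as out:
        def writer(buff):
            out.write(decomp.decompress(buff) if decomp else buff)
        yield writer, fname
        if decomp:
            out.write(decomp.flush())  # drain any buffered tail bytes
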