def run(self, connection, args=None):
    connection = super(ApplyOverlayTftp, self).run(connection, args)
    overlay_file = None
    directory = None
    nfs_url = None
    if self.parameters.get('nfsrootfs', None) is not None:
        overlay_file = self.data['compress-overlay'].get('output')
        directory = self.get_common_data('file', 'nfsroot')
        self.logger.info("Applying overlay to NFS")
    elif self.parameters.get('nfs_url', None) is not None:
        nfs_url = self.parameters.get('nfs_url')
        overlay_file = self.data['compress-overlay'].get('output')
        self.logger.info("Applying overlay to persistent NFS")
        # need to mount the persistent NFS here.
        directory = mkdtemp(autoremove=False)
        try:
            subprocess.check_output(['mount', '-t', 'nfs', nfs_url, directory])
        except subprocess.CalledProcessError as exc:
            raise JobError(exc)
    elif self.parameters.get('ramdisk', None) is not None:
        overlay_file = self.data['compress-overlay'].get('output')
        directory = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
        self.logger.info("Applying overlay to ramdisk")
    elif self.parameters.get('rootfs', None) is not None:
        overlay_file = self.data['compress-overlay'].get('output')
        directory = self.get_common_data('file', 'root')
    else:
        self.logger.debug("No overlay directory")
        self.logger.debug(self.parameters)
    if overlay_file:  # guard against the no-overlay branch above
        untar_file(overlay_file, directory)
    if nfs_url:
        subprocess.check_output(['umount', directory])
        os.rmdir(directory)  # fails if the umount fails
    return connection
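# The untar_file() helper used throughout these actions comes from the
# dispatcher's utils; a minimal sketch of its assumed behaviour follows.
# The member/outfile handling and the sketch name are assumptions for
# illustration, not the dispatcher's actual implementation.
import tarfile

def untar_file_sketch(tar_path, dest_dir, member=None, outfile=None):
    with tarfile.open(tar_path) as tar:
        if member is not None:
            # extract a single named member to an explicit output path
            extracted = tar.extractfile(member)
            with open(outfile, 'wb') as target:
                target.write(extracted.read())
        else:
            # unpack the whole archive into the destination directory
            tar.extractall(dest_dir)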
def run(self, connection, args=None):
    if not self.parameters.get(self.param_key, None):  # idempotency
        return connection
    connection = super(ExtractRootfs, self).run(connection, args)
    root = self.data['download_action'][self.param_key]['file']
    root_dir = self.mkdtemp()
    untar_file(root, root_dir)
    self.set_common_data('file', self.file_key, root_dir)
    self.logger.debug("Extracted %s to %s", self.file_key, root_dir)
    return connection
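# Hedged sketch of the get_common_data()/set_common_data() pair relied on
# above; the real methods live on the dispatcher's Action base class and
# share data between actions. This stand-alone layout is an assumption.
class CommonDataSketch(object):
    def __init__(self):
        self._store = {}  # {group: {key: value}}

    def set_common_data(self, group, key, value):
        self._store.setdefault(group, {})[key] = value

    def get_common_data(self, group, key):
        return self._store.get(group, {}).get(key)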
def run(self, connection, args=None):
    if not self.data['compress-overlay'].get('output'):
        raise RuntimeError("Unable to find the overlay")
    if not os.path.ismount(self.data['loop_mount']['mntdir']):
        raise RuntimeError("Image overlay requested to be applied but %s is not a mountpoint" %
                           self.data['loop_mount']['mntdir'])
    connection = super(ApplyOverlayImage, self).run(connection, args)
    # use tarfile module - no SELinux support here yet
    untar_file(self.data['compress-overlay'].get('output'),
               self.data['loop_mount']['mntdir'])
    return connection
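# For context, a minimal sketch of the loop mount that an earlier action is
# assumed to perform, producing the mounted mntdir checked above. Requires
# root privileges; the helper name is hypothetical.
import subprocess
import tempfile

def loop_mount_sketch(image_path):
    mntdir = tempfile.mkdtemp()
    subprocess.check_output(['mount', '-o', 'loop', image_path, mntdir])
    return mntdir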
def run(self, connection, args=None):
    connection = super(ApplyOverlayTftp, self).run(connection, args)
    overlay_file = None
    directory = None
    nfs_url = None
    if self.parameters.get('nfsrootfs', None) is not None:
        overlay_file = self.data['compress-overlay'].get('output')
        directory = self.get_common_data('file', 'nfsroot')
        self.logger.info("Applying overlay to NFS")
    elif self.parameters.get('nfs_url', None) is not None:
        nfs_url = self.parameters.get('nfs_url')
        overlay_file = self.data['compress-overlay'].get('output')
        self.logger.info("Applying overlay to persistent NFS")
        # need to mount the persistent NFS here.
        # We can't use self.mkdtemp() here because this directory should
        # not be removed if umount fails.
        directory = mkdtemp(autoremove=False)
        try:
            subprocess.check_output(['mount', '-t', 'nfs', nfs_url, directory])
        except subprocess.CalledProcessError as exc:
            raise JobError(exc)
    elif self.parameters.get('ramdisk', None) is not None:
        overlay_file = self.data['compress-overlay'].get('output')
        directory = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
        self.logger.info("Applying overlay to ramdisk")
    elif self.parameters.get('rootfs', None) is not None:
        overlay_file = self.data['compress-overlay'].get('output')
        directory = self.get_common_data('file', 'root')
    else:
        self.logger.debug("No overlay directory")
        self.logger.debug(self.parameters)
    if self.parameters.get('os', None) == "centos_installer":
        # centos installer ramdisk doesn't like having anything other
        # than the kickstart config being inserted. Instead, make the
        # overlay accessible through tftp. Yuck.
        tftp_dir = os.path.dirname(self.data['download_action']['ramdisk']['file'])
        shutil.copy(overlay_file, tftp_dir)
        suffix = self.data['tftp-deploy'].get('suffix', '')
        self.set_common_data('file', 'overlay',
                             os.path.join(suffix, os.path.basename(overlay_file)))
    if overlay_file:
        untar_file(overlay_file, directory)
    if nfs_url:
        subprocess.check_output(['umount', directory])
        os.rmdir(directory)  # fails if the umount fails
    return connection
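# Sketch of the mkdtemp(autoremove=...) wrapper assumed above: unlike
# tempfile.mkdtemp, it can register the directory for removal at exit.
# This illustrates the contract only; it is not the dispatcher's code.
import atexit
import shutil
import tempfile

def mkdtemp_sketch(autoremove=True, basedir=None):
    path = tempfile.mkdtemp(dir=basedir)
    if autoremove:
        # best-effort cleanup when the process exits
        atexit.register(shutil.rmtree, path, ignore_errors=True)
    return path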
def run(self, connection, max_end_time, args=None):
    if not self.parameters.get(self.param_key, None):  # idempotency
        return connection
    connection = super(ExtractRootfs, self).run(connection, max_end_time, args)
    root = self.get_namespace_data(action='download-action', label=self.param_key, key='file')
    root_dir = self.mkdtemp()
    untar_file(root, root_dir)
    self.set_namespace_data(action='extract-rootfs', label='file', key=self.file_key, value=root_dir)
    self.logger.debug("Extracted %s to %s", self.file_key, root_dir)
    return connection
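# Hedged sketch of the namespace-data helpers used by the newer actions:
# values are stored under an (action, label, key) triple. The real methods
# belong to the Action base class; this layout is an assumption.
class NamespaceDataSketch(object):
    def __init__(self):
        self._data = {}

    def set_namespace_data(self, action, label, key, value):
        self._data.setdefault(action, {}).setdefault(label, {})[key] = value

    def get_namespace_data(self, action, label, key):
        return self._data.get(action, {}).get(label, {}).get(key)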
def copy_overlay_to_sparse_fs(image, overlay):
    """Unpack a gzipped overlay tarball into an Android sparse image in place."""
    mnt_dir = mkdtemp()
    ext4_img = image + '.ext4'
    subprocess.check_output(['/usr/bin/simg2img', image, ext4_img],
                            stderr=subprocess.STDOUT)
    subprocess.check_output(['/bin/mount', '-o', 'loop', ext4_img, mnt_dir],
                            stderr=subprocess.STDOUT)
    if os.path.exists(overlay[:-3]):
        os.unlink(overlay[:-3])
    decompressed_overlay = decompress_file(overlay, 'gz')
    untar_file(decompressed_overlay, mnt_dir)
    subprocess.check_output(['/bin/umount', mnt_dir],
                            stderr=subprocess.STDOUT)
    subprocess.check_output(['/usr/bin/img2simg', ext4_img, image],
                            stderr=subprocess.STDOUT)
    os.remove(ext4_img)
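# Sketch of the decompress_file() helper assumed above: strips one layer of
# gzip compression and returns the decompressed path, matching the caller's
# expectation that the '.gz' suffix is dropped. Illustrative only.
import gzip
import shutil

def decompress_file_sketch(path, fmt='gz'):
    if fmt != 'gz':
        raise ValueError("only gzip is shown in this sketch")
    out_path = path[:-3]  # drop the '.gz' suffix, as the caller assumes
    with gzip.open(path, 'rb') as src, open(out_path, 'wb') as dest:
        shutil.copyfileobj(src, dest)
    return out_path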
def run(self, connection, args=None):
    if not self.parameters.get('modules', None):  # idempotency
        return connection
    connection = super(ExtractModules, self).run(connection, args)
    if not self.parameters.get('ramdisk', None):
        if not self.parameters.get('nfsrootfs', None):
            raise JobError("Unable to identify a location for the unpacked modules")
        else:
            root = self.get_common_data('file', 'nfsroot')
    else:
        root = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
    modules = self.data['download_action']['modules']['file']
    self.logger.info("extracting modules file %s to %s", modules, root)
    untar_file(modules, root)
    try:
        os.unlink(modules)
    except OSError as exc:
        raise RuntimeError("Unable to remove tarball: '%s' - %s" % (modules, exc))
    return connection
def run(self, connection, args=None):
    if not self.parameters.get(self.param_key, None):  # idempotency
        return connection
    connection = super(ExtractNfsRootfs, self).run(connection, args)
    root = self.data['download_action'][self.param_key]['file']
    root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
    untar_file(root, root_dir)
    if 'prefix' in self.parameters[self.param_key]:
        prefix = self.parameters[self.param_key]['prefix']
        self.logger.warning(
            "Adding '%s' prefix, any other content will not be visible." % prefix)
        self.rootdir = os.path.join(root_dir, prefix)
    else:
        self.rootdir = root_dir
    # sets the directory into which the overlay is unpacked and
    # which is used in the substitutions into the bootloader command string.
    self.set_common_data('file', self.file_key, self.rootdir)
    self.logger.debug("Extracted %s to %s", self.file_key, self.rootdir)
    return connection
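# Worked example of the 'prefix' handling above (paths hypothetical):
#   root_dir = '/var/lib/lava/dispatcher/tmp/nfsroot-abc123'
#   prefix   = 'binary'
#   rootdir  = os.path.join(root_dir, prefix)
#            = '/var/lib/lava/dispatcher/tmp/nfsroot-abc123/binary'
# Only content under the prefix is exported; sibling entries in the
# extracted tarball become invisible to the booted system.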
def run(self, connection, max_end_time, args=None):
    if not self.parameters.get('modules', None):  # idempotency
        return connection
    connection = super(ExtractModules, self).run(connection, max_end_time, args)
    modules = self.get_namespace_data(action='download-action', label='modules', key='file')
    if not self.parameters.get('ramdisk', None):
        if not self.parameters.get('nfsrootfs', None):
            raise JobError("Unable to identify a location for the unpacked modules")

    # if both NFS and ramdisk are specified, apply modules to both
    # as the kernel may need some modules to raise the network and
    # will need other modules to support operations within the NFS
    if self.parameters.get('nfsrootfs', None):
        if not self.parameters['nfsrootfs'].get('install_modules', True):
            self.logger.info("Skipping applying overlay to NFS")
            return connection
        root = self.get_namespace_data(action='extract-rootfs', label='file', key='nfsroot')
        self.logger.info("extracting modules file %s to %s", modules, root)
        untar_file(modules, root)
    if self.parameters.get('ramdisk', None):
        if not self.parameters['ramdisk'].get('install_modules', True):
            self.logger.info("Not adding modules to the ramdisk.")
            return connection  # return the connection, not None
        root = self.get_namespace_data(action='extract-overlay-ramdisk',
                                       label='extracted_ramdisk', key='directory')
        self.logger.info("extracting modules file %s to %s", modules, root)
        untar_file(modules, root)
    try:
        os.unlink(modules)
    except OSError as exc:
        raise InfrastructureError("Unable to remove tarball: '%s' - %s" % (modules, exc))
    return connection
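# Worked example of the install_modules switches honoured above
# (hypothetical job YAML; both default to true when omitted):
#   nfsrootfs:
#     url: http://example.com/rootfs.tar.gz
#     install_modules: false   # modules are not unpacked into the NFS root
#   ramdisk:
#     url: http://example.com/ramdisk.cpio.gz   # modules are installed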
def run(self, connection, args=None):
    if not self.parameters.get('modules', None):  # idempotency
        return connection
    connection = super(ExtractModules, self).run(connection, args)
    modules = self.data['download_action']['modules']['file']
    if not self.parameters.get('ramdisk', None):
        if not self.parameters.get('nfsrootfs', None):
            raise JobError("Unable to identify a location for the unpacked modules")

    # if both NFS and ramdisk are specified, apply modules to both
    # as the kernel may need some modules to raise the network and
    # will need other modules to support operations within the NFS
    if self.parameters.get('nfsrootfs', None):
        root = self.get_common_data('file', 'nfsroot')
        self.logger.info("extracting modules file %s to %s", modules, root)
        untar_file(modules, root)
    if self.parameters.get('ramdisk', None):
        root = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
        self.logger.info("extracting modules file %s to %s", modules, root)
        untar_file(modules, root)
    try:
        os.unlink(modules)
    except OSError as exc:
        raise RuntimeError("Unable to remove tarball: '%s' - %s" % (modules, exc))
    return connection
def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    def progress_unknown_total(downloaded_sz, last_val):
        """ Compute progress when the size is unknown """
        condition = downloaded_sz >= last_val + 25 * 1024 * 1024
        return (condition, downloaded_sz,
                "progress %dMB" % (int(downloaded_sz / (1024 * 1024))) if condition else "")

    def progress_known_total(downloaded_sz, last_val):
        """ Compute progress when the size is known """
        percent = math.floor(downloaded_sz / float(self.size) * 100)
        condition = percent >= last_val + 5
        return (condition, percent,
                "progress %3d%% (%dMB)" % (percent, int(downloaded_sz / (1024 * 1024))) if condition else "")

    connection = super(DownloadHandler, self).run(connection, max_end_time, args)
    # self.cookies = self.job.context.config.lava_cookies  # FIXME: work out how to restore
    md5 = hashlib.md5()
    sha256 = hashlib.sha256()

    # Create a fresh directory if the old one has been removed by a previous cleanup
    # (when retrying inside a RetryAction)
    try:
        os.makedirs(self.path, 0o755)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise InfrastructureError("Unable to create %s: %s" % (self.path, str(exc)))

    # Download the file
    with self._decompressor_stream() as (writer, fname):
        if 'images' in self.parameters and self.key in self.parameters['images']:
            remote = self.parameters['images'][self.key]
        else:
            remote = self.parameters[self.key]
        md5sum = remote.get('md5sum', None)
        sha256sum = remote.get('sha256sum', None)

        self.logger.info("downloading %s", remote['url'])
        self.logger.debug("saving as %s", fname)

        downloaded_size = 0
        beginning = time.time()
        # Choose the progress bar (is the size known?)
        if self.size == -1:
            self.logger.debug("total size: unknown")
            last_value = -25 * 1024 * 1024
            progress = progress_unknown_total
        else:
            self.logger.debug("total size: %d (%dMB)" %
                              (self.size, int(self.size / (1024 * 1024))))
            last_value = -5
            progress = progress_known_total

        # Download the file and log the progress
        for buff in self.reader():
            downloaded_size += len(buff)
            (printing, new_value, msg) = progress(downloaded_size, last_value)
            if printing:
                last_value = new_value
                self.logger.debug(msg)
            md5.update(buff)
            sha256.update(buff)
            writer(buff)

        # Log the download speed
        ending = time.time()
        self.logger.info("%dMB downloaded in %0.2fs (%0.2fMB/s)" %
                         (downloaded_size / (1024 * 1024), round(ending - beginning, 2),
                          round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2)))

    # set the dynamic data into the context
    self.set_namespace_data(action='download-action', label=self.key, key='file', value=fname)
    self.set_namespace_data(action='download-action', label=self.key, key='md5', value=md5.hexdigest())
    self.set_namespace_data(action='download-action', label=self.key, key='sha256', value=sha256.hexdigest())

    # handle archive files
    if 'images' in self.parameters and self.key in self.parameters['images']:
        archive = self.parameters['images'][self.key].get('archive', False)
    else:
        archive = self.parameters[self.key].get('archive', None)
    if archive:
        origin = fname
        target_fname = os.path.basename(origin).rstrip('.' + archive)
        target_fname_path = os.path.join(os.path.dirname(origin), target_fname)
        if os.path.exists(target_fname_path):
            os.remove(target_fname_path)
        if archive == 'tar':
            untar_file(origin, None, member=target_fname, outfile=target_fname_path)
        self.set_namespace_data(action='download-action', label=self.key, key='file',
                                value=target_fname_path)
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=target_fname)
        self.logger.debug("Using %s archive" % archive)

    if md5sum is not None:
        chk_md5sum = self.get_namespace_data(action='download-action', label=self.key, key='md5')
        if md5sum != chk_md5sum:
            self.logger.error("md5sum of downloaded content: %s" % chk_md5sum)
            self.logger.info("sha256sum of downloaded content: %s" %
                             (self.get_namespace_data(action='download-action',
                                                      label=self.key, key='sha256')))
            self.results = {'fail': {'md5': md5sum, 'download': chk_md5sum}}
            raise JobError("MD5 checksum for '%s' does not match." % fname)
        self.results = {'success': {'md5': md5sum}}

    if sha256sum is not None:
        chk_sha256sum = self.get_namespace_data(action='download-action', label=self.key, key='sha256')
        if sha256sum != chk_sha256sum:
            self.logger.info("md5sum of downloaded content: %s" %
                             (self.get_namespace_data(action='download-action',
                                                      label=self.key, key='md5')))
            self.logger.error("sha256sum of downloaded content: %s" % chk_sha256sum)
            self.results = {'fail': {'sha256': sha256sum, 'download': chk_sha256sum}}
            raise JobError("SHA256 checksum for '%s' does not match." % fname)
        self.results = {'success': {'sha256': sha256sum}}

    # certain deployments need prefixes set
    if self.parameters['to'] == 'tftp':
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    elif self.parameters['to'] == 'iso-installer':
        suffix = self.get_namespace_data(action='deploy-iso-installer', label='iso', key='suffix')
        self.set_namespace_data(action='download-action', label='file', key=self.key,
                                value=os.path.join(suffix, self.key, os.path.basename(fname)))
    else:
        self.set_namespace_data(action='download-action', label='file', key=self.key, value=fname)

    self.results = {
        'label': self.key,
        'md5sum': str(self.get_namespace_data(action='download-action', label=self.key, key='md5')),
        'sha256sum': str(self.get_namespace_data(action='download-action', label=self.key, key='sha256'))
    }
    return connection
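# Hedged sketch of the _decompressor_stream() context manager used above:
# it is assumed to yield a writer callable plus the destination filename,
# with the real handler decompressing transparently as it writes. This
# version writes raw bytes only and is not the dispatcher's implementation.
import contextlib

@contextlib.contextmanager
def decompressor_stream_sketch(dest_path):
    with open(dest_path, 'wb') as dest:
        yield (dest.write, dest_path)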
def run(self, connection, args=None):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    def progress_unknown_total(downloaded_size, last_value):
        """ Compute progress when the size is unknown """
        condition = downloaded_size >= last_value + 25 * 1024 * 1024
        return (condition, downloaded_size,
                "progress %dMB" % (int(downloaded_size / (1024 * 1024))) if condition else "")

    def progress_known_total(downloaded_size, last_value):
        """ Compute progress when the size is known """
        percent = math.floor(downloaded_size / float(self.size) * 100)
        condition = percent >= last_value + 5
        return (condition, percent,
                "progress %3d%% (%dMB)" % (percent, int(downloaded_size / (1024 * 1024))) if condition else "")

    connection = super(DownloadHandler, self).run(connection, args)
    # self.cookies = self.job.context.config.lava_cookies  # FIXME: work out how to restore
    md5 = hashlib.md5()
    sha256 = hashlib.sha256()
    with self._decompressor_stream() as (writer, fname):
        if 'images' in self.parameters and self.key in self.parameters['images']:
            remote = self.parameters['images'][self.key]
        else:
            remote = self.parameters[self.key]
        md5sum = remote.get('md5sum', None)
        sha256sum = remote.get('sha256sum', None)
        self.logger.info("downloading %s as %s" % (remote['url'], fname))

        downloaded_size = 0
        beginning = time.time()
        # Choose the progress bar (is the size known?)
        if self.size == -1:
            self.logger.debug("total size: unknown")
            last_value = -25 * 1024 * 1024
            progress = progress_unknown_total
        else:
            self.logger.debug("total size: %d (%dMB)" %
                              (self.size, int(self.size / (1024 * 1024))))
            last_value = -5
            progress = progress_known_total

        # Download the file and log the progress
        for buff in self.reader():
            downloaded_size += len(buff)
            (printing, new_value, msg) = progress(downloaded_size, last_value)
            if printing:
                last_value = new_value
                self.logger.debug(msg)
            md5.update(buff)
            sha256.update(buff)
            writer(buff)

        # Log the download speed
        ending = time.time()
        self.logger.info("%dMB downloaded in %0.2fs (%0.2fMB/s)" %
                         (downloaded_size / (1024 * 1024), round(ending - beginning, 2),
                          round(downloaded_size / (1024 * 1024 * (ending - beginning)), 2)))

    # set the dynamic data into the context
    self.data['download_action'][self.key]['file'] = fname
    self.data['download_action'][self.key]['md5'] = md5.hexdigest()
    self.data['download_action'][self.key]['sha256'] = sha256.hexdigest()

    # handle archive files
    archive = False
    if 'images' in self.parameters and self.key in self.parameters['images']:
        archive = self.parameters['images'][self.key].get('archive', False)
    else:
        archive = self.parameters[self.key].get('archive', None)
    if archive:
        origin = fname
        target_fname = os.path.basename(origin).rstrip('.' + archive)
        target_fname_path = os.path.join(os.path.dirname(origin), target_fname)
        if os.path.exists(target_fname_path):
            os.remove(target_fname_path)
        if archive == 'tar':
            untar_file(origin, None, member=target_fname, outfile=target_fname_path)
        self.data['download_action'][self.key]['file'] = target_fname_path
        self.set_common_data('file', self.key, target_fname)
        self.logger.debug("Using %s archive" % archive)

    if md5sum is not None:
        if md5sum != self.data['download_action'][self.key]['md5']:
            self.logger.error("md5sum of downloaded content: %s" % (
                self.data['download_action'][self.key]['md5']))
            self.logger.info("sha256sum of downloaded content: %s" % (
                self.data['download_action'][self.key]['sha256']))
            self.results = {'fail': {
                'md5': md5sum, 'download': self.data['download_action'][self.key]['md5']}}
            raise JobError("MD5 checksum for '%s' does not match." % fname)
        self.results = {'success': {'md5': md5sum}}

    if sha256sum is not None:
        if sha256sum != self.data['download_action'][self.key]['sha256']:
            self.logger.info("md5sum of downloaded content: %s" % (
                self.data['download_action'][self.key]['md5']))
            self.logger.error("sha256sum of downloaded content: %s" % (
                self.data['download_action'][self.key]['sha256']))
            self.results = {'fail': {
                'sha256': sha256sum, 'download': self.data['download_action'][self.key]['sha256']}}
            raise JobError("SHA256 checksum for '%s' does not match." % fname)
        self.results = {'success': {'sha256': sha256sum}}

    # certain deployments need prefixes set
    if self.parameters['to'] == 'tftp':
        suffix = self.data['tftp-deploy'].get('suffix', '')
        self.set_common_data('file', self.key, os.path.join(suffix, os.path.basename(fname)))
    elif self.parameters['to'] == 'iso-installer':
        suffix = self.data['deploy-iso-installer'].get('suffix', '')
        self.set_common_data('file', self.key, os.path.join(suffix, os.path.basename(fname)))
    else:
        self.set_common_data('file', self.key, fname)

    self.logger.info("md5sum of downloaded content: %s" %
                     (self.data['download_action'][self.key]['md5']))
    self.logger.info("sha256sum of downloaded content: %s" %
                     (self.data['download_action'][self.key]['sha256']))
    return connection
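# Worked example of progress_known_total with self.size = 100MB:
# last_value starts at -5, so the very first chunk always logs. If the
# first read delivers 12MB, percent = floor(12/100 * 100) = 12 >= -5 + 5,
# so "progress  12% (12MB)" is emitted and last_value becomes 12; the
# next message then waits until percent reaches 17 or more.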
def run(self, connection, max_end_time, args=None):  # pylint: disable=too-many-branches
    connection = super(ApplyOverlayTftp, self).run(connection, max_end_time, args)
    directory = None
    nfs_address = None
    overlay_file = None
    namespace = self.parameters.get('namespace', None)
    if self.parameters.get('nfsrootfs', None) is not None:
        if not self.parameters['nfsrootfs'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='extract-rootfs', label='file', key='nfsroot')
        if overlay_file:
            self.logger.info("[%s] Applying overlay to NFS", namespace)
    elif self.parameters.get('images', {}).get('nfsrootfs', None) is not None:
        if not self.parameters['images']['nfsrootfs'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='extract-rootfs', label='file', key='nfsroot')
        if overlay_file:
            self.logger.info("[%s] Applying overlay to NFS", namespace)
    elif self.parameters.get('persistent_nfs', None) is not None:
        if not self.parameters['persistent_nfs'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to persistent NFS", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        nfs_address = self.parameters['persistent_nfs'].get('address')
        if overlay_file:
            self.logger.info("[%s] Applying overlay to persistent NFS address %s",
                             namespace, nfs_address)
        # need to mount the persistent NFS here.
        # We can't use self.mkdtemp() here because this directory should
        # not be removed if umount fails.
        directory = mkdtemp(autoremove=False)
        try:
            subprocess.check_output(['mount', '-t', 'nfs', nfs_address, directory])
        except subprocess.CalledProcessError as exc:
            raise JobError(exc)
    elif self.parameters.get('ramdisk', None) is not None:
        if not self.parameters['ramdisk'].get('install_overlay', True):
            self.logger.info("[%s] Skipping applying overlay to ramdisk", namespace)
            return connection
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='extract-overlay-ramdisk',
                                            label='extracted_ramdisk', key='directory')
        if overlay_file:
            self.logger.info("[%s] Applying overlay %s to ramdisk", namespace, overlay_file)
    elif self.parameters.get('rootfs', None) is not None:
        overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
        directory = self.get_namespace_data(action='apply-overlay', label='file', key='root')
    else:
        self.logger.debug("[%s] No overlay directory", namespace)
        self.logger.debug(self.parameters)
    if self.parameters.get('os', None) == "centos_installer":
        # centos installer ramdisk doesn't like having anything other
        # than the kickstart config being inserted. Instead, make the
        # overlay accessible through tftp. Yuck.
        tftp_dir = os.path.dirname(self.get_namespace_data(action='download-action',
                                                           label='ramdisk', key='file'))
        shutil.copy(overlay_file, tftp_dir)
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        if not suffix:
            suffix = ''
        self.set_namespace_data(action=self.name, label='file', key='overlay',
                                value=os.path.join(suffix, "ramdisk",
                                                   os.path.basename(overlay_file)))
    if overlay_file:
        self.logger.debug("[%s] Applying overlay %s to directory %s",
                          namespace, overlay_file, directory)
        untar_file(overlay_file, directory)
        if nfs_address:
            subprocess.check_output(['umount', directory])
            os.rmdir(directory)  # fails if the umount fails
    return connection
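# Design note: the mount/untar/umount bracket above could be written as a
# context manager so the cleanup is harder to skip. A hedged sketch, not
# the dispatcher's actual API:
import contextlib
import os
import subprocess

@contextlib.contextmanager
def persistent_nfs_mounted(nfs_address, directory):
    subprocess.check_output(['mount', '-t', 'nfs', nfs_address, directory])
    try:
        yield directory
    finally:
        subprocess.check_output(['umount', directory])
        os.rmdir(directory)  # fails if the umount fails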