def image_to_raw(image_href, path, path_tmp):
    """Convert a downloaded image to raw, moving the result to ``path``.

    :param image_href: image reference, used only for log/error messages
    :param path: final destination for the raw image
    :param path_tmp: path of the downloaded (possibly non-raw) image
    :raises: ImageConvertFailed if qemu-img reports a non-raw format
             after conversion
    """
    with fileutils.remove_path_on_error(path_tmp):
        fmt = get_source_format(image_href, path_tmp)

        if fmt != "raw":
            staged = "%s.converted" % path

            # Conversion can be memory hungry; fail fast if the host is
            # short on memory rather than mid-conversion.
            utils.is_memory_insufficent(raise_if_fail=True)
            LOG.debug("%(image)s was %(format)s, converting to raw",
                      {'image': image_href, 'format': fmt})
            with fileutils.remove_path_on_error(staged):
                disk_utils.convert_image(path_tmp, staged, 'raw')
                os.unlink(path_tmp)

                # Re-inspect the converted file to be sure qemu-img
                # actually produced raw output.
                data = disk_utils.qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageConvertFailed(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is "
                                 "now %s") % data.file_format)

                os.rename(staged, path)
        else:
            # Already raw; just move it into place.
            os.rename(path_tmp, path)
def fetch_to_raw(context, image_href, path, max_size=0):
    """Fetch an image and ensure the result at ``path`` is in raw format.

    :param context: request context
    :param image_href: image reference to download
    :param path: final destination path
    :param max_size: flavor root-disk size bound; 0 disables the check
    :raises: ImageUnacceptable if qemu-img info fails, the image has a
             backing file, or conversion to raw fails
    :raises: FlavorDiskSmallerThanImage if the virtual size exceeds
             ``max_size``
    """
    path_tmp = "%s.part" % path
    fetch(context, image_href, path_tmp, max_size=max_size)

    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        # Backed images are rejected: a backing file reference in a
        # downloaded image is a well-known attack vector.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(image_id=image_href,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))

        # We can't generally shrink incoming images, so disallow
        # images > size of the flavor we're booting.  Checking here avoids
        # an immediate DoS where we convert large qcow images to raw
        # (which may compress well but not be sparse).
        # TODO(p-draigbrady): loop through all flavor sizes, so that
        # we might continue here and not discard the download.
        # If we did that we'd have to do the higher level size checks
        # irrespective of whether the base image was prepared or not.
        disk_size = data.virtual_size
        if max_size and max_size < disk_size:
            LOG.error(_LE('%(base)s virtual size %(disk_size)s '
                          'larger than flavor root disk size %(size)s'),
                      {'base': path,
                       'disk_size': disk_size,
                       'size': max_size})
            raise exception.FlavorDiskSmallerThanImage(
                flavor_size=max_size, image_size=disk_size)

        if fmt != "raw" and CONF.force_raw_images:
            staged = "%s.converted" % path
            LOG.debug("%s was %s, converting to raw", image_href, fmt)
            with fileutils.remove_path_on_error(staged):
                try:
                    convert_image(path_tmp, staged, fmt, 'raw')
                except exception.ImageUnacceptable as exp:
                    # re-raise to include image_href
                    raise exception.ImageUnacceptable(image_id=image_href,
                        reason=_("Unable to convert image to "
                                 "raw: %(exp)s") % {'exp': exp})

                os.unlink(path_tmp)

                # Verify the conversion actually yielded a raw file.
                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageUnacceptable(image_id=image_href,
                        reason=_("Converted to raw, but format is "
                                 "now %s") % data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
def create_image(self, prepare_template, base, size, *args, **kwargs): filename = os.path.basename(base) # Copy main file of ploop disk, restore DiskDescriptor.xml for it # and resize if necessary @utils.synchronized(filename, external=True, lock_path=self.lock_path) def _copy_ploop_image(base, target, size): # Ploop disk is a directory with data file(root.hds) and # DiskDescriptor.xml, so create this dir fileutils.ensure_tree(target) image_path = os.path.join(target, "root.hds") libvirt_utils.copy_image(base, image_path) utils.execute('ploop', 'restore-descriptor', '-f', self.pcs_format, target, image_path) if size: self.resize_image(size) # Generating means that we create empty ploop disk generating = 'image_id' not in kwargs remove_func = functools.partial(fileutils.delete_if_exists, remove=shutil.rmtree) if generating: if os.path.exists(self.path): return with fileutils.remove_path_on_error(self.path, remove=remove_func): prepare_template(target=self.path, *args, **kwargs) else: # Create ploop disk from glance image if not os.path.exists(base): prepare_template(target=base, *args, **kwargs) else: # Disk already exists in cache, just update time libvirt_utils.update_mtime(base) self.verify_base_size(base, size) if os.path.exists(self.path): return # Get format for ploop disk if CONF.force_raw_images: self.pcs_format = "raw" else: image_meta = IMAGE_API.get(kwargs["context"], kwargs["image_id"]) format = image_meta.get("disk_format") if format == "ploop": self.pcs_format = "expanded" elif format == "raw": self.pcs_format = "raw" else: reason = _("Ploop image backend doesn't support images in" " %s format. You should either set" " force_raw_images=True in config or upload an" " image in ploop or raw format.") % format raise exception.ImageUnacceptable( image_id=kwargs["image_id"], reason=reason) with fileutils.remove_path_on_error(self.path, remove=remove_func): _copy_ploop_image(base, self.path, size)
def image_to_raw(image_href, path, path_tmp):
    """Validate a downloaded image, convert it to raw, move it to ``path``.

    :param image_href: image reference, used only for log/error messages
    :param path: final destination for the raw image
    :param path_tmp: path of the downloaded (possibly non-raw) image
    :raises: ImageUnacceptable if qemu-img info fails or the image has a
             backing file
    :raises: ImageConvertFailed if conversion does not yield a raw file
    """
    with fileutils.remove_path_on_error(path_tmp):
        data = disk_utils.qemu_img_info(path_tmp)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        # Images with a backing file are a known attack vector; reject.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=_("fmt=%(fmt)s backed by: %(backing_file)s")
                % {'fmt': fmt, 'backing_file': backing_file})

        if fmt != "raw":
            staged = "%s.converted" % path
            # BUG FIX: the message was eagerly %-formatted before the
            # LOG.debug call; pass the mapping as a lazy argument so the
            # formatting work is skipped when debug logging is disabled.
            LOG.debug("%(image)s was %(format)s, converting to raw",
                      {'image': image_href, 'format': fmt})
            with fileutils.remove_path_on_error(staged):
                disk_utils.convert_image(path_tmp, staged, 'raw')
                os.unlink(path_tmp)

                # Confirm qemu-img actually produced raw output.
                data = disk_utils.qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageConvertFailed(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is "
                                 "now %s") % data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
def create_image(self, prepare_template, base, size, *args, **kwargs): filename = os.path.basename(base) # Copy main file of ploop disk, restore DiskDescriptor.xml for it # and resize if necessary @utils.synchronized(filename, external=True, lock_path=self.lock_path) def _copy_ploop_image(base, target, size): # Ploop disk is a directory with data file(root.hds) and # DiskDescriptor.xml, so create this dir fileutils.ensure_tree(target) image_path = os.path.join(target, "root.hds") libvirt_utils.copy_image(base, image_path) utils.execute('ploop', 'restore-descriptor', '-f', self.pcs_format, target, image_path) if size: self.resize_image(size) # Generating means that we create empty ploop disk generating = 'image_id' not in kwargs remove_func = functools.partial(fileutils.delete_if_exists, remove=shutil.rmtree) if generating: if os.path.exists(self.path): return with fileutils.remove_path_on_error(self.path, remove=remove_func): prepare_template(target=self.path, *args, **kwargs) else: # Create ploop disk from glance image if not os.path.exists(base): prepare_template(target=base, *args, **kwargs) else: # Disk already exists in cache, just update time nova.privsep.path.utime(base) self.verify_base_size(base, size) if os.path.exists(self.path): return # Get format for ploop disk if CONF.force_raw_images: self.pcs_format = "raw" else: image_meta = IMAGE_API.get(kwargs["context"], kwargs["image_id"]) format = image_meta.get("disk_format") if format == "ploop": self.pcs_format = "expanded" elif format == "raw": self.pcs_format = "raw" else: reason = _("Ploop image backend doesn't support images in" " %s format. You should either set" " force_raw_images=True in config or upload an" " image in ploop or raw format.") % format raise exception.ImageUnacceptable( image_id=kwargs["image_id"], reason=reason) with fileutils.remove_path_on_error(self.path, remove=remove_func): _copy_ploop_image(base, self.path, size)
def image_to_raw(image_href, path, path_tmp):
    """Inspect the downloaded file, convert to raw if needed, move to path.

    Rejects unparseable or backed images, converts anything non-raw with
    qemu-img, re-verifies the result, and renames it into place.
    """
    with fileutils.remove_path_on_error(path_tmp):
        info = disk_utils.qemu_img_info(path_tmp)
        source_format = info.file_format

        if source_format is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        if info.backing_file is not None:
            # A backing-file reference in user-supplied images is a
            # classic attack vector, so refuse it outright.
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=_("fmt=%(fmt)s backed by: %(backing_file)s")
                % {'fmt': source_format,
                   'backing_file': info.backing_file})

        if source_format == "raw":
            # Nothing to convert; just move the download into place.
            os.rename(path_tmp, path)
            return

        staged = "%s.converted" % path
        LOG.debug("%(image)s was %(format)s, converting to raw",
                  {'image': image_href, 'format': source_format})
        with fileutils.remove_path_on_error(staged):
            disk_utils.convert_image(path_tmp, staged, 'raw')
            os.unlink(path_tmp)

            converted = disk_utils.qemu_img_info(staged)
            if converted.file_format != "raw":
                raise exception.ImageConvertFailed(
                    image_id=image_href,
                    reason=_("Converted to raw, but format is "
                             "now %s") % converted.file_format)

            os.rename(staged, path)
def fetch_to_raw(context, image_href, path):
    """Fetch an image, converting it to raw when policy requires.

    With ``CONF.force_raw_images`` everything non-raw is converted;
    otherwise only formats other than qcow2/raw are forced to raw
    (vendor-specific safeguard).
    """
    path_tmp = "%s.part" % path
    fetch(context, image_href, path_tmp)

    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        # Reject backed images: a known attack vector for downloads.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))

        if CONF.force_raw_images:
            force_convert = (fmt != "raw")
        else:
            # WRS: we have seen problems with backing file formats
            # other than qcow2/raw, so we unconditionally
            # force those formats to raw.
            force_convert = (fmt != 'qcow2') and (fmt != "raw")
            if force_convert:
                # Converting even though force_raw_images is off.
                LOG.info("Ignore force_raw_images:convert %(fmt)s to raw",
                         {'fmt': fmt})

        if force_convert:
            staged = "%s.converted" % path
            LOG.debug("%s was %s, converting to raw", image_href, fmt)
            with fileutils.remove_path_on_error(staged):
                try:
                    convert_image(path_tmp, staged, fmt, 'raw')
                except exception.ImageUnacceptable as exp:
                    # re-raise to include image_href
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Unable to convert image to "
                                 "raw: %(exp)s") % {'exp': exp})

                os.unlink(path_tmp)

                # Verify conversion actually produced a raw file.
                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is "
                                 "now %s") % data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
def test_make_drive_unknown_format(self):
    """make_drive must reject an unsupported config_drive_format."""
    self.flags(config_drive_format='vfat')
    try:
        with zvmconfigdrive.ZVMConfigDriveBuilder(
                instance_md=self.inst_md) as c:
            self.assertRaises(exception.ConfigDriveUnknownFormat,
                              c.make_drive, self._file_name)
    finally:
        # BUG FIX: the original called
        # fileutils.remove_path_on_error(self._file_path) here, but that
        # function only *returns* a context manager - without a `with`
        # it is a no-op and nothing was ever cleaned up.  Remove the
        # generated drive file instead (ignoring a missing file, since
        # make_drive is expected to fail before creating it).
        fileutils.delete_if_exists(self._file_name)
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create a qcow2 disk for the instance from the cached base image.

    Handles the iso special case (no COW overlay possible), refreshes
    the cache mtime, and re-creates legacy ``<base>_<sizeGB>`` backing
    files when an existing disk still references one.

    Fixes: removed a commented-out dead-code line and replaced a
    backslash line continuation with a parenthesized condition.
    """
    filename = self._get_lock_name(base)

    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def copy_qcow2_image(base, target, size):
        # TODO(pbrady): Consider copying the cow image here
        # with preallocation=metadata set for performance reasons.
        # This would be keyed on a 'preallocate_images' setting.
        disk_format = kwargs.get('disk_format', '')
        if disk_format == 'iso':
            # An iso cannot serve as a qcow2 backing file, so create a
            # blank qcow2 disk instead of a COW overlay.
            libvirt_utils.create_image('qcow2', target, size)
        else:
            libvirt_utils.create_cow_image(base, target)
        if size:
            image = imgmodel.LocalFileImage(target,
                                            imgmodel.FORMAT_QCOW2)
            disk.extend(image, size)

    # Download the unmodified base image unless we already have a copy.
    if not os.path.exists(base):
        prepare_template(target=base, *args, **kwargs)

    # NOTE(ankit): Update the mtime of the base file so the image
    # cache manager knows it is in use.
    libvirt_utils.update_mtime(base)
    self.verify_base_size(base, size)

    legacy_backing_size = None
    legacy_base = base

    # Determine whether an existing qcow2 disk uses a legacy backing by
    # actually looking at the image itself and parsing the output of the
    # backing file it expects to be using.
    if os.path.exists(self.path):
        backing_path = libvirt_utils.get_disk_backing_file(self.path)
        if backing_path is not None:
            backing_file = os.path.basename(backing_path)
            backing_parts = backing_file.rpartition('_')
            # Legacy backing files are named <base>_<size-in-GB>.
            if (backing_file != backing_parts[-1]
                    and backing_parts[-1].isdigit()):
                legacy_backing_size = int(backing_parts[-1])
                legacy_base += '_%d' % legacy_backing_size
                legacy_backing_size *= units.Gi

    # Create the legacy backing file if necessary.
    if legacy_backing_size:
        if not os.path.exists(legacy_base):
            with fileutils.remove_path_on_error(legacy_base):
                libvirt_utils.copy_image(base, legacy_base)
                image = imgmodel.LocalFileImage(legacy_base,
                                                imgmodel.FORMAT_QCOW2)
                disk.extend(image, legacy_backing_size)

    if not os.path.exists(self.path):
        with fileutils.remove_path_on_error(self.path):
            copy_qcow2_image(base, self.path, size)
def test_create_configdrive_tgz(self):
    """Building a tgz config drive must produce the archive file."""
    self._file_path = CONF.tempdir
    fileutils.ensure_tree(self._file_path)
    try:
        with zvmconfigdrive.ZVMConfigDriveBuilder(
                instance_md=self.inst_md) as c:
            c.make_drive(self._file_name)

        self.assertTrue(os.path.exists(self._file_name))
    finally:
        # BUG FIX: the original called
        # fileutils.remove_path_on_error(self._file_path), which only
        # *returns* a context manager - without a `with` it is a no-op
        # and the generated drive was never cleaned up.  Delete the
        # created archive explicitly (the tempdir itself is shared and
        # deliberately left in place).
        fileutils.delete_if_exists(self._file_name)
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create a qcow2 COW disk for the instance from the cached base.

    Also re-creates a legacy ``<base>_<sizeGB>`` backing file when an
    existing disk is found to still reference one.
    """
    filename = self._get_lock_name(base)

    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def copy_qcow2_image(base, target, size):
        # TODO(pbrady): Consider copying the cow image here
        # with preallocation=metadata set for performance reasons.
        # This would be keyed on a 'preallocate_images' setting.
        libvirt_utils.create_cow_image(base, target)
        if size:
            image = imgmodel.LocalFileImage(target,
                                            imgmodel.FORMAT_QCOW2)
            disk.extend(image, size)

    # Download the unmodified base image unless we already have a copy.
    if not os.path.exists(base):
        prepare_template(target=base, max_size=size, *args, **kwargs)

    # NOTE(ankit): Update the mtime of the base file so the image
    # cache manager knows it is in use.
    libvirt_utils.update_mtime(base)
    self.verify_base_size(base, size)

    legacy_backing_size = None
    legacy_base = base

    # Determine whether an existing qcow2 disk uses a legacy backing by
    # actually looking at the image itself and parsing the output of the
    # backing file it expects to be using.
    if os.path.exists(self.path):
        backing_path = libvirt_utils.get_disk_backing_file(self.path)
        if backing_path is not None:
            backing_file = os.path.basename(backing_path)
            backing_parts = backing_file.rpartition('_')
            # Legacy backing files are named <base>_<size-in-GB>.
            if backing_file != backing_parts[-1] and \
                    backing_parts[-1].isdigit():
                legacy_backing_size = int(backing_parts[-1])
                legacy_base += '_%d' % legacy_backing_size
                legacy_backing_size *= units.Gi

    # Create the legacy backing file if necessary.
    if legacy_backing_size:
        if not os.path.exists(legacy_base):
            with fileutils.remove_path_on_error(legacy_base):
                libvirt_utils.copy_image(base, legacy_base)
                image = imgmodel.LocalFileImage(legacy_base,
                                                imgmodel.FORMAT_QCOW2)
                disk.extend(image, legacy_backing_size)

    if not os.path.exists(self.path):
        with fileutils.remove_path_on_error(self.path):
            copy_qcow2_image(base, self.path, size)
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create a flat disk for the instance.

    Generates in place when no ``image_id`` is given, otherwise copies
    (and optionally extends) the cached base image.
    """
    filename = self._get_lock_name(base)

    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def copy_raw_image(base, target, size):
        libvirt_utils.copy_image(base, target)
        if size:
            image = imgmodel.LocalFileImage(target, self.driver_format)
            disk.extend(image, size)

    # No image_id means we build an empty disk rather than copy a base.
    generating = 'image_id' not in kwargs
    if generating:
        if not self.exists():
            # Generating image in place
            prepare_template(target=self.path, *args, **kwargs)
    else:
        if not os.path.exists(base):
            prepare_template(target=base, *args, **kwargs)
        # NOTE(mikal): Update the mtime of the base file so the image
        # cache manager knows it is in use.
        libvirt_utils.update_mtime(base)
        self.verify_base_size(base, size)
        if not os.path.exists(self.path):
            with fileutils.remove_path_on_error(self.path):
                copy_raw_image(base, self.path, size)

    self.correct_format()
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create a flat disk for the instance (check_image_exists variant).

    Generates in place when no ``image_id`` is given, otherwise copies
    (and optionally extends) the cached base image.
    """
    filename = self._get_lock_name(base)

    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def copy_raw_image(base, target, size):
        libvirt_utils.copy_image(base, target)
        if size:
            # class Raw is misnamed, format may not be 'raw' in all cases
            image = imgmodel.LocalFileImage(target, self.driver_format)
            disk.extend(image, size)

    # No image_id means we build an empty disk rather than copy a base.
    generating = 'image_id' not in kwargs
    if generating:
        if not self.check_image_exists():
            # Generating image in place
            prepare_template(target=self.path, *args, **kwargs)
    else:
        if not os.path.exists(base):
            prepare_template(target=base, max_size=size, *args, **kwargs)
        # NOTE(mikal): Update the mtime of the base file so the image
        # cache manager knows it is in use.
        libvirt_utils.update_mtime(base)
        self.verify_base_size(base, size)
        if not os.path.exists(self.path):
            with fileutils.remove_path_on_error(self.path):
                copy_raw_image(base, self.path, size)

    self.correct_format()
def fetch(context, image_href, path, trusted_certs=None):
    """Download ``image_href`` to ``path``, removing the file on failure.

    Holds the global disk-ops semaphore for the duration of the
    download to throttle concurrent heavy disk I/O.
    """
    with fileutils.remove_path_on_error(path), \
            compute_utils.disk_ops_semaphore:
        IMAGE_API.download(context, image_href, dest_path=path,
                           trusted_certs=trusted_certs)
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create a flat disk for the instance (no cache mtime refresh).

    Generates in place when no ``image_id`` is given, otherwise copies
    (and optionally extends) the cached base image.
    """
    filename = self._get_lock_name(base)

    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def copy_raw_image(base, target, size):
        libvirt_utils.copy_image(base, target)
        if size:
            # class Raw is misnamed, format may not be 'raw' in all cases
            image = imgmodel.LocalFileImage(target, self.driver_format)
            disk.extend(image, size)

    # No image_id means we build an empty disk rather than copy a base.
    generating = 'image_id' not in kwargs
    if generating:
        if not self.check_image_exists():
            # Generating image in place
            prepare_template(target=self.path, *args, **kwargs)
    else:
        if not os.path.exists(base):
            prepare_template(target=base, max_size=size, *args, **kwargs)
        self.verify_base_size(base, size)
        if not os.path.exists(self.path):
            with fileutils.remove_path_on_error(self.path):
                copy_raw_image(base, self.path, size)

    self.correct_format()
def fetch(context, image_service, image_id, path, _user_id, _project_id):
    """Download an image to ``path`` and log throughput statistics.

    An ENOSPC during download is logged specially before re-raising;
    the partial file is removed on any error.
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance.  Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    start_time = timeutils.utcnow()
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            try:
                image_service.download(context, image_id, image_file)
            except IOError as e:
                with excutils.save_and_reraise_exception():
                    if e.errno == errno.ENOSPC:
                        # TODO(eharney): Fire an async error message
                        # for this
                        LOG.error(_LE("No space left in "
                                      "image_conversion_dir "
                                      "path (%(path)s) while fetching "
                                      "image %(image)s."),
                                  {'path': os.path.dirname(path),
                                   'image': image_id})
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg, {"dest": image_file.name,
                    "sz": fsz_mb,
                    "duration": duration})
    msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def fetch(context, image_href, path, _user_id, _project_id, max_size=0):
    """Download ``image_href`` to ``path`` and log the elapsed time.

    The partial file is removed if the download fails.  ``_user_id``,
    ``_project_id`` and ``max_size`` are accepted for interface
    compatibility but not used here.
    """
    with fileutils.remove_path_on_error(path):
        with Timer() as t:
            IMAGE_API.download(context, image_href, dest_path=path)
        # BUG FIX: the message was eagerly %-formatted; pass the value as
        # a lazy argument so formatting only happens when debug logging
        # is enabled.
        LOG.debug("pf9image download: Time taken to download file: "
                  "%.03f sec.", t.interval)
def fetch_verify_image(context: context.RequestContext,
                       image_service: glance.GlanceImageService,
                       image_id: str,
                       dest: str) -> None:
    """Fetch an image and, when qemu data is available, verify it.

    Verification rejects images whose format cannot be parsed and
    images that declare a backing file.  The downloaded file is removed
    on any error.

    Fixes: replaced the ``False if not x else True`` and
    ``True if c else False`` anti-patterns with ``bool(x)`` and the
    direct comparison (identical values, idiomatic form).
    """
    fetch(context, image_service, image_id, dest, None, None)
    image_meta = image_service.show(context, image_id)

    with fileutils.remove_path_on_error(dest):
        has_meta = bool(image_meta)
        try:
            format_raw = image_meta['disk_format'] == 'raw'
        except TypeError:
            # image_meta is None (or not subscriptable); treat as not raw.
            format_raw = False
        data = get_qemu_data(image_id, has_meta, format_raw, dest, True)
        # We can only really do verification of the image if we have
        # qemu data to use
        if data is not None:
            fmt = data.file_format
            if fmt is None:
                raise exception.ImageUnacceptable(
                    reason=_("'qemu-img info' parsing failed."),
                    image_id=image_id)

            # Backed images are rejected: a known attack vector.
            backing_file = data.backing_file
            if backing_file is not None:
                raise exception.ImageUnacceptable(
                    image_id=image_id,
                    reason=(_("fmt=%(fmt)s backed by: %(backing_file)s")
                            % {'fmt': fmt,
                               'backing_file': backing_file}))
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None, size=None,
                       run_as_root=True):
    """Fetch an image and verify it is usable for volume creation.

    Rejects unparseable formats, backing files, and images whose
    virtual size exceeds the requested volume ``size`` (in GB).  The
    downloaded file is removed on any error.
    """
    fetch(context, image_service, image_id, dest, None, None)
    with fileutils.remove_path_on_error(dest):
        data = qemu_img_info(dest, run_as_root=run_as_root)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)

        # Backed images are rejected: a known attack vector.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))

        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and data.virtual_size > size:
            params = {'image_size': data.virtual_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id,
                                              reason=reason)
def verify_glance_image_signature(context, image_service, image_id, path):
    """Verify the cryptographic signature of a downloaded image.

    :returns: False when no signature metadata is present (verification
              skipped) or no verifier was produced; True on successful
              verification
    :raises: InvalidSignatureImage if the signature metadata is only
             partially present
    :raises: ImageSignatureVerificationException on any verification
             failure
    """
    verifier = None
    image_meta = image_service.show(context, image_id)
    image_properties = image_meta.get('properties', {})
    img_signature = image_properties.get('img_signature')
    img_sig_hash_method = image_properties.get('img_signature_hash_method')
    img_sig_cert_uuid = image_properties.get(
        'img_signature_certificate_uuid')
    img_sig_key_type = image_properties.get('img_signature_key_type')

    if all(m is None for m in [img_signature, img_sig_cert_uuid,
                               img_sig_hash_method, img_sig_key_type]):
        # NOTE(tommylikehu): We won't verify the image signature
        # if none of the signature metadata presents.
        return False
    # Partial metadata is an error: either all four properties are set
    # or none of them are.
    if any(m is None for m in [img_signature, img_sig_cert_uuid,
                               img_sig_hash_method, img_sig_key_type]):
        LOG.error('Image signature metadata for image %s is '
                  'incomplete.', image_id)
        raise exception.InvalidSignatureImage(image_id=image_id)

    try:
        verifier = signature_utils.get_verifier(
            context=context,
            img_signature_certificate_uuid=img_sig_cert_uuid,
            img_signature_hash_method=img_sig_hash_method,
            img_signature=img_signature,
            img_signature_key_type=img_sig_key_type,
        )
    except cursive_exception.SignatureVerificationError:
        message = _('Failed to get verifier for image: %s') % image_id
        LOG.error(message)
        raise exception.ImageSignatureVerificationException(
            reason=message)

    if verifier:
        with fileutils.remove_path_on_error(path):
            with open(path, "rb") as tem_file:
                try:
                    # Verification is CPU heavy; run it in the native
                    # thread pool so the eventlet loop is not blocked.
                    tpool.execute(_verify_image, tem_file, verifier)
                    LOG.info('Image signature verification succeeded '
                             'for image: %s', image_id)
                    return True
                except cryptography.exceptions.InvalidSignature:
                    message = _('Image signature verification '
                                'failed for image: %s') % image_id
                    LOG.error(message)
                    raise exception.ImageSignatureVerificationException(
                        reason=message)
                except Exception as ex:
                    message = _('Failed to verify signature for '
                                'image: %(image)s due to '
                                'error: %(error)s ') % {
                        'image': image_id,
                        'error': six.text_type(ex)}
                    LOG.error(message)
                    raise exception.ImageSignatureVerificationException(
                        reason=message)
    return False
def fetch(context, image_service, image_id, path, _user_id, _project_id):
    """Download an image to ``path`` and log throughput statistics.

    The partial file is removed if the download fails.
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance.  Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    start_time = timeutils.utcnow()
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(context, image_id, image_file)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg, {"dest": image_file.name,
                    "sz": fsz_mb,
                    "duration": duration})
    msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None, size=None,
                       run_as_root=True):
    """Fetch an image and verify it is usable for volume creation.

    Rejects unparseable formats, backing files, and images whose
    virtual size exceeds the requested volume ``size`` (in GB).  The
    downloaded file is removed on any error.
    """
    fetch(context, image_service, image_id, dest, None, None)

    with fileutils.remove_path_on_error(dest):
        img_info = qemu_img_info(dest, run_as_root=run_as_root)

        file_format = img_info.file_format
        if file_format is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)

        if img_info.backing_file is not None:
            # Backed images are rejected: a known attack vector.
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by: %(backing_file)s")
                % {'fmt': file_format,
                   'backing_file': img_info.backing_file})

        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and img_info.virtual_size > size:
            params = {'image_size': img_info.virtual_size,
                      'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id,
                                              reason=reason)
def fetch(context, image_href, path, force_raw=False):
    """Download ``image_href`` to ``path``, optionally converting to raw.

    The partial download is removed if fetching fails.  When
    ``force_raw`` is set, the result is converted in place via
    :func:`image_to_raw`.
    """
    with fileutils.remove_path_on_error(path), \
            open(path, "wb") as dest_file:
        fetch_into(context, image_href, dest_file)

    if force_raw:
        image_to_raw(image_href, path, "%s.part" % path)
def fetch_to_raw(context, image_href, path):
    """Fetch an image and convert it to raw if ``force_raw_images`` is set.

    :raises: ImageUnacceptable if qemu-img info fails, the image has a
             backing file, or conversion does not yield a raw file
    """
    path_tmp = "%s.part" % path
    fetch(context, image_href, path_tmp)

    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        # Backed images are rejected: a known attack vector.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))

        if fmt != "raw" and CONF.force_raw_images:
            staged = "%s.converted" % path
            LOG.debug("%s was %s, converting to raw", image_href, fmt)
            with fileutils.remove_path_on_error(staged):
                try:
                    convert_image(path_tmp, staged, fmt, 'raw')
                except exception.ImageUnacceptable as exp:
                    # re-raise to include image_href
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Unable to convert image to "
                                 "raw: %(exp)s") % {'exp': exp})

                os.unlink(path_tmp)

                # Verify the conversion actually produced a raw file.
                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is "
                                 "now %s") % data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
def test_no_error(self):
    """If the body raises nothing, the file must be left in place."""
    path = tempfile.mktemp()
    open(path, "w")
    with fileutils.remove_path_on_error(path):
        pass
    self.assertTrue(os.path.exists(path))
    os.unlink(path)
def test_no_error(self):
    """A clean exit from the context manager must not delete the file."""
    target = tempfile.mktemp()
    open(target, 'w')
    with fileutils.remove_path_on_error(target):
        pass
    self.assertTrue(os.path.exists(target))
    os.unlink(target)
def test_error(self):
    """An exception inside the context must delete the file."""
    path = tempfile.mktemp()
    open(path, "w")
    try:
        with fileutils.remove_path_on_error(path):
            raise Exception
    except Exception:
        self.assertFalse(os.path.exists(path))
def test_remove_dir(self):
    """A custom remove callback must be able to delete a directory."""
    target_dir = tempfile.mktemp()
    os.mkdir(target_dir)

    def _remove_dir(path):
        fileutils.delete_if_exists(path, os.rmdir)

    try:
        with fileutils.remove_path_on_error(target_dir, _remove_dir):
            raise Exception
    except Exception:
        self.assertFalse(os.path.exists(target_dir))
def test_error(self):
    """The file must be gone after the wrapped body raises."""
    target = tempfile.mktemp()
    open(target, 'w')
    try:
        with fileutils.remove_path_on_error(target):
            raise Exception
    except Exception:
        self.assertFalse(os.path.exists(target))
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create a flat disk for the instance.

    Generates in place when no ``image_id`` is given, otherwise copies
    (and optionally extends) the cached base image.

    Fixes: removed leftover Python 2 ``print`` debugging statements
    (syntax errors on Python 3), dead timer variables, and
    commented-out scratch code.
    """
    filename = self._get_lock_name(base)
    # NOTE(review): 'isuki' appears to be ad-hoc instrumentation - when
    # present in kwargs it skips the image-cache mtime refresh.
    # Preserved as-is; confirm whether it can be dropped.
    isuki = 'isuki' in kwargs

    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def copy_raw_image(base, target, size):
        libvirt_utils.copy_image(base, target)
        if size:
            image = imgmodel.LocalFileImage(target, self.driver_format)
            disk.extend(image, size)

    # No image_id means we build an empty disk rather than copy a base.
    generating = 'image_id' not in kwargs
    if generating:
        if not self.exists():
            # Generating image in place
            prepare_template(target=self.path, *args, **kwargs)
    else:
        if not os.path.exists(base):
            prepare_template(target=base, *args, **kwargs)
        # NOTE(mikal): Update the mtime of the base file so the image
        # cache manager knows it is in use.
        if not isuki:
            libvirt_utils.update_mtime(base)
        self.verify_base_size(base, size)
        if not os.path.exists(self.path):
            with fileutils.remove_path_on_error(self.path):
                copy_raw_image(base, self.path, size)

    # NOTE(review): correct_format() was commented out in the original;
    # kept disabled to preserve behaviour - confirm intent.
    # self.correct_format()
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Export an RBD volume to a temporary file and upload it to glance.

    The temporary export file is removed on failure and after a
    successful upload.
    """
    staging_dir = self._image_conversion_dir()
    staging_file = os.path.join(
        staging_dir, volume["name"] + "-" + image_meta["id"])

    with fileutils.remove_path_on_error(staging_file):
        cmd = ["rbd", "export",
               "--pool", self.configuration.rbd_pool,
               volume["name"], staging_file]
        cmd += self._ceph_args()
        self._try_execute(*cmd)
        image_utils.upload_volume(context, image_service, image_meta,
                                  staging_file)
    os.unlink(staging_file)
def _get_lxd_manifest(self, instance, image_meta):
    """Creates the LXD manifest, needed for split images

    :param instance: nova instance
    :param image_meta: image metadata dictionary

    Fixes:
    * both "Failed to generate manifest" log calls used ``%(reason)s``
      in the format string but supplied the value under the dict key
      ``'ex'``, which raises a formatting error at log time
    * the final ``save_and_reraise_exception`` was referenced without
      being called, so instead of re-raising it would fail with
      AttributeError and mask the original exception
    """
    LOG.debug('_get_lxd_manifest called for instance', instance=instance)

    metadata_yaml = None
    try:
        # Create a basic LXD manifest from the image properties
        image_arch = image_meta.properties.get('hw_architecture')
        if image_arch is None:
            image_arch = arch.from_host()
        metadata = {
            'architecture': image_arch,
            'creation_date': int(os.stat(self.container_image).st_ctime)
        }
        metadata_yaml = (json.dumps(metadata, sort_keys=True, indent=4,
                                    separators=(',', ': '),
                                    ensure_ascii=False).encode('utf-8')
                         + b"\n")
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            # BUG FIX: dict key was 'ex' while the format string uses
            # %(reason)s.
            LOG.error(_LE('Failed to generate manifest for %(image)s: '
                          '%(reason)s'),
                      {'image': instance.name, 'reason': ex},
                      instance=instance)
    try:
        # Compress the manifest using tar
        target_tarball = tarfile.open(self.container_manifest, "w:")
        metadata_file = tarfile.TarInfo()
        metadata_file.size = len(metadata_yaml)
        metadata_file.name = "metadata.yaml"
        target_tarball.addfile(metadata_file,
                               io.BytesIO(metadata_yaml))
        target_tarball.close()
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            # BUG FIX: dict key was 'ex' while the format string uses
            # %(reason)s.
            LOG.error(_LE('Failed to generate manifest tarball for'
                          ' %(image)s: %(reason)s'),
                      {'image': instance.name, 'reason': ex},
                      instance=instance)

    try:
        # Compress the manifest further using xz
        with fileutils.remove_path_on_error(self.container_manifest):
            utils.execute('xz', '-9', self.container_manifest,
                          check_exit_code=[0, 1])
    except processutils.ProcessExecutionError as ex:
        # BUG FIX: was `with excutils.save_and_reraise_exception:`
        # (missing the call), which cannot act as a context manager.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Failed to compress manifest for %(image)s:'
                          ' %(ex)s'),
                      {'image': instance.image_ref, 'ex': ex},
                      instance=instance)
def test_remove(self):
    """A no-op remover must leave the path in place after an error."""
    tmpfile = tempfile.mktemp()
    # BUG FIX: close the file handle instead of leaking it; the original
    # relied on garbage collection to release the descriptor.
    open(tmpfile, "w").close()
    try:
        # remove=lambda x: x deliberately does NOT delete, so the file
        # must survive the exception raised inside the context.
        with fileutils.remove_path_on_error(tmpfile, remove=lambda x: x):
            raise Exception
    except Exception:
        self.assertTrue(os.path.exists(tmpfile))
        os.unlink(tmpfile)
def test_remove(self):
    """A no-op remover must leave the path in place after an error."""
    tmpfile = tempfile.mktemp()
    # BUG FIX: close the file handle instead of leaking it; the original
    # relied on garbage collection to release the descriptor.
    open(tmpfile, 'w').close()
    try:
        # remove=lambda x: x deliberately does NOT delete, so the file
        # must survive the exception raised inside the context.
        with fileutils.remove_path_on_error(tmpfile, remove=lambda x: x):
            raise Exception
    except Exception:
        self.assertTrue(os.path.exists(tmpfile))
        os.unlink(tmpfile)
def fetch_to_raw(context, image_href, path):
    """Download an image and ensure the file at ``path`` is in raw format.

    :param context: security context used for the download
    :param image_href: image id or URL to fetch
    :param path: final destination path for the image
    :raises: exception.ImageUnacceptable if the image format cannot be
        determined, the image has a backing file, or conversion to raw
        fails or produces a non-raw result.
    """
    # Download into a .part temp file so a failed or partial fetch never
    # leaves a half-written image at the final path.
    path_tmp = "%s.part" % path
    fetch(context, image_href, path_tmp)

    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        # Reject images that declare a backing file; the pipeline below
        # only handles self-contained images.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))

        if fmt != "raw" and CONF.force_raw_images:
            # Convert into a second staging file, and only rename into
            # place once the result has been re-verified as raw.
            staged = "%s.converted" % path
            LOG.debug("%s was %s, converting to raw", image_href, fmt)
            with fileutils.remove_path_on_error(staged):
                try:
                    convert_image(path_tmp, staged, fmt, 'raw')
                except exception.ImageUnacceptable as exp:
                    # re-raise to include image_href
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Unable to convert image to raw: %(exp)s")
                        % {'exp': exp})

                os.unlink(path_tmp)

                # Double-check that qemu-img actually produced raw output.
                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is now %s") %
                        data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
def test_remove_dir(self):
    """A custom remover must delete the directory when an error occurs."""
    target_dir = tempfile.mktemp()
    os.mkdir(target_dir)
    try:
        # Supply a remover that knows how to delete directories.
        with fileutils.remove_path_on_error(
                target_dir,
                lambda path: fileutils.delete_if_exists(path, os.rmdir)):
            raise Exception
    except Exception:
        # The remover ran, so the directory must be gone.
        self.assertFalse(os.path.exists(target_dir))
def fetch(context, image_href, path):
    """Download ``image_href`` to ``path``, gated by the disk-op semaphore.

    :param context: security context used for the download
    :param image_href: image id or URL to fetch
    :param path: destination path on local disk
    """
    # Remove the partially-downloaded file if anything below raises.
    with fileutils.remove_path_on_error(path):
        # WRS: execute operation with disk concurrency sema
        LOG.info('virt.images.fetch(): acquiring disk_op_sema')
        with utils.disk_op_sema:
            LOG.info('fetch: Downloading %(src)s to %(dest)s',
                     {'src': image_href, 'dest': path})
            IMAGE_API.download(context, image_href, dest_path=path)
            LOG.info('fetch: Downloading %(dest)s completed',
                     {'dest': path})
def _fetch_image(self, context, instance):
    """Fetch an image from glance

    :param context: nova security object
    :param instance: the nova instance object
    """
    LOG.debug('_fetch_image called for instance', instance=instance)
    dest_path = self.container_image
    # Clean up a partial download if the transfer fails midway.
    with fileutils.remove_path_on_error(dest_path):
        IMAGE_API.download(context, instance.image_ref,
                           dest_path=dest_path)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Export the RBD volume into a temporary file, then upload to Glance."""
    export_path = os.path.join(self._image_conversion_dir(),
                               volume.name + '-' + image_meta['id'])
    # Remove the partial export if the rbd command or the upload fails.
    with fileutils.remove_path_on_error(export_path):
        cmd = ['rbd', 'export',
               '--pool', self.configuration.rbd_pool,
               volume.name, export_path]
        cmd += self._ceph_args()
        self._try_execute(*cmd)
        image_utils.upload_volume(context, image_service,
                                  image_meta, export_path)
    os.unlink(export_path)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Dump the RBD volume to a staging file and push it to Glance."""
    staging_file = os.path.join(self._image_conversion_dir(),
                                volume.name + '-' + image_meta['id'])
    with fileutils.remove_path_on_error(staging_file):
        # Build the rbd export command, appending any configured ceph args.
        export_cmd = ['rbd', 'export',
                      '--pool', self.configuration.rbd_pool,
                      volume.name, staging_file]
        export_cmd.extend(self._ceph_args())
        self._try_execute(*export_cmd)
        image_utils.upload_volume(context, image_service,
                                  image_meta, staging_file)
    os.unlink(staging_file)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Convert a Vitastor volume to a raw temp file and upload it to Glance."""
    staging = os.path.join(volume_utils.image_conversion_dir(),
                           volume.name + '-' + image_meta['id'])
    with fileutils.remove_path_on_error(staging):
        vol_name = utils.convert_str(volume.name)
        # ':' separates options in the qemu image spec, so it must be
        # escaped inside the volume name.
        source_spec = ('vitastor:image=' + vol_name.replace(':', '\\:')
                       + self._qemu_args())
        self._try_execute('qemu-img', 'convert', '-f', 'raw',
                          source_spec, '-O', 'raw', staging)
        # FIXME: Copy directly if the destination image is also in Vitastor
        volume_utils.upload_volume(context, image_service, image_meta,
                                   staging, volume)
    os.unlink(staging)
def create_image(self, prepare_template, base, size, *args, **kwargs):
    """Create a ploop disk for the instance from a cached base image.

    :param prepare_template: callable that downloads/caches the base image
    :param base: path of the cached base image
    :param size: desired disk size in bytes (0/None to skip growing)

    Raises exception.ImageUnacceptable when the source image format is
    neither ploop nor raw and force_raw_images is disabled.
    """
    filename = os.path.split(base)[-1]

    # Serialize ploop creation per base image across processes.
    @utils.synchronized(filename, external=True, lock_path=self.lock_path)
    def create_ploop_image(base, target, size):
        # Copy the base image into the ploop directory and regenerate the
        # DiskDescriptor.xml for it; grow afterwards if a size was given.
        image_path = os.path.join(target, "root.hds")
        libvirt_utils.copy_image(base, image_path)
        utils.execute('ploop', 'restore-descriptor', '-f', self.pcs_format,
                      target, image_path)
        if size:
            dd_path = os.path.join(self.path, "DiskDescriptor.xml")
            # ploop grow takes the size in kilobytes.
            utils.execute('ploop', 'grow', '-s', '%dK' % (size >> 10),
                          dd_path, run_as_root=True)

    if not os.path.exists(self.path):
        if CONF.force_raw_images:
            self.pcs_format = "raw"
        else:
            # Decide the ploop format from the image's disk_format.
            image_meta = IMAGE_API.get(kwargs["context"],
                                       kwargs["image_id"])
            # NOTE(review): 'format' shadows the builtin; kept as-is to
            # avoid a behavior-affecting rename in a doc-only pass.
            format = image_meta.get("disk_format")
            if format == "ploop":
                self.pcs_format = "expanded"
            elif format == "raw":
                self.pcs_format = "raw"
            else:
                reason = _("PCS doesn't support images in %s format."
                           " You should either set force_raw_images=True"
                           " in config or upload an image in ploop"
                           " or raw format.") % format
                raise exception.ImageUnacceptable(
                    image_id=kwargs["image_id"], reason=reason)

    # Fetch/cache the base image if it is not already present, then make
    # sure it is not larger than the requested disk size.
    if not os.path.exists(base):
        prepare_template(target=base, max_size=size, *args, **kwargs)
    self.verify_base_size(base, size)

    if os.path.exists(self.path):
        return

    fileutils.ensure_tree(self.path)

    # On failure remove the whole ploop directory, not just one file.
    remove_func = functools.partial(fileutils.delete_if_exists,
                                    remove=shutil.rmtree)
    with fileutils.remove_path_on_error(self.path, remove=remove_func):
        create_ploop_image(base, self.path, size)
def fetch(context: context.RequestContext,
          image_service: glance.GlanceImageService,
          image_id: str,
          path: str,
          _user_id,
          _project_id) -> None:
    """Download ``image_id`` from glance into ``path`` and log throughput.

    :param context: request context for the download
    :param image_service: glance image service wrapper
    :param image_id: id of the image to download
    :param path: destination file path
    :raises: exception.ImageTooBig when the destination filesystem runs
        out of space; exception.ImageDownloadFailed for other IOErrors.
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance. Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    start_time = timeutils.utcnow()
    # Remove the partial file if the download fails.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            try:
                # tpool.Proxy presumably offloads the blocking file writes
                # to a native thread — TODO confirm against eventlet docs.
                image_service.download(context, image_id,
                                       tpool.Proxy(image_file))
            except IOError as e:
                if e.errno == errno.ENOSPC:
                    params = {'path': os.path.dirname(path),
                              'image': image_id}
                    reason = _("No space left in image_conversion_dir "
                               "path (%(path)s) while fetching "
                               "image %(image)s.") % params
                    LOG.exception(reason)
                    raise exception.ImageTooBig(image_id=image_id,
                                                reason=reason)

                reason = ("IOError: %(errno)s %(strerror)s" %
                          {'errno': e.errno, 'strerror': e.strerror})
                LOG.error(reason)
                raise exception.ImageDownloadFailed(image_href=image_id,
                                                    reason=reason)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg, {"dest": image_file.name,
                    "sz": fsz_mb,
                    "duration": duration})
    msg = "Image download %(sz).2f MB at %(mbps).2f MB/s"
    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def fetch(context, image_href, path, force_raw=False):
    """Download ``image_href`` to ``path``, optionally converting to raw."""
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance. Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    image_service = service.get_image_service(image_href,
                                              context=context)
    LOG.debug("Using %(image_service)s to download image %(image_href)s.",
              {'image_service': image_service.__class__,
               'image_href': image_href})

    # Delete the partial file if the download fails mid-stream.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(image_href, image_file)

    if force_raw:
        image_to_raw(image_href, path, "%s.part" % path)
def fetch(context, image_href, path, force_raw=False):
    """Download ``image_href`` to ``path``, optionally converting to raw.

    :param context: security context for the download
    :param image_href: image id or URL to fetch
    :param path: destination file path
    :param force_raw: when True, convert the downloaded image to raw
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance. Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    image_service = service.get_image_service(image_href,
                                              context=context)
    # BUG FIX: pass the mapping lazily to LOG.debug instead of eagerly
    # %-formatting it, so the string is only built when debug logging is
    # actually enabled (standard logging idiom).
    LOG.debug("Using %(image_service)s to download image %(image_href)s.",
              {'image_service': image_service.__class__,
               'image_href': image_href})
    # Remove the partial file if the download fails mid-stream.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(image_href, image_file)

    if force_raw:
        image_to_raw(image_href, path, "%s.part" % path)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
    """Download ``image_id`` into ``path`` and log the transfer rate.

    :param context: request context for the download
    :param image_service: image service wrapper providing ``download``
    :param image_id: id of the image to download
    :param path: destination file path
    :raises: exception.ImageTooBig when the destination filesystem runs
        out of space; exception.ImageDownloadFailed for other IOErrors.
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance. Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    start_time = timeutils.utcnow()
    # Remove the partial file if the download fails.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            try:
                # tpool.Proxy presumably offloads blocking writes to a
                # native thread — TODO confirm against eventlet docs.
                image_service.download(context, image_id,
                                       tpool.Proxy(image_file))
            except IOError as e:
                if e.errno == errno.ENOSPC:
                    # Out of disk space maps to a dedicated exception so
                    # callers can distinguish it from generic failures.
                    params = {'path': os.path.dirname(path),
                              'image': image_id}
                    reason = _("No space left in image_conversion_dir "
                               "path (%(path)s) while fetching "
                               "image %(image)s.") % params
                    LOG.exception(reason)
                    raise exception.ImageTooBig(image_id=image_id,
                                                reason=reason)

                reason = ("IOError: %(errno)s %(strerror)s" %
                          {'errno': e.errno, 'strerror': e.strerror})
                LOG.error(reason)
                raise exception.ImageDownloadFailed(image_href=image_id,
                                                    reason=reason)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg, {"dest": image_file.name,
                    "sz": fsz_mb,
                    "duration": duration})
    msg = "Image download %(sz).2f MB at %(mbps).2f MB/s"
    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None, size=None,
                       run_as_root=True):
    """Fetch an image and verify (via qemu-img) that it is usable.

    :param context: request context for the download
    :param image_service: image service wrapper
    :param image_id: id of the image to fetch
    :param dest: destination file path
    :param size: optional volume size to validate against (same units as
        the image's virtual_size)
    :raises: exception.ImageUnacceptable if the format cannot be parsed,
        the image has a backing file, or it does not fit in ``size``.
    """
    fetch(context, image_service, image_id, dest, None, None)
    image_meta = image_service.show(context, image_id)

    # Remove the downloaded file if any verification step fails.
    with fileutils.remove_path_on_error(dest):
        has_meta = False if not image_meta else True
        try:
            # TypeError covers image_meta being None (no metadata at all).
            format_raw = True if image_meta['disk_format'] == 'raw' else False
        except TypeError:
            format_raw = False
        data = get_qemu_data(image_id, has_meta, format_raw,
                             dest, run_as_root)
        # We can only really do verification of the image if we have
        # qemu data to use
        if data is not None:
            fmt = data.file_format
            if fmt is None:
                raise exception.ImageUnacceptable(
                    reason=_("'qemu-img info' parsing failed."),
                    image_id=image_id)

            # Reject images that declare a backing file.
            backing_file = data.backing_file
            if backing_file is not None:
                raise exception.ImageUnacceptable(
                    image_id=image_id,
                    reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                            {'fmt': fmt, 'backing_file': backing_file}))

            # NOTE(xqueralt): If the image virtual size doesn't fit in the
            # requested volume there is no point on resizing it because it
            # will generate an unusable image.
            if size is not None and data.virtual_size > size:
                params = {'image_size': data.virtual_size,
                          'volume_size': size}
                reason = _("Size is %(image_size)dGB and doesn't fit in a "
                           "volume of size %(volume_size)dGB.") % params
                raise exception.ImageUnacceptable(image_id=image_id,
                                                  reason=reason)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
    """Download ``image_id`` into ``path`` and log the transfer rate.

    :param context: request context for the download
    :param image_service: image service wrapper providing ``download``
    :param image_id: id of the image to download
    :param path: destination file path
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance. Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    start_time = timeutils.utcnow()
    # Remove the partial file if the download fails.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(context, image_id, image_file)

    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(image_file.name).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, "
           "duration %(duration).2f sec")
    LOG.debug(msg, {"dest": image_file.name,
                    "sz": fsz_mb,
                    "duration": duration})
    msg = _LI("Image download %(sz).2f MB at %(mbps).2f MB/s")
    LOG.info(msg, {"sz": fsz_mb, "mbps": mbps})
def fetch(context, image_href, path, _user_id, _project_id, max_size=0):
    """Download ``image_href`` to ``path`` via the image API.

    ``_user_id``, ``_project_id`` and ``max_size`` are accepted for
    signature compatibility with callers and are not used here.
    """
    # Clean up the partial download if the transfer fails.
    with fileutils.remove_path_on_error(path):
        IMAGE_API.download(context, image_href,
                           dest_path=path)
def fetch(context, image_href, path):
    """Download ``image_href`` to ``path`` via the image API."""
    # Clean up the partial download if the transfer fails.
    with fileutils.remove_path_on_error(path):
        IMAGE_API.download(context, image_href,
                           dest_path=path)