def delete_image_cache(context, image_id, image_meta=None):
    """Remove the cached RAW copy of an image.

    No-op unless RAW Caching is enabled.  If the image is currently being
    cached, block (up to 1200s) until the conversion finishes before
    deleting the cached RBD image.

    :param context: request context used for registry lookups
    :param image_id: identifier of the image whose cache is removed
    :param image_meta: optional pre-fetched image metadata
    """
    global g_image_id, g_done_event, g_delete_lock, g_is_cache_raw_enabled

    # Check & delete image cache only if RAW Caching is enabled
    # Note: UTs pass should pass without modifications
    if not g_is_cache_raw_enabled or not g_is_cache_raw_enabled.value:
        return

    LOG.info(_LI("Deleting image %s from cache") % image_id)

    # Check if we are caching; race issues with cache_raw_status in the
    # meta, so don't rely on it.
    with g_delete_lock:
        if g_image_id.value == image_id:
            # We can't delete an image while it is being cached.
            LOG.info(
                _LI("Image %s is caching, waiting for operation to "
                    "complete before deleting it") % image_id)
            # Wait for caching to complete.
            g_delete_lock.wait(1200)
            image_meta = registry.get_image_metadata(context, image_id)
        if not image_meta:
            # Glance V2 API is providing a different metadata format,
            # therefore cannot be reused
            image_meta = registry.get_image_metadata(context, image_id)
        cached_url = image_meta['properties'].get('cache_raw_url')
        if cached_url:
            _del_rbd_image(cached_url, image_meta['id'])
def get_image_meta_or_404(self, request, image_id):
    """Return registry metadata for an image, or raise an HTTP error.

    :param request: The WSGI/Webob Request object
    :param image_id: The opaque image identifier
    :raises HTTPNotFound: if the image does not exist
    :raises HTTPForbidden: if access to the image is denied
    """
    ctx = request.context
    try:
        return registry.get_image_metadata(ctx, image_id)
    except exception.NotFound:
        msg = "Image with identifier %s not found" % image_id
        LOG.debug(msg)
        raise webob.exc.HTTPNotFound(msg,
                                     request=request,
                                     content_type='text/plain')
    except exception.Forbidden:
        msg = "Forbidden image access"
        LOG.debug(msg)
        raise webob.exc.HTTPForbidden(msg,
                                      request=request,
                                      content_type='text/plain')
def create_image_cache(context, image_id):
    """Enqueue an image for RAW caching if needed.

    :param context: request context used for registry calls
    :param image_id: identifier of the image to (maybe) cache
    """
    global g_job_queue, g_is_cache_raw_enabled
    if not g_is_cache_raw_enabled or not g_is_cache_raw_enabled.value:
        # Check & enqueue image for caching only if RAW Caching is enabled
        # Note: UTs should pass without modifications
        return
    image_meta = registry.get_image_metadata(context, image_id)
    if not _is_caching_needed(image_meta):
        # Fixed: info-level message was wrapped in _LE (error-level
        # translation marker); use _LI to match LOG.info.
        LOG.info(_LI("Caching not needed for:%s") % image_id)
        return
    # Schedule image for caching only if RAW Caching is (still) enabled.
    # g_is_cache_raw_enabled is a shared value, so it may have been
    # toggled since the check above — hence the re-check.
    if not g_is_cache_raw_enabled.value:
        del image_meta['properties']['cache_raw']
        registry.update_image_metadata(context, image_id, image_meta,
                                       purge_props=True)
        return
    # Make sure we have all of the fields and that they are correctly set
    image_meta['properties']['cache_raw_status'] = 'Queued'
    image_meta['properties']['cache_raw_size'] = '-'
    if 'cache_raw_error' in image_meta['properties']:
        del image_meta['properties']['cache_raw_error']
    registry.update_image_metadata(context, image_id, image_meta,
                                   purge_props=True)
    LOG.info(_LI("Enqueuing image for conversion: %s") % image_id)
    g_job_queue.put(image_id)
def _get_v1_image_metadata(self, request, image_id):
    """Look up v1 image metadata via the registry.

    Builds a dictionary-like mash-up of the image's core and custom
    properties.

    :raises HTTPNotFound: when the registry has no such image
    """
    try:
        registry_meta = registry.get_image_metadata(request.context,
                                                    image_id)
        return utils.create_mashup_dict(registry_meta)
    except exception.NotFound as e:
        LOG.debug("No metadata found for image '%s'" % image_id)
        raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def _get_v1_image_metadata(self, request, image_id):
    """Look up v1 image metadata via the registry.

    Builds a dictionary-like mash-up of the image's core and custom
    properties.

    :raises HTTPNotFound: when the registry has no such image
    """
    try:
        registry_meta = registry.get_image_metadata(request.context,
                                                    image_id)
        return utils.create_mashup_dict(registry_meta)
    except exception.NotFound as e:
        LOG.debug("No metadata found for image '%s'", image_id)
        raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def _process_v1_request(self, request, image_id, image_iterator):
    """Build the serialized v1 response for an image-data request."""
    image_meta = registry.get_image_metadata(request.context, image_id)
    # Don't display location
    image_meta.pop('location', None)
    self._verify_metadata(image_meta)

    response = webob.Response(request=request)
    raw_response = {
        'image_iterator': image_iterator,
        'image_meta': image_meta,
    }
    return self.serializer.show(response, raw_response)
def _cache_img_to_raw(context, image_id):
    """Cache an image to RAW format in the RBD backend.

    Fetches the original image to a local file, converts it to RAW,
    imports the result into RBD, then updates the image metadata with
    the cache status, URL and size.
    """
    # Mark the image as 'Caching' before starting the conversion.
    LOG.info(_LI("Caching image %s") % image_id)
    image_meta = registry.get_image_metadata(context, image_id)
    meta_update = {'properties': {'cache_raw_status': 'Caching'}}
    registry.update_image_metadata(context, image_id, meta_update)

    # Paths for the fetched original and the converted RAW file.
    prefix = CONF.cache_raw_conversion_dir + "/" + image_id
    source_path = prefix + "_orig"
    raw_path = prefix + "_raw"
    rbd_image_name = image_id + "_raw"

    # Get the cluster fsid to build the destination RBD URL.
    ceph_cfg_file = CONF.glance_store.rbd_store_ceph_conf
    with rados.Rados(conffile=ceph_cfg_file) as cluster:
        fsid = cluster.get_fsid()
    dest_url = "rbd://%s/%s/%s/%s" % (fsid,
                                      CONF.glance_store.rbd_store_pool,
                                      rbd_image_name,
                                      "snap")

    # Fetch and convert.
    _fetch_to_file(image_id, image_meta, source_path)
    try:
        _convert_to_volume_format(source_path, raw_path, 'raw', image_id)
    except exception.ConvertToSameFormat as ex:
        raise exception.ImageUncacheable(
            image_id=image_id,
            reason=_("The format of the image is (%(fmt)s) "
                     "not (%(orig)s), please specify the correct format "
                     "when creating the image") % {
                'fmt': ex.fmt,
                'orig': image_meta.get('disk_format')
            })
    with reserve_space(image_id, _get_sparse_file_size(raw_path),
                       CONF.glance_store.rbd_store_pool):
        _import_from_file(raw_path, dest_url, image_id)

    # Cleanup of the temporary files.
    # NOTE(review): cleanup only happens on success; on failure the temp
    # files are left behind — confirm whether that is intentional.
    os.unlink(source_path)
    os.unlink(raw_path)

    # Finalize caching: publish the cached URL and size.
    image_size = _get_rbd_image_size(dest_url, image_id)
    meta_update['properties']['cache_raw_status'] = 'Cached'
    meta_update['properties']['cache_raw_url'] = dest_url
    meta_update['properties']['cache_raw_size'] = image_size
    registry.update_image_metadata(context, image_id, meta_update)
    LOG.info(_LI("Caching completed for image: %s") % image_id)
def fetch_image_into_cache(self, image_id):
    """Pull an image's data from its store into the local cache.

    :returns: True on success, False when the image is missing or not
              active.
    """
    admin_ctx = context.RequestContext(is_admin=True, show_deleted=True)
    try:
        image_meta = registry.get_image_metadata(admin_ctx, image_id)
    except exception.NotFound:
        LOG.warn(_("No metadata found for image '%s'"), image_id)
        return False
    if image_meta['status'] != 'active':
        LOG.warn(_("Image '%s' is not active. Not caching."), image_id)
        return False

    image_data, image_size = glance.store.get_from_backend(
        admin_ctx, image_meta['location'])
    LOG.debug(_("Caching image '%s'"), image_id)
    self.cache.cache_image_iter(image_id, image_data)
    return True
def delete(self, location, context=None):
    """Deregister the AMI backing the image at *location*.

    :param location: `glance_store.location.Location` object, supplied
                     from glance_store.location.get_location_from_uri()
    :param context: request context used to look up the image owner
    """
    store_loc = location.store_location
    ami_id = store_loc.ami_id
    image_info = registry.get_image_metadata(context, store_loc.image_id)
    aws_client = self._get_ec2_client(context, image_info['owner'])
    # Only images owned by this account are candidates for deregistration.
    for img in aws_client.describe_images(Owners=['self'])['Images']:
        if ami_id == img.get('ImageId'):
            LOG.warn('**** ID of ami being deleted: {}'.format(ami_id))
            aws_client.deregister_image(ImageId=ami_id)
def get_size(self, location, context=None):
    """Return the size in bytes of the image backing an AMI.

    Prefers a size recorded in the image properties; otherwise derives
    it from the AMI's EBS block-device mappings.

    :param location: `glance_store.location.Location` object, supplied
                     from glance_store.location.get_location_from_uri()
    :retval int: size of image file in bytes
    """
    store_loc = location.store_location
    ami_id = store_loc.ami_id
    image_info = registry.get_image_metadata(context, store_loc.image_id)
    ec2_resource = self._get_ec2_resource(context, image_info['owner'])
    ec2_image = ec2_resource.Image(ami_id)

    size = self._get_size_from_properties(image_info)
    if size >= 0:
        LOG.debug('Got image size from properties as %d' % size)
        # Convert size in gb to bytes
        size *= units.Gi
        return size

    try:
        ec2_image.load()
        # No size info for instance-store volumes, so return 1 in that
        # case.  Setting size as 0 fails multiple checks in glance
        # required for successful creation of an image record.
        size = 1
        if ec2_image.root_device_type == 'ebs':
            for bdm in ec2_image.block_device_mappings:
                if 'Ebs' in bdm and 'VolumeSize' in bdm['Ebs']:
                    LOG.debug('ebs info: %s' % bdm['Ebs'])
                    size += bdm['Ebs']['VolumeSize']
            # Convert size in gb to bytes.
            size *= units.Gi
    except botocore.exceptions.ClientError as ce:
        if ce.response['Error']['Code'] == 'InvalidAMIID.NotFound':
            raise exceptions.ImageDataNotFound()
        else:
            raise exceptions.GlanceStoreException(
                ce.response['Error']['Code'])
    return size
def fetch_image_into_cache(self, image_id):
    """Tee an image's data from its store into the local cache.

    The checksum from the registry metadata is verified while the data
    is iterated.

    :returns: True on success, False when the image is missing or not
              active.
    """
    admin_ctx = context.RequestContext(is_admin=True, show_deleted=True)
    try:
        image_meta = registry.get_image_metadata(admin_ctx, image_id)
    except exception.NotFound:
        LOG.warn(_("No metadata found for image '%s'"), image_id)
        return False
    if image_meta["status"] != "active":
        LOG.warn(_("Image '%s' is not active. Not caching."), image_id)
        return False

    image_data, image_size = glance.store.get_from_backend(
        admin_ctx, image_meta["location"])
    LOG.debug(_("Caching image '%s'"), image_id)
    tee = self.cache.cache_tee_iter(image_id, image_data,
                                    image_meta["checksum"])
    # Image is tee'd into cache and checksum verified as we iterate.
    list(tee)
    return True
def get_image_meta_or_404(self, request, image_id):
    """Return registry metadata for an image, or raise an HTTP error.

    :param request: The WSGI/Webob Request object
    :param image_id: The opaque image identifier
    :raises HTTPNotFound: if image does not exist
    :raises HTTPForbidden: if access to the image is denied
    """
    ctx = request.context
    try:
        return registry.get_image_metadata(ctx, image_id)
    except exception.NotFound:
        LOG.debug("Image with identifier %s not found", image_id)
        msg = _("Image with identifier %s not found") % image_id
        raise webob.exc.HTTPNotFound(msg,
                                     request=request,
                                     content_type='text/plain')
    except exception.Forbidden:
        LOG.debug("Forbidden image access")
        raise webob.exc.HTTPForbidden(_("Forbidden image access"),
                                      request=request,
                                      content_type='text/plain')
def fetch_image_into_cache(self, image_id):
    """Tee an image's data from its store into the local cache.

    The checksum from the registry metadata is verified while the data
    is iterated.

    :returns: True on success, False when the image is missing or not
              active.
    """
    admin_ctx = context.RequestContext(is_admin=True, show_deleted=True)
    try:
        image_meta = registry.get_image_metadata(admin_ctx, image_id)
    except exception.NotFound:
        LOG.warn(_LW("No metadata found for image '%s'") % image_id)
        return False
    if image_meta['status'] != 'active':
        LOG.warn(
            _LW("Image '%s' is not active. Not caching.") % image_id)
        return False

    image_data, image_size = glance.store.get_from_backend(
        admin_ctx, image_meta['location'])
    LOG.debug("Caching image '%s'" % image_id)
    tee = self.cache.cache_tee_iter(image_id, image_data,
                                    image_meta['checksum'])
    # Image is tee'd into cache and checksum verified as we iterate.
    list(tee)
    return True
def upload_data_to_store(req, image_meta, image_data, store, notifier):
    """
    Upload image data to specified store.

    Upload image data to the store and cleans up on error.

    :param req: incoming webob request (its context is used for quota,
                registry and store calls)
    :param image_meta: dict of image metadata; 'id' and (optionally)
                       'size'/'checksum' are read
    :param image_data: readable object yielding the image bytes
    :param store: backend store the data is added to
    :param notifier: notifier used to emit 'image.upload' error events
    :returns: tuple (updated image_meta, location_data)
    :raises webob.exc.HTTPError: subclasses for every failure mode
    """
    image_id = image_meta['id']

    db_api = glance.db.get_api(v1_mode=True)
    image_size = image_meta.get('size')

    try:
        # By default image_data will be passed as CooperativeReader object.
        # But if 'user_storage_quota' is enabled and 'remaining' is not None
        # then it will be passed as object of LimitingReader to
        # 'store_add_to_backend' method.
        image_data = utils.CooperativeReader(image_data)

        remaining = glance.api.common.check_quota(
            req.context, image_size, db_api, image_id=image_id)
        if remaining is not None:
            image_data = utils.LimitingReader(image_data, remaining)

        (uri,
         size,
         checksum,
         location_metadata) = store_api.store_add_to_backend(
             image_meta['id'],
             image_data,
             image_meta['size'],
             store,
             context=req.context)

        location_data = {'url': uri,
                         'metadata': location_metadata,
                         'status': 'active'}

        try:
            # recheck the quota in case there were simultaneous uploads that
            # did not provide the size
            glance.api.common.check_quota(
                req.context, size, db_api, image_id=image_id)
        except exception.StorageQuotaFull:
            with excutils.save_and_reraise_exception():
                LOG.info(_LI('Cleaning up %s after exceeding '
                             'the quota'), image_id)
                store_utils.safe_delete_from_backend(
                    req.context, image_meta['id'], location_data)

        def _kill_mismatched(image_meta, attr, actual):
            # Kill the image and reject the request when a client-supplied
            # size/checksum disagrees with what the store computed.
            supplied = image_meta.get(attr)
            if supplied and supplied != actual:
                msg = (_("Supplied %(attr)s (%(supplied)s) and "
                         "%(attr)s generated from uploaded image "
                         "(%(actual)s) did not match. Setting image "
                         "status to 'killed'.") % {'attr': attr,
                                                   'supplied': supplied,
                                                   'actual': actual})
                LOG.error(msg)
                safe_kill(req, image_id, 'saving')
                initiate_deletion(req, location_data, image_id)
                raise webob.exc.HTTPBadRequest(explanation=msg,
                                               content_type="text/plain",
                                               request=req)

        # Verify any supplied size/checksum value matches size/checksum
        # returned from store when adding image
        _kill_mismatched(image_meta, 'size', size)
        _kill_mismatched(image_meta, 'checksum', checksum)

        # Update the database with the checksum returned
        # from the backend store
        LOG.debug("Updating image %(image_id)s data. "
                  "Checksum set to %(checksum)s, size set "
                  "to %(size)d", {'image_id': image_id,
                                  'checksum': checksum,
                                  'size': size})
        update_data = {'checksum': checksum,
                       'size': size}
        try:
            try:
                state = 'saving'
                image_meta = registry.update_image_metadata(req.context,
                                                            image_id,
                                                            update_data,
                                                            from_state=state)
            except exception.Duplicate:
                # Duplicate during update usually means the image was
                # deleted and re-created concurrently; translate to
                # ImageNotFound when it is gone.
                image = registry.get_image_metadata(req.context, image_id)
                if image['status'] == 'deleted':
                    raise exception.ImageNotFound()
                else:
                    raise
        except exception.NotAuthenticated as e:
            # Delete image data due to possible token expiration.
            LOG.debug("Authentication error - the token may have "
                      "expired during file upload. Deleting image data for "
                      " %s " % image_id)
            initiate_deletion(req, location_data, image_id)
            raise webob.exc.HTTPUnauthorized(explanation=e.msg, request=req)
        except exception.ImageNotFound:
            msg = _("Image %s could not be found after upload. The image may"
                    " have been deleted during the upload.") % image_id
            LOG.info(msg)

            # NOTE(jculp): we need to clean up the datastore if an image
            # resource is deleted while the image data is being uploaded
            #
            # We get "location_data" from above call to store.add(), any
            # exceptions that occur there handle this same issue internally,
            # Since this is store-agnostic, should apply to all stores.
            initiate_deletion(req, location_data, image_id)
            raise webob.exc.HTTPPreconditionFailed(explanation=msg,
                                                   request=req,
                                                   content_type='text/plain')
    except store_api.StoreAddDisabled:
        msg = _("Error in store configuration. Adding images to store "
                "is disabled.")
        LOG.exception(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPGone(explanation=msg, request=req,
                                 content_type='text/plain')
    except (store_api.Duplicate, exception.Duplicate) as e:
        msg = (_("Attempt to upload duplicate image: %s") %
               encodeutils.exception_to_unicode(e))
        LOG.warn(msg)
        # NOTE(dosaboy): do not delete the image since it is likely that this
        # conflict is a result of another concurrent upload that will be
        # successful.
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPConflict(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
    except exception.Forbidden as e:
        msg = (_("Forbidden upload attempt: %s") %
               encodeutils.exception_to_unicode(e))
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPForbidden(explanation=msg,
                                      request=req,
                                      content_type="text/plain")
    except store_api.StorageFull as e:
        msg = (_("Image storage media is full: %s") %
               encodeutils.exception_to_unicode(e))
        LOG.error(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')
    except store_api.StorageWriteDenied as e:
        msg = (_("Insufficient permissions on image storage media: %s") %
               encodeutils.exception_to_unicode(e))
        LOG.error(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req,
                                               content_type='text/plain')
    except exception.ImageSizeLimitExceeded as e:
        msg = (_("Denying attempt to upload image larger than %d bytes.")
               % CONF.image_size_cap)
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')
    except exception.StorageQuotaFull as e:
        msg = (_("Denying attempt to upload image because it exceeds the "
                 "quota: %s") % encodeutils.exception_to_unicode(e))
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')
    except webob.exc.HTTPError:
        # NOTE(bcwaldon): Ideally, we would just call 'raise' here,
        # but something in the above function calls is affecting the
        # exception context and we must explicitly re-raise the
        # caught exception.
        msg = _LE("Received HTTP error while uploading image %s") % image_id
        notifier.error('image.upload', msg)
        with excutils.save_and_reraise_exception():
            LOG.exception(msg)
            safe_kill(req, image_id, 'saving')
    except (ValueError, IOError) as e:
        msg = _("Client disconnected before sending all data to backend")
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        raise webob.exc.HTTPBadRequest(explanation=msg,
                                       content_type="text/plain",
                                       request=req)
    except Exception as e:
        msg = _("Failed to upload image %s") % image_id
        LOG.exception(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPInternalServerError(explanation=msg,
                                                request=req,
                                                content_type='text/plain')

    return image_meta, location_data
def upload_data_to_store(req, image_meta, image_data, store, notifier):
    """
    Upload image data to specified store.

    Upload image data to the store and cleans up on error.

    :param req: incoming webob request (its context is used for quota,
                registry and store calls)
    :param image_meta: dict of image metadata; 'id' and (optionally)
                       'size'/'checksum' are read
    :param image_data: readable object yielding the image bytes
    :param store: backend store the data is added to
    :param notifier: notifier used to emit 'image.upload' error events
    :returns: tuple (updated image_meta, location_data)
    :raises webob.exc.HTTPError: subclasses for every failure mode
    """
    image_id = image_meta['id']

    db_api = glance.db.get_api()
    image_size = image_meta.get('size')

    try:
        # By default image_data will be passed as CooperativeReader object.
        # But if 'user_storage_quota' is enabled and 'remaining' is not None
        # then it will be passed as object of LimitingReader to
        # 'store_add_to_backend' method.
        image_data = utils.CooperativeReader(image_data)

        remaining = glance.api.common.check_quota(
            req.context, image_size, db_api, image_id=image_id)
        if remaining is not None:
            image_data = utils.LimitingReader(image_data, remaining)

        (uri,
         size,
         checksum,
         location_metadata) = store_api.store_add_to_backend(
             image_meta['id'],
             image_data,
             image_meta['size'],
             store,
             context=req.context)

        location_data = {'url': uri,
                         'metadata': location_metadata,
                         'status': 'active'}

        try:
            # recheck the quota in case there were simultaneous uploads that
            # did not provide the size
            glance.api.common.check_quota(
                req.context, size, db_api, image_id=image_id)
        except exception.StorageQuotaFull:
            with excutils.save_and_reraise_exception():
                LOG.info(_LI('Cleaning up %s after exceeding '
                             'the quota') % image_id)
                store_utils.safe_delete_from_backend(
                    req.context, image_meta['id'], location_data)

        def _kill_mismatched(image_meta, attr, actual):
            # Kill the image and reject the request when a client-supplied
            # size/checksum disagrees with what the store computed.
            supplied = image_meta.get(attr)
            if supplied and supplied != actual:
                msg = (_("Supplied %(attr)s (%(supplied)s) and "
                         "%(attr)s generated from uploaded image "
                         "(%(actual)s) did not match. Setting image "
                         "status to 'killed'.") % {'attr': attr,
                                                   'supplied': supplied,
                                                   'actual': actual})
                LOG.error(msg)
                safe_kill(req, image_id, 'saving')
                initiate_deletion(req, location_data, image_id)
                raise webob.exc.HTTPBadRequest(explanation=msg,
                                               content_type="text/plain",
                                               request=req)

        # Verify any supplied size/checksum value matches size/checksum
        # returned from store when adding image
        _kill_mismatched(image_meta, 'size', size)
        _kill_mismatched(image_meta, 'checksum', checksum)

        # Update the database with the checksum returned
        # from the backend store
        LOG.debug("Updating image %(image_id)s data. "
                  "Checksum set to %(checksum)s, size set "
                  "to %(size)d", {'image_id': image_id,
                                  'checksum': checksum,
                                  'size': size})
        update_data = {'checksum': checksum,
                       'size': size}
        try:
            try:
                state = 'saving'
                image_meta = registry.update_image_metadata(req.context,
                                                            image_id,
                                                            update_data,
                                                            from_state=state)
            except exception.Duplicate:
                # Duplicate during update usually means the image was
                # deleted and re-created concurrently; translate to
                # ImageNotFound when it is gone.
                image = registry.get_image_metadata(req.context, image_id)
                if image['status'] == 'deleted':
                    raise exception.ImageNotFound()
                else:
                    raise
        except exception.ImageNotFound:
            msg = _LI("Image %s could not be found after upload. The image may"
                      " have been deleted during the upload.") % image_id
            LOG.info(msg)

            # NOTE(jculp): we need to clean up the datastore if an image
            # resource is deleted while the image data is being uploaded
            #
            # We get "location_data" from above call to store.add(), any
            # exceptions that occur there handle this same issue internally,
            # Since this is store-agnostic, should apply to all stores.
            initiate_deletion(req, location_data, image_id)
            raise webob.exc.HTTPPreconditionFailed(explanation=msg,
                                                   request=req,
                                                   content_type='text/plain')
    except store_api.StoreAddDisabled:
        msg = _("Error in store configuration. Adding images to store "
                "is disabled.")
        LOG.exception(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPGone(explanation=msg, request=req,
                                 content_type='text/plain')
    except exception.Duplicate as e:
        msg = (_("Attempt to upload duplicate image: %s") %
               utils.exception_to_str(e))
        LOG.warn(msg)
        # NOTE(dosaboy): do not delete the image since it is likely that this
        # conflict is a result of another concurrent upload that will be
        # successful.
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPConflict(explanation=msg,
                                     request=req,
                                     content_type="text/plain")
    except exception.Forbidden as e:
        msg = (_("Forbidden upload attempt: %s") %
               utils.exception_to_str(e))
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPForbidden(explanation=msg,
                                      request=req,
                                      content_type="text/plain")
    except store_api.StorageFull as e:
        msg = (_("Image storage media is full: %s") %
               utils.exception_to_str(e))
        LOG.error(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')
    except store_api.StorageWriteDenied as e:
        msg = (_("Insufficient permissions on image storage media: %s") %
               utils.exception_to_str(e))
        LOG.error(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req,
                                               content_type='text/plain')
    except exception.ImageSizeLimitExceeded as e:
        msg = (_("Denying attempt to upload image larger than %d bytes.")
               % CONF.image_size_cap)
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')
    except exception.StorageQuotaFull as e:
        msg = (_("Denying attempt to upload image because it exceeds the "
                 "quota: %s") % utils.exception_to_str(e))
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')
    except webob.exc.HTTPError:
        # NOTE(bcwaldon): Ideally, we would just call 'raise' here,
        # but something in the above function calls is affecting the
        # exception context and we must explicitly re-raise the
        # caught exception.
        msg = _LE("Received HTTP error while uploading image %s") % image_id
        notifier.error('image.upload', msg)
        with excutils.save_and_reraise_exception():
            LOG.exception(msg)
            safe_kill(req, image_id, 'saving')
    except (ValueError, IOError) as e:
        msg = _("Client disconnected before sending all data to backend")
        LOG.warn(msg)
        safe_kill(req, image_id, 'saving')
        raise webob.exc.HTTPBadRequest(explanation=msg,
                                       content_type="text/plain",
                                       request=req)
    except Exception as e:
        msg = _("Failed to upload image %s") % image_id
        LOG.exception(msg)
        safe_kill(req, image_id, 'saving')
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPInternalServerError(explanation=msg,
                                                request=req,
                                                content_type='text/plain')

    return image_meta, location_data