def _upload_to_store(self, data, verifier, store=None, size=None):
    """Upload image data to a backend store and update image metadata.

    :param data: iterable/file-like object supplying the image data
    :param verifier: signature verifier, or None to skip verification
    :param store: backend store identifier (used only when multiple
                  backends are enabled)
    :param size: expected data size in bytes, or None when unknown
    """
    # Prefer the algorithm already recorded on the image so re-uploads
    # stay consistent; otherwise fall back to the configured default.
    hashing_algo = self.image.os_hash_algo or CONF['hashing_algorithm']
    if CONF.enabled_backends:
        (location,
         size,
         checksum,
         multihash,
         loc_meta) = self.store_api.add_with_multihash(
            CONF,
            self.image.image_id,
            utils.LimitingReader(utils.CooperativeReader(data),
                                 CONF.image_size_cap),
            size,
            store,
            hashing_algo,
            context=self.context,
            verifier=verifier)
    else:
        (location,
         size,
         checksum,
         multihash,
         loc_meta) = self.store_api.add_to_backend_with_multihash(
            CONF,
            self.image.image_id,
            utils.LimitingReader(utils.CooperativeReader(data),
                                 CONF.image_size_cap),
            size,
            hashing_algo,
            context=self.context,
            verifier=verifier)
    self._verify_signature(verifier, location, loc_meta)
    # NOTE: loop variable renamed from 'data' — the original shadowed and
    # clobbered the 'data' parameter here.
    for attr, value in {"size": size,
                        "os_hash_value": multihash,
                        "checksum": checksum}.items():
        self._verify_uploaded_data(value, attr)
    self.image.locations.append({'url': location,
                                 'metadata': loc_meta,
                                 'status': 'active'})
    self.image.checksum = checksum
    self.image.os_hash_value = multihash
    self.image.size = size
    self.image.os_hash_algo = hashing_algo
def set_data(self, data, size=None):
    """Store image data, enforcing the owner's storage quota.

    :param data: file-like object supplying the image data
    :param size: expected size in bytes, or None when unknown
    :raises exception.StorageQuotaFull: if the upload would exceed, or
        after the fact has exceeded, the user's storage quota
    """
    remaining = glance.api.common.check_quota(
        self.context, size, self.db_api, image_id=self.image.image_id)
    if remaining is not None:
        # NOTE(jbresnah) we are trying to enforce a quota, put a limit
        # reader on the data
        data = utils.LimitingReader(data, remaining)
    try:
        self.image.set_data(data, size=size)
    except exception.ImageSizeLimitExceeded:
        # Translate the store-level size error into a quota error so
        # callers see a consistent exception type.
        raise exception.StorageQuotaFull(image_size=size,
                                         remaining=remaining)

    # NOTE(jbresnah) If two uploads happen at the same time and neither
    # properly sets the size attribute then there is a race condition
    # that will allow for the quota to be broken. Thus we must recheck
    # the quota after the upload and thus after we know the size.
    try:
        glance.api.common.check_quota(
            self.context, self.image.size, self.db_api,
            image_id=self.image.image_id)
    except exception.StorageQuotaFull:
        LOG.info(_('Cleaning up %s after exceeding the quota.')
                 % self.image.image_id)
        location = self.image.locations[0]['url']
        glance.store.safe_delete_from_backend(location,
                                              self.context,
                                              self.image.image_id)
        raise
def set_data(self, data, size=None):
    """Write image data to the backend store and activate the image."""
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size

    capped = utils.LimitingReader(utils.CooperativeReader(data),
                                  CONF.image_size_cap)
    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF, self.image.image_id, capped, size, context=self.context)

    # Verify the signature (if correct properties are present)
    props = self.image.extra_properties
    if signature_utils.should_verify_signature(props):
        # NOTE(bpoulos): if verification fails, exception will be raised
        if signature_utils.verify_signature(self.context, checksum, props):
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)

    self.image.locations = [{'url': location,
                             'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
def _consume_all_read():
    # Read the capped stream one byte at a time; LimitingReader is
    # expected to raise once the byte past the cap is requested.
    bytes_read = 0
    source = six.StringIO("*" * BYTES)
    reader = utils.LimitingReader(source, BYTES - 1)
    while True:
        chunk = reader.read(1)
        if not chunk:
            break
        bytes_read += 1
def test_limiting_reader(self):
    """Ensure limiting reader class accesses all bytes of file"""
    BYTES = 1024

    # Chunked iteration must yield every byte exactly once.
    source = six.StringIO("*" * BYTES)
    total = 0
    for chunk in utils.LimitingReader(source, BYTES):
        total += len(chunk)
    self.assertEqual(BYTES, total)

    # Byte-at-a-time read() must also see every byte.
    source = six.StringIO("*" * BYTES)
    reader = utils.LimitingReader(source, BYTES)
    total = 0
    while True:
        piece = reader.read(1)
        if not piece:
            break
        total += 1
    self.assertEqual(BYTES, total)
def test_cleanup_when_add_image_exception(self):
    # Verify that a failed add() cleans up the partially written image:
    # the GridFS store must issue 'delete' after 'exists' and 'put'.
    if pymongo is None:
        msg = 'GridFS store can not add images, skip test.'
        self.skipTest(msg)

    # The reader is capped at 1 byte but 2 bytes are supplied, so the
    # store hits ImageSizeLimitExceeded mid-write.
    self.assertRaises(exception.ImageSizeLimitExceeded, self.store.add,
                      'fake_image_id',
                      utils.LimitingReader(six.StringIO('xx'), 1),
                      2)
    self.assertEqual(self.store.fs.called_commands,
                     ['exists', 'put', 'delete'])
def set_data(self, data, size=None):
    """Store image data via the default backend and mark it active."""
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size
    capped = utils.LimitingReader(utils.CooperativeReader(data),
                                  CONF.image_size_cap)
    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        self.context, CONF.default_store, self.image.image_id,
        capped, size)
    self.image.locations = [{'url': location, 'metadata': loc_meta}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
def upload_to_store(self, data, size):
    """Write blob data to the backend store and record its location."""
    capped = utils.LimitingReader(utils.CooperativeReader(data),
                                  CONF.image_size_cap)
    location, ret_size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF, self.blob.item_key, capped, size, context=self.context)
    # loc_meta is intentionally ignored; blobs record only the location
    # value and status.
    self.blob.size = ret_size
    self.blob.locations = [{'status': 'active', 'value': location}]
    self.blob.checksum = checksum
def set_data(self, data, size=None):
    """Store image data, verifying its cryptographic signature if the
    image's extra properties request it.

    :param data: file-like object supplying the image data
    :param size: expected size in bytes, or None when unknown
    :raises cursive_exception.SignatureVerificationError: if the stored
        data fails signature verification (the stored data is deleted
        from the backend first)
    """
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size

    # Create the verifier for signature verification (if correct properties
    # are present)
    extra_props = self.image.extra_properties
    if (signature_utils.should_create_verifier(extra_props)):
        # NOTE(bpoulos): if creating verifier fails, exception will be
        # raised
        img_signature = extra_props[signature_utils.SIGNATURE]
        hash_method = extra_props[signature_utils.HASH_METHOD]
        key_type = extra_props[signature_utils.KEY_TYPE]
        cert_uuid = extra_props[signature_utils.CERT_UUID]
        verifier = signature_utils.get_verifier(
            context=self.context,
            img_signature_certificate_uuid=cert_uuid,
            img_signature_hash_method=hash_method,
            img_signature=img_signature,
            img_signature_key_type=key_type)
    else:
        verifier = None

    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF, self.image.image_id,
        utils.LimitingReader(utils.CooperativeReader(data),
                             CONF.image_size_cap),
        size, context=self.context, verifier=verifier)

    # NOTE(bpoulos): if verification fails, exception will be raised
    if verifier:
        try:
            verifier.verify()
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)
        except crypto_exception.InvalidSignature:
            # Remove the bad data before surfacing the failure so no
            # unverified image lingers in the backend.
            self.store_api.delete_from_backend(location,
                                               context=self.context)
            raise cursive_exception.SignatureVerificationError(
                _('Signature verification failed'))

    self.image.locations = [{'url': location,
                             'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
def test_cleanup_when_add_image_exception(self):
    # Verify the sheepdog store deletes the VDI it created when the
    # incoming data exceeds the LimitingReader cap mid-upload.
    called_commands = []

    def _fake_run_command(self, command, data, *params):
        # Record every sheepdog command issued instead of executing it.
        called_commands.append(command)

    self.stubs.Set(glance.store.sheepdog.SheepdogImage,
                   '_run_command', _fake_run_command)
    self.assertRaises(exception.ImageSizeLimitExceeded,
                      self.store.add,
                      'fake_image_id',
                      utils.LimitingReader(StringIO.StringIO('xx'), 1),
                      2)
    self.assertEqual(called_commands, ['list -r', 'create', 'delete'])
def set_data(self, data, size=None):
    """Store image data while enforcing the user's storage quota.

    :param data: file-like object supplying the image data
    :param size: expected size in bytes, or None when unknown (e.g.
        chunked transfer encoding without a Content-Length header)
    :raises exception.StorageQuotaFull: when the upload exceeds quota;
        data already written to the backend is cleaned up first
    """
    remaining = glance.api.common.check_quota(
        self.context, size, self.db_api, image_id=self.image.image_id)
    if remaining is not None:
        # NOTE(jbresnah) we are trying to enforce a quota, put a limit
        # reader on the data
        data = utils.LimitingReader(data, remaining)
    try:
        self.image.set_data(data, size=size)
    except exception.ImageSizeLimitExceeded:
        raise exception.StorageQuotaFull(image_size=size,
                                         remaining=remaining)

    # NOTE(jbresnah) If two uploads happen at the same time and neither
    # properly sets the size attribute[1] then there is a race condition
    # that will allow for the quota to be broken[2]. Thus we must recheck
    # the quota after the upload and thus after we know the size.
    #
    # Also, when an upload doesn't set the size properly then the call to
    # check_quota above returns None and so utils.LimitingReader is not
    # used above. Hence the store (e.g. filesystem store) may have to
    # download the entire file before knowing the actual file size. Here
    # also we need to check for the quota again after the image has been
    # downloaded to the store.
    #
    # [1] For e.g. when using chunked transfers the 'Content-Length'
    #     header is not set.
    # [2] For e.g.:
    #   - Upload 1 does not exceed quota but upload 2 exceeds quota.
    #     Both uploads are to different locations
    #   - Upload 2 completes before upload 1 and writes image.size.
    #   - Immediately, upload 1 completes and (over)writes image.size
    #     with the smaller size.
    #   - Now, to glance, image has not exceeded quota but, in
    #     reality, the quota has been exceeded.
    try:
        glance.api.common.check_quota(
            self.context, self.image.size, self.db_api,
            image_id=self.image.image_id)
    except exception.StorageQuotaFull:
        # Clean up the just-written data, then re-raise the quota error.
        with excutils.save_and_reraise_exception():
            LOG.info(_('Cleaning up %s after exceeding the quota.')
                     % self.image.image_id)
            location = self.image.locations[0]['url']
            glance.store.safe_delete_from_backend(
                self.context, location, self.image.image_id)
def test_image_size_exceeded_exception(self):
    # The rbd store must delete the partially written image when a
    # write fails with ImageSizeLimitExceeded.
    def _fake_write(*args, **kwargs):
        # Record the first write, then fail as if the cap was exceeded.
        if 'write' not in self.called_commands_actual:
            self.called_commands_actual.append('write')
        raise exception.ImageSizeLimitExceeded

    def _fake_delete_image(*args, **kwargs):
        self.called_commands_actual.append('delete')

    self.stubs.Set(mock_rbd.Image, 'write', _fake_write)
    self.stubs.Set(self.store, '_delete_image', _fake_delete_image)
    data = utils.LimitingReader(self.data_iter, self.data_len)
    self.assertRaises(exception.ImageSizeLimitExceeded,
                      self.store.add, 'fake_image_id',
                      data, self.data_len + 1)
    self.called_commands_expected = ['write', 'delete']
def test_cleanup_when_add_image_exception(self):
    # Verify the RBD store deletes the image it created when the
    # LimitingReader trips ImageSizeLimitExceeded during the write.
    if rbd is None:
        msg = 'RBD store can not add images, skip test.'
        self.skipTest(msg)

    called_commands = []

    class FakeConnection(object):
        @contextlib.contextmanager
        def open_ioctx(self, *args, **kwargs):
            yield None

    class FakeImage(object):
        def write(self, *args, **kwargs):
            called_commands.append('write')
            return FAKE_CHUNKSIZE

    @contextlib.contextmanager
    def _fake_rados(*args, **kwargs):
        yield FakeConnection()

    @contextlib.contextmanager
    def _fake_image(*args, **kwargs):
        yield FakeImage()

    def _fake_create_image(*args, **kwargs):
        called_commands.append('create')
        return StoreLocation({'image': 'fake_image',
                              'snapshot': 'fake_snapshot'})

    def _fake_delete_image(*args, **kwargs):
        called_commands.append('delete')

    self.stubs.Set(rados, 'Rados', _fake_rados)
    self.stubs.Set(rbd, 'Image', _fake_image)
    self.stubs.Set(self.store, '_create_image', _fake_create_image)
    self.stubs.Set(self.store, '_delete_image', _fake_delete_image)

    # 2 bytes offered against a 1-byte cap forces the size error.
    self.assertRaises(exception.ImageSizeLimitExceeded,
                      self.store.add,
                      'fake_image_id',
                      utils.LimitingReader(StringIO.StringIO('xx'), 1),
                      2)
    self.assertEqual(called_commands, ['create', 'write', 'delete'])
def set_data(self, data, size=None):
    """Store image data, verifying its signature when configured.

    :param data: file-like object supplying the image data
    :param size: expected size in bytes, or None when unknown
    :raises exception.SignatureVerificationError: if verification fails
    """
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size

    # Create the verifier for signature verification (if correct properties
    # are present)
    if (signature_utils.should_create_verifier(
            self.image.extra_properties)):
        # NOTE(bpoulos): if creating verifier fails, exception will be
        # raised
        verifier = signature_utils.get_verifier(
            self.context, self.image.extra_properties)
    else:
        verifier = None

    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF, self.image.image_id,
        utils.LimitingReader(utils.CooperativeReader(data),
                             CONF.image_size_cap),
        size, context=self.context, verifier=verifier)

    # NOTE(review): the signature appears to be checked twice — once via
    # the checksum-based helper below and again via verifier.verify().
    # Confirm the duplication is intentional (possibly a transitional
    # state between two verification schemes).
    self._verify_signature_if_needed(checksum)

    # NOTE(bpoulos): if verification fails, exception will be raised
    if verifier:
        try:
            verifier.verify()
            LOG.info(_LI("Successfully verified signature for image %s"),
                     self.image.image_id)
        except crypto_exception.InvalidSignature:
            raise exception.SignatureVerificationError(
                _('Signature verification failed'))

    self.image.locations = [{'url': location,
                             'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
def set_data(self, data, size=None):
    """Store image data, run checksum-based signature verification if
    requested, and activate the image."""
    if size is None:
        size = 0  # NOTE(markwash): zero -> unknown size

    capped = utils.LimitingReader(utils.CooperativeReader(data),
                                  CONF.image_size_cap)
    location, size, checksum, loc_meta = self.store_api.add_to_backend(
        CONF, self.image.image_id, capped, size, context=self.context)

    self._verify_signature_if_needed(checksum)

    self.image.locations = [{'url': location,
                             'metadata': loc_meta,
                             'status': 'active'}]
    self.image.size = size
    self.image.checksum = checksum
    self.image.status = 'active'
def _deserialize(self, request):
    """Extract image metadata and (optionally size-capped) image data
    from an incoming request.

    :param request: the incoming webob request
    :returns: dict with 'image_meta' and 'image_data' keys
    :raises HTTPBadRequest: if the size header is not an integer or the
        declared size exceeds the configured image_size_cap
    """
    result = {}
    try:
        result['image_meta'] = utils.get_image_meta_from_headers(request)
    except exception.Invalid:
        image_size_str = request.headers['x-image-meta-size']
        msg = _("Incoming image size of %s was not convertible to "
                "an integer.") % image_size_str
        raise HTTPBadRequest(explanation=msg, request=request)

    image_meta = result['image_meta']
    image_meta = validate_image_meta(request, image_meta)
    if request.content_length:
        image_size = request.content_length
    elif 'size' in image_meta:
        image_size = image_meta['size']
    else:
        image_size = None

    data = request.body_file if self.has_body(request) else None

    if image_size is None and data is not None:
        # Size unknown: cap the stream so an oversized upload fails
        # during the read rather than filling the store.
        data = utils.LimitingReader(data, CONF.image_size_cap)

        # NOTE(bcwaldon): this is a hack to make sure the downstream code
        # gets the correct image data
        request.body_file = data

    # NOTE: explicit None check — the original compared None against an
    # int here when neither content-length nor a size header was set,
    # which only worked by accident on Python 2 and raises TypeError on
    # Python 3. Behavior is unchanged (None > x was False on py2).
    elif image_size is not None and image_size > CONF.image_size_cap:
        max_image_size = CONF.image_size_cap
        msg = _("Denying attempt to upload image larger than %d bytes.")
        LOG.warn(msg % max_image_size)
        raise HTTPBadRequest(explanation=msg % max_image_size,
                             request=request)

    result['image_data'] = data
    return result
def stage(self, req, image_id, data, size):
    """Stage image data into the staging store for later import.

    Enforces the staging quota, checks image access policy, writes the
    data (capped at CONF.image_size_cap) to the staging store, records
    which worker holds the staged data, and maps store/quota failures
    to appropriate HTTP errors, unstaging on failure.

    :param req: the incoming webob request
    :param image_id: ID of the image being staged
    :param data: file-like object supplying the image data
    :param size: declared data size (currently unused; 0 is passed to
        the store as "unknown")
    """
    try:
        ks_quota.enforce_image_staging_total(req.context,
                                             req.context.owner)
    except exception.LimitExceeded as e:
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e),
                                                  request=req)

    image_repo = self.gateway.get_repo(req.context,
                                       authorization_layer=False)
    # NOTE(abhishekk): stage API call does not have its own policy but
    # it requires get_image access, this is the right place to check
    # whether user has access to image or not
    try:
        image = image_repo.get(image_id)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)

    api_pol = api_policy.ImageAPIPolicy(req.context, image,
                                        enforcer=self.policy)
    try:
        api_pol.modify_image()
    except exception.Forbidden as e:
        # NOTE(abhishekk): This will throw Forbidden if S-RBAC is not
        # enabled
        raise webob.exc.HTTPForbidden(explanation=e.msg)

    # NOTE(jokke): this is horrible way to do it but as long as
    # glance_store is in a shape it is, the only way. Don't hold me
    # accountable for it.
    # TODO(abhishekk): After removal of backend module from glance_store
    # need to change this to use multi_backend module.
    def _build_staging_store():
        # Build a private filesystem store rooted at node_staging_uri
        # (the 'file://' prefix — 7 chars — is stripped).
        conf = cfg.ConfigOpts()
        try:
            backend.register_opts(conf)
        except cfg.DuplicateOptError:
            pass
        conf.set_override('filesystem_store_datadir',
                          CONF.node_staging_uri[7:],
                          group='glance_store')
        staging_store = backend._load_store(conf, 'file')
        try:
            staging_store.configure()
        except AttributeError:
            msg = _("'node_staging_uri' is not set correctly. Could not "
                    "load staging store.")
            raise exception.BadStoreUri(message=msg)
        return staging_store

    # NOTE(abhishekk): Use reserved 'os_glance_staging_store' for staging
    # the data, the else part will be removed once multiple backend feature
    # is declared as stable.
    if CONF.enabled_backends:
        staging_store = glance_store.get_store_from_store_identifier(
            'os_glance_staging_store')
    else:
        staging_store = _build_staging_store()

    try:
        image.status = 'uploading'
        image_repo.save(image, from_state='queued')
        ks_quota.enforce_image_count_uploading(req.context,
                                               req.context.owner)
        try:
            uri, size, id, store_info = staging_store.add(
                image_id,
                utils.LimitingReader(utils.CooperativeReader(data),
                                     CONF.image_size_cap), 0)
            image.size = size
        except glance_store.Duplicate:
            msg = _("The image %s has data on staging") % image_id
            raise webob.exc.HTTPConflict(explanation=msg)

        # NOTE(danms): Record this worker's
        # worker_self_reference_url in the image metadata so we
        # know who has the staging data.
        self_url = CONF.worker_self_reference_url or CONF.public_endpoint
        if self_url:
            image.extra_properties['os_glance_stage_host'] = self_url
        image_repo.save(image, from_state='uploading')
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except glance_store.StorageFull as e:
        msg = _("Image storage media "
                "is full: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.StorageQuotaFull as e:
        msg = _("Image exceeds the storage "
                "quota: %s") % encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.ImageSizeLimitExceeded as e:
        msg = _("The incoming image is "
                "too large: %s") % encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.LimitExceeded as e:
        LOG.debug(str(e))
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=str(e),
                                                  request=req)
    except glance_store.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image "
                "storage media: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req)
    except exception.InvalidImageStatusTransition as e:
        msg = encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        raise webob.exc.HTTPConflict(explanation=e.msg, request=req)
    except Exception:
        # Unknown failure: restore the image to its prior state and
        # re-raise so the original traceback is preserved.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to stage image data due to "
                              "internal error"))
            self._restore(image_repo, image)
def _consume_all_iter():
    # Iterate a stream capped one byte short of its content;
    # LimitingReader is expected to raise once the cap is crossed.
    consumed = 0
    stream = six.StringIO("*" * BYTES)
    for piece in utils.LimitingReader(stream, BYTES - 1):
        consumed += len(piece)
def test_object_chunking(self):
    """Upload an image that is split into multiple swift objects.

    We specifically check the case that
    image_size % swift_store_large_object_chunk_size != 0 to
    ensure we aren't losing image data.
    """
    self.config(
        swift_store_large_object_size=2,  # 2 MB
        swift_store_large_object_chunk_size=2,  # 2 MB
    )
    store = self.get_store()
    image_id = uuidutils.generate_uuid()
    image_size = 5242880  # 5 MB
    image_data = StringIO.StringIO('X' * image_size)
    image_checksum = 'eb7f8c3716b9f059cee7617a4ba9d0d3'

    uri, add_size, add_checksum = store.add(image_id,
                                            image_data,
                                            image_size)

    self.assertEqual(image_size, add_size)
    self.assertEqual(image_checksum, add_checksum)

    location = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)

    # Store interface should still be respected even though
    # we are storing images in multiple Swift objects
    (get_iter, get_size) = store.get(location)
    self.assertEqual(5242880, get_size)
    self.assertEqual('X' * 5242880, ''.join(get_iter))

    # The object should have a manifest pointing to the chunks
    # of image data
    swift_location = location.store_location
    headers = swift_head_object(self.swift_client,
                                swift_location.container,
                                swift_location.obj)
    manifest = headers.get('x-object-manifest')
    self.assertTrue(manifest)

    # Verify the objects in the manifest exist
    manifest_container, manifest_prefix = manifest.split('/', 1)
    container = swift_get_container(self.swift_client,
                                    manifest_container,
                                    prefix=manifest_prefix)
    segments = [segment['name'] for segment in container[1]]

    for segment in segments:
        headers = swift_head_object(self.swift_client,
                                    manifest_container,
                                    segment)
        self.assertTrue(headers.get('content-length'))

    # Since we used a 5 MB image with a 2 MB chunk size, we should
    # expect to see three data objects
    self.assertEqual(3, len(segments), 'Got segments %s' % segments)

    # Add an object that should survive the delete operation
    non_image_obj = image_id + '0'
    swift_put_object(self.swift_client,
                     manifest_container,
                     non_image_obj,
                     'XXX')

    store.delete(location)

    # Verify the segments in the manifest are all gone
    for segment in segments:
        self.assertRaises(swiftclient.ClientException,
                          swift_head_object,
                          self.swift_client,
                          manifest_container,
                          segment)

    # Verify the manifest is gone too
    self.assertRaises(swiftclient.ClientException,
                      swift_head_object,
                      self.swift_client,
                      manifest_container,
                      swift_location.obj)

    # Verify that the non-image object was not deleted
    headers = swift_head_object(self.swift_client,
                                manifest_container,
                                non_image_obj)
    self.assertTrue(headers.get('content-length'))

    # Clean up
    self.swift_client.delete_object(manifest_container,
                                    non_image_obj)

    # Simulate exceeding 'image_size_cap' setting: cap the reader one
    # byte short so add() fails partway through the chunked upload.
    image_data = StringIO.StringIO('X' * image_size)
    image_data = common_utils.LimitingReader(image_data, image_size - 1)
    image_id = uuidutils.generate_uuid()
    self.assertRaises(exception.ImageSizeLimitExceeded,
                      store.add,
                      image_id,
                      image_data,
                      image_size)

    # Verify written segments have been deleted
    container = swift_get_container(self.swift_client,
                                    manifest_container,
                                    prefix=image_id)
    segments = [segment['name'] for segment in container[1]]
    self.assertEqual(0, len(segments), 'Got segments %s' % segments)
def stage(self, req, image_id, data, size):
    """Stage image data into the node-local filesystem staging store.

    Writes the data (capped at CONF.image_size_cap) to the staging
    store, transitions the image from 'queued' to 'uploading', and maps
    store/quota failures to appropriate HTTP errors, unstaging or
    restoring the image on failure.

    :param req: the incoming webob request
    :param image_id: ID of the image being staged
    :param data: file-like object supplying the image data
    :param size: declared data size (currently unused; 0 is passed to
        the store as "unknown")
    """
    image_repo = self.gateway.get_repo(req.context)
    image = None
    # NOTE(jokke): this is horrible way to do it but as long as
    # glance_store is in a shape it is, the only way. Don't hold me
    # accountable for it.
    # TODO(abhishekk): After removal of backend module from glance_store
    # need to change this to use multi_backend module.

    def _build_staging_store():
        # Build a private filesystem store rooted at node_staging_uri
        # (the 'file://' prefix — 7 chars — is stripped).
        conf = cfg.ConfigOpts()
        try:
            backend.register_opts(conf)
        except cfg.DuplicateOptError:
            pass
        conf.set_override('filesystem_store_datadir',
                          CONF.node_staging_uri[7:],
                          group='glance_store')
        staging_store = backend._load_store(conf, 'file')
        try:
            staging_store.configure()
        except AttributeError:
            msg = _("'node_staging_uri' is not set correctly. Could not "
                    "load staging store.")
            raise exception.BadStoreUri(message=msg)
        return staging_store

    staging_store = _build_staging_store()
    try:
        image = image_repo.get(image_id)
        image.status = 'uploading'
        image_repo.save(image, from_state='queued')
        try:
            staging_store.add(
                image_id,
                utils.LimitingReader(
                    utils.CooperativeReader(data),
                    CONF.image_size_cap), 0)
        except glance_store.Duplicate as e:
            msg = _("The image %s has data on staging") % image_id
            raise webob.exc.HTTPConflict(explanation=msg)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.msg)
    except glance_store.StorageFull as e:
        msg = _("Image storage media "
                "is full: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.StorageQuotaFull as e:
        msg = _("Image exceeds the storage "
                "quota: %s") % encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except exception.ImageSizeLimitExceeded as e:
        msg = _("The incoming image is "
                "too large: %s") % encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req)
    except glance_store.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image "
                "storage media: %s") % encodeutils.exception_to_unicode(e)
        LOG.error(msg)
        self._unstage(image_repo, image, staging_store)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req)
    except exception.InvalidImageStatusTransition as e:
        msg = encodeutils.exception_to_unicode(e)
        LOG.debug(msg)
        raise webob.exc.HTTPConflict(explanation=e.msg, request=req)
    except Exception as e:
        # Unknown failure: restore the image to its prior state and
        # re-raise so the original traceback is preserved.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to stage image data due to "
                              "internal error"))
            self._restore(image_repo, image)
def upload_data_to_store(req, image_meta, image_data, store, notifier):
    """
    Upload image data to specified store.

    Upload image data to the store and cleans up on error.

    :param req: the incoming webob request
    :param image_meta: mapping of image metadata ('id', optional
        'size'/'checksum' supplied by the client)
    :param image_data: file-like object supplying the image data
    :param store: backend store to upload to
    :param notifier: notifier used to emit 'image.upload' error events
    :returns: tuple of (updated image_meta, location_data)
    """
    image_id = image_meta['id']

    db_api = glance.db.get_api()
    image_size = image_meta.get('size')

    try:
        # Enforce the user's storage quota; if a remaining amount is
        # known, cap the reader so the upload cannot exceed it.
        remaining = glance.api.common.check_quota(
            req.context, image_size, db_api, image_id=image_id)
        if remaining is not None:
            image_data = utils.LimitingReader(image_data, remaining)

        (uri,
         size,
         checksum,
         location_metadata) = store_api.store_add_to_backend(
            image_meta['id'],
            utils.CooperativeReader(image_data),
            image_meta['size'],
            store)

        location_data = {'url': uri,
                         'metadata': location_metadata,
                         'status': 'active'}

        try:
            # recheck the quota in case there were simultaneous uploads that
            # did not provide the size
            glance.api.common.check_quota(
                req.context, size, db_api, image_id=image_id)
        except exception.StorageQuotaFull:
            with excutils.save_and_reraise_exception():
                LOG.info(_('Cleaning up %s after exceeding '
                           'the quota') % image_id)
                store_utils.safe_delete_from_backend(
                    req.context, image_meta['id'], location_data)

        def _kill_mismatched(image_meta, attr, actual):
            # If the client-supplied value for 'attr' disagrees with what
            # the store computed, kill the image and delete its data.
            supplied = image_meta.get(attr)
            if supplied and supplied != actual:
                msg = (_("Supplied %(attr)s (%(supplied)s) and "
                         "%(attr)s generated from uploaded image "
                         "(%(actual)s) did not match. Setting image "
                         "status to 'killed'.") % {'attr': attr,
                                                   'supplied': supplied,
                                                   'actual': actual})
                LOG.error(msg)
                safe_kill(req, image_id)
                initiate_deletion(req, location_data, image_id)
                raise webob.exc.HTTPBadRequest(explanation=msg,
                                               content_type="text/plain",
                                               request=req)

        # Verify any supplied size/checksum value matches size/checksum
        # returned from store when adding image
        _kill_mismatched(image_meta, 'size', size)
        _kill_mismatched(image_meta, 'checksum', checksum)

        # Update the database with the checksum returned
        # from the backend store
        LOG.debug("Updating image %(image_id)s data. "
                  "Checksum set to %(checksum)s, size set "
                  "to %(size)d", {'image_id': image_id,
                                  'checksum': checksum,
                                  'size': size})
        update_data = {'checksum': checksum,
                       'size': size}
        try:
            image_meta = registry.update_image_metadata(req.context,
                                                        image_id,
                                                        update_data)
        except exception.NotFound:
            msg = _("Image %s could not be found after upload. The image may "
                    "have been deleted during the upload.") % image_id
            LOG.info(msg)

            # NOTE(jculp): we need to clean up the datastore if an image
            # resource is deleted while the image data is being uploaded
            #
            # We get "location_data" from above call to store.add(), any
            # exceptions that occur there handle this same issue internally,
            # Since this is store-agnostic, should apply to all stores.
            initiate_deletion(req, location_data, image_id)
            raise webob.exc.HTTPPreconditionFailed(explanation=msg,
                                                   request=req,
                                                   content_type='text/plain')

    except exception.Duplicate as e:
        msg = u"Attempt to upload duplicate image: %s" % e
        LOG.debug(msg)
        # NOTE(dosaboy): do not delete the image since it is likely that this
        # conflict is a result of another concurrent upload that will be
        # successful.
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPConflict(explanation=msg,
                                     request=req,
                                     content_type="text/plain")

    except exception.Forbidden as e:
        msg = u"Forbidden upload attempt: %s" % e
        LOG.debug(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPForbidden(explanation=msg,
                                      request=req,
                                      content_type="text/plain")

    except exception.StorageFull as e:
        msg = _("Image storage media is full: %s") % utils.exception_to_str(e)
        LOG.error(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except exception.StorageWriteDenied as e:
        msg = (_("Insufficient permissions on image storage media: %s")
               % utils.exception_to_str(e))
        LOG.error(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req,
                                               content_type='text/plain')

    except exception.ImageSizeLimitExceeded as e:
        msg = (_("Denying attempt to upload image larger than %d bytes.")
               % CONF.image_size_cap)
        LOG.info(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except exception.StorageQuotaFull as e:
        # NOTE: fixed user-facing message typo — previously rendered as
        # "...exceeds the .quota: ..." due to a misplaced period.
        msg = (_("Denying attempt to upload image because it exceeds the "
                 "quota: %s") % utils.exception_to_str(e))
        LOG.info(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except webob.exc.HTTPError:
        # NOTE(bcwaldon): Ideally, we would just call 'raise' here,
        # but something in the above function calls is affecting the
        # exception context and we must explicitly re-raise the
        # caught exception.
        msg = _("Received HTTP error while uploading image %s") % image_id
        notifier.error('image.upload', msg)
        with excutils.save_and_reraise_exception():
            LOG.exception(msg)
            safe_kill(req, image_id)

    except (ValueError, IOError):
        msg = "Client disconnected before sending all data to backend"
        LOG.debug(msg)
        safe_kill(req, image_id)
        raise webob.exc.HTTPBadRequest(explanation=msg,
                                       content_type="text/plain",
                                       request=req)

    except Exception:
        msg = _("Failed to upload image %s") % image_id
        LOG.exception(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPInternalServerError(explanation=msg,
                                                request=req,
                                                content_type='text/plain')

    return image_meta, location_data