def add(self, image_id, image_file, image_size, context=None,
        verifier=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param verifier: An object used to verify signatures for images

    :retval: tuple of URL in backing store, bytes written, and checksum
    :raises: `glance_store.exceptions.Duplicate` if the image already
             existed
    """
    image = SheepdogImage(self.addr, self.port, image_id,
                          self.WRITE_CHUNKSIZE)
    if image.exist():
        raise exceptions.Duplicate(
            _("Sheepdog image %s already exists") % image_id)

    location = StoreLocation({
        'image': image_id,
        'addr': self.addr,
        'port': self.port
    }, self.conf)

    image.create(image_size)

    try:
        offset = 0
        checksum = hashlib.md5()
        chunks = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        for chunk in chunks:
            chunk_length = len(chunk)
            # If the image size provided is zero we need to do
            # a resize for the amount we are writing. This will
            # be slower so setting a higher chunk size may
            # speed things up a bit.
            if image_size == 0:
                image.resize(offset + chunk_length)
            image.write(chunk, offset, chunk_length)
            offset += chunk_length
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)
    except Exception:
        # Note(zhiyan): clean up already received data when
        # error occurs such as ImageSizeLimitExceeded exceptions.
        with excutils.save_and_reraise_exception():
            image.delete()

    return (location.get_uri(), offset, checksum.hexdigest(), {})
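# The helper utils.chunkreadable() used above comes from glance_store's
# common utilities and is not shown in this excerpt. A minimal stand-in,
# assuming only that the source object exposes read(), could look like the
# sketch below (an illustration, not the library implementation).
def chunkreadable(fp, chunk_size=64 * 1024 * 1024):
    """Yield successive chunks of at most chunk_size bytes until EOF."""
    while True:
        chunk = fp.read(chunk_size)
        if not chunk:
            break
        yield chunk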
def _get_response(self, location, verb):
    if not hasattr(self, 'session'):
        self.session = requests.Session()
        ca_bundle = self.conf.glance_store.https_ca_certificates_file
        disable_https = self.conf.glance_store.https_insecure
        self.session.verify = ca_bundle if ca_bundle else not disable_https
        self.session.proxies = self.conf.glance_store.http_proxy_information
    return self.session.request(verb, location.get_uri(), stream=True,
                                allow_redirects=False)
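# The verify/proxies precedence above can be exercised in isolation with
# plain requests. build_session() is a hypothetical free-standing helper,
# not part of the store; it only mirrors the rule that an explicit CA bundle
# path wins and that verification stays on unless https_insecure is set.
import requests


def build_session(ca_bundle=None, disable_https=False, proxies=None):
    session = requests.Session()
    # A CA bundle path takes priority; otherwise fall back to a boolean flag.
    session.verify = ca_bundle if ca_bundle else not disable_https
    session.proxies = proxies or {}
    return session


# Defaults keep certificate checking enabled...
assert build_session().verify is True
# ...and only the insecure flag (with no bundle) turns it off.
assert build_session(disable_https=True).verify is False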
def add(self, image_id, image_file, image_size, context=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes

    :retval tuple of URL in backing store, bytes written, and checksum
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed
    """
    image = SheepdogImage(self.addr, self.port, image_id,
                          self.WRITE_CHUNKSIZE)
    if image.exist():
        raise exceptions.Duplicate(_("Sheepdog image %s already exists")
                                   % image_id)

    location = StoreLocation({'image': image_id}, self.conf)

    checksum = hashlib.md5()

    image.create(image_size)

    try:
        total = left = image_size
        while left > 0:
            length = min(self.chunk_size, left)
            data = image_file.read(length)
            image.write(data, total - left, length)
            left -= length
            checksum.update(data)
    except Exception:
        # Note(zhiyan): clean up already received data when
        # error occurs such as ImageSizeLimitExceeded exceptions.
        with excutils.save_and_reraise_exception():
            image.delete()

    return (location.get_uri(), image_size, checksum.hexdigest(), {})
def add(self, image_id, image_file, image_size, context=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes

    :retval tuple of URL in backing store, bytes written, and checksum
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed
    """
    image = SheepdogImage(self.addr, self.port, image_id,
                          self.WRITE_CHUNKSIZE)
    if image.exist():
        raise exceptions.Duplicate(
            _("Sheepdog image %s already exists") % image_id)

    location = StoreLocation({'image': image_id})

    checksum = hashlib.md5()

    image.create(image_size)

    try:
        total = left = image_size
        while left > 0:
            length = min(self.chunk_size, left)
            data = image_file.read(length)
            image.write(data, total - left, length)
            left -= length
            checksum.update(data)
    except Exception:
        # Note(zhiyan): clean up already received data when
        # error occurs such as ImageSizeLimitExceeded exceptions.
        with excutils.save_and_reraise_exception():
            image.delete()

    return (location.get_uri(), image_size, checksum.hexdigest(), {})
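# The offset arithmetic of the loop above (each write lands at total - left)
# can be sanity-checked on its own; the sizes below are arbitrary examples,
# not values taken from the store.
image_size = 10
chunk_size = 4

total = left = image_size
writes = []
while left > 0:
    length = min(chunk_size, left)
    writes.append((total - left, length))   # (offset, length) of each write
    left -= length

# Chunks land back to back: offsets 0, 4, 8 with lengths 4, 4, 2.
assert writes == [(0, 4), (4, 4), (8, 2)]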
def add(self, image_id, image_file, image_size,
        connection=None, context=None, verifier=None):
    location = self.create_location(image_id, context=context)
    if not connection:
        connection = self.get_connection(location, context=context)

    self._create_container_if_missing(location.container, connection)

    LOG.debug("Adding image object '%(obj_name)s' "
              "to Swift" % dict(obj_name=location.obj))
    try:
        if image_size > 0 and image_size < self.large_object_size:
            # Image size is known, and is less than large_object_size.
            # Send to Swift with regular PUT.
            obj_etag = connection.put_object(location.container,
                                             location.obj, image_file,
                                             content_length=image_size)
        else:
            # Write the image into Swift in chunks.
            chunk_id = 1
            if image_size > 0:
                total_chunks = str(int(
                    math.ceil(float(image_size) /
                              float(self.large_object_chunk_size))))
            else:
                # image_size == 0 is when we don't know the size
                # of the image. This can occur with older clients
                # that don't inspect the payload size.
                LOG.debug("Cannot determine image size. Adding as a "
                          "segmented object to Swift.")
                total_chunks = '?'

            checksum = hashlib.md5()
            written_chunks = []
            combined_chunks_size = 0
            while True:
                chunk_size = self.large_object_chunk_size
                if image_size == 0:
                    content_length = None
                else:
                    left = image_size - combined_chunks_size
                    if left == 0:
                        break
                    if chunk_size > left:
                        chunk_size = left
                    content_length = chunk_size

                chunk_name = "%s-%05d" % (location.obj, chunk_id)
                reader = ChunkReader(image_file, checksum, chunk_size,
                                     verifier)
                if reader.is_zero_size is True:
                    LOG.debug('Not writing zero-length chunk.')
                    break
                try:
                    chunk_etag = connection.put_object(
                        location.container, chunk_name, reader,
                        content_length=content_length)
                    written_chunks.append(chunk_name)
                except Exception:
                    # Delete orphaned segments from swift backend
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Error during chunked upload to "
                                        "backend, deleting stale chunks"))
                        self._delete_stale_chunks(connection,
                                                  location.container,
                                                  written_chunks)

                bytes_read = reader.bytes_read
                msg = ("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                       "%(total_chunks)s) of length %(bytes_read)d "
                       "to Swift returning MD5 of content: "
                       "%(chunk_etag)s" %
                       {'chunk_name': chunk_name,
                        'chunk_id': chunk_id,
                        'total_chunks': total_chunks,
                        'bytes_read': bytes_read,
                        'chunk_etag': chunk_etag})
                LOG.debug(msg)

                chunk_id += 1
                combined_chunks_size += bytes_read

            # In the case we have been given an unknown image size,
            # set the size to the total size of the combined chunks.
            if image_size == 0:
                image_size = combined_chunks_size

            # Now we write the object manifest and return the
            # manifest's etag...
            manifest = "%s/%s-" % (location.container, location.obj)
            headers = {'ETag': hashlib.md5(b"").hexdigest(),
                       'X-Object-Manifest': manifest}

            # The ETag returned for the manifest is actually the
            # MD5 hash of the concatenated checksums of the strings
            # of each chunk...so we ignore this result in favour of
            # the MD5 of the entire image file contents, so that
            # users can verify the image file contents accordingly
            connection.put_object(location.container, location.obj,
                                  None, headers=headers)
            obj_etag = checksum.hexdigest()

        # NOTE: We return the user and key here! Have to because
        # location is used by the API server to return the actual
        # image data. We *really* should consider NOT returning
        # the location attribute from GET /images/<ID> and
        # GET /images/details
        if sutils.is_multiple_swift_store_accounts_enabled(self.conf):
            include_creds = False
        else:
            include_creds = True

        return (location.get_uri(credentials_included=include_creds),
                image_size, obj_etag, {})
    except swiftclient.ClientException as e:
        if e.http_status == http_client.CONFLICT:
            msg = _("Swift already has an image at this location")
            raise exceptions.Duplicate(message=msg)

        msg = (_(u"Failed to add object to Swift.\n"
                 "Got error from Swift: %s.")
               % encodeutils.exception_to_unicode(e))
        LOG.error(msg)
        raise glance_store.BackendException(msg)
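# ChunkReader is referenced above but not defined in this excerpt. The class
# below is a minimal sketch of what such a wrapper could look like (an
# assumption, not the glance_store implementation): it caps one Swift segment
# at `length` bytes, feeds the shared MD5 and optional signature verifier as
# data streams through, and exposes bytes_read / is_zero_size for the
# caller's bookkeeping.
class ChunkReader(object):
    def __init__(self, fd, checksum, length, verifier=None):
        self.fd = fd                # binary file-like source, shared by all chunks
        self.checksum = checksum    # running MD5 over the whole image
        self.length = length        # maximum bytes this chunk may yield
        self.verifier = verifier
        self.bytes_read = 0
        # Peek one byte so callers can skip writing a zero-length chunk.
        self._buffer = fd.read(1)
        self.is_zero_size = (self._buffer == b'')

    def read(self, size=-1):
        remaining = self.length - self.bytes_read
        if remaining <= 0:
            return b''
        if size < 0 or size > remaining:
            size = remaining
        # Serve the peeked byte first, then fall through to the source.
        data, self._buffer = self._buffer[:size], self._buffer[size:]
        if len(data) < size:
            data += self.fd.read(size - len(data))
        self.bytes_read += len(data)
        self.checksum.update(data)
        if self.verifier:
            self.verifier.update(data)
        return data

# Because the same checksum object is handed to every segment's reader, the
# final checksum.hexdigest() covers the entire image rather than one chunk.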
def add(self, image_id, image_file, image_size,
        connection=None, context=None):
    location = self.create_location(image_id, context=context)
    if not connection:
        connection = self.get_connection(location, context=context)

    self._create_container_if_missing(location.container, connection)

    LOG.debug("Adding image object '%(obj_name)s' "
              "to Swift" % dict(obj_name=location.obj))
    try:
        if image_size > 0 and image_size < self.large_object_size:
            # Image size is known, and is less than large_object_size.
            # Send to Swift with regular PUT.
            obj_etag = connection.put_object(location.container,
                                             location.obj, image_file,
                                             content_length=image_size)
        else:
            # Write the image into Swift in chunks.
            chunk_id = 1
            if image_size > 0:
                total_chunks = str(int(
                    math.ceil(float(image_size) /
                              float(self.large_object_chunk_size))))
            else:
                # image_size == 0 is when we don't know the size
                # of the image. This can occur with older clients
                # that don't inspect the payload size.
                LOG.debug("Cannot determine image size. Adding as a "
                          "segmented object to Swift.")
                total_chunks = '?'

            checksum = hashlib.md5()
            written_chunks = []
            combined_chunks_size = 0
            while True:
                chunk_size = self.large_object_chunk_size
                if image_size == 0:
                    content_length = None
                else:
                    left = image_size - combined_chunks_size
                    if left == 0:
                        break
                    if chunk_size > left:
                        chunk_size = left
                    content_length = chunk_size

                chunk_name = "%s-%05d" % (location.obj, chunk_id)
                reader = ChunkReader(image_file, checksum, chunk_size)
                try:
                    chunk_etag = connection.put_object(
                        location.container, chunk_name, reader,
                        content_length=content_length)
                    written_chunks.append(chunk_name)
                except Exception:
                    # Delete orphaned segments from swift backend
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Error during chunked upload to "
                                        "backend, deleting stale chunks"))
                        self._delete_stale_chunks(connection,
                                                  location.container,
                                                  written_chunks)

                bytes_read = reader.bytes_read
                msg = ("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                       "%(total_chunks)s) of length %(bytes_read)d "
                       "to Swift returning MD5 of content: "
                       "%(chunk_etag)s" %
                       {'chunk_name': chunk_name,
                        'chunk_id': chunk_id,
                        'total_chunks': total_chunks,
                        'bytes_read': bytes_read,
                        'chunk_etag': chunk_etag})
                LOG.debug(msg)

                if bytes_read == 0:
                    # Delete the last chunk, because it's of zero size.
                    # This will happen if size == 0.
                    LOG.debug("Deleting final zero-length chunk")
                    connection.delete_object(location.container,
                                             chunk_name)
                    break

                chunk_id += 1
                combined_chunks_size += bytes_read

            # In the case we have been given an unknown image size,
            # set the size to the total size of the combined chunks.
            if image_size == 0:
                image_size = combined_chunks_size

            # Now we write the object manifest and return the
            # manifest's etag...
            manifest = "%s/%s-" % (location.container, location.obj)
            headers = {'ETag': hashlib.md5("").hexdigest(),
                       'X-Object-Manifest': manifest}

            # The ETag returned for the manifest is actually the
            # MD5 hash of the concatenated checksums of the strings
            # of each chunk...so we ignore this result in favour of
            # the MD5 of the entire image file contents, so that
            # users can verify the image file contents accordingly
            connection.put_object(location.container, location.obj,
                                  None, headers=headers)
            obj_etag = checksum.hexdigest()

        # NOTE: We return the user and key here! Have to because
        # location is used by the API server to return the actual
        # image data. We *really* should consider NOT returning
        # the location attribute from GET /images/<ID> and
        # GET /images/details
        if sutils.is_multiple_swift_store_accounts_enabled(self.conf):
            include_creds = False
        else:
            include_creds = True

        return (location.get_uri(credentials_included=include_creds),
                image_size, obj_etag, {})
    except swiftclient.ClientException as e:
        if e.http_status == httplib.CONFLICT:
            msg = _("Swift already has an image at this location")
            raise exceptions.Duplicate(message=msg)

        msg = (_(u"Failed to add object to Swift.\n"
                 "Got error from Swift: %s.")
               % cutils.exception_to_str(e))
        LOG.error(msg)
        raise glance_store.BackendException(msg)
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
        verifier=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param hashing_algo: A hashlib algorithm identifier (string)
    :param context: A context object
    :param verifier: An object used to verify signatures for images

    :returns: tuple of: (1) URL in backing store, (2) bytes written,
              (3) checksum, (4) multihash value, and (5) a dictionary
              with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             exists
    """
    image = SheepdogImage(self.addr, self.port, image_id,
                          self.WRITE_CHUNKSIZE)
    if image.exist():
        raise exceptions.Duplicate(_("Sheepdog image %s already exists")
                                   % image_id)

    location = StoreLocation({
        'image': image_id,
        'addr': self.addr,
        'port': self.port
    }, self.conf, backend_group=self.backend_group)

    image.create(image_size)

    try:
        offset = 0
        os_hash_value = hashlib.new(str(hashing_algo))
        checksum = hashlib.md5()
        chunks = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        for chunk in chunks:
            chunk_length = len(chunk)
            # If the image size provided is zero we need to do
            # a resize for the amount we are writing. This will
            # be slower so setting a higher chunk size may
            # speed things up a bit.
            if image_size == 0:
                image.resize(offset + chunk_length)
            image.write(chunk, offset, chunk_length)
            offset += chunk_length
            os_hash_value.update(chunk)
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)
    except Exception:
        # Note(zhiyan): clean up already received data when
        # error occurs such as ImageSizeLimitExceeded exceptions.
        with excutils.save_and_reraise_exception():
            image.delete()

    metadata = {}
    if self.backend_group:
        metadata['backend'] = u"%s" % self.backend_group

    return (location.get_uri(), offset, checksum.hexdigest(),
            os_hash_value.hexdigest(), metadata)
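# The behavioural change from the earlier Sheepdog variants is the second,
# configurable digest. The double-update pattern of the write loop can be
# checked on its own; the 'sha512' choice and the sample chunks below are
# illustrative, not values taken from the store configuration.
import hashlib

hashing_algo = 'sha512'
os_hash_value = hashlib.new(str(hashing_algo))
checksum = hashlib.md5()

for chunk in (b'first chunk of image data', b'second chunk'):
    os_hash_value.update(chunk)   # the configured "multihash"
    checksum.update(chunk)        # legacy MD5, kept for compatibility

print(checksum.hexdigest())       # 32 hex characters
print(os_hash_value.hexdigest())  # 128 hex characters for sha512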