def add(self, image_id, image_file, image_size, context=None, verifier=None):
    """Store an image file in Sheepdog and describe what was stored.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param context: A context object (unused by this backend)
    :param verifier: An object used to verify signatures for images
    :returns: tuple of URL in backing store, bytes written, checksum and
              a dictionary with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             existed
    """
    image = SheepdogImage(self.addr, self.port, image_id,
                          self.WRITE_CHUNKSIZE)
    if image.exist():
        raise exceptions.Duplicate(_("Sheepdog image %s already exists")
                                   % image_id)

    location = StoreLocation({
        'image': image_id,
        'addr': self.addr,
        'port': self.port
    }, self.conf)

    image.create(image_size)

    try:
        # Stream the data in WRITE_CHUNKSIZE pieces, keeping a running
        # MD5 and (optionally) feeding the signature verifier.
        offset = 0
        checksum = hashlib.md5()
        chunks = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        for chunk in chunks:
            chunk_length = len(chunk)
            # If the image size provided is zero we need to do
            # a resize for the amount we are writing. This will
            # be slower so setting a higher chunk size may
            # speed things up a bit.
            if image_size == 0:
                image.resize(offset + chunk_length)
            image.write(chunk, offset, chunk_length)
            offset += chunk_length
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)
    except Exception:
        # Note(zhiyan): clean up already received data when
        # error occurs such as ImageSizeLimitExceeded exceptions.
        with excutils.save_and_reraise_exception():
            image.delete()

    # ``offset`` is the total number of bytes actually written.
    return (location.get_uri(), offset, checksum.hexdigest(), {})
def add(self, image_id, image_file, image_size, context=None):
    """Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance.common.exceptions.Duplicate` if the image already
            existed
            `glance.common.exceptions.UnexpectedStatus` if the upload
            request returned an unexpected status. The expected
            responses are 201 Created and 200 OK.
    """
    checksum = hashlib.md5()
    # NOTE(review): _Reader presumably updates ``checksum`` as data is
    # read through it — confirm against the _Reader definition.
    image_file = _Reader(image_file, checksum)
    loc = StoreLocation({'scheme': self.scheme,
                         'server_host': self.server_host,
                         'image_dir': self.store_image_dir,
                         'datacenter_path': self.datacenter_path,
                         'datastore_name': self.datastore_name,
                         'image_id': image_id})
    # Reuse the current VIM session's auth cookie for the datastore PUT.
    cookie = self._build_vim_cookie_header(
        self._session.vim.client.options.transport.cookiejar)
    headers = {'Connection': 'Keep-Alive',
               'Cookie': cookie,
               'Transfer-Encoding': 'chunked'}
    try:
        conn = self._get_http_conn('PUT', loc, headers,
                                   content=image_file)
        res = conn.getresponse()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Failed to upload content of image '
                            '%(image)s') % {'image': image_id})

    if res.status == httplib.CONFLICT:
        raise exceptions.Duplicate(_("Image file %(image_id)s already "
                                     "exists!") %
                                   {'image_id': image_id})

    if res.status not in (httplib.CREATED, httplib.OK):
        msg = (_('Failed to upload content of image %(image)s') %
               {'image': image_id})
        LOG.error(msg)
        raise exceptions.UnexpectedStatus(status=res.status,
                                          body=res.read())

    return (loc.get_uri(), image_file.size, checksum.hexdigest(), {})
def add(self, image_id, image_file, image_size, context=None):
    """Write the supplied image data into GridFS.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed
    """
    location = StoreLocation({'image_id': image_id}, self.conf)

    # Refuse to overwrite an existing object.
    if self.fs.exists(image_id):
        raise exceptions.Duplicate(_("GridFS already has an image at "
                                     "location %s") % location.get_uri())

    LOG.debug(_("Adding a new image to GridFS with "
                "id %(iid)s and size %(size)s")
              % {'iid': image_id, 'size': image_size})

    try:
        self.fs.put(image_file, _id=image_id)
        image = self._get_file(location)
    except Exception:
        # Drop whatever partial data made it into GridFS, then re-raise.
        with excutils.save_and_reraise_exception():
            self.fs.delete(image_id)

    LOG.debug(_("Uploaded image %(iid)s, "
                "md5 %(md)s, length %(len)s to GridFS")
              % {'iid': image._id, 'md': image.md5, 'len': image.length})

    return (location.get_uri(), image.length, image.md5, {})
def add(self, image_id, image_file, image_size, context=None):
    """Write an image into Sheepdog and report what was stored.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, and checksum
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed
    """
    image = SheepdogImage(self.addr, self.port, image_id,
                          self.WRITE_CHUNKSIZE)
    if image.exist():
        raise exceptions.Duplicate(_("Sheepdog image %s already exists")
                                   % image_id)

    location = StoreLocation({'image': image_id})
    checksum = hashlib.md5()

    image.create(image_size)

    try:
        # Stream at most self.chunk_size bytes per write, advancing the
        # offset and folding each buffer into the running MD5.
        offset = 0
        remaining = image_size
        while remaining > 0:
            length = min(self.chunk_size, remaining)
            data = image_file.read(length)
            image.write(data, offset, length)
            offset += length
            remaining -= length
            checksum.update(data)
    except Exception:
        # Remove partially written data before re-raising (e.g. on
        # ImageSizeLimitExceeded).
        with excutils.save_and_reraise_exception():
            image.delete()

    return (location.get_uri(), image_size, checksum.hexdigest(), {})
def add_image_file(self, full_data_path, image_file):
    """Write image_file to iRODS at full_data_path.

    :param full_data_path: absolute iRODS path for the new data object
    :param image_file: the image data to write, as a file-like object
    :returns: list of [bytes_written, checksum_hex]
    :raises exceptions.Duplicate: if the data object already exists or
            cannot be created
    :raises exceptions.StorageWriteDenied: if the transfer fails partway
    """
    try:
        LOG.debug("attempting to create image file in irods '%s'"
                  % full_data_path)
        file_object = self.irods_conn_object.data_objects.create(
            full_data_path)
    except Exception:
        # Narrowed from a bare ``except:`` which would also have
        # swallowed SystemExit/KeyboardInterrupt.
        LOG.error("file with same name exists in the same path")
        # Bug fix: the original interpolated an undefined name
        # ``filepath`` here, raising NameError instead of the intended
        # Duplicate exception.
        raise exceptions.Duplicate(_("image file %s already exists "
                                     "or no perms") % full_data_path)

    LOG.debug("performing the write")
    checksum = hashlib.md5()
    bytes_written = 0

    try:
        with file_object.open('r+') as f:
            for buf in utils.chunkreadable(image_file,
                                           ChunkedFile.default_chunk_size):
                bytes_written += len(buf)
                checksum.update(buf)
                f.write(buf)
    except Exception as e:
        # let's attempt a delete of the partial object
        file_object.unlink()
        # Typo fix: was 'paritial write, transfer failed'.
        reason = _('partial write, transfer failed')
        LOG.error(e)
        raise exceptions.StorageWriteDenied(reason)
    finally:
        self.irods_conn_object.cleanup()
        file_object = None

    checksum_hex = checksum.hexdigest()

    # Explicit mapping instead of the fragile ``% locals()`` idiom.
    LOG.debug(_("Wrote %(bytes_written)d bytes to %(full_data_path)s, "
                "checksum = %(checksum_hex)s")
              % {'bytes_written': bytes_written,
                 'full_data_path': full_data_path,
                 'checksum_hex': checksum_hex})

    return [bytes_written, checksum_hex]
def add(self, image_id, image_file, image_size, context=None):
    """Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance.common.exceptions.Duplicate` if the image already
            existed
            `glance.common.exceptions.UnexpectedStatus` if the upload
            request returned an unexpected status. The expected
            responses are 201 Created and 200 OK.
    """
    ds = self.select_datastore(image_size)
    if image_size > 0:
        # Known size: plain PUT with a Content-Length header.
        headers = {'Content-Length': image_size}
        image_file = _Reader(image_file)
    else:
        # NOTE (arnaud): use chunk encoding when the image is still being
        # generated by the server (ex: stream optimized disks generated by
        # Nova).
        headers = {'Transfer-Encoding': 'chunked'}
        image_file = _ChunkReader(image_file)
    loc = StoreLocation({'scheme': self.scheme,
                         'server_host': self.server_host,
                         'image_dir': self.store_image_dir,
                         'datacenter_path': ds.datacenter.path,
                         'datastore_name': ds.name,
                         'image_id': image_id}, self.conf)
    # NOTE(arnaud): use a decorator when the config is not tied to self
    cookie = self._build_vim_cookie_header(True)
    headers = dict(headers)
    headers['Cookie'] = cookie
    conn_class = self._get_http_conn_class()
    conn = conn_class(loc.server_host)
    url = urllib.parse.quote('%s?%s' % (loc.path, loc.query))
    try:
        conn.request('PUT', url, image_file, headers)
    except IOError as e:
        # When a session is not authenticated, the socket is closed by
        # the server after sending the response. http_client has an open
        # issue with https that raises Broken Pipe
        # error instead of returning the response.
        # See http://bugs.python.org/issue16062. Here, we log the error
        # and continue to look into the response.
        msg = _LE('Communication error sending http %(method)s request '
                  'to the url %(url)s.\n'
                  'Got IOError %(e)s') % {'method': 'PUT',
                                          'url': url,
                                          'e': e}
        LOG.error(msg)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to upload content of image '
                              '%(image)s'), {'image': image_id})

    # Deliberately read the response even after a logged IOError above.
    res = conn.getresponse()
    if res.status == http_client.CONFLICT:
        raise exceptions.Duplicate(_("Image file %(image_id)s already "
                                     "exists!") %
                                   {'image_id': image_id})

    if res.status not in (http_client.CREATED, http_client.OK):
        msg = (_LE('Failed to upload content of image %(image)s. '
                   'The request returned an unexpected status: %(status)s.'
                   '\nThe response body:\n%(body)s') %
               {'image': image_id,
                'status': res.status,
                'body': getattr(res, 'body', None)})
        LOG.error(msg)
        raise exceptions.BackendException(msg)
    return (loc.get_uri(), image_file.size,
            image_file.checksum.hexdigest(), {})
def add(self, image_id, image_file, image_size, context=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed

    :note By default, the backend writes the image data to a file
          `/<DATADIR>/<ID>`, where <DATADIR> is the value of
          the filesystem_store_datadir configuration option and <ID>
          is the supplied image ID.
    """
    datadir = self._find_best_datadir(image_size)
    filepath = os.path.join(datadir, str(image_id))

    if os.path.exists(filepath):
        raise exceptions.Duplicate(image=filepath)

    checksum = hashlib.md5()
    bytes_written = 0
    try:
        with open(filepath, 'wb') as f:
            for buf in utils.chunkreadable(image_file,
                                           self.WRITE_CHUNKSIZE):
                bytes_written += len(buf)
                checksum.update(buf)
                f.write(buf)
    except IOError as e:
        # On EACCES the open itself failed, so there is no partial file
        # of ours to remove; for all other IO errors clean up.
        if e.errno != errno.EACCES:
            self._delete_partial(filepath, image_id)
        # Map well-known errnos to store exceptions; anything else is
        # re-raised as the original IOError.
        errors = {errno.EFBIG: exceptions.StorageFull(),
                  errno.ENOSPC: exceptions.StorageFull(),
                  errno.EACCES: exceptions.StorageWriteDenied()}
        raise errors.get(e.errno, e)
    except Exception:
        with excutils.save_and_reraise_exception():
            self._delete_partial(filepath, image_id)

    checksum_hex = checksum.hexdigest()
    metadata = self._get_metadata(filepath)

    LOG.debug(_("Wrote %(bytes_written)d bytes to %(filepath)s with "
                "checksum %(checksum_hex)s"),
              {'bytes_written': bytes_written,
               'filepath': filepath,
               'checksum_hex': checksum_hex})

    if self.conf.glance_store.filesystem_store_file_perm > 0:
        # The configured permission value is interpreted as octal.
        perm = int(str(self.conf.glance_store.filesystem_store_file_perm),
                   8)
        try:
            os.chmod(filepath, perm)
        except (IOError, OSError):
            # Best-effort: a failed chmod does not fail the upload.
            LOG.warn(_LW("Unable to set permission to image: %s") %
                     filepath)

    return ('file://%s' % filepath, bytes_written, checksum_hex, metadata)
def add(self, image_id, image_file, image_size, context=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed
    """
    checksum = hashlib.md5()
    image_name = str(image_id)
    with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
        fsid = None
        if hasattr(conn, 'get_fsid'):
            fsid = conn.get_fsid()
        with conn.open_ioctx(self.pool) as ioctx:
            # RBD object order: chunk size is 2**order bytes.
            order = int(math.log(self.WRITE_CHUNKSIZE, 2))
            LOG.debug('creating image %s with order %d and size %d',
                      image_name, order, image_size)
            if image_size == 0:
                LOG.warning(_("since image size is zero we will be doing "
                              "resize-before-write for each chunk which "
                              "will be considerably slower than normal"))

            try:
                loc = self._create_image(fsid, ioctx, image_name,
                                         image_size, order)
            except rbd.ImageExists:
                raise exceptions.Duplicate(
                    message=_('RBD image %s already exists') % image_id)
            try:
                with rbd.Image(ioctx, image_name) as image:
                    # bytes_written is only accumulated (and used) in the
                    # unknown-size (image_size == 0) path.
                    bytes_written = 0
                    offset = 0
                    chunks = utils.chunkreadable(image_file,
                                                 self.WRITE_CHUNKSIZE)
                    for chunk in chunks:
                        # If the image size provided is zero we need to do
                        # a resize for the amount we are writing. This will
                        # be slower so setting a higher chunk size may
                        # speed things up a bit.
                        if image_size == 0:
                            chunk_length = len(chunk)
                            length = offset + chunk_length
                            bytes_written += chunk_length
                            LOG.debug(_("resizing image to %s KiB") %
                                      (length / 1024))
                            image.resize(length)
                        LOG.debug(_("writing chunk at offset %s") %
                                  (offset))
                        # Image.write returns the number of bytes written;
                        # advance the offset by that amount.
                        offset += image.write(chunk, offset)
                        checksum.update(chunk)
                    if loc.snapshot:
                        image.create_snap(loc.snapshot)
                        image.protect_snap(loc.snapshot)
            except Exception as exc:
                # Delete image if one was created
                try:
                    self._delete_image(loc.image, loc.snapshot)
                except exceptions.NotFound:
                    pass

                raise exc

    # Make sure we send back the image size whether provided or inferred.
    if image_size == 0:
        image_size = bytes_written

    return (loc.get_uri(), image_size, checksum.hexdigest(), {})
def add(self, image_id, image_file, image_size, context=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance_store.exceptions.Duplicate` if the image already
            existed

    S3 writes the image data using the scheme:
        s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
    where:
        <USER> = ``s3_store_user``
        <KEY> = ``s3_store_key``
        <S3_HOST> = ``s3_store_host``
        <BUCKET> = ``s3_store_bucket``
        <ID> = The id of the image being added
    """
    from boto.s3.connection import S3Connection

    loc = StoreLocation({'scheme': self.scheme,
                         'bucket': self.bucket,
                         'key': image_id,
                         's3serviceurl': self.full_s3_host,
                         'accesskey': self.access_key,
                         'secretkey': self.secret_key})

    uformat = self.conf.glance_store.s3_store_bucket_url_format
    calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

    s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                           host=loc.s3serviceurl,
                           is_secure=(loc.scheme == 's3+https'),
                           calling_format=calling_format)

    create_bucket_if_missing(self.bucket, s3_conn)

    bucket_obj = get_bucket(s3_conn, self.bucket)
    obj_name = str(image_id)

    def _sanitize(uri):
        # Scrub credentials out of URIs before they reach the logs.
        return re.sub('//.*:.*@',
                      '//s3_store_secret_key:s3_store_access_key@',
                      uri)

    key = bucket_obj.get_key(obj_name)
    if key and key.exists():
        raise exceptions.Duplicate(message=_("S3 already has an image at "
                                             "location %s") %
                                   _sanitize(loc.get_uri()))

    msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
            "access_key=%(access_key)s, bucket=%(bucket)s, "
            "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                     'access_key': self.access_key,
                                     'bucket': self.bucket,
                                     'obj_name': obj_name})
    LOG.debug(msg)

    key = bucket_obj.new_key(obj_name)

    # We need to wrap image_file, which is a reference to the
    # webob.Request.body_file, with a seekable file-like object,
    # otherwise the call to set_contents_from_file() will die
    # with an error about Input object has no method 'seek'. We
    # might want to call webob.Request.make_body_seekable(), but
    # unfortunately, that method copies the entire image into
    # memory and results in LP Bug #818292 occurring. So, here
    # we write temporary file in as memory-efficient manner as
    # possible and then supply the temporary file to S3. We also
    # take this opportunity to calculate the image checksum while
    # writing the tempfile, so we don't need to call key.compute_md5()

    msg = _("Writing request body file to temporary file "
            "for %s") % _sanitize(loc.get_uri())
    LOG.debug(msg)

    tmpdir = self.s3_store_object_buffer_dir
    temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
    checksum = hashlib.md5()
    for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
        checksum.update(chunk)
        temp_file.write(chunk)
    temp_file.flush()

    msg = (_("Uploading temporary file to S3 for %s") %
           _sanitize(loc.get_uri()))
    LOG.debug(msg)

    # OK, now upload the data into the key.
    # Bug fix: the original opened the temp file without ever closing
    # the handle, leaking a file descriptor; a ``with`` block closes it
    # deterministically.
    with open(temp_file.name, 'r+b') as upload_fh:
        key.set_contents_from_file(upload_fh, replace=False)
    size = key.size
    checksum_hex = checksum.hexdigest()

    LOG.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                "with checksum %(checksum_hex)s"),
              {'size': size,
               'obj_name': obj_name,
               'checksum_hex': checksum_hex})

    return (loc.get_uri(), size, checksum_hex, {})
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
        verifier=None):
    """Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param hashing_algo: A hashlib algorithm identifier (string)
    :param context: A context object
    :param verifier: An object used to verify signatures for images
    :returns: tuple of: (1) URL in backing store, (2) bytes written,
              (3) checksum, (4) multihash value, and (5) a dictionary
              with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             exists
    :raises: `glance.common.exceptions.UnexpectedStatus` if the upload
             request returned an unexpected status. The expected
             responses are 201 Created and 200 OK.
    """
    ds = self.select_datastore(image_size)
    # _Reader tracks size, checksum and multihash as data streams through.
    image_file = _Reader(image_file, hashing_algo, verifier)
    headers = {}
    if image_size > 0:
        headers.update({'Content-Length': six.text_type(image_size)})
        data = image_file
    else:
        # Unknown size: stream in CHUNKSIZE pieces (chunked transfer).
        data = utils.chunkiter(image_file, CHUNKSIZE)
    loc = StoreLocation({'scheme': self.scheme,
                         'server_host': self.server_host,
                         'image_dir': self.store_image_dir,
                         'datacenter_path': ds.datacenter.path,
                         'datastore_name': ds.name,
                         'image_id': image_id}, self.conf,
                        backend_group=self.backend_group)
    # NOTE(arnaud): use a decorator when the config is not tied to self
    cookie = self._build_vim_cookie_header(True)
    headers = dict(headers)
    headers.update({'Cookie': cookie})
    session = new_session(self.api_insecure, self.ca_file)

    url = loc.https_url
    try:
        response = session.put(url, data=data, headers=headers)
    except IOError as e:
        # TODO(sigmavirus24): Figure out what the new exception type would
        # be in requests.
        # When a session is not authenticated, the socket is closed by
        # the server after sending the response. http_client has an open
        # issue with https that raises Broken Pipe
        # error instead of returning the response.
        # See http://bugs.python.org/issue16062. Here, we log the error
        # and continue to look into the response.
        msg = _LE('Communication error sending http %(method)s request '
                  'to the url %(url)s.\n'
                  'Got IOError %(e)s') % {'method': 'PUT',
                                          'url': url,
                                          'e': e}
        LOG.error(msg)
        raise exceptions.BackendException(msg)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to upload content of image '
                              '%(image)s'), {'image': image_id})

    # Inspect the underlying urllib3 response for the raw status code.
    res = response.raw
    if res.status == requests.codes.conflict:
        raise exceptions.Duplicate(_("Image file %(image_id)s already "
                                     "exists!") %
                                   {'image_id': image_id})

    if res.status not in (requests.codes.created, requests.codes.ok):
        msg = (_LE('Failed to upload content of image %(image)s. '
                   'The request returned an unexpected status: %(status)s.'
                   '\nThe response body:\n%(body)s') %
               {'image': image_id,
                'status': res.status,
                'body': getattr(res, 'body', None)})
        LOG.error(msg)
        raise exceptions.BackendException(msg)

    metadata = {}
    if self.backend_group:
        metadata['backend'] = u"%s" % self.backend_group

    return (loc.get_uri(), image_file.size,
            image_file.checksum.hexdigest(),
            image_file.os_hash_value.hexdigest(), metadata)
def add(self, image_id, image_file, image_size, connection=None,
        context=None):
    """Store an image in Swift, as a single object or as a segmented
    (chunked) object plus a manifest.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data in bytes; 0 means the
                       size is unknown and forces a segmented upload
    :param connection: An optional pre-established Swift connection
    :param context: A context object
    :returns: tuple of URL in backing store, bytes written, checksum
              and a dictionary with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             existed in Swift
    """
    location = self.create_location(image_id, context=context)
    if not connection:
        connection = self.get_connection(location, context=context)
    self._create_container_if_missing(location.container, connection)
    LOG.debug("Adding image object '%(obj_name)s' "
              "to Swift" % dict(obj_name=location.obj))
    try:
        if image_size > 0 and image_size < self.large_object_size:
            # Image size is known, and is less than large_object_size.
            # Send to Swift with regular PUT.
            obj_etag = connection.put_object(location.container,
                                             location.obj, image_file,
                                             content_length=image_size)
        else:
            # Write the image into Swift in chunks.
            chunk_id = 1
            if image_size > 0:
                total_chunks = str(int(
                    math.ceil(float(image_size) /
                              float(self.large_object_chunk_size))))
            else:
                # image_size == 0 is when we don't know the size
                # of the image. This can occur with older clients
                # that don't inspect the payload size.
                LOG.debug("Cannot determine image size. Adding as a "
                          "segmented object to Swift.")
                total_chunks = '?'

            checksum = hashlib.md5()
            written_chunks = []
            combined_chunks_size = 0
            while True:
                chunk_size = self.large_object_chunk_size
                if image_size == 0:
                    # Unknown size: keep uploading until a zero-length
                    # read signals end of stream.
                    content_length = None
                else:
                    left = image_size - combined_chunks_size
                    if left == 0:
                        break
                    if chunk_size > left:
                        chunk_size = left
                    content_length = chunk_size

                chunk_name = "%s-%05d" % (location.obj, chunk_id)
                reader = ChunkReader(image_file, checksum, chunk_size)
                try:
                    chunk_etag = connection.put_object(
                        location.container, chunk_name, reader,
                        content_length=content_length)
                    written_chunks.append(chunk_name)
                except Exception:
                    # Delete orphaned segments from swift backend
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Error during chunked upload to "
                                        "backend, deleting stale "
                                        "chunks"))
                        self._delete_stale_chunks(connection,
                                                  location.container,
                                                  written_chunks)

                bytes_read = reader.bytes_read
                msg = ("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                       "%(total_chunks)s) of length %(bytes_read)d "
                       "to Swift returning MD5 of content: "
                       "%(chunk_etag)s" %
                       {'chunk_name': chunk_name,
                        'chunk_id': chunk_id,
                        'total_chunks': total_chunks,
                        'bytes_read': bytes_read,
                        'chunk_etag': chunk_etag})
                LOG.debug(msg)

                if bytes_read == 0:
                    # Delete the last chunk, because it's of zero size.
                    # This will happen if size == 0.
                    LOG.debug("Deleting final zero-length chunk")
                    connection.delete_object(location.container,
                                             chunk_name)
                    break

                chunk_id += 1
                combined_chunks_size += bytes_read

            # In the case we have been given an unknown image size,
            # set the size to the total size of the combined chunks.
            if image_size == 0:
                image_size = combined_chunks_size

            # Now we write the object manifest and return the
            # manifest's etag...
            manifest = "%s/%s-" % (location.container, location.obj)
            headers = {'ETag': hashlib.md5("").hexdigest(),
                       'X-Object-Manifest': manifest}

            # The ETag returned for the manifest is actually the
            # MD5 hash of the concatenated checksums of the strings
            # of each chunk...so we ignore this result in favour of
            # the MD5 of the entire image file contents, so that
            # users can verify the image file contents accordingly
            connection.put_object(location.container, location.obj,
                                  None, headers=headers)
            obj_etag = checksum.hexdigest()

        # NOTE: We return the user and key here! Have to because
        # location is used by the API server to return the actual
        # image data. We *really* should consider NOT returning
        # the location attribute from GET /images/<ID> and
        # GET /images/details
        if sutils.is_multiple_swift_store_accounts_enabled(self.conf):
            include_creds = False
        else:
            include_creds = True

        return (location.get_uri(credentials_included=include_creds),
                image_size, obj_etag, {})
    except swiftclient.ClientException as e:
        if e.http_status == httplib.CONFLICT:
            msg = _("Swift already has an image at this location")
            raise exceptions.Duplicate(message=msg)

        msg = (_(u"Failed to add object to Swift.\n"
                 "Got error from Swift: %s.")
               % cutils.exception_to_str(e))
        LOG.error(msg)
        raise glance_store.BackendException(msg)
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
        verifier=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param hashing_algo: A hashlib algorithm identifier (string)
    :param context: A context object
    :param verifier: An object used to verify signatures for images
    :returns: tuple of: (1) URL in backing store, (2) bytes written,
              (3) checksum, (4) multihash value, and (5) a dictionary
              with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             exists
    """
    os_hash_value = utils.get_hasher(hashing_algo, False)
    checksum = utils.get_hasher('md5', False)
    image_name = str(image_id)
    with self.get_connection(conffile=self.conf_file,
                             rados_id=self.user) as conn:
        fsid = None
        if hasattr(conn, 'get_fsid'):
            # Librados's get_fsid is represented as binary
            # in py3 instead of str as it is in py2.
            # This is causing problems with ceph.
            # Decode binary to str fixes these issues.
            # Fix with encodeutils.safe_decode CAN BE REMOVED
            # after librados's fix will be stable.
            #
            # More informations:
            # https://bugs.launchpad.net/glance-store/+bug/1816721
            # https://bugs.launchpad.net/cinder/+bug/1816468
            # https://tracker.ceph.com/issues/38381
            fsid = encodeutils.safe_decode(conn.get_fsid())
        with conn.open_ioctx(self.pool) as ioctx:
            # RBD object order: chunk size is 2**order bytes.
            order = int(math.log(self.WRITE_CHUNKSIZE, 2))
            LOG.debug('creating image %s with order %d and size %d',
                      image_name, order, image_size)
            if image_size == 0:
                LOG.warning(_LW("Since image size is zero we will be "
                                "doing resize-before-write which will be "
                                "slower than normal"))

            try:
                loc = self._create_image(fsid, conn, ioctx, image_name,
                                         image_size, order)
            except rbd.ImageExists:
                msg = _('RBD image %s already exists') % image_id
                raise exceptions.Duplicate(message=msg)

            try:
                with rbd.Image(ioctx, image_name) as image:
                    bytes_written = 0
                    offset = 0
                    chunks = utils.chunkreadable(image_file,
                                                 self.WRITE_CHUNKSIZE)
                    for chunk in chunks:
                        # NOTE(jokke): If we don't know image size we need
                        # to resize it on write. The resize amount will
                        # ramp up to 8 gigs.
                        chunk_length = len(chunk)
                        self.size = self._resize_on_write(image,
                                                          image_size,
                                                          bytes_written,
                                                          chunk_length)
                        bytes_written += chunk_length
                        # Skip writing a chunk that is all zero bytes when
                        # thin provisioning is enabled.
                        if not (self.thin_provisioning and not any(chunk)):
                            image.write(chunk, offset)
                        offset += chunk_length
                        os_hash_value.update(chunk)
                        checksum.update(chunk)
                        if verifier:
                            verifier.update(chunk)

                    # Lets trim the image in case we overshoot with resize
                    if image_size == 0:
                        image.resize(bytes_written)

                    if loc.snapshot:
                        image.create_snap(loc.snapshot)
                        image.protect_snap(loc.snapshot)
            except rbd.NoSpace:
                log_msg = (_LE("Failed to store image %(img_name)s "
                               "insufficient space available") %
                           {'img_name': image_name})
                LOG.error(log_msg)

                # Delete image if one was created
                try:
                    target_pool = loc.pool or self.pool
                    self._delete_image(target_pool, loc.image,
                                       loc.snapshot)
                except exceptions.NotFound:
                    pass

                raise exceptions.StorageFull(message=log_msg)
            except Exception as exc:
                log_msg = (_LE("Failed to store image %(img_name)s "
                               "Store Exception %(store_exc)s") %
                           {'img_name': image_name,
                            'store_exc': exc})
                LOG.error(log_msg)

                # Delete image if one was created
                try:
                    target_pool = loc.pool or self.pool
                    self._delete_image(target_pool, loc.image,
                                       loc.snapshot)
                except exceptions.NotFound:
                    pass

                raise exc

    # Make sure we send back the image size whether provided or inferred.
    if image_size == 0:
        image_size = bytes_written

    # Add store backend information to location metadata
    metadata = {}
    if self.backend_group:
        metadata['store'] = u"%s" % self.backend_group

    return (loc.get_uri(), image_size, checksum.hexdigest(),
            os_hash_value.hexdigest(), metadata)
def add(self, image_id, image_file, image_size, context=None):
    """Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :retval tuple of URL in backing store, bytes written, checksum
            and a dictionary with storage system specific information
    :raises `glance.common.exceptions.Duplicate` if the image already
            existed
            `glance.common.exceptions.UnexpectedStatus` if the upload
            request returned an unexpected status. The expected
            responses are 201 Created and 200 OK.
    """
    if image_size > 0:
        headers = {'Content-Length': image_size}
        image_file = _Reader(image_file)
    else:
        # NOTE (arnaud): use chunk encoding when the image is still being
        # generated by the server (ex: stream optimized disks generated by
        # Nova).
        headers = {'Transfer-Encoding': 'chunked'}
        image_file = _ChunkReader(image_file)
    loc = StoreLocation({'scheme': self.scheme,
                         'server_host': self.server_host,
                         'image_dir': self.store_image_dir,
                         'datacenter_path': self.datacenter_path,
                         'datastore_name': self.datastore_name,
                         'image_id': image_id}, self.conf)
    # NOTE(arnaud): use a decorator when the config is not tied to self
    for i in range(self.api_retry_count + 1):
        cookie = self._build_vim_cookie_header(
            self._session.vim.client.options.transport.cookiejar)
        # Bug fix: the original used
        # ``dict(headers.items() + {'Cookie': cookie}.items())`` which
        # only works on Python 2 (dict views cannot be added on
        # Python 3). Copy-and-update is equivalent on both.
        headers = dict(headers)
        headers['Cookie'] = cookie
        try:
            conn = self._get_http_conn('PUT', loc, headers,
                                       content=image_file)
            res = conn.getresponse()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to upload content of image '
                                  '%(image)s'), {'image': image_id})

        if res.status == httplib.UNAUTHORIZED:
            # Session expired: re-authenticate, rewind the payload and
            # retry the upload.
            self._create_session()
            image_file.rewind()
            continue

        if res.status == httplib.CONFLICT:
            raise exceptions.Duplicate(_("Image file %(image_id)s already "
                                         "exists!") %
                                       {'image_id': image_id})

        if res.status not in (httplib.CREATED, httplib.OK):
            msg = (_LE('Failed to upload content of image %(image)s') %
                   {'image': image_id})
            LOG.error(msg)
            raise exceptions.UnexpectedStatus(status=res.status,
                                              body=res.read())
        break

    return (loc.get_uri(), image_file.size,
            image_file.checksum.hexdigest(), {})
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
        verifier=None):
    """
    Stores an image file with supplied identifier to the backend
    storage system and returns a tuple containing information
    about the stored image.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param hashing_algo: A hashlib algorithm identifier (string)
    :param context: A context object
    :param verifier: An object used to verify signatures for images
    :returns: tuple of: (1) URL in backing store, (2) bytes written,
              (3) checksum, (4) multihash value, and (5) a dictionary
              with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             exists
    """
    checksum = hashlib.md5()
    os_hash_value = hashlib.new(str(hashing_algo))
    image_name = str(image_id)
    with self.get_connection(conffile=self.conf_file,
                             rados_id=self.user) as conn:
        fsid = None
        if hasattr(conn, 'get_fsid'):
            fsid = conn.get_fsid()
        with conn.open_ioctx(self.pool) as ioctx:
            order = int(math.log(self.WRITE_CHUNKSIZE, 2))
            LOG.debug('creating image %s with order %d and size %d',
                      image_name, order, image_size)
            if image_size == 0:
                LOG.warning(_("since image size is zero we will be doing "
                              "resize-before-write for each chunk which "
                              "will be considerably slower than normal"))

            try:
                loc = self._create_image(fsid, conn, ioctx, image_name,
                                         image_size, order)
            except rbd.ImageExists:
                msg = _('RBD image %s already exists') % image_id
                raise exceptions.Duplicate(message=msg)

            try:
                with rbd.Image(ioctx, image_name) as image:
                    bytes_written = 0
                    offset = 0
                    chunks = utils.chunkreadable(image_file,
                                                 self.WRITE_CHUNKSIZE)
                    for chunk in chunks:
                        # If the image size provided is zero we need to do
                        # a resize for the amount we are writing. This will
                        # be slower so setting a higher chunk size may
                        # speed things up a bit.
                        if image_size == 0:
                            chunk_length = len(chunk)
                            length = offset + chunk_length
                            bytes_written += chunk_length
                            # NOTE: debug messages use lazy %-args and no
                            # translation, matching the 'creating image'
                            # call above (debug logs are not translated).
                            LOG.debug("resizing image to %s KiB",
                                      length / units.Ki)
                            image.resize(length)
                        LOG.debug("writing chunk at offset %s", offset)
                        offset += image.write(chunk, offset)
                        os_hash_value.update(chunk)
                        checksum.update(chunk)
                        if verifier:
                            verifier.update(chunk)
                    if loc.snapshot:
                        image.create_snap(loc.snapshot)
                        image.protect_snap(loc.snapshot)
            except Exception as exc:
                log_msg = (_LE("Failed to store image %(img_name)s "
                               "Store Exception %(store_exc)s") %
                           {'img_name': image_name,
                            'store_exc': exc})
                LOG.error(log_msg)

                # Delete image if one was created
                try:
                    target_pool = loc.pool or self.pool
                    self._delete_image(target_pool, loc.image,
                                       loc.snapshot)
                except exceptions.NotFound:
                    pass

                # Bare 'raise' re-raises the active exception without
                # truncating the original traceback ('raise exc' did).
                raise

    # Make sure we send back the image size whether provided or inferred.
    if image_size == 0:
        image_size = bytes_written

    # Add store backend information to location metadata
    metadata = {}
    if self.backend_group:
        metadata['backend'] = u"%s" % self.backend_group

    return (loc.get_uri(),
            image_size,
            checksum.hexdigest(),
            os_hash_value.hexdigest(),
            metadata)
def add(self, image_id, image_file, image_size, context=None): """ Stores an image file with supplied identifier to the backend storage system and returns a tuple containing information about the stored image. :param image_id: The opaque image identifier :param image_file: The image data to write, as a file-like object :param image_size: The size of the image data to write, in bytes :retval tuple of URL in backing store, bytes written, checksum and a dictionary with storage system specific information :raises `glance_store.exceptions.Duplicate` if the image already existed S3 writes the image data using the scheme: s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ> where: <USER> = ``s3_store_user`` <KEY> = ``s3_store_key`` <S3_HOST> = ``s3_store_host`` <BUCKET> = ``s3_store_bucket`` <ID> = The id of the image being added """ loc = StoreLocation( { 'scheme': self.scheme, 'bucket': self.bucket, 'key': image_id, 's3serviceurl': self.full_s3_host, 'accesskey': self.access_key, 'secretkey': self.secret_key }, self.conf) s3_conn = self._create_connection(loc) create_bucket_if_missing(self.conf, self.bucket, s3_conn) bucket_obj = get_bucket(s3_conn, self.bucket) obj_name = str(image_id) key = bucket_obj.get_key(obj_name) if key and key.exists(): raise exceptions.Duplicate(message=_("S3 already has an image at " "location %s") % self._sanitize(loc.get_uri())) msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, " "access_key=%(access_key)s, bucket=%(bucket)s, " "key=%(obj_name)s)") % ({ 's3_host': self.s3_host, 'access_key': self.access_key, 'bucket': self.bucket, 'obj_name': obj_name }) LOG.debug(msg) LOG.debug("Uploading an image file to S3 for %s" % self._sanitize(loc.get_uri())) if image_size < self.s3_store_large_object_size: return self.add_singlepart(image_file, bucket_obj, obj_name, loc) else: return self.add_multipart(image_file, image_size, bucket_obj, obj_name, loc)
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
        verifier=None):
    """
    Write an image out to the local filesystem backend.

    The data is stored at ``/<DATADIR>/<ID>`` where ``<DATADIR>`` is the
    value of the ``filesystem_store_datadir`` configuration option and
    ``<ID>`` is the supplied image ID.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param hashing_algo: A hashlib algorithm identifier (string)
    :param context: The request context
    :param verifier: An object used to verify signatures for images
    :returns: tuple of: (1) URL in backing store, (2) bytes written,
              (3) checksum, (4) multihash value, and (5) a dictionary
              with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             exists
    """
    datadir = self._find_best_datadir(image_size)
    filepath = os.path.join(datadir, str(image_id))

    if os.path.exists(filepath):
        raise exceptions.Duplicate(image=filepath)

    hasher = hashlib.new(str(hashing_algo))
    md5sum = hashlib.md5()
    total = 0
    try:
        with open(filepath, 'wb') as out:
            chunks = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
            for piece in chunks:
                total += len(piece)
                hasher.update(piece)
                md5sum.update(piece)
                if verifier:
                    verifier.update(piece)
                out.write(piece)
    except IOError as ioe:
        # On EACCES nothing was written by us, so there is no partial
        # file to clean up; every other errno may leave a fragment.
        if ioe.errno != errno.EACCES:
            self._delete_partial(filepath, image_id)
        if ioe.errno in (errno.EFBIG, errno.ENOSPC):
            raise exceptions.StorageFull()
        if ioe.errno == errno.EACCES:
            raise exceptions.StorageWriteDenied()
        raise ioe
    except Exception:
        with excutils.save_and_reraise_exception():
            self._delete_partial(filepath, image_id)

    hash_hex = hasher.hexdigest()
    checksum_hex = md5sum.hexdigest()
    metadata = self._get_metadata(filepath)

    LOG.debug(("Wrote %(bytes_written)d bytes to %(filepath)s with "
               "checksum %(checksum_hex)s and multihash %(hash_hex)s"),
              {'bytes_written': total,
               'filepath': filepath,
               'checksum_hex': checksum_hex,
               'hash_hex': hash_hex})

    # Resolve the configured file permission; multi-store backends keep
    # their options under a named group.
    if self.backend_group:
        fstore_perm = getattr(
            self.conf, self.backend_group).filesystem_store_file_perm
    else:
        fstore_perm = self.conf.glance_store.filesystem_store_file_perm

    if fstore_perm > 0:
        # The option value is an octal literal expressed as an int.
        try:
            os.chmod(filepath, int(str(fstore_perm), 8))
        except (IOError, OSError):
            LOG.warning(_LW("Unable to set permission to image: %s") %
                        filepath)

    # Add store backend information to location metadata
    if self.backend_group:
        metadata['backend'] = u"%s" % self.backend_group

    return ('file://%s' % filepath, total, checksum_hex, hash_hex,
            metadata)
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
        verifier=None):
    """
    Store an image in the S3 (boto3) backend and describe where it went.

    :param image_id: The opaque image identifier
    :param image_file: The image data to write, as a file-like object
    :param image_size: The size of the image data to write, in bytes
    :param hashing_algo: A hashlib algorithm identifier (string)
    :param context: A context object
    :param verifier: An object used to verify signatures for images
    :returns: tuple of: (1) URL in backing store, (2) bytes written,
              (3) checksum, (4) multihash value, and (5) a dictionary
              with storage system specific information
    :raises: `glance_store.exceptions.Duplicate` if the image already
             exists
    """
    location = StoreLocation(store_specs={'scheme': self.scheme,
                                          'bucket': self.bucket,
                                          'key': image_id,
                                          's3serviceurl': self.full_s3_host,
                                          'accesskey': self.access_key,
                                          'secretkey': self.secret_key},
                             conf=self.conf,
                             backend_group=self.backend_group)

    s3_client, bucket, key = self._operation_set(location)

    if not self._bucket_exists(s3_client, bucket):
        if not self._option_get('s3_store_create_bucket_on_put'):
            msg = (_("The bucket %s does not exist in "
                     "S3. Please set the "
                     "s3_store_create_bucket_on_put option "
                     "to add bucket to S3 automatically.") % bucket)
            raise glance_store.BackendException(msg)
        self._create_bucket(s3_client,
                            self._option_get('s3_store_host'),
                            bucket)

    LOG.debug("Adding image object to S3 using (s3_host=%(s3_host)s, "
              "access_key=%(access_key)s, bucket=%(bucket)s, "
              "key=%(key)s)",
              {'s3_host': self.s3_host,
               'access_key': location.accesskey,
               'bucket': bucket,
               'key': key})

    # Refuse to overwrite an existing object.
    if self._object_exists(s3_client, bucket, key):
        LOG.warning("S3 already has an image with bucket ID %(bucket)s, "
                    "key %(key)s",
                    {'bucket': bucket,
                     'key': key})
        raise exceptions.Duplicate(image=key)

    # Single PUT below the large-object threshold, multipart otherwise.
    if image_size < self.s3_store_large_object_size:
        return self._add_singlepart(s3_client=s3_client,
                                    image_file=image_file,
                                    bucket=bucket,
                                    key=key,
                                    loc=location,
                                    hashing_algo=hashing_algo,
                                    verifier=verifier)
    return self._add_multipart(s3_client=s3_client,
                               image_file=image_file,
                               image_size=image_size,
                               bucket=bucket,
                               key=key,
                               loc=location,
                               hashing_algo=hashing_algo,
                               verifier=verifier)