Example #1
    def add_singlepart(self, image_file, bucket_obj, obj_name, loc, verifier):
        """
        Stores an image file with a single part upload to S3 backend

        :param image_file: The image data to write, as a file-like object
        :param bucket_obj: S3 bucket object
        :param obj_name: The object name to be stored (image identifier)
        :param loc: The Store Location Info
        :param verifier: An object used to verify signatures for images
        """

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object;
        # otherwise the call to set_contents_from_file() will die
        # with an error about the Input object having no 'seek' method.
        # We might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write a temporary file in as memory-efficient a manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()

        msg = ("Writing request body file to temporary file "
               "for %s") % self._sanitize(loc.get_uri())
        LOG.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)
            temp_file.write(chunk)
        temp_file.flush()

        msg = ("Uploading temporary file to S3 "
               "for %s") % self._sanitize(loc.get_uri())
        LOG.debug(msg)

        # OK, now upload the data into the key
        with open(temp_file.name, 'rb') as tmp_fp:
            key.set_contents_from_file(tmp_fp, replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        LOG.debug("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                  "with checksum %(checksum_hex)s" %
                  {'size': size,
                   'obj_name': obj_name,
                   'checksum_hex': checksum_hex})

        return (loc.get_uri(), size, checksum_hex, {})
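The buffer-to-tempfile-while-hashing pattern above is self-contained enough to sketch in isolation. A minimal version, assuming only the standard library; the 64 KiB chunk size and the ``io.BytesIO`` stand-in for webob's non-seekable body file are illustrative choices, not glance_store values:

    import hashlib
    import io
    import tempfile

    def buffer_with_checksum(source, chunk_size=64 * 1024, tmpdir=None):
        """Spool ``source`` to a seekable temp file, hashing as we go."""
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        while True:
            chunk = source.read(chunk_size)
            if not chunk:
                break
            checksum.update(chunk)   # hash and write in a single pass
            temp_file.write(chunk)
        temp_file.flush()
        temp_file.seek(0)            # now seekable, ready for the uploader
        return temp_file, checksum.hexdigest()

    # Usage with an in-memory stand-in for webob.Request.body_file:
    tmp, md5_hex = buffer_with_checksum(io.BytesIO(b"image data"))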
Example #2
    def add(self, image_id, image_file, image_size, context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param verifier: An object used to verify signatures for images

        :retval: tuple of URL in backing store, bytes written, checksum,
                 and a dictionary with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                existed
        """

        image = SheepdogImage(self.addr, self.port, image_id,
                              self.WRITE_CHUNKSIZE)
        if image.exist():
            raise exceptions.Duplicate(_("Sheepdog image %s already exists")
                                       % image_id)

        location = StoreLocation({
            'image': image_id,
            'addr': self.addr,
            'port': self.port
        }, self.conf)

        image.create(image_size)

        try:
            offset = 0
            checksum = hashlib.md5()
            chunks = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
            for chunk in chunks:
                chunk_length = len(chunk)
                # If the image size provided is zero we need to do
                # a resize for the amount we are writing. This will
                # be slower so setting a higher chunk size may
                # speed things up a bit.
                if image_size == 0:
                    image.resize(offset + chunk_length)
                image.write(chunk, offset, chunk_length)
                offset += chunk_length
                checksum.update(chunk)
                if verifier:
                    verifier.update(chunk)
        except Exception:
            # Note(zhiyan): clean up already-received data when an error
            # such as ImageSizeLimitExceeded occurs.
            with excutils.save_and_reraise_exception():
                image.delete()

        return (location.get_uri(), offset, checksum.hexdigest(), {})
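The resize-before-write bookkeeping above (grow the target to ``offset + chunk_length``, write, advance the offset) can be exercised against a plain in-memory file. A sketch under that assumption; all names here are illustrative, not part of glance_store:

    import hashlib
    import io

    def write_unknown_size(dst, src, chunk_size=4096):
        """Copy ``src`` to ``dst`` when the total size is not known."""
        offset = 0
        checksum = hashlib.md5()
        for chunk in iter(lambda: src.read(chunk_size), b''):
            dst.truncate(offset + len(chunk))  # the "resize" step
            dst.seek(offset)
            dst.write(chunk)
            offset += len(chunk)
            checksum.update(chunk)
        return offset, checksum.hexdigest()

    size, md5_hex = write_unknown_size(io.BytesIO(), io.BytesIO(b'x' * 10000))
    assert size == 10000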
Example #3
    def add_singlepart(self, image_file, bucket_obj, obj_name, loc):
        """
        Stores an image file with a single part upload to S3 backend

        :param image_file: The image data to write, as a file-like object
        :param bucket_obj: S3 bucket object
        :param obj_name: The object name to be stored (image identifier)
        :param loc: The Store Location Info
        """

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object;
        # otherwise the call to set_contents_from_file() will die
        # with an error about the Input object having no 'seek' method.
        # We might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write a temporary file in as memory-efficient a manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()

        msg = ("Writing request body file to temporary file "
               "for %s") % self._sanitize(loc.get_uri())
        LOG.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
            checksum.update(chunk)
            temp_file.write(chunk)
        temp_file.flush()

        msg = ("Uploading temporary file to S3 "
               "for %s") % self._sanitize(loc.get_uri())
        LOG.debug(msg)

        # OK, now upload the data into the key
        with open(temp_file.name, 'rb') as tmp_fp:
            key.set_contents_from_file(tmp_fp, replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        LOG.debug("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                  "with checksum %(checksum_hex)s" % {
                      'size': size,
                      'obj_name': obj_name,
                      'checksum_hex': checksum_hex
                  })

        return (loc.get_uri(), size, checksum_hex, {})
Example #4
    def _add_singlepart(self, s3_client, image_file, bucket, key, loc,
                        hashing_algo, verifier):
        """Stores an image file with a single part upload to S3 backend.

        :param s3_client: An object with credentials to connect to S3
        :param image_file: The image data to write, as a file-like object
        :param bucket: S3 bucket name
        :param key: The object name to be stored (image identifier)
        :param loc: `glance_store.location.Location` object, supplied
                    from glance_store.location.get_location_from_uri()
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param verifier: An object used to verify signatures for images
        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        """
        os_hash_value = utils.get_hasher(hashing_algo, False)
        checksum = utils.get_hasher('md5', False)
        image_data = b''
        image_size = 0
        for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
            image_data += chunk
            image_size += len(chunk)
            os_hash_value.update(chunk)
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)

        s3_client.put_object(Body=image_data, Bucket=bucket, Key=key)
        hash_hex = os_hash_value.hexdigest()
        checksum_hex = checksum.hexdigest()

        # Add store backend information to location metadata
        metadata = {}
        if self.backend_group:
            metadata['store'] = self.backend_group

        LOG.debug(
            "Wrote %(size)d bytes to S3 key named %(key)s "
            "with checksum %(checksum)s", {
                'size': image_size,
                'key': key,
                'checksum': checksum_hex
            })

        return loc.get_uri(), image_size, checksum_hex, hash_hex, metadata
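Note that this single-part path accumulates the entire object in ``image_data`` before calling ``put_object``, so memory usage scales with image size. The multi-hash pass itself is easy to isolate; a sketch assuming only hashlib, where ``verifier`` is anything exposing ``update(bytes)`` (a hashlib object serves as a stand-in):

    import hashlib

    def hash_chunks(chunks, hashing_algo='sha256', verifier=None):
        """Feed every chunk to all hashers in one pass over the data."""
        os_hash_value = hashlib.new(hashing_algo)
        checksum = hashlib.md5()
        size = 0
        for chunk in chunks:
            size += len(chunk)
            os_hash_value.update(chunk)
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)
        return size, checksum.hexdigest(), os_hash_value.hexdigest()

    size, md5_hex, sha_hex = hash_chunks([b'abc', b'def'],
                                         verifier=hashlib.sha1())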
Example #5
    def add_image_file(self, full_data_path, image_file):
        """
        Add image file or return exception
        """

        try:
            LOG.debug("attempting to create image file in irods '%s'" %
                      full_data_path)
            file_object = self.irods_conn_object.data_objects.create(
                full_data_path)
        except Exception:
            LOG.error("file with same name exists in the same path")
            raise exceptions.Duplicate(
                _("image file %s already exists or no perms") %
                full_data_path)

        LOG.debug("performing the write")
        checksum = hashlib.md5()
        bytes_written = 0

        try:
            with file_object.open('r+') as f:
                for buf in utils.chunkreadable(image_file,
                                               ChunkedFile.default_chunk_size):
                    bytes_written += len(buf)
                    checksum.update(buf)
                    f.write(buf)
        except Exception as e:
            # let's attempt a delete of the partial write
            file_object.unlink()
            reason = _('partial write, transfer failed')
            LOG.error(e)
            raise exceptions.StorageWriteDenied(reason)
        finally:
            # NOTE: do not return from this finally block; a return here
            # would silently swallow the StorageWriteDenied raised above.
            self.irods_conn_object.cleanup()
            file_object = None

        checksum_hex = checksum.hexdigest()
        LOG.debug("Wrote %(bytes_written)d bytes to %(full_data_path)s, "
                  "checksum = %(checksum_hex)s",
                  {'bytes_written': bytes_written,
                   'full_data_path': full_data_path,
                   'checksum_hex': checksum_hex})
        return [bytes_written, checksum_hex]
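The version of this method as originally published returned from inside the ``finally`` block, which is why the NOTE above exists: a ``return`` in ``finally`` discards any in-flight exception, including the ``StorageWriteDenied`` raised in the ``except`` clause. A minimal demonstration of that Python behavior:

    def swallowed():
        try:
            raise RuntimeError('write failed')
        finally:
            return 'ok'  # discards the in-flight RuntimeError

    assert swallowed() == 'ok'  # no exception ever escapes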
Example #7
    def add(self, image_id, image_file, image_size, context=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed

        :note By default, the backend writes the image data to a file
              `/<DATADIR>/<ID>`, where <DATADIR> is the value of
              the filesystem_store_datadir configuration option and <ID>
              is the supplied image ID.
        """

        datadir = self._find_best_datadir(image_size)
        filepath = os.path.join(datadir, str(image_id))

        if os.path.exists(filepath):
            raise exceptions.Duplicate(image=filepath)

        checksum = hashlib.md5()
        bytes_written = 0
        try:
            with open(filepath, 'wb') as f:
                for buf in utils.chunkreadable(image_file,
                                               self.WRITE_CHUNKSIZE):
                    bytes_written += len(buf)
                    checksum.update(buf)
                    f.write(buf)
        except IOError as e:
            if e.errno != errno.EACCES:
                self._delete_partial(filepath, image_id)
            errors = {errno.EFBIG: exceptions.StorageFull(),
                      errno.ENOSPC: exceptions.StorageFull(),
                      errno.EACCES: exceptions.StorageWriteDenied()}
            raise errors.get(e.errno, e)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._delete_partial(filepath, image_id)

        checksum_hex = checksum.hexdigest()
        metadata = self._get_metadata(filepath)

        LOG.debug(_("Wrote %(bytes_written)d bytes to %(filepath)s with "
                    "checksum %(checksum_hex)s"),
                  {'bytes_written': bytes_written,
                   'filepath': filepath,
                   'checksum_hex': checksum_hex})

        if self.conf.glance_store.filesystem_store_file_perm > 0:
            perm = int(str(self.conf.glance_store.filesystem_store_file_perm),
                       8)
            try:
                os.chmod(filepath, perm)
            except (IOError, OSError):
                LOG.warning(_LW("Unable to set permissions on image: %s"),
                            filepath)

        return ('file://%s' % filepath, bytes_written, checksum_hex, metadata)
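The ``errno``-to-store-exception dispatch in the ``IOError`` handler above can be checked standalone. A sketch with stand-in exception classes, since ``glance_store.exceptions`` is not imported here:

    import errno

    class StorageFull(Exception):
        pass

    class StorageWriteDenied(Exception):
        pass

    def translate(e):
        """Map a filesystem error to a store exception, else reuse it."""
        errors = {errno.EFBIG: StorageFull(),
                  errno.ENOSPC: StorageFull(),
                  errno.EACCES: StorageWriteDenied()}
        return errors.get(e.errno, e)

    assert isinstance(translate(IOError(errno.ENOSPC, 'no space')),
                      StorageFull)
    assert isinstance(translate(IOError(errno.EPIPE, 'pipe')), IOError)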
Example #8
    def add(self, image_id, image_file, image_size, context=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed
        """
        checksum = hashlib.md5()
        image_name = str(image_id)
        with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
            fsid = None
            if hasattr(conn, 'get_fsid'):
                fsid = conn.get_fsid()
            with conn.open_ioctx(self.pool) as ioctx:
                order = int(math.log(self.WRITE_CHUNKSIZE, 2))
                LOG.debug('creating image %s with order %d and size %d',
                          image_name, order, image_size)
                if image_size == 0:
                    LOG.warning(
                        _("since image size is zero we will be doing "
                          "resize-before-write for each chunk which "
                          "will be considerably slower than normal"))

                try:
                    loc = self._create_image(fsid, ioctx, image_name,
                                             image_size, order)
                except rbd.ImageExists:
                    raise exceptions.Duplicate(
                        message=_('RBD image %s already exists') % image_id)
                try:
                    with rbd.Image(ioctx, image_name) as image:
                        bytes_written = 0
                        offset = 0
                        chunks = utils.chunkreadable(image_file,
                                                     self.WRITE_CHUNKSIZE)
                        for chunk in chunks:
                            # If the image size provided is zero we need to do
                            # a resize for the amount we are writing. This will
                            # be slower so setting a higher chunk size may
                            # speed things up a bit.
                            if image_size == 0:
                                chunk_length = len(chunk)
                                length = offset + chunk_length
                                bytes_written += chunk_length
                                LOG.debug(
                                    _("resizing image to %s KiB") %
                                    (length / 1024))
                                image.resize(length)
                            LOG.debug(
                                _("writing chunk at offset %s") % (offset))
                            offset += image.write(chunk, offset)
                            checksum.update(chunk)
                        if loc.snapshot:
                            image.create_snap(loc.snapshot)
                            image.protect_snap(loc.snapshot)
                except Exception as exc:
                    # Delete image if one was created
                    try:
                        self._delete_image(loc.image, loc.snapshot)
                    except exceptions.NotFound:
                        pass

                    raise exc

        # Make sure we send back the image size whether provided or inferred.
        if image_size == 0:
            image_size = bytes_written

        return (loc.get_uri(), image_size, checksum.hexdigest(), {})
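RBD sizes its backing objects as a power of two: an ``order`` of N means objects of ``2**N`` bytes, which is why the code derives it as log base 2 of the write chunk size. A quick check, assuming an 8 MiB ``WRITE_CHUNKSIZE`` (the value is illustrative):

    import math

    WRITE_CHUNKSIZE = 8 * 1024 * 1024          # 8 MiB == 2**23, assumed here
    order = int(math.log(WRITE_CHUNKSIZE, 2))
    print(order, 2 ** order)                   # 23 8388608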
Example #9
    def add_multipart(self, image_file, image_size, bucket_obj, obj_name, loc):
        """
        Stores an image file with a multi part upload to S3 backend

        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param bucket_obj: S3 bucket object
        :param obj_name: The object name to be stored (image identifier)
        :param loc: The Store Location Info
        """

        checksum = hashlib.md5()
        pool_size = self.s3_store_thread_pools
        pool = eventlet.greenpool.GreenPool(size=pool_size)
        mpu = bucket_obj.initiate_multipart_upload(obj_name)
        LOG.debug("Multipart initiate key=%(obj_name)s, "
                  "UploadId=%(UploadId)s" % {
                      'obj_name': obj_name,
                      'UploadId': mpu.id
                  })
        cstart = 0
        plist = []

        chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
        write_chunk_size = max(self.s3_store_large_object_chunk_size,
                               chunk_size)
        it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        buffered_chunk = b''
        while True:
            try:
                buffered_clen = len(buffered_chunk)
                if buffered_clen < write_chunk_size:
                    # keep reading data
                    read_chunk = next(it)
                    buffered_chunk += read_chunk
                    continue
                else:
                    write_chunk = buffered_chunk[:write_chunk_size]
                    remained_data = buffered_chunk[write_chunk_size:]
                    checksum.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, part)
                    plist.append(part)
                    cstart += 1
                    buffered_chunk = remained_data
            except StopIteration:
                if len(buffered_chunk) > 0:
                    # Write the last chunk data
                    write_chunk = buffered_chunk
                    checksum.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, part)
                    plist.append(part)
                break

        pedict = {}
        total_size = 0
        pool.waitall()

        for part in plist:
            pedict.update(part.etag)
            total_size += part.size

        success = True
        for part in plist:
            if not part.success:
                success = False

        if success:
            # Complete
            xml = get_mpu_xml(pedict)
            bucket_obj.complete_multipart_upload(obj_name, mpu.id, xml)
            checksum_hex = checksum.hexdigest()
            LOG.info(
                _LI("Multipart complete key=%(obj_name)s "
                    "UploadId=%(UploadId)s "
                    "Wrote %(total_size)d bytes to S3 key"
                    "named %(obj_name)s "
                    "with checksum %(checksum_hex)s") % {
                        'obj_name': obj_name,
                        'UploadId': mpu.id,
                        'total_size': total_size,
                        'checksum_hex': checksum_hex
                    })
            return (loc.get_uri(), total_size, checksum_hex, {})
        else:
            # Abort
            bucket_obj.cancel_multipart_upload(obj_name, mpu.id)
            LOG.error(
                _LE("Some parts failed to upload to S3. "
                    "Aborted the object key=%(obj_name)s") %
                {'obj_name': obj_name})
            msg = (_("Failed to add image object to S3. "
                     "key=%(obj_name)s") % {
                         'obj_name': obj_name
                     })
            raise glance_store.BackendException(msg)
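The part sizing above follows from S3's limit of at most ``MAX_PART_NUM`` (10000) parts per multipart upload: each part must cover at least ``image_size / 10000`` bytes, with the configured large-object chunk size acting as a floor. A worked example; the 25 MiB floor is an assumed configuration value:

    import math

    MAX_PART_NUM = 10000
    s3_store_large_object_chunk_size = 25 * 1024 * 1024   # assumed config
    image_size = 500 * 1024 * 1024 * 1024                 # a 500 GiB image

    chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
    write_chunk_size = max(s3_store_large_object_chunk_size, chunk_size)
    print(write_chunk_size)  # 53687092 bytes (~51.2 MiB), above the floor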
Example #10
    def add(self, image_id, image_file, image_size, context=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_HOST>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_access_key``
            <SECRET_KEY> = ``s3_store_secret_key``
            <S3_HOST> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})

        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)

        create_bucket_if_missing(self.bucket, s3_conn)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        def _sanitize(uri):
            return re.sub('//.*:.*@',
                          '//s3_store_access_key:s3_store_secret_key@',
                          uri)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exceptions.Duplicate(message=_("S3 already has an image at "
                                                 "location %s") %
                                       _sanitize(loc.get_uri()))

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({'s3_host': self.s3_host,
                                         'access_key': self.access_key,
                                         'bucket': self.bucket,
                                         'obj_name': obj_name})
        LOG.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object;
        # otherwise the call to set_contents_from_file() will die
        # with an error about the Input object having no 'seek' method.
        # We might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write a temporary file in as memory-efficient a manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()

        msg = _("Writing request body file to temporary file "
                "for %s") % _sanitize(loc.get_uri())
        LOG.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
            checksum.update(chunk)
            temp_file.write(chunk)
        temp_file.flush()

        msg = (_("Uploading temporary file to S3 for %s") %
               _sanitize(loc.get_uri()))
        LOG.debug(msg)

        # OK, now upload the data into the key
        with open(temp_file.name, 'r+b') as tmp_fp:
            key.set_contents_from_file(tmp_fp, replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        LOG.debug(_("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                    "with checksum %(checksum_hex)s"),
                  {'size': size, 'obj_name': obj_name,
                   'checksum_hex': checksum_hex})

        return (loc.get_uri(), size, checksum_hex, {})
Example #11
    def add_multipart(self, image_file, image_size, bucket_obj, obj_name, loc):
        """
        Stores an image file with a multi part upload to S3 backend

        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param bucket_obj: S3 bucket object
        :param obj_name: The object name to be stored (image identifier)
        :param loc: The Store Location Info
        """

        checksum = hashlib.md5()
        pool_size = self.s3_store_thread_pools
        pool = eventlet.greenpool.GreenPool(size=pool_size)
        mpu = bucket_obj.initiate_multipart_upload(obj_name)
        LOG.debug("Multipart initiate key=%(obj_name)s, "
                  "UploadId=%(UploadId)s" %
                  {'obj_name': obj_name,
                   'UploadId': mpu.id})
        cstart = 0
        plist = []

        chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
        write_chunk_size = max(self.s3_store_large_object_chunk_size,
                               chunk_size)
        it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        buffered_chunk = b''
        while True:
            try:
                buffered_clen = len(buffered_chunk)
                if buffered_clen < write_chunk_size:
                    # keep reading data
                    read_chunk = next(it)
                    buffered_chunk += read_chunk
                    continue
                else:
                    write_chunk = buffered_chunk[:write_chunk_size]
                    remained_data = buffered_chunk[write_chunk_size:]
                    checksum.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, part)
                    plist.append(part)
                    cstart += 1
                    buffered_chunk = remained_data
            except StopIteration:
                if len(buffered_chunk) > 0:
                    # Write the last chunk data
                    write_chunk = buffered_chunk
                    checksum.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, part)
                    plist.append(part)
                break

        pedict = {}
        total_size = 0
        pool.waitall()

        for part in plist:
            pedict.update(part.etag)
            total_size += part.size

        success = True
        for part in plist:
            if not part.success:
                success = False

        if success:
            # Complete
            xml = get_mpu_xml(pedict)
            bucket_obj.complete_multipart_upload(obj_name,
                                                 mpu.id,
                                                 xml)
            checksum_hex = checksum.hexdigest()
            LOG.info(_LI("Multipart complete key=%(obj_name)s "
                         "UploadId=%(UploadId)s "
                         "Wrote %(total_size)d bytes to S3 key"
                         "named %(obj_name)s "
                         "with checksum %(checksum_hex)s") %
                     {'obj_name': obj_name,
                      'UploadId': mpu.id,
                      'total_size': total_size,
                      'checksum_hex': checksum_hex})
            return (loc.get_uri(), total_size, checksum_hex, {})
        else:
            # Abort
            bucket_obj.cancel_multipart_upload(obj_name, mpu.id)
            LOG.error(_LE("Some parts failed to upload to S3. "
                          "Aborted the object key=%(obj_name)s") %
                      {'obj_name': obj_name})
            msg = (_("Failed to add image object to S3. "
                     "key=%(obj_name)s") % {'obj_name': obj_name})
            raise glance_store.BackendException(msg)
Example #12
    def add(self,
            image_id,
            image_file,
            image_size,
            hashing_algo,
            context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param context: A context object
        :param verifier: An object used to verify signatures for images

        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                 exists
        """
        os_hash_value = utils.get_hasher(hashing_algo, False)
        checksum = utils.get_hasher('md5', False)
        image_name = str(image_id)
        with self.get_connection(conffile=self.conf_file,
                                 rados_id=self.user) as conn:
            fsid = None
            if hasattr(conn, 'get_fsid'):
                # Librados's get_fsid is represented as binary
                # in py3 instead of str as it is in py2.
                # This causes problems with ceph.
                # Decoding the binary to str fixes these issues.
                # The fix with encodeutils.safe_decode CAN BE REMOVED
                # once librados's fix is stable.
                #
                # More information:
                # https://bugs.launchpad.net/glance-store/+bug/1816721
                # https://bugs.launchpad.net/cinder/+bug/1816468
                # https://tracker.ceph.com/issues/38381
                fsid = encodeutils.safe_decode(conn.get_fsid())
            with conn.open_ioctx(self.pool) as ioctx:
                order = int(math.log(self.WRITE_CHUNKSIZE, 2))
                LOG.debug('creating image %s with order %d and size %d',
                          image_name, order, image_size)
                if image_size == 0:
                    LOG.warning(
                        _LW("Since image size is zero we will be "
                            "doing resize-before-write which will be "
                            "slower than normal"))

                try:
                    loc = self._create_image(fsid, conn, ioctx, image_name,
                                             image_size, order)
                except rbd.ImageExists:
                    msg = _('RBD image %s already exists') % image_id
                    raise exceptions.Duplicate(message=msg)

                try:
                    with rbd.Image(ioctx, image_name) as image:
                        bytes_written = 0
                        offset = 0
                        chunks = utils.chunkreadable(image_file,
                                                     self.WRITE_CHUNKSIZE)
                        for chunk in chunks:
                            # NOTE(jokke): If we don't know image size we need
                            # to resize it on write. The resize amount will
                            # ramp up to 8 gigs.
                            chunk_length = len(chunk)
                            self.size = self._resize_on_write(
                                image, image_size, bytes_written, chunk_length)
                            bytes_written += chunk_length
                            if not (self.thin_provisioning and not any(chunk)):
                                image.write(chunk, offset)
                            offset += chunk_length
                            os_hash_value.update(chunk)
                            checksum.update(chunk)
                            if verifier:
                                verifier.update(chunk)

                        # Let's trim the image in case we overshot with the
                        # resize
                        if image_size == 0:
                            image.resize(bytes_written)

                        if loc.snapshot:
                            image.create_snap(loc.snapshot)
                            image.protect_snap(loc.snapshot)
                except rbd.NoSpace:
                    log_msg = (_LE("Failed to store image %(img_name)s "
                                   "insufficient space available") % {
                                       'img_name': image_name
                                   })
                    LOG.error(log_msg)

                    # Delete image if one was created
                    try:
                        target_pool = loc.pool or self.pool
                        self._delete_image(target_pool, loc.image,
                                           loc.snapshot)
                    except exceptions.NotFound:
                        pass

                    raise exceptions.StorageFull(message=log_msg)
                except Exception as exc:
                    log_msg = (_LE("Failed to store image %(img_name)s "
                                   "Store Exception %(store_exc)s") % {
                                       'img_name': image_name,
                                       'store_exc': exc
                                   })
                    LOG.error(log_msg)

                    # Delete image if one was created
                    try:
                        target_pool = loc.pool or self.pool
                        self._delete_image(target_pool, loc.image,
                                           loc.snapshot)
                    except exceptions.NotFound:
                        pass

                    raise exc

        # Make sure we send back the image size whether provided or inferred.
        if image_size == 0:
            image_size = bytes_written

        # Add store backend information to location metadata
        metadata = {}
        if self.backend_group:
            metadata['store'] = u"%s" % self.backend_group

        return (loc.get_uri(), image_size, checksum.hexdigest(),
                os_hash_value.hexdigest(), metadata)
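The ``not (self.thin_provisioning and not any(chunk))`` guard above skips writes for all-zero chunks when thin provisioning is enabled, leaving those extents unallocated in RBD. Its truth table, sketched as a standalone predicate:

    def should_write(chunk, thin_provisioning):
        """True unless thin provisioning is on and the chunk is all zeroes."""
        return not (thin_provisioning and not any(chunk))

    assert should_write(b'\x00\x01', True)        # real data: write it
    assert not should_write(b'\x00' * 4, True)    # all zeroes: skip the write
    assert should_write(b'\x00' * 4, False)       # thin provisioning off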
Example #13
    def add(self, image_id, image_file, image_size, context=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_HOST>/<BUCKET>/<OBJ>
        where:
            <ACCESS_KEY> = ``s3_store_access_key``
            <SECRET_KEY> = ``s3_store_secret_key``
            <S3_HOST> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <OBJ> = The id of the image being added
        """
        from boto.s3.connection import S3Connection

        loc = StoreLocation({
            'scheme': self.scheme,
            'bucket': self.bucket,
            'key': image_id,
            's3serviceurl': self.full_s3_host,
            'accesskey': self.access_key,
            'secretkey': self.secret_key
        })

        uformat = self.conf.glance_store.s3_store_bucket_url_format
        calling_format = get_calling_format(s3_store_bucket_url_format=uformat)

        s3_conn = S3Connection(loc.accesskey,
                               loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=calling_format)

        create_bucket_if_missing(self.bucket, s3_conn)

        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)

        def _sanitize(uri):
            return re.sub('//.*:.*@',
                          '//s3_store_access_key:s3_store_secret_key@', uri)

        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exceptions.Duplicate(message=_("S3 already has an image at "
                                                 "location %s") %
                                       _sanitize(loc.get_uri()))

        msg = _("Adding image object to S3 using (s3_host=%(s3_host)s, "
                "access_key=%(access_key)s, bucket=%(bucket)s, "
                "key=%(obj_name)s)") % ({
                    's3_host': self.s3_host,
                    'access_key': self.access_key,
                    'bucket': self.bucket,
                    'obj_name': obj_name
                })
        LOG.debug(msg)

        key = bucket_obj.new_key(obj_name)

        # We need to wrap image_file, which is a reference to the
        # webob.Request.body_file, with a seekable file-like object;
        # otherwise the call to set_contents_from_file() will die
        # with an error about the Input object having no 'seek' method.
        # We might want to call webob.Request.make_body_seekable(), but
        # unfortunately, that method copies the entire image into
        # memory and results in LP Bug #818292 occurring. So, here
        # we write a temporary file in as memory-efficient a manner as
        # possible and then supply the temporary file to S3. We also
        # take this opportunity to calculate the image checksum while
        # writing the tempfile, so we don't need to call key.compute_md5()

        msg = _("Writing request body file to temporary file "
                "for %s") % _sanitize(loc.get_uri())
        LOG.debug(msg)

        tmpdir = self.s3_store_object_buffer_dir
        temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
        checksum = hashlib.md5()
        for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
            checksum.update(chunk)
            temp_file.write(chunk)
        temp_file.flush()

        msg = (_("Uploading temporary file to S3 for %s") %
               _sanitize(loc.get_uri()))
        LOG.debug(msg)

        # OK, now upload the data into the key
        with open(temp_file.name, 'r+b') as tmp_fp:
            key.set_contents_from_file(tmp_fp, replace=False)
        size = key.size
        checksum_hex = checksum.hexdigest()

        LOG.debug(
            _("Wrote %(size)d bytes to S3 key named %(obj_name)s "
              "with checksum %(checksum_hex)s"), {
                  'size': size,
                  'obj_name': obj_name,
                  'checksum_hex': checksum_hex
              })

        return (loc.get_uri(), size, checksum_hex, {})
Example #14
    def _add_multipart(self, s3_client, image_file, image_size, bucket, key,
                       loc, hashing_algo, verifier):
        """Stores an image file with a multi part upload to S3 backend.

        :param s3_client: An object with credentials to connect to S3
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param bucket: S3 bucket name
        :param key: The object name to be stored (image identifier)
        :param loc: `glance_store.location.Location` object, supplied
                    from glance_store.location.get_location_from_uri()
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param verifier: An object used to verify signatures for images
        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        """
        os_hash_value = utils.get_hasher(hashing_algo, False)
        checksum = utils.get_hasher('md5', False)
        pool_size = self.s3_store_thread_pools
        pool = eventlet.greenpool.GreenPool(size=pool_size)
        mpu = s3_client.create_multipart_upload(Bucket=bucket, Key=key)
        upload_id = mpu['UploadId']
        LOG.debug("Multipart initiate key=%(key)s, UploadId=%(UploadId)s", {
            'key': key,
            'UploadId': upload_id
        })
        cstart = 0
        plist = []

        chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
        write_chunk_size = max(self.s3_store_large_object_chunk_size,
                               chunk_size)
        it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        buffered_chunk = b''
        while True:
            try:
                buffered_clen = len(buffered_chunk)
                if buffered_clen < write_chunk_size:
                    # keep reading data
                    read_chunk = next(it)
                    buffered_chunk += read_chunk
                    continue
                else:
                    write_chunk = buffered_chunk[:write_chunk_size]
                    remained_data = buffered_chunk[write_chunk_size:]
                    os_hash_value.update(write_chunk)
                    checksum.update(write_chunk)
                    if verifier:
                        verifier.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
                    plist.append(part)
                    cstart += 1
                    buffered_chunk = remained_data
            except StopIteration:
                if len(buffered_chunk) > 0:
                    # Write the last chunk data
                    write_chunk = buffered_chunk
                    os_hash_value.update(write_chunk)
                    checksum.update(write_chunk)
                    if verifier:
                        verifier.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
                    plist.append(part)
                break

        pedict = {}
        total_size = 0
        pool.waitall()

        for part in plist:
            pedict.update(part.etag)
            total_size += part.size

        success = True
        for part in plist:
            if not part.success:
                success = False

        if success:
            # Complete
            mpu_list = self._get_mpu_list(pedict)
            s3_client.complete_multipart_upload(Bucket=bucket,
                                                Key=key,
                                                MultipartUpload=mpu_list,
                                                UploadId=upload_id)
            hash_hex = os_hash_value.hexdigest()
            checksum_hex = checksum.hexdigest()

            # Add store backend information to location metadata
            metadata = {}
            if self.backend_group:
                metadata['store'] = self.backend_group

            LOG.info(
                "Multipart complete key=%(key)s "
                "UploadId=%(UploadId)s "
                "Wrote %(total_size)d bytes to S3 key "
                "named %(key)s "
                "with checksum %(checksum)s", {
                    'key': key,
                    'UploadId': upload_id,
                    'total_size': total_size,
                    'checksum': checksum_hex
                })
            return loc.get_uri(), total_size, checksum_hex, hash_hex, metadata

        # Abort
        s3_client.abort_multipart_upload(Bucket=bucket,
                                         Key=key,
                                         UploadId=upload_id)
        LOG.error("Some parts failed to upload to S3. "
                  "Aborted the key=%s", key)
        msg = _("Failed to add image object to S3. key=%s") % key
        raise glance_store.BackendException(msg)
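Stripped of the eventlet fan-out and the hashing, the boto3 call sequence above reduces to create, upload parts, complete, with abort on any failure. A condensed sketch assuming ``s3_client`` was created via ``boto3.client('s3')`` and that ``parts_iter`` yields ready-made part bodies (both assumptions, not glance_store API):

    def multipart_upload(s3_client, bucket, key, parts_iter):
        mpu = s3_client.create_multipart_upload(Bucket=bucket, Key=key)
        upload_id = mpu['UploadId']
        parts = []
        try:
            for num, body in enumerate(parts_iter, start=1):
                resp = s3_client.upload_part(Bucket=bucket, Key=key,
                                             PartNumber=num,
                                             UploadId=upload_id, Body=body)
                parts.append({'PartNumber': num, 'ETag': resp['ETag']})
            s3_client.complete_multipart_upload(
                Bucket=bucket, Key=key, UploadId=upload_id,
                MultipartUpload={'Parts': parts})
        except Exception:
            # Mirror the abort path above so no orphaned upload lingers
            s3_client.abort_multipart_upload(Bucket=bucket, Key=key,
                                             UploadId=upload_id)
            raise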
Example #15
    def add(self,
            image_id,
            image_file,
            image_size,
            hashing_algo,
            context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param context: A context object
        :param verifier: An object used to verify signatures for images

        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                 exists
        """
        checksum = hashlib.md5()
        os_hash_value = hashlib.new(str(hashing_algo))
        image_name = str(image_id)
        with self.get_connection(conffile=self.conf_file,
                                 rados_id=self.user) as conn:
            fsid = None
            if hasattr(conn, 'get_fsid'):
                fsid = conn.get_fsid()
            with conn.open_ioctx(self.pool) as ioctx:
                order = int(math.log(self.WRITE_CHUNKSIZE, 2))
                LOG.debug('creating image %s with order %d and size %d',
                          image_name, order, image_size)
                if image_size == 0:
                    LOG.warning(
                        _("since image size is zero we will be doing "
                          "resize-before-write for each chunk which "
                          "will be considerably slower than normal"))

                try:
                    loc = self._create_image(fsid, conn, ioctx, image_name,
                                             image_size, order)
                except rbd.ImageExists:
                    msg = _('RBD image %s already exists') % image_id
                    raise exceptions.Duplicate(message=msg)

                try:
                    with rbd.Image(ioctx, image_name) as image:
                        bytes_written = 0
                        offset = 0
                        chunks = utils.chunkreadable(image_file,
                                                     self.WRITE_CHUNKSIZE)
                        for chunk in chunks:
                            # If the image size provided is zero we need to do
                            # a resize for the amount we are writing. This will
                            # be slower so setting a higher chunk size may
                            # speed things up a bit.
                            if image_size == 0:
                                chunk_length = len(chunk)
                                length = offset + chunk_length
                                bytes_written += chunk_length
                                LOG.debug(
                                    _("resizing image to %s KiB") %
                                    (length / units.Ki))
                                image.resize(length)
                            LOG.debug(
                                _("writing chunk at offset %s") % (offset))
                            offset += image.write(chunk, offset)
                            os_hash_value.update(chunk)
                            checksum.update(chunk)
                            if verifier:
                                verifier.update(chunk)
                        if loc.snapshot:
                            image.create_snap(loc.snapshot)
                            image.protect_snap(loc.snapshot)
                except Exception as exc:
                    log_msg = (_LE("Failed to store image %(img_name)s "
                                   "Store Exception %(store_exc)s") % {
                                       'img_name': image_name,
                                       'store_exc': exc
                                   })
                    LOG.error(log_msg)

                    # Delete image if one was created
                    try:
                        target_pool = loc.pool or self.pool
                        self._delete_image(target_pool, loc.image,
                                           loc.snapshot)
                    except exceptions.NotFound:
                        pass

                    raise exc

        # Make sure we send back the image size whether provided or inferred.
        if image_size == 0:
            image_size = bytes_written

        # Add store backend information to location metadata
        metadata = {}
        if self.backend_group:
            metadata['backend'] = u"%s" % self.backend_group

        return (loc.get_uri(), image_size, checksum.hexdigest(),
                os_hash_value.hexdigest(), metadata)
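The loop above feeds each chunk to two digests at once: the legacy MD5 ``checksum`` and the configurable multihash named by ``hashing_algo`` (sha512 by default in Glance; treated here as an example value). Isolated:

    import hashlib

    def compute_hashes(chunks, hashing_algo='sha512'):
        """One pass over the data feeds both digests."""
        checksum = hashlib.md5()
        os_hash_value = hashlib.new(str(hashing_algo))
        for chunk in chunks:
            checksum.update(chunk)
            os_hash_value.update(chunk)
        return checksum.hexdigest(), os_hash_value.hexdigest()

    md5_hex, multihash_hex = compute_hashes([b'chunk-1', b'chunk-2'])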
Example #16
    def add(self, image_id, image_file, image_size, context=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance_store.exceptions.Duplicate` if the image already
                existed
        """
        checksum = hashlib.md5()
        image_name = str(image_id)
        with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
            fsid = None
            if hasattr(conn, 'get_fsid'):
                fsid = conn.get_fsid()
            with conn.open_ioctx(self.pool) as ioctx:
                order = int(math.log(self.WRITE_CHUNKSIZE, 2))
                LOG.debug('creating image %s with order %d and size %d',
                          image_name, order, image_size)
                if image_size == 0:
                    LOG.warning(_("since image size is zero we will be doing "
                                  "resize-before-write for each chunk which "
                                  "will be considerably slower than normal"))

                try:
                    loc = self._create_image(fsid, conn, ioctx, image_name,
                                             image_size, order)
                except rbd.ImageExists:
                    msg = _('RBD image %s already exists') % image_id
                    raise exceptions.Duplicate(message=msg)

                try:
                    with rbd.Image(ioctx, image_name) as image:
                        bytes_written = 0
                        offset = 0
                        chunks = utils.chunkreadable(image_file,
                                                     self.WRITE_CHUNKSIZE)
                        for chunk in chunks:
                            # If the image size provided is zero we need to do
                            # a resize for the amount we are writing. This will
                            # be slower so setting a higher chunk size may
                            # speed things up a bit.
                            if image_size == 0:
                                chunk_length = len(chunk)
                                length = offset + chunk_length
                                bytes_written += chunk_length
                                LOG.debug(_("resizing image to %s KiB") %
                                          (length / units.Ki))
                                image.resize(length)
                            LOG.debug(_("writing chunk at offset %s") %
                                      (offset))
                            offset += image.write(chunk, offset)
                            checksum.update(chunk)
                        if loc.snapshot:
                            image.create_snap(loc.snapshot)
                            image.protect_snap(loc.snapshot)
                except Exception as exc:
                    log_msg = (_LE("Failed to store image %(img_name)s "
                                   "Store Exception %(store_exc)s") %
                               {'img_name': image_name,
                                'store_exc': exc})
                    LOG.error(log_msg)

                    # Delete image if one was created
                    try:
                        target_pool = loc.pool or self.pool
                        self._delete_image(target_pool, loc.image,
                                           loc.snapshot)
                    except exceptions.NotFound:
                        pass

                    raise exc

        # Make sure we send back the image size whether provided or inferred.
        if image_size == 0:
            image_size = bytes_written

        return (loc.get_uri(), image_size, checksum.hexdigest(), {})
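
A note on the `order` argument used above: RBD stripes an image into
backing objects of size 2**order bytes, so the store derives the order
from its write chunk size with a base-2 logarithm. A minimal sketch of
that relationship, assuming a hypothetical 8 MiB chunk size (the actual
value of WRITE_CHUNKSIZE is configuration-dependent):

    import math

    WRITE_CHUNKSIZE = 8 * 1024 * 1024  # assumed 8 MiB, for illustration only

    # RBD object size is 2**order bytes; deriving order from the chunk
    # size lines one write chunk up with one backing object.
    order = int(math.log(WRITE_CHUNKSIZE, 2))

    assert 2 ** order == WRITE_CHUNKSIZE  # exact only for power-of-two sizes
    print(order)  # -> 23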
Example #17
    def add(self, image_id, image_file, image_size, hashing_algo, context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param context: A context object
        :param verifier: An object used to verify signatures for images

        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                 exists
        """
        checksum = hashlib.md5()
        os_hash_value = hashlib.new(str(hashing_algo))
        image_name = str(image_id)
        with self.get_connection(conffile=self.conf_file,
                                 rados_id=self.user) as conn:
            fsid = None
            if hasattr(conn, 'get_fsid'):
                # librados's get_fsid returns bytes in py3 instead of
                # str as it does in py2, which causes problems with
                # ceph. Decoding the bytes to str fixes these issues.
                # The encodeutils.safe_decode workaround CAN BE REMOVED
                # once librados's fix is stable.
                #
                # More information:
                # https://bugs.launchpad.net/glance-store/+bug/1816721
                # https://bugs.launchpad.net/cinder/+bug/1816468
                # https://tracker.ceph.com/issues/38381
                fsid = encodeutils.safe_decode(conn.get_fsid())
            with conn.open_ioctx(self.pool) as ioctx:
                order = int(math.log(self.WRITE_CHUNKSIZE, 2))
                LOG.debug('creating image %s with order %d and size %d',
                          image_name, order, image_size)
                if image_size == 0:
                    LOG.warning(_("since image size is zero we will be doing "
                                  "resize-before-write for each chunk which "
                                  "will be considerably slower than normal"))

                try:
                    loc = self._create_image(fsid, conn, ioctx, image_name,
                                             image_size, order)
                except rbd.ImageExists:
                    msg = _('RBD image %s already exists') % image_id
                    raise exceptions.Duplicate(message=msg)

                try:
                    with rbd.Image(ioctx, image_name) as image:
                        bytes_written = 0
                        offset = 0
                        chunks = utils.chunkreadable(image_file,
                                                     self.WRITE_CHUNKSIZE)
                        for chunk in chunks:
                            # If the image size provided is zero, we need to
                            # resize the image for the amount we are writing.
                            # This will be slower, so setting a higher chunk
                            # size may speed things up a bit.
                            if image_size == 0:
                                chunk_length = len(chunk)
                                length = offset + chunk_length
                                bytes_written += chunk_length
                                LOG.debug(_("resizing image to %s KiB") %
                                          (length / units.Ki))
                                image.resize(length)
                            LOG.debug(_("writing chunk at offset %s") %
                                      (offset))
                            offset += image.write(chunk, offset)
                            os_hash_value.update(chunk)
                            checksum.update(chunk)
                            if verifier:
                                verifier.update(chunk)
                        if loc.snapshot:
                            image.create_snap(loc.snapshot)
                            image.protect_snap(loc.snapshot)
                except rbd.NoSpace:
                    log_msg = (_LE("Failed to store image %(img_name)s: "
                                   "insufficient space available") %
                               {'img_name': image_name})
                    LOG.error(log_msg)

                    # Delete image if one was created
                    try:
                        target_pool = loc.pool or self.pool
                        self._delete_image(target_pool, loc.image,
                                           loc.snapshot)
                    except exceptions.NotFound:
                        pass

                    raise exceptions.StorageFull(message=log_msg)
                except Exception as exc:
                    log_msg = (_LE("Failed to store image %(img_name)s: "
                                   "Store Exception %(store_exc)s") %
                               {'img_name': image_name,
                                'store_exc': exc})
                    LOG.error(log_msg)

                    # Delete image if one was created
                    try:
                        target_pool = loc.pool or self.pool
                        self._delete_image(target_pool, loc.image,
                                           loc.snapshot)
                    except exceptions.NotFound:
                        pass

                    raise exc

        # Make sure we send back the image size whether provided or inferred.
        if image_size == 0:
            image_size = bytes_written

        # Add store backend information to location metadata
        metadata = {}
        if self.backend_group:
            metadata['backend'] = u"%s" % self.backend_group

        return (loc.get_uri(),
                image_size,
                checksum.hexdigest(),
                os_hash_value.hexdigest(),
                metadata)
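
The example above computes two digests in a single pass over the data:
the legacy md5 `checksum` and the configurable "multihash" created with
hashlib.new(hashing_algo). A minimal standalone sketch of that pattern,
using hypothetical in-memory chunks in place of the image file:

    import hashlib

    def compute_digests(chunks, hashing_algo='sha512'):
        # md5 is kept for the backwards-compatible 'checksum' field;
        # the multihash uses the operator-configured algorithm.
        checksum = hashlib.md5()
        os_hash_value = hashlib.new(hashing_algo)
        for chunk in chunks:
            checksum.update(chunk)
            os_hash_value.update(chunk)
        return checksum.hexdigest(), os_hash_value.hexdigest()

    md5_hex, multihash_hex = compute_digests([b'chunk-1', b'chunk-2'])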
Example #18
    def add(self, image_id, image_file, image_size, hashing_algo, context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param context: The request context
        :param verifier: An object used to verify signatures for images

        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                 exists

        :note: By default, the backend writes the image data to a file
              `/<DATADIR>/<ID>`, where <DATADIR> is the value of
              the filesystem_store_datadir configuration option and <ID>
              is the supplied image ID.
        """

        datadir = self._find_best_datadir(image_size)
        filepath = os.path.join(datadir, str(image_id))

        if os.path.exists(filepath):
            raise exceptions.Duplicate(image=filepath)
        os_hash_value = hashlib.new(str(hashing_algo))
        checksum = hashlib.md5()
        bytes_written = 0
        try:
            with open(filepath, 'wb') as f:
                for buf in utils.chunkreadable(image_file,
                                               self.WRITE_CHUNKSIZE):
                    bytes_written += len(buf)
                    os_hash_value.update(buf)
                    checksum.update(buf)
                    if verifier:
                        verifier.update(buf)
                    f.write(buf)
        except IOError as e:
            if e.errno != errno.EACCES:
                self._delete_partial(filepath, image_id)
            errors = {errno.EFBIG: exceptions.StorageFull(),
                      errno.ENOSPC: exceptions.StorageFull(),
                      errno.EACCES: exceptions.StorageWriteDenied()}
            raise errors.get(e.errno, e)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._delete_partial(filepath, image_id)

        hash_hex = os_hash_value.hexdigest()
        checksum_hex = checksum.hexdigest()
        metadata = self._get_metadata(filepath)

        LOG.debug(("Wrote %(bytes_written)d bytes to %(filepath)s with "
                   "checksum %(checksum_hex)s and multihash %(hash_hex)s"),
                  {'bytes_written': bytes_written,
                   'filepath': filepath,
                   'checksum_hex': checksum_hex,
                   'hash_hex': hash_hex})

        if self.backend_group:
            fstore_perm = getattr(
                self.conf, self.backend_group).filesystem_store_file_perm
        else:
            fstore_perm = self.conf.glance_store.filesystem_store_file_perm

        if fstore_perm > 0:
            perm = int(str(fstore_perm), 8)
            try:
                os.chmod(filepath, perm)
            except (IOError, OSError):
                LOG.warning(_LW("Unable to set permissions on image: %s"),
                            filepath)

        # Add store backend information to location metadata
        if self.backend_group:
            metadata['backend'] = u"%s" % self.backend_group

        return ('file://%s' % filepath,
                bytes_written,
                checksum_hex,
                hash_hex,
                metadata)
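
The permission handling above hinges on a subtle conversion:
filesystem_store_file_perm is configured as a plain integer such as 644,
and int(str(fstore_perm), 8) reinterprets those decimal digits as an
octal POSIX mode. A small sketch, assuming the common 644 value:

    fstore_perm = 644  # assumed config value; its digits are read as octal

    # "644" parsed in base 8 gives the mode rw-r--r--.
    perm = int(str(fstore_perm), 8)

    assert perm == 0o644
    assert perm == 420  # the same mode expressed as a decimal integer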
Example #19
    def add(self, image_id, image_file, image_size, hashing_algo, context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param context: A context object
        :param verifier: An object used to verify signatures for images

        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                 exists
        """

        image = SheepdogImage(self.addr, self.port, image_id,
                              self.WRITE_CHUNKSIZE)
        if image.exist():
            raise exceptions.Duplicate(_("Sheepdog image %s already exists")
                                       % image_id)

        location = StoreLocation({
            'image': image_id,
            'addr': self.addr,
            'port': self.port
        }, self.conf, backend_group=self.backend_group)

        image.create(image_size)

        try:
            offset = 0
            os_hash_value = hashlib.new(str(hashing_algo))
            checksum = hashlib.md5()
            chunks = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
            for chunk in chunks:
                chunk_length = len(chunk)
                # If the image size provided is zero, we need to
                # resize the image for the amount we are writing.
                # This will be slower, so setting a higher chunk
                # size may speed things up a bit.
                if image_size == 0:
                    image.resize(offset + chunk_length)
                image.write(chunk, offset, chunk_length)
                offset += chunk_length
                os_hash_value.update(chunk)
                checksum.update(chunk)
                if verifier:
                    verifier.update(chunk)
        except Exception:
            # Note(zhiyan): clean up already-received data when an
            # error such as ImageSizeLimitExceeded occurs.
            with excutils.save_and_reraise_exception():
                image.delete()

        metadata = {}
        if self.backend_group:
            metadata['backend'] = u"%s" % self.backend_group

        return (location.get_uri(),
                offset,
                checksum.hexdigest(),
                os_hash_value.hexdigest(),
                metadata)
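
Like the RBD examples, this store grows the image before each write when
the total size is unknown (image_size == 0). A generic sketch of that
resize-before-write loop, with hypothetical write/resize callables
standing in for the Sheepdog client:

    def copy_chunks(chunks, write, resize, known_size):
        # When the destination was not pre-sized, grow it just enough
        # for each incoming chunk; otherwise write into the space
        # allocated up front.
        offset = 0
        for chunk in chunks:
            if known_size == 0:
                resize(offset + len(chunk))
            write(chunk, offset)
            offset += len(chunk)
        return offset  # total bytes written

    # Example wiring against an in-memory buffer:
    buf = bytearray()
    total = copy_chunks(
        [b'abc', b'defgh'],
        write=lambda c, off: buf.__setitem__(slice(off, off + len(c)), c),
        resize=lambda n: buf.extend(b'\x00' * (n - len(buf))),
        known_size=0,
    )
    assert total == 8 and bytes(buf) == b'abcdefgh'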