Code example #1
    def _create_container_if_missing(self, container, connection):
        """
        Creates a missing container in Swift if the
        ``swift_store_create_container_on_put`` option is set.

        :param container: Name of container to create
        :param connection: Connection to swift service
        """
        try:
            connection.head_container(container)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                if self.conf.glance_store.swift_store_create_container_on_put:
                    try:
                        msg = (_LI("Creating swift container %(container)s") %
                               {
                                   'container': container
                               })
                        LOG.info(msg)
                        connection.put_container(container)
                    except swiftclient.ClientException as e:
                        msg = (_("Failed to add container to Swift.\n"
                                 "Got error from Swift: %s.") %
                               cutils.exception_to_str(e))
                        raise glance_store.BackendException(msg)
                else:
                    msg = (_("The container %(container)s does not exist in "
                             "Swift. Please set the "
                             "swift_store_create_container_on_put option"
                             "to add container to Swift automatically.") % {
                                 'container': container
                             })
                    raise glance_store.BackendException(msg)
            else:
                raise
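All of these examples funnel backend failures into glance_store.BackendException. A minimal caller-side sketch (the `store` and `connection` names are hypothetical, not taken from the snippet above):

    # Hypothetical caller; `store` is assumed to be a configured Swift
    # store instance and `connection` an open swiftclient connection.
    try:
        store._create_container_if_missing('glance-images', connection)
    except glance_store.BackendException as e:
        # The message already states whether the container was missing
        # or could not be created.
        LOG.error(e)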
Code example #2
def create_bucket_if_missing(conf, bucket, s3_conn):
    """
    Creates a missing bucket in S3 if the
    ``s3_store_create_bucket_on_put`` option is set.

    :param conf: Configuration
    :param bucket: Name of bucket to create
    :param s3_conn: Connection to S3
    """
    from boto.exception import S3ResponseError
    try:
        s3_conn.get_bucket(bucket)
    except S3ResponseError as e:
        if e.status == http_client.NOT_FOUND:
            if conf.glance_store.s3_store_create_bucket_on_put:
                host = conf.glance_store.s3_store_host
                location = get_s3_location(host)
                try:
                    s3_conn.create_bucket(bucket, location=location)
                except S3ResponseError as e:
                    msg = (_("Failed to add bucket to S3.\n"
                             "Got error from S3: %s.") %
                           utils.exception_to_str(e))
                    raise glance_store.BackendException(msg)
            else:
                msg = (_("The bucket %(bucket)s does not exist in "
                         "S3. Please set the "
                         "s3_store_create_bucket_on_put option "
                         "to add bucket to S3 automatically.") % {
                             'bucket': bucket
                         })
                raise glance_store.BackendException(msg)
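get_s3_location is referenced but not shown. A plausible sketch, assuming it maps the configured S3 host to a boto bucket-location constant and falls back to the classic region for unknown hosts:

from boto.s3.connection import Location


def get_s3_location(s3_host):
    # Map a configured S3 endpoint to a boto bucket-location constant;
    # hosts not listed here fall back to the classic (US) region.
    locations = {
        's3.amazonaws.com': Location.DEFAULT,
        's3-eu-west-1.amazonaws.com': Location.EU,
    }
    return locations.get(s3_host, Location.DEFAULT)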
Code example #3
File: sheepdog.py Project: red-alert/glance_store
    def _run_command(self, command, data, *params):
        cmd = ['collie', 'vdi']
        cmd.extend(command.split(' '))
        cmd.extend(['-a', self.addr, '-p', self.port, self.name])
        cmd.extend(params)

        try:
            return processutils.execute(*cmd, process_input=data)[0]
        except processutils.ProcessExecutionError as exc:
            LOG.error(exc)
            raise glance_store.BackendException(exc)
Code example #4
    def _run_command(self, command, data, *params):
        cmd = ("collie vdi %(command)s -a %(addr)s -p %(port)d %(name)s "
               "%(params)s" % {
                   "command": command,
                   "addr": self.addr,
                   "port": self.port,
                   "name": self.name,
                   "params": " ".join(map(str, params))
               })

        try:
            # Note: processutils.execute expects the command as separate
            # arguments; split the formatted string so it is not treated
            # as a single executable name.
            return processutils.execute(*cmd.split(), process_input=data)[0]
        except processutils.ProcessExecutionError as exc:
            LOG.error(exc)
            raise glance_store.BackendException(exc)
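Either variant is typically driven by thin wrappers on the image class. A sketch of plausible call sites (method names assumed, not verbatim driver code):

    def create(self, size):
        self._run_command('create', None, str(size))

    def read(self, offset, count):
        return self._run_command('read', None, str(offset), str(count))

    def write(self, data, offset, count):
        self._run_command('write', data, str(offset), str(count))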
Code example #5
    def add_multipart(self, image_file, image_size, bucket_obj, obj_name, loc):
        """
        Stores an image file with a multi part upload to S3 backend

        :param image_file: The image data to write, as a file-like object
        :param bucket_obj: S3 bucket object
        :param obj_name: The object name to be stored(image identifier)
        :loc: The Store Location Info
        """

        checksum = hashlib.md5()
        pool_size = self.s3_store_thread_pools
        pool = eventlet.greenpool.GreenPool(size=pool_size)
        mpu = bucket_obj.initiate_multipart_upload(obj_name)
        LOG.debug("Multipart initiate key=%(obj_name)s, "
                  "UploadId=%(UploadId)s" % {
                      'obj_name': obj_name,
                      'UploadId': mpu.id
                  })
        cstart = 0
        plist = []

        chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
        write_chunk_size = max(self.s3_store_large_object_chunk_size,
                               chunk_size)
        it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        buffered_chunk = b''
        while True:
            try:
                buffered_clen = len(buffered_chunk)
                if buffered_clen < write_chunk_size:
                    # keep reading data
                    read_chunk = next(it)
                    buffered_chunk += read_chunk
                    continue
                else:
                    write_chunk = buffered_chunk[:write_chunk_size]
                    remained_data = buffered_chunk[write_chunk_size:]
                    checksum.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, part)
                    plist.append(part)
                    cstart += 1
                    buffered_chunk = remained_data
            except StopIteration:
                if len(buffered_chunk) > 0:
                    # Write the last chunk data
                    write_chunk = buffered_chunk
                    checksum.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, part)
                    plist.append(part)
                break

        pedict = {}
        total_size = 0
        pool.waitall()

        for part in plist:
            pedict.update(part.etag)
            total_size += part.size

        success = True
        for part in plist:
            if not part.success:
                success = False

        if success:
            # Complete
            xml = get_mpu_xml(pedict)
            bucket_obj.complete_multipart_upload(obj_name, mpu.id, xml)
            checksum_hex = checksum.hexdigest()
            LOG.info(
                _LI("Multipart complete key=%(obj_name)s "
                    "UploadId=%(UploadId)s "
                    "Wrote %(total_size)d bytes to S3 key"
                    "named %(obj_name)s "
                    "with checksum %(checksum_hex)s") % {
                        'obj_name': obj_name,
                        'UploadId': mpu.id,
                        'total_size': total_size,
                        'checksum_hex': checksum_hex
                    })
            return (loc.get_uri(), total_size, checksum_hex, {})
        else:
            # Abort
            bucket_obj.cancel_multipart_upload(obj_name, mpu.id)
            LOG.error(
                _LE("Some parts failed to upload to S3. "
                    "Aborted the object key=%(obj_name)s") %
                {'obj_name': obj_name})
            msg = (_("Failed to add image object to S3. "
                     "key=%(obj_name)s") % {
                         'obj_name': obj_name
                     })
            raise glance_store.BackendException(msg)
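The UploadPart, run_upload and get_mpu_xml helpers are assumed by this example but not shown. A minimal sketch of what they might look like (upload_part_from_file is the real boto call; the field layout is an assumption):

class UploadPart(object):
    # Bookkeeping for one part of the multipart upload.
    def __init__(self, mpu, fp, partnum, chunks):
        self.mpu = mpu          # boto MultiPartUpload handle
        self.fp = fp            # BytesIO holding this part's data
        self.partnum = partnum  # 1-based part number
        self.chunks = chunks    # size of this part in bytes
        self.size = 0
        self.etag = {}
        self.success = True


def run_upload(part):
    # Upload one part; record its etag keyed by part number, or mark
    # the part failed so the caller aborts the whole upload.
    try:
        key = part.mpu.upload_part_from_file(part.fp,
                                             part_num=part.partnum,
                                             size=part.chunks)
        part.etag[part.partnum] = key.etag
        part.size = part.chunks
    except Exception:
        part.success = False
    finally:
        part.fp.close()


def get_mpu_xml(pedict):
    # Build the CompleteMultipartUpload XML body from {partnum: etag}.
    parts = ''.join(
        '<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>'
        % (pnum, etag) for pnum, etag in sorted(pedict.items()))
    return '<CompleteMultipartUpload>%s</CompleteMultipartUpload>' % parts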
Code example #6
    def add(self,
            image_id,
            image_file,
            image_size,
            connection=None,
            context=None):
        location = self.create_location(image_id, context=context)
        if not connection:
            connection = self.get_connection(location, context=context)

        self._create_container_if_missing(location.container, connection)

        LOG.debug("Adding image object '%(obj_name)s' "
                  "to Swift" % dict(obj_name=location.obj))
        try:
            if image_size > 0 and image_size < self.large_object_size:
                # Image size is known, and is less than large_object_size.
                # Send to Swift with regular PUT.
                obj_etag = connection.put_object(location.container,
                                                 location.obj,
                                                 image_file,
                                                 content_length=image_size)
            else:
                # Write the image into Swift in chunks.
                chunk_id = 1
                if image_size > 0:
                    total_chunks = str(
                        int(
                            math.ceil(
                                float(image_size) /
                                float(self.large_object_chunk_size))))
                else:
                    # image_size == 0 is when we don't know the size
                    # of the image. This can occur with older clients
                    # that don't inspect the payload size.
                    LOG.debug("Cannot determine image size. Adding as a "
                              "segmented object to Swift.")
                    total_chunks = '?'

                checksum = hashlib.md5()
                written_chunks = []
                combined_chunks_size = 0
                while True:
                    chunk_size = self.large_object_chunk_size
                    if image_size == 0:
                        content_length = None
                    else:
                        left = image_size - combined_chunks_size
                        if left == 0:
                            break
                        if chunk_size > left:
                            chunk_size = left
                        content_length = chunk_size

                    chunk_name = "%s-%05d" % (location.obj, chunk_id)
                    reader = ChunkReader(image_file, checksum, chunk_size)
                    try:
                        chunk_etag = connection.put_object(
                            location.container,
                            chunk_name,
                            reader,
                            content_length=content_length)
                        written_chunks.append(chunk_name)
                    except Exception:
                        # Delete orphaned segments from swift backend
                        with excutils.save_and_reraise_exception():
                            LOG.exception(
                                _("Error during chunked upload to "
                                  "backend, deleting stale chunks"))
                            self._delete_stale_chunks(connection,
                                                      location.container,
                                                      written_chunks)

                    bytes_read = reader.bytes_read
                    msg = ("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                           "%(total_chunks)s) of length %(bytes_read)d "
                           "to Swift returning MD5 of content: "
                           "%(chunk_etag)s" % {
                               'chunk_name': chunk_name,
                               'chunk_id': chunk_id,
                               'total_chunks': total_chunks,
                               'bytes_read': bytes_read,
                               'chunk_etag': chunk_etag
                           })
                    LOG.debug(msg)

                    if bytes_read == 0:
                        # Delete the last chunk, because it's of zero size.
                        # This will happen if size == 0.
                        LOG.debug("Deleting final zero-length chunk")
                        connection.delete_object(location.container,
                                                 chunk_name)
                        break

                    chunk_id += 1
                    combined_chunks_size += bytes_read

                # In the case we have been given an unknown image size,
                # set the size to the total size of the combined chunks.
                if image_size == 0:
                    image_size = combined_chunks_size

                # Now we write the object manifest and return the
                # manifest's etag...
                manifest = "%s/%s-" % (location.container, location.obj)
                headers = {
                    'ETag': hashlib.md5(b"").hexdigest(),
                    'X-Object-Manifest': manifest
                }

                # The ETag returned for the manifest is actually the
                # MD5 hash of the concatenated checksums of the strings
                # of each chunk...so we ignore this result in favour of
                # the MD5 of the entire image file contents, so that
                # users can verify the image file contents accordingly
                connection.put_object(location.container,
                                      location.obj,
                                      None,
                                      headers=headers)
                obj_etag = checksum.hexdigest()

            # NOTE: We return the user and key here! Have to because
            # location is used by the API server to return the actual
            # image data. We *really* should consider NOT returning
            # the location attribute from GET /images/<ID> and
            # GET /images/details
            if sutils.is_multiple_swift_store_accounts_enabled(self.conf):
                include_creds = False
            else:
                include_creds = True

            return (location.get_uri(credentials_included=include_creds),
                    image_size, obj_etag, {})
        except swiftclient.ClientException as e:
            if e.http_status == httplib.CONFLICT:
                msg = _("Swift already has an image at this location")
                raise exceptions.Duplicate(message=msg)

            msg = (_(u"Failed to add object to Swift.\n"
                     "Got error from Swift: %s.") % cutils.exception_to_str(e))
            LOG.error(msg)
            raise glance_store.BackendException(msg)
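The ChunkReader used above caps reads at the chunk size while feeding the running checksum. A sketch, assuming that behavior:

class ChunkReader(object):
    # File-like wrapper that serves at most `total` bytes from `fd`,
    # updating `checksum` with everything it hands out.
    def __init__(self, fd, checksum, total):
        self.fd = fd
        self.checksum = checksum
        self.total = total
        self.bytes_read = 0

    def read(self, i):
        left = self.total - self.bytes_read
        if i > left:
            i = left
        result = self.fd.read(i)
        self.bytes_read += len(result)
        self.checksum.update(result)
        return result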
Code example #7
    def _add_multipart(self, s3_client, image_file, image_size, bucket, key,
                       loc, hashing_algo, verifier):
        """Stores an image file with a multi part upload to S3 backend.

        :param s3_client: An object with credentials to connect to S3
        :param image_file: The image data to write, as a file-like object
        :param bucket: S3 bucket name
        :param key: The object name to be stored (image identifier)
        :param loc: `glance_store.location.Location` object, supplied
                    from glance_store.location.get_location_from_uri()
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param verifier: An object used to verify signatures for images
        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        """
        os_hash_value = utils.get_hasher(hashing_algo, False)
        checksum = utils.get_hasher('md5', False)
        pool_size = self.s3_store_thread_pools
        pool = eventlet.greenpool.GreenPool(size=pool_size)
        mpu = s3_client.create_multipart_upload(Bucket=bucket, Key=key)
        upload_id = mpu['UploadId']
        LOG.debug("Multipart initiate key=%(key)s, UploadId=%(UploadId)s", {
            'key': key,
            'UploadId': upload_id
        })
        cstart = 0
        plist = []

        chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
        write_chunk_size = max(self.s3_store_large_object_chunk_size,
                               chunk_size)
        it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        buffered_chunk = b''
        while True:
            try:
                buffered_clen = len(buffered_chunk)
                if buffered_clen < write_chunk_size:
                    # keep reading data
                    read_chunk = next(it)
                    buffered_chunk += read_chunk
                    continue
                else:
                    write_chunk = buffered_chunk[:write_chunk_size]
                    remained_data = buffered_chunk[write_chunk_size:]
                    os_hash_value.update(write_chunk)
                    checksum.update(write_chunk)
                    if verifier:
                        verifier.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
                    plist.append(part)
                    cstart += 1
                    buffered_chunk = remained_data
            except StopIteration:
                if len(buffered_chunk) > 0:
                    # Write the last chunk data
                    write_chunk = buffered_chunk
                    os_hash_value.update(write_chunk)
                    checksum.update(write_chunk)
                    if verifier:
                        verifier.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
                    plist.append(part)
                break

        pedict = {}
        total_size = 0
        pool.waitall()

        for part in plist:
            pedict.update(part.etag)
            total_size += part.size

        success = True
        for part in plist:
            if not part.success:
                success = False

        if success:
            # Complete
            mpu_list = self._get_mpu_list(pedict)
            s3_client.complete_multipart_upload(Bucket=bucket,
                                                Key=key,
                                                MultipartUpload=mpu_list,
                                                UploadId=upload_id)
            hash_hex = os_hash_value.hexdigest()
            checksum_hex = checksum.hexdigest()

            # Add store backend information to location metadata
            metadata = {}
            if self.backend_group:
                metadata['store'] = self.backend_group

            LOG.info(
                "Multipart complete key=%(key)s "
                "UploadId=%(UploadId)s "
                "Wrote %(total_size)d bytes to S3 key "
                "named %(key)s "
                "with checksum %(checksum)s", {
                    'key': key,
                    'UploadId': upload_id,
                    'total_size': total_size,
                    'checksum': checksum_hex
                })
            return loc.get_uri(), total_size, checksum_hex, hash_hex, metadata

        # Abort
        s3_client.abort_multipart_upload(Bucket=bucket,
                                         Key=key,
                                         UploadId=upload_id)
        LOG.error("Some parts failed to upload to S3. "
                  "Aborted the key=%s", key)
        msg = _("Failed to add image object to S3. key=%s") % key
        raise glance_store.BackendException(msg)
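In this boto3 variant, run_upload takes the client directly and _get_mpu_list shapes the collected ETags for complete_multipart_upload. A sketch under those assumptions (upload_part and the MultipartUpload={'Parts': [...]} structure are the real boto3 API; the helper bodies are conjecture):

def run_upload(s3_client, bucket, key, part):
    # Upload one part via boto3; on failure, flag the part so the
    # caller aborts the multipart upload instead of completing it.
    try:
        resp = s3_client.upload_part(Body=part.fp,
                                     Bucket=bucket,
                                     Key=key,
                                     PartNumber=part.partnum,
                                     UploadId=part.mpu['UploadId'])
        part.etag[part.partnum] = resp['ETag']
        part.size = part.chunks
    except Exception:
        part.success = False
    finally:
        part.fp.close()


def _get_mpu_list(self, pedict):
    # {partnum: etag} -> {'Parts': [{'PartNumber': n, 'ETag': ...}]}
    return {
        'Parts': [{'PartNumber': pnum, 'ETag': etag}
                  for pnum, etag in sorted(pedict.items())]
    }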
Code example #8
    def add(self,
            image_id,
            image_file,
            image_size,
            hashing_algo,
            context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param context: A context object
        :param verifier: An object used to verify signatures for images

        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                 exists
        """
        loc = StoreLocation(store_specs={'scheme': self.scheme,
                                         'bucket': self.bucket,
                                         'key': image_id,
                                         's3serviceurl': self.full_s3_host,
                                         'accesskey': self.access_key,
                                         'secretkey': self.secret_key},
                            conf=self.conf,
                            backend_group=self.backend_group)

        s3_client, bucket, key = self._operation_set(loc)

        if not self._bucket_exists(s3_client, bucket):
            if self._option_get('s3_store_create_bucket_on_put'):
                self._create_bucket(s3_client,
                                    self._option_get('s3_store_host'), bucket)
            else:
                msg = (_("The bucket %s does not exist in "
                         "S3. Please set the "
                         "s3_store_create_bucket_on_put option "
                         "to add bucket to S3 automatically.") % bucket)
                raise glance_store.BackendException(msg)

        LOG.debug(
            "Adding image object to S3 using (s3_host=%(s3_host)s, "
            "access_key=%(access_key)s, bucket=%(bucket)s, "
            "key=%(key)s)", {
                's3_host': self.s3_host,
                'access_key': loc.accesskey,
                'bucket': bucket,
                'key': key
            })

        if not self._object_exists(s3_client, bucket, key):
            if image_size < self.s3_store_large_object_size:
                return self._add_singlepart(s3_client=s3_client,
                                            image_file=image_file,
                                            bucket=bucket,
                                            key=key,
                                            loc=loc,
                                            hashing_algo=hashing_algo,
                                            verifier=verifier)

            return self._add_multipart(s3_client=s3_client,
                                       image_file=image_file,
                                       image_size=image_size,
                                       bucket=bucket,
                                       key=key,
                                       loc=loc,
                                       hashing_algo=hashing_algo,
                                       verifier=verifier)
        LOG.warning(
            "S3 already has an image with bucket ID %(bucket)s, "
            "key %(key)s", {
                'bucket': bucket,
                'key': key
            })
        raise exceptions.Duplicate(image=key)
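The single-PUT path (_add_singlepart) is not shown. A hedged sketch, mirroring the hashing done on the multipart path and returning the same tuple shape:

    def _add_singlepart(self, s3_client, image_file, bucket, key, loc,
                        hashing_algo, verifier):
        # Sketch only: buffer the whole image and store it in one PUT.
        os_hash_value = utils.get_hasher(hashing_algo, False)
        checksum = utils.get_hasher('md5', False)
        image_data = b''
        for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
            image_data += chunk
            os_hash_value.update(chunk)
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)

        s3_client.put_object(Body=image_data, Bucket=bucket, Key=key)

        metadata = {}
        if self.backend_group:
            metadata['store'] = self.backend_group
        return (loc.get_uri(), len(image_data), checksum.hexdigest(),
                os_hash_value.hexdigest(), metadata)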
Code example #9
    def test_backend_exception(self):
        msg = glance_store.BackendException()
        self.assertIn(u'', encodeutils.exception_to_unicode(msg))
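A natural companion test, assuming the message passed to BackendException is preserved by oslo.utils' exception_to_unicode:

    def test_backend_exception_with_message(self):
        msg = glance_store.BackendException(u'error while adding image')
        self.assertIn(u'error while adding image',
                      encodeutils.exception_to_unicode(msg))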