Example #1
    def create(self, req, body):
        """Instruct Cinder to manage a storage object.

        Manages an existing backend storage object (e.g. a Linux logical
        volume or a SAN disk) by creating the Cinder objects required to manage
        it, and possibly renaming the backend storage object
        (driver dependent).

        From an API perspective, this operation behaves very much like a
        volume creation operation, except that properties such as image,
        snapshot and volume references don't make sense, because we are taking
        an existing storage object into Cinder management.

        Required HTTP Body:

        .. code-block:: json

         {
           "volume": {
             "host": "<Cinder host on which the existing storage resides>",
             "cluster": "<Cinder cluster on which the storage resides>",
             "ref": "<Driver-specific reference to existing storage object>"
           }
         }

        See the appropriate Cinder drivers' implementations of the
        manage_volume method to find out the accepted format of 'ref'.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'host' element refers to a
        Cinder host that is not registered.

        The volume will later enter the error state if it is discovered that
        'ref' is bad.

        Optional elements to 'volume' are::

         name               A name for the new volume.
         description        A description for the new volume.
         volume_type        ID or name of a volume type to associate with
                            the new Cinder volume. Does not necessarily
                            guarantee that the managed volume will have the
                            properties described in the volume_type. The
                            driver may choose to fail if it identifies that
                            the specified volume_type is not compatible with
                            the backend storage object.
         metadata           Key/value pairs to be associated with the new
                            volume.
         availability_zone  The availability zone to associate with the new
                            volume.
         bootable           If set to True, marks the volume as bootable.

        """
        context = req.environ['cinder.context']
        context.authorize(policy.MANAGE_POLICY)

        self.assert_valid_body(body, 'volume')

        volume = body['volume']
        self.validate_name_and_description(volume)

        # Check that the required keys are present; return an error if they
        # are not.
        if 'ref' not in volume:
            raise exception.MissingRequired(element='ref')

        cluster_name, host = common.get_cluster_host(req, volume,
                                                     mv.VOLUME_MIGRATE_CLUSTER)

        LOG.debug('Manage volume request body: %s', body)

        kwargs = {}
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            try:
                kwargs['volume_type'] = volume_types.get_by_name_or_id(
                    context, req_volume_type)
            except exception.VolumeTypeNotFound:
                msg = _("Cannot find requested '%s' "
                        "volume type") % req_volume_type
                raise exception.InvalidVolumeType(reason=msg)
        else:
            kwargs['volume_type'] = {}

        kwargs['name'] = volume.get('name', None)
        kwargs['description'] = volume.get('description', None)
        kwargs['metadata'] = volume.get('metadata', None)
        kwargs['availability_zone'] = volume.get('availability_zone', None)
        kwargs['bootable'] = utils.get_bool_param('bootable', volume)

        utils.check_metadata_properties(kwargs['metadata'])

        try:
            new_volume = self.volume_api.manage_existing(
                context, host, cluster_name, volume['ref'], **kwargs)
        except exception.ServiceNotFound:
            msg = _("%(name)s '%(value)s' not found") % {
                'name': 'Host' if host else 'Cluster',
                'value': host or cluster_name
            }
            raise exception.ServiceUnavailable(message=msg)

        utils.add_visible_admin_metadata(new_volume)

        return self._view_builder.detail(req, new_volume)
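
A minimal sketch of bodies this cluster-aware variant could accept may help. The host/cluster values and the 'source-name' ref format below are illustrative assumptions (ref formats are driver-specific), and the one-of-host-or-cluster rule is inferred from the delegation to common.get_cluster_host rather than shown above:

# Hedged sketch: illustrative request bodies for the manage call above.
# 'ref' is always required; presumably exactly one of 'host' or 'cluster'
# identifies the backend (enforced by common.get_cluster_host).
manage_by_host = {
    'volume': {
        'host': 'myhost@lvmdriver-1#lvmdriver-1',   # hypothetical host
        'ref': {'source-name': 'existing-lv'},      # hypothetical ref format
        'name': 'managed-vol',                      # optional element
    }
}
manage_by_cluster = {
    'volume': {
        'cluster': 'mycluster@lvmdriver-1',         # hypothetical cluster
        'ref': {'source-name': 'existing-lv'},
    }
}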
Example #2
    def execute(self, context, size, snapshot, image_id, source_volume,
                availability_zone, volume_type, metadata, key_manager,
                source_replica, consistencygroup, cgsnapshot):

        utils.check_exclusive_options(snapshot=snapshot,
                                      imageRef=image_id,
                                      source_volume=source_volume)
        policy.enforce_action(context, ACTION)

        # TODO(harlowja): what guarantee is there that the snapshot or source
        # volume will remain available after we do this initial verification?
        snapshot_id = self._extract_snapshot(snapshot)
        source_volid = self._extract_source_volume(source_volume)
        source_replicaid = self._extract_source_replica(source_replica)
        size = self._extract_size(size, source_volume, snapshot)
        consistencygroup_id = self._extract_consistencygroup(consistencygroup)
        cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot)

        self._check_image_metadata(context, image_id, size)

        availability_zone = self._extract_availability_zone(availability_zone,
                                                            snapshot,
                                                            source_volume)

        # TODO(joel-coffman): This special handling of snapshots to ensure that
        # their volume type matches the source volume is too convoluted. We
        # should copy encryption metadata from the encrypted volume type to the
        # volume upon creation and propagate that information to each snapshot.
        # This strategy avoids any dependency upon the encrypted volume type.
        def_vol_type = volume_types.get_default_volume_type()
        if not volume_type and not source_volume and not snapshot:
            image_volume_type = self._get_image_volume_type(context, image_id)
            volume_type = (image_volume_type if image_volume_type else
                           def_vol_type)

        # When creating a clone of a replica (replication test), we can't
        # use the volume type of the replica; therefore, we use the default.
        # NOTE(ronenkat): this assumes the default type is not replicated.
        if source_replicaid:
            volume_type = def_vol_type

        volume_type_id = self._get_volume_type_id(volume_type,
                                                  source_volume, snapshot)

        if image_id and volume_types.is_encrypted(context, volume_type_id):
            msg = _('Creating encrypted volumes with type %(type)s '
                    'from image %(image)s is not supported.')
            msg = msg % {'type': volume_type_id,
                         'image': image_id, }
            raise exception.InvalidInput(reason=msg)

        encryption_key_id = self._get_encryption_key_id(key_manager,
                                                        context,
                                                        volume_type_id,
                                                        snapshot,
                                                        source_volume)

        specs = {}
        if volume_type_id:
            qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id)
            if qos_specs['qos_specs']:
                specs = qos_specs['qos_specs'].get('specs', {})
        if not specs:
            # Make sure we don't pass an empty dict.
            specs = None

        utils.check_metadata_properties(metadata)

        return {
            'size': size,
            'snapshot_id': snapshot_id,
            'source_volid': source_volid,
            'availability_zone': availability_zone,
            'volume_type': volume_type,
            'volume_type_id': volume_type_id,
            'encryption_key_id': encryption_key_id,
            'qos_specs': specs,
            'source_replicaid': source_replicaid,
            'consistencygroup_id': consistencygroup_id,
            'cgsnapshot_id': cgsnapshot_id,
        }
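
The first thing execute() does is reject mutually exclusive volume sources. What utils.check_exclusive_options does internally is not shown here, so the following is a hedged, self-contained sketch of plausible behavior rather than Cinder's actual helper:

# Hedged sketch of an exclusive-options check: at most one of the supplied
# keyword arguments may be non-None, since a volume can be created from a
# snapshot, an image, or a source volume, but not several at once.
def check_exclusive_options(**kwargs):
    supplied = sorted(name for name, value in kwargs.items()
                      if value is not None)
    if len(supplied) > 1:
        raise ValueError('May specify only one of: %s' % ', '.join(supplied))

check_exclusive_options(snapshot=None, imageRef='image-1')  # passes
# check_exclusive_options(snapshot='snap-1', imageRef='image-1')  # raises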
Example #3
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None,
               metadata=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        utils.check_metadata_properties(metadata)
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
            if snapshot['status'] not in ["available"]:
                msg = (_('Snapshot to be backed up must be available, '
                         'but the current status is "%s".') %
                       snapshot['status'])
                raise exception.InvalidSnapshot(reason=msg)
        elif volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume.host, 'host')
        host = self._get_available_backup_service_host(
            volume_host, volume.availability_zone)

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context,
                                                   e,
                                                   resource='backups',
                                                   size=volume.size)
        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or
                        x['data_timestamp'] < snapshot['created_at'])
                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.BACKING_UP})
        else:
            self.db.volume_update(context, volume_id, {
                'status': 'backing-up',
                'previous_status': previous_status
            })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
                'metadata': metadata or {}
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
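
The key function in the incremental branch is easy to misread, so here is a self-contained sketch of the same parent-selection rule, replaying the 8:00/8:10/8:20 scenario from the NOTE above; it uses stdlib timezones instead of pytz, and the records are made up:

from datetime import datetime, timezone

# Sentinel that sorts below any real timestamp, playing the role of the
# datetime(1, 1, 1, ...) fallback in the key function above.
EPOCH = datetime(1, 1, 1, tzinfo=timezone.utc)

def pick_parent(backups, snapshot_created_at=None):
    """Newest eligible data_timestamp wins; when backing up from a snapshot,
    only backups whose data predates the snapshot are eligible."""
    def key(backup):
        ts = backup['data_timestamp']
        if snapshot_created_at is None or ts < snapshot_created_at:
            return ts
        return EPOCH  # ineligible: its data is newer than the snapshot
    return max(backups, key=key)

backups = [
    {'id': 'b-0800',
     'data_timestamp': datetime(2023, 1, 1, 8, 0, tzinfo=timezone.utc)},
    {'id': 'b-0820',
     'data_timestamp': datetime(2023, 1, 1, 8, 20, tzinfo=timezone.utc)},
]
snap_time = datetime(2023, 1, 1, 8, 10, tzinfo=timezone.utc)
assert pick_parent(backups)['id'] == 'b-0820'             # plain incremental
assert pick_parent(backups, snap_time)['id'] == 'b-0800'  # from the snapshot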
Example #4
    def create(self, context, name, description, volume_id,
               container, incremental=False, availability_zone=None,
               force=False, snapshot_id=None, metadata=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        utils.check_metadata_properties(metadata)
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match '
                         'snapshot.volume_id %(vol2)s.')
                       % {'vol1': volume_id,
                          'vol2': snapshot.volume_id})
                raise exception.InvalidVolume(reason=msg)
            if snapshot['status'] not in ["available"]:
                msg = (_('Snapshot to be backed up must be available, '
                         'but the current status is "%s".')
                       % snapshot['status'])
                raise exception.InvalidSnapshot(reason=msg)
        elif volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".')
                   % volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use '
                    'the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume.host, 'host')
        host = self._get_available_backup_service_host(
            volume_host, volume.availability_zone)

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(
                context, e,
                resource='backups',
                size=volume.size)
        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                           volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or
                        x['data_timestamp'] < snapshot['created_at'])
                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.BACKING_UP})
        else:
            self.db.volume_update(context, volume_id,
                                  {'status': 'backing-up',
                                   'previous_status': previous_status})

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
                'metadata': metadata or {}
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
Example #5
    def create(self, req, body):
        """Instruct Cinder to manage a storage object.

        Manages an existing backend storage object (e.g. a Linux logical
        volume or a SAN disk) by creating the Cinder objects required to manage
        it, and possibly renaming the backend storage object
        (driver dependent).

        From an API perspective, this operation behaves very much like a
        volume creation operation, except that properties such as image,
        snapshot and volume references don't make sense, because we are taking
        an existing storage object into Cinder management.

        Required HTTP Body:

        .. code-block:: json

         {
           "volume": {
             "host": "<Cinder host on which the existing storage resides>",
             "ref": "<Driver-specific reference to existing storage object>"
           }
         }

        See the appropriate Cinder drivers' implementations of the
        manage_volume method to find out the accepted format of 'ref'.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'host' element refers to a
        Cinder host that is not registered.

        The volume will later enter the error state if it is discovered that
        'ref' is bad.

        Optional elements to 'volume' are::

         name               A name for the new volume.
         description        A description for the new volume.
         volume_type        ID or name of a volume type to associate with
                            the new Cinder volume. Does not necessarily
                            guarantee that the managed volume will have the
                            properties described in the volume_type. The
                            driver may choose to fail if it identifies that
                            the specified volume_type is not compatible with
                            the backend storage object.
         metadata           Key/value pairs to be associated with the new
                            volume.
         availability_zone  The availability zone to associate with the new
                            volume.
         bootable           If set to True, marks the volume as bootable.

        """
        context = req.environ['cinder.context']
        authorize_manage(context)

        self.assert_valid_body(body, 'volume')

        volume = body['volume']
        self.validate_name_and_description(volume)

        # Check that the required keys are present; return an error if they
        # are not.
        required_keys = {'ref', 'host'}
        missing_keys = list(required_keys - set(volume.keys()))

        if missing_keys:
            msg = _("The following elements are required: %s") % \
                ', '.join(missing_keys)
            raise exc.HTTPBadRequest(explanation=msg)

        LOG.debug('Manage volume request body: %s', body)

        kwargs = {}
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            # Not found exception will be handled at the wsgi level
            kwargs['volume_type'] = (volume_types.get_by_name_or_id(
                context, req_volume_type))
        else:
            kwargs['volume_type'] = {}

        kwargs['name'] = volume.get('name', None)
        kwargs['description'] = volume.get('description', None)
        kwargs['metadata'] = volume.get('metadata', None)
        kwargs['availability_zone'] = volume.get('availability_zone', None)
        kwargs['bootable'] = utils.get_bool_param('bootable', volume)

        utils.check_metadata_properties(kwargs['metadata'])

        # Not found exception will be handled at wsgi level
        new_volume = self.volume_api.manage_existing(context, volume['host'],
                                                     volume['ref'], **kwargs)

        utils.add_visible_admin_metadata(new_volume)

        return self._view_builder.detail(req, new_volume)
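
Unlike the microversioned variant in Example #1, this legacy form requires both 'ref' and 'host', and the set difference reports every missing element at once. A tiny sketch of that check against a made-up payload:

# Hedged sketch of the required-keys check above; the payload is made up.
required_keys = {'ref', 'host'}
volume = {'ref': {'source-name': 'existing-lv'}}  # 'host' omitted by mistake
missing_keys = sorted(required_keys - set(volume))
if missing_keys:
    # Mirrors the HTTPBadRequest explanation built in the handler.
    print('The following elements are required: %s' % ', '.join(missing_keys))
    # -> The following elements are required: host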
Example #6
    def create(self, req, body):
        """Instruct Cinder to manage a storage object.

        Manages an existing backend storage object (e.g. a Linux logical
        volume or a SAN disk) by creating the Cinder objects required to manage
        it, and possibly renaming the backend storage object
        (driver dependent).

        From an API perspective, this operation behaves very much like a
        volume creation operation, except that properties such as image,
        snapshot and volume references don't make sense, because we are taking
        an existing storage object into Cinder management.

        Required HTTP Body:

        .. code-block:: json

         {
           "volume": {
             "host": "<Cinder host on which the existing storage resides>",
             "ref": "<Driver-specific reference to existing storage object>"
           }
         }

        See the appropriate Cinder drivers' implementations of the
        manage_volume method to find out the accepted format of 'ref'.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'host' element refers to a
        Cinder host that is not registered.

        The volume will later enter the error state if it is discovered that
        'ref' is bad.

        Optional elements to 'volume' are::

         name               A name for the new volume.
         description        A description for the new volume.
         volume_type        ID or name of a volume type to associate with
                            the new Cinder volume. Does not necessarily
                            guarantee that the managed volume will have the
                            properties described in the volume_type. The
                            driver may choose to fail if it identifies that
                            the specified volume_type is not compatible with
                            the backend storage object.
         metadata           Key/value pairs to be associated with the new
                            volume.
         availability_zone  The availability zone to associate with the new
                            volume.
         bootable           If set to True, marks the volume as bootable.

        """
        context = req.environ['cinder.context']
        authorize_manage(context)

        self.assert_valid_body(body, 'volume')

        volume = body['volume']
        self.validate_name_and_description(volume)

        # Check that the required keys are present; return an error if they
        # are not.
        required_keys = {'ref', 'host'}
        missing_keys = list(required_keys - set(volume.keys()))

        if missing_keys:
            msg = _("The following elements are required: %s") % \
                ', '.join(missing_keys)
            raise exc.HTTPBadRequest(explanation=msg)

        LOG.debug('Manage volume request body: %s', body)

        kwargs = {}
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            # Not found exception will be handled at the wsgi level
            kwargs['volume_type'] = (
                volume_types.get_by_name_or_id(context, req_volume_type))
        else:
            kwargs['volume_type'] = {}

        kwargs['name'] = volume.get('name', None)
        kwargs['description'] = volume.get('description', None)
        kwargs['metadata'] = volume.get('metadata', None)
        kwargs['availability_zone'] = volume.get('availability_zone', None)
        kwargs['bootable'] = utils.get_bool_param('bootable', volume)

        utils.check_metadata_properties(kwargs['metadata'])

        # Not found exception will be handled at wsgi level
        new_volume = self.volume_api.manage_existing(context,
                                                     volume['host'],
                                                     volume['ref'],
                                                     **kwargs)

        utils.add_visible_admin_metadata(new_volume)

        return self._view_builder.detail(req, new_volume)
Example #7
    def create(self, req, body):
        """Instruct Cinder to manage a storage object.

        Manages an existing backend storage object (e.g. a Linux logical
        volume or a SAN disk) by creating the Cinder objects required to manage
        it, and possibly renaming the backend storage object
        (driver dependent)

        From an API perspective, this operation behaves very much like a
        volume creation operation, except that properties such as image,
        snapshot and volume references don't make sense, because we are taking
        an existing storage object into Cinder management.

        Required HTTP Body:

        .. code-block:: json

         {
           "volume": {
             "host": "<Cinder host on which the existing storage resides>",
             "cluster": "<Cinder cluster on which the storage resides>",
             "ref": "<Driver-specific reference to existing storage object>"
           }
         }

        See the appropriate Cinder drivers' implementations of the
        manage_volume method to find out the accepted format of 'ref'.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'host' element refers to a
        Cinder host that is not registered.

        The volume will later enter the error state if it is discovered that
        'ref' is bad.

        Optional elements to 'volume' are::

         name               A name for the new volume.
         description        A description for the new volume.
         volume_type        ID or name of a volume type to associate with
                            the new Cinder volume. Does not necessarily
                            guarantee that the managed volume will have the
                            properties described in the volume_type. The
                            driver may choose to fail if it identifies that
                            the specified volume_type is not compatible with
                            the backend storage object.
         metadata           Key/value pairs to be associated with the new
                            volume.
         availability_zone  The availability zone to associate with the new
                            volume.
         bootable           If set to True, marks the volume as bootable.

        """
        context = req.environ['cinder.context']
        context.authorize(policy.MANAGE_POLICY)

        self.assert_valid_body(body, 'volume')

        volume = body['volume']
        self.validate_name_and_description(volume)

        # Check that the required keys are present; return an error if they
        # are not.
        if 'ref' not in volume:
            raise exception.MissingRequired(element='ref')

        cluster_name, host = common.get_cluster_host(
            req, volume, mv.VOLUME_MIGRATE_CLUSTER)

        LOG.debug('Manage volume request body: %s', body)

        kwargs = {}
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            try:
                kwargs['volume_type'] = volume_types.get_by_name_or_id(
                    context, req_volume_type)
            except exception.VolumeTypeNotFound:
                msg = _("Cannot find requested '%s' "
                        "volume type") % req_volume_type
                raise exception.InvalidVolumeType(reason=msg)
        else:
            kwargs['volume_type'] = {}

        kwargs['name'] = volume.get('name', None)
        kwargs['description'] = volume.get('description', None)
        kwargs['metadata'] = volume.get('metadata', None)
        kwargs['availability_zone'] = volume.get('availability_zone', None)
        kwargs['bootable'] = utils.get_bool_param('bootable', volume)

        utils.check_metadata_properties(kwargs['metadata'])

        try:
            new_volume = self.volume_api.manage_existing(context,
                                                         host,
                                                         cluster_name,
                                                         volume['ref'],
                                                         **kwargs)
        except exception.ServiceNotFound:
            msg = _("%(name)s '%(value)s' not found") % {
                'name': 'Host' if host else 'Cluster',
                'value': host or cluster_name}
            raise exception.ServiceUnavailable(message=msg)

        utils.add_visible_admin_metadata(new_volume)

        return self._view_builder.detail(req, new_volume)
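
One last detail shared by Examples #1 and #7: the ServiceNotFound handler names whichever identifier the caller actually supplied. A small sketch of just that message formatting, with hypothetical values:

# Hedged sketch of the message built in the ServiceNotFound branch above.
def not_found_message(host, cluster_name):
    return "%(name)s '%(value)s' not found" % {
        'name': 'Host' if host else 'Cluster',
        'value': host or cluster_name,
    }

print(not_found_message('myhost@lvm', None))  # Host 'myhost@lvm' not found
print(not_found_message(None, 'mycluster'))   # Cluster 'mycluster' not found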