Example #1
    def create(self, context, volume_id, display_name):
        """Creates an entry in the transfers table."""
        volume_api.check_policy(context, 'create_transfer')
        LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['status'] != "available":
            raise exception.InvalidVolume(reason=_("status must be available"))

        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.start")
        # The salt is just a short random string.
        salt = self._get_random_string(CONF.volume_transfer_salt_length)
        auth_key = self._get_random_string(CONF.volume_transfer_key_length)
        crypt_hash = self._get_crypt_hash(salt, auth_key)

        # TODO(ollie): Transfer expiry needs to be implemented.
        transfer_rec = {
            'volume_id': volume_id,
            'display_name': display_name,
            'salt': salt,
            'crypt_hash': crypt_hash,
            'expires_at': None
        }

        try:
            transfer = self.db.transfer_create(context, transfer_rec)
        except Exception:
            LOG.error(_LE("Failed to create transfer record "
                          "for %s"), volume_id)
            raise
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.end")
        return {
            'id': transfer['id'],
            'volume_id': transfer['volume_id'],
            'display_name': transfer['display_name'],
            'auth_key': auth_key,
            'created_at': transfer['created_at']
        }
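
For context, a minimal sketch of the two helpers the method above assumes, `_get_random_string` and `_get_crypt_hash`. The alphabet and the SHA-1 HMAC here are illustrative assumptions, not necessarily what the real transfer API uses:

    import hashlib
    import hmac
    import secrets
    import string

    def _get_random_string(length):
        # Random alphanumeric string, used for both the salt and the auth key.
        alphabet = string.ascii_letters + string.digits
        return ''.join(secrets.choice(alphabet) for _ in range(length))

    def _get_crypt_hash(salt, auth_key):
        # HMAC the auth key with the salt so only the hash is persisted;
        # accept() can later recompute the hash and compare.
        return hmac.new(salt.encode('utf-8'),
                        auth_key.encode('utf-8'),
                        hashlib.sha1).hexdigest()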
Example #2
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata."""
        volume = self.db.volume_get(self.context, backup.volume_id)

        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)

        container = self._create_container(backup)

        object_prefix = self._generate_object_name_prefix(backup)
        backup.service_metadata = object_prefix
        backup.save()

        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug(
            'starting backup of volume: %(volume_id)s,'
            ' volume size: %(volume_size_bytes)d, object names'
            ' prefix %(object_prefix)s, availability zone:'
            ' %(availability_zone)s', {
                'volume_id': backup.volume_id,
                'volume_size_bytes': volume_size_bytes,
                'object_prefix': object_prefix,
                'availability_zone': availability_zone,
            })
        object_meta = {
            'id': 1,
            'list': [],
            'prefix': object_prefix,
            'volume_meta': None
        }
        object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
        extra_metadata = self.get_extra_metadata(backup, volume)
        if extra_metadata is not None:
            object_meta['extra_metadata'] = extra_metadata

        return (object_meta, object_sha256, extra_metadata, container,
                volume_size_bytes)
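
For reference, `units.Gi` is oslo.utils' gibibyte constant (2**30), so the size conversion above is plain multiplication:

    from oslo_utils import units

    volume_size_gb = 10
    volume_size_bytes = volume_size_gb * units.Gi  # 10 GiB -> 10737418240 bytes
    assert volume_size_bytes == 10 * 1024 ** 3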
Example #3
    def _create_cloned_volume(self, volume, src_vref):
        LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'), {
            'src': src_vref.id,
            'dst': volume.id
        })

        if src_vref.status != 'available':
            msg = _("Volume status must be 'available'.")
            raise exception.InvalidVolume(msg)

        volume_name = CONF.volume_name_template % volume.id

        # Create fake snapshot object
        snap_attrs = [
            'volume_name', 'size', 'volume_size', 'name', 'volume_id', 'id',
            'volume'
        ]
        Snapshot = collections.namedtuple('Snapshot', snap_attrs)

        temp_snapshot = Snapshot(id=src_vref.id,
                                 volume_name=volume_name,
                                 size=src_vref.size,
                                 volume_size=src_vref.size,
                                 name='clone-snap-%s' % src_vref.id,
                                 volume_id=src_vref.id,
                                 volume=volume)

        self._create_snapshot_ploop(temp_snapshot)
        try:
            volume.provider_location = src_vref.provider_location
            info_path = self._local_path_volume_info(volume)
            snap_info = {'active': 'volume-%s' % volume.id}
            self._write_info_file(info_path, snap_info)
            self._copy_volume_from_snapshot(temp_snapshot, volume, volume.size)

        finally:
            self.delete_snapshot(temp_snapshot)

        return {'provider_location': src_vref.provider_location}
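
The namedtuple here is a stand-in for a real Snapshot object: the copy helpers only read attributes, so any object exposing those fields works. A self-contained illustration of the trick (field values are made up):

    import collections

    snap_attrs = ['volume_name', 'size', 'volume_size', 'name', 'volume_id',
                  'id', 'volume']
    Snapshot = collections.namedtuple('Snapshot', snap_attrs)

    temp = Snapshot(id='src-id', volume_name='volume-dst-id', size=1,
                    volume_size=1, name='clone-snap-src-id',
                    volume_id='src-id', volume=None)
    print(temp.name)        # clone-snap-src-id
    print(temp._asdict())   # plain-dict view of the fake snapshot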
Example #4
    def test_create_attachment_in_use_volume_multiattach_false(self,
                                                               mock_reserve):
        """Negative test for creating an attachment on an in-use volume."""
        req = fakes.HTTPRequest.blank('/v3/%s/attachments' %
                                      fake.PROJECT_ID,
                                      version=mv.NEW_ATTACH)
        body = {
            "attachment":
                {
                    "connector": None,
                    "instance_uuid": fake.UUID1,
                    "volume_uuid": self.volume1.id
                },
        }
        mock_reserve.side_effect = (
            exception.InvalidVolume(
                reason="Volume %s status must be available or "
                       "downloading" % self.volume1.id))
        # Note that if we were using the full WSGi stack, the
        # ResourceExceptionHandler would convert this to an HTTPBadRequest.
        self.assertRaises(exception.InvalidVolume,
                          self.controller.create, req, body=body)
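
For readers unfamiliar with `side_effect`: assigning an exception instance makes the mock raise it when called, which is what this test relies on (the decorator supplying `mock_reserve` is outside the excerpt). A self-contained sketch with a stand-in exception class:

    from unittest import mock

    class InvalidVolume(Exception):
        pass

    mock_reserve = mock.Mock(
        side_effect=InvalidVolume("Volume fake-id status must be available"))
    try:
        mock_reserve()
    except InvalidVolume as exc:
        print("raised as expected:", exc)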
Example #5
    def _create_cloned_volume(self, volume, src_vref):
        LOG.info(
            _('Cloning volume %(src)s to volume %(dst)s'), {
                'src': src_vref['id'],
                'dst': volume['id']
            })

        if src_vref['status'] != 'available':
            msg = _("Volume status must be 'available'.")
            raise exception.InvalidVolume(msg)

        volume_name = CONF.volume_name_template % volume['id']

        volume_info = {
            'provider_location': src_vref['provider_location'],
            'size': src_vref['size'],
            'id': volume['id'],
            'name': volume_name,
            'status': src_vref['status']
        }
        temp_snapshot = {
            'volume_name': volume_name,
            'size': src_vref['size'],
            'volume_size': src_vref['size'],
            'name': 'clone-snap-%s' % src_vref['id'],
            'volume_id': src_vref['id'],
            'id': 'tmp-snap-%s' % src_vref['id'],
            'volume': src_vref
        }
        self._create_snapshot(temp_snapshot)
        try:
            self._copy_volume_from_snapshot(temp_snapshot, volume_info,
                                            volume['size'])

        finally:
            self._delete_snapshot(temp_snapshot)

        return {'provider_location': src_vref['provider_location']}
Example #6
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata."""
        backup_id = backup['id']
        volume_id = backup['volume_id']
        volume = self.db.volume_get(self.context, volume_id)

        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)

        try:
            container = self._create_container(self.context, backup)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)

        object_prefix = self._generate_swift_object_name_prefix(backup)
        backup['service_metadata'] = object_prefix
        self.db.backup_update(self.context, backup_id,
                              {'service_metadata': object_prefix})
        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug(
            'starting backup of volume: %(volume_id)s to swift,'
            ' volume size: %(volume_size_bytes)d, swift object names'
            ' prefix %(object_prefix)s, availability zone:'
            ' %(availability_zone)s', {
                'volume_id': volume_id,
                'volume_size_bytes': volume_size_bytes,
                'object_prefix': object_prefix,
                'availability_zone': availability_zone,
            })
        object_meta = {
            'id': 1,
            'list': [],
            'prefix': object_prefix,
            'volume_meta': None
        }
        return object_meta, container
Example #7
    def _delete_snapshot(self, snapshot):
        """Delete snapshot.

        If the volume does not have a backing or the snapshot does not
        exist, simply pass; otherwise delete the snapshot.
        Snapshot deletion is supported only for available volumes.

        :param snapshot: Snapshot object
        """

        volume = snapshot['volume']
        if volume['status'] != 'available':
            msg = _("Delete snapshot of volume not supported in state: %s.")
            LOG.error(msg, volume['status'])
            raise exception.InvalidVolume(msg % volume['status'])
        backing = self.volumeops.get_backing(snapshot['volume_name'])
        if not backing:
            LOG.info(_("There is no backing, and so there is no "
                       "snapshot: %s."), snapshot['name'])
        else:
            self.volumeops.delete_snapshot(backing, snapshot['name'])
            LOG.info(_("Successfully deleted snapshot: %s."),
                     snapshot['name'])
Example #8
    def _create_snapshot(self, snapshot):
        """Creates a snapshot.

        If the volume does not have a backing, simply pass; otherwise create
        a snapshot.
        Snapshots are supported only for available volumes.

        :param snapshot: Snapshot object
        """

        volume = snapshot['volume']
        if volume['status'] != 'available':
            msg = _("Snapshot of volume not supported in state: %s.")
            LOG.error(msg, volume['status'])
            raise exception.InvalidVolume(msg % volume['status'])
        backing = self.volumeops.get_backing(snapshot['volume_name'])
        if not backing:
            LOG.info(_("There is no backing, so will not create "
                       "snapshot: %s."), snapshot['name'])
            return
        self.volumeops.create_snapshot(backing, snapshot['name'],
                                       snapshot['display_description'])
        LOG.info(_("Successfully created snapshot: %s."), snapshot['name'])
Example #9
    def create_snapshot(self, snapshot):
        """Create snapshot from volume."""

        LOG.debug('enter: create_snapshot: create %(snap)s from %(vol)s.', {
            'snap': snapshot['name'],
            'vol': snapshot['volume']['name']
        })

        status = snapshot['volume']['status']
        if status not in ['available', 'in-use']:
            msg = (_('create_snapshot: Volume status must be "available" or '
                     '"in-use" for snapshot. The invalid status is %s.') %
                   status)
            raise exception.InvalidVolume(msg)

        self._create_and_copy_vdisk_data(snapshot['volume']['name'],
                                         snapshot['volume']['id'],
                                         snapshot['name'], snapshot['id'])

        LOG.debug('leave: create_snapshot: create %(snap)s from %(vol)s.', {
            'snap': snapshot['name'],
            'vol': snapshot['volume']['name']
        })
Example #10
    def extend(self, context, volume, new_size):
        if volume['status'] != 'available':
            msg = _('Volume status must be available to extend.')
            raise exception.InvalidVolume(reason=msg)

        size_increase = int(new_size) - volume['size']
        if size_increase <= 0:
            msg = (_("New size for extend must be greater "
                     "than current size. (current: %(size)s, "
                     "extended: %(new_size)s)") % {'new_size': new_size,
                                                   'size': volume['size']})
            raise exception.InvalidInput(reason=msg)

        try:
            reservations = QUOTAS.reserve(context, gigabytes=+size_increase)
        except exception.OverQuota as exc:
            usages = exc.kwargs['usages']
            quotas = exc.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            msg = _("Quota exceeded for %(s_pid)s, tried to extend volume by "
                    "%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG already "
                    "consumed).")
            LOG.error(msg, {'s_pid': context.project_id,
                            's_size': size_increase,
                            'd_consumed': _consumed('gigabytes'),
                            'd_quota': quotas['gigabytes']})
            raise exception.VolumeSizeExceedsAvailableQuota(
                requested=size_increase,
                consumed=_consumed('gigabytes'),
                quota=quotas['gigabytes'])

        self.update(context, volume, {'status': 'extending'})
        self.volume_rpcapi.extend_volume(context, volume, new_size,
                                         reservations)
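
The `_consumed` helper reads the usage bookkeeping that the quota reservation reports: consumed is whatever is reserved plus whatever is already in use. A toy walk-through with made-up numbers, shaped like the `exc.kwargs` payloads above:

    usages = {'gigabytes': {'reserved': 5, 'in_use': 90}}
    quotas = {'gigabytes': 100}

    def _consumed(name):
        return usages[name]['reserved'] + usages[name]['in_use']

    size_increase = 10
    # 95 consumed + 10 requested > 100 quota -> the reserve would fail.
    print(_consumed('gigabytes') + size_increase > quotas['gigabytes'])  # True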
Example #11
    def restore_backup(self, context, backup, volume_id):
        """Restore volume backups from configured backup service."""
        LOG.info(
            _LI('Restore backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        volume = self.db.volume_get(context, volume_id)
        volume_host = volume_utils.extract_host(volume['host'], 'backend')
        backend = self._get_volume_backend(host=volume_host)
        self._notify_about_backup_usage(context, backup, "restore.start")

        backup.host = self.host
        backup.save()

        expected_status = 'restoring-backup'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted, expected volume status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.RESTORING
        actual_status = backup['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted: expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            self._update_backup_error(backup, context, err)
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        if volume['size'] > backup['size']:
            LOG.info(
                _LI('Volume: %(vol_id)s, size: %(vol_size)d is '
                    'larger than backup: %(backup_id)s, '
                    'size: %(backup_size)d, continuing with restore.'), {
                        'vol_id': volume['id'],
                        'vol_size': volume['size'],
                        'backup_id': backup['id'],
                        'backup_size': backup['size']
                    })

        backup_service = self._map_service_to_driver(backup['service'])
        configured_service = self.driver_name
        if backup_service != configured_service:
            err = _('Restore backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s].') % {
                        'configured_service': configured_service,
                        'backup_service': backup_service,
                    }
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught,
            # the volume status will be set to 'error_restoring' and
            # the backup status back to available.
            utils.require_driver_initialized(self._get_driver(backend))

            backup_service = self.service.get_backup_driver(context)
            self._get_driver(backend).restore_backup(context, backup, volume,
                                                     backup_service)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_restoring'})
                backup.status = fields.BackupStatus.AVAILABLE
                backup.save()

        self.db.volume_update(context, volume_id, {'status': 'available'})
        backup.status = fields.BackupStatus.AVAILABLE
        backup.save()
        LOG.info(
            _LI('Restore backup finished, backup %(backup_id)s restored'
                ' to volume %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })
        self._notify_about_backup_usage(context, backup, "restore.end")
Example #12
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service."""
        volume_id = backup.volume_id
        volume = self.db.volume_get(context, volume_id)
        previous_status = volume.get('previous_status', None)
        LOG.info(
            _LI('Create backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        self._notify_about_backup_usage(context, backup, "create.start")
        volume_host = volume_utils.extract_host(volume['host'], 'backend')
        backend = self._get_volume_backend(host=volume_host)

        backup.host = self.host
        backup.service = self.driver_name
        backup.save()

        expected_status = 'backing-up'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = _('Create backup aborted, expected volume status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, context, err)
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.CREATING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, context, err)
            backup.save()
            raise exception.InvalidBackup(reason=err)

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught,
            # the volume status will be set back to its previous status
            # and the backup status to 'error'.
            utils.require_driver_initialized(self._get_driver(backend))

            backup_service = self.service.get_backup_driver(context)
            self._get_driver(backend).backup_volume(context, backup,
                                                    backup_service)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(
                    context, volume_id, {
                        'status': previous_status,
                        'previous_status': 'error_backing-up'
                    })
                self._update_backup_error(backup, context, six.text_type(err))

        # Restore the original status.
        self.db.volume_update(context, volume_id, {
            'status': previous_status,
            'previous_status': 'backing-up'
        })
        backup.status = fields.BackupStatus.AVAILABLE
        backup.size = volume['size']
        backup.availability_zone = self.az
        backup.save()
        # Handle the num_dependent_backups of parent backup when child backup
        # has created successfully.
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(context, backup.parent_id)
            parent_backup.num_dependent_backups += 1
            parent_backup.save()
        LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
Example #13
    def accept(self, context, transfer_id, auth_key):
        """Accept a volume that has been offered for transfer."""
        # We must use an elevated context to see the volume that is still
        # owned by the donor.
        volume_api.check_policy(context, 'accept_transfer')
        transfer = self.db.transfer_get(context.elevated(), transfer_id)

        crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key)
        if crypt_hash != transfer['crypt_hash']:
            msg = (_("Attempt to transfer %s with invalid auth key.") %
                   transfer_id)
            LOG.error(msg)
            raise exception.InvalidAuthKey(reason=msg)

        volume_id = transfer['volume_id']
        vol_ref = self.db.volume_get(context.elevated(), volume_id)
        if vol_ref['consistencygroup_id']:
            msg = _("Volume %s must not be part of a consistency "
                    "group.") % vol_ref['id']
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        volume_utils.notify_about_volume_usage(context, vol_ref,
                                               "transfer.accept.start")

        try:
            reserve_opts = {'volumes': 1, 'gigabytes': vol_ref.size}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        vol_ref.volume_type_id)
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG volume (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': vol_ref['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeSizeExceedsAvailableQuota(
                        requested=vol_ref['size'],
                        consumed=_consumed(over),
                        quota=quotas[over])
                elif 'volumes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "volume (%(d_consumed)d volumes "
                              "already consumed)")
                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.VolumeLimitExceeded(allowed=quotas[over],
                                                        name=over)

        try:
            donor_id = vol_ref['project_id']
            reserve_opts = {'volumes': -1, 'gigabytes': -vol_ref.size}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        vol_ref.volume_type_id)
            donor_reservations = QUOTAS.reserve(context.elevated(),
                                                project_id=donor_id,
                                                **reserve_opts)
        except Exception:
            donor_reservations = None
            LOG.exception(
                _LE("Failed to update quota donating volume"
                    " transfer id %s"), transfer_id)

        try:
            # Transfer ownership of the volume now, must use an elevated
            # context.
            self.volume_api.accept_transfer(context, vol_ref, context.user_id,
                                            context.project_id)
            self.db.transfer_accept(context.elevated(), transfer_id,
                                    context.user_id, context.project_id)
            QUOTAS.commit(context, reservations)
            if donor_reservations:
                QUOTAS.commit(context, donor_reservations, project_id=donor_id)
            LOG.info(_LI("Volume %s has been transferred."), volume_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, reservations)
                if donor_reservations:
                    QUOTAS.rollback(context,
                                    donor_reservations,
                                    project_id=donor_id)

        vol_ref = self.db.volume_get(context, volume_id)
        volume_utils.notify_about_volume_usage(context, vol_ref,
                                               "transfer.accept.end")
        return {
            'id': transfer_id,
            'display_name': transfer['display_name'],
            'volume_id': vol_ref['id']
        }
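
The commit-or-rollback shape above leans on `excutils.save_and_reraise_exception`, which runs the cleanup in its body and then re-raises the original exception. A stripped-down sketch of that pattern (requires oslo.utils; the function and its arguments are illustrative):

    from oslo_utils import excutils

    def accept_with_quota(do_transfer, commit, rollback):
        try:
            do_transfer()
            commit()
        except Exception:
            with excutils.save_and_reraise_exception():
                rollback()  # cleanup runs, then the original error re-raises

    try:
        accept_with_quota(lambda: 1 / 0,
                          lambda: print('commit'),
                          lambda: print('rollback'))
    except ZeroDivisionError:
        print('original exception preserved')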
Example #14
    def _get_iscsi_properties(self, volume):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future.

        The properties are:

        :target_discovered:    boolean indicating whether discovery was used

        :target_iqn:    the IQN of the iSCSI target

        :target_portal:    the portal of the iSCSI target

        :target_lun:    the lun of the iSCSI target

        :volume_id:    the uuid of the volume

        :auth_method:, :auth_username:, :auth_password:

            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        :access_mode:    the volume access mode the client is allowed to use
                         ('rw' or 'ro' currently supported)
        """

        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                       (volume['name']))
                raise exception.InvalidVolume(reason=msg)

            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True

        results = location.split(" ")
        properties['target_portal'] = results[0].split(",")[0]
        properties['target_iqn'] = results[1]
        try:
            properties['target_lun'] = int(results[2])
        except (IndexError, ValueError):
            # NOTE(jdg): The following is carried over from the existing
            # code.  The trick here is that different targets use different
            # default lun numbers, the base driver with tgtadm uses 1
            # others like LIO use 0.
            if (self.configuration.volume_driver in [
                    'cinder.volume.drivers.lvm.LVMISCSIDriver',
                    'cinder.volume.drivers.lvm.ThinLVMVolumeDriver'
            ] and self.configuration.iscsi_helper == 'tgtadm'):
                properties['target_lun'] = 1
            else:
                properties['target_lun'] = 0

        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size

        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None

        return properties
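
The split logic assumes `provider_location` matches iscsiadm discovery output, i.e. '<portal>,<tpgt> <iqn> <lun>'. A walk-through on a made-up value:

    # Hypothetical provider_location in iSCSI discovery format.
    location = '10.0.2.15:3260,1 iqn.2010-10.org.openstack:volume-fake 1'

    results = location.split(" ")
    target_portal = results[0].split(",")[0]  # '10.0.2.15:3260'
    target_iqn = results[1]                   # the target IQN
    target_lun = int(results[2])              # 1
    print(target_portal, target_iqn, target_lun)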
Example #15
    def retype(self, context, volume, new_type, migration_policy=None):
        """Attempt to modify the type associated with an existing volume."""
        if volume['status'] not in ['available', 'in-use']:
            msg = _('Unable to update type due to incorrect status '
                    'on volume: %s') % volume['id']
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        if volume['migration_status'] is not None:
            msg = (_("Volume %s is already part of an active migration.") %
                   volume['id'])
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        if migration_policy and migration_policy not in ['on-demand', 'never']:
            msg = _('migration_policy must be \'on-demand\' or \'never\', '
                    'passed: %s') % migration_policy
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        # Support specifying volume type by ID or name
        try:
            if uuidutils.is_uuid_like(new_type):
                vol_type = volume_types.get_volume_type(context, new_type)
            else:
                vol_type = volume_types.get_volume_type_by_name(
                    context, new_type)
        except exception.InvalidVolumeType:
            msg = _('Invalid volume_type passed: %s') % new_type
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        vol_type_id = vol_type['id']
        vol_type_qos_id = vol_type['qos_specs_id']

        old_vol_type = None
        old_vol_type_id = volume['volume_type_id']
        old_vol_type_qos_id = None

        # Error if the original and new type are the same
        if volume['volume_type_id'] == vol_type_id:
            msg = (_('New volume_type same as original: %s') % new_type)
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        if volume['volume_type_id']:
            old_vol_type = volume_types.get_volume_type(
                context, old_vol_type_id)
            old_vol_type_qos_id = old_vol_type['qos_specs_id']

        # We don't support changing encryption requirements yet
        old_enc = volume_types.get_volume_type_encryption(
            context, old_vol_type_id)
        new_enc = volume_types.get_volume_type_encryption(context, vol_type_id)
        if old_enc != new_enc:
            msg = _('Retype cannot change encryption requirements')
            raise exception.InvalidInput(reason=msg)

        # We don't support changing QoS at the front-end yet for in-use volumes
        # TODO(avishay): Call Nova to change QoS setting (libvirt has support
        # - virDomainSetBlockIoTune() - Nova does not have support yet).
        if (volume['status'] != 'available'
                and old_vol_type_qos_id != vol_type_qos_id):
            for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
                if qos_id:
                    specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
                    if specs['qos_specs']['consumer'] != 'back-end':
                        msg = _('Retype cannot change front-end qos specs for '
                                'in-use volumes')
                        raise exception.InvalidInput(reason=msg)

        # We're checking here so that we can report any quota issues as
        # early as possible, but we won't commit until we change the type.
        # We pass the reservations onward in case we need to roll back.
        reservations = quota_utils.get_volume_type_reservation(
            context, volume, vol_type_id)

        self.update(context, volume, {'status': 'retyping'})

        request_spec = {
            'volume_properties': volume,
            'volume_id': volume['id'],
            'volume_type': vol_type,
            'migration_policy': migration_policy,
            'quota_reservations': reservations
        }

        self.scheduler_rpcapi.retype(context,
                                     CONF.volume_topic,
                                     volume['id'],
                                     request_spec=request_spec,
                                     filter_properties={})
Example #16
    def update_readonly_flag(self, context, volume, flag):
        if volume['status'] != 'available':
            msg = _('Volume status must be available to update readonly flag.')
            raise exception.InvalidVolume(reason=msg)
        self.update_volume_admin_metadata(context.elevated(), volume,
                                          {'readonly': str(flag)})
Example #17
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service."""
        volume_id = backup.volume_id
        volume = objects.Volume.get_by_id(context, volume_id)
        previous_status = volume.get('previous_status', None)
        LOG.info(
            _LI('Create backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        self._notify_about_backup_usage(context, backup, "create.start")

        backup.host = self.host
        backup.service = self.driver_name
        backup.availability_zone = self.az
        backup.save()

        expected_status = 'backing-up'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = _('Create backup aborted, expected volume status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, err)
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.CREATING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, err)
            backup.save()
            raise exception.InvalidBackup(reason=err)

        try:
            self._run_backup(context, backup, volume)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(
                    context, volume_id, {
                        'status': previous_status,
                        'previous_status': 'error_backing-up'
                    })
                self._update_backup_error(backup, six.text_type(err))

        # Restore the original status.
        self.db.volume_update(context, volume_id, {
            'status': previous_status,
            'previous_status': 'backing-up'
        })
        backup.status = fields.BackupStatus.AVAILABLE
        backup.size = volume['size']
        backup.save()

        # Handle the num_dependent_backups of parent backup when child backup
        # has created successfully.
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(context, backup.parent_id)
            parent_backup.num_dependent_backups += 1
            parent_backup.save()
        LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
Example #18
    def check_detach(self, volume):
        # TODO(vish): abstract status checking?
        if volume['status'] != "in-use":
            msg = _("status must be in-use to detach")
            raise exception.InvalidVolume(reason=msg)
Example #19
    def restore(self, context, backup_id, volume_id=None, name=None):
        """Make the RPC call to restore a volume backup."""
        backup = self.get(context, backup_id)
        context.authorize(policy.RESTORE_POLICY, target_obj=backup)
        if backup['status'] != fields.BackupStatus.AVAILABLE:
            msg = _('Backup status must be available')
            raise exception.InvalidBackup(reason=msg)

        size = backup['size']
        if size is None:
            msg = _('Backup to be restored has invalid size')
            raise exception.InvalidBackup(reason=msg)

        # Create a volume if none was specified. If a volume was specified,
        # check that it is large enough for the backup.
        if volume_id is None:
            if name is None:
                name = 'restore_backup_%s' % backup_id

            description = 'auto-created_from_restore_from_backup'

            LOG.info(
                "Creating volume of %(size)s GB for restore of "
                "backup %(backup_id)s.", {
                    'size': size,
                    'backup_id': backup_id
                })
            volume = self.volume_api.create(context, size, name, description)
            volume_id = volume['id']

            while True:
                volume = self.volume_api.get(context, volume_id)
                if volume['status'] != 'creating':
                    break
                greenthread.sleep(1)

            if volume['status'] == "error":
                msg = (_('Error while creating volume %(volume_id)s '
                         'for restoring backup %(backup_id)s.') % {
                             'volume_id': volume_id,
                             'backup_id': backup_id
                         })
                raise exception.InvalidVolume(reason=msg)
        else:
            volume = self.volume_api.get(context, volume_id)

        if volume['status'] != "available":
            msg = _('Volume to be restored to must be available')
            raise exception.InvalidVolume(reason=msg)

        LOG.debug('Checking backup size %(bs)s against volume size %(vs)s', {
            'bs': size,
            'vs': volume['size']
        })
        if size > volume['size']:
            msg = (_('volume size %(volume_size)d is too small to restore '
                     'backup of size %(size)d.') % {
                         'volume_size': volume['size'],
                         'size': size
                     })
            raise exception.InvalidVolume(reason=msg)

        LOG.info(
            "Overwriting volume %(volume_id)s with restore of "
            "backup %(backup_id)s", {
                'volume_id': volume_id,
                'backup_id': backup_id
            })

        # Set the status here rather than at the start (which would require
        # unrolling it on each error condition); the window should be very
        # small.
        backup.host = self._get_available_backup_service_host(
            backup.host, backup.availability_zone)
        backup.status = fields.BackupStatus.RESTORING
        backup.restore_volume_id = volume.id
        backup.save()
        self.db.volume_update(context, volume_id,
                              {'status': 'restoring-backup'})

        self.backup_rpcapi.restore_backup(context, backup.host, backup,
                                          volume_id)

        d = {
            'backup_id': backup_id,
            'volume_id': volume_id,
            'volume_name': volume['display_name'],
        }

        return d
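
The `while True` loop above polls with no upper bound. A generic, bounded version of the same wait-until-settled pattern, with `time.sleep` standing in for `greenthread.sleep` so the sketch runs anywhere (the bound is an addition, not part of the code above):

    import itertools
    import time

    def wait_until_not(get_status, transient='creating', interval=1,
                       max_polls=60):
        for _ in range(max_polls):
            status = get_status()
            if status != transient:
                return status
            time.sleep(interval)
        raise TimeoutError('stuck in %r' % transient)

    # Toy status source: 'creating' twice, then 'available'.
    statuses = itertools.chain(['creating'] * 2, itertools.repeat('available'))
    print(wait_until_not(lambda: next(statuses), interval=0))  # available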
Example #20
    def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
        add_volumes_new = ""
        for volume in volumes:
            if volume['id'] in add_volumes_list:
                # Volume already in group. Remove from add_volumes.
                add_volumes_list.remove(volume['id'])

        for add_vol in add_volumes_list:
            try:
                add_vol_ref = self.db.volume_get(context, add_vol)
            except exception.VolumeNotFound:
                msg = (_("Cannot add volume %(volume_id)s to "
                         "group %(group_id)s because volume cannot be "
                         "found.") % {
                             'volume_id': add_vol,
                             'group_id': group.id
                         })
                raise exception.InvalidVolume(reason=msg)
            orig_group = add_vol_ref.get('group_id', None)
            if orig_group:
                # If volume to be added is already in the group to be updated,
                # it should have been removed from the add_volumes_list in the
                # beginning of this function. If we are here, it means it is
                # in a different group.
                msg = (_("Cannot add volume %(volume_id)s to group "
                         "%(group_id)s because it is already in "
                         "group %(orig_group)s.") % {
                             'volume_id': add_vol_ref['id'],
                             'group_id': group.id,
                             'orig_group': orig_group
                         })
                raise exception.InvalidVolume(reason=msg)
            if add_vol_ref:
                add_vol_type_id = add_vol_ref.get('volume_type_id', None)
                if not add_vol_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to group "
                             "%(group_id)s because it has no volume "
                             "type.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id
                             })
                    raise exception.InvalidVolume(reason=msg)
                vol_type_ids = [v_type.id for v_type in group.volume_types]
                if add_vol_type_id not in vol_type_ids:
                    msg = (_("Cannot add volume %(volume_id)s to group "
                             "%(group_id)s because volume type "
                             "%(volume_type)s is not supported by the "
                             "group.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'volume_type': add_vol_type_id
                             })
                    raise exception.InvalidVolume(reason=msg)
                if (add_vol_ref['status']
                        not in VALID_ADD_VOL_TO_GROUP_STATUS):
                    msg = (_("Cannot add volume %(volume_id)s to group "
                             "%(group_id)s because volume is in an "
                             "invalid state: %(status)s. Valid states are: "
                             "%(valid)s.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'status': add_vol_ref['status'],
                                 'valid': VALID_ADD_VOL_TO_GROUP_STATUS
                             })
                    raise exception.InvalidVolume(reason=msg)

                # group.host and add_vol_ref['host'] are in this format:
                # 'host@backend#pool'. Extract host (host@backend) before
                # doing comparison.
                vol_host = vol_utils.extract_host(add_vol_ref['host'])
                group_host = vol_utils.extract_host(group.host)
                if group_host != vol_host:
                    raise exception.InvalidVolume(
                        reason=_("Volume is not local to this node."))

                # Volume exists. It will be added to the group.
                if add_volumes_new:
                    add_volumes_new += ","
                add_volumes_new += add_vol_ref['id']

            else:
                msg = (_("Cannot add volume %(volume_id)s to group "
                         "%(group_id)s because volume does not exist.") % {
                             'volume_id': add_vol,
                             'group_id': group.id
                         })
                raise exception.InvalidVolume(reason=msg)

        return add_volumes_new
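
The host comparison depends on `vol_utils.extract_host` stripping the `#pool` suffix. A minimal stand-in showing just the normalization used here (the real helper also supports other extraction levels and missing components):

    def extract_host(host):
        # 'host@backend#pool' -> 'host@backend' (default 'backend' level).
        return host.split('#')[0]

    print(extract_host('node1@lvm#pool_a'))  # node1@lvm
    print(extract_host('node1@lvm#pool_a') ==
          extract_host('node1@lvm#pool_b'))  # True -> same backend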
Example #21
    def _get_iscsi_properties(self, volume, multipath=False):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future.

        The properties are:

        :target_discovered:    boolean indicating whether discovery was used

        :target_iqn:    the IQN of the iSCSI target

        :target_portal:    the portal of the iSCSI target

        :target_lun:    the lun of the iSCSI target

        :volume_id:    the uuid of the volume

        :auth_method:, :auth_username:, :auth_password:

            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        :discard:    boolean indicating if discard is supported

        Some drivers that support multiple connections (for multipath
        and for single path with failover on connection failure) also return
        :target_iqns, :target_portals, :target_luns, which contain lists of
        multiple values. The main portal information is also returned in
        :target_iqn, :target_portal, :target_lun for backward compatibility.

        Note that some drivers don't return :target_portals even if they
        support multipath; the connector should then use sendtargets
        discovery to find the other portals if it supports multipath.
        """

        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                       (volume['name']))
                raise exception.InvalidVolume(reason=msg)

            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True

        results = location.split(" ")
        portals = results[0].split(",")[0].split(";")
        iqn = results[1]
        nr_portals = len(portals)
        try:
            lun = int(results[2])
        except (IndexError, ValueError):
            # NOTE(jdg): The following is carried over from the existing
            # code.  The trick here is that different targets use different
            # default lun numbers, the base driver with tgtadm uses 1
            # others like LIO use 0.
            if (self.configuration.volume_driver
                    == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver'
                    and self.configuration.iscsi_helper == 'tgtadm'):
                lun = 1
            else:
                lun = 0

        if nr_portals > 1 or multipath:
            properties['target_portals'] = portals
            properties['target_iqns'] = [iqn] * nr_portals
            properties['target_luns'] = [lun] * nr_portals
        properties['target_portal'] = portals[0]
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun

        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size

        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None

        return properties
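
For the multipath branch, the first field of `provider_location` may pack several portals separated by semicolons. A made-up example tracing the split:

    # Hypothetical multi-portal provider_location.
    location = ('10.0.2.15:3260;10.0.2.16:3260,1 '
                'iqn.2010-10.org.openstack:volume-fake 0')

    results = location.split(" ")
    portals = results[0].split(",")[0].split(";")
    print(portals)           # ['10.0.2.15:3260', '10.0.2.16:3260']
    # len(portals) > 1 -> target_portals/target_iqns/target_luns are all set.
    print(len(portals) > 1)  # True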
Example #22
    def restore_backup(self, context, backup, volume_id):
        """Restore volume backups from configured backup service."""
        LOG.info(
            _LI('Restore backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        volume = objects.Volume.get_by_id(context, volume_id)
        self._notify_about_backup_usage(context, backup, "restore.start")

        backup.host = self.host
        backup.save()

        expected_status = 'restoring-backup'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted, expected volume status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.RESTORING
        actual_status = backup['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted: expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            self._update_backup_error(backup, err)
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        if volume['size'] > backup['size']:
            LOG.info(
                _LI('Volume: %(vol_id)s, size: %(vol_size)d is '
                    'larger than backup: %(backup_id)s, '
                    'size: %(backup_size)d, continuing with restore.'), {
                        'vol_id': volume['id'],
                        'vol_size': volume['size'],
                        'backup_id': backup['id'],
                        'backup_size': backup['size']
                    })

        backup_service = self._map_service_to_driver(backup['service'])
        configured_service = self.driver_name
        if backup_service != configured_service:
            err = _('Restore backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s].') % {
                        'configured_service': configured_service,
                        'backup_service': backup_service,
                    }
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        try:
            self._run_restore(context, backup, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_restoring'})
                backup.status = fields.BackupStatus.AVAILABLE
                backup.save()

        self.db.volume_update(context, volume_id, {'status': 'available'})
        backup.status = fields.BackupStatus.AVAILABLE
        backup.save()
        LOG.info(
            _LI('Restore backup finished, backup %(backup_id)s restored'
                ' to volume %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })
        self._notify_about_backup_usage(context, backup, "restore.end")
Example #23
    def _get_iscsi_properties(self, volume):
        """Gets iscsi configuration.

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future. The properties are:

        :target_discovered:    boolean indicating whether discovery was used

        :target_iqn:    the IQN of the iSCSI target

        :target_portal:    the portal of the iSCSI target

        :target_lun:    the lun of the iSCSI target

        :volume_id:    the id of the volume (currently used by xen)

        :auth_method:, :auth_username:, :auth_password:

            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        """
        properties = {}

        location = self._do_iscsi_discovery(volume)
        if not location:
            raise exception.InvalidVolume(_("Could not find iSCSI export "
                                            "for volume %s") %
                                          (volume['name']))

        LOG.debug(_("ISCSI Discovery: Found %s"), location)
        properties['target_discovered'] = True

        device_info = self.common.find_device_number(volume)
        if device_info is None or device_info['hostlunid'] is None:
            exception_message = (_("Cannot find device number for volume %s")
                                 % volume['name'])
            raise exception.VolumeBackendAPIException(data=exception_message)

        device_number = device_info['hostlunid']
        storage_system = device_info['storagesystem']

        # sp is "SP_A" or "SP_B"
        sp = device_info['owningsp']
        endpoints = []
        if sp:
            # endpoints example:
            # [iqn.1992-04.com.emc:cx.apm00123907237.a8,
            # iqn.1992-04.com.emc:cx.apm00123907237.a9]
            endpoints = self.common._find_iscsi_protocol_endpoints(
                sp, storage_system)

        foundEndpoint = False
        for loc in location:
            results = loc.split(" ")
            properties['target_portal'] = results[0].split(",")[0]
            properties['target_iqn'] = results[1]
            # owning sp is None for VMAX
            # for VNX, find the target_iqn that matches the endpoint
            # target_iqn example: iqn.1992-04.com.emc:cx.apm00123907237.a8
            # or iqn.1992-04.com.emc:cx.apm00123907237.b8
            if not sp:
                break
            for endpoint in endpoints:
                if properties['target_iqn'] == endpoint:
                    LOG.debug(_("Found iSCSI endpoint: %s"), endpoint)
                    foundEndpoint = True
                    break
            if foundEndpoint:
                break

        if sp and not foundEndpoint:
            LOG.warning(_("ISCSI endpoint not found for SP %(sp)s on "
                          "storage system %(storage)s."),
                        {'sp': sp,
                         'storage': storage_system})

        properties['target_lun'] = device_number

        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        LOG.debug(_("ISCSI properties: %s") % (properties))

        return properties
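A note on the discovery format assumed in Example #23: each entry returned by
discovery is expected to look like "<portal>,<tpgt> <iqn>", which is why the
loop splits on a space and then takes the text before the comma. A minimal
standalone sketch of that parsing (the sample line is illustrative, not taken
from a real array):

    # Sketch of the per-line parsing in the loop above; assumes exactly
    # "<portal>,<tpgt> <iqn>" per discovery entry.
    def parse_discovery_line(loc):
        portal_with_tpgt, target_iqn = loc.split(" ")
        target_portal = portal_with_tpgt.split(",")[0]
        return target_portal, target_iqn

    portal, iqn = parse_discovery_line(
        "10.0.0.1:3260,1 iqn.1992-04.com.emc:cx.apm00123907237.a8")
    assert portal == "10.0.0.1:3260"
    assert iqn == "iqn.1992-04.com.emc:cx.apm00123907237.a8"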
Example #24
0
    def _create_snapshot(self,
                         context,
                         volume,
                         name,
                         description,
                         force=False,
                         metadata=None):
        check_policy(context, 'create_snapshot', volume)

        if volume['migration_status'] is not None:
            # Volume is migrating, wait until done
            msg = _("Snapshot cannot be created while volume is migrating")
            raise exception.InvalidVolume(reason=msg)

        if not force and volume['status'] != "available":
            msg = _("Volume status must be available")
            raise exception.InvalidVolume(reason=msg)

        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': 1}
            else:
                reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        volume.get('volume_type_id'))
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "%(s_size)sG snapshot (%(d_consumed)dG of "
                            "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg % {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('gigabytes'),
                        quota=quotas['gigabytes'])
                elif 'snapshots' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "snapshot (%(d_consumed)d snapshots "
                            "already consumed)")

                    LOG.warning(msg % {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.SnapshotLimitExceeded(allowed=quotas[over])

        self._check_metadata_properties(metadata)
        options = {
            'volume_id': volume['id'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': "creating",
            'progress': '0%',
            'volume_size': volume['size'],
            'display_name': name,
            'display_description': description,
            'volume_type_id': volume['volume_type_id'],
            'encryption_key_id': volume['encryption_key_id'],
            'metadata': metadata
        }

        snapshot = None
        try:
            snapshot = self.db.snapshot_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    # Destroy the snapshot row if it was created before the
                    # failure; the quota reservation is rolled back either
                    # way.
                    if snapshot:
                        self.db.snapshot_destroy(context, snapshot['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        self.volume_rpcapi.create_snapshot(context, volume, snapshot)

        return snapshot
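The quota handling in Example #24 is a two-phase pattern: reserve before
creating the DB row, commit only once the row exists, roll back on any
failure. A condensed sketch of that contract, with a hypothetical quotas
object exposing the same three methods:

    # Hypothetical condensation of the reserve/commit/rollback pattern.
    def create_with_quota(quotas, context, db_create, **reserve_opts):
        reservations = quotas.reserve(context, **reserve_opts)
        try:
            record = db_create(context)  # e.g. snapshot_create
            quotas.commit(context, reservations)
            return record
        except Exception:
            quotas.rollback(context, reservations)
            raise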
Example #25
0
    def update(self, context, group, name, description, add_volumes,
               remove_volumes):
        """Update group."""
        if group.status != c_fields.GroupStatus.AVAILABLE:
            msg = _("Group status must be available, "
                    "but current status is: %s.") % group.status
            raise exception.InvalidGroup(reason=msg)

        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')

        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)

        volumes = self.db.volume_get_all_by_generic_group(context, group.id)

        # Validate name.
        if name == group.name:
            name = None

        # Validate description.
        if description == group.description:
            description = None

        # Validate volumes in add_volumes and remove_volumes.
        add_volumes_new = ""
        remove_volumes_new = ""
        if add_volumes_list:
            add_volumes_new = self._validate_add_volumes(
                context, volumes, add_volumes_list, group)
        if remove_volumes_list:
            remove_volumes_new = self._validate_remove_volumes(
                volumes, remove_volumes_list, group)

        if (name is None and description is None and not add_volumes_new
                and not remove_volumes_new):
            msg = (_("Cannot update group %(group_id)s "
                     "because no valid name, description, add_volumes, "
                     "or remove_volumes were provided.") % {
                         'group_id': group.id
                     })
            raise exception.InvalidGroup(reason=msg)

        fields = {'updated_at': timeutils.utcnow()}

        # Update name and description in db now. No need to
        # send them over through an RPC call.
        if name is not None:
            fields['name'] = name
        if description is not None:
            fields['description'] = description
        if not add_volumes_new and not remove_volumes_new:
            # Only update name or description. Set status to available.
            fields['status'] = 'available'
        else:
            fields['status'] = 'updating'

        group.update(fields)
        group.save()

        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.volume_rpcapi.update_group(context,
                                            group,
                                            add_volumes=add_volumes_new,
                                            remove_volumes=remove_volumes_new)
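As the closing comment in Example #25 says, add_volumes and remove_volumes
travel as comma-separated UUID strings with no spaces. A small illustration
of the parsing and the both-lists check (the UUIDs are made up):

    # Illustrative input; strip(',') tolerates a trailing comma.
    add_volumes = "uuid-1,uuid-2,"
    remove_volumes = "uuid-2"

    add_list = add_volumes.strip(',').split(',')        # ['uuid-1', 'uuid-2']
    remove_list = remove_volumes.strip(',').split(',')  # ['uuid-2']

    invalid = [u for u in add_list if u in remove_list]
    assert invalid == ['uuid-2']  # in both lists -> InvalidVolume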
Example #26
0
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service."""
        volume_id = backup.volume_id
        snapshot_id = backup.snapshot_id
        volume = objects.Volume.get_by_id(context, volume_id)
        snapshot = objects.Snapshot.get_by_id(
            context, snapshot_id) if snapshot_id else None
        previous_status = volume.get('previous_status', None)
        updates = {}
        if snapshot_id:
            log_message = ('Create backup started, backup: %(backup_id)s '
                           'volume: %(volume_id)s snapshot: %(snapshot_id)s.'
                           % {'backup_id': backup.id,
                              'volume_id': volume_id,
                              'snapshot_id': snapshot_id})
        else:
            log_message = ('Create backup started, backup: %(backup_id)s '
                           'volume: %(volume_id)s.'
                           % {'backup_id': backup.id,
                              'volume_id': volume_id})
        LOG.info(log_message)

        self._notify_about_backup_usage(context, backup, "create.start")

        backup.host = self.host
        backup.service = self.driver_name
        backup.availability_zone = self.az
        backup.save()

        expected_status = "backing-up"
        if snapshot_id:
            actual_status = snapshot['status']
            if actual_status != expected_status:
                err = _('Create backup aborted, expected snapshot status '
                        '%(expected_status)s but got %(actual_status)s.') % {
                    'expected_status': expected_status,
                    'actual_status': actual_status,
                }
                self._update_backup_error(backup, err)
                raise exception.InvalidSnapshot(reason=err)
        else:
            actual_status = volume['status']
            if actual_status != expected_status:
                err = _('Create backup aborted, expected volume status '
                        '%(expected_status)s but got %(actual_status)s.') % {
                    'expected_status': expected_status,
                    'actual_status': actual_status,
                }
                self._update_backup_error(backup, err)
                raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.CREATING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                'expected_status': expected_status,
                'actual_status': actual_status,
            }
            self._update_backup_error(backup, err)
            raise exception.InvalidBackup(reason=err)

        try:
            if not self.is_working():
                err = _('Create backup aborted because the backup '
                        'service is down')
                self._update_backup_error(backup, err)
                raise exception.InvalidBackup(reason=err)
            updates = self._run_backup(context, backup, volume)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                if snapshot_id:
                    snapshot.status = fields.SnapshotStatus.AVAILABLE
                    snapshot.save()
                else:
                    self.db.volume_update(
                        context, volume_id,
                        {'status': previous_status,
                         'previous_status': 'error_backing-up'})
                self._update_backup_error(backup, six.text_type(err))

        # Restore the original status.
        if snapshot_id:
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.AVAILABLE})
        else:
            self.db.volume_update(context, volume_id,
                                  {'status': previous_status,
                                   'previous_status': 'backing-up'})
        backup.status = fields.BackupStatus.AVAILABLE
        backup.size = volume['size']

        if updates:
            backup.update(updates)
        backup.save()

        # Handle the num_dependent_backups of parent backup when child backup
        # has created successfully.
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(context,
                                                     backup.parent_id)
            parent_backup.num_dependent_backups += 1
            parent_backup.save()
        LOG.info('Create backup finished. backup: %s.', backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
Example #27
0
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None,
               metadata=None):
        """Make the RPC call to create a volume backup."""
        volume = self.volume_api.get(context, volume_id)
        context.authorize(policy.CREATE_POLICY, target_obj=volume)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
            if snapshot['status'] not in ["available"]:
                msg = (_('Snapshot to be backed up must be available, '
                         'but the current status is "%s".') %
                       snapshot['status'])
                raise exception.InvalidSnapshot(reason=msg)
        elif volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume.host, 'host')
        availability_zone = availability_zone or volume.availability_zone
        host = self._get_available_backup_service_host(volume_host,
                                                       availability_zone)

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context,
                                                   e,
                                                   resource='backups',
                                                   size=volume.size)
        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                QUOTAS.rollback(context, reservations)
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        parent = None

        if latest_backup:
            parent = latest_backup
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                QUOTAS.rollback(context, reservations)
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.BACKING_UP})
        else:
            self.db.volume_update(context, volume_id, {
                'status': 'backing-up',
                'previous_status': previous_status
            })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'availability_zone': availability_zone,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
                'parent': parent,
                'metadata': metadata or {}
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
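The max() call in Example #27 implements the rule from the NOTE: a backup
qualifies as parent only if its data_timestamp predates the snapshot being
backed up, and disqualified backups are pushed to the bottom of the ordering
with a year-1 sentinel. A self-contained sketch of the same rule using plain
dicts and the standard-library timezone.utc (the original uses pytz):

    from datetime import datetime, timezone

    # Sentinel that sorts below any real timestamp, mirroring the
    # datetime(1, 1, 1, ...) trick in the lambda above.
    _EPOCH = datetime(1, 1, 1, tzinfo=timezone.utc)

    def pick_parent(backups, snapshot_ts=None):
        """Return the backup whose eligible data_timestamp is latest."""
        return max(
            backups,
            key=lambda b: b['data_timestamp']
            if snapshot_ts is None or b['data_timestamp'] < snapshot_ts
            else _EPOCH)

    utc = timezone.utc
    candidates = [
        {'id': 'b-0800',
         'data_timestamp': datetime(2020, 1, 1, 8, 0, tzinfo=utc)},
        {'id': 'b-0820',
         'data_timestamp': datetime(2020, 1, 1, 8, 20, tzinfo=utc)},
    ]
    snap_ts = datetime(2020, 1, 1, 8, 10, tzinfo=utc)
    assert pick_parent(candidates, snap_ts)['id'] == 'b-0800'  # not 8:20
    assert pick_parent(candidates)['id'] == 'b-0820'  # no-snapshot case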
Example #28
0
    def update(self,
               context,
               group,
               name,
               description,
               add_volumes,
               remove_volumes,
               allow_empty=False):
        """Update consistency group."""
        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')

        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)

        # Validate name.
        if name == group.name:
            name = None

        # Validate description.
        if description == group.description:
            description = None
        self._check_update(group, name, description, add_volumes,
                           remove_volumes, allow_empty)

        fields = {'updated_at': timeutils.utcnow()}

        # Update name and description in db now. No need to
        # send them over through an RPC call.
        if allow_empty:
            if name is not None:
                fields['name'] = name
            if description is not None:
                fields['description'] = description
        else:
            if name:
                fields['name'] = name
            if description:
                fields['description'] = description

        # NOTE(geguileo): We will use the updating status in the CG as a lock
        # mechanism to prevent volume add/remove races with other APIs, while
        # we figure out if we really need to add or remove volumes.
        if add_volumes or remove_volumes:
            fields['status'] = c_fields.ConsistencyGroupStatus.UPDATING

            # We cannot modify the members of this CG if the CG is being used
            # to create another CG or a CGsnapshot is being created
            filters = [
                ~db.cg_creating_from_src(cg_id=group.id),
                ~db.cgsnapshot_creating_from_src()
            ]
        else:
            filters = []

        expected = {'status': c_fields.ConsistencyGroupStatus.AVAILABLE}
        if not group.conditional_update(fields, expected, filters):
            msg = _("Cannot update consistency group %s, status must be "
                    "available, and it cannot be the source for an ongoing "
                    "CG or CG Snapshot creation.") % group.id
            raise exception.InvalidConsistencyGroup(reason=msg)

        # Now the CG is "locked" for updating
        try:
            # Validate volumes in add_volumes and remove_volumes.
            add_volumes_new = self._validate_add_volumes(
                context, group.volumes, add_volumes_list, group)
            remove_volumes_new = self._validate_remove_volumes(
                group.volumes, remove_volumes_list, group)

            self._check_update(group, name, description, add_volumes_new,
                               remove_volumes_new, allow_empty)
        except Exception:
            # If we have an error on the volume_lists we must return status to
            # available as we were doing before removing API races
            with excutils.save_and_reraise_exception():
                group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
                group.save()

        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.volume_rpcapi.update_consistencygroup(
                context,
                group,
                add_volumes=add_volumes_new,
                remove_volumes=remove_volumes_new)
        # If there are no new volumes to add or remove and we had changed
        # the status to updating, turn it back to available
        elif group.status == c_fields.ConsistencyGroupStatus.UPDATING:
            group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
            group.save()
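The locking NOTE in Example #28 relies on conditional_update being an atomic
compare-and-swap at the DB layer: the fields are written only if the row
still matches the expected values (and the extra filters). A toy in-memory
stand-in conveying the semantics; this is not the real DB API:

    # Toy compare-and-swap: update the row only if it still matches
    # 'expected' and passes every filter predicate.
    def conditional_update(row, values, expected, filters=()):
        if (all(row.get(k) == v for k, v in expected.items())
                and all(f(row) for f in filters)):
            row.update(values)
            return True
        return False

    group = {'status': 'available'}
    assert conditional_update(group, {'status': 'updating'},
                              {'status': 'available'})
    # A second caller now loses the race:
    assert not conditional_update(group, {'status': 'updating'},
                                  {'status': 'available'})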
Example #29
0
    def smis_get_iscsi_properties(self, volume, connector):
        """Gets iscsi configuration.

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future. The properties are:
        :target_discovered:    boolean indicating whether discovery was used
        :target_iqn:    the IQN of the iSCSI target
        :target_portal:    the portal of the iSCSI target
        :target_lun:    the lun of the iSCSI target
        :volume_id:    the UUID of the volume
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        """
        properties = {}

        location = self.smis_do_iscsi_discovery(volume)
        if not location:
            raise exception.InvalidVolume(
                _("Could not find iSCSI export "
                  " for volume %(volumeName)s.") %
                {'volumeName': volume['name']})

        LOG.debug("ISCSI Discovery: Found %s", location)
        properties['target_discovered'] = True

        device_info = self.common.find_device_number(volume, connector['host'])

        if device_info is None or device_info['hostlunid'] is None:
            exception_message = (_("Cannot find device number for volume "
                                   "%(volumeName)s.") % {
                                       'volumeName': volume['name']
                                   })
            raise exception.VolumeBackendAPIException(data=exception_message)

        device_number = device_info['hostlunid']

        LOG.info(_LI("location is: %(location)s"), {'location': location})

        for loc in location:
            results = loc.split(" ")
            properties['target_portal'] = results[0].split(",")[0]
            properties['target_iqn'] = results[1]

        properties['target_lun'] = device_number

        properties['volume_id'] = volume['id']

        LOG.info(_LI("ISCSI properties: %(properties)s"),
                 {'properties': properties})
        LOG.info(_LI("ISCSI volume is: %(volume)s"), {'volume': volume})

        if 'provider_auth' in volume:
            auth = volume['provider_auth']
            LOG.info(_LI("AUTH properties: %(authProps)s"),
                     {'authProps': auth})

            if auth is not None:
                (auth_method, auth_username, auth_secret) = auth.split()

                properties['auth_method'] = auth_method
                properties['auth_username'] = auth_username
                properties['auth_password'] = auth_secret

                LOG.info(_LI("AUTH properties: %s."), properties)

        return properties
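Both iSCSI examples parse provider_auth the same way: a single
space-separated string holding the method, username, and secret. A sketch
with an illustrative value (real credentials are generated by the driver
when the export is created):

    # Illustrative provider_auth value, not a real credential.
    auth = "CHAP backup-user s3cr3t"
    auth_method, auth_username, auth_secret = auth.split()
    assert auth_method == "CHAP"
    assert auth_username == "backup-user"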
Example #30
0
    def delete(self, context, volume, force=False, unmanage_only=False):
        if context.is_admin and context.project_id != volume['project_id']:
            project_id = volume['project_id']
        else:
            project_id = context.project_id

        volume_id = volume['id']
        if not volume['host']:
            volume_utils.notify_about_volume_usage(context, volume,
                                                   "delete.start")
            # NOTE(vish): scheduling failed, so delete it
            # NOTE(zhiteng): update volume quota reservation
            try:
                reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
                QUOTAS.add_volume_type_opts(context, reserve_opts,
                                            volume['volume_type_id'])
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception(_("Failed to update quota for deleting volume"))
            self.db.volume_destroy(context.elevated(), volume_id)

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

            volume_utils.notify_about_volume_usage(context, volume,
                                                   "delete.end")
            return
        if not force and volume['status'] not in [
                "available", "error", "error_restoring", "error_extending"
        ]:
            msg = _("Volume status must be available or error, "
                    "but current status is: %s") % volume['status']
            raise exception.InvalidVolume(reason=msg)

        if volume['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)

        if volume['migration_status'] is not None:
            # Volume is migrating, wait until done
            msg = _("Volume cannot be deleted while migrating")
            raise exception.InvalidVolume(reason=msg)

        snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
        if snapshots:
            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidVolume(reason=msg)

        # If the volume is encrypted, delete its encryption key from the key
        # manager. This operation makes volume deletion an irreversible process
        # because the volume cannot be decrypted without its key.
        encryption_key_id = volume.get('encryption_key_id', None)
        if encryption_key_id is not None:
            self.key_manager.delete_key(context, encryption_key_id)

        now = timeutils.utcnow()
        self.db.volume_update(context, volume_id, {
            'status': 'deleting',
            'terminated_at': now
        })

        self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
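Example #30 layers several guards before any state change: status (unless
forced), attach status, migration status, and dependent snapshots. A compact
sketch of the first guard, with the status set copied from the check above:

    # Statuses copied from the check above; force bypasses only this guard.
    DELETABLE_STATUSES = {"available", "error", "error_restoring",
                          "error_extending"}

    def can_start_delete(status, force=False):
        return force or status in DELETABLE_STATUSES

    assert can_start_delete("error")
    assert not can_start_delete("in-use")
    assert can_start_delete("in-use", force=True)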