Example 1
def is_snapshot_rollback_available(client, snap_name):
    snapshot_info = client.get_snapshot_info_by_name(snap_name)

    running_status = snapshot_info.get("running_status")
    health_status = snapshot_info.get("health_status")

    if running_status not in (constants.SNAPSHOT_RUNNING_STATUS_ONLINE,
                              constants.SNAPSHOT_RUNNING_STATUS_ROLLBACKING):
        err_msg = (_("The running status %(status)s of snapshot %(name)s.") % {
            "status": running_status,
            "name": snap_name
        })
        LOG.error(err_msg)
        raise exception.InvalidSnapshot(reason=err_msg)

    if health_status not in (constants.SNAPSHOT_HEALTH_STATS_NORMAL, ):
        err_msg = (_("The health status %(status)s of snapshot %(name)s.") % {
            "status": running_status,
            "name": snap_name
        })
        LOG.error(err_msg)
        raise exception.InvalidSnapshot(reason=err_msg)

    return running_status == constants.SNAPSHOT_RUNNING_STATUS_ONLINE
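
A caller typically polls this helper until the rollback finishes, since a snapshot in the rollbacking state simply returns False. A minimal wait-loop sketch, assuming the same client interface as above; the timeout and interval values are illustrative, not taken from the original driver:

import time

def wait_for_rollback(client, snap_name, timeout=300, interval=5):
    # Poll until the snapshot leaves the "rollbacking" state; any
    # unexpected status makes the helper raise InvalidSnapshot for us.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if is_snapshot_rollback_available(client, snap_name):
            return
        time.sleep(interval)
    raise exception.InvalidSnapshot(
        reason="Timed out waiting for snapshot %s to finish rolling "
               "back" % snap_name)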
Example 2
    def create(self, context, volume_id, display_name, no_snapshots=False):
        """Creates an entry in the transfers table."""
        LOG.info("Generating transfer record for volume %s", volume_id)
        volume_ref = objects.Volume.get_by_id(context, volume_id)
        context.authorize(policy.CREATE_POLICY, target_obj=volume_ref)
        if volume_ref['status'] != "available":
            raise exception.InvalidVolume(reason=_("status must be available"))
        if volume_ref['encryption_key_id'] is not None:
            raise exception.InvalidVolume(
                reason=_("transferring encrypted volume is not supported"))

        if not no_snapshots:
            snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
            for snapshot in snapshots:
                if snapshot['status'] != "available":
                    msg = _("snapshot: %s status must be "
                            "available") % snapshot['id']
                    raise exception.InvalidSnapshot(reason=msg)
                if snapshot.get('encryption_key_id'):
                    msg = _("snapshot: %s encrypted snapshots cannot be "
                            "transferred") % snapshot['id']
                    raise exception.InvalidSnapshot(reason=msg)

        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.start")
        # The salt is just a short random string.
        salt = self._get_random_string(CONF.volume_transfer_salt_length)
        auth_key = self._get_random_string(CONF.volume_transfer_key_length)
        crypt_hash = self._get_crypt_hash(salt, auth_key)

        # TODO(ollie): Transfer expiry needs to be implemented.
        transfer_rec = {
            'volume_id': volume_id,
            'display_name': display_name,
            'salt': salt,
            'crypt_hash': crypt_hash,
            'expires_at': None,
            'no_snapshots': no_snapshots,
            'source_project_id': volume_ref['project_id']
        }

        try:
            transfer = self.db.transfer_create(context, transfer_rec)
        except Exception:
            LOG.error("Failed to create transfer record for %s", volume_id)
            raise
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.end")
        return {
            'id': transfer['id'],
            'volume_id': transfer['volume_id'],
            'display_name': transfer['display_name'],
            'auth_key': auth_key,
            'created_at': transfer['created_at'],
            'no_snapshots': transfer['no_snapshots'],
            'source_project_id': transfer['source_project_id'],
            'destination_project_id': transfer['destination_project_id'],
            'accepted': transfer['accepted']
        }
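
The method leans on two helpers that are not shown here. A plausible sketch of both, assuming the salt and auth key are random alphanumeric strings and the stored hash is a salted HMAC (the real implementation may differ):

import hashlib
import hmac
import secrets
import string

def _get_random_string(self, length):
    # Random alphanumeric string used for the salt and the auth key.
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))

def _get_crypt_hash(self, salt, auth_key):
    # Only the keyed hash is persisted, so the auth key itself never
    # reaches the database; the receiver proves knowledge of it later.
    return hmac.new(salt.encode('utf-8'),
                    auth_key.encode('utf-8'),
                    hashlib.sha1).hexdigest()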
Example 3
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create volume from snapshot."""

        LOG.debug(
            'enter: create_volume_from_snapshot: create %(vol)s from '
            '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']})

        if volume['size'] != snapshot['volume_size']:
            msg = _('create_volume_from_snapshot: Volume size must match '
                    'the size of the source snapshot.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        status = snapshot['status']
        if status != 'available':
            msg = (_('create_volume_from_snapshot: Snapshot status '
                     'must be "available" for creating volume. '
                     'The invalid status is: %s.') % status)
            raise exception.InvalidSnapshot(msg)

        self._create_and_copy_vdisk_data(snapshot['name'],
                                         snapshot['id'],
                                         volume['name'],
                                         volume['id'])

        LOG.debug(
            'leave: create_volume_from_snapshot: create %(vol)s from '
            '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']})
Example 4
 def _get_infinidat_snapshot_by_name(self, name):
     snapshots = self._get('volumes?name=%s' % name)
     if len(snapshots) != 1:
         msg = _('Snapshot "%s" not found') % name
         LOG.error(msg)
         raise exception.InvalidSnapshot(reason=msg)
     return snapshots[0]
Example 5
 def _get_infinidat_snapshot_by_name(self, name):
     snapshot = self._system.volumes.safe_get(name=name)
     if snapshot is None:
         msg = _('Snapshot "%s" not found') % name
         LOG.error(msg)
         raise exception.InvalidSnapshot(reason=msg)
     return snapshot
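
Examples 4 and 5 are two generations of the same lookup: the first issues a raw REST list query and checks the result count, the second uses an SDK safe_get that returns None on a miss. The guard can be written once for either style; a generic sketch, assuming only a "find by name" callable:

def _get_one_or_invalid(finder, name):
    # Normalize both client styles (None / single object / list) and
    # raise InvalidSnapshot unless exactly one match came back.
    results = finder(name)
    if results is None:
        results = []
    elif not isinstance(results, list):
        results = [results]
    if len(results) != 1:
        msg = _('Snapshot "%s" not found') % name
        LOG.error(msg)
        raise exception.InvalidSnapshot(reason=msg)
    return results[0]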
Example 6
    def _create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        Snapshot must not be the active snapshot. (offline)
        """

        LOG.debug('Creating volume %(vol)s from snapshot %(snap)s', {
            'vol': volume.id,
            'snap': snapshot.id
        })

        if snapshot.status not in ['available', 'backing-up']:
            msg = _('Snapshot status must be "available" or "backing-up" '
                    'to clone, but it is: %(status)s') % {
                        'status': snapshot.status
                    }

            raise exception.InvalidSnapshot(msg)

        self._ensure_shares_mounted()

        volume.provider_location = self._find_share(volume)

        self._copy_volume_from_snapshot(snapshot, volume, volume.size)

        return {'provider_location': volume.provider_location}
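
The real work hides in _copy_volume_from_snapshot. For a raw-format file on an NFS share, a minimal sketch might look like the following; _local_path_of_snapshot is a hypothetical helper, and a production driver also has to deal with qcow2 backing chains, so treat this purely as an illustration:

import shutil

def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
    # Copy the snapshot's backing file to the new volume's path, then
    # grow the (sparse) file to the requested size in GiB.
    src = self._local_path_of_snapshot(snapshot)  # hypothetical helper
    dst = self.local_path(volume)
    shutil.copyfile(src, dst)
    with open(dst, 'rb+') as f:
        f.truncate(volume_size * 1024 ** 3)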
Example 7
 def delete_snapshot(self, context, snapshot, force=False):
     if not force and snapshot['status'] not in ["available", "error"]:
         msg = _("Volume Snapshot status must be available or error")
         raise exception.InvalidSnapshot(reason=msg)
     self.db.snapshot_update(context, snapshot['id'],
                             {'status': 'deleting'})
     volume = self.db.volume_get(context, snapshot['volume_id'])
     self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
Example 8
    def revert_to_snapshot(self, context, volume, snapshot):
        user = self.configuration.curve_user
        volume_path = "%s/%s" % (self.configuration.volume_dir, volume['name'])
        snapshot_uuid = self._get_snapshot_uuid(snapshot)
        if snapshot_uuid is None:
            msg = "snapshot %s does't has backend uuid" % snapshot_uuid
            raise exception.InvalidSnapshot(msg)

        self.cbd_client.revert_to_snapshot(volume_path, snapshot_uuid,
                                           volume['size'], user)
Example 9
def is_snapshot_rollback_available(client, snapshot_id):
    snapshot_info = client.get_snapshot_info(snapshot_id)
    running_status = snapshot_info.get("RUNNINGSTATUS")
    health_status = snapshot_info.get("HEALTHSTATUS")
    if running_status not in (constants.SNAPSHOT_RUNNING_STATUS_ACTIVATED,
                              constants.SNAPSHOT_RUNNING_STATUS_ROLLINGBACK):
        err_msg = (_("The running status %(status)s of snapshot %(name)s.") % {
            "status": running_status,
            "name": snapshot_id
        })
        LOG.error(err_msg)
        raise exception.InvalidSnapshot(reason=err_msg)
    if health_status not in (constants.SNAPSHOT_HEALTH_STATUS_NORMAL, ):
        err_msg = (_("The health status %(status)s of snapshot %(name)s.") % {
            "status": running_status,
            "name": snapshot_id
        })
        LOG.error(err_msg)
        raise exception.InvalidSnapshot(reason=err_msg)
    return running_status == constants.SNAPSHOT_RUNNING_STATUS_ACTIVATED
Example 10
    def _create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        Snapshot must not be the active snapshot. (offline)
        """

        if snapshot['status'] != 'available':
            msg = _('Snapshot status must be "available" to clone.')
            raise exception.InvalidSnapshot(msg)

        self._ensure_shares_mounted()

        volume['provider_location'] = self._find_share(volume['size'])

        self._do_create_volume(volume)

        self._copy_volume_from_snapshot(snapshot, volume, volume['size'])

        return {'provider_location': volume['provider_location']}
Example 11
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        user = self.configuration.curve_user

        snapshot_uuid = self._get_snapshot_uuid(snapshot)
        volume_path = "/%s/%s" % (user, volume['name'])
        if snapshot_uuid:
            self.cbd_client.clone(snapshot_uuid, volume_path, user,
                                  snapshot['volume_size'])

            if int(volume['size']) > int(snapshot['volume_size']):
                self._resize(volume)

            provider_location = "cbd://%s/%s" % (self._get_fsid(),
                                                 self.configuration.curve_pool)
            return {'provider_location': provider_location}

        msg = "snapshot %s does't has backend uuid" % snapshot_uuid
        raise exception.InvalidSnapshot(msg)
Example 12
    def _extract_snapshot(snapshot):
        """Extracts the snapshot id from the provided snapshot (if provided).

        This function validates the input snapshot dict and checks that the
        status of that snapshot is valid for creating a volume from.
        """

        snapshot_id = None
        if snapshot is not None:
            if snapshot['status'] not in SNAPSHOT_PROCEED_STATUS:
                msg = _("Originating snapshot status must be one"
                        " of %s values")
                msg = msg % (", ".join(SNAPSHOT_PROCEED_STATUS))
                # TODO(harlowja): what happens if the status changes after this
                # initial snapshot status check occurs??? Seems like someone
                # could delete the snapshot after this check passes but before
                # the volume is officially created?
                raise exception.InvalidSnapshot(reason=msg)
            snapshot_id = snapshot['id']
        return snapshot_id
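
The TODO points at a genuine time-of-check/time-of-use race: the snapshot can be deleted between the status check and the volume creation. A common mitigation is a compare-and-swap style conditional update at the database layer instead of a read-then-act check. A hedged sketch, where snapshot_conditional_update stands in for whatever atomic primitive the DB layer actually offers:

def _reserve_snapshot(db, context, snapshot_id):
    # Atomically move the snapshot into a 'busy' state, but only if it
    # is still in a proceed-able status; zero rows updated means we
    # lost the race.
    updated = db.snapshot_conditional_update(
        context, snapshot_id,
        values={'status': 'busy'},
        expected={'status': SNAPSHOT_PROCEED_STATUS})
    if not updated:
        msg = _("Originating snapshot is no longer in a usable status.")
        raise exception.InvalidSnapshot(reason=msg)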
Example 13
 def revert_to_snapshot(self, context, volume, snapshot):
     volume_name = self._convert_name(volume.name)
     snapshot_name = self._convert_name(snapshot.name)
     ret = self._cmd.rollback_snapshot(snapshot_name, volume_name)
     if ret['key'] == 303:
         raise exception.VolumeNotFound(volume_id=volume_name)
     elif ret['key'] == 505:
         raise exception.SnapshotNotFound(snapshot_id=snapshot_name)
     elif ret['key'] == 506:
         msg = (_('Snapshot %s is not the latest one.') % snapshot_name)
         raise exception.InvalidSnapshot(reason=msg)
     elif ret['key'] != 0:
         msg = (_('Failed to revert volume %(vol)s to snapshot %(snap)s, '
                  'code=%(ret)s, error=%(msg)s.') % {
                      'vol': volume_name,
                      'snap': snapshot_name,
                      'ret': ret['key'],
                      'msg': ret['msg']
                  })
         raise exception.VolumeBackendAPIException(data=msg)
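
Chains of elif on vendor return codes grow quickly. A table-driven variant keeps the mapping in one place; this sketch only assumes that the codes mean what the original raises imply (303: volume missing, 505: snapshot missing, 506: not the latest snapshot):

_ROLLBACK_ERRORS = {
    303: lambda vol, snap: exception.VolumeNotFound(volume_id=vol),
    505: lambda vol, snap: exception.SnapshotNotFound(snapshot_id=snap),
    506: lambda vol, snap: exception.InvalidSnapshot(
        reason=_('Snapshot %s is not the latest one.') % snap),
}

def check_rollback_ret(ret, volume_name, snapshot_name):
    # 0 means success; known codes map to specific exceptions; anything
    # else becomes a generic backend error.
    if ret['key'] == 0:
        return
    make_exc = _ROLLBACK_ERRORS.get(ret['key'])
    if make_exc is not None:
        raise make_exc(volume_name, snapshot_name)
    raise exception.VolumeBackendAPIException(
        data=_('Failed to revert volume %(vol)s to snapshot %(snap)s, '
               'code=%(ret)s, error=%(msg)s.') % {'vol': volume_name,
                                                  'snap': snapshot_name,
                                                  'ret': ret['key'],
                                                  'msg': ret['msg']})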
Example 14
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service."""
        volume_id = backup.volume_id
        snapshot_id = backup.snapshot_id
        volume = objects.Volume.get_by_id(context, volume_id)
        snapshot = objects.Snapshot.get_by_id(
            context, snapshot_id) if snapshot_id else None
        previous_status = volume.get('previous_status', None)
        updates = {}
        if snapshot_id:
            log_message = ('Create backup started, backup: %(backup_id)s '
                           'volume: %(volume_id)s snapshot: %(snapshot_id)s.'
                           % {'backup_id': backup.id,
                              'volume_id': volume_id,
                              'snapshot_id': snapshot_id})
        else:
            log_message = ('Create backup started, backup: %(backup_id)s '
                           'volume: %(volume_id)s.'
                           % {'backup_id': backup.id,
                              'volume_id': volume_id})
        LOG.info(log_message)

        self._notify_about_backup_usage(context, backup, "create.start")

        backup.host = self.host
        backup.service = self.driver_name
        backup.availability_zone = self.az
        backup.save()

        expected_status = "backing-up"
        if snapshot_id:
            actual_status = snapshot['status']
            if actual_status != expected_status:
                err = _('Create backup aborted, expected snapshot status '
                        '%(expected_status)s but got %(actual_status)s.') % {
                    'expected_status': expected_status,
                    'actual_status': actual_status,
                }
                self._update_backup_error(backup, err)
                raise exception.InvalidSnapshot(reason=err)
        else:
            actual_status = volume['status']
            if actual_status != expected_status:
                err = _('Create backup aborted, expected volume status '
                        '%(expected_status)s but got %(actual_status)s.') % {
                    'expected_status': expected_status,
                    'actual_status': actual_status,
                }
                self._update_backup_error(backup, err)
                raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.CREATING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                'expected_status': expected_status,
                'actual_status': actual_status,
            }
            self._update_backup_error(backup, err)
            raise exception.InvalidBackup(reason=err)

        try:
            if not self.is_working():
                err = _('Create backup aborted because the backup '
                        'service is down.')
                self._update_backup_error(backup, err)
                raise exception.InvalidBackup(reason=err)
            updates = self._run_backup(context, backup, volume)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                if snapshot_id:
                    snapshot.status = fields.SnapshotStatus.AVAILABLE
                    snapshot.save()
                else:
                    self.db.volume_update(
                        context, volume_id,
                        {'status': previous_status,
                         'previous_status': 'error_backing-up'})
                self._update_backup_error(backup, six.text_type(err))

        # Restore the original status.
        if snapshot_id:
            self.db.snapshot_update(context, snapshot_id,
                                    {'status': fields.SnapshotStatus.AVAILABLE})
        else:
            self.db.volume_update(context, volume_id,
                                  {'status': previous_status,
                                   'previous_status': 'backing-up'})
        backup.status = fields.BackupStatus.AVAILABLE
        backup.size = volume['size']

        if updates:
            backup.update(updates)
        backup.save()

        # Handle the num_dependent_backups of parent backup when child backup
        # has created successfully.
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(context,
                                                     backup.parent_id)
            parent_backup.num_dependent_backups += 1
            parent_backup.save()
        LOG.info('Create backup finished. backup: %s.', backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
Example 15
    def create(self,
               context,
               size,
               name,
               description,
               snapshot=None,
               image_id=None,
               volume_type=None,
               metadata=None,
               availability_zone=None,
               source_volume=None):

        if ((snapshot is not None) and (source_volume is not None)):
            msg = (_("May specify either snapshot, "
                     "or src volume but not both!"))
            raise exception.InvalidInput(reason=msg)

        check_policy(context, 'create')
        if snapshot is not None:
            if snapshot['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidSnapshot(reason=msg)
            if not size:
                size = snapshot['volume_size']

            snapshot_id = snapshot['id']
        else:
            snapshot_id = None

        if source_volume is not None:
            if source_volume['status'] == "error":
                msg = _("Unable to clone volumes that are in an error state")
                raise exception.InvalidSourceVolume(reason=msg)
            if not size:
                size = source_volume['size']
            else:
                if size < source_volume['size']:
                    msg = _("Clones currently must be "
                            ">= original volume size.")
                    raise exception.InvalidInput(reason=msg)
            source_volid = source_volume['id']
        else:
            source_volid = None

        def as_int(s):
            try:
                return int(s)
            except (ValueError, TypeError):
                return s

        # tolerate size as stringified int
        size = as_int(size)

        if not isinstance(size, int) or size <= 0:
            msg = (
                _("Volume size '%s' must be an integer and greater than 0") %
                size)
            raise exception.InvalidInput(reason=msg)

        if (image_id and not (source_volume or snapshot)):
            # check image existence
            image_meta = self.image_service.show(context, image_id)
            image_size_in_gb = (int(image_meta['size']) + GB - 1) // GB
            # Check that the image is not larger than the volume.
            if image_size_in_gb > size:
                msg = _('Size of specified image is larger than volume size.')
                raise exception.InvalidInput(reason=msg)

        try:
            reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            if 'gigabytes' in overs:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG volume (%(d_consumed)dG of %(d_quota)dG "
                        "already consumed)")
                LOG.warning(
                    msg % {
                        's_pid': context.project_id,
                        's_size': size,
                        'd_consumed': _consumed('gigabytes'),
                        'd_quota': quotas['gigabytes']
                    })
                raise exception.VolumeSizeExceedsAvailableQuota()
            elif 'volumes' in overs:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "volume (%(d_consumed)d volumes"
                        "already consumed)")
                LOG.warning(
                    msg % {
                        's_pid': context.project_id,
                        'd_consumed': _consumed('volumes')
                    })
                raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        if not volume_type and not source_volume:
            volume_type = volume_types.get_default_volume_type()

        if not volume_type and source_volume:
            volume_type_id = source_volume['volume_type_id']
        else:
            volume_type_id = volume_type.get('id')

        self._check_metadata_properties(context, metadata)
        options = {
            'size': size,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': snapshot_id,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': name,
            'display_description': description,
            'volume_type_id': volume_type_id,
            'metadata': metadata,
            'source_volid': source_volid
        }

        try:
            volume = self.db.volume_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.volume_destroy(context, volume['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        request_spec = {
            'volume_properties': options,
            'volume_type': volume_type,
            'volume_id': volume['id'],
            'snapshot_id': volume['snapshot_id'],
            'image_id': image_id,
            'source_volid': volume['source_volid']
        }

        filter_properties = {}

        self._cast_create_volume(context, request_spec, filter_properties)

        return volume
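
The image-size check earlier in this method rounds the image size up to whole gigabytes with the (n + GB - 1) // GB idiom (assuming GB = 1024 ** 3, as the surrounding code implies). A quick worked check of the idiom in isolation:

GB = 1024 ** 3

def size_in_gb(num_bytes):
    # Ceiling division: any partial gigabyte counts as a full one.
    return (num_bytes + GB - 1) // GB

assert size_in_gb(GB) == 1          # exactly 1 GiB
assert size_in_gb(GB + 1) == 2      # one extra byte rounds up
assert size_in_gb(3 * GB - 1) == 3  # just under 3 GiB stays at 3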
Example 16
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".') % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume['availability_zone'],
                                               volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id, {
            'status': 'backing-up',
            'previous_status': previous_status
        })
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
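
The NOTE about data_timestamp is dense; restating the parent selection as a small standalone function makes the rule easier to check. This is a behavioral sketch of the max() key above, not a drop-in replacement:

from datetime import datetime, timezone

EPOCH = datetime(1, 1, 1, tzinfo=timezone.utc)

def pick_parent(backups, snapshot_created_at=None):
    # A backup is eligible if we are not backing up from a snapshot,
    # or if its data predates the snapshot; ineligible backups sort as
    # the minimum datetime so they can never win the max().
    def key(backup):
        ts = backup['data_timestamp']
        if snapshot_created_at is None or ts < snapshot_created_at:
            return ts
        return EPOCH
    return max(backups, key=key)

# The comment's timeline: backups at 8:00 and 8:20, snapshot at 8:10.
b1 = {'id': 'b1',
      'data_timestamp': datetime(2024, 1, 1, 8, 0, tzinfo=timezone.utc)}
b2 = {'id': 'b2',
      'data_timestamp': datetime(2024, 1, 1, 8, 20, tzinfo=timezone.utc)}
snap_time = datetime(2024, 1, 1, 8, 10, tzinfo=timezone.utc)
assert pick_parent([b1, b2], snap_time)['id'] == 'b1'
assert pick_parent([b1, b2])['id'] == 'b2'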
Example 17
class SnapshotTestCase(base.BaseVolumeTestCase):
    def setUp(self, *args, **kwargs):
        super(SnapshotTestCase, self).setUp()
        db.volume_type_create(
            self.context, v2_fakes.fake_default_type_get(fake.VOLUME_TYPE2_ID))
        self.vol_type = db.volume_type_get_by_name(self.context,
                                                   'vol_type_name')

    def test_delete_snapshot_frozen(self):
        service = tests_utils.create_service(self.context, {'frozen': True})
        volume = tests_utils.create_volume(self.context, host=service.host)
        snapshot = tests_utils.create_snapshot(self.context, volume.id)
        self.assertRaises(exception.InvalidInput,
                          self.volume_api.delete_snapshot, self.context,
                          snapshot)

    @ddt.data('create_snapshot', 'create_snapshot_force')
    def test_create_snapshot_frozen(self, method):
        service = tests_utils.create_service(self.context, {'frozen': True})
        volume = tests_utils.create_volume(self.context, host=service.host)
        method = getattr(self.volume_api, method)
        self.assertRaises(exception.InvalidInput, method, self.context, volume,
                          'name', 'desc')

    def test_create_snapshot_driver_not_initialized(self):
        volume_src = tests_utils.create_volume(self.context,
                                               **self.volume_params)
        self.volume.create_volume(self.context, volume_src)
        snapshot_id = create_snapshot(volume_src['id'],
                                      size=volume_src['size'])['id']
        snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)

        self.volume.driver._initialized = False

        self.assertRaises(exception.DriverNotInitialized,
                          self.volume.create_snapshot, self.context,
                          snapshot_obj)

        # NOTE(flaper87): The snapshot status should be error.
        self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)

        # Let's clean up the mess.
        self.volume.driver._initialized = True
        self.volume.delete_snapshot(self.context, snapshot_obj)
        self.volume.delete_volume(self.context, volume_src)

    @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
    def test_create_delete_snapshot(self, mock_notify):
        """Test snapshot can be created and deleted."""
        volume = tests_utils.create_volume(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            **self.volume_params)

        mock_notify.assert_not_called()

        self.volume.create_volume(self.context, volume)

        self.assert_notify_called(
            mock_notify,
            (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end']),
            any_order=True)

        snapshot = create_snapshot(volume['id'], size=volume['size'])
        snapshot_id = snapshot.id
        self.volume.create_snapshot(self.context, snapshot)
        self.assertEqual(
            snapshot_id,
            objects.Snapshot.get_by_id(self.context, snapshot_id).id)

        self.assert_notify_called(
            mock_notify,
            (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'], [
                'INFO', 'snapshot.create.start'
            ], ['INFO', 'snapshot.create.end']),
            any_order=True)

        self.volume.delete_snapshot(self.context, snapshot)
        self.assert_notify_called(mock_notify, ([
            'INFO', 'volume.create.start'
        ], ['INFO', 'volume.create.end'], ['INFO', 'snapshot.create.start'], [
            'INFO', 'snapshot.create.end'
        ], ['INFO', 'snapshot.delete.start'], ['INFO', 'snapshot.delete.end']),
                                  any_order=True)

        snap = objects.Snapshot.get_by_id(
            context.get_admin_context(read_deleted='yes'), snapshot_id)
        self.assertEqual(fields.SnapshotStatus.DELETED, snap.status)
        self.assertRaises(exception.NotFound, db.snapshot_get, self.context,
                          snapshot_id)
        self.volume.delete_volume(self.context, volume)

    def test_create_delete_snapshot_with_metadata(self):
        """Test snapshot can be created with metadata and deleted."""
        test_meta = {'fake_key': 'fake_value'}
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        snapshot = create_snapshot(volume['id'],
                                   size=volume['size'],
                                   metadata=test_meta)
        snapshot_id = snapshot.id

        result_dict = snapshot.metadata

        self.assertEqual(test_meta, result_dict)
        self.volume.delete_snapshot(self.context, snapshot)
        self.assertRaises(exception.NotFound, db.snapshot_get, self.context,
                          snapshot_id)

    def test_delete_snapshot_another_cluster_fails(self):
        """Test delete of snapshot from another cluster fails."""
        self.volume.cluster = 'mycluster'
        volume = tests_utils.create_volume(self.context,
                                           status='available',
                                           size=1,
                                           host=CONF.host + 'fake',
                                           cluster_name=self.volume.cluster)
        snapshot = create_snapshot(volume.id, size=volume.size)

        self.volume.delete_snapshot(self.context, snapshot)
        self.assertRaises(exception.NotFound, db.snapshot_get, self.context,
                          snapshot.id)

    @mock.patch.object(
        db,
        'snapshot_create',
        side_effect=exception.InvalidSnapshot('Create snapshot in db failed!'))
    def test_create_snapshot_failed_db_snapshot(self, mock_snapshot):
        """Test exception handling when create snapshot in db failed."""
        test_volume = tests_utils.create_volume(self.context,
                                                status='available',
                                                host=CONF.host)
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidSnapshot,
                          volume_api.create_snapshot, self.context,
                          test_volume, 'fake_name', 'fake_description')

    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    def test_create_snapshot_in_db_invalid_volume_status(self, mock_get):
        test_volume1 = tests_utils.create_volume(self.context,
                                                 status='available',
                                                 host=CONF.host)
        test_volume2 = tests_utils.create_volume(self.context,
                                                 status='deleting',
                                                 host=CONF.host)
        mock_get.return_value = test_volume2
        volume_api = cinder.volume.api.API()

        self.assertRaises(exception.InvalidVolume,
                          volume_api.create_snapshot_in_db,
                          self.context,
                          test_volume1,
                          "fake_snapshot_name",
                          "fake_description",
                          False, {},
                          None,
                          commit_quota=False)

    @mock.patch('cinder.objects.volume.Volume.get_by_id')
    def test_create_snapshot_in_db_invalid_metadata(self, mock_get):
        test_volume = tests_utils.create_volume(self.context,
                                                status='available',
                                                host=CONF.host)
        mock_get.return_value = test_volume
        volume_api = cinder.volume.api.API()

        with mock.patch.object(QUOTAS, 'add_volume_type_opts'),\
            mock.patch.object(QUOTAS, 'reserve') as mock_reserve,\
                mock.patch.object(QUOTAS, 'commit') as mock_commit:
            self.assertRaises(exception.InvalidInput,
                              volume_api.create_snapshot_in_db,
                              self.context,
                              test_volume,
                              "fake_snapshot_name",
                              "fake_description",
                              False,
                              "fake_metadata",
                              None,
                              commit_quota=True)
            mock_reserve.assert_not_called()
            mock_commit.assert_not_called()

    def test_create_snapshot_failed_maintenance(self):
        """Test exception handling when create snapshot in maintenance."""
        test_volume = tests_utils.create_volume(self.context,
                                                status='maintenance',
                                                host=CONF.host)
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot,
                          self.context, test_volume, 'fake_name',
                          'fake_description')

    @mock.patch.object(
        QUOTAS,
        'commit',
        side_effect=exception.QuotaError('Snapshot quota commit failed!'))
    def test_create_snapshot_failed_quota_commit(self, mock_snapshot):
        """Test exception handling when snapshot quota commit failed."""
        test_volume = tests_utils.create_volume(self.context,
                                                status='available',
                                                host=CONF.host)
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.QuotaError, volume_api.create_snapshot,
                          self.context, test_volume, 'fake_name',
                          'fake_description')

    @mock.patch.object(QUOTAS,
                       'reserve',
                       side_effect=OVER_SNAPSHOT_QUOTA_EXCEPTION)
    def test_create_snapshot_failed_quota_reserve(self, mock_reserve):
        """Test exception handling when snapshot quota reserve failed."""
        test_volume = tests_utils.create_volume(self.context,
                                                status='available',
                                                host=CONF.host)
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.SnapshotLimitExceeded,
                          volume_api.create_snapshot, self.context,
                          test_volume, 'fake_name', 'fake_description')

    @mock.patch.object(QUOTAS,
                       'reserve',
                       side_effect=OVER_SNAPSHOT_QUOTA_EXCEPTION)
    def test_create_snapshots_in_db_failed_quota_reserve(self, mock_reserve):
        """Test exception handling when snapshot quota reserve failed."""
        test_volume = tests_utils.create_volume(self.context,
                                                status='available',
                                                host=CONF.host)
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.SnapshotLimitExceeded,
                          volume_api.create_snapshots_in_db, self.context,
                          [test_volume], 'fake_name', 'fake_description',
                          fake.CONSISTENCY_GROUP_ID)

    def test_create_snapshot_failed_host_is_None(self):
        """Test exception handling when create snapshot and host is None."""
        test_volume = tests_utils.create_volume(self.context, host=None)
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot,
                          self.context, test_volume, 'fake_name',
                          'fake_description')

    def test_create_snapshot_force(self):
        """Test snapshot in use can be created forcibly."""

        instance_uuid = '12345678-1234-5678-1234-567812345678'
        # create volume and attach to the instance
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume)
        values = {
            'volume_id': volume['id'],
            'instance_uuid': instance_uuid,
            'attach_status': fields.VolumeAttachStatus.ATTACHING,
        }
        attachment = db.volume_attach(self.context, values)
        db.volume_attached(self.context, attachment['id'], instance_uuid, None,
                           '/dev/sda1')

        volume_api = cinder.volume.api.API()
        volume = volume_api.get(self.context, volume['id'])
        self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot,
                          self.context, volume, 'fake_name',
                          'fake_description')
        snapshot_ref = volume_api.create_snapshot_force(
            self.context, volume, 'fake_name', 'fake_description')
        snapshot_ref.destroy()
        db.volume_destroy(self.context, volume['id'])

        # create volume and attach to the host
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        self.volume.create_volume(self.context, volume)
        values = {
            'volume_id': volume['id'],
            'attached_host': 'fake_host',
            'attach_status': fields.VolumeAttachStatus.ATTACHING,
        }
        attachment = db.volume_attach(self.context, values)
        db.volume_attached(self.context, attachment['id'], None, 'fake_host',
                           '/dev/sda1')

        volume_api = cinder.volume.api.API()
        volume = volume_api.get(self.context, volume['id'])
        self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot,
                          self.context, volume, 'fake_name',
                          'fake_description')
        snapshot_ref = volume_api.create_snapshot_force(
            self.context, volume, 'fake_name', 'fake_description')
        snapshot_ref.destroy()
        db.volume_destroy(self.context, volume['id'])

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_snapshot_from_bootable_volume(self, mock_qemu_info):
        """Test create snapshot from bootable volume."""
        # create bootable volume from image
        volume = self._create_volume_from_image()
        volume_id = volume['id']
        self.assertEqual('available', volume['status'])
        self.assertTrue(volume['bootable'])

        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info

        # get volume's volume_glance_metadata
        ctxt = context.get_admin_context()
        vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id)
        self.assertTrue(bool(vol_glance_meta))

        # create snapshot from bootable volume
        snap = create_snapshot(volume_id)
        self.volume.create_snapshot(ctxt, snap)

        # get snapshot's volume_glance_metadata
        snap_glance_meta = db.volume_snapshot_glance_metadata_get(
            ctxt, snap.id)
        self.assertTrue(bool(snap_glance_meta))

        # ensure that volume's glance metadata is copied
        # to snapshot's glance metadata
        self.assertEqual(len(vol_glance_meta), len(snap_glance_meta))
        vol_glance_dict = {x.key: x.value for x in vol_glance_meta}
        snap_glance_dict = {x.key: x.value for x in snap_glance_meta}
        self.assertDictEqual(vol_glance_dict, snap_glance_dict)

        # ensure that snapshot's status is changed to 'available'
        self.assertEqual(fields.SnapshotStatus.AVAILABLE, snap.status)

        # cleanup resource
        snap.destroy()
        db.volume_destroy(ctxt, volume_id)

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    def test_create_snapshot_from_bootable_volume_fail(self, mock_qemu_info):
        """Test create snapshot from bootable volume.

        But it fails to volume_glance_metadata_copy_to_snapshot.
        As a result, status of snapshot is changed to ERROR.
        """
        # create bootable volume from image
        volume = self._create_volume_from_image()
        volume_id = volume['id']
        self.assertEqual('available', volume['status'])
        self.assertTrue(volume['bootable'])

        image_info = imageutils.QemuImgInfo()
        image_info.virtual_size = '1073741824'
        mock_qemu_info.return_value = image_info

        # get volume's volume_glance_metadata
        ctxt = context.get_admin_context()
        vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id)
        self.assertTrue(bool(vol_glance_meta))
        snap = create_snapshot(volume_id)
        self.assertEqual(36, len(snap.id))  # dynamically-generated UUID
        self.assertEqual('creating', snap.status)

        # set to return DB exception
        with mock.patch.object(db, 'volume_glance_metadata_copy_to_snapshot')\
                as mock_db:
            mock_db.side_effect = exception.MetadataCopyFailure(
                reason="Because of DB service down.")
            # create snapshot from bootable volume
            self.assertRaises(exception.MetadataCopyFailure,
                              self.volume.create_snapshot, ctxt, snap)

        # get snapshot's volume_glance_metadata
        self.assertRaises(exception.GlanceMetadataNotFound,
                          db.volume_snapshot_glance_metadata_get, ctxt,
                          snap.id)

        # ensure that status of snapshot is 'error'
        self.assertEqual(fields.SnapshotStatus.ERROR, snap.status)

        # cleanup resource
        snap.destroy()
        db.volume_destroy(ctxt, volume_id)

    def test_create_snapshot_from_bootable_volume_with_volume_metadata_none(
            self):
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume_id = volume['id']

        self.volume.create_volume(self.context, volume)
        # set bootable flag of volume to True
        db.volume_update(self.context, volume_id, {'bootable': True})

        snapshot = create_snapshot(volume['id'])
        self.volume.create_snapshot(self.context, snapshot)
        self.assertRaises(exception.GlanceMetadataNotFound,
                          db.volume_snapshot_glance_metadata_get, self.context,
                          snapshot.id)

        # ensure that status of snapshot is 'available'
        self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)

        # cleanup resource
        snapshot.destroy()
        db.volume_destroy(self.context, volume_id)

    def test_create_snapshot_during_encryption_key_migration(self):
        fixed_key_id = '00000000-0000-0000-0000-000000000000'
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume['encryption_key_id'] = fixed_key_id
        volume_id = volume['id']

        self.volume.create_volume(self.context, volume)

        kwargs = {'encryption_key_id': fixed_key_id}
        snapshot = create_snapshot(volume['id'], **kwargs)

        self.assertEqual(fixed_key_id, snapshot.encryption_key_id)
        db.volume_update(self.context, volume_id,
                         {'encryption_key_id': fake.ENCRYPTION_KEY_ID})

        self.volume.create_snapshot(self.context, snapshot)

        snap_db = db.snapshot_get(self.context, snapshot.id)
        self.assertEqual(fake.ENCRYPTION_KEY_ID, snap_db.encryption_key_id)

        # cleanup resource
        snapshot.destroy()
        db.volume_destroy(self.context, volume_id)

    def test_delete_busy_snapshot(self):
        """Test snapshot can be created and deleted."""

        self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
                                                      None, 'default')

        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        snapshot = create_snapshot(volume_id, size=volume['size'])
        self.volume.create_snapshot(self.context, snapshot)

        with mock.patch.object(self.volume.driver,
                               'delete_snapshot',
                               side_effect=exception.SnapshotIsBusy(
                                   snapshot_name='fake')) as mock_del_snap:
            snapshot_id = snapshot.id
            self.volume.delete_snapshot(self.context, snapshot)
            snapshot_ref = objects.Snapshot.get_by_id(self.context,
                                                      snapshot_id)
            self.assertEqual(snapshot_id, snapshot_ref.id)
            self.assertEqual(fields.SnapshotStatus.AVAILABLE,
                             snapshot_ref.status)
            mock_del_snap.assert_called_once_with(snapshot)

    @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
    def test_delete_no_dev_fails(self):
        """Test delete snapshot with no dev file fails."""
        self.mock_object(os.path, 'exists', lambda x: False)
        self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
                                                      None, 'default')

        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        snapshot = create_snapshot(volume_id)
        snapshot_id = snapshot.id
        self.volume.create_snapshot(self.context, snapshot)

        with mock.patch.object(self.volume.driver,
                               'delete_snapshot',
                               side_effect=exception.SnapshotIsBusy(
                                   snapshot_name='fake')) as mock_del_snap:
            self.volume.delete_snapshot(self.context, snapshot)
            snapshot_ref = objects.Snapshot.get_by_id(self.context,
                                                      snapshot_id)
            self.assertEqual(snapshot_id, snapshot_ref.id)
            self.assertEqual(fields.SnapshotStatus.AVAILABLE,
                             snapshot_ref.status)
            mock_del_snap.assert_called_once_with(snapshot)

    def test_force_delete_snapshot(self):
        """Test snapshot can be forced to delete."""
        fake_volume = tests_utils.create_volume(self.context)
        fake_snapshot = tests_utils.create_snapshot(self.context,
                                                    fake_volume.id,
                                                    status='error_deleting')
        # 'error_deleting' snapshot can't be deleted
        self.assertRaises(exception.InvalidSnapshot,
                          self.volume_api.delete_snapshot, self.context,
                          fake_snapshot)

        # delete with force
        self.volume_api.delete_snapshot(self.context,
                                        fake_snapshot,
                                        force=True)

        # status is deleting
        fake_snapshot.refresh()
        self.assertEqual(fields.SnapshotStatus.DELETING, fake_snapshot.status)

    def test_volume_api_update_snapshot(self):
        # create raw snapshot
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        snapshot = create_snapshot(volume['id'])
        snapshot_id = snapshot.id
        self.assertIsNone(snapshot.display_name)
        # use volume.api to update name
        volume_api = cinder.volume.api.API()
        update_dict = {'display_name': 'test update name'}
        volume_api.update_snapshot(self.context, snapshot, update_dict)
        # read changes from db
        snap = objects.Snapshot.get_by_id(context.get_admin_context(),
                                          snapshot_id)
        self.assertEqual('test update name', snap.display_name)

    @mock.patch.object(QUOTAS,
                       'reserve',
                       side_effect=OVER_SNAPSHOT_QUOTA_EXCEPTION)
    def test_existing_snapshot_failed_quota_reserve(self, mock_reserve):
        vol = tests_utils.create_volume(self.context)
        snap = tests_utils.create_snapshot(self.context, vol.id)
        with mock.patch.object(
                self.volume.driver,
                'manage_existing_snapshot_get_size') as mock_get_size:
            mock_get_size.return_value = 1
            self.assertRaises(exception.SnapshotLimitExceeded,
                              self.volume.manage_existing_snapshot,
                              self.context, snap)

    def test_delete_snapshot_driver_not_initialized(self):
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        snapshot = tests_utils.create_snapshot(self.context, volume.id)

        self.volume.driver._initialized = False
        self.assertRaises(exception.DriverNotInitialized,
                          self.volume.delete_snapshot, self.context, snapshot)

        snapshot.refresh()
        self.assertEqual(fields.SnapshotStatus.ERROR_DELETING, snapshot.status)
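
Several of these tests rely on an OVER_SNAPSHOT_QUOTA_EXCEPTION fixture defined elsewhere in the module. A plausible shape, given how OverQuota is unpacked in the earlier examples (kwargs overs/usages/quotas), would be something like the following; the specific numbers are illustrative:

OVER_SNAPSHOT_QUOTA_EXCEPTION = exception.OverQuota(
    overs=['snapshots'],
    usages={'snapshots': {'reserved': 1, 'in_use': 9}},
    quotas={'snapshots': 10})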
Example 18
    def create(self, context, size, name, description, snapshot=None,
                image_id=None, volume_type=None, metadata=None,
                availability_zone=None):
        check_policy(context, 'create')
        if snapshot is not None:
            if snapshot['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidSnapshot(reason=msg)
            if not size:
                size = snapshot['volume_size']

            snapshot_id = snapshot['id']
        else:
            snapshot_id = None

        def as_int(s):
            try:
                return int(s)
            except (ValueError, TypeError):
                return s

        # tolerate size as stringified int
        size = as_int(size)

        if not isinstance(size, int) or size <= 0:
            msg = (_("Volume size '%s' must be an integer and greater than 0")
                   % size)
            raise exception.InvalidInput(reason=msg)
        try:
            reservations = QUOTAS.reserve(context, volumes=1, gigabytes=size)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            pid = context.project_id
            if 'gigabytes' in overs:
                consumed = _consumed('gigabytes')
                quota = quotas['gigabytes']
                LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
                           "%(size)sG volume (%(consumed)dG of %(quota)dG "
                           "already consumed)") % locals())
                raise exception.VolumeSizeExceedsAvailableQuota()
            elif 'volumes' in overs:
                consumed = _consumed('volumes')
                LOG.warn(_("Quota exceeded for %(pid)s, tried to create "
                           "volume (%(consumed)d volumes already consumed)")
                           % locals())
                raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])

        if image_id:
            # check image existence
            image_meta = self.image_service.show(context, image_id)
            image_size_in_gb = (int(image_meta['size']) + GB - 1) // GB
            # Check that the image size is not larger than the volume size.
            if image_size_in_gb > size:
                msg = _('Size of specified image is larger than volume size.')
                raise exception.InvalidInput(reason=msg)

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        if volume_type is None:
            volume_type_id = None
        else:
            volume_type_id = volume_type.get('id', None)

        options = {
            'size': size,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': snapshot_id,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': name,
            'display_description': description,
            'volume_type_id': volume_type_id,
            'metadata': metadata,
            }

        volume = self.db.volume_create(context, options)

        QUOTAS.commit(context, reservations)

        self._cast_create_volume(context, volume['id'], snapshot_id,
                                 image_id)
        return volume
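The image-size check above uses a ceiling-division idiom to round a byte count up to whole gigabytes. A small standalone sketch, assuming GB is the usual 1024 ** 3 constant; note that under Python 3 the floor-division operator is needed to keep the result an int:

GB = 1024 ** 3


def bytes_to_gb_ceil(num_bytes):
    """Round a byte count up to the next whole gigabyte."""
    return (int(num_bytes) + GB - 1) // GB


assert bytes_to_gb_ceil(1) == 1        # anything non-zero needs a full GB
assert bytes_to_gb_ceil(2 * GB) == 2   # exact multiples are unchanged
assert bytes_to_gb_ceil(GB + 1) == 2   # one byte over rounds up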
Example No. 19
    def create(self,
               context,
               size,
               name,
               description,
               snapshot=None,
               image_id=None,
               volume_type=None,
               metadata=None,
               availability_zone=None,
               source_volume=None,
               scheduler_hints=None):

        exclusive_options = (snapshot, image_id, source_volume)
        exclusive_options_set = sum(1 for option in exclusive_options
                                    if option is not None)
        if exclusive_options_set > 1:
            msg = (_("May specify only one of snapshot, imageRef "
                     "or source volume"))
            raise exception.InvalidInput(reason=msg)

        check_policy(context, 'create')
        if snapshot is not None:
            if snapshot['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidSnapshot(reason=msg)
            if not size:
                size = snapshot['volume_size']
            elif size < snapshot['volume_size']:
                msg = _("Volume size cannot be smaller than "
                        "the snapshot size")
                raise exception.InvalidInput(reason=msg)
            snapshot_id = snapshot['id']
        else:
            snapshot_id = None

        if source_volume is not None:
            if source_volume['status'] == "error":
                msg = _("Unable to clone volumes that are in an error state")
                raise exception.InvalidSourceVolume(reason=msg)
            if not size:
                size = source_volume['size']
            else:
                if size < source_volume['size']:
                    msg = _("Clones currently must be "
                            ">= original volume size.")
                    raise exception.InvalidInput(reason=msg)
            source_volid = source_volume['id']
        else:
            source_volid = None

        def as_int(s):
            try:
                return int(s)
            except (ValueError, TypeError):
                return s

        # tolerate size as stringified int
        size = as_int(size)

        if not isinstance(size, int) or size <= 0:
            msg = (
                _("Volume size '%s' must be an integer and greater than 0") %
                size)
            raise exception.InvalidInput(reason=msg)

        if (image_id and not (source_volume or snapshot)):
            # check image existence
            image_meta = self.image_service.show(context, image_id)
            image_size_in_gb = (int(image_meta['size']) + GB - 1) // GB
            # Check that the image size is not larger than the volume size.
            if image_size_in_gb > size:
                msg = _('Size of specified image is larger than volume size.')
                raise exception.InvalidInput(reason=msg)
            # Check image minDisk requirement is met for the particular volume
            if size < image_meta.get('min_disk', 0):
                msg = _('Image minDisk size is larger than the volume size.')
                raise exception.InvalidInput(reason=msg)

        if availability_zone is None:
            if snapshot is not None:
                availability_zone = snapshot['volume']['availability_zone']
            elif source_volume is not None:
                availability_zone = source_volume['availability_zone']
            else:
                availability_zone = CONF.storage_availability_zone
        else:
            self._check_availabilty_zone(availability_zone)

        if CONF.cloned_volume_same_az:
            if (snapshot and snapshot['volume']['availability_zone'] !=
                    availability_zone):
                msg = _("Volume must be in the same "
                        "availability zone as the snapshot")
                raise exception.InvalidInput(reason=msg)
            elif source_volume and \
                    source_volume['availability_zone'] != availability_zone:
                msg = _("Volume must be in the same "
                        "availability zone as the source volume")
                raise exception.InvalidInput(reason=msg)

        if not volume_type and not source_volume:
            volume_type = volume_types.get_default_volume_type()

        if not volume_type and source_volume:
            volume_type_id = source_volume['volume_type_id']
        else:
            volume_type_id = volume_type.get('id')

        try:
            reserve_opts = {'volumes': 1, 'gigabytes': size}
            QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "%(s_size)sG volume (%(d_consumed)dG of "
                            "%(d_quota)dG already consumed)")
                    LOG.warn(
                        msg % {
                            's_pid': context.project_id,
                            's_size': size,
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeSizeExceedsAvailableQuota()
                elif 'volumes' in over:
                    msg = _("Quota exceeded for %(s_pid)s, tried to create "
                            "volume (%(d_consumed)d volumes"
                            "already consumed)")
                    LOG.warn(msg % {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.VolumeLimitExceeded(allowed=quotas[over])

        self._check_metadata_properties(context, metadata)
        options = {
            'size': size,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': snapshot_id,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': name,
            'display_description': description,
            'volume_type_id': volume_type_id,
            'metadata': metadata,
            'source_volid': source_volid
        }

        volume = None
        try:
            volume = self.db.volume_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if volume:
                        self.db.volume_destroy(context, volume['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        request_spec = {
            'volume_properties': options,
            'volume_type': volume_type,
            'volume_id': volume['id'],
            'snapshot_id': volume['snapshot_id'],
            'image_id': image_id,
            'source_volid': volume['source_volid']
        }

        if scheduler_hints:
            filter_properties = {'scheduler_hints': scheduler_hints}
        else:
            filter_properties = {}

        self._cast_create_volume(context, request_spec, filter_properties)

        return volume
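The try/except at the end of create() implements a commit-or-roll-back rule: if anything fails after the reservation, destroy whatever was created and return the quota before re-raising the original error. A runnable toy version, with FakeDB and FakeQuotas standing in for the real Cinder interfaces:

class FakeQuotas(object):
    def __init__(self):
        self.committed = False
        self.rolled_back = False

    def commit(self, reservations):
        self.committed = True

    def rollback(self, reservations):
        self.rolled_back = True


class FakeDB(object):
    def volume_create(self, options):
        raise RuntimeError('db down')

    def volume_destroy(self, volume_id):
        pass


def create(db, quotas, options, reservations):
    volume = None
    try:
        volume = db.volume_create(options)
        quotas.commit(reservations)
    except Exception:
        try:
            # Only destroy the record if it was actually created.
            if volume is not None:
                db.volume_destroy(volume['id'])
        finally:
            # The rollback runs whether or not the destroy succeeds.
            quotas.rollback(reservations)
        raise
    return volume


quotas = FakeQuotas()
try:
    create(FakeDB(), quotas, {}, ['res-1'])
except RuntimeError:
    pass
assert quotas.rolled_back and not quotas.committed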
Example No. 20
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None,
               metadata=None):
        """Make the RPC call to create a volume backup."""
        volume = self.volume_api.get(context, volume_id)
        context.authorize(policy.CREATE_POLICY, target_obj=volume)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
            if snapshot['status'] not in ["available"]:
                msg = (_('Snapshot to be backed up must be available, '
                         'but the current status is "%s".') %
                       snapshot['status'])
                raise exception.InvalidSnapshot(reason=msg)
        elif volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume.host, 'host')
        availability_zone = availability_zone or volume.availability_zone
        host = self._get_available_backup_service_host(volume_host,
                                                       availability_zone)

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            quota_utils.process_reserve_over_quota(context,
                                                   e,
                                                   resource='backups',
                                                   size=volume.size)
        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or
                        x['data_timestamp'] < snapshot['created_at'])
                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                QUOTAS.rollback(context, reservations)
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        parent = None

        if latest_backup:
            parent = latest_backup
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                QUOTAS.rollback(context, reservations)
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at
            self.db.snapshot_update(
                context, snapshot_id,
                {'status': fields.SnapshotStatus.BACKING_UP})
        else:
            self.db.volume_update(context, volume_id, {
                'status': 'backing-up',
                'previous_status': previous_status
            })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'availability_zone': availability_zone,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
                'parent': parent,
                'metadata': metadata or {}
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
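The parent-selection rule in the NOTE above is easiest to see with concrete timestamps. A toy version using plain dicts and naive datetimes in place of Backup objects, reproducing the 8:00 / 8:10 / 8:20 example from the comment:

from datetime import datetime

SENTINEL = datetime.min  # stands in for the tz-aware sentinel used above


def pick_parent(backups, snapshot_created_at=None):
    def key(backup):
        ts = backup['data_timestamp']
        if snapshot_created_at is None or ts < snapshot_created_at:
            return ts
        return SENTINEL  # newer than the snapshot; rank it last
    return max(backups, key=key) if backups else None


backups = [
    {'id': 'b1', 'data_timestamp': datetime(2024, 1, 1, 8, 0)},
    {'id': 'b2', 'data_timestamp': datetime(2024, 1, 1, 8, 20)},
]
snap_time = datetime(2024, 1, 1, 8, 10)

# Plain incremental backup: the newest backup is the parent.
assert pick_parent(backups)['id'] == 'b2'
# Incremental backup of the 8:10 snapshot: the 8:00 backup wins, because
# the 8:20 backup holds data newer than the snapshot.
assert pick_parent(backups, snap_time)['id'] == 'b1'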
Example No. 21
    def delete_snapshot(self, snapshot):
        """Delete a snapshot.

        If volume status is 'available', delete snapshot here in Cinder
        using qemu-img.

        If volume status is 'in-use', calculate what qcow2 files need to
        merge, and call to Nova to perform this operation.

        """

        LOG.debug(_('deleting snapshot %s') % snapshot['id'])

        volume_status = snapshot['volume']['status']
        if volume_status not in ['available', 'in-use']:
            msg = _('Volume status must be "available" or "in-use".')
            raise exception.InvalidVolume(reason=msg)

        # Determine the true snapshot file for this snapshot
        #  based on the .info file
        info_path = self._local_path_volume(snapshot['volume']) + '.info'
        snap_info = self._read_info_file(info_path)
        snapshot_file = snap_info[snapshot['id']]

        LOG.debug(_('snapshot_file for this snap is %s') % snapshot_file)

        snapshot_path = '%s/%s' % (self._local_volume_dir(snapshot['volume']),
                                   snapshot_file)

        if not os.path.exists(snapshot_path):
            msg = _('Snapshot file at %s does not exist.') % snapshot_path
            raise exception.InvalidSnapshot(reason=msg)

        vol_path = self._local_volume_dir(snapshot['volume'])

        # Find what file has this as its backing file
        active_file = self.get_active_image_from_info(snapshot['volume'])
        active_file_path = '%s/%s' % (vol_path, active_file)

        if volume_status == 'in-use':
            # Online delete
            context = snapshot['context']

            base_file = self._get_backing_file_for_path(snapshot_path)
            if base_file is None:
                # There should always be at least the original volume
                # file as base.
                msg = _('No base file found for %s.') % snapshot_path
                raise exception.GlusterfsException(msg)
            base_id = None
            info_path = self._local_path_volume(snapshot['volume']) + '.info'
            snap_info = self._read_info_file(info_path)
            for key, value in snap_info.items():
                if value == base_file and key != 'active':
                    base_id = key
                    break
            if base_id is None:
                # This means we are deleting the oldest snapshot.
                msg = _('No base_id found for %s') % snapshot_file
                LOG.debug(msg)

            online_delete_info = {
                'active_file': active_file,
                'snapshot_file': snapshot_file,
                'base_file': base_file,
                'base_id': base_id
            }

            return self._delete_snapshot_online(context,
                                                snapshot,
                                                online_delete_info)

        if snapshot_file == active_file:
            # Need to merge snapshot_file into its backing file
            # There is no top file
            #      T0       |        T1         |
            #     base      |   snapshot_file   | None
            # (guaranteed to|  (being deleted)  |
            #    exist)     |                   |

            base_file = self._get_backing_file_for_path(snapshot_path)
            snapshot_file_path = '%s/%s' % (vol_path, snapshot_file)

            self._qemu_img_commit(snapshot_file_path)
            self._execute('rm', '-f', snapshot_file_path, run_as_root=True)

            # Remove snapshot_file from info
            info_path = self._local_path_volume(snapshot['volume']) + '.info'
            snap_info = self._read_info_file(info_path)

            del snap_info[snapshot['id']]
            # Active file has changed
            snap_info['active'] = base_file
            self._write_info_file(info_path, snap_info)
        else:
            #      T0      |       T1        |       T2        |      T3
            #     base     |  snapshot_file  |   higher_file   |  highest_file
            # (guaranteed  | (being deleted) | (guaranteed to  | (may exist;
            #  to exist,   |                 |  exist, being   |  needs pointer
            #  not used    |                 |  committed      |  update if so)
            #  here)       |                 |  down)          |

            backing_chain = self._get_backing_chain_for_path(active_file_path)
            # This file is guaranteed to exist since we aren't operating on
            # the active file.
            higher_file = next((os.path.basename(f['filename'])
                                for f in backing_chain
                                if f.get('backing-filename', '') ==
                                snapshot_file),
                               None)
            if higher_file is None:
                msg = (_('No file found with %s as backing file.') %
                       snapshot_file)
                raise exception.GlusterfsException(msg)

            snap_info = self._read_info_file(info_path)
            higher_id = next((i for i in snap_info
                              if snap_info[i] == higher_file
                              and i != 'active'),
                             None)
            if higher_id is None:
                msg = (_('No snap found with %s as backing file.') %
                       higher_file)
                raise exception.GlusterfsException(msg)

            # Is there a file depending on higher_file?
            highest_file = next((os.path.basename(f['filename'])
                                for f in backing_chain
                                if f.get('backing-filename', '') ==
                                higher_file),
                                None)
            if highest_file is None:
                msg = _('No file depends on %s.') % higher_file
                LOG.debug(msg)

            # Committing higher_file into snapshot_file
            # And update pointer in highest_file
            higher_file_path = '%s/%s' % (vol_path, higher_file)
            self._qemu_img_commit(higher_file_path)
            if highest_file is not None:
                highest_file_path = '%s/%s' % (vol_path, highest_file)
                snapshot_file_fmt = self._get_file_format_for_path(
                    '%s/%s' % (vol_path, snapshot_file))

                backing_fmt = ('-F', snapshot_file_fmt)
                self._execute('qemu-img', 'rebase', '-u',
                              '-b', snapshot_file,
                              highest_file_path, *backing_fmt,
                              run_as_root=True)
            self._execute('rm', '-f', higher_file_path, run_as_root=True)

            # Remove snapshot_file from info
            info_path = self._local_path_volume(snapshot['volume']) + '.info'
            snap_info = self._read_info_file(info_path)
            del snap_info[snapshot['id']]
            snap_info[higher_id] = snapshot_file
            if higher_file == active_file:
                if highest_file is not None:
                    msg = _('Check condition failed: '
                            '%s expected to be None.') % 'highest_file'
                    raise exception.GlusterfsException(msg)
                # Active file has changed
                snap_info['active'] = snapshot_file
            self._write_info_file(info_path, snap_info)
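The offline branch above repeatedly searches the qcow2 backing chain for the file sitting directly on top of a given snapshot file. A reduced sketch of that lookup, with a hand-written chain in the shape qemu-img info reports (each entry carrying 'filename' and, except for the base image, 'backing-filename'):

import os


def file_above(backing_chain, snapshot_file):
    """Return the basename of the file backed by snapshot_file, or None."""
    return next((os.path.basename(entry['filename'])
                 for entry in backing_chain
                 if entry.get('backing-filename', '') == snapshot_file),
                None)


chain = [
    {'filename': '/vols/vol.active', 'backing-filename': 'vol.snap2'},
    {'filename': '/vols/vol.snap2', 'backing-filename': 'vol.snap1'},
    {'filename': '/vols/vol.snap1', 'backing-filename': 'vol.base'},
    {'filename': '/vols/vol.base'},
]

assert file_above(chain, 'vol.snap1') == 'vol.snap2'
assert file_above(chain, 'vol.active') is None  # nothing sits above the top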