Example #1
    def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
        # Validate volumes in remove_volumes.
        remove_volumes_new = ""
        for volume in volumes:
            if volume['id'] in remove_volumes_list:
                if volume['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
                    msg = (_("Cannot remove volume %(volume_id)s from "
                             "consistency group %(group_id)s because volume "
                             "is in an invalid state: %(status)s. Valid "
                             "states are: %(valid)s.") % {
                                 'volume_id': volume['id'],
                                 'group_id': group.id,
                                 'status': volume['status'],
                                 'valid': VALID_REMOVE_VOL_FROM_CG_STATUS
                             })
                    raise exception.InvalidVolume(reason=msg)
                # Volume currently in CG. It will be removed from CG.
                if remove_volumes_new:
                    remove_volumes_new += ","
                remove_volumes_new += volume['id']

        for rem_vol in remove_volumes_list:
            if rem_vol not in remove_volumes_new:
                msg = (_("Cannot remove volume %(volume_id)s from "
                         "consistency group %(group_id)s because it "
                         "is not in the group.") % {
                             'volume_id': rem_vol,
                             'group_id': group.id
                         })
                raise exception.InvalidVolume(reason=msg)

        return remove_volumes_new
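
A minimal, self-contained illustration of the string this validator returns, using hypothetical volume dicts (the status and membership checks are elided):

volumes = [{'id': 'uuid-1', 'status': 'available'},
           {'id': 'uuid-2', 'status': 'available'},
           {'id': 'uuid-3', 'status': 'available'}]
remove_volumes_list = ['uuid-1', 'uuid-3']

# The validator concatenates the matching IDs with commas and no spaces.
remove_volumes_new = ",".join(
    v['id'] for v in volumes if v['id'] in remove_volumes_list)
print(remove_volumes_new)  # -> 'uuid-1,uuid-3'
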
Example #2
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata."""
        volume = self.db.volume_get(self.context, backup.volume_id)

        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)

        container = self._create_container(self.context, backup)

        object_prefix = self._generate_object_name_prefix(backup)
        backup.service_metadata = object_prefix
        backup.save()

        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug('starting backup of volume: %(volume_id)s,'
                  ' volume size: %(volume_size_bytes)d, object names'
                  ' prefix %(object_prefix)s, availability zone:'
                  ' %(availability_zone)s',
                  {
                      'volume_id': backup.volume_id,
                      'volume_size_bytes': volume_size_bytes,
                      'object_prefix': object_prefix,
                      'availability_zone': availability_zone,
                  })
        object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
                       'volume_meta': None}
        object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
        extra_metadata = self.get_extra_metadata(backup, volume)
        if extra_metadata is not None:
            object_meta['extra_metadata'] = extra_metadata

        return (object_meta, object_sha256, extra_metadata, container,
                volume_size_bytes)
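
In upstream Cinder, units comes from oslo_utils, and units.Gi is simply 1024 ** 3; assuming the same here, the byte conversion above works out as:

from oslo_utils import units

# A 2 GB volume expressed in bytes, as computed for volume_size_bytes.
print(2 * units.Gi)  # -> 2147483648
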
Example #3
    def create(self, context, volume_id, display_name):
        """Creates an entry in the transfers table."""
        volume_api.check_policy(context, 'create_transfer')
        LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['status'] != "available":
            raise exception.InvalidVolume(reason=_("status must be available"))

        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.start")
        # The salt is just a short random string.
        salt = self._get_random_string(CONF.volume_transfer_salt_length)
        auth_key = self._get_random_string(CONF.volume_transfer_key_length)
        crypt_hash = self._get_crypt_hash(salt, auth_key)

        # TODO(ollie): Transfer expiry needs to be implemented.
        transfer_rec = {
            'volume_id': volume_id,
            'display_name': display_name,
            'salt': salt,
            'crypt_hash': crypt_hash,
            'expires_at': None
        }

        try:
            transfer = self.db.transfer_create(context, transfer_rec)
        except Exception:
            LOG.error(_LE("Failed to create transfer record "
                          "for %s"), volume_id)
            raise
        volume_utils.notify_about_volume_usage(context, volume_ref,
                                               "transfer.create.end")
        return {
            'id': transfer['id'],
            'volume_id': transfer['volume_id'],
            'display_name': transfer['display_name'],
            'auth_key': auth_key,
            'created_at': transfer['created_at']
        }
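
Neither _get_random_string nor _get_crypt_hash is shown in this example; below is a plausible sketch of a salted-HMAC scheme consistent with how they are used above (treat both bodies as assumptions, not the actual implementation):

import hashlib
import hmac
import os
import string

def _get_random_string(length):
    # Random alphanumeric string; os.urandom() yields ints in Python 3.
    chars = string.ascii_letters + string.digits
    return ''.join(chars[b % len(chars)] for b in os.urandom(length))

def _get_crypt_hash(salt, auth_key):
    # Persist only the HMAC of the auth key; the key itself is returned
    # once to the caller and never stored.
    return hmac.new(salt.encode('utf-8'),
                    auth_key.encode('utf-8'),
                    hashlib.sha1).hexdigest()
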
Example #4
    def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
        add_volumes_new = ""
        for volume in volumes:
            if volume['id'] in add_volumes_list:
                # Volume already in CG. Remove from add_volumes.
                add_volumes_list.remove(volume['id'])

        for add_vol in add_volumes_list:
            try:
                add_vol_ref = self.db.volume_get(context, add_vol)
            except exception.VolumeNotFound:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume cannot be "
                         "found.") % {
                             'volume_id': add_vol,
                             'group_id': group.id
                         })
                raise exception.InvalidVolume(reason=msg)
            orig_group = add_vol_ref.get('consistencygroup_id', None)
            if orig_group:
                # If volume to be added is already in the group to be updated,
                # it should have been removed from the add_volumes_list in the
                # beginning of this function. If we are here, it means it is
                # in a different group.
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because it is already in "
                         "consistency group %(orig_group)s.") % {
                             'volume_id': add_vol_ref['id'],
                             'group_id': group.id,
                             'orig_group': orig_group
                         })
                raise exception.InvalidVolume(reason=msg)
            if add_vol_ref:
                add_vol_type_id = add_vol_ref.get('volume_type_id', None)
                if not add_vol_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because it has no volume "
                             "type.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id
                             })
                    raise exception.InvalidVolume(reason=msg)
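                # Note: for a consistency group, group.volume_type_id is a
                # comma-separated string of supported volume type ids, so a
                # substring membership test is used below.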
                if add_vol_type_id not in group.volume_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume type "
                             "%(volume_type)s is not supported by the "
                             "group.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'volume_type': add_vol_type_id
                             })
                    raise exception.InvalidVolume(reason=msg)
                if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume is in an "
                             "invalid state: %(status)s. Valid states are: "
                             "%(valid)s.") % {
                                 'volume_id': add_vol_ref['id'],
                                 'group_id': group.id,
                                 'status': add_vol_ref['status'],
                                 'valid': VALID_ADD_VOL_TO_CG_STATUS
                             })
                    raise exception.InvalidVolume(reason=msg)

                # group.host and add_vol_ref['host'] are in this format:
                # 'host@backend#pool'. Extract host (host@backend) before
                # doing comparison.
                vol_host = vol_utils.extract_host(add_vol_ref['host'])
                group_host = vol_utils.extract_host(group.host)
                if group_host != vol_host:
                    raise exception.InvalidVolume(
                        reason=_("Volume is not local to this node."))

                # Volume exists. It will be added to CG.
                if add_volumes_new:
                    add_volumes_new += ","
                add_volumes_new += add_vol_ref['id']

            else:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume does not exist.") %
                       {
                           'volume_id': add_vol,
                           'group_id': group.id
                       })
                raise exception.InvalidVolume(reason=msg)

        return add_volumes_new
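
The host comparison above relies on vol_utils.extract_host() stripping the pool suffix; a simplified sketch of that behavior for the default case (the real helper takes additional level arguments):

def extract_host(host):
    # 'host@backend#pool' -> 'host@backend'; pool-less hosts pass through.
    return host.split('#')[0]

assert extract_host('node1@lvmdriver-1#pool0') == 'node1@lvmdriver-1'
assert extract_host('node1@lvmdriver-1') == 'node1@lvmdriver-1'
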
Example #5
    def update(self, context, group, name, description, add_volumes,
               remove_volumes):
        """Update consistency group."""
        if group.status != c_fields.ConsistencyGroupStatus.AVAILABLE:
            msg = _("Consistency group status must be available, "
                    "but current status is: %s.") % group.status
            raise exception.InvalidConsistencyGroup(reason=msg)

        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')

        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)

        volumes = self.db.volume_get_all_by_group(context, group.id)

        # Validate name.
        if not name or name == group.name:
            name = None

        # Validate description.
        if not description or description == group.description:
            description = None

        # Validate volumes in add_volumes and remove_volumes.
        add_volumes_new = ""
        remove_volumes_new = ""
        if add_volumes_list:
            add_volumes_new = self._validate_add_volumes(
                context, volumes, add_volumes_list, group)
        if remove_volumes_list:
            remove_volumes_new = self._validate_remove_volumes(
                volumes, remove_volumes_list, group)

        if (not name and not description and not add_volumes_new
                and not remove_volumes_new):
            msg = (_("Cannot update consistency group %(group_id)s "
                     "because no valid name, description, add_volumes, "
                     "or remove_volumes were provided.") % {
                         'group_id': group.id
                     })
            raise exception.InvalidConsistencyGroup(reason=msg)

        fields = {'updated_at': timeutils.utcnow()}

        # Update name and description in the db now. There is no need to
        # send them over in an RPC call.
        if name:
            fields['name'] = name
        if description:
            fields['description'] = description
        if not add_volumes_new and not remove_volumes_new:
            # Only update name or description. Set status to available.
            fields['status'] = 'available'
        else:
            fields['status'] = 'updating'

        group.update(fields)
        group.save()

        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.jacket_rpcapi.update_consistencygroup(
                context,
                group,
                add_volumes=add_volumes_new,
                remove_volumes=remove_volumes_new)
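
How the incoming comma-separated UUID strings are normalized at the top of update(), shown with hypothetical input; note that strip(',') only trims leading and trailing commas:

add_volumes = ',uuid-1,uuid-2,'
add_volumes_list = add_volumes.strip(',').split(',')
print(add_volumes_list)  # -> ['uuid-1', 'uuid-2']
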
Example #6
    def accept(self, context, transfer_id, auth_key):
        """Accept a volume that has been offered for transfer."""
        # We must use an elevated context to see the volume that is still
        # owned by the donor.
        volume_api.check_policy(context, 'accept_transfer')
        transfer = self.db.transfer_get(context.elevated(), transfer_id)

        crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key)
        if crypt_hash != transfer['crypt_hash']:
            msg = (_("Attempt to transfer %s with invalid auth key.") %
                   transfer_id)
            LOG.error(msg)
            raise exception.InvalidAuthKey(reason=msg)

        volume_id = transfer['volume_id']
        vol_ref = self.db.volume_get(context.elevated(), volume_id)
        if vol_ref['consistencygroup_id']:
            msg = _("Volume %s must not be part of a consistency "
                    "group.") % vol_ref['id']
            LOG.error(msg)
            raise exception.InvalidVolume(reason=msg)

        volume_utils.notify_about_volume_usage(context, vol_ref,
                                               "transfer.accept.start")

        try:
            reserve_opts = {'volumes': 1, 'gigabytes': vol_ref.size}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        vol_ref.volume_type_id)
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                return (usages[name]['reserved'] + usages[name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG volume (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': vol_ref['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeSizeExceedsAvailableQuota(
                        requested=vol_ref['size'],
                        consumed=_consumed(over),
                        quota=quotas[over])
                elif 'volumes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "volume (%(d_consumed)d volumes "
                              "already consumed)")
                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.VolumeLimitExceeded(allowed=quotas[over],
                                                        name=over)

        try:
            donor_id = vol_ref['project_id']
            reserve_opts = {'volumes': -1, 'gigabytes': -vol_ref.size}
            QUOTAS.add_volume_type_opts(context, reserve_opts,
                                        vol_ref.volume_type_id)
            donor_reservations = QUOTAS.reserve(context.elevated(),
                                                project_id=donor_id,
                                                **reserve_opts)
        except Exception:
            donor_reservations = None
            LOG.exception(
                _LE("Failed to update quota donating volume"
                    " transfer id %s"), transfer_id)

        try:
            # Transfer ownership of the volume now, must use an elevated
            # context.
            self.volume_api.accept_transfer(context, vol_ref, context.user_id,
                                            context.project_id)
            self.db.transfer_accept(context.elevated(), transfer_id,
                                    context.user_id, context.project_id)
            QUOTAS.commit(context, reservations)
            if donor_reservations:
                QUOTAS.commit(context, donor_reservations, project_id=donor_id)
            LOG.info(_LI("Volume %s has been transferred."), volume_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, reservations)
                if donor_reservations:
                    QUOTAS.rollback(context,
                                    donor_reservations,
                                    project_id=donor_id)

        vol_ref = self.db.volume_get(context, volume_id)
        volume_utils.notify_about_volume_usage(context, vol_ref,
                                               "transfer.accept.end")
        return {
            'id': transfer_id,
            'display_name': transfer['display_name'],
            'volume_id': vol_ref['id']
        }
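
The OverQuota handler above indexes e.kwargs by resource name; its expected shape, illustrated with hypothetical numbers:

e_kwargs = {
    'overs': ['gigabytes'],
    'usages': {'gigabytes': {'reserved': 5, 'in_use': 90}},
    'quotas': {'gigabytes': 100},
}

def _consumed(name):
    return (e_kwargs['usages'][name]['reserved'] +
            e_kwargs['usages'][name]['in_use'])

# 95 GB already consumed against a 100 GB quota.
print(_consumed('gigabytes'))  # -> 95
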
Example #7
    def _get_iscsi_properties(self, volume, multipath=False):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future.

        The properties are:

        :target_discovered:    boolean indicating whether discovery was used

        :target_iqn:    the IQN of the iSCSI target

        :target_portal:    the portal of the iSCSI target

        :target_lun:    the lun of the iSCSI target

        :volume_id:    the uuid of the volume

        :auth_method:, :auth_username:, :auth_password:

            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        :discard:    boolean indicating if discard is supported

        Some drivers that support multiple connections (for multipath, or for
        single path with failover on connection failure) also return
        :target_iqns, :target_portals, and :target_luns, which contain lists
        of multiple values. The main portal information is still returned in
        :target_iqn, :target_portal, and :target_lun for backward
        compatibility.

        Note that some drivers don't return :target_portals even if they
        support multipath; in that case a multipath-capable connector should
        use sendtargets discovery to find the other portals.
        """

        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                       volume['name'])
                raise exception.InvalidVolume(reason=msg)

            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True

        results = location.split(" ")
        portals = results[0].split(",")[0].split(";")
        iqn = results[1]
        nr_portals = len(portals)
        try:
            lun = int(results[2])
        except (IndexError, ValueError):
            # NOTE(jdg): The following is carried over from the existing
            # code.  The trick here is that different targets use different
            # default lun numbers, the base driver with tgtadm uses 1
            # others like LIO use 0.
            if (self.configuration.volume_driver ==
                    'jacket.storage.volume.drivers.lvm.ThinLVMVolumeDriver' and
                    self.configuration.iscsi_helper == 'tgtadm'):
                lun = 1
            else:
                lun = 0

        if nr_portals > 1 or multipath:
            properties['target_portals'] = portals
            properties['target_iqns'] = [iqn] * nr_portals
            properties['target_luns'] = [lun] * nr_portals
        properties['target_portal'] = portals[0]
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun

        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size

        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None

        return properties
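
The parsing above assumes provider_location follows iSCSI discovery output, roughly '<portal>[;<portal>...],<tpgt> <iqn> <lun>'; a worked example with made-up values:

location = ('10.0.0.1:3260;10.0.0.2:3260,1 '
            'iqn.2010-10.org.openstack:volume-1 0')
results = location.split(" ")
portals = results[0].split(",")[0].split(";")
print(portals)          # -> ['10.0.0.1:3260', '10.0.0.2:3260']
print(results[1])       # -> 'iqn.2010-10.org.openstack:volume-1'
print(int(results[2]))  # -> 0
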
Example #8
    def restore_backup(self, context, backup, volume_id):
        """Restore volume backups from configured backup service."""
        LOG.info(
            _LI('Restore backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        volume = storage.Volume.get_by_id(context, volume_id)
        self._notify_about_backup_usage(context, backup, "restore.start")

        backup.host = self.host
        backup.save()

        expected_status = 'restoring-backup'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted, expected volume status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.RESTORING
        actual_status = backup['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted: expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            self._update_backup_error(backup, context, err)
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        if volume['size'] > backup['size']:
            LOG.info(
                _LI('Volume: %(vol_id)s, size: %(vol_size)d is '
                    'larger than backup: %(backup_id)s, '
                    'size: %(backup_size)d, continuing with restore.'), {
                        'vol_id': volume['id'],
                        'vol_size': volume['size'],
                        'backup_id': backup['id'],
                        'backup_size': backup['size']
                    })

        backup_service = self._map_service_to_driver(backup['service'])
        configured_service = self.driver_name
        if backup_service != configured_service:
            err = _('Restore backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s].') % {
                        'configured_service': configured_service,
                        'backup_service': backup_service,
                    }
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        try:
            self._run_restore(context, backup, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_restoring'})
                backup.status = fields.BackupStatus.AVAILABLE
                backup.save()

        self.db.volume_update(context, volume_id, {'status': 'available'})
        backup.status = fields.BackupStatus.AVAILABLE
        backup.save()
        LOG.info(
            _LI('Restore backup finished, backup %(backup_id)s restored'
                ' to volume %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })
        self._notify_about_backup_usage(context, backup, "restore.end")
Example #9
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service."""
        volume_id = backup.volume_id
        volume = storage.Volume.get_by_id(context, volume_id)
        previous_status = volume.get('previous_status', None)
        LOG.info(
            _LI('Create backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        self._notify_about_backup_usage(context, backup, "create.start")

        backup.host = self.host
        backup.service = self.driver_name
        backup.availability_zone = self.az
        backup.save()

        expected_status = 'backing-up'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = _('Create backup aborted, expected volume status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, context, err)
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.CREATING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, context, err)
            backup.save()
            raise exception.InvalidBackup(reason=err)

        try:
            self._run_backup(context, backup, volume)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(
                    context, volume_id, {
                        'status': previous_status,
                        'previous_status': 'error_backing-up'
                    })
                self._update_backup_error(backup, context, six.text_type(err))

        # Restore the original status.
        self.db.volume_update(context, volume_id, {
            'status': previous_status,
            'previous_status': 'backing-up'
        })
        backup.status = fields.BackupStatus.AVAILABLE
        backup.size = volume['size']
        backup.save()

        # Update the parent backup's num_dependent_backups once the child
        # backup has been created successfully.
        if backup.parent_id:
            parent_backup = storage.Backup.get_by_id(context, backup.parent_id)
            parent_backup.num_dependent_backups += 1
            parent_backup.save()
        LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
Example #10
    def restore(self, context, backup_id, volume_id=None, name=None):
        """Make the RPC call to restore a volume backup."""
        check_policy(context, 'restore')
        backup = self.get(context, backup_id)
        if backup['status'] != fields.BackupStatus.AVAILABLE:
            msg = _('Backup status must be available')
            raise exception.InvalidBackup(reason=msg)

        size = backup['size']
        if size is None:
            msg = _('Backup to be restored has invalid size')
            raise exception.InvalidBackup(reason=msg)

        # Create a volume if none was specified. If a volume is specified,
        # check that it is large enough for the backup.
        if volume_id is None:
            if name is None:
                name = 'restore_backup_%s' % backup_id

            description = 'auto-created_from_restore_from_backup'

            LOG.info(_LI("Creating volume of %(size)s GB for restore of "
                         "backup %(backup_id)s."), {
                             'size': size,
                             'backup_id': backup_id
                         },
                     context=context)
            volume = self.volume_api.create(context, size, name, description)
            volume_id = volume['id']

            while True:
                volume = self.volume_api.get(context, volume_id)
                if volume['status'] != 'creating':
                    break
                greenthread.sleep(1)
        else:
            volume = self.volume_api.get(context, volume_id)

        if volume['status'] != "available":
            msg = _('Volume to be restored to must be available')
            raise exception.InvalidVolume(reason=msg)

        LOG.debug('Checking backup size %(bs)s against volume size %(vs)s', {
            'bs': size,
            'vs': volume['size']
        })
        if size > volume['size']:
            msg = (_('volume size %(volume_size)d is too small to restore '
                     'backup of size %(size)d.') % {
                         'volume_size': volume['size'],
                         'size': size
                     })
            raise exception.InvalidVolume(reason=msg)

        LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
                     "backup %(backup_id)s"), {
                         'volume_id': volume_id,
                         'backup_id': backup_id
                     },
                 context=context)

        # Set the status here rather than at the start, which would require
        # unrolling it for each error condition; the window should be very
        # small.
        backup.host = self._get_available_backup_service_host(
            backup.host, backup.availability_zone, volume_host=volume.host)
        backup.status = fields.BackupStatus.RESTORING
        backup.restore_volume_id = volume.id
        backup.save()
        self.db.volume_update(context, volume_id,
                              {'status': 'restoring-backup'})

        self.backup_rpcapi.restore_backup(context, backup.host, backup,
                                          volume_id)

        d = {
            'backup_id': backup_id,
            'volume_id': volume_id,
            'volume_name': volume['display_name'],
        }

        return d
Example #11
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".') % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        host = self._get_available_backup_service_host(
            None, volume.availability_zone,
            volume_utils.extract_host(volume.host, 'host'))

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = storage.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id, {
            'status': 'backing-up',
            'previous_status': previous_status
        })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = storage.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
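
The parent-selection expression is easiest to follow with the timeline from the NOTE above, reduced to plain dicts (assuming pytz supplies timezone, as the sentinel datetime in the snippet suggests):

from datetime import datetime
from pytz import timezone

utc = timezone('UTC')
backups = [
    {'id': 'b-0800',
     'data_timestamp': datetime(2016, 1, 1, 8, 0, tzinfo=utc)},
    {'id': 'b-0820',
     'data_timestamp': datetime(2016, 1, 1, 8, 20, tzinfo=utc)},
]
snapshot = {'created_at': datetime(2016, 1, 1, 8, 10, tzinfo=utc)}

# Backups newer than the snapshot collapse to the year-1 sentinel, so the
# 8:00 backup wins as parent when backing up from the 8:10 snapshot.
latest_backup = max(
    backups,
    key=lambda x: x['data_timestamp']
    if x['data_timestamp'] < snapshot['created_at']
    else datetime(1, 1, 1, 1, 1, 1, tzinfo=utc))
print(latest_backup['id'])  # -> 'b-0800'
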