Example #1
    def create(self, context, name, description, volume_id,
               container, incremental=False, availability_zone=None,
               force=False):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".')
                   % volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] == "in-use" and not force:
            msg = _('Backing up an in-use volume must use '
                    'the force flag.')
            raise exception.InvalidVolume(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(msg, {'s_pid': context.project_id,
                                      's_size': volume['size'],
                                      'd_consumed': _consumed(over),
                                      'd_quota': quotas[over]})
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {'s_pid': context.project_id,
                                      'd_consumed': _consumed(over)})
                    raise exception.BackupLimitExceeded(
                        allowed=quotas[over])

        # Find the latest backup of the volume and use it as the parent
        # backup to do an incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                           volume_id)
            if backups.objects:
                latest_backup = max(backups.objects,
                                    key=lambda x: x['created_at'])
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != "available":
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        self.db.volume_update(context, volume_id,
                              {'status': 'backing-up',
                               'previous_status': previous_status})
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': 'creating',
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
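
The try/except around QUOTAS.reserve is the standard reserve/commit/rollback
quota pattern: reserve capacity first, commit only once the backup record
exists, and roll back on any failure. Below is a minimal, self-contained
sketch of that pattern with a toy in-memory engine; ToyQuotas and this
OverQuota are illustrative stand-ins, not cinder's real quota code.

class OverQuota(Exception):
    """Raised when a reservation would exceed a resource's quota."""
    def __init__(self, overs, usages, quotas):
        super().__init__(overs)
        self.kwargs = {'overs': overs, 'usages': usages, 'quotas': quotas}

class ToyQuotas:
    """In-memory stand-in for cinder's QUOTAS engine."""
    def __init__(self, quotas):
        self.quotas = quotas
        self.usages = {r: {'in_use': 0, 'reserved': 0} for r in quotas}
        self._reservations = {}
        self._next_id = 0

    def reserve(self, context, **deltas):
        overs = [r for r, d in deltas.items()
                 if (self.usages[r]['in_use'] + self.usages[r]['reserved'] +
                     d) > self.quotas[r]]
        if overs:
            raise OverQuota(overs, self.usages, self.quotas)
        for r, d in deltas.items():
            self.usages[r]['reserved'] += d
        self._next_id += 1
        self._reservations[self._next_id] = deltas
        return self._next_id

    def commit(self, context, reservations):
        # On success the reservation becomes real usage.
        for r, d in self._reservations.pop(reservations).items():
            self.usages[r]['reserved'] -= d
            self.usages[r]['in_use'] += d

    def rollback(self, context, reservations):
        # On failure the reservation is simply released.
        for r, d in self._reservations.pop(reservations).items():
            self.usages[r]['reserved'] -= d

QUOTAS = ToyQuotas({'backups': 2, 'backup_gigabytes': 10})
rid = QUOTAS.reserve(None, backups=1, backup_gigabytes=5)
QUOTAS.commit(None, rid)      # success path
rid = QUOTAS.reserve(None, backups=1, backup_gigabytes=5)
QUOTAS.rollback(None, rid)    # failure path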
Example #2
File: api.py Project: abusse/cinder
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               availability_zone=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        if volume['status'] != "available":
            msg = _('Volume to be backed up must be available')
            raise exception.InvalidVolume(reason=msg)
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        self.db.volume_update(context, volume_id, {'status': 'backing-up'})
        options = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'display_name': name,
            'display_description': description,
            'volume_id': volume_id,
            'status': 'creating',
            'container': container,
            'size': volume['size'],
            'host': volume_host,
        }
        try:
            backup = self.db.backup_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.backup_destroy(context, backup['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup['host'], backup['id'],
                                         volume_id)

        return backup
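
Every example reduces volume['host'] to a plain host name with
volume_utils.extract_host(volume['host'], 'host'). For readers unfamiliar
with cinder's 'host@backend#pool' convention, here is a simplified
re-implementation of that parsing, for illustration only (the real helper
also supports a default pool name):

def extract_host(host, level='backend'):
    """Return one component of a 'host@backend#pool' string."""
    if level == 'host':
        return host.split('@')[0]           # 'srv1@ceph#rbd' -> 'srv1'
    if level == 'backend':
        return host.split('#')[0]           # 'srv1@ceph#rbd' -> 'srv1@ceph'
    return host.split('#')[1] if '#' in host else None  # pool

assert extract_host('srv1@ceph#rbd', 'host') == 'srv1'
assert extract_host('srv1@ceph#rbd', 'backend') == 'srv1@ceph'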
Example #3
    def create_instance_backup(self,
                               context,
                               instance_uuid,
                               name,
                               description,
                               volume_ids,
                               container,
                               incremental=False,
                               availability_zone=None,
                               force=True):
        """Make the RPC call to create backup for volume-based instance."""
        # Use the same policy as backup creation
        check_policy(context, 'create')

        server = nova.API().get_server(context, instance_uuid)
        if server.status not in [
                "ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED", "SHELVED_OFFLOADED"
        ]:
            msg = (_("Instance %(instance_uuid)s in %(status)s status "
                     "which is not allowed to be backed up.") % {
                         'instance_uuid': instance_uuid,
                         'status': server.status
                     })
            raise exception.InvalidInstanceStatus(reason=msg)

        volumes = [
            self.volume_api.get(context, volume_id) for volume_id in volume_ids
        ]

        for volume in volumes:
            # Verify all volumes are in 'in-use' state
            if volume['status'] != "in-use":
                msg = (_('Volume to be backed up must be in-use '
                         'but the current status is "%s".') % volume['status'])
                raise exception.InvalidVolume(reason=msg)

            # Verify backup service is enabled on host
            volume_host = volume_utils.extract_host(volume['host'], 'host')
            if not self._is_backup_service_enabled(volume, volume_host):
                raise exception.ServiceNotFound(service_id='cinder-backup')

        backups = []
        inst_backup_kwargs = []

        # Add a 32-character UUID prefix to display_description, so that
        # backups created in the same request can be identified as a group
        desc_prefix = str(uuid.uuid4()).replace('-', '')

        for volume in volumes:
            # Reserve a quota before setting volume status and backup status
            try:
                reserve_opts = {
                    'backups': 1,
                    'backup_gigabytes': volume['size']
                }
                LOG.info(
                    _LI("create_instance_backup "
                        "reserve_opts: %(reserve_opts)s"),
                    {'reserve_opts': reserve_opts})
                reservations = QUOTAS.reserve(context, **reserve_opts)
            except exception.OverQuota as e:
                overs = e.kwargs['overs']
                usages = e.kwargs['usages']
                quotas = e.kwargs['quotas']

                # Reset the status of the volumes already processed and
                # delete their related backups
                for backup in backups:
                    self.db.volume_update(context, backup['volume_id'],
                                          {'status': 'in-use'})
                    self.db.backup_update(context, backup['id'],
                                          {'status': 'error'})
                    self.delete(context, backup['id'])

                def _consumed(resource_name):
                    return (usages[resource_name]['reserved'] +
                            usages[resource_name]['in_use'])

                for over in overs:
                    if 'gigabytes' in over:
                        msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                                  "create "
                                  "%(s_size)sG backup "
                                  "(%(d_consumed)dG of "
                                  "%(d_quota)dG already consumed)")
                        LOG.warning(
                            msg, {
                                's_pid': context.project_id,
                                's_size': volume['size'],
                                'd_consumed': _consumed(over),
                                'd_quota': quotas[over]
                            })
                        raise exception.VolumeBackupSizeExceedsAvailableQuota(
                            requested=volume['size'],
                            consumed=_consumed('backup_gigabytes'),
                            quota=quotas['backup_gigabytes'])
                    elif 'backups' in over:
                        msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                                  "create backups (%(d_consumed)d backups "
                                  "already consumed)")
                        LOG.warning(
                            msg, {
                                's_pid': context.project_id,
                                'd_consumed': _consumed(over)
                            })
                        raise exception.BackupLimitExceeded(
                            allowed=quotas[over])

            # Ceph doesn't use parent_id to determine an incremental
            # backup, so this block is commented out.
            #
            # Find the latest backup of the volume and use it as the parent
            # backup to do an incremental backup.
            # latest_backup = None
            # if incremental:
            #     backups = \
            #              objects.BackupList.get_all_by_volume(context.elevated(),
            #                                                   volume['id'])
            #     if backups.objects:
            #         latest_backup = max(backups.objects,
            #                             key=lambda x: x['created_at'])
            #     else:
            #         msg = _('No backups available \
            #                  to do an incremental backup.')
            #         raise exception.InvalidBackup(reason=msg)
            latest_backup = None
            # Added for periodic backup
            if getattr(context, 'periodic', False):
                latest_backup = None
                description = (PERIODICSTR + description if description
                               else PERIODICSTR)
            else:
                if incremental:
                    all_backups = self.db.backup_get_all_by_volume(
                        context.elevated(), volume['id'])
                    if all_backups:
                        normal_backups = []
                        for bk in all_backups:
                            if (not bk.display_description or
                                    PERIODICSTR not in bk.display_description):
                                normal_backups.append(bk)
                        if normal_backups:
                            latest_backup = max(normal_backups,
                                                key=lambda x: x['created_at'])

            parent_id = None
            if latest_backup:
                if latest_backup['status'] == "available":
                    parent_id = latest_backup['id']
                    LOG.info(
                        _LI("Found parent backup %(bak)s for volume "
                            "%(volume)s. Do an incremental backup."), {
                                'bak': latest_backup['id'],
                                'volume': volume['id']
                            })
                elif latest_backup['status'] == "creating":
                    msg = _('The parent backup is creating.')
                    LOG.info(_LI("The parent backup %(bak)s is creating."),
                             {'bak': latest_backup['id']})
                    raise exception.InvalidBackup(reason=msg)
                else:
                    LOG.info(
                        _LI("No backups available to do an incremental "
                            "backup, do a full backup for "
                            "volume %(volume)s."), {'volume': volume['id']})
            else:
                LOG.info(
                    _LI("No backups available to do an incremental "
                        "backup, do a full backup for volume %(volume)s."),
                    {'volume': volume['id']})

            # volume_host still refers to the last volume checked in the
            # validation loop above, so recompute it for this volume.
            volume_host = volume_utils.extract_host(volume['host'], 'host')
            options = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': (desc_prefix + description
                                        if description else desc_prefix),
                'volume_id': volume['id'],
                'status': 'creating',
                'container': container,
                'parent_id': parent_id,
                # Set backup size to "0", meaning "not available yet".
                # The backup driver will return the exact size once the
                # backup is done. We lined up with OP that while a backup
                # is in "creating" status, OP shows "--" in the "size"
                # field instead of "0".
                # 'size': volume['size'],
                'size': 0,
                'host': volume_host,
            }

            # (maqi) Use the volume's display_description field to save the
            # previous_status, since volumes in Kilo have no previous_status
            # column in the database
            previous_status = volume['status']
            self.db.volume_update(context, volume['id'], {
                'status': 'backing-up',
                'display_description': previous_status
            })

            try:
                backup = self.db.backup_create(context, options)
                QUOTAS.commit(context, reservations)
            except Exception:
                with excutils.save_and_reraise_exception():
                    try:
                        self.db.backup_destroy(context, backup['id'])
                    finally:
                        QUOTAS.rollback(context, reservations)
            backups.append(backup)
            kwargs = {
                'host': backup['host'],
                'backup_id': backup['id'],
                'volume_id': volume['id'],
            }
            inst_backup_kwargs.append(kwargs)

        self.backup_rpcapi.create_instance_backup(context, instance_uuid,
                                                  inst_backup_kwargs)
        LOG.debug(
            "Returning from create_instance_backup "
            "with result: %(backups)s", {'backups': backups})
        return backups
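
The desc_prefix tag written into display_description is what lets all
backups from one create_instance_backup call be grouped later. Here is a
sketch of recovering that grouping; group_by_request is a hypothetical
helper, and only the 32-character-prefix layout comes from the listing:

import uuid
from collections import defaultdict

def group_by_request(backups):
    """Group backup dicts by the 32-char request prefix of the description."""
    groups = defaultdict(list)
    for b in backups:
        desc = b.get('display_description') or ''
        groups[desc[:32]].append(b)
    return groups

prefix = uuid.uuid4().hex   # equivalent to str(uuid.uuid4()).replace('-', '')
backups = [{'id': 1, 'display_description': prefix + 'nightly'},
           {'id': 2, 'display_description': prefix + 'nightly'}]
assert len(group_by_request(backups)) == 1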
Example #4
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] == "in-use" and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use '
                    'the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] != "available":
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".') % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume['availability_zone'],
                                               volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = objects.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or
                        x['data_timestamp'] < snapshot['created_at'])
                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id, {
            'status': 'backing-up',
            'previous_status': previous_status
        })
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': volume_host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = objects.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
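
The max(...) key expression above is dense. Below is the same
parent-selection rule pulled out into a standalone function over plain
dicts so it can be tested in isolation; datetime.timezone.utc stands in
for the timezone('UTC') object the listing imports elsewhere:

from datetime import datetime, timezone

_FLOOR = datetime(1, 1, 1, tzinfo=timezone.utc)  # sorts below any real time

def pick_parent(backups, snapshot=None):
    """Latest backup by data_timestamp; when backing up a snapshot, only
    backups whose data predates the snapshot are eligible."""
    def key(b):
        ts = b['data_timestamp']
        if snapshot is None or ts < snapshot['created_at']:
            return ts
        return _FLOOR
    return max(backups, key=key) if backups else None

t = lambda h: datetime(2020, 1, 1, h, tzinfo=timezone.utc)
backups = [{'id': 'a', 'data_timestamp': t(8)},
           {'id': 'b', 'data_timestamp': t(20)}]
assert pick_parent(backups, {'created_at': t(10)})['id'] == 'a'  # 8:00 wins
assert pick_parent(backups)['id'] == 'b'                         # no snapshot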
Example #5
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)

        if volume['status'] != "available":
            msg = _('Volume to be backed up must be available')
            raise exception.InvalidVolume(reason=msg)

        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

        # Reserve quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup of the volume and use it as the parent
        # backup to do an incremental backup.
        latest_backup = None
        # Added for periodic backup
        if getattr(context, 'periodic', False):
            latest_backup = None
            if description:
                description = PERIODICSTR + description
            else:
                description = PERIODICSTR
        else:
            if incremental:
                backups = self.db.backup_get_all_by_volume(
                    context.elevated(), volume_id)
                if backups:
                    normal_backups = []
                    for bk in backups:
                        if (not bk.display_description or
                                PERIODICSTR not in bk.display_description):
                            LOG.debug("Found normal backup %(bak)s "
                                      "for volume %(vol)s.",
                                      {"bak": bk.id, "vol": volume_id})
                            normal_backups.append(bk)
                    if normal_backups:
                        LOG.debug(
                            "The normal backups for volume "
                            "%(vol)s: %(baks)s.",
                            {"vol": volume_id,
                             "baks": [bk.id for bk in normal_backups]})
                        latest_backup = max(normal_backups,
                                            key=lambda x: x['created_at'])
        parent_id = None
        if latest_backup:
            if latest_backup['status'] == "available":
                parent_id = latest_backup['id']
                LOG.info(
                    _LI("Found parent backup %(bak)s for volume "
                        "%(volume)s. Do an incremental backup."), {
                            'bak': latest_backup['id'],
                            'volume': volume['id']
                        })
            elif latest_backup['status'] == "creating":
                msg = _('The parent backup is creating.')
                LOG.info(_LI("The parent backup %(bak)s is creating."),
                         {'bak': latest_backup['id']})
                raise exception.InvalidBackup(reason=msg)
            else:
                LOG.info(
                    _LI("No backups available to do an incremental "
                        "backup, do a full backup for volume %(volume)s."),
                    {'volume': volume['id']})
        else:
            LOG.info(
                _LI("No backups available to do an incremental "
                    "backup, do a full backup for volume %(volume)s."),
                {'volume': volume['id']})

        self.db.volume_update(context, volume_id, {'status': 'backing-up'})

        options = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'display_name': name,
            'display_description': description,
            'volume_id': volume_id,
            'status': 'creating',
            'container': container,
            'parent_id': parent_id,
            # Set backup size to "0", meaning "not available yet".
            # The backup driver will return the exact size once the
            # backup is done. We lined up with OP that while a backup
            # is in "creating" status, OP shows "--" in the "size"
            # field instead of "0".
            # 'size': volume['size'],
            'size': 0,
            'host': volume_host,
        }
        try:
            backup = self.db.backup_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.backup_destroy(context, backup['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup['host'], backup['id'],
                                         volume_id)

        return backup
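
Examples #3 and #5 mark periodic backups by prepending PERIODICSTR to the
description, then skip them when choosing an incremental parent. Here is
that filter condensed into one function; the listings never show
PERIODICSTR's actual value, so 'periodic-' below is only a placeholder:

PERIODICSTR = 'periodic-'   # placeholder value, not from the listings

def latest_normal_backup(backups):
    """Newest backup whose description is not tagged as periodic."""
    normal = [b for b in backups
              if not b.get('display_description') or
              PERIODICSTR not in b['display_description']]
    return max(normal, key=lambda b: b['created_at']) if normal else None

backups = [{'created_at': 1, 'display_description': None},
           {'created_at': 2, 'display_description': PERIODICSTR + 'auto'}]
assert latest_normal_backup(backups)['created_at'] == 1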