Example #1
    def delete(self, context, backup, force=False):
        """Make the RPC call to delete a volume backup.

        Call the backup manager to execute a backup delete or force delete
        operation.

        :param context: running context
        :param backup: the backup object retrieved from the DB
        :param force: whether to force the delete
        :raises: InvalidBackup
        :raises: BackupDriverException
        :raises: ServiceNotFound
        """
        check_policy(context, 'delete')
        if not force and backup.status not in [
                fields.BackupStatus.AVAILABLE, fields.BackupStatus.ERROR
        ]:
            msg = _('Backup status must be available or error')
            raise exception.InvalidBackup(reason=msg)
        if force and not self._check_support_to_force_delete(
                context, backup.host):
            msg = _('force delete')
            raise exception.NotSupportedOperation(operation=msg)

        # Don't allow backup to be deleted if there are incremental
        # backups dependent on it.
        deltas = self.get_all(context, search_opts={'parent_id': backup.id})
        if deltas:
            msg = _('Incremental backups exist for this backup.')
            raise exception.InvalidBackup(reason=msg)

        backup.status = fields.BackupStatus.DELETING
        backup.host = self._get_available_backup_service_host(
            backup.host, backup.availability_zone)
        backup.save()
        self.backup_rpcapi.delete_backup(context, backup)
Example #2
    def export_record(self, context, backup):
        """Export all volume backup metadata details to allow clean import.

        Export backup metadata so it could be re-imported into the database
        without any prerequisite in the backup database.

        :param context: running context
        :param backup: backup object to export
        :returns: backup_record -- a description of how to import the backup,
                  containing 'backup_url' (how to import the backup) and
                  'backup_service' (the driver needed for the import)
        :raises: InvalidBackup
        """
        LOG.info(_LI('Export record started, backup: %s.'), backup.id)

        expected_status = fields.BackupStatus.AVAILABLE
        actual_status = backup.status
        if actual_status != expected_status:
            err = (_('Export backup aborted, expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            raise exception.InvalidBackup(reason=err)

        backup_record = {}
        backup_record['backup_service'] = backup.service
        backup_service = self._map_service_to_driver(backup.service)
        configured_service = self.driver_name
        if backup_service != configured_service:
            err = (_('Export record aborted, the backup service currently'
                     ' configured [%(configured_service)s] is not the'
                     ' backup service that was used to create this'
                     ' backup [%(backup_service)s].') % {
                         'configured_service': configured_service,
                         'backup_service': backup_service
                     })
            raise exception.InvalidBackup(reason=err)

        # Call driver to create backup description string
        try:
            backup_service = self.service.get_backup_driver(context)
            driver_info = backup_service.export_record(backup)
            backup_url = backup.encode_record(driver_info=driver_info)
            backup_record['backup_url'] = backup_url
        except Exception as err:
            msg = six.text_type(err)
            raise exception.InvalidBackup(reason=msg)

        LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
        return backup_record
Example #3
def _get_volume_realpath(volume_file, volume_id):
    """Get the real path for the volume block device.

    If the volume is neither a block device nor a regular file, raise an
    InvalidBackup exception.

    :param volume_file: file object representing the volume
    :param volume_id: Volume id for backup or as restore target
    :raises: InvalidBackup
    :returns: tuple -- (real path of the volume device, backup mode to use)
    """

    try:
        # Get real path
        volume_path = os.path.realpath(volume_file.name)
        # Verify that path is a block device
        volume_mode = os.stat(volume_path).st_mode
        if stat.S_ISBLK(volume_mode):
            backup_mode = 'image'
        elif stat.S_ISREG(volume_mode):
            backup_mode = 'file'
        else:
            err = (_('backup: %(vol_id)s failed. '
                     '%(path)s is unexpected file type. Block or regular '
                     'files supported, actual file mode is %(vol_mode)s.') % {
                         'vol_id': volume_id,
                         'path': volume_path,
                         'vol_mode': volume_mode
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)

    except AttributeError:
        err = (_('backup: %(vol_id)s failed. Cannot obtain real path '
                 'to volume at %(path)s.') % {
                     'vol_id': volume_id,
                     'path': volume_file
                 })
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    except OSError:
        err = (_('backup: %(vol_id)s failed. '
                 '%(path)s is not a file.') % {
                     'vol_id': volume_id,
                     'path': volume_path
                 })
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    return volume_path, backup_mode
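The mode test above reduces to two stat checks. Below is a minimal standalone sketch (not part of the driver) of the same decision, run against a throwaway regular file so it is safe to execute anywhere; the temporary path is purely illustrative.

import os
import stat
import tempfile

with tempfile.NamedTemporaryFile() as volume_file:
    volume_path = os.path.realpath(volume_file.name)
    volume_mode = os.stat(volume_path).st_mode
    if stat.S_ISBLK(volume_mode):
        backup_mode = 'image'   # block device -> image backup
    elif stat.S_ISREG(volume_mode):
        backup_mode = 'file'    # regular file -> file backup
    else:
        backup_mode = None      # anything else would be rejected above
    print(volume_path, backup_mode)  # prints the temp path and 'file'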
Example #4
def _get_backup_metadata(backup, operation):
    """Return metadata persisted with backup object."""
    try:
        svc_dict = json.loads(backup.service_metadata)
        backup_path = svc_dict.get('backup_path')
        backup_mode = svc_dict.get('backup_mode')
    except TypeError:
        # for backwards compatibility
        vol_prefix = CONF.backup_tsm_volume_prefix
        backup_id = backup['id']
        backup_path = utils.make_dev_path('%s-%s' % (vol_prefix, backup_id))
        backup_mode = 'image'

    if backup_mode not in VALID_BACKUP_MODES:
        volume_id = backup['volume_id']
        backup_id = backup['id']
        err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. '
                 'Backup object has unexpected mode. Image or file '
                 'backups supported, actual mode is %(vol_mode)s.') % {
                     'op': operation,
                     'bck_id': backup_id,
                     'vol_id': volume_id,
                     'vol_mode': backup_mode
                 })
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
    return backup_path, backup_mode
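For context, a standalone sketch of the service_metadata shape that _get_backup_metadata() parses, matching what the TSM backup() method in Example #21 stores with json.dumps(); the device path here is a made-up value.

import json

service_metadata = json.dumps({
    'backup_mode': 'image',
    'backup_path': '/dev/cinder-backup-example',  # hypothetical device path
})

svc_dict = json.loads(service_metadata)
print(svc_dict.get('backup_path'), svc_dict.get('backup_mode'))
# /dev/cinder-backup-example image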
Example #5
def _make_link(volume_path, backup_path, vol_id):
    """Create a hard link for the volume block device.

    The IBM TSM client performs an image backup on a block device.
    The name of the block device is the backup prefix plus the backup id.

    :param volume_path: real device path name for the volume
    :param backup_path: path name TSM will use as the volume to back up
    :param vol_id: id of the volume to back up (for reporting)

    :raises: InvalidBackup
    """

    try:
        utils.execute('ln',
                      volume_path,
                      backup_path,
                      run_as_root=True,
                      check_exit_code=True)
    except processutils.ProcessExecutionError as exc:
        err = (_('backup: %(vol_id)s failed to create device hardlink '
                 'from %(vpath)s to %(bpath)s.\n'
                 'stdout: %(out)s\n stderr: %(err)s') % {
                     'vol_id': vol_id,
                     'vpath': volume_path,
                     'bpath': backup_path,
                     'out': exc.stdout,
                     'err': exc.stderr
                 })
        LOG.error(err)
        raise exception.InvalidBackup(reason=err)
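The `ln` command above runs as root because it links the real block device; conceptually it is just a hard link. A minimal standalone sketch of the same idea using os.link() on temporary files (paths are illustrative):

import os
import tempfile

tmpdir = tempfile.mkdtemp()
volume_path = os.path.join(tmpdir, 'volume')
backup_path = os.path.join(tmpdir, 'backup-link')
open(volume_path, 'wb').close()

os.link(volume_path, backup_path)  # two names, one inode
print(os.path.samefile(volume_path, backup_path))  # True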
Example #6
 def close(self):
     reader = six.BytesIO(self.data)
     try:
         etag = self.conn.put_object(self.container,
                                     self.object_name,
                                     reader,
                                     content_length=len(self.data))
     except socket.error as err:
         raise exception.SwiftConnectionFailed(reason=err)
     LOG.debug('swift MD5 for %(object_name)s: %(etag)s', {
         'object_name': self.object_name,
         'etag': etag,
     })
     md5 = hashlib.md5(self.data).hexdigest()
     LOG.debug('backup MD5 for %(object_name)s: %(md5)s', {
         'object_name': self.object_name,
         'md5': md5
     })
     if etag != md5:
         err = (_('error writing object to swift, MD5 of object in '
                  'swift %(etag)s is not the same as MD5 of object sent '
                  'to swift %(md5)s') % {
                      'etag': etag,
                      'md5': md5
                  })
         raise exception.InvalidBackup(reason=err)
     return md5
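The integrity check above compares the MD5 hex digest of the uploaded data with the etag returned by put_object(). A standalone sketch of that comparison, with the etag simulated locally instead of fetched from Swift:

import hashlib

data = b'example chunk of backup data'
md5 = hashlib.md5(data).hexdigest()
etag = hashlib.md5(data).hexdigest()  # stand-in for the value Swift returns

if etag != md5:
    raise RuntimeError('MD5 mismatch between local data and Swift object')
print('upload verified, md5=%s' % md5)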
Example #7
 def close(self):
     media = http.MediaIoBaseUpload(six.BytesIO(self.data),
                                    'application/octet-stream',
                                    chunksize=self.chunk_size,
                                    resumable=self.resumable)
     resp = self.conn.objects().insert(
         bucket=self.bucket,
         name=self.object_name,
         body={},
         media_body=media).execute(num_retries=self.num_retries)
     etag = resp['md5Hash']
     md5 = base64.b64encode(hashlib.md5(self.data).digest())
     if six.PY3:
         # b64encode() returns bytes on Python 3; decode it so the value can
         # be compared with (and logged next to) the md5Hash text from GCS.
         md5 = md5.decode('utf-8')
     if etag != md5:
         err = _('MD5 of object: %(object_name)s before: '
                 '%(md5)s and after: %(etag)s is not same.') % {
                     'object_name': self.object_name,
                     'md5': md5,
                     'etag': etag,
                 }
         raise exception.InvalidBackup(reason=err)
     else:
         LOG.debug(
             'MD5 before: %(md5)s and after: %(etag)s '
             'writing object: %(object_name)s in GCS.', {
                 'etag': etag,
                 'md5': md5,
                 'object_name': self.object_name,
             })
         return md5
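GCS reports the object checksum ('md5Hash') as a base64-encoded MD5 digest, which is why the code above base64-encodes the local digest before comparing. A standalone sketch with a simulated response dict rather than a real API call:

import base64
import hashlib

data = b'example chunk of backup data'
resp = {'md5Hash': base64.b64encode(hashlib.md5(data).digest()).decode()}

md5 = base64.b64encode(hashlib.md5(data).digest()).decode()
if resp['md5Hash'] != md5:
    raise RuntimeError('MD5 mismatch between local data and GCS object')
print('upload verified, md5Hash=%s' % md5)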
Example #8
    def export_record(self, context, backup_id):
        """Make the RPC call to export a volume backup.

        Call backup manager to execute backup export.

        :param context: running context
        :param backup_id: backup id to export
        :returns: dictionary -- a description of how to import the backup,
                  containing 'backup_url' and 'backup_service'
        :raises: InvalidBackup
        """
        check_policy(context, 'backup-export')
        backup = self.get(context, backup_id)
        if backup['status'] != fields.BackupStatus.AVAILABLE:
            msg = (_('Backup status must be available and not %s.') %
                   backup['status'])
            raise exception.InvalidBackup(reason=msg)

        LOG.debug(
            "Calling RPCAPI with context: "
            "%(ctx)s, host: %(host)s, backup: %(id)s.", {
                'ctx': context,
                'host': backup['host'],
                'id': backup['id']
            })

        backup.host = self._get_available_backup_service_host(
            backup.host, backup.availability_zone)
        backup.save()
        export_data = self.backup_rpcapi.export_record(context, backup)

        return export_data
Example #9
    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from backup repository."""
        backup_id = backup['id']
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('starting restore of backup %(object_prefix)s '
                  'container: %(container)s, to volume %(volume_id)s, '
                  'backup: %(backup_id)s.',
                  {
                      'object_prefix': object_prefix,
                      'container': container,
                      'volume_id': volume_id,
                      'backup_id': backup_id,
                  })
        metadata = self._read_metadata(backup)
        metadata_version = metadata['version']
        LOG.debug('Restoring backup version %s', metadata_version)
        try:
            restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
                metadata_version))
        except TypeError:
            err = (_('No support to restore backup version %s')
                   % metadata_version)
            raise exception.InvalidBackup(reason=err)

        # Build a list of backups based on parent_id. A full backup
        # will be the last one in the list.
        backup_list = []
        backup_list.append(backup)
        current_backup = backup
        while current_backup.parent_id:
            prev_backup = storage.Backup.get_by_id(self.context,
                                                   current_backup.parent_id)
            backup_list.append(prev_backup)
            current_backup = prev_backup

        # Do a full restore first, then layer the incremental backups
        # on top of it in order.
        index = len(backup_list) - 1
        while index >= 0:
            backup1 = backup_list[index]
            index = index - 1
            metadata = self._read_metadata(backup1)
            restore_func(backup1, volume_id, metadata, volume_file)

            volume_meta = metadata.get('volume_meta', None)
            try:
                if volume_meta:
                    self.put_metadata(volume_id, volume_meta)
                else:
                    LOG.debug("No volume metadata in this backup.")
            except exception.BackupMetadataUnsupportedVersion:
                msg = _("Metadata restore failed due to incompatible version.")
                LOG.error(msg)
                raise exception.BackupOperationError(msg)

        LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
                  {'backup_id': backup_id, 'volume_id': volume_id})
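The restore order above comes from walking the parent_id chain: the list is built newest-first and then applied from the end (the full backup) forward. A self-contained sketch with stand-in backup objects and made-up ids:

class Backup(object):
    def __init__(self, id, parent_id=None):
        self.id = id
        self.parent_id = parent_id

backups_by_id = {
    'full': Backup('full'),
    'incr1': Backup('incr1', parent_id='full'),
    'incr2': Backup('incr2', parent_id='incr1'),
}

backup_list = [backups_by_id['incr2']]   # the backup being restored
current = backups_by_id['incr2']
while current.parent_id:
    current = backups_by_id[current.parent_id]
    backup_list.append(current)

restore_order = [b.id for b in reversed(backup_list)]
print(restore_order)  # ['full', 'incr1', 'incr2']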
Example #10
    def _do_restore(self, backup_path, restore_path, vol_id, backup_mode):
        """Perform the actual restore operation.

        :param backup_path: the path the backup was created from; this
                            identifies the backup to TSM
        :param restore_path: volume path to restore into
        :param vol_id: volume id
        :param backup_mode: mode used to create the backup ('image' or 'file')
        :raises: InvalidBackup
        """

        restore_attrs = {'Total number of objects restored': '1'}
        restore_cmd = ['dsmc', 'restore']
        if _image_mode(backup_mode):
            restore_cmd.append('image')
            restore_cmd.append('-noprompt')  # suppress prompt
        else:
            restore_cmd.append('-replace=yes')  # suppress prompt

        restore_cmd.extend(
            ['-quiet',
             '-password=%s' % self.tsm_password, backup_path])

        if restore_path != backup_path:
            restore_cmd.append(restore_path)

        out, err = utils.execute(*restore_cmd,
                                 run_as_root=True,
                                 check_exit_code=False)

        success = _check_dsmc_output(out, restore_attrs)
        if not success:
            err = (_('restore: %(vol_id)s failed.\n'
                     'stdout: %(out)s\n stderr: %(err)s.') % {
                         'vol_id': vol_id,
                         'out': out,
                         'err': err
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
Example #11
    def _do_backup(self, backup_path, vol_id, backup_mode):
        """Perform the actual backup operation.

        :param backup_path: volume path
        :param vol_id: volume id
        :param backup_mode: file mode of source volume; 'image' or 'file'
        :raises: InvalidBackup
        """

        backup_attrs = {'Total number of objects backed up': '1'}
        compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'

        backup_cmd = ['dsmc', 'backup']
        if _image_mode(backup_mode):
            backup_cmd.append('image')
        backup_cmd.extend([
            '-quiet',
            '-compression=%s' % compr_flag,
            '-password=%s' % self.tsm_password, backup_path
        ])

        out, err = utils.execute(*backup_cmd,
                                 run_as_root=True,
                                 check_exit_code=False)

        success = _check_dsmc_output(out, backup_attrs, exact_match=False)
        if not success:
            err = (_('backup: %(vol_id)s failed to obtain backup '
                     'success notification from server.\n'
                     'stdout: %(out)s\n stderr: %(err)s') % {
                         'vol_id': vol_id,
                         'out': out,
                         'err': err
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
Example #12
    def _restore_v1(self, backup, volume_id, metadata, volume_file):
        """Restore a v1 volume backup."""
        backup_id = backup['id']
        LOG.debug('v1 volume backup restore of %s started.', backup_id)
        extra_metadata = metadata.get('extra_metadata')
        container = backup['container']
        metadata_objects = metadata['storage']
        metadata_object_names = []
        for obj in metadata_objects:
            metadata_object_names.extend(obj.keys())
        LOG.debug('metadata_object_names = %s.', metadata_object_names)
        prune_list = [self._metadata_filename(backup),
                      self._sha256_filename(backup)]
        object_names = [object_name for object_name in
                        self._generate_object_names(backup)
                        if object_name not in prune_list]
        if sorted(object_names) != sorted(metadata_object_names):
            err = _('restore_backup aborted, actual object list '
                    'does not match object list stored in metadata.')
            raise exception.InvalidBackup(reason=err)

        for metadata_object in metadata_objects:
            object_name, obj = list(metadata_object.items())[0]
            LOG.debug('restoring object. backup: %(backup_id)s, '
                      'container: %(container)s, object name: '
                      '%(object_name)s, volume: %(volume_id)s.',
                      {
                          'backup_id': backup_id,
                          'container': container,
                          'object_name': object_name,
                          'volume_id': volume_id,
                      })

            with self.get_object_reader(
                    container, object_name,
                    extra_metadata=extra_metadata) as reader:
                body = reader.read()
            compression_algorithm = metadata_object[object_name]['compression']
            decompressor = self._get_compressor(compression_algorithm)
            volume_file.seek(obj['offset'])
            if decompressor is not None:
                LOG.debug('decompressing data using %s algorithm',
                          compression_algorithm)
                decompressed = decompressor.decompress(body)
                volume_file.write(decompressed)
            else:
                volume_file.write(body)

            # force flush every write to avoid long blocking write on close
            volume_file.flush()

            # Be tolerant to IO implementations that do not support fileno()
            try:
                fileno = volume_file.fileno()
            except IOError:
                LOG.info(_LI("volume_file does not support "
                             "fileno() so skipping "
                             "fsync()"))
            else:
                os.fsync(fileno)

            # Restoring a backup to a volume can take some time. Yield so
            # other threads can run, allowing, among other things, the
            # service status to be updated.
            eventlet.sleep(0)
        LOG.debug('v1 volume backup restore of %s finished.',
                  backup_id)
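The consistency check above prunes the metadata and sha256 bookkeeping objects and then compares the remaining object names, order-insensitively, with the names recorded in the backup metadata. A standalone sketch with purely illustrative object names:

object_names = ['backup-0001', 'backup-0002',
                'backup_metadata', 'backup_sha256file']
prune_list = ['backup_metadata', 'backup_sha256file']
metadata_object_names = ['backup-0002', 'backup-0001']

pruned = [name for name in object_names if name not in prune_list]
print(sorted(pruned) == sorted(metadata_object_names))  # True -> restore goes ahead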
Example #13
    def create(self,
               context,
               name,
               description,
               volume_id,
               container,
               incremental=False,
               availability_zone=None,
               force=False,
               snapshot_id=None):
        """Make the RPC call to create a volume backup."""
        check_policy(context, 'create')
        volume = self.volume_api.get(context, volume_id)
        snapshot = None
        if snapshot_id:
            snapshot = self.volume_api.get_snapshot(context, snapshot_id)

            if volume_id != snapshot.volume_id:
                msg = (_('Volume %(vol1)s does not match with '
                         'snapshot.volume_id %(vol2)s.') % {
                             'vol1': volume_id,
                             'vol2': snapshot.volume_id
                         })
                raise exception.InvalidVolume(reason=msg)
        if volume['status'] not in ["available", "in-use"]:
            msg = (_('Volume to be backed up must be available '
                     'or in-use, but the current status is "%s".') %
                   volume['status'])
            raise exception.InvalidVolume(reason=msg)
        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
            msg = _('Backing up an in-use volume must use the force flag.')
            raise exception.InvalidVolume(reason=msg)
        elif snapshot_id and snapshot['status'] not in ["available"]:
            msg = (_('Snapshot to be backed up must be available, '
                     'but the current status is "%s".') % snapshot['status'])
            raise exception.InvalidSnapshot(reason=msg)

        previous_status = volume['status']
        host = self._get_available_backup_service_host(
            None, volume.availability_zone,
            volume_utils.extract_host(volume.host, 'host'))

        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']}
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "%(s_size)sG backup (%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(
                        msg, {
                            's_pid': context.project_id,
                            's_size': volume['size'],
                            'd_consumed': _consumed(over),
                            'd_quota': quotas[over]
                        })
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                              "backups (%(d_consumed)d backups "
                              "already consumed)")

                    LOG.warning(msg, {
                        's_pid': context.project_id,
                        'd_consumed': _consumed(over)
                    })
                    raise exception.BackupLimitExceeded(allowed=quotas[over])

        # Find the latest backup and use it as the parent backup to do an
        # incremental backup.
        latest_backup = None
        if incremental:
            backups = storage.BackupList.get_all_by_volume(
                context.elevated(), volume_id)
            if backups.objects:
                # NOTE(xyang): The 'data_timestamp' field records the time
                # when the data on the volume was first saved. If it is
                # a backup from volume, 'data_timestamp' will be the same
                # as 'created_at' for a backup. If it is a backup from a
                # snapshot, 'data_timestamp' will be the same as
                # 'created_at' for a snapshot.
                # If not backing up from snapshot, the backup with the latest
                # 'data_timestamp' will be the parent; If backing up from
                # snapshot, the backup with the latest 'data_timestamp' will
                # be chosen only if 'data_timestamp' is earlier than the
                # 'created_at' timestamp of the snapshot; Otherwise, the
                # backup will not be chosen as the parent.
                # For example, a volume has a backup taken at 8:00, then
                # a snapshot taken at 8:10, and then a backup at 8:20.
                # When taking an incremental backup of the snapshot, the
                # parent should be the backup at 8:00, not 8:20, and the
                # 'data_timestamp' of this new backup will be 8:10.
                latest_backup = max(
                    backups.objects,
                    key=lambda x: x['data_timestamp']
                    if (not snapshot or (snapshot and x['data_timestamp'] <
                                         snapshot['created_at'])) else
                    datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
            else:
                msg = _('No backups available to do an incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        parent_id = None
        if latest_backup:
            parent_id = latest_backup.id
            if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
                msg = _('The parent backup must be available for '
                        'incremental backup.')
                raise exception.InvalidBackup(reason=msg)

        data_timestamp = None
        if snapshot_id:
            snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
            data_timestamp = snapshot.created_at

        self.db.volume_update(context, volume_id, {
            'status': 'backing-up',
            'previous_status': previous_status
        })

        backup = None
        try:
            kwargs = {
                'user_id': context.user_id,
                'project_id': context.project_id,
                'display_name': name,
                'display_description': description,
                'volume_id': volume_id,
                'status': fields.BackupStatus.CREATING,
                'container': container,
                'parent_id': parent_id,
                'size': volume['size'],
                'host': host,
                'snapshot_id': snapshot_id,
                'data_timestamp': data_timestamp,
            }
            backup = storage.Backup(context=context, **kwargs)
            backup.create()
            if not snapshot_id:
                backup.data_timestamp = backup.created_at
                backup.save()
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    if backup and 'id' in backup:
                        backup.destroy()
                finally:
                    QUOTAS.rollback(context, reservations)

        # TODO(DuncanT): In future, when we have a generic local attach,
        #                this can go via the scheduler, which enables
        #                better load balancing and isolation of services
        self.backup_rpcapi.create_backup(context, backup)

        return backup
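The parent-selection rule in the NOTE above can be traced with the 8:00 / 8:10 / 8:20 example it describes. A standalone sketch using plain dicts and naive datetimes, with datetime.min as the "never pick this" sentinel (the real code uses a timezone-aware minimum):

from datetime import datetime

backups = [
    {'id': 'b1', 'data_timestamp': datetime(2016, 1, 1, 8, 0)},   # 8:00 backup
    {'id': 'b2', 'data_timestamp': datetime(2016, 1, 1, 8, 20)},  # 8:20 backup
]
snapshot = {'created_at': datetime(2016, 1, 1, 8, 10)}            # 8:10 snapshot

latest_backup = max(
    backups,
    key=lambda x: x['data_timestamp']
    if (not snapshot or x['data_timestamp'] < snapshot['created_at'])
    else datetime.min)

print(latest_backup['id'])  # 'b1': the 8:00 backup is the parent, not 8:20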
Example #14
    def _get_import_backup(self, context, backup_url):
        """Prepare database backup record for import.

        This method decodes the provided backup_url and expects to find the
        id of the backup in it.

        It then checks the DB for the presence of this backup record; if the
        record is found and is not deleted, an exception is raised because
        the record cannot be created or reused.

        If the record is in deleted status then we must be trying to recover
        this record, so we'll reuse it.

        If the record doesn't already exist, we create it with the provided
        id.

        :param context: running context
        :param backup_url: backup description to be used by the backup driver
        :return: BackupImport object
        :raises: InvalidBackup
        :raises: InvalidInput
        """
        # Deserialize string backup record into a dictionary
        backup_record = storage.Backup.decode_record(backup_url)

        # ID is a required field since it's what links incremental backups
        if 'id' not in backup_record:
            msg = _('Provided backup record is missing an id')
            raise exception.InvalidInput(reason=msg)

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'volume_id': '0000-0000-0000-0000',
            'status': fields.BackupStatus.CREATING,
        }

        try:
            # Try to get the backup with that ID in all projects even among
            # deleted entries.
            backup = storage.BackupImport.get_by_id(context,
                                                    backup_record['id'],
                                                    read_deleted='yes',
                                                    project_only=False)

            # If record exists and it's not deleted we cannot proceed with the
            # import
            if backup.status != fields.BackupStatus.DELETED:
                msg = _('Backup already exists in database.')
                raise exception.InvalidBackup(reason=msg)

            # Otherwise we'll "revive" the deleted backup record
            backup.update(kwargs)
            backup.save()

        except exception.BackupNotFound:
            # If record doesn't exist create it with the specific ID
            backup = storage.BackupImport(context=context,
                                          id=backup_record['id'],
                                          **kwargs)
            backup.create()

        return backup
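A standalone sketch of decoding a backup_url, assuming for illustration that the record is base64-encoded JSON; the exact on-the-wire format is owned by encode_record()/decode_record(), and the url below is built locally with made-up values:

import base64
import json

backup_url = base64.b64encode(
    json.dumps({'id': 'f1e2d3c4', 'display_name': 'example'}).encode()).decode()

backup_record = json.loads(base64.b64decode(backup_url).decode())
if 'id' not in backup_record:
    raise ValueError('Provided backup record is missing an id')
print(backup_record['id'])  # f1e2d3c4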
Example #15
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the given volume.

           If backup['parent_id'] is given, then an incremental backup
           is performed.
        """
        if self.chunk_size_bytes % self.sha_block_size_bytes:
            err = _('Chunk size is not multiple of '
                    'block size for creating hash.')
            raise exception.InvalidBackup(reason=err)

        # Read the shafile of the parent backup if backup['parent_id']
        # is given.
        parent_backup_shafile = None
        parent_backup = None
        if backup.parent_id:
            parent_backup = storage.Backup.get_by_id(self.context,
                                                     backup.parent_id)
            parent_backup_shafile = self._read_sha256file(parent_backup)
            parent_backup_shalist = parent_backup_shafile['sha256s']
            if (parent_backup_shafile['chunk_size'] !=
                    self.sha_block_size_bytes):
                err = (_('Hash block size has changed since the last '
                         'backup. New hash block size: %(new)s. Old hash '
                         'block size: %(old)s. Do a full backup.')
                       % {'old': parent_backup_shafile['chunk_size'],
                          'new': self.sha_block_size_bytes})
                raise exception.InvalidBackup(reason=err)
            # If the volume size increased since the last backup, fail
            # the incremental backup and ask user to do a full backup.
            if backup.size > parent_backup.size:
                err = _('Volume size increased since the last '
                        'backup. Do a full backup.')
                raise exception.InvalidBackup(reason=err)

        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self._prepare_backup(backup)

        counter = 0
        total_block_sent_num = 0

        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when "chunked" backup drivers are deployed.
        def _notify_progress():
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
        timer = loopingcall.FixedIntervalLoopingCall(
            _notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)

        sha256_list = object_sha256['sha256s']
        shaindex = 0
        is_backup_canceled = False
        while True:
            # First of all, we check the status of this backup. If it
            # has been changed to deleting or has been deleted, we cancel
            # the backup process so that the force delete can complete.
            backup = storage.Backup.get_by_id(self.context, backup.id)
            if backup.status in (fields.BackupStatus.DELETING,
                                 fields.BackupStatus.DELETED):
                is_backup_canceled = True
                # Clean up the chunk objects again so that no chunks are
                # left behind once the deletion completes.
                self.delete(backup)
                LOG.debug('Cancel the backup process of %s.', backup.id)
                break
            data_offset = volume_file.tell()
            data = volume_file.read(self.chunk_size_bytes)
            if data == b'':
                break

            # Calculate new shas with the datablock.
            shalist = []
            off = 0
            datalen = len(data)
            while off < datalen:
                chunk_start = off
                chunk_end = chunk_start + self.sha_block_size_bytes
                if chunk_end > datalen:
                    chunk_end = datalen
                chunk = data[chunk_start:chunk_end]
                sha = hashlib.sha256(chunk).hexdigest()
                shalist.append(sha)
                off += self.sha_block_size_bytes
            sha256_list.extend(shalist)

            # If parent_backup is not None, that means an incremental
            # backup will be performed.
            if parent_backup:
                # Find the extent that needs to be backed up.
                extent_off = -1
                for idx, sha in enumerate(shalist):
                    if sha != parent_backup_shalist[shaindex]:
                        if extent_off == -1:
                            # Start of new extent.
                            extent_off = idx * self.sha_block_size_bytes
                    else:
                        if extent_off != -1:
                            # We've reached the end of the extent.
                            extent_end = idx * self.sha_block_size_bytes
                            segment = data[extent_off:extent_end]
                            self._backup_chunk(backup, container, segment,
                                               data_offset + extent_off,
                                               object_meta,
                                               extra_metadata)
                            extent_off = -1
                    shaindex += 1

                # The last extent extends to the end of the data buffer.
                if extent_off != -1:
                    extent_end = datalen
                    segment = data[extent_off:extent_end]
                    self._backup_chunk(backup, container, segment,
                                       data_offset + extent_off,
                                       object_meta, extra_metadata)
                    extent_off = -1
            else:  # Do a full backup.
                self._backup_chunk(backup, container, data, data_offset,
                                   object_meta, extra_metadata)

            # Notifications
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num.  The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # Reset the counter
                counter = 0

        # Stop the timer.
        timer.stop()
        # If backup has been cancelled we have nothing more to do
        # but timer.stop().
        if is_backup_canceled:
            return
        # All the data has been sent, so backup_percent reaches 100.
        self._send_progress_end(self.context, backup, object_meta)

        object_sha256['sha256s'] = sha256_list
        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            # Whatever goes wrong, we want to log, cleanup, and re-raise.
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Backup volume metadata failed: %s."),
                                  err)
                    self.delete(backup)

        self._finalize_backup(backup, container, object_meta, object_sha256)
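The incremental path above sends only the extents whose per-block SHA-256 differs from the parent backup's sha256s list. A standalone sketch of that block comparison with a tiny block size and made-up data:

import hashlib

sha_block_size = 4

def sha_list(data, block_size):
    # Hash each fixed-size block of the buffer.
    return [hashlib.sha256(data[off:off + block_size]).hexdigest()
            for off in range(0, len(data), block_size)]

parent_data = b'AAAABBBBCCCCDDDD'
new_data = b'AAAAXXXXCCCCDDDD'  # only the second 4-byte block changed

parent_shas = sha_list(parent_data, sha_block_size)
new_shas = sha_list(new_data, sha_block_size)

changed_offsets = [idx * sha_block_size
                   for idx, sha in enumerate(new_shas)
                   if sha != parent_shas[idx]]
print(changed_offsets)  # [4]: only that block needs to be backed up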
Example #16
    def restore_backup(self, context, backup, volume_id):
        """Restore volume backups from configured backup service."""
        LOG.info(
            _LI('Restore backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        volume = storage.Volume.get_by_id(context, volume_id)
        self._notify_about_backup_usage(context, backup, "restore.start")

        backup.host = self.host
        backup.save()

        expected_status = 'restoring-backup'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted, expected volume status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.RESTORING
        actual_status = backup['status']
        if actual_status != expected_status:
            err = (_('Restore backup aborted: expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                         'expected_status': expected_status,
                         'actual_status': actual_status
                     })
            self._update_backup_error(backup, context, err)
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        if volume['size'] > backup['size']:
            LOG.info(
                _LI('Volume: %(vol_id)s, size: %(vol_size)d is '
                    'larger than backup: %(backup_id)s, '
                    'size: %(backup_size)d, continuing with restore.'), {
                        'vol_id': volume['id'],
                        'vol_size': volume['size'],
                        'backup_id': backup['id'],
                        'backup_size': backup['size']
                    })

        backup_service = self._map_service_to_driver(backup['service'])
        configured_service = self.driver_name
        if backup_service != configured_service:
            err = _('Restore backup aborted, the backup service currently'
                    ' configured [%(configured_service)s] is not the'
                    ' backup service that was used to create this'
                    ' backup [%(backup_service)s].') % {
                        'configured_service': configured_service,
                        'backup_service': backup_service,
                    }
            backup.status = fields.BackupStatus.AVAILABLE
            backup.save()
            self.db.volume_update(context, volume_id, {'status': 'error'})
            raise exception.InvalidBackup(reason=err)

        try:
            self._run_restore(context, backup, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_restoring'})
                backup.status = fields.BackupStatus.AVAILABLE
                backup.save()

        self.db.volume_update(context, volume_id, {'status': 'available'})
        backup.status = fields.BackupStatus.AVAILABLE
        backup.save()
        LOG.info(
            _LI('Restore backup finished, backup %(backup_id)s restored'
                ' to volume %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })
        self._notify_about_backup_usage(context, backup, "restore.end")
Example #17
    def create_backup(self, context, backup):
        """Create volume backups using configured backup service."""
        volume_id = backup.volume_id
        volume = storage.Volume.get_by_id(context, volume_id)
        previous_status = volume.get('previous_status', None)
        LOG.info(
            _LI('Create backup started, backup: %(backup_id)s '
                'volume: %(volume_id)s.'), {
                    'backup_id': backup.id,
                    'volume_id': volume_id
                })

        self._notify_about_backup_usage(context, backup, "create.start")

        backup.host = self.host
        backup.service = self.driver_name
        backup.availability_zone = self.az
        backup.save()

        expected_status = 'backing-up'
        actual_status = volume['status']
        if actual_status != expected_status:
            err = _('Create backup aborted, expected volume status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, context, err)
            raise exception.InvalidVolume(reason=err)

        expected_status = fields.BackupStatus.CREATING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Create backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') % {
                        'expected_status': expected_status,
                        'actual_status': actual_status,
                    }
            self._update_backup_error(backup, context, err)
            backup.save()
            raise exception.InvalidBackup(reason=err)

        try:
            self._run_backup(context, backup, volume)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(
                    context, volume_id, {
                        'status': previous_status,
                        'previous_status': 'error_backing-up'
                    })
                self._update_backup_error(backup, context, six.text_type(err))

        # Restore the original status.
        self.db.volume_update(context, volume_id, {
            'status': previous_status,
            'previous_status': 'backing-up'
        })
        backup.status = fields.BackupStatus.AVAILABLE
        backup.size = volume['size']
        backup.save()

        # Handle the num_dependent_backups of the parent backup when the
        # child backup has been created successfully.
        if backup.parent_id:
            parent_backup = storage.Backup.get_by_id(context, backup.parent_id)
            parent_backup.num_dependent_backups += 1
            parent_backup.save()
        LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
        self._notify_about_backup_usage(context, backup, "create.end")
Example #18
    def delete(self, backup):
        """Delete the given backup from TSM server.

        :param backup: backup information for volume
        :raises: InvalidBackup
        """

        delete_attrs = {'Total number of objects deleted': '1'}
        delete_path, backup_mode = _get_backup_metadata(backup, 'restore')

        LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.', {
            'backup': backup.id,
            'mode': backup_mode
        })

        try:
            out, err = utils.execute('dsmc',
                                     'delete',
                                     'backup',
                                     '-quiet',
                                     '-noprompt',
                                     '-objtype=%s' % backup_mode,
                                     '-password=%s' % self.tsm_password,
                                     delete_path,
                                     run_as_root=True,
                                     check_exit_code=False)

        except processutils.ProcessExecutionError as exc:
            err = (_('delete: %(vol_id)s failed to run dsmc with '
                     'stdout: %(out)s\n stderr: %(err)s') % {
                         'vol_id': backup.volume_id,
                         'out': exc.stdout,
                         'err': exc.stderr
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        except exception.Error as exc:
            err = (_('delete: %(vol_id)s failed to run dsmc '
                     'due to invalid arguments with '
                     'stdout: %(out)s\n stderr: %(err)s') % {
                         'vol_id': backup.volume_id,
                         'out': exc.stdout,
                         'err': exc.stderr
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)

        success = _check_dsmc_output(out, delete_attrs)
        if not success:
            # Log an error if TSM cannot delete the backup object, but do
            # not raise an exception so that the storage backup object can
            # still be removed.
            LOG.error(
                _LE('delete: %(vol_id)s failed with '
                    'stdout: %(out)s\n stderr: %(err)s'), {
                        'vol_id': backup.volume_id,
                        'out': out,
                        'err': err
                    })

        LOG.debug('Delete %s finished.', backup['id'])
Example #19
    def delete_backup(self, context, backup):
        """Delete volume backup from configured backup service."""
        LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)

        self._notify_about_backup_usage(context, backup, "delete.start")
        backup.host = self.host
        backup.save()

        expected_status = fields.BackupStatus.DELETING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Delete_backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') \
                % {'expected_status': expected_status,
                   'actual_status': actual_status}
            self._update_backup_error(backup, context, err)
            raise exception.InvalidBackup(reason=err)

        backup_service = self._map_service_to_driver(backup['service'])
        if backup_service is not None:
            configured_service = self.driver_name
            if backup_service != configured_service:
                err = _('Delete backup aborted, the backup service currently'
                        ' configured [%(configured_service)s] is not the'
                        ' backup service that was used to create this'
                        ' backup [%(backup_service)s].')\
                    % {'configured_service': configured_service,
                       'backup_service': backup_service}
                self._update_backup_error(backup, context, err)
                raise exception.InvalidBackup(reason=err)

            try:
                backup_service = self.service.get_backup_driver(context)
                backup_service.delete(backup)
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    self._update_backup_error(backup, context,
                                              six.text_type(err))

        # Get reservations
        try:
            reserve_opts = {
                'backups': -1,
                'backup_gigabytes': -backup.size,
            }
            reservations = QUOTAS.reserve(context,
                                          project_id=backup.project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_LE("Failed to update usages deleting backup"))

        backup.destroy()
        # If this backup is an incremental backup, handle the
        # num_dependent_backups of the parent backup
        if backup.parent_id:
            parent_backup = storage.Backup.get_by_id(context, backup.parent_id)
            if parent_backup.has_dependent_backups:
                parent_backup.num_dependent_backups -= 1
                parent_backup.save()
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=backup.project_id)

        LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
        self._notify_about_backup_usage(context, backup, "delete.end")
Example #20
    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from TSM server.

        :param backup: backup information for volume
        :param volume_id: volume id
        :param volume_file: file object representing the volume
        :raises: InvalidBackup
        """

        # backup_path is the path that was originally backed up.
        backup_path, backup_mode = _get_backup_metadata(backup, 'restore')

        LOG.debug(
            'Starting restore of backup from TSM '
            'to volume %(volume_id)s, '
            'backup: %(backup_id)s, '
            'mode: %(mode)s.', {
                'volume_id': volume_id,
                'backup_id': backup.id,
                'mode': backup_mode
            })

        # volume_path is the path to restore into.  This may
        # be different from the original volume.
        volume_path, unused = _get_volume_realpath(volume_file, volume_id)

        restore_path = _create_unique_device_link(backup.id, volume_path,
                                                  volume_id, backup_mode)

        try:
            self._do_restore(backup_path, restore_path, volume_id, backup_mode)
        except processutils.ProcessExecutionError as exc:
            err = (_('restore: %(vol_id)s failed to run dsmc '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s') % {
                         'vol_id': volume_id,
                         'bpath': restore_path,
                         'out': exc.stdout,
                         'err': exc.stderr
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        except exception.Error as exc:
            err = (_('restore: %(vol_id)s failed to run dsmc '
                     'due to invalid arguments '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s') % {
                         'vol_id': volume_id,
                         'bpath': restore_path,
                         'out': exc.stdout,
                         'err': exc.stderr
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)

        finally:
            _cleanup_device_hardlink(restore_path, volume_path, volume_id)

        LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.', {
            'backup_id': backup.id,
            'volume_id': volume_id
        })
Example #21
    def backup(self, backup, volume_file, backup_metadata=False):
        """Backup the given volume to TSM.

        TSM performs a backup of a volume. The volume_file is used
        to determine the path of the block device that TSM will back up.

        :param backup: backup information for volume
        :param volume_file: file object representing the volume
        :param backup_metadata: whether or not to backup volume metadata
        :raises: InvalidBackup
        """

        # TODO(dosaboy): this needs implementing (see backup.drivers.ceph for
        #                an example)
        if backup_metadata:
            msg = _("Volume metadata backup requested but this driver does "
                    "not yet support this feature.")
            raise exception.InvalidBackup(reason=msg)

        volume_path, backup_mode = _get_volume_realpath(
            volume_file, backup.volume_id)
        LOG.debug(
            'Starting backup of volume: %(volume_id)s to TSM,'
            ' volume path: %(volume_path)s, mode: %(mode)s.', {
                'volume_id': backup.volume_id,
                'volume_path': volume_path,
                'mode': backup_mode
            })

        backup_path = _create_unique_device_link(backup.id, volume_path,
                                                 backup.volume_id, backup_mode)

        service_metadata = {
            'backup_mode': backup_mode,
            'backup_path': backup_path
        }
        backup.service_metadata = json.dumps(service_metadata)
        backup.save()

        try:
            self._do_backup(backup_path, backup.volume_id, backup_mode)
        except processutils.ProcessExecutionError as exc:
            err = (_('backup: %(vol_id)s failed to run dsmc '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s') % {
                         'vol_id': backup.volume_id,
                         'bpath': backup_path,
                         'out': exc.stdout,
                         'err': exc.stderr
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)
        except exception.Error as exc:
            err = (_('backup: %(vol_id)s failed to run dsmc '
                     'due to invalid arguments '
                     'on %(bpath)s.\n'
                     'stdout: %(out)s\n stderr: %(err)s') % {
                         'vol_id': backup.volume_id,
                         'bpath': backup_path,
                         'out': exc.stdout,
                         'err': exc.stderr
                     })
            LOG.error(err)
            raise exception.InvalidBackup(reason=err)

        finally:
            _cleanup_device_hardlink(backup_path, volume_path,
                                     backup.volume_id)

        LOG.debug('Backup %s finished.', backup.id)
Example #22
    def import_record(self, context, backup, backup_service, backup_url,
                      backup_hosts):
        """Import all volume backup metadata details to the backup db.

        :param context: running context
        :param backup: The new backup object for the import
        :param backup_service: The needed backup driver for import
        :param backup_url: An identifier string to locate the backup
        :param backup_hosts: Potential hosts to execute the import
        :raises: InvalidBackup
        :raises: ServiceNotFound
        """
        LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)

        # Can we import this backup?
        if (backup_service != self.driver_name):
            # No, are there additional potential backup hosts in the list?
            if len(backup_hosts) > 0:
                # try the next host on the list, maybe it can import
                first_host = backup_hosts.pop()
                self.backup_rpcapi.import_record(context, first_host, backup,
                                                 backup_service, backup_url,
                                                 backup_hosts)
            else:
                # empty list - we are the last host on the list, fail
                err = _('Import record failed, cannot find backup '
                        'service to perform the import. Request service '
                        '%(service)s') % {
                            'service': backup_service
                        }
                self._update_backup_error(backup, context, err)
                raise exception.ServiceNotFound(service_id=backup_service)
        else:
            # Yes...
            try:
                # Deserialize backup record information
                backup_options = backup.decode_record(backup_url)

                # Extract driver specific info and pass it to the driver
                driver_options = backup_options.pop('driver_info', {})
                backup_service = self.service.get_backup_driver(context)
                backup_service.import_record(backup, driver_options)
            except Exception as err:
                msg = six.text_type(err)
                self._update_backup_error(backup, context, msg)
                raise exception.InvalidBackup(reason=msg)

            required_import_options = {
                'display_name', 'display_description', 'container', 'size',
                'service_metadata', 'service', 'object_count', 'id'
            }

            # Check for missing fields in imported data
            missing_opts = required_import_options - set(backup_options)
            if missing_opts:
                msg = (_('Driver successfully decoded imported backup data, '
                         'but there are missing fields (%s).') %
                       ', '.join(missing_opts))
                self._update_backup_error(backup, context, msg)
                raise exception.InvalidBackup(reason=msg)

            # Confirm the ID from the record in the DB is the right one
            backup_id = backup_options['id']
            if backup_id != backup.id:
                msg = (_('Trying to import backup metadata from id %(meta_id)s'
                         ' into backup %(id)s.') % {
                             'meta_id': backup_id,
                             'id': backup.id
                         })
                self._update_backup_error(backup, context, msg)
                raise exception.InvalidBackup(reason=msg)

            # Overwrite some fields
            backup_options['status'] = fields.BackupStatus.AVAILABLE
            backup_options['service'] = self.driver_name
            backup_options['availability_zone'] = self.az
            backup_options['host'] = self.host

            # Remove some values which are not actual fields and some that
            # were set by the API node
            for key in ('name', 'user_id', 'project_id'):
                backup_options.pop(key, None)

            # Update the database
            backup.update(backup_options)
            backup.save()

            # Verify backup
            try:
                if isinstance(backup_service, driver.BackupDriverWithVerify):
                    backup_service.verify(backup.id)
                else:
                    LOG.warning(
                        _LW('Backup service %(service)s does not '
                            'support verify. Backup id %(id)s is '
                            'not verified. Skipping verify.'), {
                                'service': self.driver_name,
                                'id': backup.id
                            })
            except exception.InvalidBackup as err:
                with excutils.save_and_reraise_exception():
                    self._update_backup_error(backup, context,
                                              six.text_type(err))

            LOG.info(
                _LI('Import record id %s metadata from driver '
                    'finished.'), backup.id)
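To make the required-fields check above concrete, here is a small standalone illustration; the decoded record below is fabricated and only suggests the shape of what backup.decode_record(backup_url) might return.

required_import_options = {
    'display_name', 'display_description', 'container', 'size',
    'service_metadata', 'service', 'object_count', 'id'
}

# A made-up decoded record, deliberately missing one required key.
decoded = {
    'id': 'b1c2d3', 'size': 10, 'container': 'backups',
    'service': 'cinder.backup.drivers.swift', 'object_count': 5,
    'service_metadata': '{}', 'display_name': 'nightly',
}

missing = required_import_options - set(decoded)
print(sorted(missing))  # ['display_description'] -> the import would be rejected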
Example No. 23
    def restore(self, context, backup_id, volume_id=None, name=None):
        """Make the RPC call to restore a volume backup."""
        check_policy(context, 'restore')
        backup = self.get(context, backup_id)
        if backup['status'] != fields.BackupStatus.AVAILABLE:
            msg = _('Backup status must be available')
            raise exception.InvalidBackup(reason=msg)

        size = backup['size']
        if size is None:
            msg = _('Backup to be restored has invalid size')
            raise exception.InvalidBackup(reason=msg)

        # Create a volume if none specified. If a volume is specified check
        # it is large enough for the backup
        if volume_id is None:
            if name is None:
                name = 'restore_backup_%s' % backup_id

            description = 'auto-created_from_restore_from_backup'

            LOG.info(_LI("Creating volume of %(size)s GB for restore of "
                         "backup %(backup_id)s."), {
                             'size': size,
                             'backup_id': backup_id
                         },
                     context=context)
            volume = self.volume_api.create(context, size, name, description)
            volume_id = volume['id']

            while True:
                volume = self.volume_api.get(context, volume_id)
                if volume['status'] != 'creating':
                    break
                greenthread.sleep(1)
        else:
            volume = self.volume_api.get(context, volume_id)

        if volume['status'] != "available":
            msg = _('Volume to be restored to must be available')
            raise exception.InvalidVolume(reason=msg)

        LOG.debug('Checking backup size %(bs)s against volume size %(vs)s', {
            'bs': size,
            'vs': volume['size']
        })
        if size > volume['size']:
            msg = (_('volume size %(volume_size)d is too small to restore '
                     'backup of size %(size)d.') % {
                         'volume_size': volume['size'],
                         'size': size
                     })
            raise exception.InvalidVolume(reason=msg)

        LOG.info(_LI("Overwriting volume %(volume_id)s with restore of "
                     "backup %(backup_id)s"), {
                         'volume_id': volume_id,
                         'backup_id': backup_id
                     },
                 context=context)

        # Set the status here rather than at the start (which would require
        # unrolling it for each error condition); the window is very small
        backup.host = self._get_available_backup_service_host(
            backup.host, backup.availability_zone, volume_host=volume.host)
        backup.status = fields.BackupStatus.RESTORING
        backup.restore_volume_id = volume.id
        backup.save()
        self.db.volume_update(context, volume_id,
                              {'status': 'restoring-backup'})

        self.backup_rpcapi.restore_backup(context, backup.host, backup,
                                          volume_id)

        d = {
            'backup_id': backup_id,
            'volume_id': volume_id,
            'volume_name': volume['display_name'],
        }

        return d
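A hypothetical caller sketch for the restore() API above, letting it create the destination volume. The cinder.backup.api import path and the no-argument API() constructor are assumptions for illustration.

from cinder.backup import api as backup_api  # assumed import path


def restore_to_new_volume(context, backup_id):
    api = backup_api.API()  # constructor arguments assumed
    result = api.restore(context, backup_id, volume_id=None,
                         name='restore_%s' % backup_id)
    # The returned dict carries the ids needed to follow the restore afterwards.
    return result['backup_id'], result['volume_id'], result['volume_name']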
Example No. 24
    def reset_status(self, context, backup, status):
        """Reset volume backup status.

        :param context: running context
        :param backup: The backup object for reset status operation
        :param status: The status to be set
        :raises: InvalidBackup
        :raises: BackupVerifyUnsupportedDriver
        :raises: AttributeError
        """
        LOG.info(
            _LI('Reset backup status started, backup_id: '
                '%(backup_id)s, status: %(status)s.'), {
                    'backup_id': backup.id,
                    'status': status
                })

        backup_service = self._map_service_to_driver(backup.service)
        LOG.info(_LI('Backup service: %s.'), backup_service)
        if backup_service is not None:
            configured_service = self.driver_name
            if backup_service != configured_service:
                err = _('Reset backup status aborted, the backup service'
                        ' currently configured [%(configured_service)s] '
                        'is not the backup service that was used to create'
                        ' this backup [%(backup_service)s].') % \
                    {'configured_service': configured_service,
                     'backup_service': backup_service}
                raise exception.InvalidBackup(reason=err)
            # Verify backup
            try:
                # check whether the backup is ok or not
                if (status == fields.BackupStatus.AVAILABLE
                        and backup['status'] != fields.BackupStatus.RESTORING):
                    # check whether we could verify the backup is ok or not
                    if isinstance(backup_service,
                                  driver.BackupDriverWithVerify):
                        backup_service.verify(backup.id)
                        backup.status = status
                        backup.save()
                    # driver does not support verify function
                    else:
                        msg = (_('Backup service %(configured_service)s '
                                 'does not support verify. Backup id'
                                 ' %(id)s is not verified. '
                                 'Skipping verify.') % {
                                     'configured_service': self.driver_name,
                                     'id': backup.id
                                 })
                        raise exception.BackupVerifyUnsupportedDriver(
                            reason=msg)
                # reset status to error or from restoring to available
                else:
                    if (status == fields.BackupStatus.ERROR or
                        (status == fields.BackupStatus.AVAILABLE
                         and backup.status == fields.BackupStatus.RESTORING)):
                        backup.status = status
                        backup.save()
            except exception.InvalidBackup:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Backup id %s is not invalid. "
                            "Skipping reset."), backup.id)
            except exception.BackupVerifyUnsupportedDriver:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE('Backup service %(configured_service)s '
                            'does not support verify. Backup id '
                            '%(id)s is not verified. '
                            'Skipping verify.'), {
                                'configured_service': self.driver_name,
                                'id': backup.id
                            })
            except AttributeError:
                msg = (_('Backup service %(service)s does not support '
                         'verify. Backup id %(id)s is not verified. '
                         'Skipping reset.') % {
                             'service': self.driver_name,
                             'id': backup.id
                         })
                LOG.error(msg)
                raise exception.BackupVerifyUnsupportedDriver(reason=msg)

            # Needs to clean temporary volumes and snapshots.
            try:
                self._cleanup_temp_volumes_snapshots_for_one_backup(
                    context, backup)
            except Exception:
                LOG.exception(
                    _LE("Problem cleaning temp volumes and "
                        "snapshots for backup %(bkup)s."), {'bkup': backup.id})

            # send notification to ceilometer
            notifier_info = {'id': backup.id, 'update': {'status': status}}
            notifier = rpc.get_notifier('backupStatusUpdate')
            notifier.info(context, "backups.reset_status.end", notifier_info)
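As an illustration only, the branching that reset_status() accepts can be condensed into a small predicate. Plain strings stand in for the fields.BackupStatus values, and the verify call itself (which can still reject the reset) is left out.

def reset_is_allowed(current_status, requested_status, driver_can_verify):
    """Condensed restatement of the transitions handled above."""
    if requested_status == 'error':
        # Resetting to error is always accepted.
        return True
    if requested_status == 'available':
        # restoring -> available needs no verification; any other source
        # state requires a driver that implements verify().
        return current_status == 'restoring' or driver_can_verify
    # Any other requested status falls through the branches above unchanged.
    return False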