def delete_backup(self, context, backup):
    """Delete volume backup from configured backup service.

    Validates that the backup is in DELETING status, that this backup
    service is alive and is the service that created the backup, then
    deletes the backup data via the driver, releases quota, removes any
    cloned encryption key, destroys the DB record, and decrements the
    parent backup's dependent-backup count for incremental backups.

    :param context: request context
    :param backup: Backup object to delete
    :raises InvalidBackup: if the backup is not in DELETING status, the
        backup service is down, or the backup belongs to a different
        backup service than the one currently configured
    """
    LOG.info('Delete backup started, backup: %s.', backup.id)
    self._notify_about_backup_usage(context, backup, "delete.start")

    # The API layer is expected to have moved the backup to DELETING;
    # any other status indicates a stale or racing request.
    expected_status = fields.BackupStatus.DELETING
    actual_status = backup.status
    if actual_status != expected_status:
        err = _('Delete_backup aborted, expected backup status '
                '%(expected_status)s but got %(actual_status)s.') \
            % {'expected_status': expected_status,
               'actual_status': actual_status}
        volume_utils.update_backup_error(backup, err)
        raise exception.InvalidBackup(reason=err)

    # Only refuse on a down service when there is actual backend data to
    # delete (backup.service is set).
    if backup.service and not self.is_working():
        err = _('Delete backup is aborted due to backup service is down.')
        status = fields.BackupStatus.ERROR_DELETING
        volume_utils.update_backup_error(backup, err, status)
        raise exception.InvalidBackup(reason=err)

    # The configured driver must match the service that wrote the backup,
    # otherwise it cannot interpret/delete the stored data.
    if not self._is_our_backup(backup):
        err = _('Delete backup aborted, the backup service currently'
                ' configured [%(configured_service)s] is not the'
                ' backup service that was used to create this'
                ' backup [%(backup_service)s].')\
            % {'configured_service': self.driver_name,
               'backup_service': backup.service}
        volume_utils.update_backup_error(backup, err)
        raise exception.InvalidBackup(reason=err)

    # Ask the driver to remove the backup data from the backend; on
    # failure record the error on the backup and re-raise.
    if backup.service:
        try:
            backup_service = self.service(context)
            backup_service.delete_backup(backup)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                volume_utils.update_backup_error(backup, str(err))

    # Get reservations.  Quota release is best effort: a reservation
    # failure is logged but must not block deleting the backup itself.
    try:
        reserve_opts = {
            'backups': -1,
            'backup_gigabytes': -backup.size,
        }
        reservations = QUOTAS.reserve(context,
                                      project_id=backup.project_id,
                                      **reserve_opts)
    except Exception:
        reservations = None
        LOG.exception("Failed to update usages deleting backup")

    # Remove the backup's cloned encryption key (if any) before the DB
    # record goes away, and persist the cleared key ID.
    if backup.encryption_key_id is not None:
        volume_utils.delete_encryption_key(context,
                                           key_manager.API(CONF),
                                           backup.encryption_key_id)
        backup.encryption_key_id = None
        backup.save()

    backup.destroy()

    # If this backup is incremental backup, handle the
    # num_dependent_backups of parent backup: this one no longer depends
    # on its parent.
    if backup.parent_id:
        parent_backup = objects.Backup.get_by_id(context,
                                                 backup.parent_id)
        if parent_backup.has_dependent_backups:
            parent_backup.num_dependent_backups -= 1
            parent_backup.save()

    # Commit the reservations only after the record is actually gone.
    if reservations:
        QUOTAS.commit(context, reservations,
                      project_id=backup.project_id)

    LOG.info('Delete backup finished, backup %s deleted.', backup.id)
    self._notify_about_backup_usage(context, backup, "delete.end")
def _run_restore(self, context, backup, volume):
    """Attach *volume*, stream the backup data into it, then ensure the
    restored volume carries its own unique encryption key ID.

    :param context: request context
    :param backup: Backup object being restored
    :param volume: Volume object receiving the restored data
    :raises BackupRestoreCancel: propagated untouched so callers can
        distinguish a user cancel from a failure
    """
    original_key_id = volume.encryption_key_id
    backup_service = self.service(context)

    properties = volume_utils.brick_get_connector_properties()
    secure_enabled = (self.volume_rpcapi.secure_file_operations_enabled(
        context, volume))
    attach_info = self._attach_device(context, volume, properties)

    # NOTE(geguileo): Not all I/O disk operations properly do greenthread
    # context switching and may end up blocking the greenthread, so we go
    # with native threads proxy-wrapping the device file object.
    try:
        device_path = attach_info['device']['path']
        if os.name == 'nt':
            open_mode = 'rb+'
        else:
            open_mode = 'wb'
        is_plain_path = (isinstance(device_path, str)
                         and not os.path.isdir(device_path))
        if not is_plain_path:
            # device_path is already a file-like object; no open() needed.
            backup_service.restore(backup, volume.id,
                                   tpool.Proxy(device_path))
        elif secure_enabled:
            with open(device_path, open_mode) as device_file:
                backup_service.restore(backup, volume.id,
                                       tpool.Proxy(device_file))
        else:
            # Without secure file operations we must temporarily own the
            # device node to be able to open it.
            with utils.temporary_chown(device_path), \
                    open(device_path, open_mode) as device_file:
                backup_service.restore(backup, volume.id,
                                       tpool.Proxy(device_file))
    except exception.BackupRestoreCancel:
        # A cancel is not an error; let it bubble up unchanged.
        raise
    except Exception:
        LOG.exception(
            'Restoring backup %(backup_id)s to volume '
            '%(volume_id)s failed.',
            {'backup_id': backup.id, 'volume_id': volume.id})
        raise
    finally:
        # Always detach, even on failure or cancel.
        self._detach_device(context, attach_info, volume, properties,
                            force=True)

    # Regardless of whether the restore was successful, do some
    # housekeeping to ensure the restored volume's encryption key ID is
    # unique, and any previous key ID is deleted. Start by fetching fresh
    # info on the restored volume.
    restored_volume = objects.Volume.get_by_id(context, volume.id)
    restored_key_id = restored_volume.encryption_key_id
    if restored_key_id == original_key_id:
        LOG.debug(
            'Encryption key ID for volume %(volume_id)s already '
            'matches encryption key ID in backup %(backup_id)s.',
            {'volume_id': volume.id, 'backup_id': backup.id})
        return

    LOG.info(
        'Updating encryption key ID for volume %(volume_id)s '
        'from backup %(backup_id)s.',
        {'volume_id': volume.id, 'backup_id': backup.id})

    key_mgr = key_manager.API(CONF)
    if original_key_id is not None:
        LOG.debug('Deleting original volume encryption key ID.')
        volume_utils.delete_encryption_key(context, key_mgr,
                                           original_key_id)

    if backup.encryption_key_id is None:
        # This backup predates the current code that stores the cloned
        # key ID in the backup database. Fortunately, the key ID
        # restored from the backup data _is_ a clone of the original
        # volume's key ID, so grab it.
        LOG.debug('Gleaning backup encryption key ID from metadata.')
        backup.encryption_key_id = restored_key_id
        backup.save()

    # Clone the key ID again so every restored volume gets a unique key
    # ID; it must not equal backup.encryption_key_id (the copy made when
    # the backup was first created).
    restored_volume.encryption_key_id = volume_utils.clone_encryption_key(
        context, key_mgr, backup.encryption_key_id)
    restored_volume.save()