def _get_available_backup_service_host(self, host, az, volume_host=None):
    """Return an appropriate backup service host."""

    # FIXME(dulek): We need to keep compatibility with Liberty, where
    # c-bak services were coupled with c-vol. If we're running in a mixed
    # Liberty-Mitaka environment we will be scheduling backup jobs the
    # old way.
    #
    # This snippet should go away in Newton. Note that the volume_host
    # parameter will also be unnecessary then.
    if not self._is_scalable_only():
        if volume_host:
            volume_host = volume_utils.extract_host(volume_host,
                                                    level='host')
        if volume_host and self._is_backup_service_enabled(az,
                                                           volume_host):
            return volume_host
        elif host and self._is_backup_service_enabled(az, host):
            return host
        else:
            raise exception.ServiceNotFound(service_id='cinder-backup')

    backup_host = None
    if not host or not CONF.backup_use_same_host:
        backup_host = self._get_any_available_backup_service(az)
    elif self._is_backup_service_enabled(az, host):
        backup_host = host
    if not backup_host:
        raise exception.ServiceNotFound(service_id='cinder-backup')
    return backup_host
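# A minimal, self-contained sketch (not Cinder code) of the selection
# order implemented above. pick_backup_host, enabled_hosts and
# use_same_host are hypothetical stand-ins for the method, the set of
# hosts running an enabled c-bak service, and CONF.backup_use_same_host.

def pick_backup_host(host, enabled_hosts, scalable_only,
                     use_same_host=False, volume_host=None):
    if not scalable_only:
        # Liberty-compat path: prefer the volume's host, then the
        # requested host, else fail.
        if volume_host and volume_host in enabled_hosts:
            return volume_host
        if host and host in enabled_hosts:
            return host
        raise LookupError('cinder-backup')
    # Mitaka path: any enabled host, unless pinned to the given host.
    if not host or not use_same_host:
        return next(iter(sorted(enabled_hosts)), None)
    return host if host in enabled_hosts else None


# Mixed-version cloud: the volume's own host wins when it runs c-bak.
assert pick_backup_host('api-host', {'vol-host'}, scalable_only=False,
                        volume_host='vol-host') == 'vol-host'
# Scalable-only cloud: any enabled backup host may be chosen.
assert pick_backup_host(None, {'bak-1', 'bak-2'},
                        scalable_only=True) == 'bak-1'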
def test_manage_volume_service_not_found_on_host(self, mock_service):
    """Test correct failure when the host has no volume service on it."""
    body = {'volume': {'host': 'host_ok',
                       'ref': 'fake_ref'}}
    mock_service.side_effect = exception.ServiceNotFound(
        service_id='cinder-volume',
        host='host_ok')
    res = self._get_resp_post(body)
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
def import_record(self, context, backup_service, backup_url):
    """Make the RPC call to import a volume backup.

    :param context: running context
    :param backup_service: backup service name
    :param backup_url: backup description to be used by the backup
                       driver
    :raises InvalidBackup:
    :raises ServiceNotFound:
    :raises InvalidInput:
    """
    context.authorize(policy.IMPORT_POLICY)

    # NOTE(ronenkat): since we don't have a backup-scheduler
    # we need to find a host that supports the backup service
    # that was used to create the backup.
    # We send it to the first backup service host, and the backup
    # manager on that host will forward it to other hosts on the hosts
    # list if it cannot support the correct service itself.
    hosts = self._list_backup_hosts()
    if len(hosts) == 0:
        raise exception.ServiceNotFound(service_id=backup_service)

    # Get Backup object that will be used to import this backup record
    backup = self._get_import_backup(context, backup_url)

    first_host = hosts.pop()
    self.backup_rpcapi.import_record(context, first_host, backup,
                                     backup_service, backup_url, hosts)

    return backup
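# Sketch of the forwarding chain behind import_record: each hop pops one
# host and hands the remainder on, so every backup host gets one chance
# to claim the record. try_import and can_import are hypothetical
# stand-ins for the RPC hop and the driver-name check in the manager.

def try_import(hosts, can_import):
    while hosts:
        first_host = hosts.pop()
        if can_import(first_host):
            return first_host
    raise LookupError('no host supports this backup service')


# 'swift-bak' is popped first and declines; 'ceph-bak' then imports.
assert try_import(['ceph-bak', 'swift-bak'],
                  lambda h: h == 'ceph-bak') == 'ceph-bak'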
def fake_service_update(context, service_id, values):
    service = fake_service_get_by_id(service_id)
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
    else:
        # Return the updated record instead of building and discarding it.
        return {'host': 'host1', 'service': 'cinder-volume',
                'disabled': values['disabled']}
def delete(self, context, backup, force=False):
    """Make the RPC call to delete a volume backup.

    Call backup manager to execute backup delete or force delete
    operation.

    :param context: running context
    :param backup: the backup object retrieved from the DB
    :param force: indicate force delete or not
    :raises: InvalidBackup
    :raises: BackupDriverException
    :raises: ServiceNotFound
    """
    check_policy(context, 'delete')
    if not force and backup.status not in [fields.BackupStatus.AVAILABLE,
                                           fields.BackupStatus.ERROR]:
        msg = _('Backup status must be available or error')
        raise exception.InvalidBackup(reason=msg)
    if force and not self._check_support_to_force_delete(context,
                                                         backup.host):
        msg = _('force delete')
        raise exception.NotSupportedOperation(operation=msg)

    if not self._is_backup_service_enabled(backup['availability_zone'],
                                           backup.host):
        raise exception.ServiceNotFound(service_id='cinder-backup')

    # Don't allow backup to be deleted if there are incremental
    # backups dependent on it.
    deltas = self.get_all(context, search_opts={'parent_id': backup.id})
    if deltas:
        msg = _('Incremental backups exist for this backup.')
        raise exception.InvalidBackup(reason=msg)

    backup.status = fields.BackupStatus.DELETING
    backup.save()
    self.backup_rpcapi.delete_backup(context, backup)
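# Illustration of the dependency guard above: a backup with dependent
# incrementals (children whose parent_id points at it) must not be
# deleted. The in-memory 'catalog' list is a hypothetical stand-in for
# the get_all(..., search_opts={'parent_id': ...}) DB query.

catalog = [{'id': 'full-1', 'parent_id': None},
           {'id': 'incr-1', 'parent_id': 'full-1'}]


def has_dependents(backup_id):
    return any(b['parent_id'] == backup_id for b in catalog)


assert has_dependents('full-1')      # deleting full-1 must fail
assert not has_dependents('incr-1')  # the leaf backup may be deleted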
def fake_service_update(context, service_id, values):
    service = fake_service_get_by_id(service_id)
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
    else:
        # Return the updated record instead of building and discarding it.
        return {'host': 'host1', 'service': constants.VOLUME_BINARY,
                'disabled': values['disabled']}
def create(self, context, name, description, volume_id,
           container, availability_zone=None):
    """Make the RPC call to create a volume backup."""
    check_policy(context, 'create')
    volume = self.volume_api.get(context, volume_id)
    if volume['status'] != "available":
        msg = _('Volume to be backed up must be available')
        raise exception.InvalidVolume(reason=msg)
    volume_host = volume['host'].partition('@')[0]
    if not self._is_backup_service_enabled(volume, volume_host):
        raise exception.ServiceNotFound(service_id='cinder-backup')

    self.db.volume_update(context, volume_id, {'status': 'backing-up'})

    options = {'user_id': context.user_id,
               'project_id': context.project_id,
               'display_name': name,
               'display_description': description,
               'volume_id': volume_id,
               'status': 'creating',
               'container': container,
               'size': volume['size'],
               'host': volume_host, }

    backup = self.db.backup_create(context, options)

    # TODO(DuncanT): In future, when we have a generic local attach,
    #                this can go via the scheduler, which enables
    #                better load balancing and isolation of services
    self.backup_rpcapi.create_backup(context,
                                     backup['host'],
                                     backup['id'],
                                     volume_id)

    return backup
def import_record(self, context, backup_service, backup_url):
    """Make the RPC call to import a volume backup.

    :param context: running context
    :param backup_service: backup service name
    :param backup_url: backup description to be used by the backup
                       driver
    :raises: InvalidBackup
    :raises: ServiceNotFound
    """
    check_policy(context, 'backup-import')

    # NOTE(ronenkat): since we don't have a backup-scheduler
    # we need to find a host that supports the backup service
    # that was used to create the backup.
    # We send it to the first backup service host, and the backup
    # manager on that host will forward it to other hosts on the hosts
    # list if it cannot support the correct service itself.
    hosts = self._list_backup_services()
    if len(hosts) == 0:
        raise exception.ServiceNotFound(service_id=backup_service)

    options = {'user_id': context.user_id,
               'project_id': context.project_id,
               'volume_id': '0000-0000-0000-0000',
               'status': 'creating', }
    backup = self.db.backup_create(context, options)
    first_host = hosts.pop()
    self.backup_rpcapi.import_record(context,
                                     first_host,
                                     backup['id'],
                                     backup_service,
                                     backup_url,
                                     hosts)

    return backup
def _get_available_backup_service_host(self, host, az):
    """Return an appropriate backup service host."""
    backup_host = None
    if not host or not CONF.backup_use_same_host:
        backup_host = self._get_any_available_backup_service(az)
    elif self._is_backup_service_enabled(az, host):
        backup_host = host
    if not backup_host:
        raise exception.ServiceNotFound(service_id='cinder-backup')
    return backup_host
def db_service_get_by_host_and_topic(context, host, topic):
    """Replacement for db.service_get_by_host_and_topic.

    We stub the db.service_get_by_host_and_topic method to return
    something for a specific host, and raise an exception for anything
    else.  We don't use the returned data (the code under test just uses
    the call to check for the existence of a host, so the content
    returned doesn't matter).
    """
    if host == 'host_ok':
        return {}
    raise exception.ServiceNotFound(service_id=host)
def test_backup_create_scheduling_error(self, mock_msg_create,
                                        mock_get_vol, mock_vol_update,
                                        mock_update_error,
                                        mock_get_backup_host):
    manager = sch_manager.SchedulerManager()
    fake_context = mock.MagicMock()
    fake_backup = mock.MagicMock(id=fake.BACKUP_ID,
                                 volume_id=fake.VOLUME_ID)
    mock_get_vol.return_value = mock.MagicMock()
    mock_get_backup_host.side_effect = exception.ServiceNotFound(
        service_id='cinder-backup')
    manager.create_backup(fake_context, fake_backup)
    mock_msg_create.assert_called_once_with(
        fake_context,
        action=message_field.Action.BACKUP_CREATE,
        resource_type=message_field.Resource.VOLUME_BACKUP,
        resource_uuid=fake_backup.id,
        detail=message_field.Detail.BACKUP_SCHEDULE_ERROR)
def service_get(context, host, binary):
    """Replacement for Service.service_get_by_host_and_topic.

    We mock the Service.service_get_by_host_and_topic method to return
    something for a specific host, and raise an exception for anything
    else.  We don't use the returned data (the code under test just uses
    the call to check for the existence of a host, so the content
    returned doesn't matter).
    """
    if host == 'host_ok':
        return {'disabled': False}
    if host == 'host_disabled':
        return {'disabled': True}
    raise exception.ServiceNotFound(service_id=host)
def test_create_backup_no_service(self, mock_volume_get, mock_host,
                                  mock_error):
    volume = fake_volume.fake_db_volume()
    mock_volume_get.return_value = volume
    mock_host.side_effect = exception.ServiceNotFound(
        service_id='cinder-volume')
    backup = fake_backup.fake_backup_obj(self.context)

    self.manager.create_backup(self.context, backup=backup)

    mock_host.assert_called_once_with(volume)
    mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
    mock_error.assert_called_once_with(
        backup, 'Service not found for creating backup.')
def service_get(context, service_id, backend_match_level=None, host=None,
                **filters):
    """Replacement for db.sqlalchemy.api.service_get.

    We mock the db.sqlalchemy.api.service_get method to return something
    for a specific host, and raise an exception for anything else.  We
    don't use the returned data (the code under test just uses the call
    to check for the existence of a host, so the content returned
    doesn't matter).
    """
    if host == 'host_ok':
        return {'disabled': False}
    if host == 'host_disabled':
        return {'disabled': True}
    raise exception.ServiceNotFound(service_id=host)
def test_create_backup_no_service(self, mock_volume_update,
                                  mock_volume_get, mock_host, mock_error):
    volume = fake_volume.fake_db_volume()
    volume['status'] = 'backing-up'
    volume['previous_status'] = 'available'
    mock_volume_get.return_value = volume
    mock_host.side_effect = exception.ServiceNotFound(
        service_id='cinder-volume')
    backup = fake_backup.fake_backup_obj(self.context)

    self.manager.create_backup(self.context, backup=backup)

    mock_host.assert_called_once_with(volume)
    mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
    mock_volume_update.assert_called_once_with(
        self.context,
        backup.volume_id,
        {'status': 'available',
         'previous_status': 'backing-up'})
    mock_error.assert_called_once_with(
        backup, 'Service not found for creating backup.')
def service_get(context, service_id, backend_match_level=None, host=None,
                **filters):
    """Replacement for db.sqlalchemy.api.service_get.

    We mock the db.sqlalchemy.api.service_get method to return something
    for a specific host, and raise an exception for anything else.  We
    don't use the returned data (the code under test just uses the call
    to check for the existence of a host, so the content returned
    doesn't matter).
    """
    if host == 'host_ok':
        return {'disabled': False,
                'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
    if host == 'host_disabled':
        return {'disabled': True,
                'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}
    raise exception.ServiceNotFound(service_id=host)
def create(self, context, name, description, volume_id,
           container, incremental=False, availability_zone=None,
           force=False, snapshot_id=None):
    """Make the RPC call to create a volume backup."""
    check_policy(context, 'create')
    volume = self.volume_api.get(context, volume_id)
    snapshot = None
    if snapshot_id:
        snapshot = self.volume_api.get_snapshot(context, snapshot_id)

    if volume['status'] not in ["available", "in-use"]:
        msg = (_('Volume to be backed up must be available '
                 'or in-use, but the current status is "%s".')
               % volume['status'])
        raise exception.InvalidVolume(reason=msg)
    elif volume['status'] in ["in-use"] and not snapshot_id and not force:
        msg = _('Backing up an in-use volume must use '
                'the force flag.')
        raise exception.InvalidVolume(reason=msg)
    elif snapshot_id and snapshot['status'] not in ["available"]:
        msg = (_('Snapshot to be backed up must be available, '
                 'but the current status is "%s".')
               % snapshot['status'])
        raise exception.InvalidSnapshot(reason=msg)

    previous_status = volume['status']
    volume_host = volume_utils.extract_host(volume['host'], 'host')
    if not self._is_backup_service_enabled(volume['availability_zone'],
                                           volume_host):
        raise exception.ServiceNotFound(service_id='cinder-backup')

    # Reserve a quota before setting volume status and backup status
    try:
        reserve_opts = {'backups': 1,
                        'backup_gigabytes': volume['size']}
        reservations = QUOTAS.reserve(context, **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(resource_name):
            return (usages[resource_name]['reserved'] +
                    usages[resource_name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "%(s_size)sG backup (%(d_consumed)dG of "
                          "%(d_quota)dG already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  's_size': volume['size'],
                                  'd_consumed': _consumed(over),
                                  'd_quota': quotas[over]})
                raise exception.VolumeBackupSizeExceedsAvailableQuota(
                    requested=volume['size'],
                    consumed=_consumed('backup_gigabytes'),
                    quota=quotas['backup_gigabytes'])
            elif 'backups' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "backups (%(d_consumed)d backups "
                          "already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  'd_consumed': _consumed(over)})
                raise exception.BackupLimitExceeded(allowed=quotas[over])

    # Find the latest backup and use it as the parent backup to do an
    # incremental backup.
    latest_backup = None
    if incremental:
        backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                       volume_id)
        if backups.objects:
            # NOTE(xyang): The 'data_timestamp' field records the time
            # when the data on the volume was first saved. If it is
            # a backup from volume, 'data_timestamp' will be the same
            # as 'created_at' for a backup. If it is a backup from a
            # snapshot, 'data_timestamp' will be the same as
            # 'created_at' for a snapshot.
            # If not backing up from snapshot, the backup with the latest
            # 'data_timestamp' will be the parent; If backing up from
            # snapshot, the backup with the latest 'data_timestamp' will
            # be chosen only if 'data_timestamp' is earlier than the
            # 'created_at' timestamp of the snapshot; Otherwise, the
            # backup will not be chosen as the parent.
            # For example, a volume has a backup taken at 8:00, then
            # a snapshot taken at 8:10, and then a backup at 8:20.
            # When taking an incremental backup of the snapshot, the
            # parent should be the backup at 8:00, not 8:20, and the
            # 'data_timestamp' of this new backup will be 8:10.
            latest_backup = max(
                backups.objects,
                key=lambda x: x['data_timestamp']
                if (not snapshot or (snapshot and x['data_timestamp']
                                     < snapshot['created_at']))
                else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
        else:
            msg = _('No backups available to do an incremental backup.')
            raise exception.InvalidBackup(reason=msg)

    parent_id = None
    if latest_backup:
        parent_id = latest_backup.id
        if latest_backup['status'] != fields.BackupStatus.AVAILABLE:
            msg = _('The parent backup must be available for '
                    'incremental backup.')
            raise exception.InvalidBackup(reason=msg)

    data_timestamp = None
    if snapshot_id:
        snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
        data_timestamp = snapshot.created_at

    self.db.volume_update(context, volume_id,
                          {'status': 'backing-up',
                           'previous_status': previous_status})
    try:
        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'display_name': name,
            'display_description': description,
            'volume_id': volume_id,
            'status': fields.BackupStatus.CREATING,
            'container': container,
            'parent_id': parent_id,
            'size': volume['size'],
            'host': volume_host,
            'snapshot_id': snapshot_id,
            'data_timestamp': data_timestamp,
        }
        backup = objects.Backup(context=context, **kwargs)
        backup.create()
        if not snapshot_id:
            backup.data_timestamp = backup.created_at
            backup.save()
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                backup.destroy()
            finally:
                QUOTAS.rollback(context, reservations)

    # TODO(DuncanT): In future, when we have a generic local attach,
    #                this can go via the scheduler, which enables
    #                better load balancing and isolation of services
    self.backup_rpcapi.create_backup(context, backup)

    return backup
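# The NOTE(xyang) rule above in isolation: when backing up from a
# snapshot, candidates whose data_timestamp is not older than the
# snapshot's created_at are mapped to a minimal sentinel timestamp so
# max() never picks them. A self-contained sketch of the 8:00/8:10/8:20
# example, using stdlib timezone.utc instead of pytz:

from datetime import datetime, timezone

UTC = timezone.utc
SENTINEL = datetime(1, 1, 1, 1, 1, 1, tzinfo=UTC)

candidates = [
    {'id': 'b-0800',
     'data_timestamp': datetime(2024, 1, 1, 8, 0, tzinfo=UTC)},
    {'id': 'b-0820',
     'data_timestamp': datetime(2024, 1, 1, 8, 20, tzinfo=UTC)},
]
snapshot_created_at = datetime(2024, 1, 1, 8, 10, tzinfo=UTC)

parent = max(candidates,
             key=lambda x: x['data_timestamp']
             if x['data_timestamp'] < snapshot_created_at
             else SENTINEL)
assert parent['id'] == 'b-0800'  # the 8:20 backup postdates the snapshot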
def fake_service_get(context, service_id=None, **filters):
    result = fake_service_get_all(context, id=service_id, **filters)
    if not result:
        raise exception.ServiceNotFound(service_id=service_id)
    return result[0]
def import_record(self, context, backup, backup_service, backup_url,
                  backup_hosts):
    """Import all volume backup metadata details to the backup db.

    :param context: running context
    :param backup: The new backup object for the import
    :param backup_service: The needed backup driver for import
    :param backup_url: An identifier string to locate the backup
    :param backup_hosts: Potential hosts to execute the import
    :raises: InvalidBackup
    :raises: ServiceNotFound
    """
    LOG.info(_LI('Import record started, backup_url: %s.'), backup_url)

    # Can we import this backup?
    if backup_service != self.driver_name:
        # No, are there additional potential backup hosts in the list?
        if len(backup_hosts) > 0:
            # try the next host on the list, maybe it can import
            first_host = backup_hosts.pop()
            self.backup_rpcapi.import_record(context,
                                             first_host,
                                             backup,
                                             backup_service,
                                             backup_url,
                                             backup_hosts)
        else:
            # empty list - we are the last host on the list, fail
            err = _('Import record failed, cannot find backup '
                    'service to perform the import. Request service '
                    '%(service)s') % {'service': backup_service}
            self._update_backup_error(backup, context, err)
            raise exception.ServiceNotFound(service_id=backup_service)
    else:
        # Yes...
        try:
            # Deserialize backup record information
            backup_options = backup.decode_record(backup_url)

            # Extract driver specific info and pass it to the driver
            driver_options = backup_options.pop('driver_info', {})

            utils.require_driver_initialized(self.driver)
            backup_service = self.service.get_backup_driver(context)
            backup_service.import_record(backup, driver_options)
        except Exception as err:
            msg = six.text_type(err)
            self._update_backup_error(backup, context, msg)
            raise exception.InvalidBackup(reason=msg)

        required_import_options = {
            'display_name',
            'display_description',
            'container',
            'size',
            'service_metadata',
            'service',
            'object_count',
            'id'
        }

        # Check for missing fields in imported data
        missing_opts = required_import_options - set(backup_options)
        if missing_opts:
            msg = (_('Driver successfully decoded imported backup data, '
                     'but there are missing fields (%s).') %
                   ', '.join(missing_opts))
            self._update_backup_error(backup, context, msg)
            raise exception.InvalidBackup(reason=msg)

        # Confirm the ID from the record in the DB is the right one
        backup_id = backup_options['id']
        if backup_id != backup.id:
            msg = (_('Trying to import backup metadata from id %(meta_id)s'
                     ' into backup %(id)s.') %
                   {'meta_id': backup_id, 'id': backup.id})
            self._update_backup_error(backup, context, msg)
            raise exception.InvalidBackup(reason=msg)

        # Overwrite some fields
        backup_options['status'] = fields.BackupStatus.AVAILABLE
        backup_options['service'] = self.driver_name
        backup_options['availability_zone'] = self.az
        backup_options['host'] = self.host

        # Remove some values which are not actual fields and some that
        # were set by the API node
        for key in ('name', 'user_id', 'project_id'):
            backup_options.pop(key, None)

        # Update the database
        backup.update(backup_options)
        backup.save()

        # Verify backup
        try:
            if isinstance(backup_service, driver.BackupDriverWithVerify):
                backup_service.verify(backup.id)
            else:
                LOG.warning(_LW('Backup service %(service)s does not '
                                'support verify. Backup id %(id)s is '
                                'not verified. Skipping verify.'),
                            {'service': self.driver_name,
                             'id': backup.id})
        except exception.InvalidBackup as err:
            with excutils.save_and_reraise_exception():
                self._update_backup_error(backup, context,
                                          six.text_type(err))

    LOG.info(_LI('Import record id %s metadata from driver '
                 'finished.'), backup.id)
def import_record(self, context, backup, backup_service, backup_url,
                  backup_hosts):
    """Import all volume backup metadata details to the backup db.

    :param context: running context
    :param backup: The new backup object for the import
    :param backup_service: The needed backup driver for import
    :param backup_url: An identifier string to locate the backup
    :param backup_hosts: Potential hosts to execute the import
    :raises InvalidBackup:
    :raises ServiceNotFound:
    """
    LOG.info('Import record started, backup_url: %s.', backup_url)

    # Can we import this backup?
    if not self._is_our_backup(backup_service):
        # No, are there additional potential backup hosts in the list?
        if len(backup_hosts) > 0:
            # try the next host on the list, maybe it can import
            first_host = backup_hosts.pop()
            self.backup_rpcapi.import_record(context,
                                             first_host,
                                             backup,
                                             backup_service,
                                             backup_url,
                                             backup_hosts)
        else:
            # empty list - we are the last host on the list, fail
            err = _('Import record failed, cannot find backup '
                    'service to perform the import. Request service '
                    '%(service)s.') % {'service': backup_service}
            volume_utils.update_backup_error(backup, err)
            raise exception.ServiceNotFound(service_id=backup_service)
    else:
        # Yes...
        try:
            # Deserialize backup record information
            backup_options = backup.decode_record(backup_url)

            # Extract driver specific info and pass it to the driver
            driver_options = backup_options.pop('driver_info', {})

            backup_service = self.service(context)
            backup_service.import_record(backup, driver_options)
        except Exception as err:
            msg = str(err)
            volume_utils.update_backup_error(backup, msg)
            raise exception.InvalidBackup(reason=msg)

        required_import_options = {
            'display_name',
            'display_description',
            'container',
            'size',
            'service_metadata',
            'object_count',
            'id'
        }

        # Check for missing fields in imported data
        missing_opts = required_import_options - set(backup_options)
        if missing_opts:
            msg = (_('Driver successfully decoded imported backup data, '
                     'but there are missing fields (%s).') %
                   ', '.join(missing_opts))
            volume_utils.update_backup_error(backup, msg)
            raise exception.InvalidBackup(reason=msg)

        # Confirm the ID from the record in the DB is the right one
        backup_id = backup_options['id']
        if backup_id != backup.id:
            msg = (_('Trying to import backup metadata from id %(meta_id)s'
                     ' into backup %(id)s.') %
                   {'meta_id': backup_id, 'id': backup.id})
            volume_utils.update_backup_error(backup, msg)
            raise exception.InvalidBackup(reason=msg)

        # Overwrite some fields
        backup_options['service'] = self.driver_name
        backup_options['availability_zone'] = self.az
        backup_options['host'] = self.host

        # Remove some values which are not actual fields and some that
        # were set by the API node
        for key in ('name', 'user_id', 'project_id', 'deleted_at',
                    'deleted', 'fail_reason', 'status'):
            backup_options.pop(key, None)

        # Update the database
        backup.update(backup_options)
        backup.save()

        # Update the backup's status
        backup.update({"status": fields.BackupStatus.AVAILABLE})
        backup.save()

    LOG.info('Import record id %s metadata from driver '
             'finished.', backup.id)
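# The required-fields check above is plain set arithmetic: subtracting
# the decoded record's keys from the required set yields exactly the
# missing field names. A quick standalone check with made-up data:

required = {'display_name', 'display_description', 'container', 'size',
            'service_metadata', 'object_count', 'id'}
decoded = {'id': 'abc', 'size': 1, 'container': 'backups'}

missing = required - set(decoded)
assert missing == {'display_name', 'display_description',
                   'service_metadata', 'object_count'}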
def create_instance_backup(self, context, instance_uuid, name,
                           description, volume_ids, container,
                           incremental=False, availability_zone=None,
                           force=True):
    """Make the RPC call to create backup for a volume-based instance."""
    # Use the same policy as backup creation
    check_policy(context, 'create')

    server = nova.API().get_server(context, instance_uuid)
    if server.status not in ["ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED",
                             "SHELVED_OFFLOADED"]:
        msg = (_("Instance %(instance_uuid)s in %(status)s status "
                 "which is not allowed to be backed up.") %
               {'instance_uuid': instance_uuid,
                'status': server.status})
        raise exception.InvalidInstanceStatus(reason=msg)

    volumes = [self.volume_api.get(context, volume_id)
               for volume_id in volume_ids]
    for volume in volumes:
        # Verify all volumes are in 'in-use' state
        if volume['status'] != "in-use":
            msg = (_('Volume to be backed up must be in-use '
                     'but the current status is "%s".')
                   % volume['status'])
            raise exception.InvalidVolume(reason=msg)
        # Verify backup service is enabled on host
        volume_host = volume_utils.extract_host(volume['host'], 'host')
        if not self._is_backup_service_enabled(volume, volume_host):
            raise exception.ServiceNotFound(service_id='cinder-backup')

    backups = []
    inst_backup_kwargs = []
    # Add a 32-character UUID prefix to display_description, in order to
    # distinguish which backups are created at the same time
    desc_prefix = str(uuid.uuid4()).replace('-', '')
    for volume in volumes:
        # Reserve a quota before setting volume status and backup status
        try:
            reserve_opts = {'backups': 1,
                            'backup_gigabytes': volume['size']}
            LOG.info(_LI("create_instance_backup "
                         "reserve_opts: %(reserve_opts)s"),
                     {'reserve_opts': reserve_opts})
            reservations = QUOTAS.reserve(context, **reserve_opts)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            # reset status for the other volumes and
            # remove the related backup
            for backup in backups:
                self.db.volume_update(context, backup['volume_id'],
                                      {'status': 'in-use'})
                self.db.backup_update(context, backup['id'],
                                      {'status': 'error'})
                self.delete(context, backup['id'])

            def _consumed(resource_name):
                return (usages[resource_name]['reserved'] +
                        usages[resource_name]['in_use'])

            for over in overs:
                if 'gigabytes' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                              "create %(s_size)sG backup "
                              "(%(d_consumed)dG of "
                              "%(d_quota)dG already consumed)")
                    LOG.warning(msg, {'s_pid': context.project_id,
                                      's_size': volume['size'],
                                      'd_consumed': _consumed(over),
                                      'd_quota': quotas[over]})
                    raise exception.VolumeBackupSizeExceedsAvailableQuota(
                        requested=volume['size'],
                        consumed=_consumed('backup_gigabytes'),
                        quota=quotas['backup_gigabytes'])
                elif 'backups' in over:
                    msg = _LW("Quota exceeded for %(s_pid)s, tried to "
                              "create backups (%(d_consumed)d backups "
                              "already consumed)")
                    LOG.warning(msg, {'s_pid': context.project_id,
                                      'd_consumed': _consumed(over)})
                    raise exception.BackupLimitExceeded(
                        allowed=quotas[over])

        # Since Ceph doesn't use parent_id to determine an incremental
        # backup, comment this part.
        #
        # Find the latest backup of the volume and use it as the parent
        # backup to do an incremental backup.
        # latest_backup = None
        # if incremental:
        #     backups = \
        #         objects.BackupList.get_all_by_volume(context.elevated(),
        #                                              volume['id'])
        #     if backups.objects:
        #         latest_backup = max(backups.objects,
        #                             key=lambda x: x['created_at'])
        #     else:
        #         msg = _('No backups available '
        #                 'to do an incremental backup.')
        #         raise exception.InvalidBackup(reason=msg)

        latest_backup = None
        # Added for periodic backup
        if getattr(context, 'periodic', False):
            latest_backup = None
            description = PERIODICSTR + description if description \
                else PERIODICSTR
        else:
            if incremental:
                all_backups = self.db.\
                    backup_get_all_by_volume(context.elevated(),
                                             volume['id'])
                if all_backups:
                    normal_backups = []
                    for bk in all_backups:
                        if not bk.display_description or \
                                PERIODICSTR not in bk.display_description:
                            normal_backups.append(bk)
                    if normal_backups:
                        latest_backup = max(normal_backups,
                                            key=lambda x: x['created_at'])

        parent_id = None
        if latest_backup:
            if latest_backup['status'] == "available":
                parent_id = latest_backup['id']
                LOG.info(_LI("Found parent backup %(bak)s for volume "
                             "%(volume)s. Do an incremental backup."),
                         {'bak': latest_backup['id'],
                          'volume': volume['id']})
            elif latest_backup['status'] == "creating":
                msg = _('The parent backup is creating.')
                LOG.info(_LI("The parent backup %(bak)s is creating."),
                         {'bak': latest_backup['id']})
                raise exception.InvalidBackup(reason=msg)
            else:
                LOG.info(_LI("No backups available to do an incremental "
                             "backup, do a full backup for "
                             "volume %(volume)s."),
                         {'volume': volume['id']})
        else:
            LOG.info(_LI("No backups available to do an incremental "
                         "backup, do a full backup for volume "
                         "%(volume)s."),
                     {'volume': volume['id']})

        options = {'user_id': context.user_id,
                   'project_id': context.project_id,
                   'display_name': name,
                   'display_description': (lambda x: desc_prefix + x
                                           if x else desc_prefix)(
                       description),
                   'volume_id': volume['id'],
                   'status': 'creating',
                   'container': container,
                   'parent_id': parent_id,
                   # Set backup size to "0" which means it's not
                   # available. The backup driver will return the exact
                   # size when backing up is done. We lined up with OP
                   # that when a backup is in "creating" status, OP will
                   # show "--" in the "size" field instead of "0".
                   # 'size': volume['size'],
                   'size': 0,
                   'host': volume_host, }

        # (maqi) Use the volume display_description field to save the
        # volume's previous_status, since volumes in Kilo don't have a
        # previous_status field in the database
        previous_status = volume['status']
        self.db.volume_update(context, volume['id'],
                              {'status': 'backing-up',
                               'display_description': previous_status})
        try:
            backup = self.db.backup_create(context, options)
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    self.db.backup_destroy(context, backup['id'])
                finally:
                    QUOTAS.rollback(context, reservations)

        backups.append(backup)
        kwargs = {
            'host': backup['host'],
            'backup_id': backup['id'],
            'volume_id': volume['id'],
        }
        inst_backup_kwargs.append(kwargs)

    self.backup_rpcapi.create_instance_backup(context,
                                              instance_uuid,
                                              inst_backup_kwargs)
    LOG.debug("Ready to return from create_instance_backup "
              "with result: %(backups)s", {'backups': backups})
    return backups
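# How the desc_prefix above groups backups: every per-volume backup
# created by one create_instance_backup call shares the same
# 32-character prefix on display_description. A standalone sketch:

import uuid

desc_prefix = str(uuid.uuid4()).replace('-', '')
assert len(desc_prefix) == 32


def tag(description):
    # Mirrors the inline lambda used for 'display_description' above.
    return desc_prefix + description if description else desc_prefix


assert tag('nightly').startswith(desc_prefix)
assert tag(None) == desc_prefix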
def create(self, context, name, description, volume_id,
           container, incremental=False, availability_zone=None,
           force=False):
    """Make the RPC call to create a volume backup."""
    check_policy(context, 'create')
    volume = self.volume_api.get(context, volume_id)

    if volume['status'] not in ["available", "in-use"]:
        msg = (_('Volume to be backed up must be available '
                 'or in-use, but the current status is "%s".')
               % volume['status'])
        raise exception.InvalidVolume(reason=msg)
    elif volume['status'] in ["in-use"] and not force:
        msg = _('Backing up an in-use volume must use '
                'the force flag.')
        raise exception.InvalidVolume(reason=msg)

    previous_status = volume['status']
    volume_host = volume_utils.extract_host(volume['host'], 'host')
    if not self._is_backup_service_enabled(volume, volume_host):
        raise exception.ServiceNotFound(service_id='cinder-backup')

    # do quota reservation before setting volume status and backup status
    try:
        reserve_opts = {'backups': 1,
                        'backup_gigabytes': volume['size']}
        reservations = QUOTAS.reserve(context, **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(resource_name):
            return (usages[resource_name]['reserved'] +
                    usages[resource_name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "%(s_size)sG backup (%(d_consumed)dG of "
                          "%(d_quota)dG already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  's_size': volume['size'],
                                  'd_consumed': _consumed(over),
                                  'd_quota': quotas[over]})
                raise exception.VolumeBackupSizeExceedsAvailableQuota(
                    requested=volume['size'],
                    consumed=_consumed('backup_gigabytes'),
                    quota=quotas['backup_gigabytes'])
            elif 'backups' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "backups (%(d_consumed)d backups "
                          "already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  'd_consumed': _consumed(over)})
                raise exception.BackupLimitExceeded(
                    allowed=quotas[over])

    # Find the latest backup of the volume and use it as the parent
    # backup to do an incremental backup.
    latest_backup = None
    if incremental:
        backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                       volume_id)
        if backups.objects:
            latest_backup = max(backups.objects,
                                key=lambda x: x['created_at'])
        else:
            msg = _('No backups available to do an incremental backup.')
            raise exception.InvalidBackup(reason=msg)

    parent_id = None
    if latest_backup:
        parent_id = latest_backup.id
        if latest_backup['status'] != "available":
            msg = _('The parent backup must be available for '
                    'incremental backup.')
            raise exception.InvalidBackup(reason=msg)

    self.db.volume_update(context, volume_id,
                          {'status': 'backing-up',
                           'previous_status': previous_status})
    try:
        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'display_name': name,
            'display_description': description,
            'volume_id': volume_id,
            'status': 'creating',
            'container': container,
            'parent_id': parent_id,
            'size': volume['size'],
            'host': volume_host,
        }
        backup = objects.Backup(context=context, **kwargs)
        backup.create()
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                backup.destroy()
            finally:
                QUOTAS.rollback(context, reservations)

    # TODO(DuncanT): In future, when we have a generic local attach,
    #                this can go via the scheduler, which enables
    #                better load balancing and isolation of services
    self.backup_rpcapi.create_backup(context, backup)

    return backup
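# The reserve/commit/rollback shape used by the create() variants above,
# reduced to a hypothetical in-memory Quota class (a stand-in for
# cinder.quota.QUOTAS, not its real interface):

class Quota:
    def __init__(self, limit):
        self.limit, self.used, self.reserved = limit, 0, 0

    def reserve(self, n):
        # Phase 1: hold capacity before any state is written.
        if self.used + self.reserved + n > self.limit:
            raise RuntimeError('over quota')
        self.reserved += n
        return n

    def commit(self, n):
        # Phase 2a: the backup record was created, consume the hold.
        self.reserved -= n
        self.used += n

    def rollback(self, n):
        # Phase 2b: creation failed, release the hold.
        self.reserved -= n


q = Quota(limit=2)
r = q.reserve(1)
try:
    pass  # create the backup record here
except Exception:
    q.rollback(r)
    raise
else:
    q.commit(r)
assert (q.used, q.reserved) == (1, 0)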
def import_record(self, context, backup_id, backup_service, backup_url,
                  backup_hosts):
    """Import all volume backup metadata details to the backup db.

    :param context: running context
    :param backup_id: The new backup id for the import
    :param backup_service: The needed backup driver for import
    :param backup_url: An identifier string to locate the backup
    :param backup_hosts: Potential hosts to execute the import
    :raises: InvalidBackup
    :raises: ServiceNotFound
    """
    LOG.info(_('Import record started, backup_url: %s.'), backup_url)

    # Can we import this backup?
    if backup_service != self.driver_name:
        # No, are there additional potential backup hosts in the list?
        if len(backup_hosts) > 0:
            # try the next host on the list, maybe it can import
            first_host = backup_hosts.pop()
            self.backup_rpcapi.import_record(context,
                                             first_host,
                                             backup_id,
                                             backup_service,
                                             backup_url,
                                             backup_hosts)
        else:
            # empty list - we are the last host on the list, fail
            err = _('Import record failed, cannot find backup '
                    'service to perform the import. Request service '
                    '%(service)s') % {'service': backup_service}
            self.db.backup_update(context, backup_id,
                                  {'status': 'error',
                                   'fail_reason': err})
            raise exception.ServiceNotFound(service_id=backup_service)
    else:
        # Yes...
        try:
            utils.require_driver_initialized(self.driver)
            backup_service = self.service.get_backup_driver(context)
            backup_options = backup_service.import_record(backup_url)
        except Exception as err:
            msg = unicode(err)
            self.db.backup_update(context,
                                  backup_id,
                                  {'status': 'error',
                                   'fail_reason': msg})
            raise exception.InvalidBackup(reason=msg)

        required_import_options = ['display_name',
                                   'display_description',
                                   'container',
                                   'size',
                                   'service_metadata',
                                   'service',
                                   'object_count']

        backup_update = {}
        backup_update['status'] = 'available'
        backup_update['service'] = self.driver_name
        backup_update['availability_zone'] = self.az
        backup_update['host'] = self.host
        for entry in required_import_options:
            if entry not in backup_options:
                # Format the message here instead of building a
                # (template, entry) tuple that would be stored verbatim.
                msg = (_('Backup metadata received from driver for '
                         'import is missing %s.') % entry)
                self.db.backup_update(context,
                                      backup_id,
                                      {'status': 'error',
                                       'fail_reason': msg})
                raise exception.InvalidBackup(reason=msg)
            backup_update[entry] = backup_options[entry]

        # Update the database
        self.db.backup_update(context, backup_id, backup_update)

        # Verify backup
        try:
            if isinstance(backup_service, driver.BackupDriverWithVerify):
                backup_service.verify(backup_id)
            else:
                LOG.warn(_('Backup service %(service)s does not support '
                           'verify. Backup id %(id)s is not verified. '
                           'Skipping verify.') %
                         {'service': self.driver_name,
                          'id': backup_id})
        except exception.InvalidBackup as err:
            with excutils.save_and_reraise_exception():
                self.db.backup_update(context, backup_id,
                                      {'status': 'error',
                                       'fail_reason': unicode(err)})

    LOG.info(_('Import record id %s metadata from driver '
               'finished.') % backup_id)
def create(self, context, name, description, volume_id,
           container, incremental=False, availability_zone=None):
    """Make the RPC call to create a volume backup."""
    check_policy(context, 'create')
    volume = self.volume_api.get(context, volume_id)

    if volume['status'] != "available":
        msg = _('Volume to be backed up must be available')
        raise exception.InvalidVolume(reason=msg)

    volume_host = volume_utils.extract_host(volume['host'], 'host')
    if not self._is_backup_service_enabled(volume, volume_host):
        raise exception.ServiceNotFound(service_id='cinder-backup')

    # do quota reservation before setting volume status and backup status
    try:
        reserve_opts = {'backups': 1,
                        'backup_gigabytes': volume['size']}
        reservations = QUOTAS.reserve(context, **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(resource_name):
            return (usages[resource_name]['reserved'] +
                    usages[resource_name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "%(s_size)sG backup (%(d_consumed)dG of "
                          "%(d_quota)dG already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  's_size': volume['size'],
                                  'd_consumed': _consumed(over),
                                  'd_quota': quotas[over]})
                raise exception.VolumeBackupSizeExceedsAvailableQuota(
                    requested=volume['size'],
                    consumed=_consumed('backup_gigabytes'),
                    quota=quotas['backup_gigabytes'])
            elif 'backups' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "backups (%(d_consumed)d backups "
                          "already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  'd_consumed': _consumed(over)})
                raise exception.BackupLimitExceeded(allowed=quotas[over])

    # Find the latest backup of the volume and use it as the parent
    # backup to do an incremental backup.
    latest_backup = None
    # Added for periodic backup
    if getattr(context, 'periodic', False):
        latest_backup = None
        if description:
            description = PERIODICSTR + description
        else:
            description = PERIODICSTR
    else:
        if incremental:
            backups = self.db.backup_get_all_by_volume(context.elevated(),
                                                       volume_id)
            if backups:
                normal_backups = []
                for bk in backups:
                    if not bk.display_description or \
                            PERIODICSTR not in bk.display_description:
                        LOG.debug("Found normal backup %(bak)s "
                                  "for volume %(vol)s.",
                                  {"bak": bk.id, "vol": volume_id})
                        normal_backups.append(bk)
                if normal_backups:
                    LOG.debug("The normal backups for volume "
                              "%(vol)s: %(baks)s.",
                              {"vol": volume_id,
                               "baks": [bk.id for bk in normal_backups]})
                    latest_backup = max(normal_backups,
                                        key=lambda x: x['created_at'])

    parent_id = None
    if latest_backup:
        if latest_backup['status'] == "available":
            parent_id = latest_backup['id']
            LOG.info(_LI("Found parent backup %(bak)s for volume "
                         "%(volume)s. Do an incremental backup."),
                     {'bak': latest_backup['id'],
                      'volume': volume['id']})
        elif latest_backup['status'] == "creating":
            msg = _('The parent backup is creating.')
            LOG.info(_LI("The parent backup %(bak)s is creating."),
                     {'bak': latest_backup['id']})
            raise exception.InvalidBackup(reason=msg)
        else:
            LOG.info(_LI("No backups available to do an incremental "
                         "backup, do a full backup for volume "
                         "%(volume)s."),
                     {'volume': volume['id']})
    else:
        LOG.info(_LI("No backups available to do an incremental "
                     "backup, do a full backup for volume %(volume)s."),
                 {'volume': volume['id']})

    self.db.volume_update(context, volume_id, {'status': 'backing-up'})

    options = {'user_id': context.user_id,
               'project_id': context.project_id,
               'display_name': name,
               'display_description': description,
               'volume_id': volume_id,
               'status': 'creating',
               'container': container,
               'parent_id': parent_id,
               # Set backup size to "0" which means it's not available.
               # The backup driver will return the exact size when
               # backing up is done. We lined up with OP that when a
               # backup is in "creating" status, OP will show "--" in
               # the "size" field instead of "0".
               # 'size': volume['size'],
               'size': 0,
               'host': volume_host, }
    try:
        backup = self.db.backup_create(context, options)
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                self.db.backup_destroy(context, backup['id'])
            finally:
                QUOTAS.rollback(context, reservations)

    # TODO(DuncanT): In future, when we have a generic local attach,
    #                this can go via the scheduler, which enables
    #                better load balancing and isolation of services
    self.backup_rpcapi.create_backup(context,
                                     backup['host'],
                                     backup['id'],
                                     volume_id)

    return backup
def create(self, context, name, description, volume_id,
           container, availability_zone=None):
    """Make the RPC call to create a volume backup."""
    check_policy(context, 'create')
    volume = self.volume_api.get(context, volume_id)
    if volume['status'] != "available":
        msg = _('Volume to be backed up must be available')
        raise exception.InvalidVolume(reason=msg)
    volume_host = volume_utils.extract_host(volume['host'], 'host')
    if not self._is_backup_service_enabled(volume, volume_host):
        raise exception.ServiceNotFound(service_id='cinder-backup')

    # do quota reservation before setting volume status and backup status
    try:
        reserve_opts = {'backups': 1,
                        'backup_gigabytes': volume['size']}
        reservations = QUOTAS.reserve(context, **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(resource_name):
            return (usages[resource_name]['reserved'] +
                    usages[resource_name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "%(s_size)sG backup (%(d_consumed)dG of "
                          "%(d_quota)dG already consumed)")
                LOG.warn(msg % {'s_pid': context.project_id,
                                's_size': volume['size'],
                                'd_consumed': _consumed(over),
                                'd_quota': quotas[over]})
                raise exception.VolumeBackupSizeExceedsAvailableQuota(
                    requested=volume['size'],
                    consumed=_consumed('backup_gigabytes'),
                    quota=quotas['backup_gigabytes'])
            elif 'backups' in over:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "backups (%(d_consumed)d backups "
                          "already consumed)")
                LOG.warn(msg % {'s_pid': context.project_id,
                                'd_consumed': _consumed(over)})
                raise exception.BackupLimitExceeded(allowed=quotas[over])

    self.db.volume_update(context, volume_id, {'status': 'backing-up'})

    options = {'user_id': context.user_id,
               'project_id': context.project_id,
               'display_name': name,
               'display_description': description,
               'volume_id': volume_id,
               'status': 'creating',
               'container': container,
               'size': volume['size'],
               'host': volume_host, }
    try:
        backup = self.db.backup_create(context, options)
        QUOTAS.commit(context, reservations)
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                self.db.backup_destroy(context, backup['id'])
            finally:
                QUOTAS.rollback(context, reservations)

    # TODO(DuncanT): In future, when we have a generic local attach,
    #                this can go via the scheduler, which enables
    #                better load balancing and isolation of services
    self.backup_rpcapi.create_backup(context,
                                     backup['host'],
                                     backup['id'],
                                     volume_id)

    return backup