def delete_volume(self, volume): """Delete volume. Return ok if doesn't exist. Auto detach from all servers. """ # Get volume name name = self.configuration.zadara_vol_name_template % volume['name'] vpsa_vol = self._get_vpsa_volume_name(name) if not vpsa_vol: msg = _('Volume %(name)s could not be found. ' 'It might be already deleted') % {'name': name} LOG.warning(msg) if self.configuration.zadara_vpsa_allow_nonexistent_delete: return else: raise exception.VolumeNotFound(volume_id=name) # Check attachment info and detach from all xml_tree = self.vpsa.send_cmd('list_vol_attachments', vpsa_vol=vpsa_vol) servers = self._xml_parse_helper(xml_tree, 'servers', ('iqn', None), first=False) if servers: if not self.configuration.zadara_vpsa_auto_detach_on_delete: raise exception.VolumeAttached(volume_id=name) for server in servers: vpsa_srv = server.findtext('name') if vpsa_srv: self.vpsa.send_cmd('detach_volume', vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol) # Delete volume self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
def delete(self, context, volume, force=False):
    """Mark a volume as deleting and hand it off to the volume manager.

    When an admin deletes another project's volume, quota is charged to
    the owning project. Volumes that never reached a host (scheduling
    failed) are destroyed directly with a quota adjustment.

    :param context: request context; elevated internally where needed
    :param volume: volume dict to delete
    :param force: skip the status precondition check when True
    :raises InvalidVolume: disallowed status, ongoing migration, or
        dependent snapshots
    :raises VolumeAttached: the volume is still attached
    """
    # Charge quota against the owning project when an admin acts on
    # another project's volume.
    if context.is_admin and context.project_id != volume['project_id']:
        project_id = volume['project_id']
    else:
        project_id = context.project_id

    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        # Note(zhiteng): update volume quota reservation
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume['volume_type_id'])
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            # Best effort: still destroy the DB record even if the quota
            # reservation fails.
            reservations = None
            LOG.exception(_("Failed to update quota for deleting volume"))
        self.db.volume_destroy(context.elevated(), volume_id)

        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        return

    if not force and volume['status'] not in ["available", "error",
                                              "error_restoring",
                                              "error_extending"]:
        msg = _("Volume status must be available or error, "
                "but current status is: %s") % volume['status']
        raise exception.InvalidVolume(reason=msg)

    if volume['attach_status'] == "attached":
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume_id)

    # BUGFIX: None comparison must use 'is not', not '!=' (PEP 8 / E711).
    if volume['migration_status'] is not None:
        # Volume is migrating, wait until done
        msg = _("Volume cannot be deleted while migrating")
        raise exception.InvalidVolume(reason=msg)

    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    # Idiom: rely on container truthiness instead of len().
    if snapshots:
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)

    # If the volume is encrypted, delete its encryption key from the key
    # manager. This operation makes volume deletion an irreversible process
    # because the volume cannot be decrypted without its key.
    encryption_key_id = volume.get('encryption_key_id', None)
    if encryption_key_id is not None:
        self.key_manager.delete_key(context, encryption_key_id)

    now = timeutils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})

    self.volume_rpcapi.delete_volume(context, volume)
def delete_volume(self, context, volume_id):
    """Deletes and unexports volume.

    Removes the backend export and volume, adjusts quota usage, deletes
    Glance metadata, and destroys the DB record, emitting usage
    notifications around the whole operation.

    :raises VolumeAttached: volume is still attached
    :raises InvalidVolume: volume is hosted on a different node
    :returns: True on success (including the busy-volume early return)
    """
    context = context.elevated()
    volume_ref = self.db.volume_get(context, volume_id)

    # Quota adjustments go against the owning project, not the caller's.
    if context.project_id != volume_ref['project_id']:
        project_id = volume_ref['project_id']
    else:
        project_id = context.project_id

    LOG.info(_("volume %s: deleting"), volume_ref['name'])
    if volume_ref['attach_status'] == "attached":
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume_id)
    if volume_ref['host'] != self.host:
        raise exception.InvalidVolume(
            reason=_("volume is not local to this node"))

    self._notify_about_volume_usage(context, volume_ref, "delete.start")
    self._reset_stats()
    try:
        LOG.debug(_("volume %s: removing export"), volume_ref['name'])
        self.driver.remove_export(context, volume_ref)
        LOG.debug(_("volume %s: deleting"), volume_ref['name'])
        self.driver.delete_volume(volume_ref)
    except exception.VolumeIsBusy:
        # Driver refused (in use); restore the export and leave the
        # volume available rather than failing the whole request.
        LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
        self.driver.ensure_export(context, volume_ref)
        self.db.volume_update(context, volume_ref['id'],
                              {'status': 'available'})
        return True
    except Exception:
        # Record the failure state, then re-raise the original error.
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context,
                                  volume_ref['id'],
                                  {'status': 'error_deleting'})

    # Get reservations
    try:
        reservations = QUOTAS.reserve(context,
                                      project_id=project_id,
                                      volumes=-1,
                                      gigabytes=-volume_ref['size'])
    except Exception:
        # Best effort: the volume is already gone on the backend, so do
        # not fail the delete just because quota could not be updated.
        reservations = None
        LOG.exception(_("Failed to update usages deleting volume"))

    self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
    self.db.volume_destroy(context, volume_id)
    LOG.info(_("volume %s: deleted successfully"), volume_ref['name'])
    self._notify_about_volume_usage(context, volume_ref, "delete.end")

    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations, project_id=project_id)

    self.publish_service_capabilities(context)

    return True
def delete_volume(self, volume): LOG.info("delete_volume %s", volume) # delete snapshot or volume from snapshot if ('snapshot_id' not in volume) or (volume['snapshot_id'] is not None): volume_id = volume['provider_id'] req_vars = { 'server_ip': self.server_ip, 'server_port': self.server_port } request = ("http://%(server_ip)s:%(server_port)s" "/v1/osd-snapshot") % req_vars r, response = self._execute_px_get_request(request) LOG.info("get response %s", response) for one in response: if one['id'] == volume_id and 'attached_on' not in one: self._delete_volume(volume_id) return elif one['id'] == volume_id: raise exception.VolumeAttached(data=response) LOG.info("already deleted") return # delete volume else: volume_id = volume['provider_id'] req_vars = { 'server_ip': self.server_ip, 'server_port': self.server_port, 'provider_id': volume_id } request = ("http://%(server_ip)s:%(server_port)s" "/v1/osd-volumes/" "%(provider_id)s") % req_vars r, response = self._execute_px_get_request(request) LOG.info("get response %s", response) if response and 'id' in response[ 0] and 'attached_on' not in response[0]: self._delete_volume(volume_id) return elif not response or 'id' not in response[0]: LOG.info("already deleted") return else: raise exception.VolumeAttached(data=response)
def _assert_source_detached(self, volume):
    """Ensure the source volume is detached before cloning.

    The DotHill cannot clone a volume that is in use; bail out with
    VolumeAttached when the volume is not safely detached.
    """
    is_clonable = (volume['status'] == "available" and
                   volume['attach_status'] != "attached")
    if is_clonable:
        return
    LOG.error(_LE("Volume must be detached for clone operation."))
    raise exception.VolumeAttached(volume_id=volume['id'])
def _assert_source_detached(self, volume):
    """Verify the volume is detached before attempting an MSA clone.

    The MSA refuses to clone an in-use volume, so raise VolumeAttached
    when the source is not available or still attached.
    """
    in_use = (volume['status'] != "available"
              or volume['attach_status'] == "attached")
    if not in_use:
        return
    msg = _("Volume must be detached to perform a clone operation.")
    LOG.error(msg)
    raise exception.VolumeAttached(volume_id=volume['id'])
def _delete_volume(self, vol_id):
    """Issue the backend DELETE for *vol_id* and validate the reply.

    :raises VolumeBackendAPIException: non-200 HTTP status from backend
    :raises VolumeAttached: backend returned 200 but reported an error
    """
    url = ("http://%(server_ip)s:%(server_port)s"
           "/v1/osd-volumes/%(provider_id)s") % {
        'server_ip': self.server_ip,
        'server_port': self.server_port,
        'provider_id': vol_id,
    }
    result, payload = self._execute_px_delete_request(url)
    LOG.info("delete volume %s", payload)
    if result.status_code != http_client.OK:
        # Transport-level failure: the request itself was rejected.
        raise exception.VolumeBackendAPIException(data=payload)
    if 'error' in payload:
        # 200 OK but the backend flagged an error (e.g. still attached).
        raise exception.VolumeAttached(data=payload)
def delete(self, context, volume, force=False):
    """Mark a volume as deleting and hand it off to the volume manager.

    When an admin deletes another project's volume, quota is charged to
    the owning project. Volumes that never reached a host (scheduling
    failed) are destroyed directly with a quota adjustment.

    :param context: request context; elevated internally where needed
    :param volume: volume dict to delete
    :param force: skip the status precondition check when True
    :raises InvalidVolume: disallowed status or dependent snapshots
    :raises VolumeAttached: the volume is still attached
    """
    # Charge quota against the owning project when an admin acts on
    # another project's volume.
    if context.is_admin and context.project_id != volume['project_id']:
        project_id = volume['project_id']
    else:
        project_id = context.project_id

    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        # Note(zhiteng): update volume quota reservation
        try:
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          volumes=-1,
                                          gigabytes=-volume['size'])
        except Exception:
            # Best effort: still destroy the DB record even if the quota
            # reservation fails.
            reservations = None
            LOG.exception(_("Failed to update quota for deleting volume"))
        self.db.volume_destroy(context.elevated(), volume_id)

        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        return

    if not force and volume['status'] not in ["available", "error",
                                              "error_restoring"]:
        msg = _("Volume status must be available or error")
        raise exception.InvalidVolume(reason=msg)

    if volume['attach_status'] == "attached":
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume_id)

    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    # Idiom: rely on container truthiness instead of len().
    if snapshots:
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)

    now = timeutils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})

    self.volume_rpcapi.delete_volume(context, volume)
def delete_volume(self, context, volume_id):
    """Deletes and unexports volume.

    Removes the backend export and volume, then (unless this is the
    source side of a migration) adjusts quota usage, deletes Glance
    metadata, and destroys the DB record, emitting usage notifications
    around the whole operation.

    :raises VolumeAttached: volume is still attached
    :raises InvalidVolume: volume is hosted on a different node
    :returns: True on success (including the busy-volume early return)
    """
    context = context.elevated()
    volume_ref = self.db.volume_get(context, volume_id)

    # Quota adjustments go against the owning project, not the caller's.
    if context.project_id != volume_ref['project_id']:
        project_id = volume_ref['project_id']
    else:
        project_id = context.project_id

    LOG.info(_("volume %s: deleting"), volume_ref['id'])
    if volume_ref['attach_status'] == "attached":
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume_id)
    if volume_ref['host'] != self.host:
        raise exception.InvalidVolume(
            reason=_("volume is not local to this node"))

    self._notify_about_volume_usage(context, volume_ref, "delete.start")
    self._reset_stats()
    try:
        LOG.debug(_("volume %s: removing export"), volume_ref['id'])
        self.driver.remove_export(context, volume_ref)
        LOG.debug(_("volume %s: deleting"), volume_ref['id'])
        self.driver.delete_volume(volume_ref)
    except exception.VolumeIsBusy:
        # Driver refused (in use); restore the export and leave the
        # volume available rather than failing the whole request.
        LOG.error(_("Cannot delete volume %s: volume is busy"),
                  volume_ref['id'])
        self.driver.ensure_export(context, volume_ref)
        self.db.volume_update(context, volume_ref['id'],
                              {'status': 'available'})
        return True
    except Exception:
        # Record the failure state, then re-raise the original error.
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context,
                                  volume_ref['id'],
                                  {'status': 'error_deleting'})

    # If deleting the source volume in a migration, we want to skip quotas
    # and other database updates.
    if volume_ref['migration_status']:
        return True

    # Get reservations
    try:
        reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume_ref.get('volume_type_id'))
        reservations = QUOTAS.reserve(context,
                                      project_id=project_id,
                                      **reserve_opts)
    except Exception:
        # Best effort: the volume is already gone on the backend, so do
        # not fail the delete just because quota could not be updated.
        reservations = None
        LOG.exception(_("Failed to update usages deleting volume"))

    # Delete glance metadata if it exists
    try:
        self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
        LOG.debug(_("volume %s: glance metadata deleted"),
                  volume_ref['id'])
    except exception.GlanceMetadataNotFound:
        LOG.debug(_("no glance metadata found for volume %s"),
                  volume_ref['id'])

    self.db.volume_destroy(context, volume_id)
    LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
    self._notify_about_volume_usage(context, volume_ref, "delete.end")

    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations, project_id=project_id)

    self.publish_service_capabilities(context)

    return True
def _assert_source_detached(self, volume):
    """The array requires volume to be detached before cloning."""
    attached_state = fields.VolumeAttachStatus.ATTACHED
    detached = (volume['status'] == "available" and
                volume['attach_status'] != attached_state)
    if detached:
        return
    LOG.error("Volume must be detached for clone operation.")
    raise exception.VolumeAttached(volume_id=volume['id'])
def stub_volume_attached(self, context, volume, force=False):
    """Test stub: unconditionally report the volume as attached."""
    vol_id = volume['id']
    raise exception.VolumeAttached(volume_id=vol_id)