def _authorize_update_or_delete(self, context_project,
                                target_project_id,
                                parent_id):
    """Checks if update or delete are allowed in the current hierarchy.

    With hierarchical projects, only the admin of the parent or the root
    project has privilege to perform quota update and delete operations.

    :param context_project: The project in which the user is scoped to.
    :param target_project_id: The id of the project in which the user
                              wants to perform an update or delete
                              operation.
    :param parent_id: The parent id of the project in which the user
                      wants to perform an update or delete operation.
    """
    if context_project.parent_id and parent_id != context_project.id:
        msg = _("Update and delete quota operations can only be made "
                "by an admin of immediate parent or by the CLOUD admin.")
        raise webob.exc.HTTPForbidden(explanation=msg)

    if context_project.id != target_project_id:
        if not self._is_descendant(target_project_id,
                                   context_project.subtree):
            msg = _("Update and delete quota operations can only be made "
                    "to projects in the same hierarchy of the project in "
                    "which users are scoped to.")
            raise webob.exc.HTTPForbidden(explanation=msg)
    else:
        msg = _("Update and delete quota operations can only be made "
                "by an admin of immediate parent or by the CLOUD admin.")
        raise webob.exc.HTTPForbidden(explanation=msg)
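# NOTE: illustrative sketch only. The _is_descendant helper used above is
# not shown in this listing; a minimal version consistent with its call
# signature, assuming `subtree` is the nested {project_id: {child_id: {...}}}
# mapping Keystone returns for a project's subtree:
def _is_descendant(target_project_id, subtree):
    if subtree is not None:
        for key, value in subtree.items():
            if key == target_project_id:
                return True
            if _is_descendant(target_project_id, value):
                return True
    return False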
def _get_barbican_client(self, ctxt):
    """Creates a client to connect to the Barbican service.

    :param ctxt: the user context for authentication
    :return: a Barbican Client object
    :throws NotAuthorized: if the ctxt is None
    """
    if not self._barbican_client:
        # Confirm context is provided, if not raise not authorized
        if not ctxt:
            msg = _("User is not authorized to use key manager.")
            LOG.error(msg)
            raise exception.NotAuthorized(msg)

        try:
            auth = identity.v3.Token(
                auth_url=CONF.keymgr.encryption_auth_url,
                token=ctxt.auth_token)
            sess = session.Session(auth=auth)
            self._barbican_client = barbican_client.Client(
                session=sess,
                endpoint=self._barbican_endpoint)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error creating Barbican client: %s"), e)

    return self._barbican_client
def do_setup(self, context):
    """Any initialization the driver does while starting."""
    if not self.config.san_ip:
        raise exception.InvalidInput(
            reason=_('Gateway VIP option \'san_ip\' is not set'))
    if not self.config.gateway_mga:
        raise exception.InvalidInput(
            reason=_('Gateway MG-A IP option \'gateway_mga\' is not set'))
    if not self.config.gateway_mgb:
        raise exception.InvalidInput(
            reason=_('Gateway MG-B IP option \'gateway_mgb\' is not set'))
    if self.config.request_timeout <= 0:
        raise exception.InvalidInput(
            reason=_('Global timeout option \'request_timeout\' must be '
                     'greater than 0'))

    self.vip = vmemclient.open(self.config.san_ip,
                               self.config.san_login,
                               self.config.san_password,
                               keepalive=True)
    self.mga = vmemclient.open(self.config.gateway_mga,
                               self.config.san_login,
                               self.config.san_password,
                               keepalive=True)
    self.mgb = vmemclient.open(self.config.gateway_mgb,
                               self.config.san_login,
                               self.config.san_password,
                               keepalive=True)

    ret_dict = self.vip.basic.get_node_values(
        "/vshare/state/local/container/*")
    if ret_dict:
        self.container = list(ret_dict.items())[0][1]
def _authorize_show(self, context_project, target_project):
    """Checks if show is allowed in the current hierarchy.

    With hierarchical projects, users with the admin role in at least
    one of the following projects are allowed to perform a quota show
    operation: the current project, the immediate parent project, or
    the root project.

    :param context_project: The project in which the user is scoped to.
    :param target_project: The project in which the user wants to
                           perform a show operation.
    """
    if target_project.parent_id:
        if target_project.id != context_project.id:
            if not self._is_descendant(target_project.id,
                                       context_project.subtree):
                msg = _("Show operations can only be made to projects in "
                        "the same hierarchy of the project in which users "
                        "are scoped to.")
                raise webob.exc.HTTPForbidden(explanation=msg)
            if context_project.id != target_project.parent_id:
                if context_project.parent_id:
                    msg = _("Only users with token scoped to immediate "
                            "parents or root projects are allowed to see "
                            "its children quotas.")
                    raise webob.exc.HTTPForbidden(explanation=msg)
    elif context_project.parent_id:
        msg = _("A user with a token scoped to a subproject is not "
                "allowed to see the quota of its parents.")
        raise webob.exc.HTTPForbidden(explanation=msg)
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('consistencygroup changed'))
        if 'glance_metadata' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('glance_metadata changed'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('snapshots changed'))

        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(self._context,
                                                      self.id, metadata,
                                                      True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def _check_image_metadata(self, context, image_id, size):
    """Checks image existence and validates the image metadata."""
    # Check image existence
    if image_id is None:
        return

    # NOTE(harlowja): this should raise an error if the image does not
    # exist, this is expected as it signals that the image_id is missing.
    image_meta = self.image_service.show(context, image_id)

    # Check whether image is active
    if image_meta['status'] != 'active':
        msg = (_('Image %(image_id)s is not active.')
               % {'image_id': image_id})
        raise exception.InvalidInput(reason=msg)

    # Check image size is not larger than volume size.
    image_size = utils.as_int(image_meta['size'], quiet=False)
    image_size_in_gb = (image_size + GB - 1) // GB
    if image_size_in_gb > size:
        msg = _('Size of specified image %(image_size)sGB'
                ' is larger than volume size %(volume_size)sGB.')
        msg = msg % {'image_size': image_size_in_gb, 'volume_size': size}
        raise exception.InvalidInput(reason=msg)

    # Check image min_disk requirement is met for the particular volume
    min_disk = image_meta.get('min_disk', 0)
    if size < min_disk:
        msg = _('Volume size %(volume_size)sGB cannot be smaller'
                ' than the image minDisk size %(min_disk)sGB.')
        msg = msg % {'volume_size': size, 'min_disk': min_disk}
        raise exception.InvalidInput(reason=msg)
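# NOTE: the (image_size + GB - 1) // GB expression above is ceiling
# division, rounding a byte count up to whole GiB. A quick standalone
# check with illustrative numbers:
GB = 1024 ** 3
assert (1 + GB - 1) // GB == 1           # 1 byte rounds up to 1 GiB
assert (GB + GB - 1) // GB == 1          # exactly 1 GiB stays 1 GiB
assert (GB + 1 + GB - 1) // GB == 2      # 1 GiB + 1 byte rounds up to 2 GiB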
def validate_update(self, body):
    update = {}
    status = body.get('status', None)
    attach_status = body.get('attach_status', None)
    migration_status = body.get('migration_status', None)

    valid = False
    if status:
        valid = True
        update = super(VolumeAdminController, self).validate_update(body)

    if attach_status:
        valid = True
        update['attach_status'] = attach_status.lower()
        if update['attach_status'] not in self.valid_attach_status:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify a valid attach status"))

    if migration_status:
        valid = True
        update['migration_status'] = migration_status.lower()
        if update['migration_status'] not in self.valid_migration_status:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify a valid migration status"))
        if update['migration_status'] == 'none':
            update['migration_status'] = None

    if not valid:
        raise exc.HTTPBadRequest(
            explanation=_("Must specify 'status', 'attach_status' "
                          "or 'migration_status' for update."))
    return update
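# NOTE: the normalization above lower-cases the client-supplied values
# and maps the literal string 'none' to None. Shown standalone with
# illustrative values:
attach_status = 'Detached'.lower()       # -> 'detached'
migration_status = 'None'.lower()        # -> 'none'
if migration_status == 'none':
    migration_status = None
assert attach_status == 'detached' and migration_status is None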
def delete(self, req, id, body):
    """Delete a consistency group."""
    LOG.debug('delete called for member %s', id)
    context = req.environ['cinder.context']
    force = False
    if body:
        if not self.is_valid_body(body, 'consistencygroup'):
            msg = _("Missing required element 'consistencygroup' in "
                    "request body.")
            raise exc.HTTPBadRequest(explanation=msg)

        cg_body = body['consistencygroup']
        raw_force = cg_body.get('force', False)
        try:
            force = strutils.bool_from_string(raw_force, strict=True)
        except ValueError:
            # Report the value the client actually sent, not the default.
            msg = _("Invalid value '%s' for force.") % raw_force
            raise exc.HTTPBadRequest(explanation=msg)

    LOG.info(_LI('Delete consistency group with id: %s'), id)

    try:
        group = self.consistencygroup_api.get(context, id)
        self.consistencygroup_api.delete(context, group, force)
    # Not found exception will be handled at the wsgi level
    except exception.InvalidConsistencyGroup as error:
        raise exc.HTTPBadRequest(explanation=error.msg)

    return webob.Response(status_int=202)
def _clone_with_extension_check(self, source, destination_volume):
    source_size = source['size']
    source_id = source['id']
    source_name = source['name']
    destination_volume_size = destination_volume['size']
    self._clone_backing_file_for_volume(source_name,
                                        destination_volume['name'],
                                        source_id)
    path = self.local_path(destination_volume)
    if self._discover_file_till_timeout(path):
        self._set_rw_permissions(path)
        if destination_volume_size != source_size:
            try:
                self.extend_volume(destination_volume,
                                   destination_volume_size)
            except Exception:
                LOG.error(_LE("Resizing %s failed. Cleaning "
                              "volume."), destination_volume['name'])
                self._cleanup_volume_on_failure(destination_volume)
                raise exception.CinderException(
                    _("Resizing clone %s failed.")
                    % destination_volume['name'])
    else:
        raise exception.CinderException(_("NFS file %s not discovered.")
                                        % destination_volume['name'])
def _copy_volume_with_file(src, dest, size_in_m):
    src_handle = src
    if isinstance(src, six.string_types):
        src_handle = _open_volume_with_path(src, 'rb')

    dest_handle = dest
    if isinstance(dest, six.string_types):
        dest_handle = _open_volume_with_path(dest, 'wb')

    if not src_handle:
        raise exception.DeviceUnavailable(
            _("Failed to copy volume, source device unavailable."))

    if not dest_handle:
        raise exception.DeviceUnavailable(
            _("Failed to copy volume, destination device unavailable."))

    start_time = timeutils.utcnow()

    _transfer_data(src_handle, dest_handle, size_in_m * units.Mi,
                   units.Mi * 4)

    duration = max(1, timeutils.delta_seconds(start_time,
                                              timeutils.utcnow()))

    if isinstance(src, six.string_types):
        src_handle.close()
    if isinstance(dest, six.string_types):
        dest_handle.close()

    mbps = (size_in_m / duration)
    LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at "
                 "%(mbps).2f MB/s)."),
             {'size_in_m': size_in_m, 'mbps': mbps})
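# NOTE: illustrative sketch only. The _transfer_data helper above is not
# shown in this listing; a minimal chunked copy loop consistent with its
# call signature (src, dest, length, chunk_size) could look like this:
def _transfer_data(src, dest, length, chunk_size):
    """Copy `length` bytes from src to dest in chunk_size-byte reads."""
    remaining = length
    while remaining > 0:
        data = src.read(min(chunk_size, remaining))
        if not data:
            break  # source exhausted early
        dest.write(data)
        remaining -= len(data)
    dest.flush()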
def terminate_connection(self, volume, connector, **kwargs):
    """Disallow connection from connector."""
    ret, output = self.dpl.unassign_vdev(
        self._conver_uuid2hex(volume['id']),
        connector['initiator'])

    if ret == errno.EAGAIN:
        ret, event_uuid = self._get_event_uuid(output)
        if ret == 0:
            status = self._wait_event(
                self.dpl.get_vdev_status, volume['id'], event_uuid)
            if status['state'] == 'error':
                ret = errno.EFAULT
                msg = _('Flexvisor failed to unassign volume %(id)s:'
                        ' %(status)s.') % {'id': volume['id'],
                                           'status': status}
                raise exception.VolumeBackendAPIException(data=msg)
        else:
            msg = _('Flexvisor failed to unassign volume (get event) '
                    '%(id)s.') % {'id': volume['id']}
            raise exception.VolumeBackendAPIException(data=msg)
    elif ret == errno.ENODATA:
        LOG.info('Flexvisor already unassigned volume %(id)s.',
                 {'id': volume['id']})
    elif ret != 0:
        msg = _('Flexvisor failed to unassign volume:%(id)s:'
                '%(status)s.') % {'id': volume['id'], 'status': ret}
        raise exception.VolumeBackendAPIException(data=msg)
def _post_sub_clone_resize(self, path):
    """Try post sub clone resize in a transactional manner."""
    st_tm_mv, st_nw_mv, st_del_old = None, None, None
    seg = path.split("/")
    LOG.info(_LI("Post clone resize LUN %s"), seg[-1])
    new_lun = 'new-%s' % (seg[-1])
    tmp_lun = 'tmp-%s' % (seg[-1])
    tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
    new_path = "/vol/%s/%s" % (seg[2], new_lun)
    try:
        st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
        st_nw_mv = self.zapi_client.move_lun(new_path, path)
        st_del_old = self.zapi_client.destroy_lun(tmp_path)
    except Exception as e:
        if st_tm_mv is None:
            msg = _("Failure staging LUN %s to tmp.")
            raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
        else:
            if st_nw_mv is None:
                self.zapi_client.move_lun(tmp_path, path)
                msg = _("Failure moving new cloned LUN to %s.")
                raise exception.VolumeBackendAPIException(
                    data=msg % (seg[-1]))
            elif st_del_old is None:
                LOG.error(_LE("Failure deleting staged tmp LUN %s."),
                          tmp_lun)
            else:
                LOG.error(_LE("Unknown exception in"
                              " post clone resize LUN %s."), seg[-1])
                LOG.error(_LE("Exception details: %s"), e)
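# NOTE: the path bookkeeping above, shown standalone with an illustrative
# path. split("/") on "/vol/vol0/lun1" yields ['', 'vol', 'vol0', 'lun1'],
# so seg[2] is the containing volume and seg[-1] the LUN name:
path = "/vol/vol0/lun1"
seg = path.split("/")
tmp_path = "/vol/%s/%s" % (seg[2], 'tmp-%s' % seg[-1])
new_path = "/vol/%s/%s" % (seg[2], 'new-%s' % seg[-1])
assert tmp_path == "/vol/vol0/tmp-lun1"
assert new_path == "/vol/vol0/new-lun1"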
def _get_existing_vol_with_manage_ref(self, existing_ref):
    """Get the corresponding LUN from the storage server."""
    uuid = existing_ref.get('source-id')
    path = existing_ref.get('source-name')

    if not (uuid or path):
        reason = _('Reference must contain either source-id'
                   ' or source-name element.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=reason)

    lun_info = {}
    lun_info.setdefault('path', path if path else None)
    if hasattr(self, 'vserver') and uuid:
        lun_info['uuid'] = uuid
    luns = self.zapi_client.get_lun_by_args(**lun_info)

    for lun in luns or []:
        netapp_lun = self._extract_lun_info(lun)
        storage_valid = self._is_lun_valid_on_storage(netapp_lun)
        # If a UUID was given, it must match the LUN's metadata.
        uuid_valid = (uuid is None or
                      netapp_lun.get_metadata_property('UUID') == uuid)
        if storage_valid and uuid_valid:
            return netapp_lun

    raise exception.ManageExistingInvalidReference(
        existing_ref=existing_ref,
        reason=(_('LUN not found with given ref %s.') % existing_ref))
def _save_and_activate_cfg(self, checksum, activate, active_cfg_name):
    body = {"checksum": checksum}
    json_str = json.dumps(body)
    url = self._build_url(rest_constants.PATCH_CFG_SAVE)
    response = self.session.patch(url, data=json_str)
    if response.status_code == 204:
        LOG.info("REST cfg save success")
    else:
        msg = (_("REST cfg save failed: %s")
               % six.text_type(response.text))
        LOG.error(msg)
        raise exception.BrocadeZoningRestException(reason=msg)

    # If activate is True, enable the saved cfg changes so they become
    # the effective cfg.
    if activate:
        checksum = self._get_checksum()
        body = {"checksum": checksum}
        json_str = json.dumps(body)
        url = self._build_url(rest_constants.PATCH_CFG_ENABLE
                              + active_cfg_name)
        response = self.session.patch(url, data=json_str)
        if response.status_code == 204:
            LOG.info("REST cfg activate success: %s", active_cfg_name)
        else:
            msg = (_("REST cfg activate failed: %s")
                   % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
def create(self, group_snapshot_id=None, source_group_id=None):
    if self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='create',
                                          reason=_('already_created'))
    updates = self.cinder_obj_get_changes()
    if 'volume_types' in updates:
        raise exception.ObjectActionError(
            action='create', reason=_('volume_types assigned'))
    if 'volumes' in updates:
        raise exception.ObjectActionError(action='create',
                                          reason=_('volumes assigned'))
    if 'group_snapshots' in updates:
        raise exception.ObjectActionError(
            action='create', reason=_('group_snapshots assigned'))

    db_groups = db.group_create(self._context, updates,
                                group_snapshot_id, source_group_id)
    self._from_db_object(self._context, self, db_groups)
def _prepare_fc_map(self, fc_map_id, timeout):
    self.ssh.prestartfcmap(fc_map_id)
    mapping_ready = False
    wait_time = 5
    # Integer division so range() gets an int on Python 3.
    max_retries = (timeout // wait_time) + 1
    for try_number in range(1, max_retries):
        mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
        if (mapping_attrs is None or
                'status' not in mapping_attrs):
            break
        if mapping_attrs['status'] == 'prepared':
            mapping_ready = True
            break
        elif mapping_attrs['status'] == 'stopped':
            self.ssh.prestartfcmap(fc_map_id)
        elif mapping_attrs['status'] != 'preparing':
            msg = (_('Unexpected mapping status %(status)s for mapping '
                     '%(id)s. Attributes: %(attr)s')
                   % {'status': mapping_attrs['status'],
                      'id': fc_map_id,
                      'attr': mapping_attrs})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        greenthread.sleep(wait_time)

    if not mapping_ready:
        msg = (_('Mapping %(id)s prepare failed to complete within the '
                 'allotted %(to)d seconds timeout. Terminating.')
               % {'id': fc_map_id, 'to': timeout})
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)
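# NOTE: retry-budget arithmetic used above, shown standalone with
# illustrative numbers: with a 30 s timeout and 5 s waits, the loop polls
# up to six times before giving up.
timeout, wait_time = 30, 5
max_retries = (timeout // wait_time) + 1
assert list(range(1, max_retries)) == [1, 2, 3, 4, 5, 6]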
def _check_pool_and_fs(self, volume, fs_label):
    """Validation of the pool and filesystem.

    Checks if the file system for the volume-type chosen matches the
    one passed in the volume reference. Also, checks if the pool
    for the volume type matches the pool for the host passed.

    :param volume: Reference to the volume.
    :param fs_label: Label of the file system.
    """
    pool_from_vol_type = self.get_pool(volume)

    pool_from_host = utils.extract_host(volume['host'], level='pool')

    if self.config['services'][pool_from_vol_type]['hdp'] != fs_label:
        msg = (_("Failed to manage existing volume because the pool of "
                 "the volume type chosen (%(pool_vt)s) does not match "
                 "the file system passed in the volume reference "
                 "(%(fs)s).")
               % {'pool_vt':
                  self.config['services'][pool_from_vol_type]['hdp'],
                  'fs': fs_label})
        raise exception.ManageExistingVolumeTypeMismatch(reason=msg)

    if pool_from_host != pool_from_vol_type:
        msg = (_("Failed to manage existing volume because the pool of "
                 "the volume type chosen (%(pool_vt)s) does not match "
                 "the pool of the host (%(pool_host)s).")
               % {'pool_vt': pool_from_vol_type,
                  'pool_host': pool_from_host})
        raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
    # Validate volumes in remove_volumes.
    remove_volumes_new = []
    for volume in volumes:
        if volume['id'] in remove_volumes_list:
            if volume['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
                msg = (_("Cannot remove volume %(volume_id)s from "
                         "consistency group %(group_id)s because volume "
                         "is in an invalid state: %(status)s. Valid "
                         "states are: %(valid)s.")
                       % {'volume_id': volume['id'],
                          'group_id': group['id'],
                          'status': volume['status'],
                          'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
                raise exception.InvalidVolume(reason=msg)
            # Volume currently in CG. It will be removed from CG.
            remove_volumes_new.append(volume['id'])

    for rem_vol in remove_volumes_list:
        # List membership avoids the substring false-positives a
        # comma-joined string comparison could allow.
        if rem_vol not in remove_volumes_new:
            msg = (_("Cannot remove volume %(volume_id)s from "
                     "consistency group %(group_id)s because it "
                     "is not in the group.")
                   % {'volume_id': rem_vol,
                      'group_id': group['id']})
            raise exception.InvalidVolume(reason=msg)

    # Preserve the comma-separated string format expected by callers.
    return ",".join(remove_volumes_new)
def get_all(self, context, marker=None, limit=None, sort_key='created_at',
            sort_dir='desc', filters=None):
    check_policy(context, 'get_all')
    if filters is None:
        filters = {}

    try:
        if limit is not None:
            limit = int(limit)
            if limit < 0:
                msg = _('limit param must be non-negative')
                raise exception.InvalidInput(reason=msg)
    except ValueError:
        msg = _('limit param must be an integer')
        raise exception.InvalidInput(reason=msg)

    if filters:
        LOG.debug("Searching by: %s", filters)

    if (context.is_admin and 'all_tenants' in filters):
        # Need to remove all_tenants to pass the filtering below.
        del filters['all_tenants']
        groups = self.db.consistencygroup_get_all(context)
    else:
        groups = self.db.consistencygroup_get_all_by_project(
            context, context.project_id)

    return groups
def _resource_and_snap_data_from_snapshot(self, snapshot, empty_ok=False):
    """Find the DRBD resource and the snapshot name from the snapshot ID."""
    s_uuid = snapshot["id"]
    res, rs = self.call_or_reconnect(
        self.odm.list_snapshots,
        self.empty_list,
        self.empty_list,
        dm_utils.dict_to_aux_props({CINDER_AUX_PROP_id: s_uuid}),
        self.empty_list,
    )
    self._check_result(res)

    if (not rs) or (len(rs) == 0):
        if empty_ok:
            return None
        else:
            raise exception.VolumeBackendAPIException(
                data=_("no snapshot with id %s found in drbdmanage")
                % s_uuid)

    if len(rs) > 1:
        raise exception.VolumeBackendAPIException(
            data=_("multiple resources with snapshot ID %s found")
            % s_uuid)

    (r_name, snaps) = rs[0]
    if len(snaps) != 1:
        raise exception.VolumeBackendAPIException(
            data=_("not exactly one snapshot with id %s") % s_uuid)

    (s_name, s_props) = snaps[0]

    LOG.debug("snapshot %(uuid)s is %(res)s/%(snap)s",
              {'uuid': s_uuid, 'res': r_name, 'snap': s_name})

    return r_name, s_name, s_props
def __init__(self, mount_type, root_helper,
             execute=putils.execute, *args, **kwargs):
    self._mount_type = mount_type
    if mount_type == "nfs":
        self._mount_base = kwargs.get('nfs_mount_point_base', None)
        if not self._mount_base:
            raise exception.InvalidParameterValue(
                err=_('nfs_mount_point_base required'))
        self._mount_options = kwargs.get('nfs_mount_options', None)
        self._check_nfs_options()
    elif mount_type == "cifs":
        self._mount_base = kwargs.get('smbfs_mount_point_base', None)
        if not self._mount_base:
            raise exception.InvalidParameterValue(
                err=_('smbfs_mount_point_base required'))
        self._mount_options = kwargs.get('smbfs_mount_options', None)
    elif mount_type == "glusterfs":
        self._mount_base = kwargs.get('glusterfs_mount_point_base', None)
        if not self._mount_base:
            raise exception.InvalidParameterValue(
                err=_('glusterfs_mount_point_base required'))
        self._mount_options = None
    else:
        raise exception.ProtocolNotSupported(protocol=mount_type)
    self.root_helper = root_helper
    self.set_execute(execute)
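# NOTE: illustrative construction only, assuming this is the
# RemoteFsClient constructor from Cinder's remotefs module; the root
# helper and mount-point base below are hypothetical values. Each mount
# type requires its own *_mount_point_base kwarg:
client = RemoteFsClient('nfs', 'sudo',
                        nfs_mount_point_base='/var/lib/cinder/mnt')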
def _resource_and_snap_data_from_snapshot(self, snapshot, empty_ok=False):
    """Find DRBD resource and snapshot name from the snapshot ID."""
    s_uuid = snapshot['id']
    res, rs = self.call_or_reconnect(self.odm.list_snapshots,
                                     self.empty_dict,
                                     self.empty_dict,
                                     0,
                                     dm_utils.dict_to_aux_props(
                                         {AUX_PROP_CINDER_VOL_ID: s_uuid}),
                                     self.empty_dict)
    self._check_result(res)

    if (not rs) or (len(rs) == 0):
        if empty_ok:
            return None
        else:
            raise exception.VolumeBackendAPIException(
                data=_("no snapshot with id %s found in drbdmanage")
                % s_uuid)

    if len(rs) > 1:
        raise exception.VolumeBackendAPIException(
            data=_("multiple resources with snapshot ID %s found")
            % s_uuid)

    (r_name, snaps) = rs[0]
    if len(snaps) != 1:
        raise exception.VolumeBackendAPIException(
            data=_("not exactly one snapshot with id %s") % s_uuid)

    (s_name, s_props) = snaps[0]

    LOG.debug("snapshot %(uuid)s is %(res)s/%(snap)s",
              {'uuid': s_uuid, 'res': r_name, 'snap': s_name})

    return r_name, s_name, s_props
def create_share(self, pool, project, share, args):
    """Create a share in the specified pool and project."""
    svc = self.share_path % (pool, project, share)
    ret = self.rclient.get(svc)
    if ret.status != restclient.Status.OK:
        svc = self.shares_path % (pool, project)
        args.update({'name': share})
        ret = self.rclient.post(svc, args)
        if ret.status != restclient.Status.CREATED:
            exception_msg = (_('Error Creating '
                               'Share: %(name)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'name': share,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
    else:
        LOG.debug('Editing properties of a pre-existing share')
        ret = self.rclient.put(svc, args)
        if ret.status != restclient.Status.ACCEPTED:
            exception_msg = (_('Error editing share: '
                               '%(share)s on '
                               'Pool: %(pool)s '
                               'Return code: %(ret.status)d '
                               'Message: %(ret.data)s.')
                             % {'share': share,
                                'pool': pool,
                                'ret.status': ret.status,
                                'ret.data': ret.data})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
def _change_service_state(self, service, state=''):
    svc = self.services_path + service + '/' + state
    ret = self.rclient.put(svc)
    if ret.status != restclient.Status.ACCEPTED:
        exception_msg = (_('Error Verifying '
                           'Service: %(service)s '
                           'Return code: %(ret.status)d '
                           'Message: %(ret.data)s.')
                         % {'service': service,
                            'ret.status': ret.status,
                            'ret.data': ret.data})
        LOG.error(exception_msg)
        raise exception.VolumeBackendAPIException(data=exception_msg)
    data = json.loads(ret.data)['service']
    LOG.debug('%(service)s service state: %(data)s',
              {'service': service, 'data': data})

    status = 'online' if state == 'enable' else 'disabled'
    if data['<status>'] != status:
        exception_msg = (_('%(service)s Service is not %(status)s '
                           'on storage appliance: %(host)s')
                         % {'service': service,
                            'status': status,
                            'host': self.host})
        LOG.error(exception_msg)
        raise exception.VolumeBackendAPIException(data=exception_msg)
def verify_service(self, service, status='online'):
    """Checks whether a service is online or not."""
    svc = self.services_path + service
    ret = self.rclient.get(svc)
    if ret.status != restclient.Status.OK:
        exception_msg = (_('Error Verifying '
                           'Service: %(service)s '
                           'Return code: %(ret.status)d '
                           'Message: %(ret.data)s.')
                         % {'service': service,
                            'ret.status': ret.status,
                            'ret.data': ret.data})
        LOG.error(exception_msg)
        raise exception.VolumeBackendAPIException(data=exception_msg)

    data = json.loads(ret.data)['service']

    if data['<status>'] != status:
        exception_msg = (_('%(service)s Service is not %(status)s '
                           'on storage appliance: %(host)s')
                         % {'service': service,
                            'status': status,
                            'host': self.host})
        LOG.error(exception_msg)
        raise exception.VolumeBackendAPIException(data=exception_msg)
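# NOTE: shape of the REST payload the two methods above parse, shown
# standalone with illustrative values. The appliance wraps the state in
# a 'service' object whose status key is literally '<status>':
import json
ret_data = json.dumps({'service': {'<status>': 'online'}})
data = json.loads(ret_data)['service']
assert data['<status>'] == 'online'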
def unmanage(self, req, id, body):
    """Stop managing a volume.

    This action is very much like a delete, except that a different
    method (unmanage) is called on the Cinder driver. This has the
    effect of removing the volume from Cinder management without
    actually removing the backend storage object associated with it.

    There are no required parameters.

    A Not Found error is returned if the specified volume does not exist.

    A Bad Request error is returned if the specified volume is still
    attached to an instance.
    """
    context = req.environ['cinder.context']
    authorize(context)

    LOG.info(_("Unmanage volume with id: %s"), id, context=context)

    try:
        vol = self.volume_api.get(context, id)
        self.volume_api.delete(context, vol, unmanage_only=True)
    except exception.NotFound:
        msg = _("Volume could not be found")
        raise exc.HTTPNotFound(explanation=msg)
    except exception.VolumeAttached:
        msg = _("Volume cannot be deleted while in attached state")
        raise exc.HTTPBadRequest(explanation=msg)
    return webob.Response(status_int=202)
def get_pool_stats(self, pool):
    """Get space available and total properties of a pool.

    Returns (avail, total).
    """
    svc = '/api/storage/v1/pools/' + pool
    ret = self.rclient.get(svc)
    if ret.status != restclient.Status.OK:
        exception_msg = (_('Error Getting Pool Stats: '
                           'Pool: %(pool)s '
                           'Return code: %(ret.status)d '
                           'Message: %(ret.data)s.')
                         % {'pool': pool,
                            'ret.status': ret.status,
                            'ret.data': ret.data})
        LOG.error(exception_msg)
        raise exception.InvalidVolume(reason=exception_msg)
    val = json.loads(ret.data)
    if not self._is_pool_owned(val):
        exception_msg = (_('Error Pool ownership: '
                           'Pool %(pool)s is not owned '
                           'by %(host)s.')
                         % {'pool': pool,
                            'host': self.host})
        LOG.error(exception_msg)
        raise exception.InvalidInput(reason=exception_msg)
    avail = val['pool']['usage']['available']
    total = val['pool']['usage']['total']
    return avail, total
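# NOTE: shape of the pool-stats payload parsed above, shown standalone
# with illustrative byte counts:
val = {'pool': {'usage': {'available': 10 * 2 ** 30,
                          'total': 20 * 2 ** 30}}}
avail = val['pool']['usage']['available']
total = val['pool']['usage']['total']
assert (avail, total) == (10 * 2 ** 30, 20 * 2 ** 30)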
def delete(self, req, id):
    """Deletes an existing qos specs."""
    context = req.environ['cinder.context']
    authorize(context)

    # Convert string to bool type in strict manner
    force = utils.get_bool_param('force', req.params)
    LOG.debug("Delete qos_spec: %(id)s, force: %(force)s",
              {'id': id, 'force': force})

    try:
        qos_specs.delete(context, id, force)
        notifier_info = dict(id=id)
        rpc.get_notifier('QoSSpecs').info(context,
                                          'qos_specs.delete',
                                          notifier_info)
    except exception.QoSSpecsNotFound as err:
        notifier_err = dict(id=id, error_message=err)
        self._notify_qos_specs_error(context,
                                     'qos_specs.delete',
                                     notifier_err)
        # Not found exception will be handled at the wsgi level
        raise
    except exception.QoSSpecsInUse as err:
        notifier_err = dict(id=id, error_message=err)
        self._notify_qos_specs_error(context,
                                     'qos_specs.delete',
                                     notifier_err)
        if force:
            msg = _('Failed to disassociate qos specs.')
            raise webob.exc.HTTPInternalServerError(explanation=msg)
        msg = _('Qos specs still in use.')
        raise webob.exc.HTTPBadRequest(explanation=msg)

    return webob.Response(status_int=202)
def __init__(self, message=None, **kwargs):
    if kwargs.get('host', None):
        self.message = _("Service %(service_id)s could not be "
                         "found on host %(host)s.")
    else:
        self.message = _("Service %(service_id)s could not be found.")
    super(ServiceNotFound, self).__init__(None, **kwargs)
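# NOTE: illustrative behavior sketch, assuming the CinderException base
# %-formats self.message with the keyword arguments it receives:
try:
    raise ServiceNotFound(service_id=42, host='node1')  # hypothetical args
except ServiceNotFound as e:
    print(e)  # -> Service 42 could not be found on host node1.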
def create_snapshot(self, snapshot):
    """Creates a snapshot."""
    sn_name = self.snapshot_name_from_cinder_snapshot(snapshot)

    d_res_name, d_vol_nr = self._resource_name_volnr_for_volume(
        snapshot["volume_id"])

    res, data = self.call_or_reconnect(self.odm.list_assignments,
                                       self.empty_dict,
                                       [d_res_name],
                                       0,
                                       {CS_DISKLESS: dm_const.BOOL_FALSE},
                                       self.empty_list)
    self._check_result(res)

    nodes = [d[0] for d in data]
    if len(nodes) < 1:
        raise exception.VolumeBackendAPIException(
            data=_('Snapshot res "%s" that is not deployed anywhere?')
            % (d_res_name))

    props = self._priv_hash_from_volume(snapshot)
    res = self.call_or_reconnect(self.odm.create_snapshot,
                                 d_res_name, sn_name, nodes, props)
    self._check_result(res)

    okay = self._call_policy_plugin(self.plugin_snapshot,
                                    self.policy_snapshot,
                                    dict(resource=d_res_name,
                                         snapshot=sn_name))
    if not okay:
        message = (_('DRBDmanage timeout waiting for snapshot creation; '
                     'resource "%(res)s", snapshot "%(sn)s"')
                   % {'res': d_res_name, 'sn': sn_name})
        raise exception.VolumeBackendAPIException(data=message)
class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")


class VolumeTypeAccessExists(Duplicate):
    message = _("Volume type access for %(volume_type_id)s / "
                "%(project_id)s combination already exists.")


class VolumeTypeExists(Duplicate):
    message = _("Volume Type %(id)s already exists.")


class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")


class OverQuota(CinderException):
    message = _("Quota exceeded for resources: %(overs)s")


class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")


class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")


class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")


class InvalidNestedQuotaSetup(CinderException):
    message = _("Project quotas are not properly setup for nested quotas: "
                "%(reason)s.")


class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")


class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")
class QuotaNotFound(NotFound):
    message = _("Quota could not be found.")
class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")


class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")


class ClusterExists(Duplicate):
    message = _("Cluster %(name)s already exists.")


class SchedulerHostWeigherNotFound(NotFound):
    message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")


class ClusterNotFound(NotFound):
    message = _('Cluster %(id)s could not be found.')


class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")


class WorkerExists(Duplicate):
    message = _("Worker for %(type)s %(id)s already exists.")


class ClusterHasHosts(Invalid):
    message = _("Cluster %(id)s still has hosts.")


class ImageNotFound(NotFound):
    message = _("Image %(image_id)s could not be found.")


class CleanableInUse(Invalid):
    message = _('%(type)s with id %(id)s is already being cleaned up or '
                'another host has taken over it.')


class ISCSITargetNotFoundForVolume(NotFound):
    message = _("No target id found for volume %(volume_id)s.")


class ServiceTooOld(Invalid):
    message = _("Service is too old to fulfil this request.")
class VolumeIsBusy(CinderException):
    message = _("Deleting volume %(volume_name)s that has a snapshot.")
class InvalidImageRef(Invalid):
    message = _("Invalid image href %(image_href)s.")


class ServerNotFound(NotFound):
    message = _("Instance %(uuid)s could not be found.")
class SnapshotIsBusy(CinderException):
    message = _("Deleting snapshot %(snapshot_name)s that has "
                "dependent volumes.")
class SnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")


class VolumeSnapshotNotFound(NotFound):
    message = _("No snapshots found for volume %(volume_id)s.")