def check_attach(self, context, volume, instance=None):
    # TODO(vish): abstract status checking?
    if 'multiattach' in volume and bool(volume['multiattach']):
        if volume['status'] not in ("available", "in-use"):
            msg = _("multiattach volume's status must be "
                    "'available' or 'in-use'")
            raise exception.InvalidVolume(reason=msg)
    else:
        if volume['status'] != "available":
            msg = _("volume '%(vol)s' status must be 'available'. Currently "
                    "in '%(status)s'") % {'vol': volume['id'],
                                          'status': volume['status']}
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == "attached":
            msg = _("volume %s already attached") % volume['id']
            raise exception.InvalidVolume(reason=msg)
    if instance and not CONF.cinder.cross_az_attach:
        instance_az = az.get_instance_availability_zone(context, instance)
        if instance_az != volume['availability_zone']:
            msg = _("Instance %(instance)s and volume %(vol)s are not in "
                    "the same availability_zone. Instance is in "
                    "%(ins_zone)s. Volume is in %(vol_zone)s") % {
                        "instance": instance['id'],
                        "vol": volume['id'],
                        'ins_zone': instance_az,
                        'vol_zone': volume['availability_zone']}
            raise exception.InvalidVolume(reason=msg)

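# Hedged, self-contained sketch (not Nova code) of the status rules the
# check above enforces, reduced to plain dicts: multiattach volumes may be
# 'available' or 'in-use', while all others must be 'available' and not
# already attached.
def _attachable(volume):
    if volume.get('multiattach'):
        return volume['status'] in ('available', 'in-use')
    return (volume['status'] == 'available'
            and volume['attach_status'] != 'attached')

assert _attachable({'multiattach': True, 'status': 'in-use'})
assert not _attachable({'status': 'in-use', 'attach_status': 'attached'})
assert _attachable({'status': 'available', 'attach_status': 'detached'})
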
def check_attach(self, context, volume):
    if volume['status'] != 'available':
        msg = _("status must be available")
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == 'attached':
        msg = _("already attached")
        raise exception.InvalidVolume(reason=msg)

def delete(self, context, volume):
    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        self.db.volume_destroy(context, volume_id)
        return
    if volume['status'] not in ["available", "error"]:
        msg = _("Volume status must be available or error")
        raise exception.InvalidVolume(reason=msg)
    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    if len(snapshots):
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)
    now = utils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    host = volume['host']
    rpc.cast(context,
             self.db.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})

def check_attach(self, context, volume, instance=None):
    # TODO(vish): abstract status checking?
    if volume['status'] != "available":
        msg = _("volume '%(vol)s' status must be 'available'. Currently "
                "in '%(status)s'") % {'vol': volume['id'],
                                      'status': volume['status']}
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == "attached":
        msg = _("volume %s already attached") % volume['id']
        raise exception.InvalidVolume(reason=msg)
    if instance and not CONF.cinder.cross_az_attach:
        # NOTE(sorrison): If the instance is on a host we match against its
        # AZ; otherwise we check the intended AZ
        if instance.get('host'):
            instance_az = az.get_instance_availability_zone(context, instance)
        else:
            instance_az = instance['availability_zone']
        if instance_az != volume['availability_zone']:
            msg = _("Instance %(instance)s and volume %(vol)s are not in "
                    "the same availability_zone. Instance is in "
                    "%(ins_zone)s. Volume is in %(vol_zone)s") % {
                        "instance": instance['id'],
                        "vol": volume['id'],
                        'ins_zone': instance_az,
                        'vol_zone': volume['availability_zone']}
            raise exception.InvalidVolume(reason=msg)

def validate_volume(mount_path):
    """Determine if the volume is a valid Quobyte mount.

    Runs a number of tests to be sure this is a (working) Quobyte mount
    """
    partitions = psutil.disk_partitions(all=True)
    for p in partitions:
        if mount_path != p.mountpoint:
            continue
        if p.device.startswith("quobyte@") or p.fstype == "fuse.quobyte":
            statresult = os.stat(mount_path)
            # NOTE(kaisers): Quobyte always shows mount points with size 0
            if statresult.st_size == 0:
                # client looks healthy
                return  # we're happy here
            else:
                msg = (_("The mount %(mount_path)s is not a "
                         "valid Quobyte volume. Stale mount?") %
                       {'mount_path': mount_path})
                raise nova_exception.StaleVolumeMount(msg,
                                                      mount_path=mount_path)
        else:
            msg = (_("The mount %(mount_path)s is not a valid "
                     "Quobyte volume according to partition list.") %
                   {'mount_path': mount_path})
            raise nova_exception.InvalidVolume(msg)
    msg = (_("No matching Quobyte mount entry for %(mount_path)s"
             " could be found for validation in partition list.") %
           {'mount_path': mount_path})
    raise nova_exception.InvalidVolume(msg)

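# Illustration of the psutil call validate_volume() builds on; runnable on
# any machine with psutil installed. Each sdiskpart namedtuple carries the
# device, mountpoint and fstype fields the Quobyte checks match against.
import psutil

for part in psutil.disk_partitions(all=True):
    print(part.device, part.mountpoint, part.fstype)
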
def delete(self, context, volume, force=False):
    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        # NOTE(zhiteng): update volume quota reservation
        try:
            reservations = QUOTAS.reserve(context, volumes=-1,
                                          gigabytes=-volume['size'])
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update quota for deleting volume."))
        self.db.volume_destroy(context, volume_id)
        if reservations:
            QUOTAS.commit(context, reservations)
        return
    if not force and volume['status'] not in ["available", "error"]:
        msg = _("Volume status must be available or error")
        raise exception.InvalidVolume(reason=msg)
    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    if len(snapshots):
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)
    now = timeutils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    host = volume['host']
    rpc.cast(context,
             rpc.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})

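# Minimal sketch of the reserve/commit quota pattern used above, with a
# stub driver (hypothetical; the real QUOTAS object is the oslo-based
# quota engine): reserve first, do the destructive work, then commit.
class StubQuotas:
    def reserve(self, context, **deltas):
        return dict(deltas)  # a real driver returns reservation ids

    def commit(self, context, reservations):
        print('committed', reservations)

QUOTAS = StubQuotas()
reservations = QUOTAS.reserve(None, volumes=-1, gigabytes=-10)
# ... destroy the volume record here ...
QUOTAS.commit(None, reservations)
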
def check_attach(self, context, volume, instance=None):
    # TODO(vish): abstract status checking?
    if volume['shareable'] == 'true':
        if (volume['status'] != "available" and
                volume['status'] != 'in-use'):
            msg = _("shareable volume's status must be "
                    "'available' or 'in-use'")
            raise exception.InvalidVolume(reason=msg)
        if instance:
            for attachment in volume['attachments']:
                if attachment['instance_uuid'] == instance['uuid']:
                    msg = _("the shareable volume has already been attached "
                            "to this instance and cannot be attached again")
                    raise exception.InvalidVolume(reason=msg)
    else:
        if volume['status'] != "available":
            msg = _("status must be 'available'")
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == "attached":
            msg = _("already attached")
            raise exception.InvalidVolume(reason=msg)
    if instance and not CONF.cinder.cross_az_attach:
        # NOTE(sorrison): If the instance is on a host we match against its
        # AZ; otherwise we check the intended AZ
        if instance.get('host'):
            instance_az = az.get_instance_availability_zone(context, instance)
        else:
            instance_az = instance['availability_zone']
        if instance_az != volume['availability_zone']:
            msg = _("Instance and volume not in same availability_zone")
            raise exception.InvalidVolume(reason=msg)

def _check_volume_availability(self, context, volume, force):
    """Check if the volume can be used."""
    if volume['status'] not in ['available', 'in-use']:
        msg = _('Volume status must be available/in-use.')
        raise exception.InvalidVolume(reason=msg)
    if not force and volume['status'] == 'in-use':
        msg = _('Volume status is in-use.')
        raise exception.InvalidVolume(reason=msg)

def check_attach(self, context, volume):
    # TODO(vish): abstract status checking?
    if volume['status'] != "available":
        msg = _("status must be available")
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == "attached":
        msg = _("already attached")
        raise exception.InvalidVolume(reason=msg)

def check_attach(self, context, volume, instance=None):
    if volume['status'] != 'available':
        msg = "Status of volume '%s' must be available" % volume['id']
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == 'attached':
        msg = "already attached"
        raise exception.InvalidVolume(reason=msg)
    if instance and not CONF.cinder.cross_az_attach:
        if instance['availability_zone'] != volume['availability_zone']:
            msg = "Instance and volume not in same availability_zone"
            raise exception.InvalidVolume(reason=msg)

def check_detach(self, context, volume, instance=None):
    if volume['status'] == "available":
        msg = "already detached"
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == 'detached':
        msg = "Volume must be attached in order to detach."
        raise exception.InvalidVolume(reason=msg)
    if instance and not volume.get('attachments', {}).get(instance.uuid):
        raise exception.VolumeUnattached(volume_id=volume['id'])

def check_attach(self, context, volume, instance=None):
    # TODO(vish): abstract status checking?
    if volume['status'] != "available":
        msg = _("volume '%(vol)s' status must be 'available'. Currently "
                "in '%(status)s'") % {'vol': volume['id'],
                                      'status': volume['status']}
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == "attached":
        msg = _("volume %s already attached") % volume['id']
        raise exception.InvalidVolume(reason=msg)
    self.check_availability_zone(context, volume, instance)

def check_attach(self, context, volume, instance=None):
    # TODO(vish): abstract status checking?
    if volume['status'] != "available":
        msg = _("status must be 'available'")
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == "attached":
        msg = _("already attached")
        raise exception.InvalidVolume(reason=msg)
    if instance and not CONF.cinder_cross_az_attach:
        if instance['availability_zone'] != volume['availability_zone']:
            msg = _("Instance and volume not in same availability_zone")
            raise exception.InvalidVolume(reason=msg)

def _create_snapshot(self, context, volume, name, description,
                     force=False):
    check_policy(context, 'create_snapshot', volume)
    if (not force) and (volume['status'] != "available"):
        msg = _("must be available")
        raise exception.InvalidVolume(reason=msg)
    options = {'volume_id': volume['id'],
               'user_id': context.user_id,
               'project_id': context.project_id,
               'status': "creating",
               'progress': '0%',
               'volume_size': volume['size'],
               'display_name': name,
               'display_description': description}
    snapshot = self.db.snapshot_create(context, options)
    rpc.cast(context,
             FLAGS.scheduler_topic,
             {"method": "create_snapshot",
              "args": {"topic": FLAGS.volume_topic,
                       "volume_id": volume['id'],
                       "snapshot_id": snapshot['id']}})
    return snapshot

def get_by_volume_id(cls, context, volume_id,
                     instance_uuid=None, expected_attrs=None):
    if expected_attrs is None:
        expected_attrs = []
    db_bdms = db.block_device_mapping_get_all_by_volume_id(
        context, volume_id, _expected_cols(expected_attrs))
    if not db_bdms:
        raise exception.VolumeBDMNotFound(volume_id=volume_id)
    if len(db_bdms) > 1:
        LOG.warning('Legacy get_by_volume_id() call found multiple '
                    'BDMs for volume %(volume)s',
                    {'volume': volume_id})
    db_bdm = db_bdms[0]
    # NOTE(ndipanov): Move this to the db layer into a
    # get_by_instance_and_volume_id method
    if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
        raise exception.InvalidVolume(
            reason=_("Volume does not belong to the "
                     "requested instance."))
    return cls._from_db_object(context, cls(), db_bdm,
                               expected_attrs=expected_attrs)

def build_volume_driver(adapter, instance, conn_info, stg_ftsk=None):
    drv_type = conn_info.get('driver_volume_type')
    if drv_type != 'fibre_channel':
        reason = _("Invalid connection type of %s") % drv_type
        raise exception.InvalidVolume(reason=reason)
    return fcvscsi.FCVscsiVolumeAdapter(adapter, instance, conn_info,
                                        stg_ftsk=stg_ftsk)

def check_detach(self, context, volume, instance=None):
    # TODO(vish): abstract status checking?
    if volume['status'] == "available":
        msg = _("volume %s already detached") % volume['id']
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == 'detached':
        msg = _("Volume must be attached in order to detach.")
        raise exception.InvalidVolume(reason=msg)
    # NOTE(ildikov): Preparation for multiattach support, when a volume
    # can be attached to multiple hosts and/or instances,
    # so just check the attachment specific to this instance
    if instance is not None and instance.uuid not in volume['attachments']:
        # TODO(ildikov): change it to a better exception when multi-attach
        # is enabled.
        raise exception.VolumeUnattached(volume_id=volume['id'])

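# Self-contained sketch (an assumption-labeled illustration, not Nova
# code) of the detach preconditions above: the volume must be attached at
# all, and attached to *this* instance, before detach is allowed.
def _detachable(volume, instance_uuid):
    if volume['status'] == 'available':
        return False  # already detached
    if volume['attach_status'] == 'detached':
        return False  # nothing to detach
    return instance_uuid in volume['attachments']

vol = {'status': 'in-use', 'attach_status': 'attached',
       'attachments': {'uuid-1': {'mountpoint': '/dev/vdb'}}}
assert _detachable(vol, 'uuid-1')
assert not _detachable(vol, 'uuid-2')
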
def check_attached(self, context, volume):
    if volume['status'] != "in-use":
        msg = _("volume '%(vol)s' status must be 'in-use'. Currently in "
                "'%(status)s' status") % {"vol": volume['id'],
                                          "status": volume['status']}
        raise exception.InvalidVolume(reason=msg)

def check_attach(self, context, volume, instance=None):
    # TODO(vish): abstract status checking?
    if volume['status'] != "available":
        msg = _("status must be 'available'")
        raise exception.InvalidVolume(reason=msg)
    if volume['attach_status'] == "attached":
        msg = _("already attached")
        raise exception.InvalidVolume(reason=msg)
    if instance and not CONF.cinder_cross_az_attach:
        # NOTE(sorrison): If the instance is on a host we match against its
        # AZ; otherwise we check the intended AZ
        if instance.get('host'):
            instance_az = az.get_instance_availability_zone(context, instance)
        else:
            instance_az = instance['availability_zone']
        if instance_az != volume['availability_zone']:
            msg = _("Instance and volume not in same availability_zone")
            raise exception.InvalidVolume(reason=msg)

def get_volume_class(drv_type):
    if drv_type in _STATIC_VOLUME_MAPPINGS:
        class_type = _STATIC_VOLUME_MAPPINGS[drv_type]
    elif drv_type == 'fibre_channel':
        class_type = (
            FC_STRATEGY_MAPPING[CONF.powervm.fc_attach_strategy.lower()])
    else:
        failure_reason = _("Invalid connection type of %s") % drv_type
        raise exception.InvalidVolume(reason=failure_reason)
    return importutils.import_class(class_type)

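# Sketch of the lookup-then-import pattern above, using oslo.utils'
# importutils (a real API) but a made-up mapping; the stdlib class path
# stands in for a PowerVM volume driver class.
from oslo_utils import importutils

_STATIC_VOLUME_MAPPINGS = {'iscsi': 'collections.OrderedDict'}  # hypothetical

def lookup(drv_type):
    try:
        return importutils.import_class(_STATIC_VOLUME_MAPPINGS[drv_type])
    except KeyError:
        raise ValueError('Invalid connection type of %s' % drv_type)

print(lookup('iscsi'))  # <class 'collections.OrderedDict'>
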
def delete_snapshot(self, context, snapshot):
    if snapshot['status'] not in ["available", "error"]:
        msg = _("Volume Snapshot status must be available or error")
        raise exception.InvalidVolume(reason=msg)
    self.db.snapshot_update(context, snapshot['id'],
                            {'status': 'deleting'})
    volume = self.db.volume_get(context, snapshot['volume_id'])
    host = volume['host']
    rpc.cast(context,
             rpc.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_snapshot",
              "args": {"snapshot_id": snapshot['id']}})

def delete_snapshot(self, context, snapshot):
    if snapshot['status'] != "available":
        msg = _("must be available")
        raise exception.InvalidVolume(reason=msg)
    self.db.snapshot_update(context, snapshot['id'],
                            {'status': 'deleting'})
    rpc.cast(context,
             FLAGS.scheduler_topic,
             {"method": "delete_snapshot",
              "args": {"topic": FLAGS.volume_topic,
                       "snapshot_id": snapshot['id']}})

def _parse_vz_share(self, vz_share):
    m = re.match(self.SHARE_FORMAT_REGEX, vz_share)
    if not m:
        msg = _("Valid share format is "
                "[mds[,mds1[...]]:/]clustername[:password]")
        raise exception.InvalidVolume(msg)
    if m.group(1):
        mds_list = m.group(1).split(',')
    else:
        mds_list = None
    return VzShare(cluster_name=m.group(2),
                   mds_list=mds_list,
                   password=m.group(3))

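# Hedged sketch: an illustrative regex (assumed for demonstration, not the
# driver's actual SHARE_FORMAT_REGEX) for the documented share format
# [mds[,mds1[...]]:/]clustername[:password].
import re

SHARE_RE = r'(?:(\S+):/)?([a-zA-Z0-9_-]+)(?::(\S+))?$'

m = re.match(SHARE_RE, 'mds1,mds2:/cluster1:secret')
assert m.group(1) == 'mds1,mds2'
assert m.group(2) == 'cluster1'
assert m.group(3) == 'secret'
assert re.match(SHARE_RE, 'cluster1').group(2) == 'cluster1'
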
def check_availability_zone(self, context, volume, instance=None):
    """Ensure that the availability zone is the same."""
    # TODO(walter-boring): move this check to Cinder as part of
    # the reserve call.
    if instance and not CONF.cinder.cross_az_attach:
        instance_az = az.get_instance_availability_zone(context, instance)
        if instance_az != volume['availability_zone']:
            msg = _("Instance %(instance)s and volume %(vol)s are not in "
                    "the same availability_zone. Instance is in "
                    "%(ins_zone)s. Volume is in %(vol_zone)s") % {
                        "instance": instance.uuid,
                        "vol": volume['id'],
                        'ins_zone': instance_az,
                        'vol_zone': volume['availability_zone']}
            raise exception.InvalidVolume(reason=msg)

def get_by_volume_id(cls, context, volume_id,
                     instance_uuid=None, expected_attrs=None):
    if expected_attrs is None:
        expected_attrs = []
    db_bdm = db.block_device_mapping_get_by_volume_id(
        context, volume_id, _expected_cols(expected_attrs))
    if not db_bdm:
        raise exception.VolumeBDMNotFound(volume_id=volume_id)
    # NOTE(ndipanov): Move this to the db layer into a
    # get_by_instance_and_volume_id method
    if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
        raise exception.InvalidVolume(
            reason=_("Volume does not belong to the "
                     "requested instance."))
    return cls._from_db_object(context, cls(), db_bdm,
                               expected_attrs=expected_attrs)

def delete_volume(self, context, volume_id):
    """Deletes and unexports volume."""
    context = context.elevated()
    volume_ref = self.db.volume_get(context, volume_id)
    if volume_ref['attach_status'] == "attached":
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume_id)
    if volume_ref['host'] != self.host:
        raise exception.InvalidVolume(
            reason=_("Volume is not local to this node"))

    self._notify_about_volume_usage(context, volume_ref, "delete.start")
    self._reset_stats()
    try:
        LOG.debug(_("volume %s: removing export"), volume_ref['name'])
        self.driver.remove_export(context, volume_ref)
        LOG.debug(_("volume %s: deleting"), volume_ref['name'])
        self.driver.delete_volume(volume_ref)
    except exception.VolumeIsBusy:
        LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
        self.driver.ensure_export(context, volume_ref)
        self.db.volume_update(context, volume_ref['id'],
                              {'status': 'available'})
        return True
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'error_deleting'})

    # Get reservations
    try:
        reservations = QUOTAS.reserve(context, volumes=-1,
                                      gigabytes=-volume_ref['size'])
    except Exception:
        reservations = None
        LOG.exception(_("Failed to update usages deleting volume"))

    volume_ref = self.db.volume_destroy(context, volume_id)
    LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
    self._notify_about_volume_usage(context, volume_ref, "delete.end")

    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations)
    return True

def delete(self, context, volume):
    volume_id = volume['id']
    if volume['status'] != "available":
        msg = _("Volume status must be available")
        raise exception.InvalidVolume(reason=msg)
    now = utils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    host = volume['host']
    rpc.cast(context,
             self.db.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})

def fake_attach_volume_invalid_volume(self, context, instance, volume_id,
                                      device, disk_bus, device_type):
    raise exception.InvalidVolume(reason='')


def fake_detach_volume_invalid_volume(self, context, instance, volume):
    raise exception.InvalidVolume(reason='')


def fake_swap_volume_invalid_volume(self, context, instance, volume_id,
                                    device):
    raise exception.InvalidVolume(reason='', volume_id=volume_id)