def host_passes(self, host_state, filter_properties):
    """Return a truthy value only when the backend hosts the volumes
    named in the 'same_host' scheduler hint (or when no hint is given).
    """
    context = filter_properties['context']
    hints = filter_properties.get('scheduler_hints') or {}
    requested = hints.get('same_host', [])

    # The hint may be a single uuid string or a list of them.  Every
    # entry must look like a uuid, otherwise the filter fails outright:
    # it is better to fail the request than to serve it wrong, and a
    # non-uuid value must never reach the DB query below.
    if isinstance(requested, list):
        if not all(uuidutils.is_uuid_like(item) for item in requested):
            return False
    elif uuidutils.is_uuid_like(requested):
        requested = [requested]
    else:
        # Neither a list nor a uuid-like string; refuse to pass it to
        # the DB for query to avoid potential risk.
        return False

    if not requested:
        # No 'same_host' key (or an empty list): nothing to match.
        return True
    return self.volume_api.get_all(
        context, filters={'host': host_state.host,
                          'id': requested,
                          'deleted': False})
def do_attach():
    """Validate status, reserve the volume, then attach via the driver."""
    # check the volume status before attaching
    volume = self.db.volume_get(context, volume_id)
    if volume['status'] == 'attaching':
        # Only the instance already attaching may retry.
        if (volume['instance_uuid'] and
                volume['instance_uuid'] != instance_uuid):
            msg = _("being attached by another instance")
            raise exception.InvalidVolume(reason=msg)
    elif volume['status'] != "available":
        msg = _("status must be available")
        raise exception.InvalidVolume(reason=msg)

    self.db.volume_update(context, volume_id,
                          {"instance_uuid": instance_uuid,
                           "status": "attaching"})

    # TODO(vish): refactor this into a more general "reserve"
    # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
    if not uuidutils.is_uuid_like(instance_uuid):
        # BUG FIX: roll the status back before raising; previously the
        # volume was left stuck in 'attaching' with no way to recover.
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidUUID(uuid=instance_uuid)

    try:
        self.driver.attach_volume(context, volume_id, instance_uuid,
                                  mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
    self.db.volume_attached(context.elevated(), volume_id, instance_uuid,
                            mountpoint)
def do_attach():
    """Validate status, reserve the volume, then attach via the driver."""
    # check the volume status before attaching
    volume = self.db.volume_get(context, volume_id)
    if volume['status'] == 'attaching':
        # Only the instance already attaching may retry.
        if (volume['instance_uuid'] and
                volume['instance_uuid'] != instance_uuid):
            msg = _("being attached by another instance")
            raise exception.InvalidVolume(reason=msg)
    elif volume['status'] != "available":
        msg = _("status must be available")
        raise exception.InvalidVolume(reason=msg)

    self.db.volume_update(context, volume_id, {
        "instance_uuid": instance_uuid,
        "status": "attaching"
    })

    # TODO(vish): refactor this into a more general "reserve"
    # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
    if not uuidutils.is_uuid_like(instance_uuid):
        # BUG FIX: mark the failure before raising; previously the
        # volume was left stuck in 'attaching' with no way to recover.
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidUUID(uuid=instance_uuid)

    try:
        self.driver.attach_volume(context, volume_id, instance_uuid,
                                  mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
    self.db.volume_attached(context.elevated(), volume_id, instance_uuid,
                            mountpoint)
def do_attach():
    """Reserve the volume for this attacher, then attach via the driver."""
    # check the volume status before attaching
    volume = self.db.volume_get(context, volume_id)
    volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
                                                        volume_id)
    if volume["status"] == "attaching":
        # An attach is already in flight; only the very same
        # instance/host/mode combination may proceed (idempotent retry).
        if volume["instance_uuid"] and volume["instance_uuid"] != instance_uuid:
            msg = _("being attached by another instance")
            raise exception.InvalidVolume(reason=msg)
        if volume["attached_host"] and volume["attached_host"] != host_name:
            msg = _("being attached by another host")
            raise exception.InvalidVolume(reason=msg)
        if volume_metadata.get("attached_mode") and volume_metadata.get("attached_mode") != mode:
            msg = _("being attached by different mode")
            raise exception.InvalidVolume(reason=msg)
    elif volume["status"] != "available":
        msg = _("status must be available or attaching")
        raise exception.InvalidVolume(reason=msg)

    # TODO(jdg): attach_time column is currently varchar
    # we should update this to a date-time object
    # also consider adding detach_time?
    self._notify_about_volume_usage(context, volume, "attach.start")
    self.db.volume_update(
        context,
        volume_id,
        {
            "instance_uuid": instance_uuid,
            "attached_host": host_name,
            "status": "attaching",
            "attach_time": timeutils.strtime(),
        },
    )
    self.db.volume_admin_metadata_update(context.elevated(), volume_id,
                                         {"attached_mode": mode}, False)

    # Validated after reserving so a bad uuid leaves the volume in
    # 'error_attaching' rather than stuck in 'attaching'.
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        self.db.volume_update(context, volume_id,
                              {"status": "error_attaching"})
        raise exception.InvalidUUID(uuid=instance_uuid)

    host_name_sanitized = utils.sanitize_hostname(host_name) if host_name else None

    # Re-read the row: it was updated above.
    volume = self.db.volume_get(context, volume_id)
    # A readonly volume may only be attached in 'ro' mode.
    if volume_metadata.get("readonly") == "True" and mode != "ro":
        self.db.volume_update(context, volume_id,
                              {"status": "error_attaching"})
        raise exception.InvalidVolumeAttachMode(mode=mode,
                                                volume_id=volume_id)
    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)
        self.driver.attach_volume(context, volume, instance_uuid,
                                  host_name_sanitized, mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {"status": "error_attaching"})

    volume = self.db.volume_attached(
        context.elevated(), volume_id, instance_uuid, host_name_sanitized,
        mountpoint
    )
    self._notify_about_volume_usage(context, volume, "attach.end")
def _check_body(self, body, action_name):
    """Validate the request body and the 'project' id it carries.

    Raises HTTPBadRequest when the body is malformed or when the
    project value is not uuid-like.
    """
    if not self.is_valid_body(body, action_name):
        raise webob.exc.HTTPBadRequest()
    project = body[action_name].get('project')
    if uuidutils.is_uuid_like(project):
        return
    msg = _("Bad project format: "
            "project is not in proper format (%s)") % project
    raise webob.exc.HTTPBadRequest(explanation=msg)
def do_attach():
    """Validate state, reserve the volume, then attach via the driver."""
    volume = self.db.volume_get(context, volume_id)
    status = volume['status']
    if status == 'attaching':
        # A concurrent attach may already own this volume; only the very
        # same instance/host pair may proceed.
        owner_uuid = volume['instance_uuid']
        if owner_uuid and owner_uuid != instance_uuid:
            raise exception.InvalidVolume(
                reason=_("being attached by another instance"))
        owner_host = volume['attached_host']
        if owner_host and owner_host != host_name:
            raise exception.InvalidVolume(
                reason=_("being attached by another host"))
    elif status != "available":
        raise exception.InvalidVolume(reason=_("status must be available"))

    # TODO(jdg): attach_time column is currently varchar
    # we should update this to a date-time object
    # also consider adding detach_time?
    attach_time = timeutils.strtime()
    self.db.volume_update(context, volume_id,
                          {"instance_uuid": instance_uuid,
                           "attached_host": host_name,
                           "status": "attaching",
                           "attach_time": attach_time})

    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        # Record the failure so the volume does not stay in 'attaching'.
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidUUID(uuid=instance_uuid)

    if host_name:
        host_name_sanitized = utils.sanitize_hostname(host_name)
    else:
        host_name_sanitized = None

    # Re-read the row: it was updated above.
    volume = self.db.volume_get(context, volume_id)
    try:
        self.driver.attach_volume(context, volume, instance_uuid,
                                  host_name_sanitized, mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
    self.db.volume_attached(context.elevated(), volume_id, instance_uuid,
                            host_name_sanitized, mountpoint)
def param2id(object_id):
    """Helper function to convert various id types to internal id.

    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
    """
    if uuidutils.is_uuid_like(object_id):
        return object_id
    if '-' in object_id:
        # FIXME(ja): mapping occurs in nova?
        # Non-uuid dashed ids are not translated; returns None for now.
        return None
    try:
        return int(object_id)
    except ValueError:
        return object_id
def param2id(object_id):
    """Helper function to convert various id types to internal id.

    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
    """
    if uuidutils.is_uuid_like(object_id):
        return object_id
    elif "-" not in object_id:
        # Plain ids are converted to int when possible.
        try:
            return int(object_id)
        except ValueError:
            return object_id
    # FIXME(ja): mapping occurs in nova?
    # Dashed non-uuid ids fall through and yield None for now.
def _image_uuid_from_href(self, image_href):
    """Extract a uuid from an image href and validate it.

    Raises HTTPBadRequest when the href is malformed or its last path
    segment is not uuid-like.
    """
    bad_ref = _("Invalid imageRef provided.")
    # If the image href was generated by nova api, strip image_href
    # down to an id (the last path segment).
    try:
        image_uuid = image_href.split('/').pop()
    except (TypeError, AttributeError):
        raise exc.HTTPBadRequest(explanation=bad_ref)

    if not uuidutils.is_uuid_like(image_uuid):
        raise exc.HTTPBadRequest(explanation=bad_ref)

    return image_uuid
def host_passes(self, host_state, filter_properties):
    """Return True when this backend is local to the hinted instance.

    Reads the 'local_to_instance' scheduler hint, resolves that
    instance's host via the Nova API (cached per instance uuid) and
    compares it with this backend's host.
    """
    context = filter_properties['context']
    host = volume_utils.extract_host(host_state.host, 'host')

    scheduler_hints = filter_properties.get('scheduler_hints') or {}
    instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)

    # Without 'local_to_instance' hint
    if not instance_uuid:
        return True

    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    # TODO(adrienverge): Currently it is not recommended to allow instance
    # migrations for hypervisors where this hint will be used. In case of
    # instance migration, a previously locally-created volume will not be
    # automatically migrated. Also in case of instance migration during the
    # volume's scheduling, the result is unpredictable. A future
    # enhancement would be to subscribe to Nova migration events (e.g. via
    # Ceilometer).

    # First, lookup for already-known information in local cache
    if instance_uuid in self._cache:
        return self._cache[instance_uuid] == host

    if not self._nova_has_extended_server_attributes(context):
        LOG.warning(_LW('Hint "%s" dropped because '
                        'ExtendedServerAttributes not active in Nova.'),
                    HINT_KEYWORD)
        raise exception.CinderException(_('Hint "%s" not supported.') %
                                        HINT_KEYWORD)

    server = nova.API().get_server(context, instance_uuid,
                                   privileged_user=True,
                                   timeout=REQUESTS_TIMEOUT)

    if not hasattr(server, INSTANCE_HOST_PROP):
        LOG.warning(_LW('Hint "%s" dropped because Nova did not return '
                        'enough information. Either Nova policy needs to '
                        'be changed or a privileged account for Nova '
                        'should be specified in conf.'), HINT_KEYWORD)
        raise exception.CinderException(_('Hint "%s" not supported.') %
                                        HINT_KEYWORD)

    # Cache the resolved host so later runs for the same instance skip
    # the Nova round-trip.
    self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

    # Match if given instance is hosted on host
    return self._cache[instance_uuid] == host
def do_attach():
    """Validate state, reserve the volume, then hand off to the driver."""
    volume = self.db.volume_get(context, volume_id)
    current = volume['status']
    if current == 'attaching':
        # Permit only the same instance/host to retry an in-flight attach.
        holder_uuid = volume['instance_uuid']
        if holder_uuid and holder_uuid != instance_uuid:
            raise exception.InvalidVolume(
                reason=_("being attached by another instance"))
        holder_host = volume['attached_host']
        if holder_host and holder_host != host_name:
            raise exception.InvalidVolume(
                reason=_("being attached by another host"))
    elif current != "available":
        raise exception.InvalidVolume(reason=_("status must be available"))

    # TODO(jdg): attach_time column is currently varchar
    # we should update this to a date-time object
    # also consider adding detach_time?
    reservation = {"instance_uuid": instance_uuid,
                   "attached_host": host_name,
                   "status": "attaching",
                   "attach_time": timeutils.strtime()}
    self.db.volume_update(context, volume_id, reservation)

    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        # Mark the failure so the volume is not stuck in 'attaching'.
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidUUID(uuid=instance_uuid)

    host_name_sanitized = None
    if host_name:
        host_name_sanitized = utils.sanitize_hostname(host_name)

    # Re-read the row: it was updated above.
    volume = self.db.volume_get(context, volume_id)
    try:
        self.driver.attach_volume(context, volume, instance_uuid,
                                  host_name_sanitized, mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
    self.db.volume_attached(context.elevated(), volume_id, instance_uuid,
                            host_name_sanitized, mountpoint)
def do_attach():
    """Reserve the volume for this attacher, then attach via the driver."""
    volume = self.db.volume_get(context, volume_id)
    state = volume['status']
    if state == 'attaching':
        # Only the same instance/host may re-enter an in-flight attach.
        claimed_uuid = volume['instance_uuid']
        if claimed_uuid and claimed_uuid != instance_uuid:
            raise exception.InvalidVolume(
                reason=_("being attached by another instance"))
        claimed_host = volume['attached_host']
        if claimed_host and claimed_host != host_name:
            raise exception.InvalidVolume(
                reason=_("being attached by another host"))
    elif state != "available":
        raise exception.InvalidVolume(reason=_("status must be available"))

    self.db.volume_update(context, volume_id,
                          {"instance_uuid": instance_uuid,
                           "attached_host": host_name,
                           "status": "attaching"})

    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        # Mark the failure so the volume is not stuck in 'attaching'.
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidUUID(uuid=instance_uuid)

    if host_name:
        host_name_sanitized = utils.sanitize_hostname(host_name)
    else:
        host_name_sanitized = None

    try:
        self.driver.attach_volume(context, volume_id, instance_uuid,
                                  host_name_sanitized, mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
    self.db.volume_attached(context.elevated(), volume_id, instance_uuid,
                            host_name_sanitized, mountpoint)
def _image_uuid_from_ref(self, image_ref, context):
    """Resolve an image reference (href, uuid or name) to an image id.

    Raises HTTPBadRequest when the ref is malformed or no image can be
    found, and HTTPConflict when a name matches more than one image.
    """
    # If the image ref was generated by nova api, strip image_ref
    # down to an id.
    image_uuid = None
    try:
        image_uuid = image_ref.split('/').pop()
    except AttributeError:
        msg = _("Invalid imageRef provided.")
        raise exc.HTTPBadRequest(explanation=msg)

    image_service = glance.get_default_image_service()

    # First see if this is an actual image ID
    if uuidutils.is_uuid_like(image_uuid):
        try:
            image = image_service.show(context, image_uuid)
            if 'id' in image:
                return image['id']
        except Exception:
            # Pass and see if there is a matching image name
            pass

    # Could not find by ID, check if it is an image name
    try:
        params = {'filters': {'name': image_ref}}
        images = list(image_service.detail(context, **params))
        if len(images) > 1:
            msg = _("Multiple matches found for '%s', use an ID to be more"
                    " specific.") % image_ref
            raise exc.HTTPConflict(msg)
        # At most one match remains; return its id.
        for img in images:
            return img['id']
    except exc.HTTPConflict:
        # BUG FIX: without this clause the HTTPConflict raised above was
        # swallowed by the broad handler below and degraded to a generic
        # HTTPBadRequest.
        raise
    except Exception:
        # Pass and let default not found error handling take care of it
        pass

    msg = _("Invalid image identifier or unable to "
            "access requested image.")
    raise exc.HTTPBadRequest(explanation=msg)
def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
    """Updates db to show volume is attached.

    Validates the instance uuid, asks the driver to attach, and records
    the attachment; on driver failure the volume status is set to
    'error_attaching' and the exception is re-raised.
    """
    # TODO(vish): refactor this into a more general "reserve"
    # TODO(sleepsonthefloor): Is this 'elevated' appropriate?
    if not uuidutils.is_uuid_like(instance_uuid):
        # BUG FIX: pass the uuid as the 'uuid' kwarg — it was passed
        # positionally, unlike every other call site in this file, so
        # the exception message template could not be formatted.
        raise exception.InvalidUUID(uuid=instance_uuid)

    try:
        self.driver.attach_volume(context, volume_id, instance_uuid,
                                  mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})
    self.db.volume_attached(context.elevated(), volume_id, instance_uuid,
                            mountpoint)
def do_attach():
    """Check status, mark the volume 'attaching', then attach it."""
    volume = self.db.volume_get(context, volume_id)
    state = volume["status"]
    if state == "attaching":
        # An attach already in flight may only be retried by the same
        # instance.
        claimed_by = volume["instance_uuid"]
        if claimed_by and claimed_by != instance_uuid:
            raise exception.InvalidVolume(
                reason=_("being attached by another instance"))
    elif state != "available":
        raise exception.InvalidVolume(reason=_("status must be available"))

    self.db.volume_update(context, volume_id,
                          {"instance_uuid": instance_uuid,
                           "status": "attaching"})

    if not uuidutils.is_uuid_like(instance_uuid):
        # Record the failure so the volume does not stay in 'attaching'.
        self.db.volume_update(context, volume_id,
                              {"status": "error_attaching"})
        raise exception.InvalidUUID(uuid=instance_uuid)

    try:
        self.driver.attach_volume(context, volume_id, instance_uuid,
                                  mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {"status": "error_attaching"})
    self.db.volume_attached(context.elevated(), volume_id, instance_uuid,
                            mountpoint)
def retype(self, context, volume, new_type, migration_policy=None):
    """Attempt to modify the type associated with an existing volume."""
    if volume['status'] not in ['available', 'in-use']:
        msg = _('Unable to update type due to incorrect status '
                'on volume: %s') % volume['id']
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if volume['migration_status'] is not None:
        msg = (_("Volume %s is already part of an active migration.")
               % volume['id'])
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if migration_policy and migration_policy not in ['on-demand', 'never']:
        msg = _('migration_policy must be \'on-demand\' or \'never\', '
                'passed: %s') % new_type
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    # Support specifying volume type by ID or name
    try:
        if uuidutils.is_uuid_like(new_type):
            vol_type = volume_types.get_volume_type(context, new_type)
        else:
            vol_type = volume_types.get_volume_type_by_name(context,
                                                            new_type)
    except exception.InvalidVolumeType:
        msg = _('Invalid volume_type passed: %s') % new_type
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    vol_type_id = vol_type['id']
    vol_type_qos_id = vol_type['qos_specs_id']

    old_vol_type = None
    old_vol_type_id = volume['volume_type_id']
    old_vol_type_qos_id = None

    # Error if the original and new type are the same
    if volume['volume_type_id'] == vol_type_id:
        msg = (_('New volume_type same as original: %s') % new_type)
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    if volume['volume_type_id']:
        old_vol_type = volume_types.get_volume_type(
            context, old_vol_type_id)
        old_vol_type_qos_id = old_vol_type['qos_specs_id']

    # We don't support changing encryption requirements yet
    old_enc = volume_types.get_volume_type_encryption(context,
                                                      old_vol_type_id)
    new_enc = volume_types.get_volume_type_encryption(context,
                                                     vol_type_id)
    if old_enc != new_enc:
        msg = _('Retype cannot change encryption requirements')
        raise exception.InvalidInput(reason=msg)

    # We don't support changing QoS at the front-end yet for in-use
    # volumes
    # TODO(avishay): Call Nova to change QoS setting (libvirt has support
    # - virDomainSetBlockIoTune() - Nova does not have support yet).
    if (volume['status'] != 'available' and
            old_vol_type_qos_id != vol_type_qos_id):
        for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
            if qos_id:
                specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
                if specs['qos_specs']['consumer'] != 'back-end':
                    msg = _('Retype cannot change front-end qos specs for '
                            'in-use volumes')
                    raise exception.InvalidInput(reason=msg)

    # We're checking here in so that we can report any quota issues as
    # early as possible, but won't commit until we change the type. We
    # pass the reservations onward in case we need to roll back.
    reservations = quota_utils.get_volume_type_reservation(context, volume,
                                                           vol_type_id)

    self.update(context, volume, {'status': 'retyping'})

    request_spec = {'volume_properties': volume,
                    'volume_id': volume['id'],
                    'volume_type': vol_type,
                    'migration_policy': migration_policy,
                    'quota_reservations': reservations}

    self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
                                 request_spec=request_spec,
                                 filter_properties={})
def create(self, req, body):
    """Instruct Cinder to manage a storage object.

    Manages an existing backend storage object (e.g. a Linux logical
    volume or a SAN disk) by creating the Cinder objects required to manage
    it, and possibly renaming the backend storage object (driver dependent)

    From an API perspective, this operation behaves very much like a
    volume creation operation, except that properties such as image,
    snapshot and volume references don't make sense, because we are taking
    an existing storage object into Cinder management.

    Required HTTP Body:

    {
      'volume':
      {
        'host': <Cinder host on which the existing storage resides>,
        'ref':  <Driver-specific reference to the existing storage object>,
      }
    }

    See the appropriate Cinder drivers' implementations of the
    manage_volume method to find out the accepted format of 'ref'.

    This API call will return with an error if any of the above elements
    are missing from the request, or if the 'host' element refers to a
    cinder host that is not registered.

    The volume will later enter the error state if it is discovered that
    'ref' is bad.

    Optional elements to 'volume' are:
      name               A name for the new volume.
      description        A description for the new volume.
      volume_type        ID or name of a volume type to associate with
                         the new Cinder volume.  Does not necessarily
                         guarantee that the managed volume will have the
                         properties described in the volume_type.  The
                         driver may choose to fail if it identifies that
                         the specified volume_type is not compatible with
                         the backend storage object.
      metadata           Key/value pairs to be associated with the new
                         volume.
      availability_zone  The availability zone to associate with the new
                         volume.
    """
    context = req.environ['cinder.context']
    authorize(context)

    if not self.is_valid_body(body, 'volume'):
        msg = _("Missing required element '%s' in request body") % 'volume'
        raise exc.HTTPBadRequest(explanation=msg)

    volume = body['volume']

    # Check that the required keys are present, return an error if they
    # are not.
    required_keys = set(['ref', 'host'])
    missing_keys = list(required_keys - set(volume.keys()))

    if missing_keys:
        msg = _("The following elements are required: %s") % \
            ', '.join(missing_keys)
        raise exc.HTTPBadRequest(explanation=msg)

    LOG.debug('Manage volume request body: %s', body)

    kwargs = {}

    # Volume type may be specified by ID or by name.
    req_volume_type = volume.get('volume_type', None)
    if req_volume_type:
        try:
            if not uuidutils.is_uuid_like(req_volume_type):
                kwargs['volume_type'] = \
                    volume_types.get_volume_type_by_name(
                        context, req_volume_type)
            else:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, req_volume_type)
        except exception.VolumeTypeNotFound:
            msg = _("Volume type not found.")
            raise exc.HTTPNotFound(explanation=msg)
    else:
        kwargs['volume_type'] = {}

    kwargs['name'] = volume.get('name', None)
    kwargs['description'] = volume.get('description', None)
    kwargs['metadata'] = volume.get('metadata', None)
    kwargs['availability_zone'] = volume.get('availability_zone', None)

    try:
        new_volume = self.volume_api.manage_existing(context,
                                                     volume['host'],
                                                     volume['ref'],
                                                     **kwargs)
    except exception.ServiceNotFound:
        msg = _("Service not found.")
        raise exc.HTTPNotFound(explanation=msg)

    new_volume = dict(new_volume.iteritems())
    utils.add_visible_admin_metadata(context, new_volume, self.volume_api)

    return self._view_builder.detail(req, new_volume)
def do_attach():
    """Reserve the volume for this attacher, then attach via the driver."""
    # check the volume status before attaching
    volume = self.db.volume_get(context, volume_id)
    volume_metadata = self.db.volume_admin_metadata_get(
        context.elevated(), volume_id)
    if volume['status'] == 'attaching':
        # An attach is already in flight; only the very same
        # instance/host/mode combination may proceed (idempotent retry).
        if (volume['instance_uuid'] and
                volume['instance_uuid'] != instance_uuid):
            msg = _("being attached by another instance")
            raise exception.InvalidVolume(reason=msg)
        if (volume['attached_host'] and
                volume['attached_host'] != host_name):
            msg = _("being attached by another host")
            raise exception.InvalidVolume(reason=msg)
        if (volume_metadata.get('attached_mode') and
                volume_metadata.get('attached_mode') != mode):
            msg = _("being attached by different mode")
            raise exception.InvalidVolume(reason=msg)
    elif volume['status'] != "available":
        msg = _("status must be available")
        raise exception.InvalidVolume(reason=msg)

    # TODO(jdg): attach_time column is currently varchar
    # we should update this to a date-time object
    # also consider adding detach_time?
    self._notify_about_volume_usage(context, volume, "attach.start")
    self.db.volume_update(
        context, volume_id,
        {
            "instance_uuid": instance_uuid,
            "attached_host": host_name,
            "status": "attaching",
            "attach_time": timeutils.strtime()
        })
    self.db.volume_admin_metadata_update(context.elevated(), volume_id,
                                         {"attached_mode": mode}, False)

    # Validated after reserving so a bad uuid leaves the volume in
    # 'error_attaching' rather than stuck in 'attaching'.
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidUUID(uuid=instance_uuid)

    host_name_sanitized = utils.sanitize_hostname(
        host_name) if host_name else None

    # Re-read the row: it was updated above.
    volume = self.db.volume_get(context, volume_id)
    # A readonly volume may only be attached in 'ro' mode.
    if volume_metadata.get('readonly') == 'True' and mode != 'ro':
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidVolumeAttachMode(mode=mode,
                                                volume_id=volume_id)
    try:
        self.driver.attach_volume(context, volume, instance_uuid,
                                  host_name_sanitized, mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})

    volume = self.db.volume_attached(context.elevated(),
                                     volume_id,
                                     instance_uuid,
                                     host_name_sanitized,
                                     mountpoint)
    self._notify_about_volume_usage(context, volume, "attach.end")
def create(self, req, body):
    """Creates a new volume."""
    if not self.is_valid_body(body, 'volume'):
        raise exc.HTTPUnprocessableEntity()

    LOG.debug('Create volume request body: %s', body)
    context = req.environ['cinder.context']
    volume = body['volume']

    kwargs = {}

    # Volume type may be specified by ID or by name.
    req_volume_type = volume.get('volume_type', None)
    if req_volume_type:
        try:
            if not uuidutils.is_uuid_like(req_volume_type):
                kwargs['volume_type'] = \
                    volume_types.get_volume_type_by_name(
                        context, req_volume_type)
            else:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, req_volume_type)
        except exception.VolumeTypeNotFound:
            explanation = 'Volume type not found.'
            raise exc.HTTPNotFound(explanation=explanation)

    kwargs['metadata'] = volume.get('metadata', None)

    snapshot_id = volume.get('snapshot_id')
    if snapshot_id is not None:
        try:
            kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                              snapshot_id)
        except exception.NotFound:
            explanation = _('snapshot id:%s not found') % snapshot_id
            raise exc.HTTPNotFound(explanation=explanation)
    else:
        kwargs['snapshot'] = None

    source_volid = volume.get('source_volid')
    if source_volid is not None:
        try:
            kwargs['source_volume'] = \
                self.volume_api.get_volume(context,
                                           source_volid)
        except exception.NotFound:
            explanation = _('source vol id:%s not found') % source_volid
            raise exc.HTTPNotFound(explanation=explanation)
    else:
        kwargs['source_volume'] = None

    # When no explicit size is given, inherit it from the snapshot or
    # the source volume.
    size = volume.get('size', None)
    if size is None and kwargs['snapshot'] is not None:
        size = kwargs['snapshot']['volume_size']
    elif size is None and kwargs['source_volume'] is not None:
        size = kwargs['source_volume']['size']

    LOG.audit(_("Create volume of %s GB"), size, context=context)

    image_href = None
    image_uuid = None
    if self.ext_mgr.is_loaded('os-image-create'):
        # NOTE(jdg): misleading name "imageRef" as it's an image-id
        image_href = volume.get('imageRef')
        # NOTE(review): unlike sibling revisions, this one does not
        # reject snapshot_id and imageRef supplied together — confirm
        # this is intended.
        if image_href:
            image_uuid = self._image_uuid_from_href(image_href)
            kwargs['image_id'] = image_uuid

    kwargs['availability_zone'] = volume.get('availability_zone',
                                             None)

    new_volume = self.volume_api.create(context,
                                        size,
                                        volume.get('display_name'),
                                        volume.get('display_description'),
                                        **kwargs)

    # TODO(vish): Instance should be None at db layer instead of
    #             trying to lazy load, but for now we turn it into
    #             a dict to avoid an error.
    new_volume = dict(new_volume.iteritems())
    self._add_visible_admin_metadata(context, new_volume)

    retval = _translate_volume_detail_view(context, new_volume, image_uuid)

    return {'volume': retval}
def retype(self, context, volume, new_type, migration_policy=None):
    """Attempt to modify the type associated with an existing volume."""
    if volume['status'] not in ['available', 'in-use']:
        msg = _('Unable to update type due to incorrect status '
                'on volume: %s') % volume['id']
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if volume['migration_status'] is not None:
        msg = (_("Volume %s is already part of an active migration.")
               % volume['id'])
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if migration_policy and migration_policy not in ['on-demand', 'never']:
        msg = _('migration_policy must be \'on-demand\' or \'never\', '
                'passed: %s') % new_type
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    # Support specifying volume type by ID or name
    try:
        if uuidutils.is_uuid_like(new_type):
            vol_type = volume_types.get_volume_type(context, new_type)
        else:
            vol_type = volume_types.get_volume_type_by_name(
                context, new_type)
    except exception.InvalidVolumeType:
        msg = _('Invalid volume_type passed: %s') % new_type
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    vol_type_id = vol_type['id']
    vol_type_qos_id = vol_type['qos_specs_id']

    old_vol_type = None
    old_vol_type_id = volume['volume_type_id']
    old_vol_type_qos_id = None

    # Error if the original and new type are the same
    if volume['volume_type_id'] == vol_type_id:
        msg = (_('New volume_type same as original: %s') % new_type)
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    if volume['volume_type_id']:
        old_vol_type = volume_types.get_volume_type(
            context, old_vol_type_id)
        old_vol_type_qos_id = old_vol_type['qos_specs_id']

    # We don't support changing encryption requirements yet
    old_enc = volume_types.get_volume_type_encryption(
        context, old_vol_type_id)
    new_enc = volume_types.get_volume_type_encryption(context,
                                                      vol_type_id)
    if old_enc != new_enc:
        msg = _('Retype cannot change encryption requirements')
        raise exception.InvalidInput(reason=msg)

    # We don't support changing QoS at the front-end yet for in-use
    # volumes
    # TODO(avishay): Call Nova to change QoS setting (libvirt has support
    # - virDomainSetBlockIoTune() - Nova does not have support yet).
    if (volume['status'] != 'available' and
            old_vol_type_qos_id != vol_type_qos_id):
        for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
            if qos_id:
                specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
                if specs['qos_specs']['consumer'] != 'back-end':
                    msg = _('Retype cannot change front-end qos specs for '
                            'in-use volumes')
                    raise exception.InvalidInput(reason=msg)

    # We're checking here in so that we can report any quota issues as
    # early as possible, but won't commit until we change the type. We
    # pass the reservations onward in case we need to roll back.
    reservations = quota_utils.get_volume_type_reservation(
        context, volume, vol_type_id)

    self.update(context, volume, {'status': 'retyping'})

    request_spec = {
        'volume_properties': volume,
        'volume_id': volume['id'],
        'volume_type': vol_type,
        'migration_policy': migration_policy,
        'quota_reservations': reservations
    }

    self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
                                 request_spec=request_spec,
                                 filter_properties={})
def create(self, req, body):
    """Creates a new volume."""
    if not self.is_valid_body(body, 'volume'):
        raise exc.HTTPUnprocessableEntity()

    context = req.environ['cinder.context']
    volume = body['volume']

    kwargs = {}

    # Volume type may be specified by ID or by name.
    req_volume_type = volume.get('volume_type', None)
    if req_volume_type:
        if not uuidutils.is_uuid_like(req_volume_type):
            try:
                kwargs['volume_type'] = \
                    volume_types.get_volume_type_by_name(
                        context, req_volume_type)
            except exception.VolumeTypeNotFound:
                explanation = 'Volume type not found.'
                raise exc.HTTPNotFound(explanation=explanation)
        else:
            try:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, req_volume_type)
            except exception.VolumeTypeNotFound:
                explanation = 'Volume type not found.'
                raise exc.HTTPNotFound(explanation=explanation)

    kwargs['metadata'] = volume.get('metadata', None)

    snapshot_id = volume.get('snapshot_id')
    if snapshot_id is not None:
        # BUG FIX: a missing snapshot used to escape as an unhandled
        # NotFound (HTTP 500); report it as a 404 like the other
        # revision of this handler does.
        try:
            kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                              snapshot_id)
        except exception.NotFound:
            explanation = _('snapshot id:%s not found') % snapshot_id
            raise exc.HTTPNotFound(explanation=explanation)
    else:
        kwargs['snapshot'] = None

    # When no explicit size is given, inherit it from the snapshot.
    size = volume.get('size', None)
    if size is None and kwargs['snapshot'] is not None:
        size = kwargs['snapshot']['volume_size']

    LOG.audit(_("Create volume of %s GB"), size, context=context)

    image_href = None
    image_uuid = None
    if self.ext_mgr.is_loaded('os-image-create'):
        image_href = volume.get('imageRef')
        if snapshot_id and image_href:
            msg = _("Snapshot and image cannot be specified together.")
            raise exc.HTTPBadRequest(explanation=msg)
        if image_href:
            image_uuid = self._image_uuid_from_href(image_href)
            kwargs['image_id'] = image_uuid

    kwargs['availability_zone'] = volume.get('availability_zone', None)

    new_volume = self.volume_api.create(context,
                                        size,
                                        volume.get('display_name'),
                                        volume.get('display_description'),
                                        **kwargs)

    # TODO(vish): Instance should be None at db layer instead of
    #             trying to lazy load, but for now we turn it into
    #             a dict to avoid an error.
    retval = _translate_volume_detail_view(context,
                                           dict(new_volume.iteritems()),
                                           image_uuid)

    return {'volume': retval}
def create(self, req, body):
    """Instruct Cinder to manage a storage object.

    Manages an existing backend storage object (e.g. a Linux logical
    volume or a SAN disk) by creating the Cinder objects required to manage
    it, and possibly renaming the backend storage object (driver dependent)

    From an API perspective, this operation behaves very much like a
    volume creation operation, except that properties such as image,
    snapshot and volume references don't make sense, because we are taking
    an existing storage object into Cinder management.

    Required HTTP Body:

    {
     'volume':
      {
       'host': <Cinder host on which the existing storage resides>,
       'ref': <Driver-specific reference to the existing storage object>,
      }
    }

    See the appropriate Cinder drivers' implementations of the manage_volume
    method to find out the accepted format of 'ref'.

    This API call will return with an error if any of the above elements
    are missing from the request, or if the 'host' element refers to a
    cinder host that is not registered.

    The volume will later enter the error state if it is discovered that
    'ref' is bad.

    Optional elements to 'volume' are:
        name               A name for the new volume.
        description        A description for the new volume.
        volume_type        ID or name of a volume type to associate with
                           the new Cinder volume.  Does not necessarily
                           guarantee that the managed volume will have the
                           properties described in the volume_type.  The
                           driver may choose to fail if it identifies that
                           the specified volume_type is not compatible with
                           the backend storage object.
        metadata           Key/value pairs to be associated with the new
                           volume.
        availability_zone  The availability zone to associate with the new
                           volume.
        bootable           If set to True, marks the volume as bootable.
    """
    context = req.environ['cinder.context']
    authorize(context)

    if not self.is_valid_body(body, 'volume'):
        msg = _("Missing required element '%s' in request body") % 'volume'
        raise exc.HTTPBadRequest(explanation=msg)

    volume = body['volume']

    # Check that the required keys are present, return an error if they
    # are not.
    required_keys = set(['ref', 'host'])
    missing_keys = list(required_keys - set(volume.keys()))

    if missing_keys:
        msg = _("The following elements are required: %s") % \
            ', '.join(missing_keys)
        raise exc.HTTPBadRequest(explanation=msg)

    LOG.debug('Manage volume request body: %s', body)

    kwargs = {}

    req_volume_type = volume.get('volume_type', None)
    if req_volume_type:
        # The volume type may be given as either a UUID or a type name;
        # both lookup failures map to HTTP 404.
        try:
            if not uuidutils.is_uuid_like(req_volume_type):
                kwargs['volume_type'] = \
                    volume_types.get_volume_type_by_name(
                        context, req_volume_type)
            else:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, req_volume_type)
        except exception.VolumeTypeNotFound:
            msg = _("Volume type not found.")
            raise exc.HTTPNotFound(explanation=msg)
    else:
        # No type requested: pass an empty dict rather than None.
        kwargs['volume_type'] = {}

    kwargs['name'] = volume.get('name', None)
    kwargs['description'] = volume.get('description', None)
    kwargs['metadata'] = volume.get('metadata', None)
    kwargs['availability_zone'] = volume.get('availability_zone', None)
    kwargs['bootable'] = volume.get('bootable', False)
    try:
        # ServiceNotFound is raised when 'host' is not a registered
        # cinder host; surfaced as HTTP 404.
        new_volume = self.volume_api.manage_existing(context,
                                                     volume['host'],
                                                     volume['ref'],
                                                     **kwargs)
    except exception.ServiceNotFound:
        msg = _("Service not found.")
        raise exc.HTTPNotFound(explanation=msg)

    new_volume = dict(new_volume.iteritems())
    utils.add_visible_admin_metadata(new_volume)

    return self._view_builder.detail(req, new_volume)
def retype(self, context, volume, new_type, migration_policy=None):
    """Attempt to modify the type associated with an existing volume.

    :param context: request context.
    :param volume: the volume dict/object whose type is being changed;
                   must be 'available' or 'in-use' and not migrating.
    :param new_type: target volume type, given by UUID or by name.
    :param migration_policy: None, 'on-demand' or 'never'.
    :raises: InvalidVolume for bad volume state, InvalidInput for bad
             arguments or unsupported retype combinations.
    """
    if volume["status"] not in ["available", "in-use"]:
        msg = _("Unable to update type due to incorrect status "
                "on volume: %s") % volume["id"]
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if volume["migration_status"] is not None:
        msg = (_("Volume %s is already part of an active migration.")
               % volume["id"])
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    if migration_policy and migration_policy not in ["on-demand", "never"]:
        # BUG FIX: the message previously interpolated new_type, which
        # reported the wrong value; report the rejected policy itself.
        msg = _("migration_policy must be 'on-demand' or 'never', "
                "passed: %s") % migration_policy
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    cg_id = volume.get("consistencygroup_id", None)
    if cg_id:
        msg = _("Volume must not be part of a consistency group.")
        LOG.error(msg)
        raise exception.InvalidVolume(reason=msg)

    # Support specifying volume type by ID or name
    try:
        if uuidutils.is_uuid_like(new_type):
            vol_type = volume_types.get_volume_type(context, new_type)
        else:
            vol_type = volume_types.get_volume_type_by_name(context,
                                                            new_type)
    except exception.InvalidVolumeType:
        msg = _("Invalid volume_type passed: %s") % new_type
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    vol_type_id = vol_type["id"]
    vol_type_qos_id = vol_type["qos_specs_id"]

    old_vol_type = None
    old_vol_type_id = volume["volume_type_id"]
    old_vol_type_qos_id = None

    # Error if the original and new type are the same
    if volume["volume_type_id"] == vol_type_id:
        msg = _("New volume_type same as original: %s") % new_type
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    if volume["volume_type_id"]:
        old_vol_type = volume_types.get_volume_type(context,
                                                    old_vol_type_id)
        old_vol_type_qos_id = old_vol_type["qos_specs_id"]

    # We don't support changing encryption requirements yet
    old_enc = volume_types.get_volume_type_encryption(context,
                                                      old_vol_type_id)
    new_enc = volume_types.get_volume_type_encryption(context,
                                                      vol_type_id)
    if old_enc != new_enc:
        msg = _("Retype cannot change encryption requirements")
        raise exception.InvalidInput(reason=msg)

    # We don't support changing QoS at the front-end yet for in-use volumes
    # TODO(avishay): Call Nova to change QoS setting (libvirt has support
    # - virDomainSetBlockIoTune() - Nova does not have support yet).
    if (volume["status"] != "available" and
            old_vol_type_qos_id != vol_type_qos_id):
        for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
            if qos_id:
                specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
                if specs["qos_specs"]["consumer"] != "back-end":
                    msg = _("Retype cannot change front-end qos specs for "
                            "in-use volumes")
                    raise exception.InvalidInput(reason=msg)

    # We're checking here in so that we can report any quota issues as
    # early as possible, but won't commit until we change the type. We
    # pass the reservations onward in case we need to roll back.
    reservations = quota_utils.get_volume_type_reservation(context, volume,
                                                           vol_type_id)

    self.update(context, volume, {"status": "retyping"})

    request_spec = {"volume_properties": volume,
                    "volume_id": volume["id"],
                    "volume_type": vol_type,
                    "migration_policy": migration_policy,
                    "quota_reservations": reservations}

    self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume["id"],
                                 request_spec=request_spec,
                                 filter_properties={})
def do_attach():
    # NOTE(review): context, volume_id, instance_uuid, host_name, mode and
    # mountpoint are presumably closed over from the enclosing attach
    # method's scope — confirm against the outer definition.

    # check the volume status before attaching
    volume = self.db.volume_get(context, volume_id)
    volume_metadata = self.db.volume_admin_metadata_get(
        context.elevated(), volume_id)

    # An 'attaching' volume is only acceptable if this request matches the
    # in-flight attach (same instance, same host, same mode); anything
    # else is a conflicting concurrent attach.
    if volume['status'] == 'attaching':
        if (volume['instance_uuid'] and volume['instance_uuid'] !=
                instance_uuid):
            msg = _("being attached by another instance")
            raise exception.InvalidVolume(reason=msg)
        if (volume['attached_host'] and volume['attached_host'] !=
                host_name):
            msg = _("being attached by another host")
            raise exception.InvalidVolume(reason=msg)
        if (volume_metadata.get('attached_mode') and
                volume_metadata.get('attached_mode') != mode):
            msg = _("being attached by different mode")
            raise exception.InvalidVolume(reason=msg)
    elif volume['status'] != "available":
        msg = _("status must be available")
        raise exception.InvalidVolume(reason=msg)

    # TODO(jdg): attach_time column is currently varchar
    # we should update this to a date-time object
    # also consider adding detach_time?
    self._notify_about_volume_usage(context, volume, "attach.start")
    # Mark the volume as 'attaching' before touching the driver so a
    # concurrent attach sees the reservation above.
    self.db.volume_update(context, volume_id,
                          {"instance_uuid": instance_uuid,
                           "attached_host": host_name,
                           "status": "attaching",
                           "attach_time": timeutils.strtime()})
    self.db.volume_admin_metadata_update(context.elevated(),
                                         volume_id,
                                         {"attached_mode": mode},
                                         False)

    # From here on, validation failures roll the status to
    # 'error_attaching' since the DB rows above were already written.
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidUUID(uuid=instance_uuid)

    host_name_sanitized = utils.sanitize_hostname(
        host_name) if host_name else None

    # Re-read to pick up the updates made above.
    volume = self.db.volume_get(context, volume_id)

    # Read-only volumes may only be attached in 'ro' mode.
    if volume_metadata.get('readonly') == 'True' and mode != 'ro':
        self.db.volume_update(context, volume_id,
                              {'status': 'error_attaching'})
        raise exception.InvalidVolumeAttachMode(mode=mode,
                                                volume_id=volume_id)
    try:
        self.driver.attach_volume(context,
                                  volume,
                                  instance_uuid,
                                  host_name_sanitized,
                                  mountpoint)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_update(context, volume_id,
                                  {'status': 'error_attaching'})

    volume = self.db.volume_attached(context.elevated(),
                                     volume_id,
                                     instance_uuid,
                                     host_name_sanitized,
                                     mountpoint)
    self._notify_about_volume_usage(context, volume, "attach.end")
def test_volume_create(self):
    """A created volume gets a UUID id and keeps the supplied host."""
    created = db.volume_create(self.ctxt, {'host': 'host1'})
    self.assertTrue(uuidutils.is_uuid_like(created['id']))
    self.assertEqual(created.host, 'host1')
def test_gen_valid_uuid(self):
    """gen_uuid must produce a value that stringifies to a valid UUID."""
    generated = str(utils.gen_uuid())
    self.assertTrue(uuidutils.is_uuid_like(generated))
def create(self, req, body):
    """Creates a new volume.

    :param req: the WSGI request; must carry 'cinder.context' in environ.
    :param body: decoded request body; requires a top-level 'volume' dict.
    :returns: dict with a 'volume' key holding the detail view of the
              newly created volume.
    :raises: HTTPUnprocessableEntity if the body is malformed,
             HTTPNotFound if the requested volume type does not exist.
    """
    if not self.is_valid_body(body, "volume"):
        raise exc.HTTPUnprocessableEntity()

    LOG.debug("Create volume request body: %s", body)
    context = req.environ["cinder.context"]
    volume = body["volume"]

    kwargs = {}

    req_volume_type = volume.get("volume_type", None)
    if req_volume_type:
        # Accept either a volume type UUID or a type name. The two lookup
        # branches previously each carried their own identical
        # except-clause; use a single shared error path instead (matches
        # the newer volume API variants).
        try:
            if not uuidutils.is_uuid_like(req_volume_type):
                kwargs["volume_type"] = \
                    volume_types.get_volume_type_by_name(
                        context, req_volume_type)
            else:
                kwargs["volume_type"] = volume_types.get_volume_type(
                    context, req_volume_type)
        except exception.VolumeTypeNotFound:
            explanation = "Volume type not found."
            raise exc.HTTPNotFound(explanation=explanation)

    kwargs["metadata"] = volume.get("metadata", None)

    snapshot_id = volume.get("snapshot_id")
    if snapshot_id is not None:
        kwargs["snapshot"] = self.volume_api.get_snapshot(context,
                                                          snapshot_id)
    else:
        kwargs["snapshot"] = None

    source_volid = volume.get("source_volid")
    if source_volid is not None:
        kwargs["source_volume"] = self.volume_api.get_volume(context,
                                                             source_volid)
    else:
        kwargs["source_volume"] = None

    # Default the size from the snapshot or source volume when omitted.
    size = volume.get("size", None)
    if size is None and kwargs["snapshot"] is not None:
        size = kwargs["snapshot"]["volume_size"]
    elif size is None and kwargs["source_volume"] is not None:
        size = kwargs["source_volume"]["size"]

    LOG.audit(_("Create volume of %s GB"), size, context=context)

    image_href = None
    image_uuid = None
    if self.ext_mgr.is_loaded("os-image-create"):
        image_href = volume.get("imageRef")
        if image_href:
            image_uuid = self._image_uuid_from_href(image_href)
            kwargs["image_id"] = image_uuid

    kwargs["availability_zone"] = volume.get("availability_zone", None)

    new_volume = self.volume_api.create(context,
                                        size,
                                        volume.get("display_name"),
                                        volume.get("display_description"),
                                        **kwargs)

    # TODO(vish): Instance should be None at db layer instead of
    #             trying to lazy load, but for now we turn it into
    #             a dict to avoid an error.
    retval = _translate_volume_detail_view(context,
                                           dict(new_volume.iteritems()),
                                           image_uuid)

    return {"volume": retval}
def create(self, req, body):
    """Creates a new volume."""
    if not self.is_valid_body(body, 'volume'):
        raise exc.HTTPUnprocessableEntity()

    LOG.debug('Create volume request body: %s', body)
    context = req.environ['cinder.context']
    volume = body['volume']

    create_kwargs = {}

    requested_type = volume.get('volume_type', None)
    if requested_type:
        # The type may be specified by UUID or by name.
        try:
            if uuidutils.is_uuid_like(requested_type):
                create_kwargs['volume_type'] = \
                    volume_types.get_volume_type(context, requested_type)
            else:
                create_kwargs['volume_type'] = \
                    volume_types.get_volume_type_by_name(context,
                                                         requested_type)
        except exception.VolumeTypeNotFound:
            explanation = 'Volume type not found.'
            raise exc.HTTPNotFound(explanation=explanation)

    create_kwargs['metadata'] = volume.get('metadata', None)

    snapshot_id = volume.get('snapshot_id')
    create_kwargs['snapshot'] = (
        self.volume_api.get_snapshot(context, snapshot_id)
        if snapshot_id is not None else None)

    source_volid = volume.get('source_volid')
    create_kwargs['source_volume'] = (
        self.volume_api.get_volume(context, source_volid)
        if source_volid is not None else None)

    # If no explicit size was given, inherit it from the snapshot or the
    # source volume.
    size = volume.get('size', None)
    if size is None:
        if create_kwargs['snapshot'] is not None:
            size = create_kwargs['snapshot']['volume_size']
        elif create_kwargs['source_volume'] is not None:
            size = create_kwargs['source_volume']['size']

    LOG.audit(_("Create volume of %s GB"), size, context=context)

    image_uuid = None
    if self.ext_mgr.is_loaded('os-image-create'):
        # NOTE(jdg): misleading name "imageRef" as it's an image-id
        image_href = volume.get('imageRef')
        if image_href:
            image_uuid = self._image_uuid_from_href(image_href)
            create_kwargs['image_id'] = image_uuid

    create_kwargs['availability_zone'] = volume.get('availability_zone',
                                                    None)

    new_volume = self.volume_api.create(context,
                                        size,
                                        volume.get('display_name'),
                                        volume.get('display_description'),
                                        **create_kwargs)

    # TODO(vish): Instance should be None at db layer instead of
    #             trying to lazy load, but for now we turn it into
    #             a dict to avoid an error.
    new_volume = dict(new_volume.iteritems())
    self._add_visible_admin_metadata(context, new_volume)

    retval = _translate_volume_detail_view(context, new_volume, image_uuid)

    return {'volume': retval}
def create(self, req, body):
    """Creates a new volume."""
    if not self.is_valid_body(body, 'volume'):
        msg = _("Missing required element '%s' in request body") % 'volume'
        raise exc.HTTPBadRequest(explanation=msg)

    LOG.debug('Create volume request body: %s', body)
    context = req.environ['cinder.context']
    volume = body['volume']

    kwargs = {}

    # NOTE(thingee): v2 API allows name instead of display_name
    if volume.get('name'):
        volume['display_name'] = volume.get('name')
        del volume['name']

    # NOTE(thingee): v2 API allows description instead of
    # display_description
    if volume.get('description'):
        volume['display_description'] = volume.get('description')
        del volume['description']

    req_volume_type = volume.get('volume_type', None)
    if req_volume_type:
        # The volume type may be given either as a UUID or as a name.
        try:
            if not uuidutils.is_uuid_like(req_volume_type):
                kwargs['volume_type'] = \
                    volume_types.get_volume_type_by_name(
                        context, req_volume_type)
            else:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, req_volume_type)
        except exception.VolumeTypeNotFound:
            msg = _("Volume type not found.")
            raise exc.HTTPNotFound(explanation=msg)

    kwargs['metadata'] = volume.get('metadata', None)

    snapshot_id = volume.get('snapshot_id')
    if snapshot_id is not None:
        try:
            kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                              snapshot_id)
        except exception.NotFound:
            explanation = _('snapshot id:%s not found') % snapshot_id
            raise exc.HTTPNotFound(explanation=explanation)
    else:
        kwargs['snapshot'] = None

    source_volid = volume.get('source_volid')
    if source_volid is not None:
        try:
            kwargs['source_volume'] = \
                self.volume_api.get_volume(context, source_volid)
        except exception.NotFound:
            explanation = _('source volume id:%s not found') % source_volid
            raise exc.HTTPNotFound(explanation=explanation)
    else:
        kwargs['source_volume'] = None

    # When the caller omits the size, inherit it from the snapshot or the
    # source volume.
    size = volume.get('size', None)
    if size is None and kwargs['snapshot'] is not None:
        size = kwargs['snapshot']['volume_size']
    elif size is None and kwargs['source_volume'] is not None:
        size = kwargs['source_volume']['size']

    LOG.audit(_("Create volume of %s GB"), size, context=context)

    if self.ext_mgr.is_loaded('os-image-create'):
        image_href = volume.get('imageRef')
        if image_href:
            image_uuid = self._image_uuid_from_href(image_href)
            kwargs['image_id'] = image_uuid

    kwargs['availability_zone'] = volume.get('availability_zone', None)
    kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)

    new_volume = self.volume_api.create(context,
                                        size,
                                        volume.get('display_name'),
                                        volume.get('display_description'),
                                        **kwargs)

    # TODO(vish): Instance should be None at db layer instead of
    #             trying to lazy load, but for now we turn it into
    #             a dict to avoid an error.
    new_volume = dict(new_volume.iteritems())

    self._add_visible_admin_metadata(context, new_volume)

    retval = self._view_builder.detail(req, new_volume)

    return retval
def create(self, req, body):
    """Creates a new volume."""
    if not self.is_valid_body(body, 'volume'):
        msg = _("Missing required element '%s' in request body") % 'volume'
        raise exc.HTTPBadRequest(explanation=msg)

    LOG.debug('Create volume request body: %s', body)
    context = req.environ['cinder.context']
    volume = body['volume']
    kwargs = {}

    # NOTE(thingee): v2 API allows name instead of display_name
    if volume.get('name'):
        volume['display_name'] = volume.pop('name')

    # NOTE(thingee): v2 API allows description instead of
    # display_description
    if volume.get('description'):
        volume['display_description'] = volume.pop('description')

    requested_type = volume.get('volume_type', None)
    if requested_type:
        # The type may be specified by UUID or by name.
        try:
            if uuidutils.is_uuid_like(requested_type):
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, requested_type)
            else:
                kwargs['volume_type'] = \
                    volume_types.get_volume_type_by_name(context,
                                                         requested_type)
        except exception.VolumeTypeNotFound:
            msg = _("Volume type not found.")
            raise exc.HTTPNotFound(explanation=msg)

    kwargs['metadata'] = volume.get('metadata', None)

    snapshot_id = volume.get('snapshot_id')
    if snapshot_id is None:
        kwargs['snapshot'] = None
    else:
        try:
            kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                              snapshot_id)
        except exception.NotFound:
            explanation = _('snapshot id:%s not found') % snapshot_id
            raise exc.HTTPNotFound(explanation=explanation)

    source_volid = volume.get('source_volid')
    if source_volid is None:
        kwargs['source_volume'] = None
    else:
        try:
            kwargs['source_volume'] = self.volume_api.get_volume(
                context, source_volid)
        except exception.NotFound:
            explanation = _('source volume id:%s not found') % source_volid
            raise exc.HTTPNotFound(explanation=explanation)

    # If no explicit size was given, inherit it from the snapshot or the
    # source volume.
    size = volume.get('size', None)
    if size is None:
        if kwargs['snapshot'] is not None:
            size = kwargs['snapshot']['volume_size']
        elif kwargs['source_volume'] is not None:
            size = kwargs['source_volume']['size']

    LOG.audit(_("Create volume of %s GB"), size, context=context)

    if self.ext_mgr.is_loaded('os-image-create'):
        image_href = volume.get('imageRef')
        if image_href:
            kwargs['image_id'] = self._image_uuid_from_href(image_href)

    kwargs['availability_zone'] = volume.get('availability_zone', None)
    kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)

    new_volume = self.volume_api.create(context,
                                        size,
                                        volume.get('display_name'),
                                        volume.get('display_description'),
                                        **kwargs)

    # TODO(vish): Instance should be None at db layer instead of
    #             trying to lazy load, but for now we turn it into
    #             a dict to avoid an error.
    new_volume = dict(new_volume.iteritems())

    return self._view_builder.detail(req, new_volume)
def test_volume_create(self):
    """A created volume gets a UUID id and keeps the supplied host."""
    created = db.volume_create(self.ctxt, {"host": "host1"})
    self.assertTrue(uuidutils.is_uuid_like(created["id"]))
    self.assertEqual(created.host, "host1")