class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat,
                       base.CinderComparableObject):
    """Versioned object mapping a row of the volume attachment table."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'attached_host': fields.StringField(nullable=True),
        'mountpoint': fields.StringField(nullable=True),
        'attach_time': fields.DateTimeField(nullable=True),
        'detach_time': fields.DateTimeField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'attach_mode': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, attachment, db_attachment):
        """Populate *attachment* from a DB row and mark it clean."""
        for attr_name, field_type in attachment.fields.items():
            raw = db_attachment.get(attr_name)
            # Integer columns may come back NULL; normalize falsy to 0.
            if isinstance(field_type, fields.IntegerField) and not raw:
                raw = 0
            attachment[attr_name] = raw
        attachment._context = context
        attachment.obj_reset_changes()
        return attachment

    def save(self):
        """Persist any locally modified fields to the database."""
        changed = self.cinder_obj_get_changes()
        if not changed:
            return
        db.volume_attachment_update(self._context, self.id, changed)
        self.obj_reset_changes()
class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat,
                       base.CinderComparableObject):
    """Versioned object for a single row of the volume attachment table."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'attached_host': fields.StringField(nullable=True),
        'mountpoint': fields.StringField(nullable=True),
        'attach_time': fields.DateTimeField(nullable=True),
        'detach_time': fields.DateTimeField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'attach_mode': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, attachment, db_attachment):
        """Fill *attachment* from a DB row and reset its change tracking.

        :returns: the populated attachment object.
        """
        for name, field in attachment.fields.items():
            value = db_attachment.get(name)
            # Integer columns may be NULL in the DB; normalize to 0.
            if isinstance(field, fields.IntegerField):
                value = value or 0
            attachment[name] = value
        attachment._context = context
        attachment.obj_reset_changes()
        return attachment

    def save(self):
        """Write any locally modified fields back to the database."""
        updates = self.cinder_obj_get_changes()
        if updates:
            db.volume_attachment_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def finish_attach(self, instance_uuid, host_name, mount_point,
                      attach_mode='rw'):
        """Mark this attachment as completed in the DB.

        Updates this object from the values returned by the DB layer and
        returns the (also updated) owning Volume object.
        """
        # The DB update is done under an elevated (admin) context, matching
        # create() below.
        with self.obj_as_admin():
            db_volume, updated_values = db.volume_attached(
                self._context, self.id, instance_uuid, host_name,
                mount_point, attach_mode)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
        return objects.Volume._from_db_object(self._context,
                                              objects.Volume(),
                                              db_volume)

    def create(self):
        """Insert this attachment into the DB.

        :raises ObjectActionError: if the object already has an ``id``.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        with self.obj_as_admin():
            db_attachment = db.volume_attach(self._context, updates)
        self._from_db_object(self._context, self, db_attachment)
def setUp(self):
    """Prepare coercion fixtures for VolumeAttachStatusField tests."""
    super(TestVolumeAttachStatus, self).setUp()
    self.field = fields.VolumeAttachStatusField()
    # Every valid primitive string must coerce to the matching enum value.
    valid_statuses = ('attaching', 'attached', 'detached',
                      'error_attaching', 'error_detaching')
    self.coerce_good_values = [
        (status, getattr(fields.VolumeAttachStatus, status.upper()))
        for status in valid_statuses]
    self.coerce_bad_values = ['acme']
    # A single representative pair suffices for primitive round-trips.
    self.to_primitive_values = self.coerce_good_values[:1]
    self.from_primitive_values = self.coerce_good_values[:1]
class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject,
             base.ClusteredObject):
    """Versioned object wrapping a row of the volumes table.

    Fields listed in ``OPTIONAL_FIELDS`` are relationships that are
    lazy-loaded on first access (see :meth:`obj_load_attr`); all other
    fields map directly onto columns of the DB model.
    """

    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    # Version 1.4: Added cluster fields
    # Version 1.5: Added group
    # Version 1.6: This object is now cleanable (adds rows to workers table)
    # Version 1.7: Added service_uuid
    # Version 1.8: Added shared_targets
    VERSION = '1.8'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots', 'cluster', 'group')

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'cluster_name': fields.StringField(nullable=True),
        'cluster': fields.ObjectField('Cluster', nullable=True,
                                      read_only=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'migration_status': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'provider_id': fields.StringField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'group_id': fields.UUIDField(nullable=True),
        'deleted': fields.BooleanField(default=False, nullable=True),
        'bootable': fields.BooleanField(default=False, nullable=True),
        'multiattach': fields.BooleanField(default=False, nullable=True),
        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),
        'previous_status': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
        'group': fields.ObjectField('Group', nullable=True),
        'service_uuid': fields.StringField(nullable=True),
        'shared_targets': fields.BooleanField(default=True, nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'name_id', 'volume_metadata',
                        'volume_admin_metadata', 'volume_glance_metadata']

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """Default joined attributes for DB queries; admins also get
        admin_metadata."""
        expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs']
        if context.is_admin:
            expected_attrs.append('admin_metadata')
        return expected_attrs

    @property
    def name_id(self):
        # Fall back to the real id when no migration name id is set.
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        return CONF.volume_name_template % self.name_id

    # TODO(dulek): Three properties below are for compatibility with dict
    # representation of volume. The format there is different (list of
    # SQLAlchemy models) so we need a conversion. Anyway - these should be
    # removed when we stop this class from deriving from DictObjectCompat.
    @property
    def volume_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.metadata.items()]
        return md

    @volume_metadata.setter
    def volume_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.metadata = md

    @property
    def volume_admin_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()]
        return md

    @volume_admin_metadata.setter
    def volume_admin_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.admin_metadata = md

    @property
    def volume_glance_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()]
        return md

    @volume_glance_metadata.setter
    def volume_glance_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.glance_metadata = md

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        obj = super(Volume, Volume)._obj_from_primitive(context, objver,
                                                        primitive)
        obj._reset_metadata_tracking()
        return obj

    def _reset_metadata_tracking(self, fields=None):
        """Snapshot the metadata dicts so obj_what_changed() can diff them."""
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})
        if fields is None or 'admin_metadata' in fields:
            self._orig_admin_metadata = (dict(self.admin_metadata)
                                         if 'admin_metadata' in self
                                         else {})
        if fields is None or 'glance_metadata' in fields:
            self._orig_glance_metadata = (dict(self.glance_metadata)
                                          if 'glance_metadata' in self
                                          else {})

    def obj_what_changed(self):
        # Metadata dicts are mutated in place, so compare them against the
        # snapshots taken by _reset_metadata_tracking().
        changes = super(Volume, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if ('admin_metadata' in self and
                self.admin_metadata != self._orig_admin_metadata):
            changes.add('admin_metadata')
        if ('glance_metadata' in self and
                self.glance_metadata != self._orig_glance_metadata):
            changes.add('glance_metadata')
        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make a Volume representation compatible with a target version."""
        # Fields added at each version, stripped when talking to older
        # peers.  Every entry MUST be a tuple of field names: the original
        # bare ('service_uuid') was a plain string, so the inner loop
        # popped single characters and 'service_uuid' leaked to pre-1.7
        # receivers.  'shared_targets' (added in 1.8) was also missing
        # from this table.
        added_fields = (((1, 4), ('cluster', 'cluster_name')),
                        ((1, 5), ('group', 'group_id')),
                        ((1, 7), ('service_uuid',)),
                        ((1, 8), ('shared_targets',)))
        # Convert all related objects
        super(Volume, self).obj_make_compatible(primitive, target_version)

        target_version = versionutils.convert_version_to_tuple(target_version)
        for version, remove_fields in added_fields:
            if target_version < version:
                for obj_field in remove_fields:
                    primitive.pop(obj_field, None)

    @classmethod
    def _from_db_object(cls, context, volume, db_volume, expected_attrs=None):
        """Populate *volume* from a DB row, including any joined
        relationships named in *expected_attrs*."""
        if expected_attrs is None:
            expected_attrs = []
        for name, field in volume.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        # Get data from db_volume object that was queried by joined query
        # from DB
        if 'metadata' in expected_attrs:
            metadata = db_volume.get('volume_metadata', [])
            volume.metadata = {item['key']: item['value'] for item in metadata}
        if 'admin_metadata' in expected_attrs:
            metadata = db_volume.get('volume_admin_metadata', [])
            volume.admin_metadata = {item['key']: item['value']
                                     for item in metadata}
        if 'glance_metadata' in expected_attrs:
            metadata = db_volume.get('volume_glance_metadata', [])
            volume.glance_metadata = {item['key']: item['value']
                                      for item in metadata}
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                vt_expected_attrs = []
                if 'volume_type.extra_specs' in expected_attrs:
                    vt_expected_attrs.append('extra_specs')
                volume.volume_type = objects.VolumeType._from_db_object(
                    context, objects.VolumeType(), db_volume_type,
                    expected_attrs=vt_expected_attrs)
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment, db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        if volume.consistencygroup_id and 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context, consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(
                context, objects.SnapshotList(context),
                objects.Snapshot, db_volume['snapshots'])
            volume.snapshots = snapshots
        if 'cluster' in expected_attrs:
            db_cluster = db_volume.get('cluster')
            # If this volume doesn't belong to a cluster the cluster field in
            # the ORM instance will have value of None.
            if db_cluster:
                volume.cluster = objects.Cluster(context)
                objects.Cluster._from_db_object(context, volume.cluster,
                                                db_cluster)
            else:
                volume.cluster = None
        if volume.group_id and 'group' in expected_attrs:
            group = objects.Group(context)
            group._from_db_object(context, group, db_volume['group'])
            volume.group = group

        volume._context = context
        volume.obj_reset_changes()
        return volume

    def create(self):
        """Insert this volume into the DB.

        :raises ObjectActionError: if already created, or if any
            relationship field is set (those cannot be created inline).
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('snapshots assigned'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('cluster assigned'))
        if 'group' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('group assigned'))
        if ('volume_type_id' not in updates or
                updates['volume_type_id'] is None):
            updates['volume_type_id'] = (
                volume_types.get_default_volume_type()['id'])

        db_volume = db.volume_create(self._context, updates)
        self._from_db_object(self._context, self, db_volume)

    def save(self):
        """Persist changed fields; metadata dicts are stored separately."""
        updates = self.cinder_obj_get_changes()
        if updates:
            # NOTE(xyang): Allow this to pass if 'consistencygroup' is
            # set to None. This is to support backward compatibility.
            # Also remove 'consistencygroup' from updates because
            # consistencygroup is the name of a relationship in the ORM
            # Volume model, so SQLA tries to do some kind of update of
            # the foreign key based on the provided updates if
            # 'consistencygroup' is in updates.
            if updates.pop('consistencygroup', None):
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'group' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('group changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cluster changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            # When we are creating a volume and we change from 'creating'
            # status to 'downloading' status we have to change the worker entry
            # in the DB to reflect this change, otherwise the cleanup will
            # not be performed as it will be mistaken for a volume that has
            # been somehow changed (reset status, forced operation...)
            if updates.get('status') == 'downloading':
                self.set_worker()

            # updates are changed after popping out metadata.
            if updates:
                db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        """Soft-delete this volume in the DB and refresh local fields."""
        with self.obj_as_admin():
            updated_values = db.volume_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    def obj_load_attr(self, attrname):
        """Lazy-load one of the OPTIONAL_FIELDS relationships from the DB."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            try:
                # NOTE(dulek): We're using alias here to have conversion from
                # list to dict done there.
                self.volume_glance_metadata = db.volume_glance_metadata_get(
                    self._context, self.id)
            except exception.GlanceMetadataNotFound:
                # NOTE(dulek): DB API raises when volume has no
                # glance_metadata. Silencing this because at this level no
                # metadata is a completely valid result.
                self.glance_metadata = {}
        elif attrname == 'volume_type':
            # If the volume doesn't have volume_type, VolumeType.get_by_id
            # would trigger a db call which raise VolumeTypeNotFound exception.
            self.volume_type = (objects.VolumeType.get_by_id(
                self._context, self.volume_type_id)
                if self.volume_type_id else None)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            if self.consistencygroup_id is None:
                self.consistencygroup = None
            else:
                consistencygroup = objects.ConsistencyGroup.get_by_id(
                    self._context, self.consistencygroup_id)
                self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)
        elif attrname == 'cluster':
            # If this volume doesn't belong to a cluster (cluster_name is
            # empty), then cluster field will be None.
            if self.cluster_name:
                self.cluster = objects.Cluster.get_by_id(
                    self._context, name=self.cluster_name)
            else:
                self.cluster = None
        elif attrname == 'group':
            if self.group_id is None:
                self.group = None
            else:
                group = objects.Group.get_by_id(
                    self._context, self.group_id)
                self.group = group

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        """Delete one metadata item from the DB and from this object."""
        db.volume_metadata_delete(self._context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    def finish_volume_migration(self, dest_volume):
        """Swap field values with *dest_volume* at the end of a migration."""
        # We swap fields between source (i.e. self) and destination at the
        # end of migration because we want to keep the original volume id
        # in the DB but now pointing to the migrated volume.
        skip = ({'id', 'provider_location', 'glance_metadata',
                 'volume_type'} | set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set. We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)

            # Destination must have a _name_id since the id no longer matches
            # the volume. If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'
            # Because dest_volume will be deleted soon, we can
            # skip to copy volume_type_id and volume_type which
            # are not keys for volume deletion.
            elif key == 'volume_type_id':
                # Initialize volume_type of source volume using
                # new volume_type_id.
                self.update({'volume_type_id': value})
                continue

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)
        self.save()
        dest_volume.save()
        return dest_volume

    def get_latest_snapshot(self):
        """Get volume's latest snapshot"""
        snapshot_db = db.snapshot_get_latest_for_volume(self._context,
                                                        self.id)
        snapshot = objects.Snapshot(self._context)
        return snapshot._from_db_object(self._context, snapshot, snapshot_db)

    @staticmethod
    def _is_cleanable(status, obj_version):
        """Return whether a volume in *status* is eligible for worker
        cleanup."""
        # Before 1.6 we didn't have workers table, so cleanup wasn't
        # supported.
        if obj_version and obj_version < 1.6:
            return False
        return status in ('creating', 'deleting', 'uploading', 'downloading')

    def begin_attach(self, attach_mode):
        """Create an ATTACHING attachment record for this volume."""
        attachment = objects.VolumeAttachment(
            context=self._context,
            attach_status=c_fields.VolumeAttachStatus.ATTACHING,
            volume_id=self.id)
        attachment.create()
        with self.obj_as_admin():
            self.admin_metadata['attached_mode'] = attach_mode
            self.save()
        return attachment

    def finish_detach(self, attachment_id):
        """Finalize a detach: update the DB and drop the local attachment."""
        with self.obj_as_admin():
            volume_updates, attachment_updates = (
                db.volume_detached(self._context, self.id, attachment_id))
            db.volume_admin_metadata_delete(self._context, self.id,
                                            'attached_mode')
            self.admin_metadata.pop('attached_mode', None)
        # Remove attachment in volume only when this field is loaded.
        if attachment_updates and self.obj_attr_is_set('volume_attachment'):
            for i, attachment in enumerate(self.volume_attachment):
                if attachment.id == attachment_id:
                    del self.volume_attachment.objects[i]
                    break

        self.update(volume_updates)
        self.obj_reset_changes(
            list(volume_updates.keys()) +
            ['volume_attachment', 'admin_metadata'])

    def is_replicated(self):
        return self.volume_type and self.volume_type.is_replicated()

    def is_multiattach(self):
        return self.volume_type and self.volume_type.is_multiattach()
class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat,
                       base.CinderComparableObject):
    """Versioned object for a volume attachment row.

    ``connection_info`` and ``connector`` are stored as serialized JSON in
    the DB and exposed here as dicts.
    """

    # Version 1.0: Initial version
    # Version 1.1: Added volume relationship
    # Version 1.2: Added connection_info attribute
    # Version 1.3: Added the connector attribute.
    VERSION = '1.3'

    OPTIONAL_FIELDS = ['volume']
    obj_extra_fields = ['project_id', 'volume_host']

    fields = {
        'id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'attached_host': fields.StringField(nullable=True),
        'mountpoint': fields.StringField(nullable=True),
        'attach_time': fields.DateTimeField(nullable=True),
        'detach_time': fields.DateTimeField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'attach_mode': fields.StringField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=False),
        'connection_info': c_fields.DictOfNullableField(nullable=True),
        'connector': c_fields.DictOfNullableField(nullable=True)
    }

    @property
    def project_id(self):
        # Delegated to the owning volume; may lazy-load it.
        return self.volume.project_id

    @property
    def volume_host(self):
        # Delegated to the owning volume; may lazy-load it.
        return self.volume.host

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """By default the owning volume is expected to be joined in."""
        return ['volume']

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with target version."""
        super(VolumeAttachment, self).obj_make_compatible(primitive,
                                                          target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # Strip fields unknown to older receivers.
        if target_version < (1, 3):
            primitive.pop('connector', None)
        if target_version < (1, 2):
            primitive.pop('connection_info', None)

    @classmethod
    def _from_db_object(cls, context, attachment, db_attachment,
                        expected_attrs=None):
        """Populate *attachment* from a DB row.

        Also performs an online migration of any legacy attachment_specs
        rows into the ``connector`` field.
        """
        if expected_attrs is None:
            expected_attrs = cls._get_expected_attrs(context)
        for name, field in attachment.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_attachment.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            if name in ('connection_info', 'connector'):
                # Both of these fields are nullable serialized json dicts.
                setattr(attachment, name,
                        jsonutils.loads(value) if value else None)
            else:
                attachment[name] = value
        # NOTE: hasattr check is necessary to avoid doing a lazy loading when
        # loading VolumeList.get_all: Getting a Volume loads its
        # VolumeAttachmentList, which think they have the volume loaded, but
        # they don't. More detail on https://review.openstack.org/632549
        if 'volume' in expected_attrs and hasattr(db_attachment, 'volume'):
            db_volume = db_attachment.volume
            if db_volume:
                attachment.volume = objects.Volume._from_db_object(
                    context, objects.Volume(), db_volume)
        attachment._context = context
        attachment.obj_reset_changes()

        # This is an online data migration which we should remove when enough
        # time has passed and we have a blocker schema migration to check to
        # make sure that the attachment_specs table is empty. Operators should
        # run the "cinder-manage db online_data_migrations" CLI to force the
        # migration on-demand.
        connector = db.attachment_specs_get(context, attachment.id)
        if connector:
            # Update ourselves and delete the attachment_specs.
            attachment.connector = connector
            attachment.save()
            # TODO(mriedem): Really need a delete-all method for this.
            for spec_key in connector:
                db.attachment_specs_delete(
                    context, attachment.id, spec_key)
        return attachment

    def obj_load_attr(self, attrname):
        """Lazy-load the ``volume`` relationship from the DB."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'volume':
            volume = objects.Volume.get_by_id(self._context, self.volume_id)
            self.volume = volume

        self.obj_reset_changes(fields=[attrname])

    @staticmethod
    def _convert_connection_info_to_db_format(updates):
        """Serialize connection_info (if present) to JSON for the DB."""
        properties = updates.pop('connection_info', None)
        if properties is not None:
            updates['connection_info'] = jsonutils.dumps(properties)

    @staticmethod
    def _convert_connector_to_db_format(updates):
        """Serialize connector (if present) to JSON for the DB."""
        connector = updates.pop('connector', None)
        if connector is not None:
            updates['connector'] = jsonutils.dumps(connector)

    def save(self):
        """Write modified fields back to the DB (JSON fields serialized)."""
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'connection_info' in updates:
                self._convert_connection_info_to_db_format(updates)
            if 'connector' in updates:
                self._convert_connector_to_db_format(updates)
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))

            db.volume_attachment_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def finish_attach(self, instance_uuid, host_name, mount_point,
                      attach_mode='rw'):
        """Mark this attachment complete; return the updated Volume."""
        with self.obj_as_admin():
            db_volume, updated_values = db.volume_attached(
                self._context, self.id, instance_uuid, host_name,
                mount_point, attach_mode)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
        return objects.Volume._from_db_object(self._context,
                                              objects.Volume(),
                                              db_volume)

    def create(self):
        """Insert this attachment into the DB.

        :raises ObjectActionError: if the object already has an ``id``.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'connector' in updates:
            self._convert_connector_to_db_format(updates)
        with self.obj_as_admin():
            db_attachment = db.volume_attach(self._context, updates)
        self._from_db_object(self._context, self, db_attachment)

    def destroy(self):
        """Delete this attachment row and refresh local fields."""
        updated_values = db.attachment_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat,
                       base.CinderComparableObject):
    """Versioned object for a volume attachment row.

    ``connection_info`` is stored as serialized JSON in the DB and exposed
    here as a dict.
    """

    # Version 1.0: Initial version
    # Version 1.1: Added volume relationship
    # Version 1.2: Added connection_info attribute
    VERSION = '1.2'

    OPTIONAL_FIELDS = ['volume']
    obj_extra_fields = ['project_id', 'volume_host']

    fields = {
        'id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'attached_host': fields.StringField(nullable=True),
        'mountpoint': fields.StringField(nullable=True),
        'attach_time': fields.DateTimeField(nullable=True),
        'detach_time': fields.DateTimeField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'attach_mode': fields.StringField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=False),
        'connection_info': c_fields.DictOfNullableField(nullable=True)
    }

    @property
    def project_id(self):
        # Delegated to the owning volume; may lazy-load it.
        return self.volume.project_id

    @property
    def volume_host(self):
        # Delegated to the owning volume; may lazy-load it.
        return self.volume.host

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """By default the owning volume is expected to be joined in."""
        return ['volume']

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with target version."""
        super(VolumeAttachment, self).obj_make_compatible(primitive,
                                                          target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # connection_info is unknown to pre-1.2 receivers.
        if target_version < (1, 2):
            primitive.pop('connection_info', None)

    @classmethod
    def _from_db_object(cls, context, attachment, db_attachment,
                        expected_attrs=None):
        """Populate *attachment* (and optionally its volume) from a DB
        row."""
        if expected_attrs is None:
            expected_attrs = cls._get_expected_attrs(context)
        for name, field in attachment.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_attachment.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            if name == 'connection_info':
                # Nullable serialized JSON dict in the DB.
                attachment.connection_info = jsonutils.loads(
                    value) if value else None
            else:
                attachment[name] = value
        if 'volume' in expected_attrs:
            db_volume = db_attachment.get('volume')
            if db_volume:
                attachment.volume = objects.Volume._from_db_object(
                    context, objects.Volume(), db_volume)

        attachment._context = context
        attachment.obj_reset_changes()
        return attachment

    def obj_load_attr(self, attrname):
        """Lazy-load the ``volume`` relationship from the DB."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'volume':
            volume = objects.Volume.get_by_id(self._context, self.volume_id)
            self.volume = volume

        self.obj_reset_changes(fields=[attrname])

    @staticmethod
    def _convert_connection_info_to_db_format(updates):
        """Serialize connection_info (if present) to JSON for the DB."""
        properties = updates.pop('connection_info', None)
        if properties is not None:
            updates['connection_info'] = jsonutils.dumps(properties)

    def save(self):
        """Write modified fields back to the DB (JSON fields serialized)."""
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'connection_info' in updates:
                self._convert_connection_info_to_db_format(updates)
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))

            db.volume_attachment_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def finish_attach(self, instance_uuid, host_name, mount_point,
                      attach_mode='rw'):
        """Mark this attachment complete; return the updated Volume."""
        with self.obj_as_admin():
            db_volume, updated_values = db.volume_attached(
                self._context, self.id, instance_uuid, host_name,
                mount_point, attach_mode)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
        return objects.Volume._from_db_object(self._context,
                                              objects.Volume(),
                                              db_volume)

    def create(self):
        """Insert this attachment into the DB.

        :raises ObjectActionError: if the object already has an ``id``.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        with self.obj_as_admin():
            db_attachment = db.volume_attach(self._context, updates)
        self._from_db_object(self._context, self, db_attachment)

    def destroy(self):
        """Delete this attachment row and refresh local fields."""
        updated_values = db.attachment_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
class VolumeAttachment(base.CinderPersistentObject, base.CinderObject,
                       base.CinderObjectDictCompat,
                       base.CinderComparableObject):
    """Versioned object for a volume attachment row, with a lazy-loadable
    ``volume`` relationship."""

    # Version 1.0: Initial version
    # Version 1.1: Added volume relationship
    VERSION = '1.1'

    OPTIONAL_FIELDS = ['volume']
    obj_extra_fields = ['project_id', 'volume_host']

    fields = {
        'id': fields.UUIDField(),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'attached_host': fields.StringField(nullable=True),
        'mountpoint': fields.StringField(nullable=True),
        'attach_time': fields.DateTimeField(nullable=True),
        'detach_time': fields.DateTimeField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'attach_mode': fields.StringField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=False),
    }

    @property
    def project_id(self):
        # Delegated to the owning volume; may lazy-load it.
        return self.volume.project_id

    @property
    def volume_host(self):
        # Delegated to the owning volume; may lazy-load it.
        return self.volume.host

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """By default the owning volume is expected to be joined in."""
        return ['volume']

    @classmethod
    def _from_db_object(cls, context, attachment, db_attachment,
                        expected_attrs=None):
        """Populate *attachment* (and optionally its volume) from a DB
        row."""
        if expected_attrs is None:
            expected_attrs = cls._get_expected_attrs(context)
        for name, field in attachment.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_attachment.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            attachment[name] = value
        if 'volume' in expected_attrs:
            db_volume = db_attachment.get('volume')
            if db_volume:
                attachment.volume = objects.Volume._from_db_object(
                    context, objects.Volume(), db_volume)

        attachment._context = context
        attachment.obj_reset_changes()
        return attachment

    def obj_load_attr(self, attrname):
        """Lazy-load the ``volume`` relationship from the DB."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'volume':
            # BUG FIX: the volume must be fetched by its own id
            # (self.volume_id), not by the attachment's id (self.id) as the
            # original code did -- they are distinct UUIDs, so the lookup
            # either raised VolumeNotFound or returned the wrong volume.
            # Later revisions of this class use volume_id here as well.
            volume = objects.Volume.get_by_id(self._context, self.volume_id)
            self.volume = volume

        self.obj_reset_changes(fields=[attrname])

    def save(self):
        """Write modified fields back to the database."""
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))

            db.volume_attachment_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def finish_attach(self, instance_uuid, host_name, mount_point,
                      attach_mode='rw'):
        """Mark this attachment complete; return the updated Volume."""
        with self.obj_as_admin():
            db_volume, updated_values = db.volume_attached(
                self._context, self.id, instance_uuid, host_name,
                mount_point, attach_mode)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())
        return objects.Volume._from_db_object(self._context,
                                              objects.Volume(),
                                              db_volume)

    def create(self):
        """Insert this attachment into the DB.

        :raises ObjectActionError: if the object already has an ``id``.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        with self.obj_as_admin():
            db_attachment = db.volume_attach(self._context, updates)
        self._from_db_object(self._context, self, db_attachment)