def test_migrate_attachment_specs(self):
    """Verify attachment_specs rows are folded into the connector field."""
    # Start with a fresh attachment in the 'attaching' state.
    attachment = objects.VolumeAttachment(
        self.context, attach_status='attaching', volume_id=fake.VOLUME_ID)
    attachment.create()
    # Store a connector through the legacy attachment_specs table. Note
    # that the key and value have to be strings there: the table cannot
    # hold things like a wwpns list for a fibrechannel connector.
    connector = {'host': '127.0.0.1'}
    db.attachment_specs_update_or_create(
        self.context, attachment.id, connector)
    # Re-load the attachment from the database; the online migration in
    # _from_db_object should have copied the connector onto the
    # volume_attachment row and emptied the attachment_specs table.
    attachment = objects.VolumeAttachment.get_by_id(
        self.context, attachment.id)
    self.assertIn('connector', attachment)
    self.assertDictEqual(connector, attachment.connector)
    self.assertEqual(0, len(db.attachment_specs_get(
        self.context, attachment.id)))
    # Now that the connector lives in a serialized-json column we must
    # be able to store a fibrechannel connector with a wwpns list value.
    connector['wwpns'] = ['21000024ff34c92d', '21000024ff34c92c']
    attachment.connector = connector
    attachment.save()
    # One more round-trip through the DB to prove the list survived.
    attachment = objects.VolumeAttachment.get_by_id(
        self.context, attachment.id)
    self.assertIn('connector', attachment)
    self.assertDictEqual(connector, attachment.connector)
def test_migrate_attachment_specs(self):
    """Check the attachment_specs online data migration end to end."""
    # Create an attachment.
    attachment = objects.VolumeAttachment(self.context,
                                          attach_status='attaching',
                                          volume_id=fake.VOLUME_ID)
    attachment.create()
    # Create some attachment_specs. The table only takes string keys and
    # values, so it can't represent e.g. a fibrechannel wwpns list.
    connector = {'host': '127.0.0.1'}
    db.attachment_specs_update_or_create(self.context, attachment.id,
                                         connector)
    # Fetch the attachment object again: loading it should migrate the
    # connector from attachment_specs into the volume_attachment table
    # and delete the now-redundant specs rows.
    attachment = objects.VolumeAttachment.get_by_id(self.context,
                                                    attachment.id)
    self.assertIn('connector', attachment)
    self.assertDictEqual(connector, attachment.connector)
    remaining_specs = db.attachment_specs_get(self.context, attachment.id)
    self.assertEqual(0, len(remaining_specs))
    # Make sure we can store a fibrechannel type connector that has a
    # wwpns list value.
    connector['wwpns'] = ['21000024ff34c92d', '21000024ff34c92c']
    attachment.connector = connector
    attachment.save()
    # Get the object from the DB again and make sure the connector is
    # there, list value included.
    attachment = objects.VolumeAttachment.get_by_id(self.context,
                                                    attachment.id)
    self.assertIn('connector', attachment)
    self.assertDictEqual(connector, attachment.connector)
def _from_db_object(cls, context, attachment, db_attachment,
                    expected_attrs=None):
    """Populate a VolumeAttachment object from its database record.

    :param context: request context used for DB calls and nested loads
    :param attachment: blank VolumeAttachment object to fill in
    :param db_attachment: ORM row (dict-like) backing the attachment
    :param expected_attrs: optional attribute names to load; defaults to
                           ``cls._get_expected_attrs(context)``
    :returns: the populated ``attachment``
    """
    if expected_attrs is None:
        expected_attrs = cls._get_expected_attrs(context)
    # Copy every non-optional field from the DB row onto the object.
    for name, field in attachment.fields.items():
        if name in cls.OPTIONAL_FIELDS:
            continue
        value = db_attachment.get(name)
        if isinstance(field, fields.IntegerField):
            # Integer columns may be NULL in the DB; normalize to 0.
            value = value or 0
        if name in ('connection_info', 'connector'):
            # Both of these fields are nullable serialized json dicts.
            setattr(attachment, name,
                    jsonutils.loads(value) if value else None)
        else:
            attachment[name] = value
    # NOTE: Check against the ORM instance's dictionary instead of using
    # hasattr or get to avoid the lazy loading of the Volume on
    # VolumeList.get_all.
    # Getting a Volume loads its VolumeAttachmentList, which think they
    # have the volume loaded, but they don't. More detail on
    # https://review.opendev.org/632549
    # and its related bug report.
    if 'volume' in expected_attrs and 'volume' in vars(db_attachment):
        db_volume = db_attachment.volume
        if db_volume:
            attachment.volume = objects.Volume._from_db_object(
                context, objects.Volume(), db_volume)
    # _context must be set before the save() below, and changes reset so
    # only the migrated connector is persisted.
    attachment._context = context
    attachment.obj_reset_changes()

    # This is an online data migration which we should remove when enough
    # time has passed and we have a blocker schema migration to check to
    # make sure that the attachment_specs table is empty. Operators should
    # run the "cinder-manage db online_data_migrations" CLI to force the
    # migration on-demand.
    connector = db.attachment_specs_get(context, attachment.id)
    if connector:
        # Update ourselves and delete the attachment_specs.
        attachment.connector = connector
        attachment.save()
        # TODO(mriedem): Really need a delete-all method for this.
        for spec_key in connector:
            db.attachment_specs_delete(
                context, attachment.id, spec_key)
    return attachment
def _from_db_object(cls, context, attachment, db_attachment,
                    expected_attrs=None):
    """Populate a VolumeAttachment object from its database record.

    :param context: request context used for DB calls and nested loads
    :param attachment: blank VolumeAttachment object to fill in
    :param db_attachment: ORM row backing the attachment
    :param expected_attrs: optional attribute names to load; defaults to
                           ``cls._get_expected_attrs(context)``
    :returns: the populated ``attachment``
    """
    if expected_attrs is None:
        expected_attrs = cls._get_expected_attrs(context)
    # Copy every non-optional field from the DB row onto the object.
    for name, field in attachment.fields.items():
        if name in cls.OPTIONAL_FIELDS:
            continue
        value = db_attachment.get(name)
        if isinstance(field, fields.IntegerField):
            # Integer columns may be NULL in the DB; normalize to 0.
            value = value or 0
        if name in ('connection_info', 'connector'):
            # Both of these fields are nullable serialized json dicts.
            setattr(attachment, name,
                    jsonutils.loads(value) if value else None)
        else:
            attachment[name] = value

    # NOTE: The hasattr check avoids lazy loading the Volume when it was
    # not joined into the query (e.g. via VolumeList.get_all): getting a
    # Volume loads its VolumeAttachmentList, which think they have the
    # volume loaded, but they don't.
    if 'volume' in expected_attrs and hasattr(db_attachment, 'volume'):
        db_volume = db_attachment.volume
        if db_volume:
            attachment.volume = objects.Volume._from_db_object(
                context, objects.Volume(), db_volume)

    # Set _context BEFORE the migration below — attachment.save() needs
    # it — and reset changes so only the migrated connector is persisted.
    attachment._context = context
    attachment.obj_reset_changes()

    # This is an online data migration which we should remove when enough
    # time has passed and we have a blocker schema migration to check to
    # make sure that the attachment_specs table is empty. Operators should
    # run the "cinder-manage db online_data_migrations" CLI to force the
    # migration on-demand.
    connector = db.attachment_specs_get(context, attachment.id)
    if connector:
        # Update ourselves and delete the attachment_specs.
        attachment.connector = connector
        attachment.save()
        # TODO(mriedem): Really need a delete-all method for this.
        for spec_key in connector:
            db.attachment_specs_delete(context, attachment.id, spec_key)

    return attachment
def _from_db_object(cls, context, attachment, db_attachment,
                    expected_attrs=None):
    """Hydrate ``attachment`` from the given DB row and return it.

    Also performs the attachment_specs -> connector online migration
    when legacy spec rows exist for this attachment.
    """
    if expected_attrs is None:
        expected_attrs = cls._get_expected_attrs(context)

    json_columns = ('connection_info', 'connector')
    for attr_name, attr_field in attachment.fields.items():
        if attr_name in cls.OPTIONAL_FIELDS:
            continue
        raw = db_attachment.get(attr_name)
        if isinstance(attr_field, fields.IntegerField):
            # NULL integer columns are normalized to 0.
            raw = raw or 0
        if attr_name in json_columns:
            # Both of these fields are nullable serialized json dicts.
            setattr(attachment, attr_name,
                    jsonutils.loads(raw) if raw else None)
        else:
            attachment[attr_name] = raw

    # NOTE: hasattr check is necessary to avoid doing a lazy loading when
    # loading VolumeList.get_all: Getting a Volume loads its
    # VolumeAttachmentList, which think they have the volume loaded, but
    # they don't. More detail on https://review.openstack.org/632549
    if 'volume' in expected_attrs and hasattr(db_attachment, 'volume'):
        volume_row = db_attachment.volume
        if volume_row:
            attachment.volume = objects.Volume._from_db_object(
                context, objects.Volume(), volume_row)

    attachment._context = context
    attachment.obj_reset_changes()

    # This is an online data migration which we should remove when enough
    # time has passed and we have a blocker schema migration to check to
    # make sure that the attachment_specs table is empty. Operators should
    # run the "cinder-manage db online_data_migrations" CLI to force the
    # migration on-demand.
    legacy_specs = db.attachment_specs_get(context, attachment.id)
    if legacy_specs:
        # Update ourselves and delete the attachment_specs.
        attachment.connector = legacy_specs
        attachment.save()
        # TODO(mriedem): Really need a delete-all method for this.
        for spec_key in legacy_specs:
            db.attachment_specs_delete(context, attachment.id, spec_key)

    return attachment