def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance succeeds."""
    # current status is available
    volume = self._create_volume(self.ctx, {'provider_location': '',
                                            'size': 1})
    self.volume_api.reserve_volume(self.ctx, volume)
    values = {'volume_id': volume['id'],
              'attach_status': 'attaching',
              'attach_time': timeutils.utcnow(),
              'instance_uuid': 'abc123'}
    db.volume_attach(self.ctx, values)
    db.volume_admin_metadata_update(self.ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID,
                                        None, mountpoint, 'rw')

    self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
    self.assertEqual(volume['id'], attachment['volume_id'])
    self.assertEqual('attached', attachment['attach_status'])
def test_attach_attaching_volume_with_different_mode(self):
    """Test that attaching volume reserved for another mode fails."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                    'provider_location': '', 'size': 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    values = {'status': 'attaching',
              'instance_uuid': fakes.get_fake_uuid()}
    db.volume_update(ctx, volume['id'], values)
    db.volume_admin_metadata_update(ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    self.assertRaises(exception.InvalidVolume,
                      self.volume_api.attach,
                      ctx, volume, values['instance_uuid'], None,
                      mountpoint, 'ro')
    # cleanup
    svc.stop()
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance succeeds."""
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                    'provider_location': '', 'size': 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(ctx, volume)
    values = {'volume_id': volume['id'],
              'attach_status': 'attaching',
              'attach_time': timeutils.utcnow(),
              'instance_uuid': 'abc123'}
    db.volume_attach(ctx, values)
    db.volume_admin_metadata_update(ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
                                        None, mountpoint, 'rw')

    self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
    self.assertEqual(volume['id'], attachment['volume_id'])
    self.assertEqual('attached', attachment['attach_status'])
    svc.stop()
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance succeeds."""
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = self._create_volume(ctx, {'provider_location': '', 'size': 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(ctx, volume)
    values = {'volume_id': volume['id'],
              'attach_status': 'attaching',
              'attach_time': timeutils.utcnow(),
              'instance_uuid': 'abc123'}
    db.volume_attach(ctx, values)
    db.volume_admin_metadata_update(ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
                                        None, mountpoint, 'rw')

    self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
    self.assertEqual(volume['id'], attachment['volume_id'])
    self.assertEqual('attached', attachment['attach_status'])
    svc.stop()
def update_meta(self, _id, admin_meta):
    """Update the admin metadata."""
    admin_context = context.get_admin_context()
    db.volume_admin_metadata_update(admin_context, _id, admin_meta, False)
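A minimal usage sketch of the `delete` flag that every snippet here passes to db.volume_admin_metadata_update. This is not from the original source: `volume_id` is a hypothetical existing volume ID, and the merge/prune semantics are inferred from the comments in the save() implementations later in this section.

from cinder import context
from cinder import db

admin_ctx = context.get_admin_context()

# delete=False merges the given keys into the current admin metadata,
# leaving keys that are not named here untouched (as update_meta above does).
db.volume_admin_metadata_update(admin_ctx, volume_id,
                                {'readonly': 'True'}, False)

# delete=True replaces the stored mapping: keys absent from the argument
# are removed (the behavior the save() methods below rely on).
db.volume_admin_metadata_update(admin_ctx, volume_id,
                                {'attached_mode': 'rw'}, True)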
def test_attach_attaching_volume_with_different_mode(self):
    """Test that attaching volume reserved for another mode fails."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                    'provider_location': '', 'size': 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    values = {'status': 'attaching',
              'instance_uuid': fakes.get_fake_uuid()}
    db.volume_update(ctx, volume['id'], values)
    db.volume_admin_metadata_update(ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    self.assertRaises(exception.InvalidVolume,
                      self.volume_api.attach,
                      ctx, volume, values['instance_uuid'], None,
                      mountpoint, 'ro')
    # cleanup
    svc.stop()
def test_volume_update_with_admin_metadata(self):
    def stubs_volume_admin_metadata_get(context, volume_id):
        return {'key': 'value', 'readonly': 'True'}
    self.stubs.Set(db, 'volume_admin_metadata_get',
                   stubs_volume_admin_metadata_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1'}
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')

    updates = {
        "display_name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    self.assertEqual(0, len(self.notifier.notifications))
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'in-use',
        'display_description': 'displaydesc',
        'availability_zone': 'fakeaz',
        'display_name': 'Updated Test Name',
        'encrypted': False,
        'attachments': [{
            'attachment_id': attachment['id'],
            'id': '1',
            'volume_id': '1',
            'server_id': stubs.FAKE_UUID,
            'host_name': None,
            'device': '/'
        }],
        'multiattach': 'false',
        'bootable': 'false',
        'volume_type': None,
        'snapshot_id': None,
        'source_volid': None,
        'metadata': {'key': 'value', 'readonly': 'True'},
        'id': '1',
        'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
        'size': 1}}
    self.assertEqual(expected, res_dict)
    self.assertEqual(2, len(self.notifier.notifications))
def test_volume_show_with_admin_metadata(self):
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)

    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.show(req, '1')
    expected = {'volume': {'status': 'fakestatus',
                           'display_description': 'displaydesc',
                           'availability_zone': 'fakeaz',
                           'display_name': 'displayname',
                           'encrypted': False,
                           'attachments': [{'device': '/',
                                            'server_id': 'fakeuuid',
                                            'host_name': None,
                                            'id': '1',
                                            'volume_id': '1'}],
                           'bootable': 'false',
                           'volume_type': None,
                           'snapshot_id': None,
                           'source_volid': None,
                           'metadata': {'key': 'value',
                                        'readonly': 'True'},
                           'id': '1',
                           'created_at': datetime.datetime(1900, 1, 1,
                                                           1, 1, 1),
                           'size': 1}}
    self.assertEqual(res_dict, expected)
def test_volume_get_all_filters_limit(self):
    vol1 = db.volume_create(self.ctxt, {"display_name": "test1"})
    vol2 = db.volume_create(self.ctxt, {"display_name": "test2"})
    vol3 = db.volume_create(self.ctxt, {"display_name": "test2",
                                        "metadata": {"key1": "val1"}})
    vol4 = db.volume_create(self.ctxt, {"display_name": "test3",
                                        "metadata": {"key1": "val1",
                                                     "key2": "val2"}})
    vol5 = db.volume_create(self.ctxt, {"display_name": "test3",
                                        "metadata": {"key2": "val2",
                                                     "key3": "val3"},
                                        "host": "host5"})
    db.volume_admin_metadata_update(self.ctxt, vol5.id,
                                    {"readonly": "True"}, False)

    vols = [vol1, vol2, vol3, vol4, vol5]

    # Ensure we have 5 total volumes
    self._assertEqualsVolumeOrderResult(vols)

    # No filters, test limit
    self._assertEqualsVolumeOrderResult(vols[:1], limit=1)
    self._assertEqualsVolumeOrderResult(vols[:4], limit=4)

    # Just the test2 volumes
    filters = {"display_name": "test2"}
    self._assertEqualsVolumeOrderResult([vol2, vol3], filters=filters)
    self._assertEqualsVolumeOrderResult([vol2], limit=1, filters=filters)
    self._assertEqualsVolumeOrderResult([vol2, vol3], limit=2,
                                        filters=filters)
    self._assertEqualsVolumeOrderResult([vol2, vol3], limit=100,
                                        filters=filters)

    # metadata filters
    filters = {"metadata": {"key1": "val1"}}
    self._assertEqualsVolumeOrderResult([vol3, vol4], filters=filters)
    self._assertEqualsVolumeOrderResult([vol3], limit=1, filters=filters)
    self._assertEqualsVolumeOrderResult([vol3, vol4], limit=10,
                                        filters=filters)

    filters = {"metadata": {"readonly": "True"}}
    self._assertEqualsVolumeOrderResult([vol5], filters=filters)

    filters = {"metadata": {"key1": "val1", "key2": "val2"}}
    self._assertEqualsVolumeOrderResult([vol4], filters=filters)
    self._assertEqualsVolumeOrderResult([vol4], limit=1, filters=filters)

    # No match
    filters = {"metadata": {"key1": "val1",
                            "key2": "val2",
                            "key3": "val3"}}
    self._assertEqualsVolumeOrderResult([], filters=filters)
    filters = {"metadata": {"key1": "val1", "key2": "bogus"}}
    self._assertEqualsVolumeOrderResult([], filters=filters)
    filters = {"metadata": {"key1": "val1", "key2": "val1"}}
    self._assertEqualsVolumeOrderResult([], filters=filters)

    # Combination
    filters = {"display_name": "test2", "metadata": {"key1": "val1"}}
    self._assertEqualsVolumeOrderResult([vol3], filters=filters)
    self._assertEqualsVolumeOrderResult([vol3], limit=1, filters=filters)
    self._assertEqualsVolumeOrderResult([vol3], limit=100,
                                        filters=filters)
    filters = {"display_name": "test3",
               "metadata": {"key2": "val2", "key3": "val3"},
               "host": "host5"}
    self._assertEqualsVolumeOrderResult([vol5], filters=filters)
    self._assertEqualsVolumeOrderResult([vol5], limit=1, filters=filters)
def delete_entry(self, volume, entry):
    """Remove a snapshot's ID from the volume's admin metadata.

    :param volume: OpenStack Volume Object.
    :param entry: ID of the snapshot entry to remove.
    """
    admin_context = context.get_admin_context()
    metadata = db.volume_admin_metadata_get(admin_context, volume['id'])
    if entry in metadata:
        del metadata[entry]
        # delete=True replaces the stored mapping, so the entry removed
        # above is dropped from the database as well.
        db.volume_admin_metadata_update(
            admin_context, volume['id'], metadata, delete=True)
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('consistencygroup changed'))
        if 'glance_metadata' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('glance_metadata changed'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('snapshots changed'))
        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(self._context,
                                                      self.id, metadata,
                                                      True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
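A minimal sketch of how the save() variant above routes admin metadata to db.volume_admin_metadata_update. Not from the original source; it assumes a deployed Cinder database layer and an existing `volume_id` (hypothetical value).

from cinder import context
from cinder import objects

admin_ctx = context.get_admin_context()
vol = objects.Volume.get_by_id(admin_ctx, volume_id)  # hypothetical id
vol.admin_metadata = {'readonly': 'True', 'attached_mode': 'rw'}
# save() pops 'admin_metadata' from the pending changes and calls
# db.volume_admin_metadata_update(ctx, vol.id, metadata, True); with
# delete=True, any admin-metadata key not listed above is removed.
vol.save()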
def test_attach_attaching_volume_with_different_mode(self):
    """Test that attaching volume reserved for another mode fails."""
    # current status is available
    volume = self._create_volume(self.ctx, {'provider_location': '',
                                            'size': 1})
    values = {'status': 'attaching',
              'instance_uuid': fake.INSTANCE_ID}
    db.volume_update(self.ctx, volume['id'], values)
    db.volume_admin_metadata_update(self.ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    self.assertRaises(exception.InvalidVolume,
                      self.volume_api.attach,
                      self.ctx, volume, values['instance_uuid'], None,
                      mountpoint, 'ro')
def test_volume_update_with_admin_metadata(self):
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)

    updates = {
        "display_name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'fakestatus',
        'display_description': 'displaydesc',
        'availability_zone': 'fakeaz',
        'display_name': 'Updated Test Name',
        'encrypted': False,
        'attachments': [{
            'id': '1',
            'volume_id': '1',
            'server_id': 'fakeuuid',
            'host_name': None,
            'device': '/'
        }],
        'bootable': 'false',
        'volume_type': None,
        'snapshot_id': None,
        'source_volid': None,
        'metadata': {'key': 'value', 'readonly': 'True'},
        'id': '1',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'size': 1}}
    self.assertEqual(res_dict, expected)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_update_with_admin_metadata(self):
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)

    updates = {
        "display_name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'fakestatus',
        'display_description': 'displaydesc',
        'availability_zone': 'fakeaz',
        'display_name': 'Updated Test Name',
        'encrypted': False,
        'attachments': [{
            'id': '1',
            'volume_id': '1',
            'server_id': 'fakeuuid',
            'host_name': None,
            'device': '/'
        }],
        'bootable': 'false',
        'volume_type': 'None',
        'snapshot_id': None,
        'source_volid': None,
        'metadata': {'key': 'value', 'readonly': 'True'},
        'id': '1',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'size': 1}}
    self.assertEqual(res_dict, expected)
    self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_show_with_admin_metadata(self):
    volume = stubs.stub_volume(fake.VOLUME_ID)
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(),
                                    fake.VOLUME_ID,
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': fake.VOLUME_ID}
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       fake.INSTANCE_ID, None, '/')

    req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
    admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.show(req, fake.VOLUME_ID)
    expected = {'volume': {'status': 'in-use',
                           'display_description': 'displaydesc',
                           'availability_zone': 'fakeaz',
                           'display_name': 'displayname',
                           'encrypted': False,
                           'attachments': [
                               {'attachment_id': attachment['id'],
                                'device': '/',
                                'server_id': fake.INSTANCE_ID,
                                'host_name': None,
                                'id': fake.VOLUME_ID,
                                'volume_id': fake.VOLUME_ID}],
                           'multiattach': 'false',
                           'bootable': 'false',
                           'volume_type': None,
                           'snapshot_id': None,
                           'source_volid': None,
                           'metadata': {'key': 'value',
                                        'readonly': 'True'},
                           'id': fake.VOLUME_ID,
                           'created_at': datetime.datetime(
                               1900, 1, 1, 1, 1, 1,
                               tzinfo=iso8601.iso8601.Utc()),
                           'size': 1}}
    self.assertEqual(expected, res_dict)
def test_volume_show_with_admin_metadata(self):
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1'}
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')

    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.show(req, '1')
    expected = {'volume': {'status': 'in-use',
                           'display_description': 'displaydesc',
                           'availability_zone': 'fakeaz',
                           'display_name': 'displayname',
                           'encrypted': False,
                           'attachments': [
                               {'attachment_id': attachment['id'],
                                'device': '/',
                                'server_id': stubs.FAKE_UUID,
                                'host_name': None,
                                'id': '1',
                                'volume_id': '1'}],
                           'multiattach': 'false',
                           'bootable': 'false',
                           'volume_type': None,
                           'snapshot_id': None,
                           'source_volid': None,
                           'metadata': {'key': 'value',
                                        'readonly': 'True'},
                           'id': '1',
                           'created_at': datetime.datetime(1900, 1, 1,
                                                           1, 1, 1),
                           'size': 1}}
    self.assertEqual(res_dict, expected)
def test_attach_attaching_volume_with_different_mode(self):
    """Test that attaching volume reserved for another mode fails."""
    # current status is available
    volume = self._create_volume(self.ctx,
                                 {"provider_location": "", "size": 1})
    values = {"status": "attaching", "instance_uuid": fake.INSTANCE_ID}
    db.volume_update(self.ctx, volume["id"], values)
    db.volume_admin_metadata_update(self.ctx, volume["id"],
                                    {"attached_mode": "rw"}, False)
    mountpoint = "/dev/vbd"
    self.assertRaises(
        exception.InvalidVolume,
        self.volume_api.attach,
        self.ctx,
        volume,
        values["instance_uuid"],
        None,
        mountpoint,
        "ro",
    )
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance succeeds."""
    # current status is available
    volume = self._create_volume(self.ctx,
                                 {"provider_location": "", "size": 1})
    self.volume_api.reserve_volume(self.ctx, volume)
    values = {
        "volume_id": volume["id"],
        "attach_status": "attaching",
        "attach_time": timeutils.utcnow(),
        "instance_uuid": "abc123",
    }
    db.volume_attach(self.ctx, values)
    db.volume_admin_metadata_update(self.ctx, volume["id"],
                                    {"attached_mode": "rw"}, False)
    mountpoint = "/dev/vbd"
    attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID,
                                        None, mountpoint, "rw")

    self.assertEqual(fake.INSTANCE_ID, attachment["instance_uuid"])
    self.assertEqual(volume["id"], attachment["volume_id"])
    self.assertEqual("attached", attachment["attach_status"])
def save(self):
    # TODO: (Y release) Remove this online migration code
    # Pass self directly since it's a CinderObjectDictCompat
    self._ensure_use_quota_is_set(self)

    updates = self.cinder_obj_get_changes()
    if updates:
        # NOTE(xyang): Allow this to pass if 'consistencygroup' is
        # set to None. This is to support backward compatibility.
        # Also remove 'consistencygroup' from updates because
        # consistencygroup is the name of a relationship in the ORM
        # Volume model, so SQLA tries to do some kind of update of
        # the foreign key based on the provided updates if
        # 'consistencygroup' is in updates.
        if updates.pop('consistencygroup', None):
            raise exception.ObjectActionError(
                action='save', reason=_('consistencygroup changed'))
        if 'group' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('group changed'))
        if 'glance_metadata' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('glance_metadata changed'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('snapshots changed'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('cluster changed'))

        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(
                self._context, self.id, metadata, True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        # When we are creating a volume and we change from 'creating'
        # status to 'downloading' status we have to change the worker
        # entry in the DB to reflect this change, otherwise the cleanup
        # will not be performed as it will be mistaken for a volume that
        # has been somehow changed (reset status, forced operation...)
        if updates.get('status') == 'downloading':
            self.set_worker()

        # updates are changed after popping out metadata.
        if updates:
            db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def test_attach_attaching_volume_with_different_mode(self):
    """Test that attaching volume reserved for another mode fails."""
    # admin context
    ctx = context.RequestContext("admin", "fake", True)
    # current status is available
    volume = self._create_volume(ctx, {"provider_location": "", "size": 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service("volume", host="test")
    self.addCleanup(svc.stop)
    values = {"status": "attaching",
              "instance_uuid": fakes.get_fake_uuid()}
    db.volume_update(ctx, volume["id"], values)
    db.volume_admin_metadata_update(ctx, volume["id"],
                                    {"attached_mode": "rw"}, False)
    mountpoint = "/dev/vbd"
    self.assertRaises(
        exception.InvalidVolume,
        self.volume_api.attach,
        ctx,
        volume,
        values["instance_uuid"],
        None,
        mountpoint,
        "ro",
    )
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance succeeds."""
    ctx = context.RequestContext("admin", "fake", True)
    # current status is available
    volume = db.volume_create(ctx, {"status": "available", "host": "test",
                                    "provider_location": "", "size": 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service("volume", host="test")
    self.volume_api.reserve_volume(ctx, volume)
    values = {
        "volume_id": volume["id"],
        "attach_status": "attaching",
        "attach_time": timeutils.utcnow(),
        "instance_uuid": "abc123",
    }
    db.volume_attach(ctx, values)
    db.volume_admin_metadata_update(ctx, volume["id"],
                                    {"attached_mode": "rw"}, False)
    mountpoint = "/dev/vbd"
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
                                        None, mountpoint, "rw")

    self.assertEqual(stubs.FAKE_UUID, attachment["instance_uuid"])
    self.assertEqual(volume["id"], attachment["volume_id"])
    self.assertEqual("attached", attachment["attach_status"])
    svc.stop()
def test_volume_list_detail_with_admin_metadata(self):
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)

    req = fakes.HTTPRequest.blank('/v1/volumes/detail')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.index(req)
    expected = {'volumes': [{'status': 'fakestatus',
                             'display_description': 'displaydesc',
                             'availability_zone': 'fakeaz',
                             'display_name': 'displayname',
                             'encrypted': False,
                             'attachments': [{'device': '/',
                                              'server_id': 'fakeuuid',
                                              'host_name': None,
                                              'id': '1',
                                              'volume_id': '1'}],
                             'bootable': 'false',
                             'volume_type': 'None',
                             'snapshot_id': None,
                             'source_volid': None,
                             'metadata': {'key': 'value',
                                          'readonly': 'True'},
                             'id': '1',
                             'created_at': datetime.datetime(1, 1, 1,
                                                             1, 1, 1),
                             'size': 1}]}
    self.assertEqual(expected, res_dict)
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        # NOTE(xyang): Allow this to pass if 'consistencygroup' is
        # set to None. This is to support backward compatibility.
        # Also remove 'consistencygroup' from updates because
        # consistencygroup is the name of a relationship in the ORM
        # Volume model, so SQLA tries to do some kind of update of
        # the foreign key based on the provided updates if
        # 'consistencygroup' is in updates.
        if updates.pop('consistencygroup', None):
            raise exception.ObjectActionError(
                action='save', reason=_('consistencygroup changed'))
        if 'group' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('group changed'))
        if 'glance_metadata' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('glance_metadata changed'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('snapshots changed'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('cluster changed'))

        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(self._context,
                                                      self.id, metadata,
                                                      True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        # When we are creating a volume and we change from 'creating'
        # status to 'downloading' status we have to change the worker
        # entry in the DB to reflect this change, otherwise the cleanup
        # will not be performed as it will be mistaken for a volume that
        # has been somehow changed (reset status, forced operation...)
        if updates.get('status') == 'downloading':
            self.set_worker()

        # updates are changed after popping out metadata.
        if updates:
            db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(
                self._context, self.id, metadata, True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(self._context,
                                                      self.id, metadata,
                                                      True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        if 'consistencygroup' in updates:
            # NOTE(xyang): Allow this to pass if 'consistencygroup' is
            # set to None. This is to support backward compatibility.
            if updates.get('consistencygroup'):
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
        if 'group' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('group changed'))
        if 'glance_metadata' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('glance_metadata changed'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('snapshots changed'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('cluster changed'))

        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(self._context,
                                                      self.id, metadata,
                                                      True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        # When we are creating a volume and we change from 'creating'
        # status to 'downloading' status we have to change the worker
        # entry in the DB to reflect this change, otherwise the cleanup
        # will not be performed as it will be mistaken for a volume that
        # has been somehow changed (reset status, forced operation...)
        if updates.get('status') == 'downloading':
            self.set_worker()

        # updates are changed after popping out metadata.
        if updates:
            db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('consistencygroup changed'))
        if 'group' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('group changed'))
        if 'glance_metadata' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('glance_metadata changed'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('snapshots changed'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='save', reason=_('cluster changed'))

        if 'metadata' in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop('metadata', None)
            self.metadata = db.volume_metadata_update(self._context,
                                                      self.id, metadata,
                                                      True)
        if self._context.is_admin and 'admin_metadata' in updates:
            metadata = updates.pop('admin_metadata', None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        # When we are creating a volume and we change from 'creating'
        # status to 'downloading' status we have to change the worker
        # entry in the DB to reflect this change, otherwise the cleanup
        # will not be performed as it will be mistaken for a volume that
        # has been somehow changed (reset status, forced operation...)
        if updates.get('status') == 'downloading':
            self.set_worker()

        # updates are changed after popping out metadata.
        if updates:
            db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def save(self):
    updates = self.cinder_obj_get_changes()
    if updates:
        if "consistencygroup" in updates:
            raise exception.ObjectActionError(
                action="save", reason=_("consistencygroup changed"))
        if "glance_metadata" in updates:
            raise exception.ObjectActionError(
                action="save", reason=_("glance_metadata changed"))
        if "snapshots" in updates:
            raise exception.ObjectActionError(
                action="save", reason=_("snapshots changed"))
        if "metadata" in updates:
            # Metadata items that are not specified in the
            # self.metadata will be deleted
            metadata = updates.pop("metadata", None)
            self.metadata = db.volume_metadata_update(self._context,
                                                      self.id, metadata,
                                                      True)
        if self._context.is_admin and "admin_metadata" in updates:
            metadata = updates.pop("admin_metadata", None)
            self.admin_metadata = db.volume_admin_metadata_update(
                self._context, self.id, metadata, True)

        db.volume_update(self._context, self.id, updates)
        self.obj_reset_changes()
def admin_metadata_update(self, metadata, delete, add=True, update=True):
    new_metadata = db.volume_admin_metadata_update(self._context, self.id,
                                                   metadata, delete, add,
                                                   update)
    self.admin_metadata = new_metadata
    self._reset_metadata_tracking(fields=('admin_metadata', ))
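A usage sketch for the object-level helper above. Not from the original source: it assumes `vol` is a Volume object bound to an admin context, and it reads the `add`/`update` flags by their names in the signature (presumably: `add` permits creating new keys, `update` permits overwriting existing ones).

# Merge one key into the admin metadata without touching the rest
# (delete=False), while refusing to overwrite values that already exist
# (update=False). `vol` is assumed to be a Volume loaded with an admin
# RequestContext.
vol.admin_metadata_update({'attached_mode': 'ro'},
                          delete=False, update=False)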
def test_volume_update_with_admin_metadata(self):
    def stubs_volume_admin_metadata_get(context, volume_id):
        return {'key': 'value', 'readonly': 'True'}
    self.stubs.Set(db, 'volume_admin_metadata_get',
                   stubs_volume_admin_metadata_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1'}
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')

    updates = {
        "display_name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    self.assertEqual(0, len(self.notifier.notifications))
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'in-use',
        'display_description': 'displaydesc',
        'availability_zone': 'fakeaz',
        'display_name': 'Updated Test Name',
        'encrypted': False,
        'attachments': [{
            'attachment_id': attachment['id'],
            'id': '1',
            'volume_id': '1',
            'server_id': stubs.FAKE_UUID,
            'host_name': None,
            'device': '/'
        }],
        'multiattach': 'false',
        'bootable': 'false',
        'volume_type': None,
        'snapshot_id': None,
        'source_volid': None,
        'metadata': {'key': 'value', 'readonly': 'True'},
        'id': '1',
        'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
        'size': 1}}
    self.assertEqual(expected, res_dict)
    self.assertEqual(2, len(self.notifier.notifications))