def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance fails."""
    # current status is available
    volume = self._create_volume(self.ctx, {'provider_location': '',
                                            'size': 1})
    self.volume_api.reserve_volume(self.ctx, volume)
    values = {'volume_id': volume['id'],
              'attach_status': 'attaching',
              'attach_time': timeutils.utcnow(),
              'instance_uuid': 'abc123', }
    db.volume_attach(self.ctx, values)
    db.volume_admin_metadata_update(self.ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID,
                                        None, mountpoint, 'rw')
    self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid'])
    # Fix: assertEqual's third positional parameter is the failure
    # message; the stray volume['id'] passed there was accidental.
    self.assertEqual(volume['id'], attachment['volume_id'])
    self.assertEqual('attached', attachment['attach_status'])
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance fails."""
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                    'provider_location': '', 'size': 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(ctx, volume)
    values = {'volume_id': volume['id'],
              'attach_status': 'attaching',
              'attach_time': timeutils.utcnow(),
              'instance_uuid': 'abc123', }
    db.volume_attach(ctx, values)
    db.volume_admin_metadata_update(ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
                                        None, mountpoint, 'rw')
    self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
    # Fix: dropped the accidental third argument, which assertEqual
    # would have treated as the failure message.
    self.assertEqual(volume['id'], attachment['volume_id'])
    self.assertEqual('attached', attachment['attach_status'])
    svc.stop()
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance fails."""
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = self._create_volume(ctx, {'provider_location': '',
                                       'size': 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(ctx, volume)
    values = {'volume_id': volume['id'],
              'attach_status': 'attaching',
              'attach_time': timeutils.utcnow(),
              'instance_uuid': 'abc123', }
    db.volume_attach(ctx, values)
    db.volume_admin_metadata_update(ctx, volume['id'],
                                    {"attached_mode": 'rw'}, False)
    mountpoint = '/dev/vbd'
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
                                        None, mountpoint, 'rw')
    self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
    # Fix: dropped the accidental third argument, which assertEqual
    # would have treated as the failure message.
    self.assertEqual(volume['id'], attachment['volume_id'])
    self.assertEqual('attached', attachment['attach_status'])
    svc.stop()
def test_create_snapshot_in_use(self):
    """Test snapshot in use can be created forcibly."""
    instance_uuid = 'a14dc210-d43b-4792-a608-09fe0824de54'
    # create volume and attach to the instance
    volume = tests_utils.create_volume(self.context, **self.volume_params)
    self.volume.create_volume(self.context, volume)
    values = {'volume_id': volume['id'],
              'instance_uuid': instance_uuid,
              'attach_status': fields.VolumeAttachStatus.ATTACHING, }
    attachment = db.volume_attach(self.context, values)
    db.volume_attached(self.context, attachment['id'], instance_uuid,
                       None, '/dev/sda1')

    volume_api = cinder.volume.api.API()
    volume = volume_api.get(self.context, volume['id'])
    # A plain snapshot of an in-use volume is rejected ...
    self.assertRaises(exception.InvalidVolume,
                      volume_api.create_snapshot,
                      self.context, volume,
                      'fake_name', 'fake_description')
    # ... but succeeds when explicitly allowed in-use.
    snapshot_ref = volume_api.create_snapshot(self.context,
                                              volume,
                                              'fake_name',
                                              'fake_description',
                                              allow_in_use=True)
    snapshot_ref.destroy()
    db.volume_destroy(self.context, volume['id'])

    # create volume and attach to the host
    volume = tests_utils.create_volume(self.context, **self.volume_params)
    self.volume.create_volume(self.context, volume)
    values = {'volume_id': volume['id'],
              'attached_host': 'fake_host',
              'attach_status': fields.VolumeAttachStatus.ATTACHING, }
    attachment = db.volume_attach(self.context, values)
    db.volume_attached(self.context, attachment['id'], None,
                       'fake_host', '/dev/sda1')

    volume_api = cinder.volume.api.API()
    volume = volume_api.get(self.context, volume['id'])
    self.assertRaises(exception.InvalidVolume,
                      volume_api.create_snapshot,
                      self.context, volume,
                      'fake_name', 'fake_description')
    snapshot_ref = volume_api.create_snapshot(self.context,
                                              volume,
                                              'fake_name',
                                              'fake_description',
                                              allow_in_use=True)
    snapshot_ref.destroy()
    db.volume_destroy(self.context, volume['id'])
def test_create_snapshot_force(self):
    """Test snapshot in use can be created forcibly."""
    instance_uuid = '12345678-1234-5678-1234-567812345678'
    # create volume and attach to the instance
    volume = tests_utils.create_volume(self.context, **self.volume_params)
    self.volume.create_volume(self.context, volume)
    values = {'volume_id': volume['id'],
              'instance_uuid': instance_uuid,
              'attach_status': fields.VolumeAttachStatus.ATTACHING, }
    attachment = db.volume_attach(self.context, values)
    db.volume_attached(self.context, attachment['id'], instance_uuid,
                       None, '/dev/sda1')

    volume_api = cinder.volume.api.API()
    volume = volume_api.get(self.context, volume['id'])
    # A plain snapshot of an in-use volume is rejected ...
    self.assertRaises(exception.InvalidVolume,
                      volume_api.create_snapshot,
                      self.context, volume,
                      'fake_name', 'fake_description')
    # ... while the force variant succeeds.
    snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                    volume,
                                                    'fake_name',
                                                    'fake_description')
    snapshot_ref.destroy()
    db.volume_destroy(self.context, volume['id'])

    # create volume and attach to the host
    volume = tests_utils.create_volume(self.context, **self.volume_params)
    self.volume.create_volume(self.context, volume)
    values = {'volume_id': volume['id'],
              'attached_host': 'fake_host',
              'attach_status': fields.VolumeAttachStatus.ATTACHING, }
    attachment = db.volume_attach(self.context, values)
    db.volume_attached(self.context, attachment['id'], None,
                       'fake_host', '/dev/sda1')

    volume_api = cinder.volume.api.API()
    volume = volume_api.get(self.context, volume['id'])
    self.assertRaises(exception.InvalidVolume,
                      volume_api.create_snapshot,
                      self.context, volume,
                      'fake_name', 'fake_description')
    snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                    volume,
                                                    'fake_name',
                                                    'fake_description')
    snapshot_ref.destroy()
    db.volume_destroy(self.context, volume['id'])
def test_attachment_delete_remove_export_fail(self):
    """attachment_delete removes attachment on remove_export failure."""
    self.mock_object(self.manager.driver, 'remove_export',
                     side_effect=Exception)
    # Report that the connection is not shared
    self.mock_object(self.manager, '_connection_terminate',
                     return_value=False)
    vref = tests_utils.create_volume(self.context, status='in-use',
                                     attach_status='attached')
    values = {'volume_id': vref.id,
              'volume_host': vref.host,
              'attach_status': 'reserved',
              'instance_uuid': fake.UUID1}
    attach = db.volume_attach(self.context, values)
    # Confirm the volume OVO has the attachment before the deletion
    vref.refresh()
    expected_vol_status = (vref.status, vref.attach_status)
    self.assertEqual(1, len(vref.volume_attachment))
    self.manager.attachment_delete(self.context, attach.id, vref)
    # Manager doesn't change the resource status. It is changed on the API
    attachment = db.volume_attachment_get(self.context, attach.id)
    self.assertEqual(attach.attach_status, attachment.attach_status)
    vref = db.volume_get(self.context, vref.id)
    self.assertEqual(expected_vol_status,
                     (vref.status, vref.attach_status))
def test_attachment_delete(self):
    """Test attachment_delete."""
    vref = tests_utils.create_volume(self.context, status='available')
    self.manager.create_volume(self.context, vref)
    attach_values = {'volume_id': vref.id,
                     'volume_host': vref.host,
                     'attach_status': 'reserved',
                     'instance_uuid': fake.UUID1}
    attachment_ref = db.volume_attach(self.context, attach_values)
    attachment_ref = db.volume_attachment_get(self.context,
                                              attachment_ref['id'])
    vref.refresh()
    expected_status = (vref.status, vref.attach_status,
                       attachment_ref.attach_status)
    self.manager.attachment_delete(self.context, attachment_ref['id'],
                                   vref)
    # Manager doesn't change the resource status. It is changed on the API
    attachment_ref = db.volume_attachment_get(self.context,
                                              attachment_ref.id)
    vref.refresh()
    self.assertEqual(expected_status,
                     (vref.status, vref.attach_status,
                      attachment_ref.attach_status))
def test_attachment_delete_remove_export_fail(self):
    """attachment_delete removes attachment on remove_export failure."""
    self.mock_object(self.manager.driver, 'remove_export',
                     side_effect=Exception)
    # Report that the connection is not shared
    self.mock_object(self.manager, '_connection_terminate',
                     return_value=False)
    vref = tests_utils.create_volume(self.context, status='in-use',
                                     attach_status='attached')
    attach = db.volume_attach(self.context,
                              {'volume_id': vref.id,
                               'volume_host': vref.host,
                               'attach_status': 'reserved',
                               'instance_uuid': fake.UUID1})
    # Confirm the volume OVO has the attachment before the deletion
    vref.refresh()
    self.assertEqual(1, len(vref.volume_attachment))
    self.manager.attachment_delete(self.context, attach.id, vref)
    # Attachment has been removed from the DB
    self.assertRaises(exception.VolumeAttachmentNotFound,
                      db.volume_attachment_get,
                      self.context, attach.id)
    # Attachment has been removed from the volume OVO attachment list
    self.assertEqual(0, len(vref.volume_attachment))
def test_create_snapshot_force(self):
    """Test snapshot in use can be created forcibly."""
    instance_uuid = '12345678-1234-4678-1234-567812345678'
    # create volume and attach to the instance
    volume = tests_utils.create_volume(self.context, **self.volume_params)
    self.volume.create_volume(self.context, volume)
    attach_values = {'volume_id': volume['id'],
                     'instance_uuid': instance_uuid,
                     'attach_status': fields.VolumeAttachStatus.ATTACHING, }
    attachment = db.volume_attach(self.context, attach_values)
    db.volume_attached(self.context, attachment['id'], instance_uuid,
                       None, '/dev/sda1')
    volume_api = cinder.volume.api.API()
    volume = volume_api.get(self.context, volume['id'])
    # A plain snapshot of an in-use volume is rejected ...
    self.assertRaises(exception.InvalidVolume,
                      volume_api.create_snapshot,
                      self.context, volume,
                      'fake_name', 'fake_description')
    # ... while the force variant succeeds.
    snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                    volume,
                                                    'fake_name',
                                                    'fake_description')
    snapshot_ref.destroy()
    db.volume_destroy(self.context, volume['id'])
def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
                  mountpoint, mode='rw'):
    """Attach a volume in the DB and return the attached volume.

    Accepts either a volume id or a Volume OVO as ``volume_id``; when an
    OVO is passed, the DB result is converted back into an OVO.
    """
    if isinstance(volume_id, objects.Volume):
        volume_ovo = volume_id
        volume_id = volume_ovo.id
    else:
        volume_ovo = None
    attach_values = {
        'volume_id': volume_id,
        'attached_host': attached_host,
        'mountpoint': mountpoint,
        'attach_time': timeutils.utcnow(),
    }
    attachment = db.volume_attach(ctxt.elevated(), attach_values)
    volume, updated_values = db.volume_attached(
        ctxt.elevated(), attachment['id'], instance_uuid, attached_host,
        mountpoint, mode)
    if volume_ovo:
        # Rehydrate the DB row into a Volume OVO for the caller.
        cls = objects.Volume
        expected_attrs = cls._get_expected_attrs(ctxt)
        volume = cls._from_db_object(ctxt, cls(ctxt), volume,
                                     expected_attrs=expected_attrs)
    return volume
def test_attachment_create_update_and_delete(self,
                                             mock_rpc_attachment_update,
                                             mock_rpc_attachment_delete,
                                             mock_policy):
    """Test attachment_delete."""
    connection_info = {'fake_key': 'fake_value'}
    mock_rpc_attachment_update.return_value = connection_info
    vref = tests_utils.create_volume(self.context, status='available')
    aref = self.volume_api.attachment_create(self.context, vref,
                                             fake.UUID2)
    aref = objects.VolumeAttachment.get_by_id(self.context, aref.id)
    vref = objects.Volume.get_by_id(self.context, vref.id)
    connector = {'fake': 'connector'}
    self.volume_api.attachment_update(self.context, aref, connector)
    aref = objects.VolumeAttachment.get_by_id(self.context, aref.id)
    self.assertEqual(connection_info, aref.connection_info)
    # We mock the actual call that updates the status, so force it here.
    values = {'volume_id': vref.id,
              'volume_host': vref.host,
              'attach_status': 'attached',
              'instance_uuid': fake.UUID2}
    aref = db.volume_attach(self.context, values)
    aref = objects.VolumeAttachment.get_by_id(self.context, aref.id)
    self.assertEqual(vref.id, aref.volume_id)
    self.volume_api.attachment_delete(self.context, aref)
    mock_rpc_attachment_delete.assert_called_once_with(self.context,
                                                       aref.id, mock.ANY)
def _create_volume_attach(self, volume_id):
    """Attach *volume_id* to 'testhost' at /dev/vd0 directly in the DB."""
    attachment = db.volume_attach(self.ctxt,
                                  {'volume_id': volume_id,
                                   'attach_status': 'attached'})
    db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
                       '/dev/vd0')
def create(self):
    """Persist this attachment; raise if it was already created."""
    if self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='create',
                                          reason=_('already created'))
    updates = self.cinder_obj_get_changes()
    # The DB write requires an elevated (admin) context.
    with self.obj_as_admin():
        db_attachment = db.volume_attach(self._context, updates)
    self._from_db_object(self._context, self, db_attachment)
def test_volume_update_with_admin_metadata(self):
    def stubs_volume_admin_metadata_get(context, volume_id):
        return {'key': 'value', 'readonly': 'True'}
    self.stubs.Set(db, 'volume_admin_metadata_get',
                   stubs_volume_admin_metadata_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')
    updates = {
        "display_name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    self.assertEqual(0, len(self.notifier.notifications))
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'in-use',
        'display_description': 'displaydesc',
        'availability_zone': 'fakeaz',
        'display_name': 'Updated Test Name',
        'encrypted': False,
        'attachments': [{'attachment_id': attachment['id'],
                         'id': '1',
                         'volume_id': '1',
                         'server_id': stubs.FAKE_UUID,
                         'host_name': None,
                         'device': '/'}],
        'multiattach': 'false',
        'bootable': 'false',
        'volume_type': None,
        'snapshot_id': None,
        'source_volid': None,
        'metadata': {'key': 'value', 'readonly': 'True'},
        'id': '1',
        'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
        'size': 1}}
    self.assertEqual(expected, res_dict)
    self.assertEqual(2, len(self.notifier.notifications))
def test_attachment_update(self):
    """Test attachment_update."""
    connector = {"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
                 "ip": "192.168.1.20",
                 "platform": "x86_64",
                 "host": "tempest-1",
                 "os_type": "linux2",
                 "multipath": False}
    vref = tests_utils.create_volume(self.context, status='available')
    self.manager.create_volume(self.context, vref)
    values = {'volume_id': vref.id,
              'attached_host': vref.host,
              'attach_status': 'reserved',
              'instance_uuid': fake.UUID1,
              'attach_mode': 'rw'}
    attachment_ref = db.volume_attach(self.context, values)
    with mock.patch.object(self.manager, '_notify_about_volume_usage'), \
            mock.patch.object(self.manager.driver,
                              'attach_volume') as mock_attach:
        expected = {'encrypted': False,
                    'qos_specs': None,
                    'access_mode': 'rw',
                    'driver_volume_type': 'iscsi',
                    'attachment_id': attachment_ref.id}
        self.assertEqual(
            expected,
            self.manager.attachment_update(self.context, vref, connector,
                                           attachment_ref.id))
        mock_attach.assert_called_once_with(self.context, vref,
                                            attachment_ref.instance_uuid,
                                            connector['host'], "na")
        new_attachment_ref = db.volume_attachment_get(self.context,
                                                      attachment_ref.id)
        self.assertEqual(attachment_ref.instance_uuid,
                         new_attachment_ref['instance_uuid'])
        self.assertEqual(connector['host'],
                         new_attachment_ref['attached_host'])
        self.assertEqual('na', new_attachment_ref['mountpoint'])
        self.assertEqual('rw', new_attachment_ref['attach_mode'])
        new_volume_ref = db.volume_get(self.context, vref.id)
        self.assertEqual('attaching', new_volume_ref.status)
        self.assertEqual(fields.VolumeAttachStatus.ATTACHING,
                         new_volume_ref.attach_status)
def test_attachment_update(self):
    """Test attachment_update."""
    volume_params = {'status': 'available'}
    connector = {"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
                 "ip": "192.168.1.20",
                 "platform": "x86_64",
                 "host": "tempest-1",
                 "os_type": "linux2",
                 "multipath": False}
    vref = tests_utils.create_volume(self.context, **volume_params)
    self.manager.create_volume(self.context, vref)
    attach_values = {'volume_id': vref.id,
                     'attached_host': vref.host,
                     'attach_status': 'reserved',
                     'instance_uuid': fake.UUID1,
                     'attach_mode': 'rw'}
    attachment_ref = db.volume_attach(self.context, attach_values)
    with mock.patch.object(self.manager, '_notify_about_volume_usage'), \
            mock.patch.object(self.manager.driver,
                              'attach_volume') as mock_attach:
        expected = {'encrypted': False,
                    'qos_specs': None,
                    'access_mode': 'rw',
                    'driver_volume_type': 'iscsi',
                    'attachment_id': attachment_ref.id}
        self.assertEqual(
            expected,
            self.manager.attachment_update(self.context, vref, connector,
                                           attachment_ref.id))
        mock_attach.assert_called_once_with(self.context, vref,
                                            attachment_ref.instance_uuid,
                                            connector['host'], "na")
        new_attachment_ref = db.volume_attachment_get(self.context,
                                                      attachment_ref.id)
        self.assertEqual(attachment_ref.instance_uuid,
                         new_attachment_ref['instance_uuid'])
        self.assertEqual(connector['host'],
                         new_attachment_ref['attached_host'])
        self.assertEqual('na', new_attachment_ref['mountpoint'])
        self.assertEqual('rw', new_attachment_ref['attach_mode'])
        new_volume_ref = db.volume_get(self.context, vref.id)
        self.assertEqual('attaching', new_volume_ref.status)
        self.assertEqual(fields.VolumeAttachStatus.ATTACHING,
                         new_volume_ref.attach_status)
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance fails."""
    # current status is available
    volume = self._create_volume(self.ctx, {"provider_location": "",
                                            "size": 1})
    self.volume_api.reserve_volume(self.ctx, volume)
    values = {"volume_id": volume["id"],
              "attach_status": "attaching",
              "attach_time": timeutils.utcnow(),
              "instance_uuid": "abc123"}
    db.volume_attach(self.ctx, values)
    db.volume_admin_metadata_update(self.ctx, volume["id"],
                                    {"attached_mode": "rw"}, False)
    mountpoint = "/dev/vbd"
    attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID,
                                        None, mountpoint, "rw")
    self.assertEqual(fake.INSTANCE_ID, attachment["instance_uuid"])
    # Fix: assertEqual's third positional parameter is the failure
    # message; the stray volume["id"] passed there was accidental.
    self.assertEqual(volume["id"], attachment["volume_id"])
    self.assertEqual("attached", attachment["attach_status"])
def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
                  mountpoint, mode="rw"):
    """Create an attachment row for *volume_id* and mark it attached."""
    values = {"volume_id": volume_id,
              "attached_host": attached_host,
              "mountpoint": mountpoint,
              "attach_time": timeutils.utcnow()}
    attachment = db.volume_attach(ctxt, values)
    return db.volume_attached(ctxt, attachment["id"], instance_uuid,
                              attached_host, mountpoint, mode)
def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
                  mountpoint, mode='rw'):
    """Create an attachment row for *volume_id* and mark it attached."""
    attach_values = {
        'volume_id': volume_id,
        'attached_host': attached_host,
        'mountpoint': mountpoint,
        'attach_time': timeutils.utcnow(),
    }
    attachment = db.volume_attach(ctxt, attach_values)
    return db.volume_attached(ctxt, attachment['id'], instance_uuid,
                              attached_host, mountpoint, mode)
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance fails."""
    ctx = context.RequestContext("admin", "fake", True)
    # current status is available
    volume = db.volume_create(ctx, {"status": "available", "host": "test",
                                    "provider_location": "", "size": 1})
    # start service to handle rpc messages for attach requests
    svc = self.start_service("volume", host="test")
    self.volume_api.reserve_volume(ctx, volume)
    values = {"volume_id": volume["id"],
              "attach_status": "attaching",
              "attach_time": timeutils.utcnow(),
              "instance_uuid": "abc123"}
    db.volume_attach(ctx, values)
    db.volume_admin_metadata_update(ctx, volume["id"],
                                    {"attached_mode": "rw"}, False)
    mountpoint = "/dev/vbd"
    attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
                                        None, mountpoint, "rw")
    self.assertEqual(stubs.FAKE_UUID, attachment["instance_uuid"])
    # Fix: dropped the accidental third argument, which assertEqual
    # would have treated as the failure message.
    self.assertEqual(volume["id"], attachment["volume_id"])
    self.assertEqual("attached", attachment["attach_status"])
    svc.stop()
def test_volume_show_with_admin_metadata(self):
    volume = stubs.stub_volume(fake.VOLUME_ID)
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(),
                                    fake.VOLUME_ID,
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    attachment = db.volume_attach(context.get_admin_context(),
                                  {'volume_id': fake.VOLUME_ID})
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       fake.INSTANCE_ID, None, '/')
    req = fakes.HTTPRequest.blank('/v1/volumes/%s' % fake.VOLUME_ID)
    admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.show(req, fake.VOLUME_ID)
    expected = {'volume': {'status': 'in-use',
                           'display_description': 'displaydesc',
                           'availability_zone': 'fakeaz',
                           'display_name': 'displayname',
                           'encrypted': False,
                           'attachments': [
                               {'attachment_id': attachment['id'],
                                'device': '/',
                                'server_id': fake.INSTANCE_ID,
                                'host_name': None,
                                'id': fake.VOLUME_ID,
                                'volume_id': fake.VOLUME_ID}],
                           'multiattach': 'false',
                           'bootable': 'false',
                           'volume_type': None,
                           'snapshot_id': None,
                           'source_volid': None,
                           'metadata': {'key': 'value',
                                        'readonly': 'True'},
                           'id': fake.VOLUME_ID,
                           'created_at': datetime.datetime(
                               1900, 1, 1, 1, 1, 1,
                               tzinfo=iso8601.iso8601.Utc()),
                           'size': 1}}
    self.assertEqual(expected, res_dict)
def test_volume_show_with_admin_metadata(self):
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.show(req, '1')
    expected = {'volume': {'status': 'in-use',
                           'display_description': 'displaydesc',
                           'availability_zone': 'fakeaz',
                           'display_name': 'displayname',
                           'encrypted': False,
                           'attachments': [
                               {'attachment_id': attachment['id'],
                                'device': '/',
                                'server_id': stubs.FAKE_UUID,
                                'host_name': None,
                                'id': '1',
                                'volume_id': '1'}],
                           'multiattach': 'false',
                           'bootable': 'false',
                           'volume_type': None,
                           'snapshot_id': None,
                           'source_volid': None,
                           'metadata': {'key': 'value',
                                        'readonly': 'True'},
                           'id': '1',
                           'created_at': datetime.datetime(1900, 1, 1,
                                                           1, 1, 1),
                           'size': 1}}
    # Fix: assertEqual takes (expected, actual); the original had the
    # arguments reversed, which mislabels the diff on failure.
    self.assertEqual(expected, res_dict)
def test_attachment_create_update_and_delete(
        self, mock_rpc_attachment_update, mock_rpc_attachment_delete,
        mock_policy):
    """Test attachment_delete."""
    connection_info = {'fake_key': 'fake_value',
                       'fake_key2': ['fake_value1', 'fake_value2']}
    mock_rpc_attachment_update.return_value = connection_info
    vref = tests_utils.create_volume(self.context, status='available')
    aref = self.volume_api.attachment_create(self.context, vref,
                                             fake.UUID2)
    aref = objects.VolumeAttachment.get_by_id(self.context, aref.id)
    vref = objects.Volume.get_by_id(self.context, vref.id)
    connector = {'fake': 'connector'}
    self.volume_api.attachment_update(self.context, aref, connector)
    aref = objects.VolumeAttachment.get_by_id(self.context, aref.id)
    self.assertEqual(connection_info, aref.connection_info)
    # We mock the actual call that updates the status, so force it here.
    values = {'volume_id': vref.id,
              'volume_host': vref.host,
              'attach_status': 'attached',
              'instance_uuid': fake.UUID2}
    aref = db.volume_attach(self.context, values)
    aref = objects.VolumeAttachment.get_by_id(self.context, aref.id)
    self.assertEqual(vref.id, aref.volume_id)
    self.volume_api.attachment_delete(self.context, aref)
    mock_rpc_attachment_delete.assert_called_once_with(self.context,
                                                       aref.id, mock.ANY)
def test_attachment_delete(self):
    """Test attachment_delete."""
    vref = tests_utils.create_volume(self.context, status='available')
    self.manager.create_volume(self.context, vref)
    values = {'volume_id': vref.id,
              'volume_host': vref.host,
              'attach_status': 'reserved',
              'instance_uuid': fake.UUID1}
    attachment_ref = db.volume_attach(self.context, values)
    attachment_ref = db.volume_attachment_get(self.context,
                                              attachment_ref['id'])
    self.manager.attachment_delete(self.context, attachment_ref['id'],
                                   vref)
    # The attachment row must be gone after the delete.
    self.assertRaises(exception.VolumeAttachmentNotFound,
                      db.volume_attachment_get,
                      self.context, attachment_ref.id)
def test_attachment_delete(self):
    """Test attachment_delete."""
    volume_params = {'status': 'available'}
    vref = tests_utils.create_volume(self.context, **volume_params)
    self.manager.create_volume(self.context, vref)
    attachment_ref = db.volume_attach(self.context,
                                      {'volume_id': vref.id,
                                       'volume_host': vref.host,
                                       'attach_status': 'reserved',
                                       'instance_uuid': fake.UUID1})
    attachment_ref = db.volume_attachment_get(self.context,
                                              attachment_ref['id'])
    self.manager.attachment_delete(self.context, attachment_ref['id'],
                                   vref)
    # The attachment row must be gone after the delete.
    self.assertRaises(exception.VolumeAttachmentNotFound,
                      db.volume_attachment_get,
                      self.context, attachment_ref.id)
def test_attachment_update_with_readonly_volume(self, mock_update):
    """Updating an attachment on a readonly volume errors the attach."""
    mock_update.return_value = {'readonly': 'True'}
    vref = tests_utils.create_volume(self.context,
                                     **{'status': 'available'})
    self.manager.create_volume(self.context, vref)
    attachment_ref = db.volume_attach(self.context,
                                      {'volume_id': vref.id,
                                       'volume_host': vref.host,
                                       'attach_status': 'reserved',
                                       'instance_uuid': fake.UUID1})
    with mock.patch.object(self.manager, '_notify_about_volume_usage',
                           return_value=None), \
            mock.patch.object(self.manager, '_connection_create'):
        self.assertRaises(exception.InvalidVolumeAttachMode,
                          self.manager.attachment_update,
                          self.context, vref, {}, attachment_ref.id)
        attachment = db.volume_attachment_get(self.context,
                                              attachment_ref.id)
        self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING,
                         attachment['attach_status'])
def test_create_snapshot_force_host(self):
    """Force-snapshot of a host-attached in-use volume succeeds."""
    # create volume and attach to the host
    volume = tests_utils.create_volume(self.context, **self.volume_params)
    self.volume.create_volume(self.context, volume)
    values = {'volume_id': volume['id'],
              'attached_host': 'fake_host',
              'attach_status': fields.VolumeAttachStatus.ATTACHING, }
    attachment = db.volume_attach(self.context, values)
    db.volume_attached(self.context, attachment['id'], None,
                       'fake_host', '/dev/sda1')
    volume_api = cinder.volume.api.API()
    volume = volume_api.get(self.context, volume['id'])
    # A plain snapshot of an in-use volume is rejected ...
    self.assertRaises(exception.InvalidVolume,
                      volume_api.create_snapshot,
                      self.context, volume,
                      'fake_name', 'fake_description')
    # ... while the force variant succeeds.
    snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                    volume,
                                                    'fake_name',
                                                    'fake_description')
    snapshot_ref.destroy()
    db.volume_destroy(self.context, volume['id'])
def test_attachment_update(self):
    """Test attachment_update."""
    connector = {"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
                 "ip": "192.168.1.20",
                 "platform": "x86_64",
                 "host": "tempest-1",
                 "os_type": "linux2",
                 "multipath": False}
    vref = tests_utils.create_volume(self.context, status='available')
    self.manager.create_volume(self.context, vref)
    values = {'volume_id': vref.id,
              'volume_host': vref.host,
              'attach_status': 'reserved',
              'instance_uuid': fake.UUID1}
    attachment_ref = db.volume_attach(self.context, values)
    with mock.patch.object(self.manager, '_notify_about_volume_usage',
                           return_value=None):
        expected = {'encrypted': False,
                    'qos_specs': None,
                    'access_mode': 'rw',
                    'driver_volume_type': 'iscsi',
                    'attachment_id': attachment_ref.id}
        self.assertEqual(
            expected,
            self.manager.attachment_update(self.context, vref, connector,
                                           attachment_ref.id))
def _create_volume_attach(self, volume_id):
    """Attach *volume_id* to "testhost" at /dev/vd0 directly in the DB."""
    attachment = db.volume_attach(self.ctxt,
                                  {"volume_id": volume_id,
                                   "attach_status": "attached"})
    db.volume_attached(self.ctxt, attachment["id"], None, "testhost",
                       "/dev/vd0")
def _create_volume_attach(self, volume_id):
    """Attach *volume_id* to 'testhost' at /dev/vd0 directly in the DB."""
    values = {'volume_id': volume_id,
              'attach_status': 'attached', }
    attachment = db.volume_attach(self.ctxt, values)
    db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
                       '/dev/vd0')
def test_volume_list_detail_with_admin_metadata(self):
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')
    req = fakes.HTTPRequest.blank('/v1/volumes/detail')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.index(req)
    expected_volume = {'status': 'in-use',
                       'display_description': 'displaydesc',
                       'availability_zone': 'fakeaz',
                       'display_name': 'displayname',
                       'encrypted': False,
                       'attachments': [{'attachment_id': attachment['id'],
                                        'device': '/',
                                        'server_id': stubs.FAKE_UUID,
                                        'host_name': None,
                                        'id': '1',
                                        'volume_id': '1'}],
                       'multiattach': 'false',
                       'bootable': 'false',
                       'volume_type': None,
                       'snapshot_id': None,
                       'source_volid': None,
                       'metadata': {'key': 'value', 'readonly': 'True'},
                       'id': '1',
                       'created_at': datetime.datetime(1900, 1, 1,
                                                       1, 1, 1),
                       'size': 1}
    self.assertEqual({'volumes': [expected_volume]}, res_dict)
def test_volume_update_with_admin_metadata(self):
    def stubs_volume_admin_metadata_get(context, volume_id):
        return {'key': 'value', 'readonly': 'True'}
    self.stubs.Set(db, 'volume_admin_metadata_get',
                   stubs_volume_admin_metadata_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')
    updates = {
        "display_name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    self.assertEqual(0, len(self.notifier.notifications))
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected_volume = {'status': 'in-use',
                       'display_description': 'displaydesc',
                       'availability_zone': 'fakeaz',
                       'display_name': 'Updated Test Name',
                       'encrypted': False,
                       'attachments': [{'attachment_id': attachment['id'],
                                        'id': '1',
                                        'volume_id': '1',
                                        'server_id': stubs.FAKE_UUID,
                                        'host_name': None,
                                        'device': '/'}],
                       'multiattach': 'false',
                       'bootable': 'false',
                       'volume_type': None,
                       'snapshot_id': None,
                       'source_volid': None,
                       'metadata': {'key': 'value', 'readonly': 'True'},
                       'id': '1',
                       'created_at': datetime.datetime(1900, 1, 1,
                                                       1, 1, 1),
                       'size': 1}
    self.assertEqual({'volume': expected_volume}, res_dict)
    self.assertEqual(2, len(self.notifier.notifications))