def _create_volume_from_image(self, expected_status, raw=False,
                              clone_error=False):
    """Try to clone a volume from an image, and check the status afterwards.

    NOTE: if clone_error is True we force the image type to raw otherwise
    clone_image is not called
    """
    # Fake driver clone hook: record the call and either fail or report
    # a successful clone (no provider location, cloned=True).
    def mock_clone_image(volume, image_location, image_id, image_meta):
        self.called.append('clone_image')
        if clone_error:
            raise exception.CinderException()
        else:
            return {'provider_location': None}, True

    # See tests.image.fake for image types.
    if raw:
        image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
    else:
        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
    volume_id = 1
    # creating volume testdata
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': timeutils.utcnow(),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'creating',
                      'instance_uuid': None,
                      'host': 'dummy'})
    # Patch the driver so no real backend work happens; only the clone
    # path (and its fallback copy) is observed.
    mpo = mock.patch.object
    with mpo(self.volume.driver, 'create_volume') as mock_create_volume:
        with mpo(self.volume.driver, 'clone_image', mock_clone_image):
            with mpo(create_volume.CreateVolumeFromSpecTask,
                     '_copy_image_to_volume') as mock_copy_image_to_volume:
                try:
                    if not clone_error:
                        self.volume.create_volume(self.context, volume_id,
                                                  image_id=image_id)
                    else:
                        # Clone failure must surface as CinderException.
                        self.assertRaises(exception.CinderException,
                                          self.volume.create_volume,
                                          self.context, volume_id,
                                          image_id=image_id)
                    volume = db.volume_get(self.context, volume_id)
                    self.assertEqual(volume['status'], expected_status)
                finally:
                    # cleanup
                    db.volume_destroy(self.context, volume_id)
    self.assertEqual(self.called, ['clone_image'])
    mock_create_volume.assert_called()
    mock_copy_image_to_volume.assert_called()
def test_create_volume_from_image_exception(self):
    """Verify that create volume from image, the volume status is
    'downloading'."""
    # Point the driver's local_path at a throwaway temp file.
    tmp_fd, tmp_path = tempfile.mkstemp()
    os.close(tmp_fd)
    self.stubs.Set(self.volume.driver, 'local_path', lambda x: tmp_path)
    image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
    # Seed the volume row the manager will operate on.
    volume_id = 1
    volume_values = {'id': volume_id,
                     'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                     'display_description': 'Test Desc',
                     'size': 20,
                     'status': 'creating',
                     'host': 'dummy'}
    db.volume_create(self.context, volume_values)
    # The bogus image id makes the download fail with ImageNotFound.
    self.assertRaises(exception.ImageNotFound,
                      self.volume.create_volume,
                      self.context,
                      volume_id, None, None, None,
                      None, image_id)
    volume = db.volume_get(self.context, volume_id)
    self.assertEqual(volume['status'], "error")
    # cleanup
    db.volume_destroy(self.context, volume_id)
    os.unlink(tmp_path)
def test_volume_get_all_active_by_window(self):
    # Find all volumes valid within a timeframe window.
    # Not in window
    db.volume_create(self.ctx, self.db_vol_attrs[0])
    # In - deleted in window
    db.volume_create(self.ctx, self.db_vol_attrs[1])
    # In - deleted after window
    db.volume_create(self.ctx, self.db_vol_attrs[2])
    # In - created in window
    db.volume_create(self.context, self.db_vol_attrs[3])
    # Not of window.
    db.volume_create(self.context, self.db_vol_attrs[4])
    # NOTE(review): the creates above mix self.ctx and self.context --
    # confirm both refer to the same admin/tenant context.
    volumes = db.volume_get_all_active_by_window(
        self.context,
        datetime.datetime(1, 3, 1, 1, 1, 1),
        datetime.datetime(1, 4, 1, 1, 1, 1),
        project_id=fake.PROJECT_ID)
    # Only volumes 2-4 overlap the [month 3, month 4) window.
    self.assertEqual(3, len(volumes))
    self.assertEqual(fake.VOLUME2_ID, volumes[0].id)
    self.assertEqual(fake.VOLUME3_ID, volumes[1].id)
    self.assertEqual(fake.VOLUME4_ID, volumes[2].id)
def test_vol_update_glance_metadata(self):
    """Glance metadata is stored per volume and duplicates are rejected."""
    admin_ctx = context.get_admin_context()
    for vol_id in (1, 2):
        db.volume_create(admin_ctx, {"id": vol_id})
    db.volume_glance_metadata_create(admin_ctx, 1, "key1", "value1")
    db.volume_glance_metadata_create(admin_ctx, 2, "key1", "value1")
    db.volume_glance_metadata_create(admin_ctx, 2, "key2", "value2")
    # Non-string values are stringified on storage.
    db.volume_glance_metadata_create(admin_ctx, 2, "key3", 123)

    expected_metadata_1 = {"volume_id": "1",
                           "key": "key1",
                           "value": "value1"}
    metadata = db.volume_glance_metadata_get(admin_ctx, 1)
    self.assertEqual(len(metadata), 1)
    for key, value in expected_metadata_1.items():
        self.assertEqual(metadata[0][key], value)

    expected_metadata_2 = (
        {"volume_id": "2", "key": "key1", "value": "value1"},
        {"volume_id": "2", "key": "key2", "value": "value2"},
        {"volume_id": "2", "key": "key3", "value": "123"},
    )
    metadata = db.volume_glance_metadata_get(admin_ctx, 2)
    self.assertEqual(len(metadata), 3)
    for expected, meta in zip(expected_metadata_2, metadata):
        for key, value in expected.items():
            self.assertEqual(meta[key], value)

    # A second create for the same (volume, key) pair must fail and
    # must leave the original entry untouched.
    self.assertRaises(exception.GlanceMetadataExists,
                      db.volume_glance_metadata_create,
                      admin_ctx, 1, "key1", "value1a")
    metadata = db.volume_glance_metadata_get(admin_ctx, 1)
    self.assertEqual(len(metadata), 1)
    for key, value in expected_metadata_1.items():
        self.assertEqual(metadata[0][key], value)
def test_snapshot_metadata_get(self):
    """Snapshot metadata round-trips through snapshot_metadata_get."""
    metadata = {'a': 'b', 'c': 'd'}
    db.volume_create(self.ctxt, {'id': 1})
    db.snapshot_create(self.ctxt,
                       {'id': 1, 'volume_id': 1, 'metadata': metadata})
    # assertEquals is a deprecated alias (removed in newer unittest);
    # use assertEqual.
    self.assertEqual(metadata, db.snapshot_metadata_get(self.ctxt, 1))
def test_copy_volume_to_image_status_available(self):
    """Unattached volume returns to 'available' after an image upload."""
    tmp_fd, tmp_path = tempfile.mkstemp()
    os.close(tmp_fd)
    # The driver's local path is redirected to the temp file.
    self.stubs.Set(self.volume.driver, "local_path", lambda vol: tmp_path)
    image_id = "70a599e0-31e7-49b7-b260-868f441e862b"
    # Seed an uploading, unattached volume row.
    volume_id = 1
    db.volume_create(
        self.context,
        {"id": volume_id,
         "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
         "display_description": "Test Desc",
         "size": 20,
         "status": "uploading",
         "instance_uuid": None,
         "host": "dummy"})
    try:
        # start test
        self.volume.copy_volume_to_image(self.context, volume_id, image_id)
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume["status"], "available")
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(tmp_path)
def test_snapshot_data_get_for_project(self):
    """Per-project snapshot totals go from (0, 0) to (count, gigabytes)."""
    # No snapshots yet -> zero count, zero size.
    before = db.snapshot_data_get_for_project(self.ctxt, "project1")
    self.assertEqual(before, (0, 0))
    db.volume_create(self.ctxt,
                     {"id": 1, "project_id": "project1", "size": 42})
    db.snapshot_create(self.ctxt,
                       {"id": 1, "volume_id": 1,
                        "project_id": "project1", "volume_size": 42})
    after = db.snapshot_data_get_for_project(self.ctxt, "project1")
    self.assertEqual(after, (1, 42))
def test_copy_volume_to_image_exception(self):
    """Upload of a non-existent image leaves the volume 'available'."""
    tmp_fd, tmp_path = tempfile.mkstemp()
    os.close(tmp_fd)
    self.stubs.Set(self.volume.driver, 'local_path', lambda vol: tmp_path)
    image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
    # Seed an in-use volume row.
    volume_id = 1
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'in-use',
                      'host': 'dummy'})
    try:
        # start test: bogus image id -> ImageNotFound.
        self.assertRaises(exception.ImageNotFound,
                          self.volume.copy_volume_to_image,
                          self.context,
                          volume_id,
                          image_id)
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], 'available')
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(tmp_path)
def test_volume_get_by_filter(self):
    """Verifies that all filtering is done at the DB layer."""
    # Fixture layout: vols[0:2] g1/size 1, vols[2:4] g1/size 2,
    # vols[4:6] g1/no size, vols[6:8] g2/size 1.
    # NOTE: xrange is Python-2-only; range behaves identically here.
    vols = []
    vols.extend(
        [db.volume_create(self.ctxt,
                          {"project_id": "g1",
                           "display_name": "name_%d" % i,
                           "size": 1})
         for i in range(2)])
    vols.extend(
        [db.volume_create(self.ctxt,
                          {"project_id": "g1",
                           "display_name": "name_%d" % i,
                           "size": 2})
         for i in range(2)])
    vols.extend(
        [db.volume_create(self.ctxt,
                          {"project_id": "g1",
                           "display_name": "name_%d" % i})
         for i in range(2)])
    vols.extend(
        [db.volume_create(self.ctxt,
                          {"project_id": "g2",
                           "display_name": "name_%d" % i,
                           "size": 1})
         for i in range(2)])
    # By project, filter on size and name
    filters = {"size": "1"}
    correct_order = [vols[0], vols[1]]
    self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                        project_id="g1")
    filters = {"size": "1", "display_name": "name_1"}
    correct_order = [vols[1]]
    self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                        project_id="g1")
    # Remove project scope
    filters = {"size": "1"}
    correct_order = [vols[0], vols[1], vols[6], vols[7]]
    self._assertEqualsVolumeOrderResult(correct_order, filters=filters)
    filters = {"size": "1", "display_name": "name_1"}
    correct_order = [vols[1], vols[7]]
    self._assertEqualsVolumeOrderResult(correct_order, filters=filters)
    # Remove size constraint
    filters = {"display_name": "name_1"}
    correct_order = [vols[1], vols[3], vols[5]]
    self._assertEqualsVolumeOrderResult(correct_order, filters=filters,
                                        project_id="g1")
    correct_order = [vols[1], vols[3], vols[5], vols[7]]
    self._assertEqualsVolumeOrderResult(correct_order, filters=filters)
    # Verify bogus values return nothing
    filters = {"display_name": "name_1", "bogus_value": "foo"}
    self._assertEqualsVolumeOrderResult([], filters=filters,
                                        project_id="g1")
    self._assertEqualsVolumeOrderResult([], project_id="bogus")
    self._assertEqualsVolumeOrderResult([], filters=filters)
    self._assertEqualsVolumeOrderResult([],
                                        filters={"metadata": "not valid"})
    self._assertEqualsVolumeOrderResult([],
                                        filters={"metadata": ["not",
                                                              "valid"]})
    # Verify that relationship property keys return nothing, these
    # exist on the Volumes model but are not columns
    filters = {"volume_type": "bogus_type"}
    self._assertEqualsVolumeOrderResult([], filters=filters)
def test_backup_get_all_active_by_window(self):
    """Backups overlapping the time window are returned; others are not."""
    db.volume_create(self.context, {'id': fake.VOLUME_ID})
    # All five backup fixtures belong to the same volume.
    for attrs in self.db_back_attrs[:5]:
        attrs['volume_id'] = fake.VOLUME_ID
    # Fixture 0: not in window; 1: deleted in window; 2: deleted after
    # window; 3: created in window; 4: not in window.
    for attrs in self.db_back_attrs[:5]:
        db.backup_create(self.ctx, attrs)
    backups = db.backup_get_all_active_by_window(
        self.context,
        datetime.datetime(1, 3, 1, 1, 1, 1),
        datetime.datetime(1, 4, 1, 1, 1, 1),
        project_id=fake.PROJECT_ID
    )
    self.assertEqual(3, len(backups))
    self.assertEqual(fake.BACKUP2_ID, backups[0].id)
    self.assertEqual(fake.BACKUP3_ID, backups[1].id)
    self.assertEqual(fake.BACKUP4_ID, backups[2].id)
def test_migrate_volume_comp_no_action(self):
    """Completing a migration that never started yields HTTP 400."""
    src = db.volume_create(self.ctx, {"id": fake.VOLUME_ID})
    dest = db.volume_create(self.ctx, {"id": fake.VOLUME2_ID})
    user_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
    # expected status 400, no resulting volume id, no_action flag set.
    self._migrate_volume_comp_exec(user_ctx, src, dest, False,
                                   400, None, True)
def test_copy_volume_to_image_status_use(self):
    """Attached volume returns to 'in-use' after an image upload."""
    tmp_fd, tmp_path = tempfile.mkstemp()
    os.close(tmp_fd)
    self.stubs.Set(self.volume.driver, "local_path", lambda vol: tmp_path)
    image_meta = {"id": "a440c04b-79fa-479c-bed1-0b816eaec379",
                  "container_format": "bare",
                  "disk_format": "raw"}
    # Seed an uploading volume that is attached to an instance.
    volume_id = 1
    db.volume_create(
        self.context,
        {"id": volume_id,
         "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
         "display_description": "Test Desc",
         "size": 20,
         "status": "uploading",
         "instance_uuid": "b21f957d-a72f-4b93-b5a5-45b1161abb02",
         "host": "dummy"})
    try:
        # start test
        self.volume.copy_volume_to_image(self.context, volume_id,
                                         image_meta)
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume["status"], "in-use")
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(tmp_path)
def test_copy_volume_to_image_status_available(self):
    """An unattached volume ends up 'available' again after upload."""
    fd, path = tempfile.mkstemp()
    os.close(fd)

    def fake_local_path(volume):
        return path

    self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
    image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
    # Seed the volume the manager will upload from.
    volume_id = 1
    volume_values = {'id': volume_id,
                     'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                     'display_description': 'Test Desc',
                     'size': 20,
                     'status': 'uploading',
                     'instance_uuid': None,
                     'host': 'dummy'}
    db.volume_create(self.context, volume_values)
    try:
        # start test
        self.volume.copy_volume_to_image(self.context, volume_id, image_id)
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], 'available')
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(path)
def test_copy_volume_to_image_status_use(self):
    """Attached volume returns to 'in-use' after an image upload."""
    dst_fd, dst_path = tempfile.mkstemp()
    os.close(dst_fd)

    def fake_local_path(volume):
        return dst_path

    self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
    # Removed dead commented-out image id; this id maps to a queued
    # image in the fake image service.
    image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
    # creating volume testdata
    volume_id = 1
    db.volume_create(self.context,
                     {'id': volume_id,
                      'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                      'display_description': 'Test Desc',
                      'size': 20,
                      'status': 'uploading',
                      'instance_uuid':
                          'b21f957d-a72f-4b93-b5a5-45b1161abb02',
                      'host': 'dummy'})
    try:
        # start test
        self.volume.copy_volume_to_image(self.context, volume_id, image_id)
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], 'in-use')
    finally:
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
def test_vol_delete_glance_metadata(self):
    """Delete-by-volume removes all entries and tolerates an empty set."""
    admin_ctx = context.get_admin_context()
    db.volume_create(admin_ctx, {"id": fake.VOLUME_ID})
    # Deleting when no metadata exists must not raise.
    db.volume_glance_metadata_delete_by_volume(admin_ctx, fake.VOLUME_ID)
    db.volume_glance_metadata_create(admin_ctx, fake.VOLUME_ID,
                                     "key1", "value1")
    db.volume_glance_metadata_delete_by_volume(admin_ctx, fake.VOLUME_ID)
    # After the delete a lookup must report nothing left.
    self.assertRaises(exception.GlanceMetadataNotFound,
                      db.volume_glance_metadata_get,
                      admin_ctx, fake.VOLUME_ID)
def test_create_volume_from_image_exception(self):
    """Verify that create volume from image, the volume status is
    'downloading'."""
    fd, path = tempfile.mkstemp()
    os.close(fd)
    self.stubs.Set(self.volume.driver, "local_path", lambda x: path)
    image_id = "aaaaaaaa-0000-0000-0000-000000000000"
    # Seed the volume row used by the create call.
    volume_id = 1
    volume_values = {"id": volume_id,
                     "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1),
                     "display_description": "Test Desc",
                     "size": 20,
                     "status": "creating",
                     "host": "dummy"}
    db.volume_create(self.context, volume_values)
    # The bogus image id makes the create fail with ImageNotFound.
    self.assertRaises(exception.ImageNotFound,
                      self.volume.create_volume,
                      self.context,
                      volume_id,
                      None,
                      image_id)
    volume = db.volume_get(self.context, volume_id)
    self.assertEqual(volume["status"], "error")
    # cleanup
    db.volume_destroy(self.context, volume_id)
    os.unlink(path)
def test_migrate_volume_comp_as_non_admin(self):
    """Completing a migration without admin rights is rejected (403)."""
    src = db.volume_create(self.ctx, {'id': fake.VOLUME_ID})
    dest = db.volume_create(self.ctx, {'id': fake.VOLUME2_ID})
    user_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
    # expected status 403, no resulting volume id.
    self._migrate_volume_comp_exec(user_ctx, src, dest, False, 403, None)
def test_vol_delete_glance_metadata(self):
    """Delete-by-volume is idempotent and clears every entry."""
    ctxt = context.get_admin_context()
    vol_id = 1
    db.volume_create(ctxt, {'id': vol_id})
    # First delete runs against an empty metadata set -- must not raise.
    db.volume_glance_metadata_delete_by_volume(ctxt, vol_id)
    db.volume_glance_metadata_create(ctxt, vol_id, 'key1', 'value1')
    db.volume_glance_metadata_delete_by_volume(ctxt, vol_id)
    self.assertRaises(exception.GlanceMetadataNotFound,
                      db.volume_glance_metadata_get,
                      ctxt, vol_id)
def test_migrate_volume_comp_no_action(self):
    """Completion without a migration in progress yields HTTP 400."""
    admin_ctx = context.get_admin_context()
    src = db.volume_create(admin_ctx, {"id": "fake1"})
    dest = db.volume_create(admin_ctx, {"id": "fake2"})
    user_ctx = context.RequestContext("fake", "fake")
    # expected status 400, no resulting volume id, no_action flag set.
    self._migrate_volume_comp_exec(user_ctx, src, dest, False,
                                   400, None, True)
def test_migrate_volume_comp_bad_mig_status(self):
    """Mismatched migration_status values make completion fail (400)."""
    admin_ctx = context.get_admin_context()
    src = db.volume_create(admin_ctx,
                           {"id": "fake1",
                            "migration_status": "migrating"})
    # Destination claims to be the target of a *different* migration.
    dest = db.volume_create(admin_ctx,
                            {"id": "fake2",
                             "migration_status": "target:foo"})
    ctx = context.RequestContext("admin", "fake", True)
    self._migrate_volume_comp_exec(ctx, src, dest, False, 400, None)
def test_migrate_volume_comp_no_action(self):
    """Completing with nothing to do returns HTTP 400."""
    src = db.volume_create(self.ctx, {'id': 'fake1'})
    dest = db.volume_create(self.ctx, {'id': 'fake2'})
    user_ctx = context.RequestContext('fake', 'fake')
    # expected status 400, no resulting volume id, no_action flag set.
    self._migrate_volume_comp_exec(user_ctx, src, dest, False,
                                   400, None, True)
def test_volume_data_get_for_host(self):
    """Each host's (count, gigabytes) totals reflect only its volumes."""
    # NOTE: xrange is Python-2-only; range behaves identically here.
    # Three hosts, three 100 GB volumes each.
    for i in range(3):
        for j in range(3):
            db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': 100})
    for i in range(3):
        self.assertEqual((3, 300),
                         db.volume_data_get_for_host(self.ctxt,
                                                     'h%d' % i))
def test_volume_update_with_admin_metadata(self):
    """Admin context sees admin metadata merged into the update response."""
    # Admin metadata lookup is stubbed to a fixed mapping.
    def stubs_volume_admin_metadata_get(context, volume_id):
        return {'key': 'value', 'readonly': 'True'}
    self.stubs.Set(db, 'volume_admin_metadata_get',
                   stubs_volume_admin_metadata_get)
    self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
    volume = stubs.stub_volume("1")
    # Strip attributes that volume_create would reject / recompute.
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    # 'invisible_key' is admin metadata that must NOT leak into the
    # response below.
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    # Attach the volume to a fake instance at '/'.
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')
    updates = {
        "display_name": "Updated Test Name",
    }
    body = {"volume": updates}
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    self.assertEqual(0, len(self.notifier.notifications))
    # Issue the update as an admin.
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.update(req, '1', body)
    expected = {'volume': {
        'status': 'in-use',
        'display_description': 'displaydesc',
        'availability_zone': 'fakeaz',
        'display_name': 'Updated Test Name',
        'encrypted': False,
        'attachments': [{
            'attachment_id': attachment['id'],
            'id': '1',
            'volume_id': '1',
            'server_id': stubs.FAKE_UUID,
            'host_name': None,
            'device': '/'
        }],
        'multiattach': 'false',
        'bootable': 'false',
        'volume_type': None,
        'snapshot_id': None,
        'source_volid': None,
        'metadata': {'key': 'value',
                     'readonly': 'True'},
        'id': '1',
        'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
        'size': 1}}
    self.assertEqual(expected, res_dict)
    # One notification for the update start, one for the end.
    self.assertEqual(2, len(self.notifier.notifications))
def test_volume_get_all_marker_passed(self):
    """volume_get_all with marker=2 returns only the rows after id 2."""
    volumes = [db.volume_create(self.ctxt, {"id": vol_id})
               for vol_id in (1, 2, 3, 4)]
    self._assertEqualListsOfObjects(
        volumes[2:],
        db.volume_get_all(self.ctxt, 2, 2, "id", None))
def test_volume_metadata_update_delete(self):
    """With delete=True the update replaces the metadata wholesale."""
    original = {"a": "1", "c": "2"}
    replacement = {"a": "3", "d": "4"}
    db.volume_create(self.ctxt, {"id": 1, "metadata": original})
    result = db.volume_metadata_update(self.ctxt, 1, replacement, True)
    # Keys absent from the replacement ('c') must be gone.
    self.assertEqual(replacement, result)
def test_volume_metadata_update(self):
    """With delete=False the update merges into the existing metadata."""
    original = {'a': '1', 'c': '2'}
    update = {'a': '3', 'd': '5'}
    merged = {'a': '3', 'c': '2', 'd': '5'}
    db.volume_create(self.ctxt, {'id': 1, 'metadata': original})
    result = db.volume_metadata_update(self.ctxt, 1, update, False)
    # Untouched keys ('c') survive; updated/new keys take the new value.
    self.assertEqual(merged, result)
def test_snapshot_metadata_delete(self):
    """Deleting one snapshot-metadata key leaves the rest intact."""
    initial = {"a": "1", "c": "2"}
    remaining = {"a": "1"}
    db.volume_create(self.ctxt, {"id": 1})
    db.snapshot_create(self.ctxt,
                       {"id": 1, "volume_id": 1, "metadata": initial})
    db.snapshot_metadata_delete(self.ctxt, 1, "c")
    self.assertEqual(remaining, db.snapshot_metadata_get(self.ctxt, 1))
def _create_volume_and_glance_metadata(self):
    """Seed one available volume plus its full set of glance metadata."""
    ctxt = context.get_admin_context()
    db.volume_create(ctxt, {"id": "fake",
                            "status": "available",
                            "host": "test",
                            "provider_location": "",
                            "size": 1})
    # Create each glance entry in the same order as before.
    for key, value in (("image_id", "someid"),
                       ("image_name", "fake"),
                       ("kernel_id", "somekernel"),
                       ("ramdisk_id", "someramdisk")):
        db.volume_glance_metadata_create(ctxt, "fake", key, value)
def test_migrate_volume_comp_as_non_admin(self):
    """Completing a migration without admin rights is rejected (403)."""
    admin_ctx = context.get_admin_context()
    volume = db.volume_create(admin_ctx, {'id': 'fake1'})
    new_volume = db.volume_create(admin_ctx, {'id': 'fake2'})
    expected_status = 403
    expected_id = None
    ctx = context.RequestContext('fake', 'fake')
    # The helper's return value was previously bound back to ``volume``
    # for no reason; drop the redundant assignment.
    self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
                                   expected_status, expected_id)
def test_volume_metadata_update_delete(self):
    """With delete=True the update replaces the metadata wholesale."""
    metadata1 = {'a': '1', 'c': '2'}
    metadata2 = {'a': '3', 'd': '4'}
    should_be = metadata2
    db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1})
    db.volume_metadata_update(self.ctxt, 1, metadata2, True)
    # assertEquals is a deprecated alias (removed in newer unittest);
    # use assertEqual.
    self.assertEqual(should_be, db.volume_metadata_get(self.ctxt, 1))
def test_vols_get_glance_metadata(self):
    """get_all returns every glance entry across all volumes, in order."""
    admin_ctx = context.get_admin_context()
    for vol_id in ('1', '2', '3'):
        db.volume_create(admin_ctx, {'id': vol_id})
    entries = (('1', 'key1', 'value1'),
               ('2', 'key2', 'value2'),
               ('2', 'key22', 'value22'))
    for vol_id, key, value in entries:
        db.volume_glance_metadata_create(admin_ctx, vol_id, key, value)
    metadata = db.volume_glance_metadata_get_all(admin_ctx)
    # Volume '3' has no entries, so only three rows come back.
    self.assertEqual(len(metadata), 3)
    for idx, (vol_id, key, value) in enumerate(entries):
        self._assert_metadata_equals(vol_id, key, value, metadata[idx])
def setUp(self):
    """Build the volume/snapshot/group fixtures shared by RPC API tests."""
    super(VolumeRPCAPITestCase, self).setUp()
    # NOTE(review): this binds the VolumeAPI *class*, not an instance --
    # confirm downstream tests instantiate or patch it as intended.
    self.rpcapi = volume_rpcapi.VolumeAPI
    self.base_version = '3.0'
    # A plain available/detached volume row.
    vol = {}
    vol['host'] = 'fake_host'
    vol['availability_zone'] = CONF.storage_availability_zone
    vol['status'] = "available"
    vol['attach_status'] = "detached"
    vol['metadata'] = {"test_key": "test_val"}
    vol['size'] = 1
    volume = db.volume_create(self.context, vol)
    # A snapshot of that volume, still in the CREATING state.
    kwargs = {
        'status': fields.SnapshotStatus.CREATING,
        'progress': '0%',
        'display_name': 'fake_name',
        'display_description': 'fake_description'}
    snapshot = tests_utils.create_snapshot(self.context, vol['id'],
                                           **kwargs)
    # A generic group plus a snapshot of it.
    generic_group = tests_utils.create_group(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        group_type_id='group_type1',
        host='fakehost@fakedrv#fakepool')
    group_snapshot = tests_utils.create_group_snapshot(
        self.context,
        group_id=generic_group.id,
        group_type_id=fake.GROUP_TYPE_ID)
    # Both primitive (JSON-able) and object forms of the volume are
    # kept, since different RPC methods expect different forms.
    self.fake_volume = jsonutils.to_primitive(volume)
    self.fake_volume_obj = fake_volume.fake_volume_obj(self.context,
                                                       **vol)
    self.fake_snapshot = snapshot
    self.fake_reservations = ["RESERVATION"]
    self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)
    self.fake_group = generic_group
    self.fake_group_snapshot = group_snapshot
    # Assume every RPC version is sendable unless a test overrides it.
    self.can_send_version_mock = self.patch(
        'oslo_messaging.RPCClient.can_send_version',
        return_value=True)
def test_copy_volume_to_image_over_image_quota(self):
    """ImageLimitExceeded propagates and records a user-facing message."""
    # Seed a detached volume row.
    self.volume_attrs['instance_uuid'] = None
    volume = db.volume_create(self.context, self.volume_attrs)
    with mock.patch.object(self.volume.driver,
                           'copy_volume_to_image') as copy_mock:
        copy_mock.side_effect = exception.ImageLimitExceeded
        # Image is not in the queued state, so the error surfaces.
        self.assertRaises(exception.ImageLimitExceeded,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)
        # Assert a user message was created
        self.volume.message_api.create.assert_called_once_with(
            self.context,
            message_field.Action.COPY_VOLUME_TO_IMAGE,
            resource_uuid=volume['id'],
            exception=mock.ANY,
            detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)
def test_invalid_status_for_snapshot(self):
    """Resetting a snapshot to a bogus status fails and changes nothing."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': '',
                                          'size': 1})
    snapshot = db.snapshot_create(admin_ctx,
                                  {'status': 'available',
                                   'volume_id': volume['id']})
    # 'attaching' is not a valid snapshot status.
    resp = self._issue_snapshot_reset(admin_ctx, snapshot,
                                      {'status': 'attaching'})
    self.assertEqual(400, resp.status_int)
    snapshot = db.snapshot_get(admin_ctx, snapshot['id'])
    self.assertEqual('available', snapshot['status'])
def test_backup_reset_status_as_admin(self):
    """An admin may reset a backup's status; the API accepts with 202."""
    volume = db.volume_create(self.ctx, {'status': 'available',
                                         'user_id': 'user',
                                         'project_id': 'project'})
    backup_values = {'status': fields.BackupStatus.AVAILABLE,
                     'size': 1,
                     'volume_id': volume['id'],
                     'user_id': 'user',
                     'project_id': 'project',
                     'host': 'test'}
    backup = db.backup_create(self.ctx, backup_values)
    resp = self._issue_backup_reset(self.ctx, backup,
                                    {'status': fields.BackupStatus.ERROR})
    self.assertEqual(202, resp.status_int)
def test_reset_status_as_non_admin(self):
    """Non-admin os-reset_status requests are rejected with 403."""
    # current status is 'error'
    volume = db.volume_create(context.get_admin_context(),
                              {'status': 'error'})
    req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request changing status to available
    req.body = jsonutils.dumps({'os-reset_status': {'status': 'available'}})
    # non-admin context
    req.environ['cinder.context'] = context.RequestContext('fake', 'fake')
    resp = req.get_response(app())
    # request is not authorized
    self.assertEqual(resp.status_int, 403)
    volume = db.volume_get(context.get_admin_context(), volume['id'])
    # status is still 'error'
    self.assertEqual(volume['status'], 'error')
def test_backup_reset_status(self):
    """Resetting a backup's status as admin is accepted with 202."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': '',
                                          'size': 1})
    backup = db.backup_create(admin_ctx, {'status': 'available',
                                          'volume_id': volume['id'],
                                          'user_id': 'user',
                                          'project_id': 'project'})
    resp = self._issue_backup_reset(admin_ctx, backup,
                                    {'status': 'error'})
    self.assertEqual(202, resp.status_int)
def test_reset_attached_status(self):
    """Admin can reset both status and attach_status in one request."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': '',
                                          'size': 1,
                                          'attach_status': 'attached'})
    resp = self._issue_volume_reset(admin_ctx, volume,
                                    {'status': 'available',
                                     'attach_status': 'detached'})
    self.assertEqual(202, resp.status_int)
    # Both fields must reflect the reset afterwards.
    volume = db.volume_get(admin_ctx, volume['id'])
    self.assertEqual('detached', volume['attach_status'])
    self.assertEqual('available', volume['status'])
def test_invalid_status_for_snapshot(self):
    """Resetting a snapshot to a bogus status fails and changes nothing."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': '',
                                          'size': 1})
    snapshot = objects.Snapshot(admin_ctx, status='available',
                                volume_id=volume['id'])
    snapshot.create()
    self.addCleanup(snapshot.destroy)
    # 'attaching' is not a valid snapshot status.
    resp = self._issue_snapshot_reset(admin_ctx, snapshot,
                                      {'status': 'attaching'})
    self.assertEqual(400, resp.status_int)
    self.assertEqual('available', snapshot.status)
def setUp(self):
    """Build the volume/snapshot primitives shared by the RPC API tests."""
    # NOTE(review): super().setUp() is invoked at the *end* of this
    # method, so the fixtures below are created before the base class
    # initializes -- confirm this ordering is intentional.
    self.context = context.get_admin_context()
    # A plain available/detached volume row.
    vol = {}
    vol['host'] = 'fake_host'
    vol['availability_zone'] = FLAGS.storage_availability_zone
    vol['status'] = "available"
    vol['attach_status'] = "detached"
    volume = db.volume_create(self.context, vol)
    # A snapshot fixture (note: references 'fake_id', not the volume
    # created above).
    snpshot = {
        'volume_id': 'fake_id',
        'status': "creating",
        'progress': '0%',
        'volume_size': 0,
        'display_name': 'fake_name',
        'display_description': 'fake_description'}
    snapshot = db.snapshot_create(self.context, snpshot)
    # Primitive (JSON-able) versions are what the RPC layer receives.
    self.fake_volume = jsonutils.to_primitive(volume)
    self.fake_snapshot = jsonutils.to_primitive(snapshot)
    super(VolumeRpcAPITestCase, self).setUp()
def test_attach_in_use_volume(self):
    """Test that attaching to an in-use volume fails."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    # Volume starts out available.
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': ''})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(admin_ctx, volume)
    self.volume_api.initialize_connection(admin_ctx, volume, {})
    mountpoint = '/dev/vbd'
    self.volume_api.attach(admin_ctx, volume, stubs.FAKE_UUID, mountpoint)
    # A second attach against the now in-use volume must be rejected.
    self.assertRaises(exception.InvalidVolume,
                      self.volume_api.attach,
                      admin_ctx,
                      volume,
                      fakes.get_fake_uuid(),
                      mountpoint)
    # cleanup
    svc.stop()
def test_invalid_iscsi_connector(self):
    """Test connector without the initiator (required by iscsi driver)."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': '',
                                          'size': 1})
    # Empty connector: no 'initiator' key.
    connector = {}
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.volume_api.initialize_connection,
                      admin_ctx, volume, connector)
    # cleanup
    svc.stop()
def test_get_volume_format_spec(self, volume_versioned_object,
                                volume_meta_contains_fmt,
                                volume_type_contains_fmt):
    """Volume-metadata format wins over volume-type extra specs.

    Parameterized: the flags choose whether the format comes from volume
    metadata, from volume-type extra specs, or neither, and whether the
    driver is handed a versioned object or a raw SqlA model.
    """
    fake_vol_meta_fmt = 'vhd'
    fake_vol_type_fmt = 'vhdx'
    volume_metadata = {}
    volume_type_extra_specs = {}
    fake_vol_dict = fake_volume.fake_db_volume()
    # 'name' would collide with volume_create's computed column.
    del fake_vol_dict['name']
    if volume_meta_contains_fmt:
        volume_metadata['volume_format'] = fake_vol_meta_fmt
    elif volume_type_contains_fmt:
        volume_type_extra_specs['smbfs:volume_format'] = fake_vol_type_fmt
    ctxt = context.get_admin_context()
    volume_type = db.volume_type_create(
        ctxt, {'extra_specs': volume_type_extra_specs,
               'name': 'fake_vol_type'})
    fake_vol_dict.update(metadata=volume_metadata,
                         volume_type_id=volume_type.id)
    # We want to get a 'real' SqlA model object, not just a dict.
    volume = db.volume_create(ctxt, fake_vol_dict)
    volume = db.volume_get(ctxt, volume.id)
    if volume_versioned_object:
        volume = objects.Volume._from_db_object(ctxt, objects.Volume(),
                                                volume)
    resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume)
    # Expected precedence: metadata > extra specs > None.
    if volume_meta_contains_fmt:
        expected_fmt = fake_vol_meta_fmt
    elif volume_type_contains_fmt:
        expected_fmt = fake_vol_type_fmt
    else:
        expected_fmt = None
    self.assertEqual(expected_fmt, resulted_fmt)
def test_force_detach_volume(self):
    """os-force_detach returns an attached volume to 'available'."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available',
                                    'host': 'test',
                                    'provider_location': ''})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(ctx, volume)
    self.volume_api.initialize_connection(ctx, volume, {})
    mountpoint = '/dev/vbd'
    self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, mountpoint)
    # volume is attached
    # NOTE: assertEquals is a deprecated alias (removed in newer
    # unittest); assertEqual / assertIsNone are used instead.
    volume = db.volume_get(ctx, volume['id'])
    self.assertEqual(volume['status'], 'in-use')
    self.assertEqual(volume['instance_uuid'], stubs.FAKE_UUID)
    self.assertEqual(volume['mountpoint'], mountpoint)
    self.assertEqual(volume['attach_status'], 'attached')
    # build request to force detach
    req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request status of 'error'
    req.body = jsonutils.dumps({'os-force_detach': None})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    # make request
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(resp.status_int, 202)
    volume = db.volume_get(ctx, volume['id'])
    # status changed to 'available'
    self.assertEqual(volume['status'], 'available')
    self.assertIsNone(volume['instance_uuid'])
    self.assertIsNone(volume['mountpoint'])
    self.assertEqual(volume['attach_status'], 'detached')
    # cleanup
    svc.stop()
def _create_multiple_volumes_with_different_project(self):
    """Seed two volumes in PROJECT_ID and one in PROJECT2_ID."""
    fixtures = (('test1', fake.PROJECT_ID),
                ('test2', fake.PROJECT_ID),
                ('test3', fake.PROJECT2_ID))
    for name, project in fixtures:
        db.volume_create(self.ctxt, {'display_name': name,
                                     'project_id': project})
def test_backup_reset_status(self):
    """Resetting a backup's status to ERROR is accepted with 202."""
    volume = db.volume_create(self.ctx, {'status': 'available',
                                         'host': 'test',
                                         'provider_location': '',
                                         'size': 1})
    backup_values = {'status': fields.BackupStatus.AVAILABLE,
                     'volume_id': volume['id'],
                     'user_id': fake.USER_ID,
                     'project_id': fake.PROJECT_ID,
                     'host': 'test'}
    backup = db.backup_create(self.ctx, backup_values)
    resp = self._issue_backup_reset(self.ctx, backup,
                                    {'status': fields.BackupStatus.ERROR})
    self.assertEqual(202, resp.status_int)
def test_backup_reset_status_with_invalid_backup(self):
    """Resetting a non-existent backup id yields a 404."""
    volume = db.volume_create(self.ctx, {'status': 'available',
                                         'host': 'test',
                                         'provider_location': '',
                                         'size': 1})
    backup = db.backup_create(self.ctx, {'status': 'available',
                                         'volume_id': volume['id'],
                                         'user_id': 'user',
                                         'project_id': 'project'})
    # Point the request at an id that does not exist in the DB.
    backup['id'] = 'fake_id'
    resp = self._issue_backup_reset(self.ctx, backup,
                                    {'status': 'error'})
    # Should raise 404 if backup doesn't exist.
    self.assertEqual(404, resp.status_int)
def test_copy_volume_to_image_over_image_quota(self):
    """Exceeding image quota propagates and records a user message."""
    # creating volume testdata
    self.volume_attrs['instance_uuid'] = None
    volume = db.volume_create(self.context, self.volume_attrs)

    copy_patch = mock.patch.object(self.volume.driver,
                                   'copy_volume_to_image')
    with copy_patch as driver_copy_mock:
        driver_copy_mock.side_effect = exception.ImageLimitExceeded

        # test with image not in queued state
        self.assertRaises(exception.ImageLimitExceeded,
                          self.volume.copy_volume_to_image,
                          self.context,
                          self.volume_id,
                          self.image_meta)
        # Assert a user message was created
        self.volume.message_api.create.assert_called_once_with(
            self.context,
            defined_messages.EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA,
            self.context.project_id,
            resource_type=resource_types.VOLUME,
            resource_uuid=volume['id'])
def create(self):
    """Persist this volume object as a new DB row.

    Raises ObjectActionError if the object was already created or if
    any linked sub-resource field has been assigned before creation.
    """
    if self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='create',
                                          reason=_('already created'))
    updates = self.cinder_obj_get_changes()
    # These relationship fields may not be set at create time; each
    # _() literal is kept inline so message extraction still finds it.
    forbidden_fields = (
        ('consistencygroup', _('consistencygroup assigned')),
        ('snapshots', _('snapshots assigned')),
        ('cluster', _('cluster assigned')),
        ('group', _('group assigned')),
    )
    for field, reason in forbidden_fields:
        if field in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=reason)
    db_volume = db.volume_create(self._context, updates)
    self._from_db_object(self._context, self, db_volume)
def _create_volume_db_entry(self, display_name='test_volume',
                            display_description='this is a test volume',
                            status='backing-up',
                            previous_status='available', size=1):
    """Create a volume entry in the DB.

    Return the entry ID
    """
    values = {
        'size': size,
        'host': 'testhost',
        'user_id': 'fake',
        'project_id': 'fake',
        'status': status,
        'display_name': display_name,
        'display_description': display_description,
        'attach_status': 'detached',
        'availability_zone': '1',
        'previous_status': previous_status,
    }
    return db.volume_create(self.ctxt, values)['id']
def _create_volume(context, display_name='test_volume',
                   display_description='this is a test volume',
                   status='creating', availability_zone='fake_az',
                   host='fake_host', size=1):
    """Create a volume object."""
    vol = {}
    vol['size'] = size
    vol['user_id'] = 'fake'
    vol['project_id'] = 'fake'
    vol['status'] = status
    vol['display_name'] = display_name
    vol['display_description'] = display_description
    vol['attach_status'] = 'detached'
    vol['availability_zone'] = availability_zone
    vol['host'] = host
    vol['encryption_key_id'] = 'fake_key'
    return db.volume_create(context, vol)['id']
def test_attach_attaching_volume_with_different_instance(self):
    """Test that attaching volume reserved for another instance fails."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': ''})
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    # Reserve the volume for some other instance.
    db.volume_update(admin_ctx, volume['id'],
                     {'status': 'attaching',
                      'instance_uuid': fakes.get_fake_uuid()})
    mountpoint = '/dev/vbd'
    self.assertRaises(exception.InvalidVolume,
                      self.volume_api.attach,
                      admin_ctx, volume,
                      stubs.FAKE_UUID, None, mountpoint, 'rw')
    # cleanup
    svc.stop()
def _create_volume(self, volume_id, status='available', user_id=None,
                   project_id=None):
    """Create and return a volume row, defaulting ids from the context."""
    if user_id is None:
        user_id = self.ctxt.user_id
    if project_id is None:
        project_id = self.ctxt.project_id
    values = {}
    values['id'] = volume_id
    values['updated_at'] = datetime.datetime(1, 1, 1, 1, 1, 1)
    values['user_id'] = user_id
    values['project_id'] = project_id
    values['display_name'] = 'Display Name'
    values['display_description'] = 'Display Description'
    values['size'] = 1
    values['status'] = status
    return db.volume_create(self.ctxt, values)
def test_snapshot_reset_status(self):
    """Reset a snapshot from 'error_deleting' to 'error' as admin."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # snapshot in 'error_deleting'
    volume = db.volume_create(ctx, {})
    snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
                                        'volume_id': volume['id']})
    req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
                              snapshot['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # request status of 'error'
    req.body = jsonutils.dumps({'os-reset_status': {'status': 'error'}})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # request is accepted
    self.assertEqual(resp.status_int, 202)
    snapshot = db.snapshot_get(ctx, snapshot['id'])
    # status changed to 'error'
    self.assertEqual(snapshot['status'], 'error')
def test_attach_in_used_volume_by_host(self):
    """Test that attaching to an in-use volume fails."""
    admin_ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(admin_ctx, {'status': 'available',
                                          'host': 'test',
                                          'provider_location': ''})
    connector = {'initiator': 'iqn.2012-07.org.fake:01'}
    # start service to handle rpc messages for attach requests
    svc = self.start_service('volume', host='test')
    self.volume_api.reserve_volume(admin_ctx, volume)
    self.volume_api.initialize_connection(admin_ctx, volume, connector)
    mountpoint = '/dev/vbd'
    host_name = 'fake_host'
    # First host attach succeeds; a second attach must be rejected.
    self.volume_api.attach(admin_ctx, volume, None, host_name, mountpoint)
    self.assertRaises(exception.InvalidVolume,
                      self.volume_api.attach,
                      admin_ctx, volume, None, host_name, mountpoint)
    # cleanup
    svc.stop()
def test_invalid_status_for_snapshot(self):
    """An invalid snapshot status in os-reset_status returns 400."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # snapshot in 'available'
    volume = db.volume_create(ctx, {})
    snapshot = db.snapshot_create(ctx, {'status': 'available',
                                        'volume_id': volume['id']})
    req = webob.Request.blank('/v2/fake/snapshots/%s/action' %
                              snapshot['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # 'attaching' is not a valid status for snapshots
    req.body = jsonutils.dumps({'os-reset_status': {'status':
                                                    'attaching'}})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # request is rejected
    self.assertEqual(resp.status_int, 400)
    snapshot = db.snapshot_get(ctx, snapshot['id'])
    # status is still 'available'
    self.assertEqual(snapshot['status'], 'available')
def test_invalid_reset_attached_status(self):
    """An invalid attach_status in os-reset_status returns 400."""
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is available
    volume = db.volume_create(ctx, {'status': 'available',
                                    'attach_status': 'detached'})
    req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    # 'invalid' is not a valid attach_status
    body = {'os-reset_status': {'status': 'available',
                                'attach_status': 'invalid'}}
    req.body = jsonutils.dumps(body)
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    resp = req.get_response(app())
    # bad request
    self.assertEqual(resp.status_int, 400)
    volume = db.volume_get(ctx, volume['id'])
    # status and attach_status un-modified
    self.assertEqual(volume['status'], 'available')
    self.assertEqual(volume['attach_status'], 'detached')
def test_backup_reset_status_with_invalid_backup(self):
    """Resetting a backup whose id does not exist yields a 404."""
    vol_values = {'status': 'available',
                  'host': 'test',
                  'provider_location': '',
                  'size': 1}
    volume = db.volume_create(self.ctx, vol_values)
    backup_values = {'status': fields.BackupStatus.AVAILABLE,
                     'volume_id': volume['id'],
                     'user_id': fake.USER_ID,
                     'project_id': fake.PROJECT_ID}
    backup = db.backup_create(self.ctx, backup_values)
    # Point the request at a backup id that was never created.
    backup['id'] = fake.BACKUP_ID
    resp = self._issue_backup_reset(
        self.ctx, backup, {'status': fields.BackupStatus.ERROR})
    # Should raise 404 if backup doesn't exist.
    self.assertEqual(404, resp.status_int)
def _create_volume(self, display_name='test_volume',
                   display_description='this is a test volume',
                   status='available', size=1, project_id=fake.PROJECT_ID,
                   attach_status=fields.VolumeAttachStatus.DETACHED):
    """Create a volume object."""
    admin_ctxt = context.get_admin_context()
    values = {
        'host': 'fake_host',
        'size': size,
        'user_id': fake.USER_ID,
        'project_id': project_id,
        'status': status,
        'display_name': display_name,
        'display_description': display_description,
        'attach_status': attach_status,
        'availability_zone': 'fake_zone',
    }
    volume_id = db.volume_create(admin_ctxt, values)['id']
    # Ensure the row is removed again when the test finishes.
    self.addCleanup(db.volume_destroy, context.get_admin_context(),
                    volume_id)
    return volume_id
def create_volume(ctxt, host='test_host', display_name='test_volume',
                  display_description='this is a test volume',
                  status='available', migration_status=None, size=1,
                  availability_zone='fake_az', volume_type_id=None,
                  replication_status='disabled',
                  replication_extended_status=None,
                  replication_driver_data=None, consistencygroup_id=None,
                  previous_status=None, **kwargs):
    """Create a volume object in the DB.

    :param ctxt: request context; supplies user_id and project_id
    :param kwargs: extra column values merged into the row before the
                   replication/previous_status fields are applied (so
                   kwargs cannot override those trailing fields)
    :returns: the created volume DB row
    """
    vol = {}
    vol['size'] = size
    vol['host'] = host
    vol['user_id'] = ctxt.user_id
    vol['project_id'] = ctxt.project_id
    vol['status'] = status
    vol['migration_status'] = migration_status
    vol['display_name'] = display_name
    vol['display_description'] = display_description
    vol['attach_status'] = 'detached'
    vol['availability_zone'] = availability_zone
    # Optional foreign keys are only set when provided.
    if consistencygroup_id:
        vol['consistencygroup_id'] = consistencygroup_id
    if volume_type_id:
        vol['volume_type_id'] = volume_type_id
    vol.update(kwargs)
    vol['replication_status'] = replication_status
    vol['replication_extended_status'] = replication_extended_status
    vol['replication_driver_data'] = replication_driver_data
    vol['previous_status'] = previous_status
    return db.volume_create(ctxt, vol)