def test_volume_create_with_type(self):
    """Create a volume with the default volume type, then list it in detail.

    Fix: replace the deprecated ``assertEquals`` alias with ``assertEqual``.
    """
    vol_type = FLAGS.default_volume_type
    db.volume_type_create(context.get_admin_context(),
                          dict(name=vol_type, extra_specs={}))
    db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                             vol_type)

    vol = {
        "size": 100,
        "name": "Volume Test Name",
        "display_description": "Volume Test Desc",
        "availability_zone": "zone1:host1",
        "volume_type": db_vol_type["name"],
    }
    body = {"volume": vol}
    req = fakes.HTTPRequest.blank("/v2/volumes")
    res_dict = self.controller.create(req, body)
    volume_id = res_dict["volume"]["id"]
    self.assertEqual(1, len(res_dict))

    # Stub get_all so the detail listing returns the volume just created.
    self.stubs.Set(
        volume_api.API,
        "get_all",
        lambda *args, **kwargs:
        [stubs.stub_volume(volume_id, volume_type={"name": vol_type})],
    )
    req = fakes.HTTPRequest.blank("/v2/volumes/detail")
    res_dict = self.controller.detail(req)
def test_list_consistencygroups_xml(self):
    """List consistency groups via XML and verify ids come back in order."""
    cg_ids = [self._create_consistencygroup() for _ in range(3)]

    req = webob.Request.blank('/v2/fake/consistencygroups')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/xml'
    req.headers['Accept'] = 'application/xml'
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(200, res.status_int)
    dom = minidom.parseString(res.body)
    consistencygroup_list = dom.getElementsByTagName('consistencygroup')
    for index, cg_id in enumerate(cg_ids):
        self.assertEqual(
            cg_id, consistencygroup_list.item(index).getAttribute('id'))

    admin_ctxt = context.get_admin_context()
    # Tear down in reverse creation order, as the original did.
    for cg_id in reversed(cg_ids):
        db.consistencygroup_destroy(admin_ctxt, cg_id)
def test_create_volume_with_consistencygroup_invalid_type(self):
    """Test volume creation with ConsistencyGroup & invalid volume type."""
    admin_ctxt = context.get_admin_context()
    vol_type = db.volume_type_create(
        admin_ctxt,
        dict(name=conf_fixture.def_vol_type, extra_specs={}))
    db_vol_type = db.volume_type_get(admin_ctxt, vol_type.id)
    cg = {
        'id': '1',
        'name': 'cg1',
        'volume_type_id': db_vol_type['id'],
    }
    fake_type = {
        'id': '9999',
        'name': 'fake',
    }
    vol_api = cinder.volume.api.API()

    # A volume type is mandatory when creating a volume in a
    # consistency group.
    self.assertRaises(exception.InvalidInput,
                      vol_api.create,
                      self.context, 1, 'vol1', 'volume 1',
                      consistencygroup=cg)

    # The supplied volume type must also be valid.
    self.assertRaises(exception.InvalidInput,
                      vol_api.create,
                      self.context, 1, 'vol1', 'volume 1',
                      volume_type=fake_type,
                      consistencygroup=cg)
def test_list_transfers_detail_json(self):
    """Detail-list transfers as JSON and verify each entry's fields."""
    volume_id_1 = self._create_volume(size=5)
    volume_id_2 = self._create_volume(size=5)
    transfer1 = self._create_transfer(volume_id_1)
    transfer2 = self._create_transfer(volume_id_2)

    req = webob.Request.blank("/v2/fake/os-volume-transfer/detail")
    req.method = "GET"
    req.headers["Content-Type"] = "application/json"
    req.headers["Accept"] = "application/json"
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(200, res.status_int)
    for pos, (transfer, volume_id) in enumerate(
            [(transfer1, volume_id_1), (transfer2, volume_id_2)]):
        entry = res_dict["transfers"][pos]
        self.assertEqual(5, len(entry))
        self.assertEqual("test_transfer", entry["name"])
        self.assertEqual(transfer["id"], entry["id"])
        self.assertEqual(volume_id, entry["volume_id"])

    admin_ctxt = context.get_admin_context()
    db.transfer_destroy(admin_ctxt, transfer2["id"])
    db.transfer_destroy(admin_ctxt, transfer1["id"])
    db.volume_destroy(admin_ctxt, volume_id_2)
    db.volume_destroy(admin_ctxt, volume_id_1)
def test_list_consistencygroups_json(self):
    """List consistency groups via JSON and verify ids and names."""
    cg_ids = [self._create_consistencygroup() for _ in range(3)]

    req = webob.Request.blank('/v2/fake/consistencygroups')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(200, res.status_int)
    for index, cg_id in enumerate(cg_ids):
        entry = res_dict['consistencygroups'][index]
        self.assertEqual(cg_id, entry['id'])
        self.assertEqual('test_consistencygroup', entry['name'])

    admin_ctxt = context.get_admin_context()
    # Tear down in reverse creation order, as the original did.
    for cg_id in reversed(cg_ids):
        db.consistencygroup_destroy(admin_ctxt, cg_id)
def test_create_delete_volume(self):
    """Test volume can be created and deleted.

    Fix: replace the deprecated ``assertEquals`` alias with ``assertEqual``
    (expected value first).
    """
    # Need to stub out reserve, commit, and rollback so no real quota
    # bookkeeping happens.
    def fake_reserve(context, expire=None, **deltas):
        return ["RESERVATION"]

    def fake_commit(context, reservations):
        pass

    def fake_rollback(context, reservations):
        pass

    self.stubs.Set(QUOTAS, "reserve", fake_reserve)
    self.stubs.Set(QUOTAS, "commit", fake_commit)
    self.stubs.Set(QUOTAS, "rollback", fake_rollback)

    volume = self._create_volume()
    volume_id = volume['id']
    self.assertEqual(0, len(test_notifier.NOTIFICATIONS))
    self.volume.create_volume(self.context, volume_id)
    # create emits a .start and a .end notification.
    self.assertEqual(2, len(test_notifier.NOTIFICATIONS))
    self.assertEqual(volume_id,
                     db.volume_get(context.get_admin_context(),
                                   volume_id).id)

    self.volume.delete_volume(self.context, volume_id)
    vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
                        volume_id)
    self.assertEqual('deleted', vol['status'])
    # delete emits another .start/.end pair.
    self.assertEqual(4, len(test_notifier.NOTIFICATIONS))
    self.assertRaises(exception.NotFound,
                      db.volume_get,
                      self.context,
                      volume_id)
def test_volume_create_with_type(self):
    """Create a volume with the default volume type, then list it in detail.

    Fix: replace the deprecated ``assertEquals`` alias with ``assertEqual``.
    """
    vol_type = db.volume_type_create(
        context.get_admin_context(),
        dict(name=CONF.default_volume_type, extra_specs={}))
    db_vol_type = db.volume_type_get(context.get_admin_context(),
                                     vol_type.id)

    vol = {
        "size": 100,
        "name": "Volume Test Name",
        "description": "Volume Test Desc",
        "availability_zone": "zone1:host1",
        "volume_type": db_vol_type['id'],
    }
    body = {"volume": vol}
    req = fakes.HTTPRequest.blank('/v2/volumes')
    res_dict = self.controller.create(req, body)
    volume_id = res_dict['volume']['id']
    self.assertEqual(1, len(res_dict))

    # Stub get_all so the detail listing returns the volume just created.
    self.stubs.Set(volume_api.API, 'get_all',
                   lambda *args, **kwargs:
                   [stubs.stub_volume(volume_id,
                                      volume_type={'name': vol_type})])
    req = fakes.HTTPRequest.blank('/v2/volumes/detail')
    res_dict = self.controller.detail(req)
def test_list_transfers_detail_json(self):
    """Detail-list transfers as JSON and check both records' fields."""
    volume_id_1 = self._create_volume(size=5)
    volume_id_2 = self._create_volume(size=5)
    transfer1 = self._create_transfer(volume_id_1)
    transfer2 = self._create_transfer(volume_id_2)

    req = webob.Request.blank('/v2/%s/os-volume-transfer/detail'
                              % fake.PROJECT_ID)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    req.headers['Accept'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(200, res.status_int)
    for pos, (transfer, volume_id) in enumerate(
            [(transfer1, volume_id_1), (transfer2, volume_id_2)]):
        entry = res_dict['transfers'][pos]
        self.assertEqual(5, len(entry))
        self.assertEqual('test_transfer', entry['name'])
        self.assertEqual(transfer['id'], entry['id'])
        self.assertEqual(volume_id, entry['volume_id'])

    admin_ctxt = context.get_admin_context()
    db.transfer_destroy(admin_ctxt, transfer2['id'])
    db.transfer_destroy(admin_ctxt, transfer1['id'])
    db.volume_destroy(admin_ctxt, volume_id_2)
    db.volume_destroy(admin_ctxt, volume_id_1)
def test_delete_transfer_awaiting_transfer(self):
    """Deleting a pending transfer makes its volume available again."""
    volume_id = self._create_volume()
    transfer = self._create_transfer(volume_id)

    url = '/v2/%s/os-volume-transfer/%s' % (fake.PROJECT_ID,
                                            transfer['id'])
    req = webob.Request.blank(url)
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    self.assertEqual(202, res.status_int)

    # verify transfer has been deleted
    req = webob.Request.blank(url)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(404, res.status_int)
    self.assertEqual(404, res_dict['itemNotFound']['code'])
    self.assertEqual('Transfer %s could not be found.' % transfer['id'],
                     res_dict['itemNotFound']['message'])
    self.assertEqual(db.volume_get(context.get_admin_context(),
                                   volume_id)['status'], 'available')
    db.volume_destroy(context.get_admin_context(), volume_id)
def tearDown(self):
    """Remove the volume types created for this test case."""
    admin_ctxt = context.get_admin_context()
    for vol_type in (self.vol_type1, self.vol_type2_noextra):
        db.volume_type_destroy(admin_ctxt, vol_type['id'])
    super(VolumeTypeExtraSpecsTestCase, self).tearDown()
def test_show_cgsnapshot(self):
    """Show a cgsnapshot and verify its description, name and status."""
    consistencygroup = utils.create_consistencygroup(self.context)
    volume_id = utils.create_volume(
        self.context, consistencygroup_id=consistencygroup.id)['id']
    cgsnapshot_id = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup.id)

    req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot_id)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(200, res.status_int)
    snap = res_dict['cgsnapshot']
    self.assertEqual('this is a test cgsnapshot', snap['description'])
    self.assertEqual('test_cgsnapshot', snap['name'])
    self.assertEqual('creating', snap['status'])

    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id)
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def test_transfer_destroy(self):
    """Transfers are removed one by one and ownership is enforced.

    Fix: replace the deprecated ``assertEquals`` alias with ``assertEqual``
    (expected value first).
    """
    volume_id = utils.create_volume(self.ctxt)['id']
    volume_id2 = utils.create_volume(self.ctxt)['id']
    xfer_id1 = self._create_transfer(volume_id)
    xfer_id2 = self._create_transfer(volume_id2)

    xfer = db.transfer_get_all(context.get_admin_context())
    self.assertEqual(2, len(xfer), "Unexpected number of transfer records")
    self.assertFalse(xfer[0]['deleted'], "Deleted flag is set")

    db.transfer_destroy(self.ctxt, xfer_id1)
    xfer = db.transfer_get_all(context.get_admin_context())
    self.assertEqual(1, len(xfer), "Unexpected number of transfer records")
    self.assertEqual(xfer_id2, xfer[0]['id'],
                     "Unexpected value for Transfer id")

    # A different tenant's context must not be able to destroy the
    # remaining transfer ...
    nctxt = context.RequestContext(user_id='new_user_id',
                                   project_id='new_project_id')
    self.assertRaises(exception.TransferNotFound,
                      db.transfer_destroy, nctxt, xfer_id2)

    # ... but its elevated (admin) form can.
    db.transfer_destroy(nctxt.elevated(), xfer_id2)
    xfer = db.transfer_get_all(context.get_admin_context())
    self.assertEqual(0, len(xfer), "Unexpected number of transfer records")
def test_volume_create_with_type(self):
    """Create v1 volumes by volume type name and by type id.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    vol_type = CONF.default_volume_type
    db.volume_type_create(context.get_admin_context(),
                          dict(name=vol_type, extra_specs={}))
    db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
                                             vol_type)

    vol = {"size": 100,
           "display_name": "Volume Test Name",
           "display_description": "Volume Test Desc",
           "availability_zone": "zone1:host1",
           "volume_type": "FakeTypeName"}
    body = {"volume": vol}
    req = fakes.HTTPRequest.blank('/v1/volumes')
    # Raise 404 when type name isn't valid
    self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                      req, body)

    # Use correct volume type name
    vol.update(dict(volume_type=CONF.default_volume_type))
    body.update(dict(volume=vol))
    res_dict = self.controller.create(req, body)
    self.assertIn('id', res_dict['volume'])
    self.assertEqual(1, len(res_dict))
    self.assertEqual(db_vol_type['name'],
                     res_dict['volume']['volume_type'])

    # Use correct volume type id
    vol.update(dict(volume_type=db_vol_type['id']))
    body.update(dict(volume=vol))
    res_dict = self.controller.create(req, body)
    self.assertIn('id', res_dict['volume'])
    self.assertEqual(1, len(res_dict))
    self.assertEqual(db_vol_type['name'],
                     res_dict['volume']['volume_type'])
def test_restore_backup_to_undersized_volume(self):
    """Restoring to a volume smaller than the backup returns 400.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    backup_size = 10
    backup_id = self._create_backup(status='available', size=backup_size)
    # need to create the volume referenced below first
    volume_size = 5
    volume_id = self._create_volume(status='available', size=volume_size)

    body = {"restore": {"volume_id": volume_id, }}
    req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    self.assertEqual('Invalid volume: volume size %d is too '
                     'small to restore backup of size %d.'
                     % (volume_size, backup_size),
                     res_dict['badRequest']['message'])

    db.volume_destroy(context.get_admin_context(), volume_id)
    db.backup_destroy(context.get_admin_context(), backup_id)
def test_show_backup(self):
    """Show a backup and verify every exposed field.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    volume_id = self._create_volume(size=5)
    backup_id = self._create_backup(volume_id)
    LOG.debug('Created backup with id %s' % backup_id)

    req = webob.Request.blank('/v2/fake/backups/%s' % backup_id)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(200, res.status_int)
    self.assertEqual('az1', res_dict['backup']['availability_zone'])
    self.assertEqual('volumebackups', res_dict['backup']['container'])
    self.assertEqual('this is a test backup',
                     res_dict['backup']['description'])
    self.assertEqual('test_backup', res_dict['backup']['name'])
    self.assertEqual(backup_id, res_dict['backup']['id'])
    self.assertEqual(0, res_dict['backup']['object_count'])
    self.assertEqual(0, res_dict['backup']['size'])
    self.assertEqual('creating', res_dict['backup']['status'])
    self.assertEqual(volume_id, res_dict['backup']['volume_id'])

    db.backup_destroy(context.get_admin_context(), backup_id)
    db.volume_destroy(context.get_admin_context(), volume_id)
def test_list_backups_json(self):
    """List backups as JSON; each summary entry has exactly three fields.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    backup_id1 = self._create_backup()
    backup_id2 = self._create_backup()
    backup_id3 = self._create_backup()

    req = webob.Request.blank('/v2/fake/backups')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(200, res.status_int)
    for index, backup_id in enumerate([backup_id1, backup_id2,
                                       backup_id3]):
        self.assertEqual(3, len(res_dict['backups'][index]))
        self.assertEqual(backup_id, res_dict['backups'][index]['id'])
        self.assertEqual('test_backup',
                         res_dict['backups'][index]['name'])

    db.backup_destroy(context.get_admin_context(), backup_id3)
    db.backup_destroy(context.get_admin_context(), backup_id2)
    db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_xml(self):
    """List backups as XML; each element carries exactly two attributes.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    backup_id1 = self._create_backup()
    backup_id2 = self._create_backup()
    backup_id3 = self._create_backup()

    req = webob.Request.blank('/v2/fake/backups')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/xml'
    req.headers['Accept'] = 'application/xml'
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(200, res.status_int)
    dom = minidom.parseString(res.body)
    backup_list = dom.getElementsByTagName('backup')
    for index, backup_id in enumerate([backup_id1, backup_id2,
                                       backup_id3]):
        self.assertEqual(2, backup_list.item(index).attributes.length)
        self.assertEqual(backup_id,
                         backup_list.item(index).getAttribute('id'))

    db.backup_destroy(context.get_admin_context(), backup_id3)
    db.backup_destroy(context.get_admin_context(), backup_id2)
    db.backup_destroy(context.get_admin_context(), backup_id1)
def test_create_consistencygroup_json(self, mock_validate):
    """Create a consistency group via JSON and verify the 202 response.

    Fix: dropped the dead ``group_id = "1"`` assignment that was
    immediately overwritten by the value from the response.
    """
    # Create volume type
    vol_type = 'test'
    db.volume_type_create(context.get_admin_context(),
                          {'name': vol_type, 'extra_specs': {}})

    body = {"consistencygroup": {"name": "cg1",
                                 "volume_types": vol_type,
                                 "description":
                                 "Consistency Group 1", }}
    req = webob.Request.blank('/v2/fake/consistencygroups')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(202, res.status_int)
    self.assertIn('id', res_dict['consistencygroup'])
    self.assertTrue(mock_validate.called)

    group_id = res_dict['consistencygroup']['id']
    cg = objects.ConsistencyGroup.get_by_id(context.get_admin_context(),
                                            group_id)
    cg.destroy()
def test_add_visible_admin_metadata_visible_key_only(self):
    """Only 'readonly'/'attached_mode' admin keys are merged into metadata.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    # List-of-dict form (DB row style).
    admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
                      {"key": "readonly", "value": "visible"},
                      {"key": "attached_mode", "value": "visible"}]
    metadata = [{"key": "key", "value": "value"}]
    volume = dict(volume_admin_metadata=admin_metadata,
                  volume_metadata=metadata)
    admin_ctx = context.get_admin_context()
    self.controller._add_visible_admin_metadata(admin_ctx, volume)
    self.assertEqual([{"key": "key", "value": "value"},
                      {"key": "readonly", "value": "visible"},
                      {"key": "attached_mode", "value": "visible"}],
                     volume['volume_metadata'])

    # Plain-dict form.
    admin_metadata = {"invisible_key": "invisible_value",
                      "readonly": "visible",
                      "attached_mode": "visible"}
    metadata = {"key": "value"}
    volume = dict(admin_metadata=admin_metadata, metadata=metadata)
    admin_ctx = context.get_admin_context()
    self.controller._add_visible_admin_metadata(admin_ctx, volume)
    self.assertEqual({'key': 'value',
                      'attached_mode': 'visible',
                      'readonly': 'visible'},
                     volume['metadata'])
def test_delete_cgsnapshot_available(self):
    """Deleting an available cgsnapshot moves it to 'deleting'.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    volume_id = utils.create_volume(
        self.context, consistencygroup_id=consistencygroup_id)['id']
    cgsnapshot_id = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id, status='available')

    req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot_id)
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(202, res.status_int)
    self.assertEqual('deleting',
                     self._get_cgsnapshot_attrib(cgsnapshot_id, 'status'))

    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id)
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.consistencygroup_destroy(context.get_admin_context(),
                                consistencygroup_id)
def test_delete_cgsnapshot_with_Invalidcgsnapshot(self):
    """Deleting a cgsnapshot in an invalid state returns 400.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    volume_id = utils.create_volume(
        self.context, consistencygroup_id=consistencygroup_id)['id']
    cgsnapshot_id = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id, status='invalid')

    req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot_id)
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(400, res.status_int)
    self.assertEqual(400, res_dict['badRequest']['code'])
    self.assertEqual('Invalid cgsnapshot',
                     res_dict['badRequest']['message'])

    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id)
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.consistencygroup_destroy(context.get_admin_context(),
                                consistencygroup_id)
def test_list_transfers_detail_json(self):
    """Detail-list transfers as JSON and verify each entry's fields.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    volume_id_1 = self._create_volume(size=5)
    volume_id_2 = self._create_volume(size=5)
    transfer1 = self._create_transfer(volume_id_1)
    transfer2 = self._create_transfer(volume_id_2)

    req = webob.Request.blank('/v2/fake/os-volume-transfer/detail')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    req.headers['Accept'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(200, res.status_int)
    for pos, (transfer, volume_id) in enumerate(
            [(transfer1, volume_id_1), (transfer2, volume_id_2)]):
        entry = res_dict['transfers'][pos]
        self.assertEqual(5, len(entry))
        self.assertEqual('test_transfer', entry['name'])
        self.assertEqual(transfer['id'], entry['id'])
        self.assertEqual(volume_id, entry['volume_id'])

    db.transfer_destroy(context.get_admin_context(), transfer2['id'])
    db.transfer_destroy(context.get_admin_context(), transfer1['id'])
    db.volume_destroy(context.get_admin_context(), volume_id_2)
    db.volume_destroy(context.get_admin_context(), volume_id_1)
def test_delete_cgsnapshot_with_invalid_cgsnapshot(self):
    """Deleting a group snapshot in an invalid state returns 400."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(
        self.context,
        volume_type_id=vol_type['id'],
        group_id=consistencygroup.id)['id']
    cgsnapshot = utils.create_group_snapshot(
        self.context,
        group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
        status='invalid')

    req = webob.Request.blank('/v2/%s/cgsnapshots/%s'
                              % (fake.PROJECT_ID, cgsnapshot.id))
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual(http_client.BAD_REQUEST,
                     res_dict['badRequest']['code'])
    self.assertIsNotNone(res_dict['badRequest']['message'])

    cgsnapshot.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def test_delete_cgsnapshot_available(self):
    """Deleting an available group snapshot moves it to 'deleting'."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(
        self.context,
        volume_type_id=vol_type['id'],
        group_id=consistencygroup.id)['id']
    cgsnapshot = utils.create_group_snapshot(
        self.context,
        group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
        status='available')

    req = webob.Request.blank('/v2/%s/cgsnapshots/%s'
                              % (fake.PROJECT_ID, cgsnapshot.id))
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))

    # Re-read the snapshot to observe the status transition.
    cgsnapshot = objects.GroupSnapshot.get_by_id(self.context,
                                                 cgsnapshot.id)
    self.assertEqual(http_client.ACCEPTED, res.status_int)
    self.assertEqual('deleting', cgsnapshot.status)

    cgsnapshot.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def test_delete_cgsnapshot_available_used_as_source(self):
    """A snapshot that is the source of a new group cannot be deleted."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(
        self.context,
        volume_type_id=vol_type['id'],
        group_id=consistencygroup.id)['id']
    cgsnapshot = utils.create_group_snapshot(
        self.context,
        group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
        status='available')
    # A second group being created from this snapshot pins it in place.
    cg2 = utils.create_consistencygroup(
        self.context,
        status='creating',
        group_snapshot_id=cgsnapshot.id,
        group_type_id=fake.GROUP_TYPE_ID)

    req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot.id)
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())

    cgsnapshot = objects.GroupSnapshot.get_by_id(self.context,
                                                 cgsnapshot.id)
    self.assertEqual(http_client.BAD_REQUEST, res.status_int)
    self.assertEqual('available', cgsnapshot.status)

    cgsnapshot.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
    cg2.destroy()
def test_create_cgsnapshot_json(self, mock_validate):
    """Create a cgsnapshot via JSON and verify it is accepted."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(
        self.context,
        volume_type_id=vol_type['id'],
        group_id=consistencygroup.id)['id']

    body = {"cgsnapshot": {"name": "cg1",
                           "description":
                           "CG Snapshot 1",
                           "consistencygroup_id": consistencygroup.id}}
    req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.ACCEPTED, res.status_int)
    self.assertIn('id', res_dict['cgsnapshot'])
    self.assertTrue(mock_validate.called)

    cgsnapshot = objects.GroupSnapshot.get_by_id(
        context.get_admin_context(), res_dict['cgsnapshot']['id'])
    cgsnapshot.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def test_create_with_cgsnapshot_not_found(self, mock_create_cgsnapshot):
    """A 404 with a clear message is returned when the snapshot is gone."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(
        self.context,
        volume_type_id=vol_type['id'],
        group_id=consistencygroup.id)['id']

    body = {"cgsnapshot": {"name": "cg1",
                           "description":
                           "CG Snapshot 1",
                           "consistencygroup_id": consistencygroup.id}}
    req = webob.Request.blank('/v2/%s/cgsnapshots' % fake.PROJECT_ID)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    res = req.get_response(fakes.wsgi_app(
        fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(http_client.NOT_FOUND, res.status_int)
    self.assertEqual(http_client.NOT_FOUND,
                     res_dict['itemNotFound']['code'])
    self.assertEqual('GroupSnapshot invalid_id could not be found.',
                     res_dict['itemNotFound']['message'])

    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def test_list_transfers_xml(self):
    """List transfers as XML; each element carries three attributes.

    Fix: use assertEqual(expected, observed) argument order consistently
    with the rest of the file.
    """
    volume_id_1 = self._create_volume(size=5)
    volume_id_2 = self._create_volume(size=5)
    transfer1 = self._create_transfer(volume_id_1)
    transfer2 = self._create_transfer(volume_id_2)

    req = webob.Request.blank('/v2/fake/os-volume-transfer')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/xml'
    req.headers['Accept'] = 'application/xml'
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(200, res.status_int)
    dom = minidom.parseString(res.body)
    transfer_list = dom.getElementsByTagName('transfer')
    self.assertEqual(3, transfer_list.item(0).attributes.length)
    self.assertEqual(transfer1['id'],
                     transfer_list.item(0).getAttribute('id'))
    self.assertEqual(3, transfer_list.item(1).attributes.length)
    self.assertEqual(transfer2['id'],
                     transfer_list.item(1).getAttribute('id'))

    db.transfer_destroy(context.get_admin_context(), transfer2['id'])
    db.transfer_destroy(context.get_admin_context(), transfer1['id'])
    db.volume_destroy(context.get_admin_context(), volume_id_2)
    db.volume_destroy(context.get_admin_context(), volume_id_1)
def test_list_transfers_detail_xml(self):
    """Detail-list transfers as XML and verify each element's attributes."""
    volume_id_1 = self._create_volume(size=5)
    volume_id_2 = self._create_volume(size=5)
    transfer1 = self._create_transfer(volume_id_1)
    transfer2 = self._create_transfer(volume_id_2)

    req = webob.Request.blank("/v2/fake/os-volume-transfer/detail")
    req.method = "GET"
    req.headers["Content-Type"] = "application/xml"
    req.headers["Accept"] = "application/xml"
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(200, res.status_int)
    dom = minidom.parseString(res.body)
    transfer_detail = dom.getElementsByTagName("transfer")
    for pos, (transfer, volume_id) in enumerate(
            [(transfer1, volume_id_1), (transfer2, volume_id_2)]):
        node = transfer_detail.item(pos)
        self.assertEqual(4, node.attributes.length)
        self.assertEqual("test_transfer", node.getAttribute("name"))
        self.assertEqual(transfer["id"], node.getAttribute("id"))
        self.assertEqual(volume_id, node.getAttribute("volume_id"))

    db.transfer_destroy(context.get_admin_context(), transfer2["id"])
    db.transfer_destroy(context.get_admin_context(), transfer1["id"])
    db.volume_destroy(context.get_admin_context(), volume_id_2)
    db.volume_destroy(context.get_admin_context(), volume_id_1)
def test_create_encryption_type_exists(self):
    """Creating encryption specs for a type that has them returns 400."""
    self.stubs.Set(db, 'volume_type_encryption_get',
                   return_volume_type_encryption)
    volume_type = {
        'id': 'fake_type_id',
        'name': 'fake_type',
    }
    db.volume_type_create(context.get_admin_context(), volume_type)

    body = {"encryption": {'cipher': 'cipher',
                           'control_location': 'front-end',
                           'key_size': 128,
                           'provider': 'fake_provider',
                           'volume_type_id': volume_type['id']}}

    # Try to create encryption specs for a volume type
    # that already has them.
    res = self._get_response(volume_type, req_method='POST',
                             req_body=json.dumps(body),
                             req_headers='application/json')
    res_dict = json.loads(res.body)

    expected = {
        'badRequest': {
            'code': 400,
            'message': ('Volume type encryption for type '
                        'fake_type_id already exists.')
        }
    }
    self.assertEqual(expected, res_dict)
    db.volume_type_destroy(context.get_admin_context(),
                           volume_type['id'])
def create_cloned_volume(self, volume, src_vref):
    """Creates a full clone of the specified volume."""
    admin_context = context.get_admin_context()
    self.common._create_lun(volume)
    self.copy_volume_data(admin_context, src_vref, volume)
def setUp(self):
    """Provide an admin request context for each test."""
    super(TestCinderObjectConditionalUpdate, self).setUp()
    self.context = context.get_admin_context()
def get_test_admin_context():
    """Return a fresh admin request context for use in tests."""
    return context.get_admin_context()
class FakeRequestWithHostBinary(object):
    """Fake request carrying an admin context plus host and binary filters."""

    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1", "binary": "cinder-volume"}
class FakeRequestWithHostService(object):
    """Fake request carrying an admin context plus host and service filters."""

    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1", "service": "cinder-volume"}
class FakeRequestWithHost(object):
    """Fake request carrying an admin context plus a host filter."""

    environ = {"cinder.context": context.get_admin_context()}
    GET = {"host": "host1"}
def get_usage_and_notify(self, capa_new, updated_pools, host, timestamp):
    """Compute capacity usage for *host* and send a usage notification."""
    # Renamed the local from 'context' so it does not shadow the usual
    # context-module alias.
    admin_context = cinder_context.get_admin_context()
    usage = self._get_usage(capa_new, updated_pools, host, timestamp)
    self._notify_capacity_usage(admin_context, usage)
def periodic_tasks(self, raise_on_error=False):
    """Tasks to be run at a periodic interval."""
    admin_context = context.get_admin_context()
    self.manager.periodic_tasks(admin_context,
                                raise_on_error=raise_on_error)
def setUp(self):
    """Create an admin context and mock out the NFS driver logger."""
    super(BackupNFSShareTestCase, self).setUp()
    self.ctxt = context.get_admin_context()
    self.mock_object(nfs, 'LOG')
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot."""
    admin_context = context.get_admin_context()
    # copy_volume_data needs a size on the source; take it from the
    # snapshot's parent volume.
    snapshot['size'] = snapshot['volume']['size']
    self.common._create_lun(volume)
    self.copy_volume_data(admin_context, snapshot, volume)
def _get_snapshot_volume(self, snapshot):
    """Look up the DB record of the volume that backs *snapshot*."""
    return db.volume_get(context.get_admin_context(),
                         snapshot['volume_id'])
def init_host_with_rpc(self):
    """Collect service capabilities, then lift the startup delay."""
    self.request_service_capabilities(context.get_admin_context())
    # Give volume services one periodic interval to report back before
    # the scheduler starts making placement decisions.
    eventlet.sleep(CONF.periodic_interval)
    self._startup_delay = False
def test_volume_show_with_admin_metadata(self):
    """Show a volume as admin: admin metadata is merged into 'metadata'.

    The 'readonly' admin-metadata key must appear in the response while
    'invisible_key' must be filtered out by the view.
    """
    # Build a bare volume record; drop fields the stub pre-fills so the
    # dict can be inserted directly via db.volume_create.
    volume = stubs.stub_volume("1")
    del volume['name']
    del volume['volume_type']
    del volume['volume_type_id']
    volume['metadata'] = {'key': 'value'}
    db.volume_create(context.get_admin_context(), volume)
    # Admin-only metadata: only 'readonly' is expected to surface.
    db.volume_admin_metadata_update(context.get_admin_context(), "1",
                                    {"readonly": "True",
                                     "invisible_key": "invisible_value"},
                                    False)
    # Attach the volume so the view reports 'in-use' with attachment info.
    values = {'volume_id': '1', }
    attachment = db.volume_attach(context.get_admin_context(), values)
    db.volume_attached(context.get_admin_context(), attachment['id'],
                       stubs.FAKE_UUID, None, '/')

    # Issue the show request with an admin context in the environ.
    req = fakes.HTTPRequest.blank('/v1/volumes/1')
    admin_ctx = context.RequestContext('admin', 'fakeproject', True)
    req.environ['cinder.context'] = admin_ctx
    res_dict = self.controller.show(req, '1')
    expected = {
        'volume': {
            'status': 'in-use',
            'display_description': 'displaydesc',
            'availability_zone': 'fakeaz',
            'display_name': 'displayname',
            'encrypted': False,
            'attachments': [{
                'attachment_id': attachment['id'],
                'device': '/',
                'server_id': stubs.FAKE_UUID,
                'host_name': None,
                'id': '1',
                'volume_id': '1'
            }],
            'multiattach': 'false',
            'bootable': 'false',
            'volume_type': None,
            'snapshot_id': None,
            'source_volid': None,
            # 'readonly' comes from admin metadata; 'invisible_key'
            # must not leak into the response.
            'metadata': {'key': 'value',
                         'readonly': 'True'},
            'id': '1',
            'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
            'size': 1
        }
    }
    self.assertEqual(res_dict, expected)
def init_host(self):
    """On host start, ask all volume services to report capabilities."""
    self.request_service_capabilities(context.get_admin_context())
def _create_transfer(self, volume_id=fake.VOLUME_ID,
                     display_name='test_transfer'):
    """Create and return a volume transfer via the transfer API."""
    admin_ctx = context.get_admin_context()
    return self.volume_transfer_api.create(admin_ctx, volume_id,
                                           display_name)
def setUp(self):
    """Build a mocked request carrying an admin cinder context."""
    super(QuotaClassesSerializerTest, self).setUp()
    mock_req = self.mox.CreateMockAnything()
    mock_req.environ = {'cinder.context': context.get_admin_context()}
    self.req = mock_req
def setUp(self):
    """Give every test an admin request context."""
    super(CreateVolumeFlowManagerGlanceCinderBackendCase, self).setUp()
    admin_ctx = context.get_admin_context()
    self.ctxt = admin_ctx
def _do_clone_volume(self, src_uuid, src_project_id, v_ref):
    """Create a clone of an existing volume.

    Currently snapshots are the same as clones on the SF cluster.
    Due to the way the SF cluster works there's no loss in efficiency
    or space usage between the two. The only thing different right now
    is the restore snapshot functionality which has not been implemented
    in the pre-release version of the SolidFire Cluster.

    :param src_uuid: Cinder UUID of the source volume.
    :param src_project_id: project owning the source volume.
    :param v_ref: reference of the new (destination) volume.
    :returns: tuple of (ModifyVolume API response, sfaccount,
              model_update dict).
    :raises VolumeNotFound: if the source is not on the cluster.
    :raises SolidFireAPIException: on a malformed CloneVolume response
             or when the model update cannot be retrieved.
    """
    attributes = {}
    qos = {}

    # Resolve the SolidFire account of the *source* project first; the
    # source lookup below is scoped to that account.
    sfaccount = self._get_sfaccount(src_project_id)
    params = {'accountID': sfaccount['accountID']}

    sf_vol = self._get_sf_volume(src_uuid, params)
    if sf_vol is None:
        raise exception.VolumeNotFound(volume_id=src_uuid)

    # Cross-project clone: the new volume must land in the destination
    # project's account (created on demand).
    if src_project_id != v_ref['project_id']:
        sfaccount = self._create_sfaccount(v_ref['project_id'])

    # 'size' is set for volume refs; snapshot refs carry 'volume_size'.
    if v_ref.get('size', None):
        new_size = v_ref['size']
    else:
        new_size = v_ref['volume_size']

    params = {'volumeID': int(sf_vol['volumeID']),
              'name': 'UUID-%s' % v_ref['id'],
              'newSize': int(new_size * self.GB),
              'newAccountID': sfaccount['accountID']}
    data = self._issue_api_request('CloneVolume', params)

    if (('result' not in data) or ('volumeID' not in data['result'])):
        msg = _("API response: %s") % data
        raise exception.SolidFireAPIException(msg)
    sf_volume_id = data['result']['volumeID']

    # Tenant-supplied QoS presets, if the backend allows them...
    if (self.configuration.sf_allow_tenant_qos and
            v_ref.get('volume_metadata') is not None):
        qos = self._set_qos_presets(v_ref)

    # ...but QoS derived from the volume type wins when a type is set.
    ctxt = context.get_admin_context()
    type_id = v_ref.get('volume_type_id', None)
    if type_id is not None:
        qos = self._set_qos_by_volume_type(ctxt, type_id)

    # NOTE(jdg): all attributes are copied via clone, need to do an update
    # to set any that were provided
    params = {'volumeID': sf_volume_id}
    # NOTE(review): timeutils.strtime is deprecated in newer oslo.utils;
    # confirm the library version pinned here still provides it.
    create_time = timeutils.strtime(v_ref['created_at'])
    attributes = {'uuid': v_ref['id'],
                  'is_clone': 'True',
                  'src_uuid': src_uuid,
                  'created_at': create_time}
    if qos:
        params['qos'] = qos
        # QoS values are also mirrored into the attributes as strings so
        # they survive round-trips through the cluster metadata.
        for k, v in qos.items():
            attributes[k] = str(v)
    params['attributes'] = attributes
    data = self._issue_api_request('ModifyVolume', params)

    model_update = self._get_model_info(sfaccount, sf_volume_id)
    if model_update is None:
        mesg = _('Failed to get model update from clone')
        raise exception.SolidFireAPIException(mesg)

    return (data, sfaccount, model_update)
def setUp(self):
    """Initialize LightOS Storage Driver.

    Builds a fully mocked driver configuration (no real cluster or
    config file needed), instantiates the driver, and replaces the
    cluster's ``send_cmd`` with an in-memory dispatcher backed by
    :class:`DBMock` and the FAKE_LIGHTOS_* fixtures.
    """
    super(LightOSStorageVolumeDriverTest, self).setUp()

    # Deterministic mock configuration for the driver under test.
    configuration = mock.Mock(conf.Configuration)
    configuration.lightos_api_address = \
        "10.10.10.71,10.10.10.72,10.10.10.73"
    configuration.lightos_api_port = 443
    configuration.lightos_jwt = None
    configuration.lightos_snapshotname_prefix = 'openstack_'
    configuration.lightos_intermediate_snapshot_name_prefix = 'for_clone_'
    configuration.lightos_default_compression_enabled = (
        DEFAULT_COMPRESSION)
    configuration.lightos_default_num_replicas = 3
    configuration.num_volume_device_scan_tries = (
        DEVICE_SCAN_ATTEMPTS_DEFAULT)
    configuration.lightos_api_service_timeout = LIGHTOS_API_SERVICE_TIMEOUT
    configuration.driver_ssl_cert_verify = False
    # for some reason this value is not initialized by the driver parent
    # configs
    configuration.volume_name_template = 'volume-%s'
    configuration.initiator_connector = (
        "cinder.tests.unit.volume.drivers.lightos."
        "test_lightos_storage.InitiatorConnectorFactoryMocker")
    configuration.volume_backend_name = VOLUME_BACKEND_NAME
    configuration.reserved_percentage = RESERVED_PERCENTAGE

    def mocked_safe_get(config, variable_name):
        # Mimic Configuration.safe_get(): unknown options yield None
        # instead of raising AttributeError.
        if hasattr(config, variable_name):
            return config.__getattribute__(variable_name)
        else:
            return None

    configuration.safe_get = functools.partial(mocked_safe_get,
                                               configuration)
    self.driver = lightos.LightOSVolumeDriver(configuration=configuration)
    self.ctxt = context.get_admin_context()
    # In-memory stand-in for the LightOS cluster's volume/snapshot store.
    self.db: DBMock = DBMock()

    # define a default send_cmd override to return default values.
    def send_cmd_default_mock(cmd, timeout, **kwargs):
        # Dispatch on the LightOS REST command name; each branch returns
        # an (http_status, body) tuple like the real transport would.
        if cmd == "get_nodes":
            return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_NODES)
        if cmd == "get_node":
            self.assertTrue(kwargs["UUID"])
            for node in FAKE_LIGHTOS_CLUSTER_NODES["nodes"]:
                if kwargs["UUID"] == node["UUID"]:
                    return (httpstatus.OK, node)
            # NOTE(review): on a miss 'node' is the last node examined by
            # the loop above — presumably only the NOT_FOUND status is
            # inspected by callers; confirm the body is ignored.
            return (httpstatus.NOT_FOUND, node)
        elif cmd == "get_cluster_info":
            return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_INFO)
        elif cmd == "create_volume":
            project_name = kwargs["project_name"]
            volume = {
                "project_name": project_name,
                "name": kwargs["name"],
                "size": kwargs["size"],
                "n_replicas": kwargs["n_replicas"],
                "compression": kwargs["compression"],
                "src_snapshot_name": kwargs["src_snapshot_name"],
                "acl": {
                    'values': kwargs.get('acl')
                },
                "state": "Available",
            }
            # ETag computed up front so optimistic-update tests can
            # compare against it.
            volume["ETag"] = get_vol_etag(volume)
            code, new_vol = self.db.create_volume(volume)
            return (code, new_vol)
        elif cmd == "delete_volume":
            return self.db.delete_volume(kwargs["project_name"],
                                         kwargs["volume_uuid"])
        elif cmd == "get_volume":
            return self.db.get_volume_by_uuid(kwargs["project_name"],
                                              kwargs["volume_uuid"])
        elif cmd == "get_volume_by_name":
            return self.db.get_volume_by_name(kwargs["project_name"],
                                              kwargs["volume_name"])
        elif cmd == "extend_volume":
            size = kwargs.get("size", None)
            return self.db.update_volume_by_uuid(kwargs["project_name"],
                                                 kwargs["volume_uuid"],
                                                 size=size)
        elif cmd == "create_snapshot":
            snapshot = {
                "project_name": kwargs.get("project_name", None),
                "name": kwargs.get("name", None),
                "state": "Available",
            }
            return self.db.create_snapshot(snapshot)
        elif cmd == "delete_snapshot":
            return self.db.delete_snapshot(kwargs["project_name"],
                                           kwargs["snapshot_uuid"])
        elif cmd == "get_snapshot":
            return self.db.get_snapshot_by_uuid(kwargs["project_name"],
                                                kwargs["snapshot_uuid"])
        elif cmd == "get_snapshot_by_name":
            return self.db.get_snapshot_by_name(kwargs["project_name"],
                                                kwargs["snapshot_name"])
        elif cmd == "update_volume":
            return self.db.update_volume_by_uuid(**kwargs)
        else:
            # Fail loudly on commands the mock doesn't model yet.
            raise RuntimeError(
                f"'{cmd}' is not implemented. kwargs: {kwargs}")

    self.driver.cluster.send_cmd = send_cmd_default_mock
def get_volume_type_qos_specs(volume_type_id: str) -> dict[str, Any]:
    """Return every QoS spec associated with the given volume type."""
    return db.volume_type_qos_specs_get(context.get_admin_context(),
                                        volume_type_id)
class FakeRequest(object):
    """Request double with an admin context and a volume-type cache."""

    # WSGI environ with an admin context, as the middleware would set it.
    environ = {"cinder.context": context.get_admin_context()}

    def cached_resource_by_id(self, resource_id, name=None):
        """Serve volume types from the module-level VOLUME_TYPES table."""
        vol_type = VOLUME_TYPES[resource_id]
        return vol_type
def setUp(self):
    """Give every test an admin request context."""
    super(CreateVolumeFlowManagerTestCase, self).setUp()
    admin_ctx = context.get_admin_context()
    self.ctxt = admin_ctx
def setUp(self):
    """Create the volume API and a 'fake'-tenant admin context."""
    super(CgsnapshotsAPITestCase, self).setUp()
    admin_ctx = context.get_admin_context()
    admin_ctx.project_id = 'fake'
    admin_ctx.user_id = 'fake'
    self.context = admin_ctx
    self.volume_api = cinder.volume.API()
def test_get_iscsi_target(self):
    """_get_iscsi_target must report target id 0 for the test volume."""
    admin_ctx = context.get_admin_context()
    self.assertEqual(
        0, self.target._get_iscsi_target(admin_ctx, self.testvol['id']))
def setUp(self):
    """Build the shared fixture set for the volume RPC API tests.

    Creates a volume, a snapshot, a chain of consistency groups
    (source group -> cgsnapshot -> cg from snapshot, cg2 from source),
    a generic group with a group snapshot, and a fake backup, exposing
    them all as ``self.fake_*`` attributes.
    """
    super(VolumeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    # Plain dict volume record inserted straight into the DB.
    vol = {}
    vol['host'] = 'fake_host'
    vol['availability_zone'] = CONF.storage_availability_zone
    vol['status'] = "available"
    vol['attach_status'] = fields.VolumeAttachStatus.DETACHED
    vol['metadata'] = {"test_key": "test_val"}
    vol['size'] = 1
    volume = db.volume_create(self.context, vol)

    kwargs = {
        'status': fields.SnapshotStatus.CREATING,
        'progress': '0%',
        'display_name': 'fake_name',
        'display_description': 'fake_description'}
    # NOTE(review): vol is the input dict — presumably db.volume_create
    # populates vol['id'] in place; confirm, otherwise this would raise
    # KeyError.
    snapshot = tests_utils.create_snapshot(self.context, vol['id'],
                                           **kwargs)

    # Source CG from which a cgsnapshot is taken.
    source_group = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type='type1,type2',
        host='fakehost@fakedrv#fakepool')

    cgsnapshot = tests_utils.create_cgsnapshot(
        self.context,
        consistencygroup_id=source_group.id)

    # CG created from the cgsnapshot.
    cg = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type='type1,type2',
        host='fakehost@fakedrv#fakepool',
        cgsnapshot_id=cgsnapshot.id)

    # CG cloned directly from the source CG.
    cg2 = tests_utils.create_consistencygroup(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        volume_type='type1,type2',
        host='fakehost@fakedrv#fakepool',
        source_cgid=source_group.id)

    generic_group = tests_utils.create_group(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        group_type_id='group_type1',
        host='fakehost@fakedrv#fakepool')

    group_snapshot = tests_utils.create_group_snapshot(
        self.context,
        group_id=generic_group.id,
        group_type_id='group_type1')

    # Reload the CG records as versioned objects for RPC serialization.
    cg = objects.ConsistencyGroup.get_by_id(self.context, cg.id)
    cg2 = objects.ConsistencyGroup.get_by_id(self.context, cg2.id)
    cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
    self.fake_volume = jsonutils.to_primitive(volume)
    self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
    self.fake_volume_metadata = volume["volume_metadata"]
    self.fake_snapshot = snapshot
    self.fake_reservations = ["RESERVATION"]
    self.fake_cg = cg
    self.fake_cg2 = cg2
    self.fake_src_cg = source_group
    self.fake_cgsnap = cgsnapshot
    self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)
    self.fake_group = generic_group
    self.fake_group_snapshot = group_snapshot

    self.addCleanup(self._cleanup)
def _get_cgsnapshot_attrib(cgsnapshot_id, attrib_name):
    """Fetch a single attribute of a cgsnapshot straight from the DB."""
    admin_ctx = context.get_admin_context()
    record = db.cgsnapshot_get(admin_ctx, cgsnapshot_id)
    return record[attrib_name]
def test_list_cgsnapshots_detail_json(self):
    """GET cgsnapshots/detail returns all snapshots, newest first."""
    vol_type = utils.create_volume_type(context.get_admin_context(),
                                        self, name='my_vol_type')
    consistencygroup = utils.create_group(
        self.context,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']])
    volume_id = utils.create_volume(
        self.context,
        volume_type_id=vol_type['id'],
        group_id=consistencygroup.id)['id']
    # Three snapshots of the same group, created in order 1, 2, 3.
    cgsnapshot1 = utils.create_group_snapshot(
        self.context, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
    )
    cgsnapshot2 = utils.create_group_snapshot(
        self.context, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
    )
    cgsnapshot3 = utils.create_group_snapshot(
        self.context, group_id=consistencygroup.id,
        group_type_id=fake.GROUP_TYPE_ID,
    )

    req = webob.Request.blank('/v2/%s/cgsnapshots/detail' %
                              fake.PROJECT_ID)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    req.headers['Accept'] = 'application/json'
    res = req.get_response(
        fakes.wsgi_app(fake_auth_context=self.user_ctxt))
    res_dict = jsonutils.loads(res.body)

    self.assertEqual(HTTPStatus.OK, res.status_int)
    # The listing is ordered newest-first: cgsnapshot3 at index 0,
    # cgsnapshot1 last.
    self.assertEqual('this is a test group snapshot',
                     res_dict['cgsnapshots'][0]['description'])
    self.assertEqual('test_group_snapshot',
                     res_dict['cgsnapshots'][0]['name'])
    self.assertEqual(cgsnapshot3.id,
                     res_dict['cgsnapshots'][0]['id'])
    self.assertEqual('creating',
                     res_dict['cgsnapshots'][0]['status'])

    self.assertEqual('this is a test group snapshot',
                     res_dict['cgsnapshots'][1]['description'])
    self.assertEqual('test_group_snapshot',
                     res_dict['cgsnapshots'][1]['name'])
    self.assertEqual(cgsnapshot2.id,
                     res_dict['cgsnapshots'][1]['id'])
    self.assertEqual('creating',
                     res_dict['cgsnapshots'][1]['status'])

    self.assertEqual('this is a test group snapshot',
                     res_dict['cgsnapshots'][2]['description'])
    self.assertEqual('test_group_snapshot',
                     res_dict['cgsnapshots'][2]['name'])
    self.assertEqual(cgsnapshot1.id,
                     res_dict['cgsnapshots'][2]['id'])
    self.assertEqual('creating',
                     res_dict['cgsnapshots'][2]['status'])

    # Tear down in reverse creation order.
    cgsnapshot3.destroy()
    cgsnapshot2.destroy()
    cgsnapshot1.destroy()
    db.volume_destroy(context.get_admin_context(), volume_id)
    consistencygroup.destroy()
def setUp(self):
    """Reset common RPC-API test state; subclasses provide rpcapi."""
    super(RPCAPITestCase, self).setUp()
    self.base_version = '2.0'
    self.rpcapi = None
    self.context = context.get_admin_context()
def test_single_lun_get_target_and_lun(self):
    """A single-LUN layout maps the test volume to target 0, LUN 1."""
    admin_ctx = context.get_admin_context()
    target_and_lun = self.target._get_target_and_lun(admin_ctx,
                                                     self.testvol)
    self.assertEqual((0, 1), target_and_lun)
class GPFSDriverTestCase(test.TestCase): driver_name = "cinder.volume.drivers.gpfs.GPFSDriver" context = context.get_admin_context() def _execute_wrapper(self, cmd, *args, **kwargs): try: kwargs.pop('run_as_root') except KeyError: pass return utils.execute(cmd, *args, **kwargs) def setUp(self): super(GPFSDriverTestCase, self).setUp() self.volumes_path = tempfile.mkdtemp(prefix="gpfs_") self.images_dir = '%s/images' % self.volumes_path if not os.path.exists(self.volumes_path): os.mkdir(self.volumes_path) if not os.path.exists(self.images_dir): os.mkdir(self.images_dir) self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.driver = GPFSDriver(configuration=conf.Configuration(None)) self.driver.set_execute(self._execute_wrapper) self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=self.volumes_path) self.volume = importutils.import_object(CONF.volume_manager) self.volume.driver.set_execute(self._execute_wrapper) self.volume.driver.set_initialized() self.stubs.Set(GPFSDriver, '_create_gpfs_snap', self._fake_gpfs_snap) self.stubs.Set(GPFSDriver, '_create_gpfs_copy', self._fake_gpfs_copy) self.stubs.Set(GPFSDriver, '_gpfs_redirect', self._fake_gpfs_redirect) self.stubs.Set(GPFSDriver, '_is_gpfs_parent_file', self._fake_is_gpfs_parent) self.stubs.Set(GPFSDriver, '_is_gpfs_path', self._fake_is_gpfs_path) self.stubs.Set(GPFSDriver, '_delete_gpfs_file', self._fake_delete_gpfs_file) self.stubs.Set(GPFSDriver, '_create_sparse_file', self._fake_create_sparse_file) self.stubs.Set(GPFSDriver, '_allocate_file_blocks', self._fake_allocate_file_blocks) self.stubs.Set(GPFSDriver, '_get_available_capacity', self._fake_get_available_capacity) self.stubs.Set(image_utils, 'qemu_img_info', self._fake_qemu_qcow2_image_info) self.stubs.Set(image_utils, 'convert_image', self._fake_convert_image) self.stubs.Set(image_utils, 'resize_image', self._fake_qemu_image_resize) self.context = context.get_admin_context() self.context.user_id = 'fake' self.context.project_id = 'fake' 
CONF.gpfs_images_dir = self.images_dir def tearDown(self): try: os.rmdir(self.images_dir) os.rmdir(self.volumes_path) except OSError: pass super(GPFSDriverTestCase, self).tearDown() def test_create_delete_volume_full_backing_file(self): """Create and delete vol with full creation method.""" CONF.gpfs_sparse_volumes = False vol = test_utils.create_volume(self.context, host=CONF.host) volume_id = vol['id'] self.assertTrue(os.path.exists(self.volumes_path)) self.volume.create_volume(self.context, volume_id) path = self.volumes_path + '/' + vol['name'] self.assertTrue(os.path.exists(path)) self.volume.delete_volume(self.context, volume_id) self.assertFalse(os.path.exists(path)) def test_create_delete_volume_sparse_backing_file(self): """Create and delete vol with default sparse creation method.""" CONF.gpfs_sparse_volumes = True vol = test_utils.create_volume(self.context, host=CONF.host) volume_id = vol['id'] self.assertTrue(os.path.exists(self.volumes_path)) self.volume.create_volume(self.context, volume_id) path = self.volumes_path + '/' + vol['name'] self.assertTrue(os.path.exists(path)) self.volume.delete_volume(self.context, volume_id) self.assertFalse(os.path.exists(path)) def test_create_volume_with_attributes(self): self.stubs.Set(GPFSDriver, '_gpfs_change_attributes', self._fake_gpfs_change_attributes) attributes = { 'dio': 'yes', 'data_pool_name': 'ssd_pool', 'replicas': '2', 'write_affinity_depth': '1', 'block_group_factor': '1', 'write_affinity_failure-group': '1,1,1:2;2,1,1:2;2,0,3:4' } vol = test_utils.create_volume(self.context, host=CONF.host, metadata=attributes) volume_id = vol['id'] self.assertTrue(os.path.exists(self.volumes_path)) self.volume.create_volume(self.context, volume_id) path = self.volumes_path + '/' + vol['name'] self.assertTrue(os.path.exists(path)) self.volume.delete_volume(self.context, volume_id) self.assertFalse(os.path.exists(path)) def test_migrate_volume(self): """Test volume migration done by driver.""" loc = 
'GPFSDriver:cindertest:openstack' cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} volume = test_utils.create_volume(self.context, host=CONF.host) self.driver.create_volume(volume) self.driver.migrate_volume(self.context, volume, host) self.driver.delete_volume(volume) def _create_snapshot(self, volume_id, size='0'): """Create a snapshot object.""" snap = {} snap['volume_size'] = size snap['user_id'] = 'fake' snap['project_id'] = 'fake' snap['volume_id'] = volume_id snap['status'] = "creating" return db.snapshot_create(context.get_admin_context(), snap) def test_create_delete_snapshot(self): volume_src = test_utils.create_volume(self.context, host=CONF.host) self.volume.create_volume(self.context, volume_src['id']) snapCount = len( db.snapshot_get_all_for_volume(self.context, volume_src['id'])) self.assertEqual(snapCount, 0) snapshot = self._create_snapshot(volume_src['id']) snapshot_id = snapshot['id'] self.volume.create_snapshot(self.context, volume_src['id'], snapshot_id) self.assertTrue( os.path.exists(os.path.join(self.volumes_path, snapshot['name']))) snapCount = len( db.snapshot_get_all_for_volume(self.context, volume_src['id'])) self.assertEqual(snapCount, 1) self.volume.delete_snapshot(self.context, snapshot_id) self.volume.delete_volume(self.context, volume_src['id']) self.assertFalse( os.path.exists(os.path.join(self.volumes_path, snapshot['name']))) snapCount = len( db.snapshot_get_all_for_volume(self.context, volume_src['id'])) self.assertEqual(snapCount, 0) def test_create_volume_from_snapshot(self): volume_src = test_utils.create_volume(self.context, host=CONF.host) self.volume.create_volume(self.context, volume_src['id']) snapshot = self._create_snapshot(volume_src['id']) snapshot_id = snapshot['id'] self.volume.create_snapshot(self.context, volume_src['id'], snapshot_id) self.assertTrue( os.path.exists(os.path.join(self.volumes_path, snapshot['name']))) volume_dst = test_utils.create_volume(self.context, host=CONF.host, 
snapshot_id=snapshot_id) self.volume.create_volume(self.context, volume_dst['id'], snapshot_id) self.assertEqual( volume_dst['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).id) self.assertEqual( snapshot_id, db.volume_get(context.get_admin_context(), volume_dst['id']).snapshot_id) self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_snapshot(self.context, snapshot_id) self.volume.delete_volume(self.context, volume_src['id']) def test_create_cloned_volume(self): volume_src = test_utils.create_volume(self.context, host=CONF.host) self.volume.create_volume(self.context, volume_src['id']) volume_dst = test_utils.create_volume(self.context, host=CONF.host) volumepath = os.path.join(self.volumes_path, volume_dst['name']) self.assertFalse(os.path.exists(volumepath)) self.driver.create_cloned_volume(volume_dst, volume_src) self.assertEqual( volume_dst['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).id) self.assertTrue(os.path.exists(volumepath)) self.volume.delete_volume(self.context, volume_src['id']) self.volume.delete_volume(self.context, volume_dst['id']) def test_create_volume_from_snapshot_method(self): volume_src = test_utils.create_volume(self.context, host=CONF.host) self.volume.create_volume(self.context, volume_src['id']) snapshot = self._create_snapshot(volume_src['id']) snapshot_id = snapshot['id'] self.volume.create_snapshot(self.context, volume_src['id'], snapshot_id) volume_dst = test_utils.create_volume(self.context, host=CONF.host) self.driver.create_volume_from_snapshot(volume_dst, snapshot) self.assertEqual( volume_dst['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).id) volumepath = os.path.join(self.volumes_path, volume_dst['name']) self.assertTrue(os.path.exists(volumepath)) self.volume.delete_snapshot(self.context, snapshot_id) self.volume.delete_volume(self.context, volume_dst['id']) self.volume.delete_volume(self.context, volume_src['id']) def 
test_clone_image_to_volume_with_copy_on_write_mode(self): """Test the function of copy_image_to_volume focusing on the integretion of the image_util using copy_on_write image sharing mode. """ # specify image file format is raw self.stubs.Set(image_utils, 'qemu_img_info', self._fake_qemu_raw_image_info) volume = test_utils.create_volume(self.context, host=CONF.host) volumepath = os.path.join(self.volumes_path, volume['name']) CONF.gpfs_images_share_mode = 'copy_on_write' self.driver.clone_image(volume, None, self.image_id, {}) self.assertTrue(os.path.exists(volumepath)) self.volume.delete_volume(self.context, volume['id']) self.assertFalse(os.path.exists(volumepath)) def test_clone_image_to_volume_with_copy_mode(self): """Test the function of copy_image_to_volume focusing on the integretion of the image_util using copy image sharing mode. """ # specify image file format is raw self.stubs.Set(image_utils, 'qemu_img_info', self._fake_qemu_raw_image_info) volume = test_utils.create_volume(self.context, host=CONF.host) volumepath = os.path.join(self.volumes_path, volume['name']) CONF.gpfs_images_share_mode = 'copy' self.driver.clone_image(volume, None, self.image_id, {}) self.assertTrue(os.path.exists(volumepath)) self.volume.delete_volume(self.context, volume['id']) def test_copy_image_to_volume_with_non_gpfs_image_dir(self): """Test the function of copy_image_to_volume focusing on the integretion of the image_util using a non gpfs glance images directory """ # specify image file format is raw self.stubs.Set(image_utils, 'qemu_img_info', self._fake_qemu_raw_image_info) for share_mode in ['copy_on_write', 'copy']: volume = test_utils.create_volume(self.context, host=CONF.host) volumepath = os.path.join(self.volumes_path, volume['name']) CONF.gpfs_images_share_mode = share_mode CONF.gpfs_images_dir = None self.driver.copy_image_to_volume(self.context, volume, FakeImageService(), self.image_id) self.assertTrue(os.path.exists(volumepath)) 
self.volume.delete_volume(self.context, volume['id']) def test_copy_image_to_volume_with_illegal_image_format(self): """Test the function of copy_image_to_volume focusing on the integretion of the image_util using an illegal image file format """ # specify image file format is qcow2 self.stubs.Set(image_utils, 'qemu_img_info', self._fake_qemu_qcow2_image_info) volume = test_utils.create_volume(self.context, host=CONF.host) CONF.gpfs_images_share_mode = 'copy' CONF.gpfs_images_dir = self.images_dir self.assertRaises(exception.ImageUnacceptable, self.driver.copy_image_to_volume, self.context, volume, FakeImageService(), self.image_id) self.volume.delete_volume(self.context, volume['id']) def test_get_volume_stats(self): stats = self.driver.get_volume_stats() self.assertEqual(stats['volume_backend_name'], 'GPFS') self.assertEqual(stats['storage_protocol'], 'file') def test_extend_volume(self): new_vol_size = 15 mox = mox_lib.Mox() volume = test_utils.create_volume(self.context, host=CONF.host) volpath = os.path.join(self.volumes_path, volume['name']) qemu_img_info_output = """image: %s file format: raw virtual size: %sG (%s bytes) backing file: %s """ % (volume['name'], new_vol_size, new_vol_size * units.GiB, volpath) mox.StubOutWithMock(image_utils, 'resize_image') image_utils.resize_image(volpath, new_vol_size) mox.StubOutWithMock(image_utils, 'qemu_img_info') img_info = imageutils.QemuImgInfo(qemu_img_info_output) image_utils.qemu_img_info(volpath).AndReturn(img_info) mox.ReplayAll() self.driver.extend_volume(volume, new_vol_size) mox.VerifyAll() def test_extend_volume_with_failure(self): new_vol_size = 15 mox = mox_lib.Mox() volume = test_utils.create_volume(self.context, host=CONF.host) volpath = os.path.join(self.volumes_path, volume['name']) mox.StubOutWithMock(image_utils, 'resize_image') image_utils.resize_image(volpath, new_vol_size).AndRaise( processutils.ProcessExecutionError('error')) mox.ReplayAll() self.assertRaises(exception.VolumeBackendAPIException, 
self.driver.extend_volume, volume, new_vol_size) mox.VerifyAll() def test_check_for_setup_error_ok(self): self.stubs.Set(GPFSDriver, '_get_gpfs_state', self._fake_gpfs_get_state_active) self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level', self._fake_gpfs_compatible_cluster_release_level) self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level', self._fake_gpfs_compatible_filesystem_release_level) self.driver.check_for_setup_error() def test_check_for_setup_error_gpfs_not_active(self): self.stubs.Set(GPFSDriver, '_get_gpfs_state', self._fake_gpfs_get_state_not_active) self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) def test_check_for_setup_error_not_gpfs_path(self): self.stubs.Set(GPFSDriver, '_get_gpfs_state', self._fake_gpfs_get_state_active) self.stubs.Set(GPFSDriver, '_is_gpfs_path', self._fake_is_not_gpfs_path) self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level', self._fake_gpfs_compatible_cluster_release_level) self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level', self._fake_gpfs_compatible_filesystem_release_level) self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) def test_check_for_setup_error_incompatible_cluster_version(self): self.stubs.Set(GPFSDriver, '_get_gpfs_state', self._fake_gpfs_get_state_active) self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level', self._fake_gpfs_incompatible_cluster_release_level) self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) def test_check_for_setup_error_incompatible_filesystem_version(self): self.stubs.Set(GPFSDriver, '_get_gpfs_state', self._fake_gpfs_get_state_active) self.stubs.Set(GPFSDriver, '_get_gpfs_cluster_release_level', self._fake_gpfs_compatible_cluster_release_level) self.stubs.Set(GPFSDriver, '_get_gpfs_filesystem_release_level', self._fake_gpfs_incompatible_filesystem_release_level) self.assertRaises(exception.VolumeBackendAPIException, 
self.driver.check_for_setup_error) def _fake_create_file(self, path, modebits='666'): open(path, 'w').close() utils.execute('chmod', modebits, path) def _fake_gpfs_snap(self, src, dest=None, modebits='644'): if dest is None: dest = src self._fake_create_file(dest, '644') def _fake_gpfs_copy(self, src, dest): self._fake_create_file(dest) def _fake_create_sparse_file(self, path, size): self._fake_create_file(path) def _fake_allocate_file_blocks(self, path, size): self._fake_create_file(path) def _fake_gpfs_change_attributes(self, options, path): pass def _fake_gpfs_redirect(self, src): return True def _fake_is_gpfs_parent(self, gpfs_file): return False def _fake_get_available_capacity(self, path): fake_avail = 80 * units.GiB fake_size = 2 * fake_avail return fake_avail, fake_size def _fake_gpfs_get_state_active(self): active_txt = ('mmgetstate::HEADER:version:reserved:reserved:' 'nodeName:nodeNumber:state:quorum:nodesUp:totalNodes:' 'remarks:cnfsState:\n' 'mmgetstate::0:1:::hostname:1:active:1:1:' '1:quorum node:(undefined):') return active_txt def _fake_gpfs_get_state_not_active(self): inactive_txt = ('mmgetstate::HEADER:version:reserved:reserved:' 'nodeName:nodeNumber:state:quorum:nodesUp:totalNodes:' 'remarks:cnfsState:\n' 'mmgetstate::0:1:::hostname:1:down:1:1:' '1:quorum node:(undefined):') return inactive_txt def _fake_gpfs_compatible_cluster_release_level(self): release = 1400 return release def _fake_gpfs_incompatible_cluster_release_level(self): release = 1105 return release def _fake_gpfs_compatible_filesystem_release_level(self, path=None): release = 1400 fs = '/dev/gpfs' return fs, release def _fake_gpfs_incompatible_filesystem_release_level(self, path=None): release = 1105 fs = '/dev/gpfs' return fs, release def _fake_is_gpfs_path(self, path): pass def _fake_is_not_gpfs_path(self, path): raise (processutils.ProcessExecutionError('invalid gpfs path')) def _fake_convert_image(self, source, dest, out_format): utils.execute('cp', source, dest) def 
_fake_qemu_qcow2_image_info(self, path): data = FakeQemuImgInfo() data.file_format = 'qcow2' data.backing_file = None data.virtual_size = 1 * units.GiB return data def _fake_qemu_raw_image_info(self, path): data = FakeQemuImgInfo() data.file_format = 'raw' data.backing_file = None data.virtual_size = 1 * units.GiB return data def _fake_qemu_image_resize(self, path, size): LOG.info('wtf') pass def _fake_delete_gpfs_file(self, fchild): volume_path = fchild vol_name = os.path.basename(fchild) vol_id = vol_name.split('volume-').pop() utils.execute('rm', '-f', volume_path) utils.execute('rm', '-f', volume_path + '.snap') all_snaps = db.snapshot_get_all_for_volume(self.context, vol_id) for snap in all_snaps: snap_path = self.volumes_path + '/' + snap['name'] utils.execute('rm', '-f', snap_path)