def test_transfer_get_all(self):
    """Listing transfers requires admin; per-project listing honors scope."""
    volume_id1 = utils.create_volume(self.ctxt)['id']
    volume_id2 = utils.create_volume(self.ctxt)['id']
    self._create_transfer(volume_id1)
    self._create_transfer(volume_id2)

    # Listing all transfers is an admin-only operation.
    self.assertRaises(exception.NotAuthorized,
                      db.transfer_get_all,
                      self.ctxt)
    xfer = db.transfer_get_all(context.get_admin_context())
    self.assertEqual(2, len(xfer), "Unexpected number of transfer records")

    xfer = db.transfer_get_all_by_project(self.ctxt, self.ctxt.project_id)
    self.assertEqual(2, len(xfer), "Unexpected number of transfer records")

    # A different tenant may not list another project's transfers...
    nctxt = context.RequestContext(user_id='new_user_id',
                                   project_id='new_project_id')
    self.assertRaises(exception.NotAuthorized,
                      db.transfer_get_all_by_project,
                      nctxt, self.ctxt.project_id)
    # ...unless its context is elevated to admin.
    xfer = db.transfer_get_all_by_project(nctxt.elevated(),
                                          self.ctxt.project_id)
    self.assertEqual(2, len(xfer), "Unexpected number of transfer records")
def test_finish_volume_migration(self):
    """finish_volume_migration swaps source/destination volume rows."""
    admin_ctxt = context.RequestContext(user_id='user_id',
                                        project_id='project_id',
                                        is_admin=True)
    src = testutils.create_volume(admin_ctxt, host='src',
                                  migration_status='migrating',
                                  status='available')
    dst = testutils.create_volume(admin_ctxt, host='dest',
                                  migration_status='target:fake',
                                  status='available')

    db.finish_volume_migration(admin_ctxt, src['id'], dst['id'])

    # The source row keeps its id but now carries the destination's
    # data, so callers can keep referring to the original volume id.
    src = objects.Volume.get_by_id(admin_ctxt, src['id'])
    self.assertEqual('dest', src.host)
    self.assertEqual('available', src.status)
    self.assertIsNone(src.migration_status)

    # The destination row received the source's data and is marked
    # 'deleting' so it gets cleaned up.
    dst = objects.Volume.get_by_id(admin_ctxt, dst['id'])
    self.assertEqual('src', dst.host)
    self.assertEqual('deleting', dst.status)
    self.assertEqual('deleting', dst.migration_status)
def test_create_cgsnapshot_json(self):
    """POST /cgsnapshots returns 202 and the new snapshot's id."""
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    utils.create_volume(
        self.context,
        consistencygroup_id=consistencygroup_id)['id']
    body = {"cgsnapshot": {"name": "cg1",
                           "description": "CG Snapshot 1",
                           "consistencygroup_id": consistencygroup_id}}
    req = webob.Request.blank('/v2/fake/cgsnapshots')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    LOG.info(res_dict)

    self.assertEqual(202, res.status_int)
    self.assertIn('id', res_dict['cgsnapshot'])

    # Destroy the snapshot that was actually created (the original test
    # destroyed a hard-coded id "1" that never existed).
    db.cgsnapshot_destroy(context.get_admin_context(),
                          res_dict['cgsnapshot']['id'])
def test_transfer_destroy(self):
    """Destroying transfers honors ownership and admin elevation."""
    volume_id = utils.create_volume(self.ctxt)['id']
    volume_id2 = utils.create_volume(self.ctxt)['id']
    xfer_id1 = self._create_transfer(volume_id)
    xfer_id2 = self._create_transfer(volume_id2)

    xfer = db.transfer_get_all(context.get_admin_context())
    self.assertEqual(2, len(xfer), "Unexpected number of transfer records")
    self.assertFalse(xfer[0]['deleted'], "Deleted flag is set")

    db.transfer_destroy(self.ctxt, xfer_id1)
    xfer = db.transfer_get_all(context.get_admin_context())
    self.assertEqual(1, len(xfer), "Unexpected number of transfer records")
    self.assertEqual(xfer_id2, xfer[0]['id'],
                     "Unexpected value for Transfer id")

    # Another tenant cannot destroy this project's transfer...
    nctxt = context.RequestContext(user_id='new_user_id',
                                   project_id='new_project_id')
    self.assertRaises(exception.TransferNotFound,
                      db.transfer_destroy, nctxt, xfer_id2)

    # ...but an elevated (admin) context can.
    db.transfer_destroy(nctxt.elevated(), xfer_id2)
    xfer = db.transfer_get_all(context.get_admin_context())
    self.assertEqual(0, len(xfer), "Unexpected number of transfer records")
def test_transfer_invalid_volume(self):
    """A transfer cannot be created for a volume that is in use."""
    tx_api = transfer_api.API()
    utils.create_volume(self.ctxt, id='1', status='in-use',
                        updated_at=self.updated_at)
    self.assertRaises(exception.InvalidVolume, tx_api.create,
                      self.ctxt, '1', 'Description')
    # The failed attempt must not disturb the volume's state.
    status = db.volume_get(self.ctxt, '1')['status']
    self.assertEqual('in-use', status, 'Unexpected state')
def test_transfer_volume_create_delete(self):
    """Create then delete a transfer; volume status round-trips."""
    tx_api = transfer_api.API()
    utils.create_volume(self.ctxt, id='1', updated_at=self.updated_at)

    transfer = tx_api.create(self.ctxt, '1', 'Description')
    # Creating a transfer parks the volume in awaiting-transfer.
    status = db.volume_get(self.ctxt, '1')['status']
    self.assertEqual('awaiting-transfer', status, 'Unexpected state')

    tx_api.delete(self.ctxt, transfer['id'])
    # Deleting the transfer releases the volume back to available.
    status = db.volume_get(self.ctxt, '1')['status']
    self.assertEqual('available', status, 'Unexpected state')
def test_create_volume_from_snapshot_method(self):
    """Driver creates a volume from a snapshot and its file appears."""
    volume_src = test_utils.create_volume(self.context, host=CONF.host)
    # The returned snapshot object is passed directly to the driver;
    # the original's unused snapshot_id local has been removed.
    snapshot = self._create_snapshot(volume_src['id'])
    volume_dst = test_utils.create_volume(self.context, host=CONF.host)
    self.driver.create_volume_from_snapshot(volume_dst, snapshot)

    # The destination volume must exist in the DB...
    self.assertEqual(volume_dst['id'],
                     db.volume_get(context.get_admin_context(),
                                   volume_dst['id']).id)
    # ...and its backing file must exist on disk.
    volumepath = os.path.join(self.volumes_path, volume_dst['name'])
    self.assertTrue(os.path.exists(volumepath))

    self.driver.delete_volume(volume_dst)
def test_same_filter_vol_list_pass(self):
    """SameBackendFilter passes when a hinted volume lives on the host."""
    filt_cls = self.class_map['SameBackendFilter']()
    host = fakes.FakeHostState('host1', {})
    hint_ids = [utils.create_volume(self.context, host=h).id
                for h in ('host1', 'host2')]
    filter_properties = {
        'context': self.context.elevated(),
        'scheduler_hints': {'same_host': hint_ids},
    }
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_finish_volume_migration(self):
    """After migration the source row adopts the destination's data."""
    admin_ctxt = context.RequestContext(user_id="user_id",
                                        project_id="project_id",
                                        is_admin=True)
    src = testutils.create_volume(admin_ctxt, host="src",
                                  migration_status="migrating",
                                  status="available")
    dst = testutils.create_volume(admin_ctxt, host="dest",
                                  migration_status="target:fake",
                                  status="available")
    db.finish_volume_migration(admin_ctxt, src["id"], dst["id"])

    migrated = db.volume_get(admin_ctxt, src["id"])
    # The source keeps its id but points at the destination's backing
    # file via _name_id, so its name is derived from the dest id.
    self.assertEqual(dst["id"], migrated["_name_id"])
    self.assertEqual("volume-%s" % dst["id"], migrated["name"])
    self.assertEqual("dest", migrated["host"])
    self.assertEqual("available", migrated["status"])
    self.assertIsNone(migrated["migration_status"])
def test_different_filter_handles_multiple_uuids(self):
    """DifferentBackendFilter accepts a list of volume uuid hints."""
    filt_cls = self.class_map['DifferentBackendFilter']()
    host = fakes.FakeHostState('host1#pool0', {})
    hint_ids = [utils.create_volume(self.context, host=h).id
                for h in ('host1:pool1', 'host1:pool3')]
    filter_properties = {
        'context': self.context.elevated(),
        'scheduler_hints': {'different_host': hint_ids},
    }
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_update_consistencygroup_success(self):
    """PUT /consistencygroups/<id>/update with add and remove volumes.

    Verifies the request is accepted (202) and the CG transitions to
    'updating'.
    """
    volume_type_id = '123456'
    ctxt = context.RequestContext('fake', 'fake')
    consistencygroup_id = self._create_consistencygroup(status='available',
                                                        host='test_host')
    # Two volumes already in the CG that the update will remove.
    remove_volume_id = utils.create_volume(
        ctxt,
        volume_type_id=volume_type_id,
        consistencygroup_id=consistencygroup_id)['id']
    remove_volume_id2 = utils.create_volume(
        ctxt,
        volume_type_id=volume_type_id,
        consistencygroup_id=consistencygroup_id)['id']

    self.assertEqual('available',
                     self._get_consistencygroup_attrib(consistencygroup_id,
                                                       'status'))

    # Sanity-check that both volumes are currently members of the CG.
    cg_volumes = db.volume_get_all_by_group(ctxt.elevated(),
                                            consistencygroup_id)
    cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes]
    self.assertIn(remove_volume_id, cg_vol_ids)
    self.assertIn(remove_volume_id2, cg_vol_ids)

    # Two standalone volumes that the update will add to the CG.
    add_volume_id = utils.create_volume(
        ctxt,
        volume_type_id=volume_type_id)['id']
    add_volume_id2 = utils.create_volume(
        ctxt,
        volume_type_id=volume_type_id)['id']

    req = webob.Request.blank('/v2/fake/consistencygroups/%s/update' %
                              consistencygroup_id)
    req.method = 'PUT'
    req.headers['Content-Type'] = 'application/json'
    name = 'newcg'
    description = 'New Consistency Group Description'
    # The API takes comma-separated volume id lists.
    add_volumes = add_volume_id + "," + add_volume_id2
    remove_volumes = remove_volume_id + "," + remove_volume_id2
    body = {"consistencygroup": {"name": name,
                                 "description": description,
                                 "add_volumes": add_volumes,
                                 "remove_volumes": remove_volumes, }}
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(202, res.status_int)
    # The update is asynchronous, so the CG is left in 'updating'.
    self.assertEqual('updating',
                     self._get_consistencygroup_attrib(consistencygroup_id,
                                                       'status'))

    db.consistencygroup_destroy(ctxt.elevated(), consistencygroup_id)
def test_migrate_volume_local(self):
    """Verify volume migration performed locally by driver."""
    ctxt = self.context
    migrated_by_driver = True
    volume = test_utils.create_volume(ctxt, host=CONF.host)
    with mock.patch('cinder.utils.execute'):
        LOG.debug('Migrate same cluster, different path, '
                  'move file to new path.')
        loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.driver.create_volume(volume)
        migr, updt = self.driver.migrate_volume(ctxt, volume, host)
        self.assertEqual(migr, migrated_by_driver)
        self.driver.delete_volume(volume)
        # Lazy %-args instead of eager string interpolation: the message
        # is only formatted when debug logging is actually emitted.
        LOG.debug('Migrate same cluster, different path, '
                  'move file to new path, rv = %s.', migr)

        LOG.debug('Migrate same cluster, same path, no action taken.')
        gpfs_base = self.driver.configuration.gpfs_mount_point_base
        loc = 'GPFSDriver:%s:%s' % (self.driver._cluster_id, gpfs_base)
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.driver.create_volume(volume)
        migr, updt = self.driver.migrate_volume(ctxt, volume, host)
        self.assertEqual(migr, migrated_by_driver)
        self.driver.delete_volume(volume)
        LOG.debug('Migrate same cluster, same path, no action taken, '
                  'rv = %s', migr)
def test_promote_volume_not_replicated_xml(self):
    """Promoting an unreplicated volume returns 400 (XML API)."""
    volume = tests_utils.create_volume(self.ctxt, **self.volume_params)
    req, res = self._get_resp('promote', volume['id'], xml=True)
    # Include the request/response in the failure message for debugging.
    failure_detail = "request: %s\nresult: %s" % (req, res)
    self.assertEqual(res.status_int, 400, failure_detail)
def test_restore_backup_to_undersized_volume(self):
    """Restoring a backup into a smaller volume must fail with 400."""
    backup_size = 10
    backup_id = self._create_backup(status='available', size=backup_size)
    # need to create the volume referenced below first
    volume_size = 5
    volume_id = utils.create_volume(self.context, size=volume_size)['id']

    body = {"restore": {"volume_id": volume_id, }}
    req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    # The API rejects the request and explains the size mismatch.
    self.assertEqual(res.status_int, 400)
    self.assertEqual(res_dict['badRequest']['code'], 400)
    self.assertEqual(res_dict['badRequest']['message'],
                     'Invalid volume: volume size %d is too '
                     'small to restore backup of size %d.' %
                     (volume_size, backup_size))

    db.volume_destroy(context.get_admin_context(), volume_id)
    db.backup_destroy(context.get_admin_context(), backup_id)
def test_show_backup(self):
    """GET /backups/<id> returns the backup's full representation."""
    volume_id = utils.create_volume(self.context, size=5,
                                    status='creating')['id']
    backup_id = self._create_backup(volume_id)
    # Lazy %-args: only format the message when debug logging is enabled.
    LOG.debug('Created backup with id %s', backup_id)
    req = webob.Request.blank('/v2/fake/backups/%s' % backup_id)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 200)
    self.assertEqual(res_dict['backup']['availability_zone'], 'az1')
    self.assertEqual(res_dict['backup']['container'], 'volumebackups')
    self.assertEqual(res_dict['backup']['description'],
                     'this is a test backup')
    self.assertEqual(res_dict['backup']['name'], 'test_backup')
    self.assertEqual(res_dict['backup']['id'], backup_id)
    self.assertEqual(res_dict['backup']['object_count'], 0)
    self.assertEqual(res_dict['backup']['size'], 0)
    self.assertEqual(res_dict['backup']['status'], 'creating')
    self.assertEqual(res_dict['backup']['volume_id'], volume_id)

    db.backup_destroy(context.get_admin_context(), backup_id)
    db.volume_destroy(context.get_admin_context(), volume_id)
def test_restore_backup_with_InvalidInput(self):
    """Restore surfaces InvalidInput from the backup API as HTTP 400."""

    def fake_backup_api_restore_throwing_InvalidInput(cls, context,
                                                      backup_id,
                                                      volume_id):
        # Stubbed restore that always raises InvalidInput.
        msg = _("Invalid input")
        raise exception.InvalidInput(reason=msg)

    self.stubs.Set(cinder.backup.API, 'restore',
                   fake_backup_api_restore_throwing_InvalidInput)

    backup_id = self._create_backup(status='available')
    # need to create the volume referenced below first
    volume_id = utils.create_volume(self.context, size=0)['id']
    body = {"restore": {"volume_id": volume_id, }}
    req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    # InvalidInput maps to badRequest (400) with the exception message.
    self.assertEqual(res.status_int, 400)
    self.assertEqual(res_dict['badRequest']['code'], 400)
    self.assertEqual(res_dict['badRequest']['message'],
                     'Invalid input received: Invalid input')
def test_create_backup_json(self):
    """POST /backups with a JSON body returns 202 and a backup id."""
    self.stubs.Set(cinder.db, 'service_get_all_by_topic',
                   self._stub_service_get_all_by_topic)
    volume_id = utils.create_volume(self.context, size=5)['id']

    backup_spec = {"display_name": "nightly001",
                   "display_description": "Nightly Backup 03-Sep-2012",
                   "volume_id": volume_id,
                   "container": "nightlybackups"}
    req = webob.Request.blank('/v2/fake/backups')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps({"backup": backup_spec})
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    LOG.info(res_dict)

    self.assertEqual(res.status_int, 202)
    self.assertIn('id', res_dict['backup'])

    db.volume_destroy(context.get_admin_context(), volume_id)
def test_different_filter_handles_multiple_uuids(self):
    """DifferentBackendFilter handles several uuid hints at once."""
    filt_cls = self.class_map['DifferentBackendFilter']()
    host = fakes.FakeHostState('host1#pool0', {})
    first = utils.create_volume(self.context, host='host1:pool1')
    second = utils.create_volume(self.context, host='host1:pool3')
    props = {'context': self.context.elevated(),
             'scheduler_hints': {'different_host': [first.id, second.id]}}
    self.assertTrue(filt_cls.host_passes(host, props))
def test_list_cgsnapshots_detail_json(self):
    """GET /cgsnapshots/detail returns full records for each snapshot."""
    consistencygroup_id = utils.create_consistencygroup(self.context)["id"]
    volume_id = utils.create_volume(
        self.context,
        consistencygroup_id=consistencygroup_id)["id"]
    # Three snapshots of the same consistency group.
    cgsnapshot_id1 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id2 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id3 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)

    req = webob.Request.blank("/v2/fake/cgsnapshots/detail")
    req.method = "GET"
    req.headers["Content-Type"] = "application/json"
    req.headers["Accept"] = "application/json"
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 200)
    # Detail view includes description, name, id and status per snapshot.
    self.assertEqual(res_dict["cgsnapshots"][0]["description"],
                     "this is a test cgsnapshot")
    self.assertEqual(res_dict["cgsnapshots"][0]["name"],
                     "test_cgsnapshot")
    self.assertEqual(res_dict["cgsnapshots"][0]["id"],
                     cgsnapshot_id1)
    self.assertEqual(res_dict["cgsnapshots"][0]["status"], "creating")

    self.assertEqual(res_dict["cgsnapshots"][1]["description"],
                     "this is a test cgsnapshot")
    self.assertEqual(res_dict["cgsnapshots"][1]["name"],
                     "test_cgsnapshot")
    self.assertEqual(res_dict["cgsnapshots"][1]["id"],
                     cgsnapshot_id2)
    self.assertEqual(res_dict["cgsnapshots"][1]["status"], "creating")

    self.assertEqual(res_dict["cgsnapshots"][2]["description"],
                     "this is a test cgsnapshot")
    self.assertEqual(res_dict["cgsnapshots"][2]["name"],
                     "test_cgsnapshot")
    self.assertEqual(res_dict["cgsnapshots"][2]["id"],
                     cgsnapshot_id3)
    self.assertEqual(res_dict["cgsnapshots"][2]["status"], "creating")

    # Clean up in reverse creation order.
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id3)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id2)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id1)
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.consistencygroup_destroy(context.get_admin_context(),
                                consistencygroup_id)
def test_restore_backup_with_VolumeLimitExceeded(self):
    """Restore maps VolumeLimitExceeded to HTTP 413 overLimit."""

    def fake_backup_api_restore_throwing_VolumeLimitExceeded(cls,
                                                             context,
                                                             backup_id,
                                                             volume_id):
        # Stubbed restore that always reports the volume quota exceeded.
        raise exception.VolumeLimitExceeded(allowed=1)

    self.stubs.Set(cinder.backup.API, 'restore',
                   fake_backup_api_restore_throwing_VolumeLimitExceeded)

    backup_id = self._create_backup(status='available')
    # need to create the volume referenced below first
    volume_id = utils.create_volume(self.context, size=5)['id']
    body = {"restore": {"volume_id": volume_id, }}
    req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    # Quota exhaustion surfaces as 413 Request Entity Too Large.
    self.assertEqual(res.status_int, 413)
    self.assertEqual(res_dict['overLimit']['code'], 413)
    self.assertEqual(res_dict['overLimit']['message'],
                     'Maximum number of volumes allowed (1) exceeded')
def test_create_backup_WithOUT_enabled_backup_service(self):
    """Backup creation fails with 500 when no backup service is running."""
    # need an enabled backup service available
    def stub_empty_service_get_all_by_topic(ctxt, topic):
        # Simulate no cinder-backup services registered.
        return []

    self.stubs.Set(cinder.db, 'service_get_all_by_topic',
                   stub_empty_service_get_all_by_topic)
    volume_id = utils.create_volume(self.context, size=2)['id']
    req = webob.Request.blank('/v2/fake/backups')
    body = {"backup": {"display_name": "nightly001",
                       "display_description":
                       "Nightly Backup 03-Sep-2012",
                       "volume_id": volume_id,
                       "container": "nightlybackups",
                       }
            }
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.headers['Accept'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 500)
    self.assertEqual(res_dict['computeFault']['code'], 500)
    self.assertEqual(res_dict['computeFault']['message'],
                     'Service cinder-backup could not be found.')

    # The failed request must leave the volume available.
    volume = self.volume_api.get(context.get_admin_context(), volume_id)
    self.assertEqual(volume['status'], 'available')
def test_finish_volume_migration(self):
    """Migration removes the destination row and rewrites the source."""
    admin_ctxt = context.RequestContext(user_id='user_id',
                                        project_id='project_id',
                                        is_admin=True)
    src = testutils.create_volume(admin_ctxt, host='src',
                                  status='migrating')
    dst = testutils.create_volume(admin_ctxt, host='dest',
                                  status='migration_target')
    db.finish_volume_migration(admin_ctxt, src['id'], dst['id'])

    # The destination entry is deleted outright...
    self.assertRaises(exception.VolumeNotFound, db.volume_get,
                      admin_ctxt, dst['id'])
    # ...while the source adopts the destination's host.
    migrated = db.volume_get(admin_ctxt, src['id'])
    self.assertEqual(migrated['host'], 'dest')
    self.assertEqual(migrated['status'], 'migrating')
def test_retype_volume_different_pool_and_host(self):
    """Retype succeeds when both pool and host differ (migration path)."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    key_specs_new = {'capabilities:storage_pool': 'gold',
                     'volume_backend_name': 'backend1'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])
    # set volume host to be different from target host
    volume = test_utils.create_volume(ctxt, host=CONF.host)
    volume['volume_type_id'] = old_type['id']

    with mock.patch('cinder.utils.execute'):
        # different host different pool
        LOG.debug('Retype different pools and hosts, expected rv = True.')
        self.driver.db = mock.Mock()
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, new_type, diff, host)
        self.assertTrue(rv)
        self.driver.delete_volume(volume)
        # Lazy %-args instead of eager string interpolation.
        LOG.debug('Retype different pools and hosts, rv = %s.', rv)
def test_same_filter_vol_list_pass(self):
    """SameBackendFilter passes if any hinted volume lives on the host."""
    filt_cls = self.class_map['SameBackendFilter']()
    host = fakes.FakeHostState('host1', {})
    on_host = utils.create_volume(self.context, host='host1')
    elsewhere = utils.create_volume(self.context, host='host2')
    props = {'context': self.context.elevated(),
             'scheduler_hints': {'same_host': [on_host.id, elsewhere.id]}}
    self.assertTrue(filt_cls.host_passes(host, props))
def test_retype_volume_different_backend(self):
    """Retype fails when the backend names differ (cannot migrate)."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    key_specs_new = {'capabilities:storage_pool': 'gold',
                     'volume_backend_name': 'backend2'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])
    # set volume host to match target host
    volume = test_utils.create_volume(ctxt, host=host['host'])
    volume['volume_type_id'] = old_type['id']

    with mock.patch('cinder.utils.execute'):
        LOG.debug('Retype different backends, cannot migrate. '
                  'Expected rv = False.')
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, old_type, diff, host)
        self.assertFalse(rv)
        self.driver.delete_volume(volume)
        # Lazy %-args instead of eager string interpolation.
        LOG.debug('Retype different backends, cannot migrate, '
                  'rv = %s.', rv)
def test_restore_backup_with_InvalidVolume(self): backup_id = self._create_backup(status='available') # need to create the volume referenced below first volume_id = utils.create_volume(self.context, size=5, status='attaching')['id'] body = { "restore": { "volume_id": volume_id, } } req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(res.status_int, 400) self.assertEqual(res_dict['badRequest']['code'], 400) self.assertEqual( res_dict['badRequest']['message'], 'Invalid volume: Volume to be restored to must ' 'be available') db.volume_destroy(context.get_admin_context(), volume_id) db.backup_destroy(context.get_admin_context(), backup_id)
def test_create_cloned_volume(self):
    """Cloning produces a DB entry and a backing file for the clone."""
    volume_src = test_utils.create_volume(self.context, host=CONF.host)
    self.driver.create_volume(volume_src)
    volume_dst = test_utils.create_volume(self.context, host=CONF.host)

    clone_path = os.path.join(self.volumes_path, volume_dst['name'])
    # The clone's backing file must not exist before the clone call.
    self.assertFalse(os.path.exists(clone_path))

    self.driver.create_cloned_volume(volume_dst, volume_src)
    admin_ctxt = context.get_admin_context()
    self.assertEqual(volume_dst['id'],
                     db.volume_get(admin_ctxt, volume_dst['id']).id)
    self.assertTrue(os.path.exists(clone_path))

    self.driver.delete_volume(volume_src)
    self.driver.delete_volume(volume_dst)
def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(self):
    """Restore maps VolumeSizeExceedsAvailableQuota to HTTP 413."""

    def fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota(
            cls, context, backup_id, volume_id):
        # Stubbed restore that always reports a gigabytes quota breach.
        raise exception.VolumeSizeExceedsAvailableQuota(requested='2',
                                                        consumed='2',
                                                        quota='3')

    self.stubs.Set(
        cinder.backup.API,
        'restore',
        fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota)

    backup_id = self._create_backup(status='available')
    # need to create the volume referenced below first
    volume_id = utils.create_volume(self.context, size=5)['id']
    body = {"restore": {"volume_id": volume_id, }}
    req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    # Quota exhaustion surfaces as 413 Request Entity Too Large.
    self.assertEqual(res.status_int, 413)
    self.assertEqual(res_dict['overLimit']['code'], 413)
    self.assertEqual(res_dict['overLimit']['message'],
                     'Requested volume or snapshot exceeds allowed '
                     'Gigabytes quota. Requested 2G, quota is 3G and '
                     '2G has been consumed.')
def test_list_cgsnapshots_xml(self):
    """GET /cgsnapshots with XML Accept header lists snapshot ids."""
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    volume_id = utils.create_volume(
        self.context,
        consistencygroup_id=consistencygroup_id)['id']
    # Three snapshots of the same consistency group.
    cgsnapshot_id1 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id2 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id3 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)

    req = webob.Request.blank('/v2/fake/cgsnapshots')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/xml'
    req.headers['Accept'] = 'application/xml'
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(res.status_int, 200)
    # Parse the XML body and verify each <cgsnapshot> element's id.
    dom = minidom.parseString(res.body)
    cgsnapshot_list = dom.getElementsByTagName('cgsnapshot')

    self.assertEqual(
        cgsnapshot_list.item(0).getAttribute('id'),
        cgsnapshot_id1)
    self.assertEqual(
        cgsnapshot_list.item(1).getAttribute('id'),
        cgsnapshot_id2)
    self.assertEqual(
        cgsnapshot_list.item(2).getAttribute('id'),
        cgsnapshot_id3)

    # Clean up in reverse creation order.
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id3)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id2)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id1)
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.consistencygroup_destroy(context.get_admin_context(),
                                consistencygroup_id)
def test_name_id_diff(self):
    """Change name ID to mimic volume after migration."""
    vol_ref = testutils.create_volume(self.ctxt, size=1)
    db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
    refreshed = db.volume_get(self.ctxt, vol_ref['id'])
    # The volume name is derived from name_id, not from the row's id.
    self.assertEqual(refreshed['name'], CONF.volume_name_template % 'fake')
def test_retype_volume_different_pool(self):
    """Retype succeeds for a pool change on the same host and backend."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    key_specs_old = {
        'capabilities:storage_pool': 'bronze',
        'volume_backend_name': 'backend1'
    }
    key_specs_new = {
        'capabilities:storage_pool': 'gold',
        'volume_backend_name': 'backend1'
    }
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])
    # set volume host to match target host
    volume = test_utils.create_volume(ctxt, host=host['host'])
    volume['volume_type_id'] = old_type['id']

    with mock.patch('cinder.utils.execute'):
        LOG.debug('Retype different pools, expected rv = True.')
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, new_type, diff, host)
        self.assertTrue(rv)
        self.driver.delete_volume(volume)
        # Lazy %-args instead of eager string interpolation.
        LOG.debug('Retype different pools, rv = %s.', rv)
def test_name_id_snapshot_volume_name(self):
    """Make sure snapshot['volume_name'] is updated."""
    vol_ref = testutils.create_volume(self.ctxt, size=1)
    db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
    snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id'])
    # The snapshot's volume_name must follow the parent's name_id.
    self.assertEqual(snap_ref['volume_name'],
                     CONF.volume_name_template % 'fake')
def test_list_cgsnapshots_json(self):
    """GET /cgsnapshots returns the summary list (id and name)."""
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    volume_id = utils.create_volume(
        self.context,
        consistencygroup_id=consistencygroup_id)['id']
    # Three snapshots of the same consistency group.
    cgsnapshot_id1 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id2 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id3 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)

    req = webob.Request.blank('/v2/fake/cgsnapshots')
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 200)
    # The summary view exposes id and name for each snapshot.
    self.assertEqual(res_dict['cgsnapshots'][0]['id'],
                     cgsnapshot_id1)
    self.assertEqual(res_dict['cgsnapshots'][0]['name'],
                     'test_cgsnapshot')
    self.assertEqual(res_dict['cgsnapshots'][1]['id'],
                     cgsnapshot_id2)
    self.assertEqual(res_dict['cgsnapshots'][1]['name'],
                     'test_cgsnapshot')
    self.assertEqual(res_dict['cgsnapshots'][2]['id'],
                     cgsnapshot_id3)
    self.assertEqual(res_dict['cgsnapshots'][2]['name'],
                     'test_cgsnapshot')

    # Clean up in reverse creation order.
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id3)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id2)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id1)
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.consistencygroup_destroy(context.get_admin_context(),
                                consistencygroup_id)
def test_transfer_accept(self):
    """Exercise transfer accept: bad ids, bad keys, bad state, success."""
    tx_api = transfer_api.API()
    # The create_volume return value was never used; drop the dead
    # assignment (the volume is re-fetched via db.volume_get below).
    utils.create_volume(self.ctxt, id='1', updated_at=self.updated_at)
    transfer = tx_api.create(self.ctxt, '1', 'Description')
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual('awaiting-transfer', volume['status'],
                     'Unexpected state')

    # Unknown transfer id is rejected.
    self.assertRaises(exception.TransferNotFound,
                      tx_api.accept,
                      self.ctxt, '2', transfer['auth_key'])

    # Wrong auth key is rejected.
    self.assertRaises(exception.InvalidAuthKey,
                      tx_api.accept,
                      self.ctxt, transfer['id'], 'wrong')

    # A volume that left the awaiting-transfer state cannot be accepted.
    db.volume_update(self.ctxt, '1', {'status': 'wrong'})
    self.assertRaises(exception.InvalidVolume,
                      tx_api.accept,
                      self.ctxt, transfer['id'], transfer['auth_key'])
    db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})

    # Accept as a different tenant: ownership moves to the new project.
    self.ctxt.user_id = 'new_user_id'
    self.ctxt.project_id = 'new_project_id'
    response = tx_api.accept(self.ctxt, transfer['id'],
                             transfer['auth_key'])
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual(volume['project_id'], 'new_project_id',
                     'Unexpected project id')
    self.assertEqual(volume['user_id'], 'new_user_id',
                     'Unexpected user id')
    self.assertEqual(volume['id'], response['volume_id'],
                     'Unexpected volume id in response.')
    self.assertEqual(transfer['id'], response['id'],
                     'Unexpected transfer id in response.')
def test_create_cloned_volume(self):
    """Clone a volume and verify the DB row plus the on-disk file."""
    source = test_utils.create_volume(self.context, host=CONF.host)
    self.driver.create_volume(source)
    target = test_utils.create_volume(self.context, host=CONF.host)

    target_path = os.path.join(self.volumes_path, target['name'])
    # No backing file may exist for the clone before it is created.
    self.assertFalse(os.path.exists(target_path))

    self.driver.create_cloned_volume(target, source)
    stored = db.volume_get(context.get_admin_context(), target['id'])
    self.assertEqual(target['id'], stored.id)
    self.assertTrue(os.path.exists(target_path))

    self.driver.delete_volume(source)
    self.driver.delete_volume(target)
def test_list_cgsnapshots_xml(self):
    """GET /cgsnapshots with XML Accept header lists snapshot ids."""
    consistencygroup_id = utils.create_consistencygroup(self.context)["id"]
    volume_id = utils.create_volume(
        self.context,
        consistencygroup_id=consistencygroup_id)["id"]
    # Three snapshots of the same consistency group.
    cgsnapshot_id1 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id2 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    cgsnapshot_id3 = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)

    req = webob.Request.blank("/v2/fake/cgsnapshots")
    req.method = "GET"
    req.headers["Content-Type"] = "application/xml"
    req.headers["Accept"] = "application/xml"
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(res.status_int, 200)
    # Parse the XML body and check each <cgsnapshot> element's id.
    dom = minidom.parseString(res.body)
    cgsnapshot_list = dom.getElementsByTagName("cgsnapshot")
    self.assertEqual(cgsnapshot_list.item(0).getAttribute("id"),
                     cgsnapshot_id1)
    self.assertEqual(cgsnapshot_list.item(1).getAttribute("id"),
                     cgsnapshot_id2)
    self.assertEqual(cgsnapshot_list.item(2).getAttribute("id"),
                     cgsnapshot_id3)

    # Clean up in reverse creation order.
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id3)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id2)
    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id1)
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.consistencygroup_destroy(context.get_admin_context(),
                                consistencygroup_id)
def test_transfer_create(self):
    """Creating a transfer without a volume_id raises KeyError."""
    self.assertRaises(KeyError, self._create_transfer)
    # With a real volume the transfer is created successfully.
    volume_id = utils.create_volume(self.ctxt)['id']
    self._create_transfer(volume_id)
def test_create_backup_with_InvalidVolume(self):
    """Backing up a volume that is not 'available' returns HTTP 400."""
    # The referenced volume must exist, but in a non-backupable state.
    volume_id = utils.create_volume(self.context, size=5,
                                    status='restoring')['id']
    backup = {"display_name": "nightly001",
              "display_description": "Nightly Backup 03-Sep-2012",
              "volume_id": volume_id,
              "container": "nightlybackups"}

    req = webob.Request.blank('/v2/fake/backups')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps({"backup": backup})
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 400)
    self.assertEqual(res_dict['badRequest']['code'], 400)
    self.assertEqual(
        res_dict['badRequest']['message'],
        'Invalid volume: Volume to be backed up must'
        ' be available')
def test_show_cgsnapshot(self):
    """Show a single cgsnapshot and verify its JSON representation."""
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    volume_id = utils.create_volume(
        self.context, consistencygroup_id=consistencygroup_id)['id']
    cgsnapshot_id = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id)
    # Pass args lazily to the logger rather than pre-formatting with %,
    # so formatting only happens when debug logging is enabled.
    LOG.debug('Created cgsnapshot with id %s', cgsnapshot_id)

    req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot_id)
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 200)
    self.assertEqual(res_dict['cgsnapshot']['description'],
                     'this is a test cgsnapshot')
    self.assertEqual(res_dict['cgsnapshot']['name'], 'test_cgsnapshot')
    self.assertEqual(res_dict['cgsnapshot']['status'], 'creating')

    db.cgsnapshot_destroy(context.get_admin_context(), cgsnapshot_id)
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.consistencygroup_destroy(context.get_admin_context(),
                                consistencygroup_id)
def test_create_backup_json(self):
    """A valid backup request returns 202 and includes a backup id."""
    self.stubs.Set(cinder.db, 'service_get_all_by_topic',
                   self._stub_service_get_all_by_topic)
    volume_id = utils.create_volume(self.context, size=5)['id']
    body = {
        "backup": {
            "display_name": "nightly001",
            "display_description": "Nightly Backup 03-Sep-2012",
            "volume_id": volume_id,
            "container": "nightlybackups",
        }
    }
    req = webob.Request.blank('/v2/fake/backups')
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)
    LOG.info(res_dict)

    self.assertEqual(res.status_int, 202)
    # assertIn reports the actual container contents on failure,
    # unlike assertTrue('id' in ...).
    self.assertIn('id', res_dict['backup'])

    db.volume_destroy(context.get_admin_context(), volume_id)
def test_delete_cgsnapshot_with_Invalidcgsnapshot(self):
    """Deleting a cgsnapshot in an invalid state returns HTTP 400."""
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    volume_id = utils.create_volume(
        self.context, consistencygroup_id=consistencygroup_id)['id']
    cgsnapshot_id = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id, status='invalid')

    req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot_id)
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 400)
    self.assertEqual(res_dict['badRequest']['code'], 400)
    self.assertEqual(res_dict['badRequest']['message'],
                     'Invalid cgsnapshot')

    admin_ctxt = context.get_admin_context()
    db.cgsnapshot_destroy(admin_ctxt, cgsnapshot_id)
    db.volume_destroy(admin_ctxt, volume_id)
    db.consistencygroup_destroy(admin_ctxt, consistencygroup_id)
def test_delete_cgsnapshot_available(self):
    """Deleting an available cgsnapshot returns 202 and sets 'deleting'."""
    consistencygroup_id = utils.create_consistencygroup(self.context)['id']
    volume_id = utils.create_volume(
        self.context, consistencygroup_id=consistencygroup_id)['id']
    cgsnapshot_id = self._create_cgsnapshot(
        consistencygroup_id=consistencygroup_id, status='available')

    req = webob.Request.blank('/v2/fake/cgsnapshots/%s' % cgsnapshot_id)
    req.method = 'DELETE'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(fakes.wsgi_app())

    self.assertEqual(res.status_int, 202)
    # The API accepts the delete asynchronously; the record transitions
    # to the 'deleting' state.
    self.assertEqual(self._get_cgsnapshot_attrib(cgsnapshot_id, 'status'),
                     'deleting')

    admin_ctxt = context.get_admin_context()
    db.cgsnapshot_destroy(admin_ctxt, cgsnapshot_id)
    db.volume_destroy(admin_ctxt, volume_id)
    db.consistencygroup_destroy(admin_ctxt, consistencygroup_id)
def test_promote_replication_volume_status(self, _rpcapi_promote):
    """Promote replication: 400 for bad volume status, 202 when available."""
    for status, expected_code in (('error', 400),
                                  ('in-use', 400),
                                  ('available', 202)):
        volume = tests_utils.create_volume(self.ctxt,
                                           status=status,
                                           replication_status='active',
                                           **self.volume_params)
        (req, res) = self._get_resp('promote', volume['id'])
        msg = ("request: %s\nresult: %s" % (req, res))
        self.assertEqual(res.status_int, expected_code, msg)
def test_migrate_volume(self):
    """Test volume migration done by driver."""
    capabilities = {'location_info': 'GPFSDriver:cindertest:openstack'}
    destination_host = {'host': 'foo', 'capabilities': capabilities}
    volume = test_utils.create_volume(self.context, host=CONF.host)
    self.driver.create_volume(volume)
    self.driver.migrate_volume(self.context, volume, destination_host)
    self.driver.delete_volume(volume)
def test_reenable_replication_uninit_driver(self, _init):
    """Test reenable replication when driver is not initialized."""
    _init.side_effect = exception.DriverNotInitialized
    volume = test_utils.create_volume(self.ctxt,
                                      status='available',
                                      replication_status='error')
    self.assertRaises(exception.DriverNotInitialized,
                      self.manager.reenable_replication,
                      self.adm_ctxt,
                      volume['id'])
def test_promote_replica_fail(self):
    """Test promote replication when promote fails."""
    volume = test_utils.create_volume(self.ctxt,
                                      status='available',
                                      replication_status='active')
    # Driver-raised errors must propagate out of the manager call.
    self.driver.promote_replica.side_effect = exception.CinderException
    self.assertRaises(exception.CinderException,
                      self.manager.promote_replica,
                      self.adm_ctxt,
                      volume['id'])
def test_reenable_replication_fail(self):
    """Test reenable replication when the driver call fails."""
    vol = test_utils.create_volume(self.ctxt,
                                   status='available',
                                   replication_status='error')
    # The driver error must propagate out of the manager call.
    self.driver.reenable_replication.side_effect = \
        exception.CinderException
    self.assertRaises(exception.CinderException,
                      self.manager.reenable_replication,
                      self.adm_ctxt,
                      vol['id'])
def test_reenable_replication(self):
    """A successful reenable updates the volume's replication_status."""
    vol = test_utils.create_volume(self.ctxt,
                                   status='available',
                                   replication_status='error')
    self.driver.reenable_replication.return_value = {
        'replication_status': 'copying'}
    self.manager.reenable_replication(self.adm_ctxt, vol['id'])
    refreshed = db.volume_get(self.ctxt, vol['id'])
    self.assertEqual(refreshed['replication_status'], 'copying')
def test_promote_replica(self):
    """A successful promote updates the volume's replication_status."""
    vol = test_utils.create_volume(self.ctxt,
                                   status='available',
                                   replication_status='active')
    self.driver.promote_replica.return_value = {
        'replication_status': 'inactive'}
    self.manager.promote_replica(self.adm_ctxt, vol['id'])
    refreshed = db.volume_get(self.ctxt, vol['id'])
    self.assertEqual(refreshed['replication_status'], 'inactive')
def test_promote_replica_uninit_driver(self, _init):
    """Test promote replication when driver is not initialized."""
    _init.side_effect = exception.DriverNotInitialized
    volume = test_utils.create_volume(self.ctxt,
                                      status='available',
                                      replication_status='active')
    # Even with a driver return value configured, the uninitialized-driver
    # check fires first and the error propagates.
    self.driver.promote_replica.return_value = None
    self.assertRaises(exception.DriverNotInitialized,
                      self.manager.promote_replica,
                      self.adm_ctxt,
                      volume['id'])
def test_transfer_invalid_volume(self):
    """A transfer cannot be created for an in-use volume."""
    tx_api = transfer_api.API()
    utils.create_volume(self.ctxt, id='1', status='in-use',
                        updated_at=self.updated_at)
    self.assertRaises(exception.InvalidVolume,
                      tx_api.create,
                      self.ctxt, '1', 'Description')
    # The failed create must leave the volume state untouched.
    volume = db.volume_get(self.ctxt, '1')
    self.assertEqual('in-use', volume['status'], 'Unexpected state')