def test_volume_type_create_then_destroy(self):
    """Ensure volume types can be created and deleted."""
    prev_all_vtypes = volume_types.get_all_types(self.ctxt)

    volume_types.create(self.ctxt,
                        self.vol_type1_name,
                        self.vol_type1_specs)
    new = volume_types.get_volume_type_by_name(self.ctxt,
                                               self.vol_type1_name)
    LOG.info(_("Given data: %s"), self.vol_type1_specs)
    LOG.info(_("Result data: %s"), new)

    # iteritems() is Python 2 only; items() behaves the same on 2 and 3.
    for k, v in self.vol_type1_specs.items():
        self.assertEqual(v, new['extra_specs'][k],
                         'one of fields does not match')

    new_all_vtypes = volume_types.get_all_types(self.ctxt)
    self.assertEqual(len(prev_all_vtypes) + 1,
                     len(new_all_vtypes),
                     'drive type was not created')

    volume_types.destroy(self.ctxt, self.vol_type1_name)
    new_all_vtypes = volume_types.get_all_types(self.ctxt)
    self.assertEqual(prev_all_vtypes,
                     new_all_vtypes,
                     'drive type was not deleted')
def test_disassociate_all(self):
    """All QoS associations drop; DB errors raise DisassociateFailed."""

    def fake_db_disassociate_all(context, id):
        # Simulate a DB failure only for the 'Trouble' specs id.
        if id == 'Trouble':
            raise db_exc.DBError()

    type1_ref = volume_types.create(self.ctxt, 'TypeName1')
    type2_ref = volume_types.create(self.ctxt, 'TypeName2')
    specs_id = self._create_qos_specs('QoSName')

    qos_specs.associate_qos_with_type(self.ctxt, specs_id,
                                      type1_ref['id'])
    qos_specs.associate_qos_with_type(self.ctxt, specs_id,
                                      type2_ref['id'])
    res = qos_specs.get_associations(self.ctxt, specs_id)
    # assertEquals is a deprecated alias; assertEqual(expected, actual).
    self.assertEqual(2, len(res[specs_id].keys()))

    qos_specs.disassociate_all(self.ctxt, specs_id)
    res = qos_specs.get_associations(self.ctxt, specs_id)
    self.assertEqual(0, len(res[specs_id].keys()))

    self.stubs.Set(db, 'qos_specs_disassociate_all',
                   fake_db_disassociate_all)
    self.assertRaises(exception.QoSSpecsDisassociateFailed,
                      qos_specs.disassociate_all, self.ctxt, 'Trouble')
def test_retype_with_no_LH_extra_specs(self):
    """Retype between types carrying no LeftHand-specific extra specs."""
    # Build the driver with default config; grab the mocked HTTP client.
    mock_client = self.setup_driver()

    ctxt = context.get_admin_context()
    host = {'host': self.serverName}
    specs_old = {'foo': False, 'bar': 2, 'error': True}
    specs_new = {'foo': True, 'bar': 5, 'error': False}
    ref_old = volume_types.create(ctxt, 'old', specs_old)
    ref_new = volume_types.create(ctxt, 'new', specs_new)
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 ref_old['id'],
                                                 ref_new['id'])

    volume = dict.copy(self.volume)
    volume['volume_type'] = volume_types.get_volume_type(ctxt,
                                                         ref_old['id'])
    volume['host'] = host
    target_type = volume_types.get_volume_type(ctxt, ref_new['id'])

    self.driver.retype(ctxt, volume, target_type, diff, host)

    # Only a volume lookup should follow the startup call chain.
    mock_client.assert_has_calls(
        self.driver_startup_call_stack +
        [mock.call.getVolumeByName('fakevolume')])
def test_volume_type_get_by_id_and_name(self):
    """Ensure lookups by name and by id return the same entry."""
    volume_types.create(self.ctxt, self.vol_type1_name,
                        self.vol_type1_specs)
    by_name = volume_types.get_volume_type_by_name(self.ctxt,
                                                   self.vol_type1_name)
    by_id = volume_types.get_volume_type(self.ctxt, by_name["id"])
    self.assertEqual(by_name, by_id)
def test_disassociate_all(self):
    """Disassociate every type; stubbed DB errors become driver errors."""

    def stub_disassociate_all(context, id):
        # Simulate a DB failure only for the 'Trouble' specs id.
        if id == 'Trouble':
            raise db_exc.DBError()

    def stub_specs_get(context, id):
        if id == 'NotFound':
            raise exception.QoSSpecsNotFound(specs_id=id)

    ref1 = volume_types.create(self.ctxt, 'TypeName1')
    ref2 = volume_types.create(self.ctxt, 'TypeName2')
    specs_id = self._create_qos_specs('QoSName')

    qos_specs.associate_qos_with_type(self.ctxt, specs_id, ref1['id'])
    qos_specs.associate_qos_with_type(self.ctxt, specs_id, ref2['id'])
    self.assertEqual(2, len(qos_specs.get_associations(self.ctxt,
                                                       specs_id)))

    qos_specs.disassociate_all(self.ctxt, specs_id)
    self.assertEqual(0, len(qos_specs.get_associations(self.ctxt,
                                                       specs_id)))

    self.stubs.Set(db, 'qos_specs_disassociate_all',
                   stub_disassociate_all)
    self.stubs.Set(qos_specs, 'get_qos_specs', stub_specs_get)
    self.assertRaises(exception.QoSSpecsDisassociateFailed,
                      qos_specs.disassociate_all, self.ctxt, 'Trouble')
def check_for_setup_error(self):
    """Mirror Lunr's active volume types into cinder's DB.

    Runs once on startup of the manager, good a time as any to hit
    lunr and make sure cinder's got the types in the db.
    """
    lunr_admin_context = {'project_id': 'admin'}
    max_attempts = 3
    attempt = 0
    while True:
        attempt += 1
        try:
            client = LunrClient(self.url, lunr_admin_context,
                                logger=LOG)
            resp = client.types.list()
        except Exception:
            if attempt >= max_attempts:
                # Give up quietly; a missing type list is not fatal.
                LOG.error('Unable to read volume types from Lunr '
                          'after %s attempts.', attempt)
                return
            LOG.exception('failed attempt %s to retrieve volume types '
                          'from %s, will retry.', attempt, self.url)
            # Quadratic backoff between retries.
            sleep(attempt ** 2)
        else:
            LOG.info('successfully pulled volume types from Lunr')
            break

    context = get_admin_context()
    for vtype in resp.body:
        if vtype['status'] != 'ACTIVE':
            LOG.debug('ignoring type %s with status %s',
                      vtype['name'], vtype['status'])
            continue
        try:
            volume_types.create(context, vtype['name'])
            LOG.info('volume type %s successfully created',
                     vtype['name'])
        except exception.VolumeTypeExists:
            # Already mirrored on a previous startup; nothing to do.
            LOG.info('volume type %s already exists', vtype['name'])
def test_v2_restore_factory(self):
    """Exercise every v2 restore function against a fake volume."""
    fact = self.bak_meta_api._v2_restore_factory()

    expected_keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
                     self.bak_meta_api.TYPE_TAG_VOL_META,
                     self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
    # Factory must expose exactly these keys — no more, no fewer.
    self.assertEqual(set([]),
                     set(expected_keys).symmetric_difference(
                         set(fact.keys())))

    volume_types.create(self.ctxt, 'faketype')
    vol_type = volume_types.get_volume_type_by_name(self.ctxt,
                                                    'faketype')

    meta_container = {
        self.bak_meta_api.TYPE_TAG_VOL_BASE_META:
            {'encryption_key_id': '123',
             'volume_type_id': vol_type.get('id'),
             'display_name': 'vol-2',
             'display_description': 'description'},
        self.bak_meta_api.TYPE_TAG_VOL_META: {},
        self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {},
    }
    for tag in fact:
        restore_func = fact[tag][0]
        fields = fact[tag][1]
        restore_func(meta_container[tag], self.volume_id, fields)

    vol = db.volume_get(self.ctxt, self.volume_id)
    self.assertEqual(self.volume_display_name, vol['display_name'])
    self.assertEqual(self.volume_display_description,
                     vol['display_description'])
    self.assertEqual('123', vol['encryption_key_id'])
def _create(self, req, body):
    """Creates a new volume type."""
    context = req.environ['cinder.context']
    context.authorize(policy.MANAGE_POLICY)

    vol_type = body['volume_type']
    name = vol_type['name']
    description = vol_type.get('description')
    specs = vol_type.get('extra_specs', {})
    # Coerce the public flag strictly; bad strings raise here.
    is_public = strutils.bool_from_string(
        vol_type.get('os-volume-type-access:is_public', True),
        strict=True)

    try:
        volume_types.create(context, name, specs, is_public,
                            description=description)
        vol_type = volume_types.get_volume_type_by_name(context, name)
        req.cache_resource(vol_type, name='types')
        self._notify_volume_type_info(
            context, 'volume_type.create', vol_type)
    except exception.VolumeTypeExists as err:
        self._notify_volume_type_error(
            context, 'volume_type.create', err, volume_type=vol_type)
        raise webob.exc.HTTPConflict(explanation=six.text_type(err))
    except exception.VolumeTypeNotFoundByName as err:
        self._notify_volume_type_error(
            context, 'volume_type.create', err, name=name)
        # Not found exception will be handled at the wsgi level
        raise

    return self._view_builder.show(req, vol_type)
def _create(self, req, body):
    """Creates a new volume type.

    Validates the request body, creates the type, and returns its
    view-builder representation.
    """
    context = req.environ['cinder.context']
    authorize(context)

    # 'not body' already covers the empty-string case, so the extra
    # body == "" comparison was redundant; likewise reject any falsy
    # volume_type payload (None, "", {}) before dereferencing it.
    if not body:
        raise webob.exc.HTTPUnprocessableEntity()
    vol_type = body.get('volume_type', None)
    if not vol_type:
        raise webob.exc.HTTPUnprocessableEntity()

    name = vol_type.get('name', None)
    specs = vol_type.get('extra_specs', {})
    if not name:
        raise webob.exc.HTTPUnprocessableEntity()

    try:
        volume_types.create(context, name, specs)
        vol_type = volume_types.get_volume_type_by_name(context, name)
    except exception.VolumeTypeExists as err:
        # Duplicate name maps to HTTP 409.
        raise webob.exc.HTTPConflict(explanation=str(err))
    except exception.NotFound:
        raise webob.exc.HTTPNotFound()

    return self._view_builder.show(req, vol_type)
def test_flashsystem_get_vdisk_params(self):
    """Check vdisk params resolution from defaults and volume types."""
    # Case 1: defaults only (no volume type).
    self.driver._get_vdisk_params(None)

    # Case 2: parameters supplied through volume-type extra specs.
    created = [
        volume_types.create(self.ctxt, 'opts1',
                            {'storage_protocol': 'iSCSI'}),
        volume_types.create(self.ctxt, 'opts2',
                            {'capabilities:storage_protocol': 'iSCSI'}),
        volume_types.create(self.ctxt, 'opts3',
                            {'storage_protocol': 'FC'}),
    ]
    type1, type2, type3 = created

    self.assertEqual(
        'iSCSI',
        self.driver._get_vdisk_params(type1['id'])['protocol'])
    self.assertEqual(
        'iSCSI',
        self.driver._get_vdisk_params(type2['id'])['protocol'])
    # An 'FC' protocol spec must be rejected by this driver.
    self.assertRaises(exception.InvalidInput,
                      self.driver._get_vdisk_params,
                      type3['id'])

    # clear environment
    for vtype in created:
        volume_types.destroy(self.ctxt, vtype['id'])
def test_retype_volume_different_pool_and_host(self):
    """Retype across both pool and host should succeed (rv True)."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}

    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    key_specs_new = {'capabilities:storage_pool': 'gold',
                     'volume_backend_name': 'backend1'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])

    # set volume host to be different from target host
    volume = test_utils.create_volume(ctxt, host=CONF.host)
    volume['volume_type_id'] = old_type['id']

    with mock.patch('cinder.utils.execute'):
        # different host different pool
        LOG.debug('Retype different pools and hosts, expected rv = True.')
        self.driver.db = mock.Mock()
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, new_type, diff, host)
        self.assertTrue(rv)
        self.driver.delete_volume(volume)
        # Lazy %-args: only interpolated when debug logging is enabled.
        LOG.debug('Retype different pools and hosts, rv = %s.', rv)
def test_retype_same_extra_specs(self):
    """Retype where only the AO extra spec differs triggers a modify."""
    # Configure the driver and capture the mocked LeftHand client.
    mock_client = self.setup_driver()
    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    ctxt = context.get_admin_context()
    host = {'host': self.serverName}
    specs_old = {'hplh:provisioning': 'full', 'hplh:ao': 'true'}
    specs_new = {'hplh:provisioning': 'full', 'hplh:ao': 'false'}
    ref_old = volume_types.create(ctxt, 'old', specs_old)
    ref_new = volume_types.create(ctxt, 'new', specs_new)
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 ref_old['id'],
                                                 ref_new['id'])

    volume = dict.copy(self.volume)
    volume['volume_type'] = volume_types.get_volume_type(ctxt,
                                                         ref_old['id'])
    volume['host'] = host
    new_type = volume_types.get_volume_type(ctxt, ref_new['id'])

    self.driver.retype(ctxt, volume, new_type, diff, host)

    # Expect a lookup followed by an AO-disable modification.
    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume'),
        mock.call.modifyVolume(
            1, {'isAdaptiveOptimizationEnabled': False})]
    mock_client.assert_has_calls(expected)
def test_retype_volume(self, mock_req):
    """Retype between two QoS-associated volume types."""
    ctxt = context.get_admin_context()

    # Original volume type plus its QoS specs.
    extra_specs = {"Feature:Pool": "1", "Feature:Raid": "1",
                   "Affinity:Type": "flash", "Alloc:Type": "thick"}
    type_ref = volume_types.create(ctxt, 'VT1', extra_specs)
    qos = qos_specs.create(ctxt, 'fake-qos',
                           {'qos:minIOPS': '20',
                            'qos:maxIOPS': '2000',
                            'qos:burstIOPS': '5000'})
    qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
    VOLUME1['volume_type_id'] = type_ref['id']

    # New volume type
    extra_specs = {"Feature:Pool": "1", "Feature:Raid": "5",
                   "Affinity:Type": "flash", "Alloc:Type": "thick"}
    type_ref = volume_types.create(ctxt, 'VT2', extra_specs)
    qos = qos_specs.create(ctxt, 'fake-qos2',
                           {'qos:minIOPS': '30',
                            'qos:maxIOPS': '3000',
                            'qos:burstIOPS': '10000'})
    qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])

    # Canned REST responses, in call order.
    mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
                                 ISE_GET_VOL1_STATUS_RESP,
                                 ISE_MODIFY_VOLUME_RESP])
    self.setup_driver()
    self.driver.retype(ctxt, VOLUME1, type_ref, 0, 0)
def test_retype(self):
    """Test that retype returns successfully."""
    self.driver.do_setup(None)

    # Target host advertising the fake backend's capabilities.
    ctxt = context.get_admin_context()
    host = {"host": "foo",
            "capabilities": {"location_info": "xiv_ds8k_fake_1",
                             "extent_size": "1024"}}
    specs_old = {"easytier": False, "warning": 2, "autoexpand": True}
    specs_new = {"easytier": True, "warning": 5, "autoexpand": False}
    ref_old = volume_types.create(ctxt, "old", specs_old)
    ref_new = volume_types.create(ctxt, "new", specs_new)
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 ref_old["id"],
                                                 ref_new["id"])

    volume = copy.deepcopy(VOLUME)
    volume["volume_type"] = volume_types.get_volume_type(ctxt,
                                                         ref_old["id"])
    volume["host"] = host
    new_type = volume_types.get_volume_type(ctxt, ref_new["id"])

    self.driver.create_volume(volume)
    ret = self.driver.retype(ctxt, volume, new_type, diff, host)
    self.assertTrue(ret)
    # The new type's easytier setting must now be set on the volume.
    self.assertTrue(volume["easytier"])
def test_disassociate_all(self):
    """Disassociate all types from QoS specs; DB errors are wrapped."""

    def stub_disassociate_all(context, id):
        if id == "Trouble":
            raise db_exc.DBError()

    def stub_specs_get(context, id):
        if id == "NotFound":
            raise exception.QoSSpecsNotFound(specs_id=id)

    ref1 = volume_types.create(self.ctxt, "TypeName1")
    ref2 = volume_types.create(self.ctxt, "TypeName2")
    specs_id = self._create_qos_specs("QoSName")

    qos_specs.associate_qos_with_type(self.ctxt, specs_id, ref1["id"])
    qos_specs.associate_qos_with_type(self.ctxt, specs_id, ref2["id"])
    self.assertEqual(2, len(qos_specs.get_associations(self.ctxt,
                                                       specs_id)))

    qos_specs.disassociate_all(self.ctxt, specs_id)
    self.assertEqual(0, len(qos_specs.get_associations(self.ctxt,
                                                       specs_id)))

    self.mock_object(db, "qos_specs_disassociate_all",
                     stub_disassociate_all)
    self.mock_object(qos_specs, "get_qos_specs", stub_specs_get)
    self.assertRaises(exception.QoSSpecsDisassociateFailed,
                      qos_specs.disassociate_all, self.ctxt, "Trouble")
def test_retype_volume_different_backend(self):
    """Retype to a different backend cannot migrate; returns False."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}

    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    key_specs_new = {'capabilities:storage_pool': 'gold',
                     'volume_backend_name': 'backend2'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 new_type_ref['id'])

    # set volume host to match target host
    volume = test_utils.create_volume(ctxt, host=host['host'])
    volume['volume_type_id'] = old_type['id']

    with mock.patch('cinder.utils.execute'):
        LOG.debug('Retype different backends, cannot migrate. '
                  'Expected rv = False.')
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, old_type, diff, host)
        self.assertFalse(rv)
        self.driver.delete_volume(volume)
        # Lazy %-args: only interpolated when debug logging is enabled.
        LOG.debug('Retype different backends, cannot migrate, '
                  'rv = %s.', rv)
def _create(self, req, body):
    """Creates a new volume type."""
    context = req.environ["cinder.context"]
    authorize(context)

    # Body must wrap its payload in a 'volume_type' object.
    if not self.is_valid_body(body, "volume_type"):
        raise webob.exc.HTTPBadRequest()

    vol_type = body["volume_type"]
    name = vol_type.get("name", None)
    description = vol_type.get("description")
    specs = vol_type.get("extra_specs", {})
    is_public = vol_type.get("os-volume-type-access:is_public", True)

    # Name is mandatory and must contain non-whitespace characters.
    if name is None or len(name.strip()) == 0:
        msg = _("Volume type name can not be empty.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    try:
        volume_types.create(context, name, specs, is_public,
                            description=description)
        vol_type = volume_types.get_volume_type_by_name(context, name)
        req.cache_resource(vol_type, name="types")
        # Emit a 'volume_type.create' notification on success.
        self._notify_volume_type_info(context, "volume_type.create",
                                      vol_type)
    except exception.VolumeTypeExists as err:
        # Duplicate name: notify the failure and map to HTTP 409.
        self._notify_volume_type_error(context, "volume_type.create",
                                       err, volume_type=vol_type)
        raise webob.exc.HTTPConflict(explanation=six.text_type(err))
    except exception.NotFound as err:
        # Re-read by name failed after create: map to HTTP 404.
        self._notify_volume_type_error(context, "volume_type.create",
                                       err, name=name)
        raise webob.exc.HTTPNotFound()

    return self._view_builder.show(req, vol_type)
def _create(self, req, body):
    """Creates a new volume type."""
    context = req.environ["cinder.context"]
    authorize(context)

    # Body must wrap its payload in a 'volume_type' object.
    if not self.is_valid_body(body, "volume_type"):
        raise webob.exc.HTTPBadRequest()

    vol_type = body["volume_type"]
    name = vol_type.get("name", None)
    specs = vol_type.get("extra_specs", {})

    # A non-empty name is mandatory.
    if name is None or name == "":
        raise webob.exc.HTTPBadRequest()

    try:
        volume_types.create(context, name, specs)
        vol_type = volume_types.get_volume_type_by_name(context, name)
        # Broadcast a success notification carrying the created type.
        notifier_info = dict(volume_types=vol_type)
        notifier_api.notify(context, "volumeType", "volume_type.create",
                            notifier_api.INFO, notifier_info)
    except exception.VolumeTypeExists as err:
        # Duplicate name: notify the failure and map to HTTP 409.
        notifier_err = dict(volume_types=vol_type,
                            error_message=str(err))
        self._notify_volume_type_error(context, "volume_type.create",
                                       notifier_err)
        raise webob.exc.HTTPConflict(explanation=str(err))
    except exception.NotFound as err:
        # Re-read by name failed after create: map to HTTP 404.
        notifier_err = dict(volume_types=vol_type,
                            error_message=str(err))
        self._notify_volume_type_error(context, "volume_type.create",
                                       notifier_err)
        raise webob.exc.HTTPNotFound()

    return self._view_builder.show(req, vol_type)
def test_get_default_volume_type(self):
    """Ensures default volume type can be retrieved."""
    volume_types.create(self.ctxt, fake_flags.def_vol_type, {})
    default_vol_type = volume_types.get_default_volume_type()
    # assertEqual(expected, actual) — match the file's convention.
    self.assertEqual(fake_flags.def_vol_type,
                     default_vol_type.get('name'))
def test_ensure_no_extra_specs_for_non_admin(self):
    # non-admin users shouldn't get extra-specs back in type-get/list etc
    joe_ctxt = context.RequestContext(
        "average-joe", "d802f078-0af1-4e6b-8c02-7fac8d4339aa",
        auth_token="token", is_admin=False)
    volume_types.create(self.ctxt, "type-test", is_public=False)

    vtype = volume_types.get_volume_type_by_name(joe_ctxt, "type-test")
    self.assertIsNone(vtype.get("extra_specs", None))
def test_update_limit(self, mock_validate_integer, mock_validate):
    """Updating a quota limit validates and returns the new value."""
    mock_validate_integer.return_value = 5
    volume_types.create(self.ctxt, 'fake_type')

    result = self.controller.update(self.req, 'foo',
                                    make_body(volumes=5))

    self.assertEqual(5, result['quota_class_set']['volumes'])
    # Both validators must have been exercised.
    self.assertTrue(mock_validate.called)
    self.assertTrue(mock_validate_integer.called)
def test_ensure__extra_specs_for_non_admin(self):
    # non-admin users get extra-specs back in type-get/list etc at DB layer
    joe_ctxt = context.RequestContext(
        'average-joe', 'd802f078-0af1-4e6b-8c02-7fac8d4339aa',
        auth_token='token', is_admin=False)
    volume_types.create(self.ctxt, "type-test", is_public=False)

    vtype = volume_types.get_volume_type_by_name(joe_ctxt, 'type-test')
    self.assertIsNotNone(vtype.get('extra_specs', None))
def test_get_volume_type_by_name_with_uuid_name(self):
    """A type whose name looks like a UUID is still found by name."""
    uuid_name = uuidutils.generate_uuid()
    volume_types.create(self.ctxt, uuid_name, self.vol_type1_specs,
                        description=self.vol_type1_description)

    found = volume_types.get_by_name_or_id(self.ctxt, uuid_name)
    self.assertEqual(uuid_name, found['name'])
def test_update_with_more_volume_types(self):
    """Per-type quota keys for several types update in one request."""
    volume_types.create(self.ctxt, 'fake_type_1')
    volume_types.create(self.ctxt, 'fake_type_2')

    body = {'quota_class_set': {'gigabytes_fake_type_1': 1111,
                                'volumes_fake_type_2': 2222}}
    result = self.controller.update(self.req, 'foo', body)
    self.assertDictMatch(result,
                         make_response_body(ctxt=self.ctxt,
                                            quota_class='foo',
                                            request_body=body,
                                            tenant_id=None))
def _create(self, req, body):
    """Creates a new volume type."""
    context = req.environ['cinder.context']
    authorize(context)

    # Body must wrap its payload in a 'volume_type' object.
    self.assert_valid_body(body, 'volume_type')

    vol_type = body['volume_type']
    name = vol_type.get('name', None)
    description = vol_type.get('description')
    specs = vol_type.get('extra_specs', {})
    # Extra-spec keys and values must fit the allowed string lengths.
    utils.validate_dictionary_string_length(specs)
    is_public = vol_type.get('os-volume-type-access:is_public', True)

    # Name is mandatory, non-whitespace, and at most 255 characters.
    if name is None or len(name.strip()) == 0:
        msg = _("Volume type name can not be empty.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    utils.check_string_length(name, 'Type name',
                              min_length=1, max_length=255)

    # Description is optional but length-bounded when present.
    if description is not None:
        utils.check_string_length(description, 'Type description',
                                  min_length=0, max_length=255)

    # is_public must parse as a boolean string.
    if not strutils.is_valid_boolstr(is_public):
        msg = _("Invalid value '%s' for is_public. Accepted values: "
                "True or False.") % is_public
        raise webob.exc.HTTPBadRequest(explanation=msg)

    try:
        volume_types.create(context, name, specs, is_public,
                            description=description)
        vol_type = volume_types.get_volume_type_by_name(context, name)
        req.cache_resource(vol_type, name='types')
        # Emit a 'volume_type.create' notification on success.
        self._notify_volume_type_info(
            context, 'volume_type.create', vol_type)
    except exception.VolumeTypeExists as err:
        # Duplicate name: notify the failure and map to HTTP 409.
        self._notify_volume_type_error(
            context, 'volume_type.create', err, volume_type=vol_type)
        raise webob.exc.HTTPConflict(explanation=six.text_type(err))
    except exception.VolumeTypeNotFoundByName as err:
        self._notify_volume_type_error(
            context, 'volume_type.create', err, name=name)
        # Not found exception will be handled at the wsgi level
        raise

    return self._view_builder.show(req, vol_type)
def test_qos_specs_disassociate_all(self):
    """All type associations for a QoS spec are removed at once."""
    specs_id = self._create_qos_specs("FakeQos")
    type1_id = volume_types.create(self.ctxt, "Type1Name")["id"]
    type2_id = volume_types.create(self.ctxt, "Type2Name")["id"]
    type3_id = volume_types.create(self.ctxt, "Type3Name")["id"]
    db.volume_type_qos_associate(self.ctxt, type1_id, specs_id)
    db.volume_type_qos_associate(self.ctxt, type2_id, specs_id)
    db.volume_type_qos_associate(self.ctxt, type3_id, specs_id)

    res = db.qos_specs_associations_get(self.ctxt, specs_id)
    # assertEqual(expected, actual) — match the sibling test's order.
    self.assertEqual(3, len(res))

    db.qos_specs_disassociate_all(self.ctxt, specs_id)
    res = db.qos_specs_associations_get(self.ctxt, specs_id)
    self.assertEqual(0, len(res))
def test_qos_specs_disassociate_all(self):
    """Disassociating all drops every type association at the DB layer."""
    specs_id = self._create_qos_specs('FakeQos')
    type_ids = [volume_types.create(self.ctxt, name)['id']
                for name in ('Type1Name', 'Type2Name', 'Type3Name')]
    for type_id in type_ids:
        db.volume_type_qos_associate(self.ctxt, type_id, specs_id)

    associations = db.qos_specs_associations_get(self.ctxt, specs_id)
    self.assertEqual(3, len(associations))

    db.qos_specs_disassociate_all(self.ctxt, specs_id)
    associations = db.qos_specs_associations_get(self.ctxt, specs_id)
    self.assertEqual(0, len(associations))
def create(self):
    """Persist this volume type; fails if it was already created."""
    if self.obj_attr_is_set("id"):
        raise exception.ObjectActionError(action="create",
                                          reason=_("already created"))
    db_type = volume_types.create(self._context, self.name,
                                  self.extra_specs, self.is_public,
                                  self.projects, self.description)
    # Refresh this object from the freshly created DB row.
    self._from_db_object(self._context, self, db_type)
def test_disassociate_qos_specs(self):
    """Single-type disassociation, unknown types, and DB failures."""

    def stub_disassociate(context, id, type_id):
        raise db_exc.DBError()

    type_ref = volume_types.create(self.ctxt, 'TypeName')
    specs_id = self._create_qos_specs('QoSName')

    qos_specs.associate_qos_with_type(self.ctxt, specs_id,
                                      type_ref['id'])
    self.assertEqual(1, len(qos_specs.get_associations(self.ctxt,
                                                       specs_id)))

    qos_specs.disassociate_qos_specs(self.ctxt, specs_id,
                                     type_ref['id'])
    self.assertEqual(0, len(qos_specs.get_associations(self.ctxt,
                                                       specs_id)))

    # An unknown volume type is reported as VolumeTypeNotFound.
    self.assertRaises(exception.VolumeTypeNotFound,
                      qos_specs.disassociate_qos_specs,
                      self.ctxt, specs_id, 'NotFound')

    # Verify we can disassociate specs from volume_type even if they are
    # not associated with no error
    qos_specs.disassociate_qos_specs(self.ctxt, specs_id,
                                     type_ref['id'])

    qos_specs.associate_qos_with_type(self.ctxt, specs_id,
                                      type_ref['id'])
    self.mock_object(db, 'qos_specs_disassociate', stub_disassociate)
    self.assertRaises(exception.QoSSpecsDisassociateFailed,
                      qos_specs.disassociate_qos_specs,
                      self.ctxt, specs_id, type_ref['id'])
def test_create_volume_blocked(self, mock_ghn):
    """Test volume creation where only initial space-create is blocked.

    This should actually pass because we are blocked but return an
    error in request-cancel, meaning that it got unblocked before we
    could kill the space request.
    """
    ctxt = context.get_admin_context()
    extra_specs = {}
    type_ref = volume_types.create(ctxt, "hgst-1", extra_specs)
    volume = {"id": "1", "name": "volume1",
              "display_name": "",
              "volume_type_id": type_ref["id"],
              "size": 10}
    # Arrange for the fake backend to block once; the failed cancel
    # means the create is treated as having succeeded.
    self._return_blocked = 1  # Block & fail cancel => create succeeded

    ret = self.driver.create_volume(volume)
    # Requested size 10 shows up as space size "12" — presumably
    # GB-vs-GiB padding by the driver; confirm against the driver code.
    expected = {"redundancy": "0", "group": "xanadu",
                "name": "volume10", "mode": "0777",
                "user": "******", "net": "net1",
                "storageserver": "stor1:gbd0,stor2:gbd0,",
                "size": "12"}
    self.assertDictMatch(expected, self.created)
    # Check the returned provider
    expected_pid = {"provider_id": "volume10"}
    self.assertDictMatch(expected_pid, ret)
    self.assertEqual(True, self._request_cancel)
def test_instorage_initialize_iscsi_connection_failure(self, term_conn):
    """Attach with no storage nodes raises and terminates the link."""
    # Build an iSCSI volume via a protocol-specific volume type.
    volume_iSCSI = self._create_volume()
    vol_type_iSCSI = volume_types.create(
        self.ctxt, 'iSCSI',
        {'capabilities:storage_protocol': '<in> iSCSI'})
    volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id']

    connector = {'host': 'instorage-mcs-host',
                 'wwnns': ['20000090fa17311e', '20000090fa17311f'],
                 'wwpns': ['ff00000000000000', 'ff00000000000001'],
                 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}

    # With no storage nodes available the attach must fail...
    self.iscsi_driver._state['storage_nodes'] = {}
    self.assertRaises(exception.VolumeBackendAPIException,
                      self.iscsi_driver.initialize_connection,
                      volume_iSCSI, connector)
    # ...and the partially-made connection must be terminated.
    term_conn.assert_called_once_with(volume_iSCSI, connector)
def test_create_volume(self, mock_ghn):
    """Test volume creation, ensure appropriate size expansion/name."""
    ctxt = context.get_admin_context()
    type_ref = volume_types.create(ctxt, 'hgst-1', {})
    volume = {'id': '1',
              'name': 'volume1',
              'display_name': '',
              'volume_type_id': type_ref['id'],
              'size': 10}

    ret = self.driver.create_volume(volume)

    # The fake backend records the parameters of the space-create.
    expected = {'redundancy': '0',
                'group': 'xanadu',
                'name': 'volume10',
                'mode': '0777',
                'user': '******',
                'net': 'net1',
                'storageserver': 'stor1:gbd0,stor2:gbd0,',
                'size': '12'}
    self.assertDictMatch(expected, self.created)
    # Check the returned provider, note that provider_id is hashed
    self.assertDictMatch({'provider_id': 'volume10'}, ret)
def test_instorage_initialize_iscsi_connection_single_path(self):
    """Verify the single-path iSCSI host-volume map returned on attach."""
    # Test the return value for _get_iscsi_properties
    connector = {'host': 'instorage-mcs-host',
                 'wwnns': ['20000090fa17311e', '20000090fa17311f'],
                 'wwpns': ['ff00000000000000', 'ff00000000000001'],
                 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}
    # Expected single path host-volume map return value
    exp_s_path = {'driver_volume_type': 'iscsi',
                  'data': {'target_discovered': False,
                           'target_iqn':
                               'iqn.1982-01.com.inspur:1234.sim.node1',
                           'target_portal': '1.234.56.78:3260',
                           'target_lun': 0,
                           'auth_method': 'CHAP',
                           'discovery_auth_method': 'CHAP'}}

    # Build an iSCSI volume through a protocol-specific volume type.
    volume_iSCSI = self._create_volume()
    extra_spec = {'capabilities:storage_protocol': '<in> iSCSI'}
    vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec)
    volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id']

    # Make sure that the volumes have been created
    self._assert_vol_exists(volume_iSCSI['name'], True)

    # Check case where no hosts exist
    ret = self.iscsi_driver._assistant.get_host_from_connector(
        connector)
    self.assertIsNone(ret)

    # Initialize connection to map volume to a host
    ret = self.iscsi_driver.initialize_connection(
        volume_iSCSI, connector)
    self.assertEqual(exp_s_path['driver_volume_type'],
                     ret['driver_volume_type'])

    # Check the single path host-volume map return value
    for k, v in exp_s_path['data'].items():
        self.assertEqual(v, ret['data'][k])

    # The mapping must have implicitly registered a host for the
    # connector.
    ret = self.iscsi_driver._assistant.get_host_from_connector(
        connector)
    self.assertIsNotNone(ret)
def test_volume_type_create_then_destroy(self):
    """Ensure volume types can be created, updated and deleted."""
    prev_all_vtypes = volume_types.get_all_types(self.ctxt)

    # create
    type_ref = volume_types.create(self.ctxt,
                                   self.vol_type1_name,
                                   self.vol_type1_specs,
                                   description=self.vol_type1_description)
    new = volume_types.get_volume_type_by_name(self.ctxt,
                                               self.vol_type1_name)
    LOG.info(_("Given data: %s"), self.vol_type1_specs)
    LOG.info(_("Result data: %s"), new)

    self.assertEqual(self.vol_type1_description, new['description'])
    # dict.iteritems() does not exist on Python 3; items() works on both.
    for k, v in self.vol_type1_specs.items():
        self.assertEqual(v, new['extra_specs'][k],
                         'one of fields does not match')

    new_all_vtypes = volume_types.get_all_types(self.ctxt)
    self.assertEqual(len(prev_all_vtypes) + 1,
                     len(new_all_vtypes),
                     'drive type was not created')

    # update
    new_type_name = self.vol_type1_name + '_updated'
    new_type_desc = self.vol_type1_description + '_updated'
    type_ref_updated = volume_types.update(self.ctxt,
                                           type_ref.id,
                                           new_type_name,
                                           new_type_desc)
    self.assertEqual(new_type_name, type_ref_updated['name'])
    self.assertEqual(new_type_desc, type_ref_updated['description'])

    # destroy
    volume_types.destroy(self.ctxt, type_ref['id'])
    new_all_vtypes = volume_types.get_all_types(self.ctxt)
    self.assertEqual(prev_all_vtypes,
                     new_all_vtypes,
                     'drive type was not deleted')
def test_update_volume_type(self, body):
    """PUT on a type updates only the fields present in the body."""
    req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % (
        fake.PROJECT_ID, DEFAULT_VOLUME_TYPE))
    req.method = 'PUT'
    ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
    req.environ['cinder.context'] = ctxt

    vtype = volume_types.create(ctxt, 'volume_type')
    res = self.controller._update(req, vtype.get('id'), body=body)

    requested = body['volume_type']
    # Name/is_public are asserted only when the body supplied them.
    if requested.get('name') is not None:
        self.assertEqual(requested.get('name'),
                         res['volume_type']['name'])
    if requested.get('is_public') is not None:
        self.assertEqual(requested.get('is_public'),
                         res['volume_type']['is_public'])
    self.assertEqual(requested.get('description'),
                     res['volume_type']['description'])
def test_delete_volume_failure_modes(self):
    """Test cases where space-delete fails, but OS delete is still OK."""
    ctxt = context.get_admin_context()
    type_ref = volume_types.create(ctxt, 'hgst-1', {})
    volume = {'id': '1',
              'name': 'volume1',
              'display_name': '',
              'volume_type_id': type_ref['id'],
              'size': 10,
              'provider_id': 'volume10'}

    # Backend space-delete failure must not bubble up to the caller.
    self._fail_space_delete = True
    self.driver.delete_volume(volume)

    # A volume with no provider_id should also delete without error.
    self._fail_space_delete = False
    volume['provider_id'] = None
    self.driver.delete_volume(volume)
def test_get_volume_type_qos_specs(self):
    """QoS specs appear on a volume type only after association."""
    qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1',
                               {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
    type_ref = volume_types.create(self.ctxt, "type1",
                                   {"key2": "val2", "key3": "val3"})

    # Before association the type carries no QoS specs.
    res = volume_types.get_volume_type_qos_specs(type_ref['id'])
    self.assertIsNone(res['qos_specs'])

    qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'],
                                      type_ref['id'])
    expected = {'qos_specs': {'id': qos_ref['id'],
                              'name': 'qos-specs-1',
                              'consumer': 'back-end',
                              'specs': {'k1': 'v1',
                                        'k2': 'v2',
                                        'k3': 'v3'}}}
    res = volume_types.get_volume_type_qos_specs(type_ref['id'])
    self.assertDictMatch(expected, res)
def test_negative_create_snapshot_conflict(self, mock_req):
    """A persistent 409 from the backend surfaces as a driver error."""
    ctxt = context.get_admin_context()
    type_ref = volume_types.create(ctxt, 'VT1',
                                   {"Feature:Pool": "1",
                                    "Feature:Raid": "1",
                                    "Affinity:Type": "flash",
                                    "Alloc:Type": "thick"})
    qos = qos_specs.create(ctxt, 'fake-qos',
                           {'qos:minIOPS': '20',
                            'qos:maxIOPS': '2000',
                            'qos:burstIOPS': '5000'})
    qos_specs.associate_qos_with_type(ctxt, qos['id'], type_ref['id'])
    SNAPSHOT1['volume_type_id'] = type_ref['id']

    # Query and status succeed; the snapshot request returns 409.
    mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
                                 ISE_GET_VOL1_STATUS_RESP,
                                 ISE_409_CONFLICT_RESP])
    # Allow only a single retry so the conflict is terminal.
    self.configuration.ise_completion_retries = 1
    self.setup_driver()
    self.assertRaises(exception.XIODriverException,
                      self.driver.create_snapshot, SNAPSHOT1)
def test_create_volume_from_snapshot(self, mock_req):
    """Volume creation from a snapshot walks the expected REST calls."""
    admin_ctxt = context.get_admin_context()
    vol_type = volume_types.create(admin_ctxt, 'VT1',
                                   {"Feature:Pool": "1",
                                    "Feature:Raid": "1",
                                    "Affinity:Type": "flash",
                                    "Alloc:Type": "thick"})
    qos = qos_specs.create(admin_ctxt, 'fake-qos',
                           {'qos:minIOPS': '20',
                            'qos:maxIOPS': '2000',
                            'qos:burstIOPS': '5000'})
    qos_specs.associate_qos_with_type(admin_ctxt, qos['id'],
                                      vol_type['id'])
    SNAPSHOT1['volume_type_id'] = vol_type['id']
    # Canned responses for the full sequence of ISE REST requests.
    mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
                                 ISE_GET_SNAP1_STATUS_RESP,
                                 ISE_PREP_SNAPSHOT_RESP,
                                 ISE_GET_VOL1_STATUS_RESP,
                                 ISE_CREATE_SNAPSHOT_RESP,
                                 ISE_GET_VOL1_STATUS_RESP])
    self.setup_driver()
    self.driver.create_volume_from_snapshot(VOLUME1, SNAPSHOT1)
def test_extend_volume(self):
    """Test extending a volume, check the size in GB vs. GiB."""
    admin_ctxt = context.get_admin_context()
    vol_type = volume_types.create(admin_ctxt, 'hgst-1', {})
    vol = {'id': '1',
           'name': 'volume1',
           'display_name': '',
           'volume_type_id': vol_type['id'],
           'size': 10,
           'provider_id': 'volume10'}
    self.extended = {'name': '', 'size': '0', 'storageserver': ''}
    self.driver.extend_volume(vol, 12)
    # Growing from 10 to 12 means a 2(GB) space-extend request.
    wanted = {'name': 'volume10',
              'size': '2',
              'storageserver': 'stor1:gbd0,stor2:gbd0,'}
    self.assertDictMatch(wanted, self.extended)
def test_update_volume_type_name_with_db_error(self, mock_update_quota):
    """A quota-update DB failure must leave the type unchanged."""
    type_ref = volume_types.create(self.ctxt, self.vol_type1_name,
                                   self.vol_type1_specs,
                                   description=self.vol_type1_description)
    mock_update_quota.side_effect = db_exc.DBError
    updated_name = self.vol_type1_name + '_updated'
    self.assertRaises(exception.VolumeTypeUpdateFailed,
                      volume_types.update, self.ctxt, type_ref.id,
                      updated_name, 'new_test', False)
    mock_update_quota.assert_called_once_with(self.ctxt,
                                              self.vol_type1_name,
                                              updated_name)
    # Name, description, and visibility must all survive the failure.
    unchanged = volume_types.get_volume_type_by_name(self.ctxt,
                                                     self.vol_type1_name)
    self.assertEqual(self.vol_type1_name, unchanged.get('name'))
    self.assertEqual(self.vol_type1_description,
                     unchanged.get('description'))
    self.assertTrue(unchanged.get('is_public'))
    volume_types.destroy(self.ctxt, type_ref.id)
def test_create_volume_blocked_and_fail(self, mock_ghn):
    """Test volume creation where space-create blocked permanently.

    This should fail because the initial create was blocked and the
    request-cancel succeeded, meaning the create operation never
    completed.
    """
    ctxt = context.get_admin_context()
    extra_specs = {}
    type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
    volume = {'id': '1',
              'name': 'volume1',
              'display_name': '',
              'volume_type_id': type_ref['id'],
              'size': 10}
    self._return_blocked = 2  # Block & pass cancel => create failed. :(
    self.assertRaises(exception.VolumeDriverException,
                      self.driver.create_volume, volume)
    # Fix: assertTrue is the idiomatic check for a boolean flag,
    # rather than assertEqual(True, ...).
    self.assertTrue(self._request_cancel)
def test_retype_volume_different_host(self):
    """Retype across hosts: same cluster succeeds, different cluster fails."""
    ctxt = self.context
    loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
    cap = {'location_info': loc}
    host = {'host': 'foo', 'capabilities': cap}
    newloc = 'GPFSDriver:000000:testpath'
    newcap = {'location_info': newloc}
    newhost = {'host': 'foo', 'capabilities': newcap}
    key_specs_old = {'capabilities:storage_pool': 'bronze',
                     'volume_backend_name': 'backend1'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    diff, equal = volume_types.volume_types_diff(ctxt,
                                                 old_type_ref['id'],
                                                 old_type_ref['id'])
    # set volume host to be different from target host
    volume = test_utils.create_volume(ctxt, host=CONF.host)
    volume['volume_type_id'] = old_type['id']
    with mock.patch('cinder.utils.execute'):
        LOG.debug('Retype different hosts same cluster, '
                  'expected rv = True.')
        self.driver.db = mock.Mock()
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, old_type, diff, host)
        self.assertTrue(rv)
        self.driver.delete_volume(volume)
        # Fix: pass log arguments lazily instead of eagerly
        # %-formatting them at the call site.
        LOG.debug('Retype different hosts same cluster, rv = %s.', rv)
        LOG.debug('Retype different hosts, different cluster, '
                  'cannot migrate. Expected rv = False.')
        self.driver.create_volume(volume)
        rv = self.driver.retype(ctxt, volume, old_type, diff, newhost)
        self.assertFalse(rv)
        self.driver.delete_volume(volume)
        LOG.debug('Retype different hosts, different cluster, '
                  'cannot migrate, rv = %s.', rv)
def test_cli_error_not_blocked(self):
    """Test the _blocked handler's handling of a non-blocked error.

    The _handle_blocked handler is called on any process errors in
    the code. If the error was not caused by a blocked command
    condition (syntax error, out of space, etc.) then it should just
    throw the exception and not try and retry the command.
    """
    ctxt = context.get_admin_context()
    extra_specs = {}
    type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
    volume = {'id': '1', 'name': 'volume1', 'display_name': '',
              'volume_type_id': type_ref['id'], 'size': 10,
              'provider_id': 'volume10'}
    self.extended = {'name': '', 'size': '0', 'storageserver': ''}
    # Force a non-"blocked" CLI failure; the driver must raise and
    # must NOT attempt a request-cancel retry.
    self._fail_extend = True
    self.assertRaises(exception.VolumeDriverException,
                      self.driver.extend_volume, volume, 12)
    self.assertFalse(self._request_cancel)
def test_create_volume_from_snapshot(self, mock_ghn):
    """Test creating volume from snapshot, ensure full space copy."""
    ctxt = context.get_admin_context()
    extra_specs = {}
    type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
    snap = {'id': '1', 'name': 'volume1', 'display_name': '',
            'volume_type_id': type_ref['id'], 'size': 10,
            'provider_id': 'space_orig'}
    volume = {'id': '2', 'name': 'volume2', 'display_name': '',
              'volume_type_id': type_ref['id'], 'size': 10}
    pid = self.driver.create_volume_from_snapshot(volume, snap)
    # We must copy the entire underlying storage, ~12GB, not just 10GB
    self.assertEqual(11444 * units.Mi, self.dd_count)
    self.assertEqual('1M', self.bs)
    # Check space-create command
    expected = {'redundancy': '0', 'group': 'xanadu',
                'name': 'volume2', 'mode': '0777',
                'user': '******', 'net': 'net1',
                'storageserver': 'stor1:gbd0,stor2:gbd0,',
                'size': '12'}
    self.assertDictEqual(expected, self.created)
    # Check the returned provider
    expected_pid = {'provider_id': 'volume2'}
    self.assertDictEqual(expected_pid, pid)
def test_retype(self):
    """Retype succeeds when only QoS extra specs differ."""
    sfv = SolidFireDriver(configuration=self.configuration)
    self.stubs.Set(SolidFireDriver, '_issue_api_request',
                   self.fake_issue_api_request)
    type_ref = volume_types.create(self.ctxt, "type1",
                                   {"qos:minIOPS": "500",
                                    "qos:burstIOPS": "2000",
                                    "qos:maxIOPS": "1000"})
    # Diff describing a QoS bump from the old extra specs to new ones.
    diff = {'encryption': {},
            'qos_specs': {},
            'extra_specs': {'qos:burstIOPS': ('10000', u'2000'),
                            'qos:minIOPS': ('1000', u'500'),
                            'qos:maxIOPS': ('10000', u'1000')}}
    host = None
    testvol = {'project_id': 'testprjid',
               'name': 'test_volume',
               'size': 1,
               'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
               'created_at': timeutils.utcnow()}
    self.assertTrue(sfv.retype(self.ctxt, testvol, type_ref, diff, host))
def test_volume_type_create_then_destroy_with_non_admin(self):
    """Ensure volume types can be created and deleted by non-admin user.

    If a non-admin user is authorized at API, volume type operations
    should be permitted.
    """
    prev_all_vtypes = volume_types.get_all_types(self.ctxt)
    self.ctxt = context.RequestContext('fake', 'fake', is_admin=False)

    # create
    type_ref = volume_types.create(self.ctxt, self.vol_type1_name,
                                   self.vol_type1_specs,
                                   description=self.vol_type1_description)
    new = volume_types.get_volume_type_by_name(self.ctxt,
                                               self.vol_type1_name)
    self.assertEqual(self.vol_type1_description, new['description'])
    new_all_vtypes = volume_types.get_all_types(self.ctxt)
    self.assertEqual(len(prev_all_vtypes) + 1,
                     len(new_all_vtypes),
                     'drive type was not created')

    # update
    new_type_name = self.vol_type1_name + '_updated'
    new_type_desc = self.vol_type1_description + '_updated'
    type_ref_updated = volume_types.update(self.ctxt, type_ref.id,
                                           new_type_name,
                                           new_type_desc)
    self.assertEqual(new_type_name, type_ref_updated['name'])
    self.assertEqual(new_type_desc, type_ref_updated['description'])

    # destroy
    volume_types.destroy(self.ctxt, type_ref['id'])
    new_all_vtypes = volume_types.get_all_types(self.ctxt)
    self.assertEqual(prev_all_vtypes, new_all_vtypes,
                     'drive type was not deleted')
def test_create_cloned_volume(self, mock_ghn):
    """Test creating a clone, ensure full size is copied from original."""
    ctxt = context.get_admin_context()
    extra_specs = {}
    type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)
    orig = {'id': '1', 'name': 'volume1', 'display_name': '',
            'volume_type_id': type_ref['id'], 'size': 10,
            'provider_id': 'space_orig'}
    clone = {'id': '2', 'name': 'clone1', 'display_name': '',
             'volume_type_id': type_ref['id'], 'size': 10}
    pid = self.driver.create_cloned_volume(clone, orig)
    # We must copy the entire underlying storage, ~12GB, not just 10GB
    self.assertEqual(11444, self.dd_count)
    # Check space-create command
    expected = {'redundancy': '0', 'group': 'xanadu',
                'name': 'clone1', 'mode': '0777',
                'user': '******', 'net': 'net1',
                'storageserver': 'stor1:gbd0,stor2:gbd0,',
                'size': '12'}
    self.assertDictMatch(expected, self.created)
    # Check the returned provider
    expected_pid = {'provider_id': 'clone1'}
    self.assertDictMatch(expected_pid, pid)
def test_extend_volume_noextend(self):
    """Test extending a volume where Space does not need to be enlarged.

    Because Spaces are generated somewhat larger than the requested
    size from OpenStack due to the base10(HGST)/base2(OS) mismatch,
    they can sometimes be larger than requested from OS. In that case
    a volume_extend may actually be a noop since the volume is already
    large enough to satisfy OS's request.
    """
    admin_ctxt = context.get_admin_context()
    vol_type = volume_types.create(admin_ctxt, 'hgst-1', {})
    vol = {'id': '1',
           'name': 'volume1',
           'display_name': '',
           'volume_type_id': vol_type['id'],
           'size': 10,
           'provider_id': 'volume10'}
    self.extended = {'name': '', 'size': '0', 'storageserver': ''}
    self.driver.extend_volume(vol, 10)
    # Extending to the same OS size must not issue a space-extend.
    untouched = {'name': '', 'size': '0', 'storageserver': ''}
    self.assertDictMatch(untouched, self.extended)
def test_add_cinder_apphosts_fails(self, mock_ghn):
    """Test exception when set-apphost can't connect volume to host."""
    admin_ctxt = context.get_admin_context()
    vol_type = volume_types.create(admin_ctxt, 'hgst-1', {})
    source = {'id': '1',
              'name': 'volume1',
              'display_name': '',
              'volume_type_id': vol_type['id'],
              'size': 10,
              'provider_id': 'space_orig'}
    clone = {'id': '2',
             'name': 'clone1',
             'display_name': '',
             'volume_type_id': vol_type['id'],
             'size': 10}
    # Make the set-apphost step fail; the clone must surface the error.
    self._fail_set_apphosts = True
    self.assertRaises(exception.VolumeDriverException,
                      self.driver.create_cloned_volume, clone, source)
def test_manage_existing(self, mock_req):
    """Managing an existing array volume issues the proper requests."""
    admin_ctxt = context.get_admin_context()
    vol_type = volume_types.create(admin_ctxt, 'VT1',
                                   {"Feature:Pool": "1",
                                    "Feature:Raid": "1",
                                    "Affinity:Type": "flash",
                                    "Alloc:Type": "thick"})
    qos = qos_specs.create(admin_ctxt, 'fake-qos',
                           {'qos:minIOPS': '20',
                            'qos:maxIOPS': '2000',
                            'qos:burstIOPS': '5000'})
    qos_specs.associate_qos_with_type(admin_ctxt, qos['id'],
                                      vol_type['id'])
    VOLUME1['volume_type_id'] = vol_type['id']
    self.setup_driver()
    # iSCSI requires one extra (IO networks) response compared to FC.
    if self.configuration.ise_protocol == 'iscsi':
        mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
                                     ISE_GET_VOL1_STATUS_RESP,
                                     ISE_MODIFY_VOLUME_RESP,
                                     ISE_GET_IONETWORKS_RESP])
    elif self.configuration.ise_protocol == 'fibre_channel':
        mock_req.side_effect = iter([ISE_GET_QUERY_RESP,
                                     ISE_GET_VOL1_STATUS_RESP,
                                     ISE_MODIFY_VOLUME_RESP])
    self.driver.manage_existing(VOLUME1, {'source-name': 'testvol'})
def test_associate_qos_with_type(self):
    """Associate qos specs with a type; error paths raise properly."""
    def fake_db_associate(context, id, type_id):
        # Sentinel ids trigger the simulated backend failures.
        if id == 'Trouble':
            raise db_exc.DBError()
        elif type_id == 'NotFound':
            raise exception.VolumeTypeNotFound(volume_type_id=type_id)
        pass

    type_ref = volume_types.create(self.ctxt, 'TypeName')
    specs_id = self._create_qos_specs('QoSName')
    qos_specs.associate_qos_with_type(self.ctxt, specs_id,
                                      type_ref['id'])
    res = qos_specs.get_associations(self.ctxt, specs_id)
    # Fix: replace deprecated assertEquals with assertEqual and
    # assertTrue(x in y) with the clearer assertIn.
    self.assertEqual(1, len(res[specs_id].keys()))
    self.assertIn('TypeName', res[specs_id].keys())
    self.assertIn(type_ref['id'], res[specs_id].values())
    self.stubs.Set(db, 'qos_specs_associate', fake_db_associate)
    self.assertRaises(exception.VolumeTypeNotFound,
                      qos_specs.associate_qos_with_type,
                      self.ctxt, 'specs-id', 'NotFound')
    self.assertRaises(exception.QoSSpecsAssociateFailed,
                      qos_specs.associate_qos_with_type,
                      self.ctxt, 'Trouble', 'id')
def test_instorage_get_host_with_fc_connection_with_volume(self):
    """Host lookup by connector succeeds even with an empty lsfabric."""
    # Build an FC volume type and attach it to a fresh volume.
    fc_spec = {'capabilities:storage_protocol': '<in> FC'}
    vol_type_fc = volume_types.create(self.ctxt, 'FC', fc_spec)
    volume_fc = self._generate_vol_info(None, None)
    volume_fc['volume_type_id'] = vol_type_fc['id']
    self.fc_driver.create_volume(volume_fc)
    connector = {'host': 'instorage-mcs-host',
                 'wwnns': ['20000090fa17311e', '20000090fa17311f'],
                 'wwpns': ['ff00000000000000', 'ff00000000000001'],
                 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'}
    # Initializing the connection creates the FC host on the backend.
    self.fc_driver.initialize_connection(volume_fc, connector)
    helper = self.fc_driver._assistant
    # Force lsfabric to report nothing; lookup must still succeed.
    self.sim.error_injection('lsfabric', 'no_hosts')
    found = helper.get_host_from_connector(connector,
                                           volume_fc['name'])
    self.assertIsNotNone(found)
def test_disassociate_qos_specs(self):
    """Disassociating qos specs works; error paths raise properly."""
    def fake_qos_specs_get(context, id):
        # Only the 'NotFound' sentinel triggers a lookup failure.
        if id == 'NotFound':
            raise exception.QoSSpecsNotFound(specs_id=id)

    def fake_db_disassociate(context, id, type_id):
        if id == 'Trouble':
            raise db_exc.DBError()
        if type_id == 'NotFound':
            raise exception.VolumeTypeNotFound(volume_type_id=type_id)

    type_ref = volume_types.create(self.ctxt, 'TypeName')
    specs_id = self._create_qos_specs('QoSName')
    qos_specs.associate_qos_with_type(self.ctxt, specs_id,
                                      type_ref['id'])
    associations = qos_specs.get_associations(self.ctxt, specs_id)
    self.assertEqual(1, len(associations))
    qos_specs.disassociate_qos_specs(self.ctxt, specs_id,
                                     type_ref['id'])
    associations = qos_specs.get_associations(self.ctxt, specs_id)
    self.assertEqual(0, len(associations))
    self.stubs.Set(db, 'qos_specs_disassociate', fake_db_disassociate)
    self.stubs.Set(qos_specs, 'get_qos_specs', fake_qos_specs_get)
    self.assertRaises(exception.VolumeTypeNotFound,
                      qos_specs.disassociate_qos_specs,
                      self.ctxt, 'specs-id', 'NotFound')
    self.assertRaises(exception.QoSSpecsDisassociateFailed,
                      qos_specs.disassociate_qos_specs,
                      self.ctxt, 'Trouble', 'id')
def test_instorage_initiator_multiple_wwpns_connected(self):
    """initialize_connection reports every WWPN that was mapped."""
    # Generate us a test volume and tag it with an FC volume type.
    volume = self._create_volume()
    fc_type = volume_types.create(
        self.ctxt, 'FC', {'capabilities:storage_protocol': '<in> FC'})
    volume['volume_type_id'] = fc_type['id']
    self._assert_vol_exists(volume['name'], True)
    # One node WWPN that matches the mappings and one that does not.
    self.fc_driver._state['storage_nodes']['1']['WWPN'] = [
        '123456789ABCDEF0', 'AABBCCDDEEFF0010']
    connector = {'host': 'instorage-mcs-test',
                 'wwpns': ['ff00000000000000', 'ff00000000000001']}
    with mock.patch.object(instorage_common.InStorageAssistant,
                           'get_conn_fc_wwpns') as get_mappings:
        mapped_wwpns = ['AABBCCDDEEFF0001', 'AABBCCDDEEFF0002',
                        'AABBCCDDEEFF0010', 'AABBCCDDEEFF0012']
        get_mappings.return_value = mapped_wwpns
        init_ret = self.fc_driver.initialize_connection(volume,
                                                        connector)
        # Every WWPN mapped for the connection must be returned.
        self.assertEqual(mapped_wwpns, init_ret['data']['target_wwn'])
def test_volume_type_search_by_extra_spec(self):
    """Ensure volume types get by extra spec returns correct type."""
    volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                             "key2": "val2"})
    volume_types.create(self.ctxt, "type2", {"key2": "val2",
                                             "key3": "val3"})
    volume_types.create(self.ctxt, "type3", {"key3": "another_value",
                                             "key4": "val4"})

    vol_types = volume_types.get_all_types(
        self.ctxt,
        search_opts={'extra_specs': {"key1": "val1"}})
    # Fix: pass log arguments lazily instead of eagerly %-formatting
    # them at the call site.
    LOG.info("vol_types: %s", vol_types)
    self.assertEqual(1, len(vol_types))
    self.assertIn("type1", vol_types.keys())
    self.assertEqual({"key1": "val1", "key2": "val2"},
                     vol_types['type1']['extra_specs'])

    vol_types = volume_types.get_all_types(
        self.ctxt,
        search_opts={'extra_specs': {"key2": "val2"}})
    LOG.info("vol_types: %s", vol_types)
    self.assertEqual(2, len(vol_types))
    self.assertIn("type1", vol_types.keys())
    self.assertIn("type2", vol_types.keys())

    vol_types = volume_types.get_all_types(
        self.ctxt,
        search_opts={'extra_specs': {"key3": "val3"}})
    LOG.info("vol_types: %s", vol_types)
    self.assertEqual(1, len(vol_types))
    self.assertIn("type2", vol_types.keys())
def test_volume_type_search_by_extra_spec_multiple(self):
    """Ensure volume types get by extra spec returns correct type."""
    volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                             "key2": "val2",
                                             "key3": "val3"})
    volume_types.create(self.ctxt, "type2", {"key2": "val2",
                                             "key3": "val3"})
    volume_types.create(self.ctxt, "type3", {"key1": "val1",
                                             "key3": "val3",
                                             "key4": "val4"})
    # Filtering on two specs must match only the types carrying both.
    matches = volume_types.get_all_types(
        self.ctxt,
        filters={'extra_specs': {"key1": "val1", "key3": "val3"}})
    self.assertEqual(2, len(matches))
    self.assertIn("type1", matches.keys())
    self.assertIn("type3", matches.keys())
    self.assertEqual({"key1": "val1", "key2": "val2", "key3": "val3"},
                     matches['type1']['extra_specs'])
    self.assertEqual({"key1": "val1", "key3": "val3", "key4": "val4"},
                     matches['type3']['extra_specs'])
def setUp(self):
    """Create the volume types shared by the retype tests."""
    super(VolumeRetypeTestCase, self).setUp()
    self.patch('cinder.volume.utils.clear_volume', autospec=True)
    self.expected_status = 'available'
    self.service_id = 1
    self.user_context = context.RequestContext(
        user_id=fake.USER_ID, project_id=fake.PROJECT_ID)
    # One plain default type, one fake type, and one multiattach type.
    for name, specs, desc in (
            ("old-type", {}, "test-multiattach"),
            ("fake_vol_type", {}, "fake_type"),
            ("multiattach-type", {'multiattach': "<is> True"},
             "test-multiattach")):
        volume_types.create(self.context, name, specs,
                            description=desc)
    self.default_vol_type = objects.VolumeType.get_by_name_or_id(
        self.context, 'fake_vol_type')
    self.multiattach_type = objects.VolumeType.get_by_name_or_id(
        self.context, 'multiattach-type')
def _create_volume_type(self, volume_type_name, extra_specs=None,
                        is_public=True, projects=None):
    """Create a volume type and return its id."""
    vol_type = volume_types.create(self.ctxt, volume_type_name,
                                   extra_specs, is_public, projects)
    return vol_type.get('id')
def create_coprhd_volume_type(self):
    """Create and return the CoprHD volume type used by these tests."""
    admin_ctx = context.get_admin_context()
    return volume_types.create(admin_ctx, "coprhd-volume-type",
                               {'CoprHD:VPOOL': 'vpool_vipr'})