def test_do_create_volume_with_volume_type(self, mock_exists):
    """Per-volume metadata format overrides the volume type's extra spec."""
    drv = self._vz_driver
    drv.local_path = mock.Mock(return_value=self._FAKE_VOLUME_PATH)
    drv._write_info_file = mock.Mock()
    drv._qemu_img_info = mock.Mock()
    drv._create_qcow2_file = mock.Mock()
    drv._create_ploop = mock.Mock()

    # The volume type requests qcow2 via the vz-scoped extra spec.
    vol_type = fake_volume.fake_volume_type_obj(self.context)
    vol_type.extra_specs = {'vz:volume_format': 'qcow2'}

    qcow2_volume = fake_volume.fake_volume_obj(self.context)
    qcow2_volume.size = 1024
    qcow2_volume.volume_type = vol_type

    # A copy of the same volume whose metadata asks for ploop instead.
    ploop_volume = copy.deepcopy(qcow2_volume)
    ploop_volume.metadata = {'volume_format': 'ploop'}

    drv._do_create_volume(qcow2_volume)
    drv._create_qcow2_file.assert_called_once_with(
        self._FAKE_VOLUME_PATH, 1024)

    drv._do_create_volume(ploop_volume)
    drv._create_ploop.assert_called_once_with(
        self._FAKE_VOLUME_PATH, 1024)
def setUp(self):
    """Build a VZStorage driver wired to mocks plus fake volume/snapshot."""
    super(VZStorageTestCase, self).setUp()

    # Patch the remotefs client class so no real mounting happens.
    self._remotefsclient = mock.patch.object(remotefs,
                                             'RemoteFsClient').start()
    self._remotefsclient.get_mount_point = mock.Mock(
        return_value=self._FAKE_MNT_POINT)

    cfg = copy.copy(self._FAKE_VZ_CONFIG)
    self._vz_driver = vzstorage.VZStorageDriver(configuration=cfg)
    self._vz_driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    self._vz_driver._execute = mock.Mock()
    self._vz_driver.base = self._FAKE_MNT_BASE

    self.context = context.get_admin_context()

    vol_type = fake_volume.fake_volume_type_obj(self.context)
    vol_type.extra_specs = {}
    self.vol = fake_volume.fake_volume_obj(
        self.context,
        volume_type_id=vol_type.id,
        id='4f711859-4928-4cb7-801a-a50c37ceaccc',
        size=1,
        provider_location=self._FAKE_SHARE,
        name=self._FAKE_VOLUME_NAME,
        status='available')
    self.vol.volume_type = vol_type

    self.snap = fake_snapshot.fake_snapshot_obj(
        self.context,
        id=self._FAKE_SNAPSHOT_ID,
        status='available',
        volume_size=1)
    self.snap.volume = self.vol
def test_create_volume_with_group_invalid_type(self):
    """Test volume creation with group & invalid volume type."""
    vol_type = db.volume_type_create(
        context.get_admin_context(),
        dict(name=conf_fixture.def_vol_type, extra_specs={}))
    db_vol_type = db.volume_type_get(context.get_admin_context(),
                                     vol_type.id)

    grp = tests_utils.create_group(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        status=fields.GroupStatus.AVAILABLE,
        volume_type_ids=[db_vol_type['id']],
        group_type_id=fake.GROUP_TYPE_ID,
        host=CONF.host)
    fake_type = fake_volume.fake_volume_type_obj(
        self.context, id=fake.VOLUME_TYPE_ID, name='fake')

    # Creating a volume inside a group without specifying a volume
    # type must be rejected.
    self.assertRaises(exception.InvalidInput,
                      self.volume_api.create,
                      self.context, 1, 'vol1', 'volume 1',
                      group=grp)
    # A volume type that does not belong to the group is also invalid.
    self.assertRaises(exception.InvalidInput,
                      self.volume_api.create,
                      self.context, 1, 'vol1', 'volume 1',
                      volume_type=fake_type,
                      group=grp)
def test_get_volume_format_spec(self, volume_meta_contains_fmt,
                                volume_type_contains_fmt):
    """Format resolution order: metadata, then extra specs, then config."""
    self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)

    meta_fmt = 'vhd'
    type_fmt = 'vhdx'

    metadata = {}
    extra_specs = {}
    if volume_meta_contains_fmt:
        metadata['volume_format'] = meta_fmt
    elif volume_type_contains_fmt:
        extra_specs['volume_format'] = type_fmt

    volume_type = fake_volume.fake_volume_type_obj(self.context)
    volume = fake_volume.fake_volume_obj(self.context)
    # Optional fields are not populated by _from_db_object, so they
    # have to be attached to the fake objects explicitly.
    volume.volume_type = volume_type
    volume.metadata = metadata
    volume_type.extra_specs = extra_specs

    resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume)

    if volume_meta_contains_fmt:
        expected_fmt = meta_fmt
    elif volume_type_contains_fmt:
        expected_fmt = type_fmt
    else:
        expected_fmt = self._FAKE_SMBFS_CONFIG.smbfs_default_volume_format
    self.assertEqual(expected_fmt, resulted_fmt)
def setUp(self):
    """Prepare the Kaminario iSCSI driver plus fake volume and snapshot."""
    self._setup_config()
    self._setup_driver()
    super(TestKaminarioISCSI, self).setUp()

    ctxt = context.get_admin_context()
    self.context = ctxt
    self.vol = fake_volume.fake_volume_obj(ctxt)
    self.vol.volume_type = fake_volume.fake_volume_type_obj(ctxt)
    self.snap = fake_snapshot.fake_snapshot_obj(ctxt)
    self.snap.volume = self.vol
def test_create_group_from_group(self, mock_volume_get_all,
                                 mock_rpc_create_group_from_src,
                                 mock_group_get, mock_volume_api_create,
                                 mock_mapping_create, mock_get_volume_type):
    """Cloning a group creates its volumes and casts to the RPC layer."""
    vol_type = fake_volume.fake_volume_type_obj(
        self.ctxt, id=fake.VOLUME_TYPE_ID, name='fake_volume_type')
    mock_get_volume_type.return_value = vol_type

    # Source group containing a single volume.
    src_grp = utils.create_group(self.ctxt,
                                 group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 status=fields.GroupStatus.CREATING)
    mock_group_get.return_value = src_grp
    src_vol = utils.create_volume(
        self.ctxt,
        availability_zone=src_grp.availability_zone,
        volume_type_id=fake.VOLUME_TYPE_ID,
        group_id=src_grp.id)
    mock_volume_get_all.return_value = [src_vol]

    # Destination group cloned from the source group.
    dst_grp = utils.create_group(self.ctxt,
                                 group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 source_group_id=src_grp.id,
                                 status=fields.GroupStatus.CREATING)
    dst_vol = utils.create_volume(
        self.ctxt,
        availability_zone=src_grp.availability_zone,
        volume_type_id=vol_type['id'],
        group_id=dst_grp.id,
        source_volid=src_vol.id)

    self.group_api._create_group_from_source_group(self.ctxt, dst_grp,
                                                   src_grp.id)

    mock_volume_api_create.assert_called_once_with(
        self.ctxt, 1, None, None,
        availability_zone=src_grp.availability_zone,
        source_group=src_grp,
        group=dst_grp,
        source_volume=src_vol,
        volume_type=vol_type)
    mock_rpc_create_group_from_src.assert_called_once_with(
        self.ctxt, dst_grp, None, src_grp)

    dst_vol.destroy()
    dst_grp.destroy()
    src_vol.destroy()
    src_grp.destroy()
def test_is_replicated(self, result):
    """Volume.is_replicated() delegates to the attached volume type."""
    vol_type = fake_volume.fake_volume_type_obj(self.context)
    volume = fake_volume.fake_volume_obj(self.context,
                                         volume_type_id=vol_type.id)
    volume.volume_type = vol_type

    patcher = mock.patch.object(vol_type, 'is_replicated',
                                return_value=result)
    with patcher as mock_is_replicated:
        self.assertEqual(result, volume.is_replicated())
    # The delegation must happen exactly once, with no arguments.
    mock_is_replicated.assert_called_once_with()
def test_create_group_from_group(self, mock_volume_get_all,
                                 mock_rpc_create_group_from_src,
                                 mock_group_get, mock_volume_api_create,
                                 mock_mapping_create, mock_get_volume_type):
    """Group cloning creates member volumes and issues the RPC cast."""
    vol_type = fake_volume.fake_volume_type_obj(self.ctxt,
                                                id=fake.VOLUME_TYPE_ID,
                                                name='fake_volume_type')
    mock_get_volume_type.return_value = vol_type

    source_group = utils.create_group(
        self.ctxt,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']],
        availability_zone='nova',
        status=fields.GroupStatus.CREATING)
    mock_group_get.return_value = source_group

    source_volume = utils.create_volume(
        self.ctxt,
        availability_zone=source_group.availability_zone,
        volume_type_id=fake.VOLUME_TYPE_ID,
        group_id=source_group.id)
    mock_volume_get_all.return_value = [source_volume]

    clone_group = utils.create_group(
        self.ctxt,
        group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[vol_type['id']],
        availability_zone='nova',
        source_group_id=source_group.id,
        status=fields.GroupStatus.CREATING)
    clone_volume = utils.create_volume(
        self.ctxt,
        availability_zone=source_group.availability_zone,
        volume_type_id=vol_type['id'],
        group_id=clone_group.id,
        source_volid=source_volume.id)

    self.group_api._create_group_from_source_group(self.ctxt, clone_group,
                                                   source_group.id)

    # One volume-create per source volume, then one cast to the backend.
    mock_volume_api_create.assert_called_once_with(
        self.ctxt, 1, None, None,
        availability_zone=source_group.availability_zone,
        source_group=source_group,
        group=clone_group,
        source_volume=source_volume,
        volume_type=vol_type)
    mock_rpc_create_group_from_src.assert_called_once_with(
        self.ctxt, clone_group, None, source_group)

    clone_volume.destroy()
    clone_group.destroy()
    source_volume.destroy()
    source_group.destroy()
def setUp(self):
    """Configure a mocked HNAS NFS driver with fake volume fixtures."""
    super(HNASNFSDriverTest, self).setUp()
    self.context = context.get_admin_context()

    self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
    self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
    self.volume_type = fake_volume.fake_volume_type_obj(None,
                                                        name="silver")
    self.clone = fake_volume.fake_volume_obj(
        None,
        id=fake.VOLUME2_ID,
        size=128,
        host="host1@hnas-nfs-backend#default",
        volume_type="default",
        provider_location="hnas")

    # Configuration as hnas_utils.read_cinder_conf would parse it.
    self.parsed_xml = {
        "username": "******",
        "password": "******",
        "hnas_cmd": "ssc",
        "ssh_port": "22",
        "services": {
            "default": {
                "hdp": "172.24.49.21:/fs-cinder",
                "volume_type": "default",
                "label": "svc_0",
                "ctl": "1",
                "export": {"fs": "fs-cinder",
                           "path": "/export-cinder/volume"},
            }
        },
        "cluster_admin_ip0": None,
        "ssh_private_key": None,
        "chap_enabled": "True",
        "mgmt_ip0": "172.17.44.15",
        "ssh_enabled": None,
    }

    # NOTE(review): this first Configuration mock is immediately
    # replaced below and appears redundant; kept to preserve the
    # original setup order.
    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.hds_hnas_nfs_config_file = "fake.xml"

    self.mock_object(hnas_utils, "read_cinder_conf",
                     mock.Mock(return_value=self.parsed_xml))

    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.max_over_subscription_ratio = 20.0
    self.configuration.reserved_percentage = 0
    self.configuration.hds_hnas_nfs_config_file = "fake_config.xml"
    self.configuration.nfs_shares_config = "fake_nfs_share.xml"
    self.configuration.num_shell_tries = 2

    self.driver = nfs.HNASNFSDriver(configuration=self.configuration)
def setUp(self):
    """Prepare the Kaminario common driver, fixtures, and sleep patch."""
    self._setup_config()
    self._setup_driver()
    super(TestKaminarioCommon, self).setUp()

    ctxt = context.get_admin_context()
    self.context = ctxt
    self.vol = fake_volume.fake_volume_obj(ctxt)
    self.vol.volume_type = fake_volume.fake_volume_type_obj(ctxt)
    self.vol.volume_type.extra_specs = {'foo': None}
    self.snap = fake_snapshot.fake_snapshot_obj(ctxt)
    self.snap.volume = self.vol

    # Keep retry loops in the driver from actually sleeping.
    self.patch('eventlet.sleep')
def _create_fake_config(self):
    """Build a mocked SAN configuration and a Hedvig iSCSI driver.

    Also creates an admin context plus fake volume/snapshot fixtures
    used by the tests.
    """
    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.san_ip = '1.0.0.1'
    self.configuration.san_login = '******'
    self.configuration.san_password = '******'
    self.configuration.san_clustername = 'dummy_cluster'
    self.configuration.san_is_local = False
    # Bug fix: the original stored the context as ``self.ctxt`` but then
    # read ``self.context``, which raises AttributeError unless a caller
    # happened to define it first.  Bind both names so either spelling
    # keeps working for existing callers.
    self.ctxt = self.context = context.get_admin_context()
    self.vol = fake_volume.fake_volume_obj(self.context)
    self.vol.volume_type = fake_volume.fake_volume_type_obj(self.context)
    self.snap = fake_snapshot.fake_snapshot_obj(self.context)
    self.snap.volume = self.vol
    self.driver = hdvg.HedvigISCSIDriver(configuration=self.configuration)
def test_create_group_from_group_create_volume_failed(
        self, mock_volume_get_all, mock_rpc_create_group_from_src,
        mock_group_get, mock_volume_api_create, mock_mapping_create,
        mock_get_volume_type, mock_volume_delete):
    """A volume-create failure mid-clone rolls back created volumes."""
    vol_type = fake_volume.fake_volume_type_obj(
        self.ctxt, id=fake.VOLUME_TYPE_ID, name='fake_volume_type')
    mock_get_volume_type.return_value = vol_type

    src_grp = utils.create_group(self.ctxt,
                                 group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 status=fields.GroupStatus.CREATING)
    mock_group_get.return_value = src_grp

    vol1 = utils.create_volume(
        self.ctxt,
        availability_zone=src_grp.availability_zone,
        volume_type_id=fake.VOLUME_TYPE_ID,
        group_id=src_grp.id)
    vol2 = utils.create_volume(
        self.ctxt,
        availability_zone=src_grp.availability_zone,
        volume_type_id=fake.VOLUME_TYPE_ID,
        group_id=src_grp.id)
    # First get_all returns both source volumes; the second returns only
    # the volume whose clone succeeded, i.e. the one to be rolled back.
    mock_volume_get_all.side_effect = [[vol1, vol2], [vol1]]

    dst_grp = utils.create_group(self.ctxt,
                                 group_type_id=fake.GROUP_TYPE_ID,
                                 volume_type_ids=[vol_type['id']],
                                 availability_zone='nova',
                                 source_group_id=src_grp.id,
                                 status=fields.GroupStatus.CREATING)

    # First clone succeeds, second one explodes.
    mock_volume_api_create.side_effect = [None, exception.CinderException]

    self.assertRaises(
        exception.CinderException,
        self.group_api._create_group_from_source_group,
        self.ctxt, dst_grp, src_grp.id)

    # No RPC cast, and the successfully created volume is deleted.
    mock_rpc_create_group_from_src.assert_not_called()
    mock_volume_delete.assert_called_once_with(self.ctxt, vol1)

    dst_grp.destroy()
    vol2.destroy()
    vol1.destroy()
    src_grp.destroy()
def test_do_create_volume_with_volume_type(self, mock_exists):
    """A moosefs:volume_format spec of qcow2 triggers qcow2 creation."""
    drv = self._mfs_driver
    drv.local_path = mock.Mock(return_value=self._FAKE_VOLUME_PATH)
    drv._write_info_file = mock.Mock()
    drv._qemu_img_info = mock.Mock()
    drv._create_qcow2_file = mock.Mock()

    vol_type = fake_volume.fake_volume_type_obj(self.context)
    vol_type.extra_specs = {'moosefs:volume_format': 'qcow2'}
    volume = fake_volume.fake_volume_obj(self.context)
    volume.size = 1024
    volume.volume_type = vol_type

    drv._do_create_volume(volume)
    drv._create_qcow2_file.assert_called_once_with(
        self._FAKE_VOLUME_PATH, 1024)
def setUp(self):
    """Set HNAS config overrides and build fake volume/type fixtures."""
    super(HNASUtilsTest, self).setUp()
    self.fake_conf = conf.Configuration(hnas_utils.CONF)

    # Apply all HNAS option overrides used by these tests in one pass.
    for opt, value in (('hnas_username', 'supervisor'),
                       ('hnas_password', 'supervisor'),
                       ('hnas_mgmt_ip0', '172.24.44.15'),
                       ('hnas_svc0_pool_name', 'default'),
                       ('hnas_svc0_hdp', 'easy-stack'),
                       ('hnas_svc1_pool_name', 'FS-CinderDev1'),
                       ('hnas_svc1_hdp', 'silver')):
        self.override_config(opt, value)

    self.context = context.get_admin_context()
    self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
    self.volume_type = fake_volume.fake_volume_type_obj(
        None, id=fake_constants.VOLUME_TYPE_ID, name='silver')
def setUp(self):
    """Set HNAS iSCSI config overrides and build fake fixtures."""
    super(HNASUtilsTest, self).setUp()
    self.fake_conf = conf.Configuration(hnas_utils.CONF)
    self.fake_conf.append_config_values(hnas_iscsi.iSCSI_OPTS)

    # Apply all HNAS iSCSI option overrides in one pass.
    for opt, value in (('hnas_username', 'supervisor'),
                       ('hnas_password', 'supervisor'),
                       ('hnas_mgmt_ip0', '172.24.44.15'),
                       ('hnas_svc0_volume_type', 'default'),
                       ('hnas_svc0_hdp', 'easy-stack'),
                       ('hnas_svc0_iscsi_ip', '172.24.49.21'),
                       ('hnas_svc1_volume_type', 'FS-CinderDev1'),
                       ('hnas_svc1_hdp', 'silver'),
                       ('hnas_svc1_iscsi_ip', '172.24.49.32')):
        self.override_config(opt, value)

    self.context = context.get_admin_context()
    self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
    self.volume_type = fake_volume.fake_volume_type_obj(
        None, id=fake_constants.VOLUME_TYPE_ID, name='silver')
def setUp(self):
    """Construct a VZStorage driver around a MagicMock configuration."""
    super(VZStorageTestCase, self).setUp()

    self._cfg = mock.MagicMock()
    self._cfg.vzstorage_shares_config = '/fake/config/path'
    self._cfg.vzstorage_sparsed_volumes = False
    self._cfg.vzstorage_used_ratio = 0.7
    self._cfg.vzstorage_mount_point_base = self._FAKE_MNT_BASE
    self._cfg.vzstorage_default_volume_format = 'raw'
    self._cfg.nas_secure_file_operations = 'auto'
    self._cfg.nas_secure_file_permissions = 'auto'

    self._vz_driver = vzstorage.VZStorageDriver(configuration=self._cfg)
    self._vz_driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    self._vz_driver._execute = mock.Mock()
    self._vz_driver.base = self._FAKE_MNT_BASE

    self.context = context.get_admin_context()

    vol_type = fake_volume.fake_volume_type_obj(self.context)
    vol_type.extra_specs = {}
    self.vol = fake_volume.fake_volume_obj(
        self.context,
        volume_type_id=vol_type.id,
        id='4f711859-4928-4cb7-801a-a50c37ceaccc',
        size=1,
        provider_location=self._FAKE_SHARE,
        name=self._FAKE_VOLUME_NAME,
        status='available')
    self.vol.volume_type = vol_type

    self.snap = fake_snapshot.fake_snapshot_obj(
        self.context,
        id=self._FAKE_SNAPSHOT_ID,
        status='available',
        volume_size=1)
    self.snap.volume = self.vol
def test_create_db_entry_task_with_multiattach(self):
    """EntryCreateTask propagates multiattach from the volume type."""
    multiattach_type = fake_volume.fake_volume_type_obj(
        self.ctxt, extra_specs={'multiattach': '<is> True'})

    spec = {
        'name': 'name',
        'description': 'description',
        'host': 'host',
        'ref': 'ref',
        'volume_type': multiattach_type,
        'metadata': {},
        'availability_zone': 'availability_zone',
        'bootable': 'bootable',
        'volume_type_id': multiattach_type.id,
        'cluster_name': 'fake_cluster',
    }
    task = manage_existing.EntryCreateTask(fake_volume_api.FakeDb())

    result = task.execute(self.ctxt, **spec)
    self.assertTrue(result['volume_properties']['multiattach'])
def test_retype(self):
    """Test retype."""
    # An existing replication session reports a truthy status.
    self.assertTrue(self.driver._get_replica_status('test'))
    # The default fake volume type is not replica-enabled.
    self.assertFalse(self.driver._get_is_replica(self.vol.volume_type))

    self.driver.replica = Replication()
    self.assertIsNone(self.driver._add_replication(self.vol))

    self.driver.target = FakeKrest()
    self.driver._check_for_status = mock.Mock()
    self.assertIsNone(self.driver._delete_replication(self.vol))

    self.driver._delete_volume_replica = mock.Mock()
    # Retyping to the same volume type succeeds in-place.
    self.assertTrue(self.driver.retype(None, self.vol,
                                       self.vol.volume_type, None, None))

    # Retyping to a nodedup provisioning type cannot be done in-place.
    nodedup_type = fake_volume.fake_volume_type_obj(self.context)
    nodedup_type.extra_specs = {'kaminario:thin_prov_type': 'nodedup'}
    self.assertFalse(self.driver.retype(None, self.vol, nodedup_type,
                                        None, None))
def test_create_volume_with_group_invalid_type(self):
    """Test volume creation with group & invalid volume type."""
    admin_ctxt = context.get_admin_context()
    vol_type = db.volume_type_create(
        admin_ctxt, dict(name=conf_fixture.def_vol_type, extra_specs={}))
    db_vol_type = db.volume_type_get(context.get_admin_context(),
                                     vol_type.id)

    grp = tests_utils.create_group(
        self.context,
        availability_zone=CONF.storage_availability_zone,
        status=fields.GroupStatus.AVAILABLE,
        volume_type_ids=[db_vol_type['id']],
        group_type_id=fake.GROUP_TYPE_ID,
        host=CONF.host)
    fake_type = fake_volume.fake_volume_type_obj(self.context,
                                                 id=fake.VOLUME_TYPE_ID,
                                                 name='fake')

    # Omitting the volume type when creating inside a group is invalid.
    self.assertRaises(exception.InvalidInput,
                      self.volume_api.create,
                      self.context, 1, 'vol1', 'volume 1',
                      group=grp)
    # Supplying a type that is not part of the group is invalid too.
    self.assertRaises(exception.InvalidInput,
                      self.volume_api.create,
                      self.context, 1, 'vol1', 'volume 1',
                      volume_type=fake_type,
                      group=grp)
def test_do_create_volume_with_volume_type(self, mock_exists):
    """Metadata-requested format wins over the type's extra-spec format."""
    drv = self._vz_driver
    drv.local_path = mock.Mock(return_value=self._FAKE_VOLUME_PATH)
    drv._write_info_file = mock.Mock()
    drv._qemu_img_info = mock.Mock()
    drv._create_qcow2_file = mock.Mock()
    drv._create_ploop = mock.Mock()

    # The type's extra spec requests qcow2.
    spec_type = fake_volume.fake_volume_type_obj(self.context)
    spec_type.extra_specs = {'vz:volume_format': 'qcow2'}

    vol_from_type = fake_volume.fake_volume_obj(self.context)
    vol_from_type.size = 1024
    vol_from_type.volume_type = spec_type

    # Identical volume, but metadata overrides the format to ploop.
    vol_from_meta = copy.deepcopy(vol_from_type)
    vol_from_meta.metadata = {'volume_format': 'ploop'}

    drv._do_create_volume(vol_from_type)
    drv._create_qcow2_file.assert_called_once_with(self._FAKE_VOLUME_PATH,
                                                   1024)

    drv._do_create_volume(vol_from_meta)
    drv._create_ploop.assert_called_once_with(self._FAKE_VOLUME_PATH,
                                              1024)
def setUp(self):
    """Build a VZStorage driver from a MagicMock config plus fixtures."""
    super(VZStorageTestCase, self).setUp()

    cfg = mock.MagicMock()
    cfg.vzstorage_shares_config = '/fake/config/path'
    cfg.vzstorage_sparsed_volumes = False
    cfg.vzstorage_used_ratio = 0.7
    cfg.vzstorage_mount_point_base = self._FAKE_MNT_BASE
    cfg.vzstorage_default_volume_format = 'raw'
    cfg.nas_secure_file_operations = 'auto'
    cfg.nas_secure_file_permissions = 'auto'
    self._cfg = cfg

    driver = vzstorage.VZStorageDriver(configuration=cfg)
    driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    driver._execute = mock.Mock()
    driver.base = self._FAKE_MNT_BASE
    self._vz_driver = driver

    self.context = context.get_admin_context()

    vol_type = fake_volume.fake_volume_type_obj(self.context)
    vol_type.extra_specs = {}
    vol_kwargs = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
                  'size': 1,
                  'provider_location': self._FAKE_SHARE,
                  'name': self._FAKE_VOLUME_NAME,
                  'status': 'available'}
    self.vol = fake_volume.fake_volume_obj(self.context,
                                           volume_type_id=vol_type.id,
                                           **vol_kwargs)
    self.vol.volume_type = vol_type

    snap_kwargs = {'id': self._FAKE_SNAPSHOT_ID,
                   'status': 'available',
                   'volume_size': 1}
    self.snap = fake_snapshot.fake_snapshot_obj(self.context,
                                                **snap_kwargs)
    self.snap.volume = self.vol
def setUp(self):
    """Wire a VZStorage driver to a patched VZStorageRemoteFSClient."""
    super(VZStorageTestCase, self).setUp()

    # Patch the VZ-specific remotefs client so no real mounting happens.
    self._remotefsclient = mock.patch.object(
        remotefs, 'VZStorageRemoteFSClient').start()
    self._remotefsclient.get_mount_point = mock.Mock(
        return_value=self._FAKE_MNT_POINT)

    cfg = copy.copy(self._FAKE_VZ_CONFIG)
    driver = vzstorage.VZStorageDriver(configuration=cfg)
    driver._local_volume_dir = mock.Mock(
        return_value=self._FAKE_MNT_POINT)
    driver._execute = mock.Mock()
    driver.base = self._FAKE_MNT_BASE
    self._vz_driver = driver

    self.context = context.get_admin_context()

    vol_type = fake_volume.fake_volume_type_obj(self.context)
    vol_type.extra_specs = {}
    self.vol = fake_volume.fake_volume_obj(
        self.context,
        volume_type_id=vol_type.id,
        id='4f711859-4928-4cb7-801a-a50c37ceaccc',
        size=1,
        provider_location=self._FAKE_SHARE,
        name=self._FAKE_VOLUME_NAME,
        status='available')
    self.vol.volume_type = vol_type

    self.snap = fake_snapshot.fake_snapshot_obj(
        self.context,
        id=self._FAKE_SNAPSHOT_ID,
        status='available',
        volume_size=1)
    self.snap.volume = self.vol
def test_cast_manage_existing(self):
    """ManageCastTask forwards the manage-existing spec to the scheduler.

    The fake scheduler RPC API asserts that the spec it receives matches
    the one built here.
    """
    # Bug fix: the original built this fixture with
    # fake_volume_type_obj(), producing a VolumeType where a Volume
    # object is expected (it is passed as the ``volume`` argument and
    # its id used as ``volume_id``).  Use a fake Volume instead.
    volume = fake_volume.fake_volume_obj(self.ctxt)
    spec = {
        'name': 'name',
        'description': 'description',
        'host': 'host',
        'ref': 'ref',
        'volume_type': 'volume_type',
        'metadata': 'metadata',
        'availability_zone': 'availability_zone',
        'bootable': 'bootable',
        'volume_id': volume.id,
    }

    # Fake objects assert specs
    task = manage_existing.ManageCastTask(
        fake_volume_api.FakeSchedulerRpcAPI(spec, self),
        fake_volume_api.FakeDb())

    create_what = spec.copy()
    create_what.update({'volume': volume})
    create_what.pop('volume_id')
    task.execute(self.ctxt, **create_what)
def test_is_multiattach_specs_true(self, true):
    """Any truthy spelling of the multiattach spec marks the type so."""
    vol_type = fake_volume.fake_volume_type_obj(
        self.context, extra_specs={'multiattach': true})
    self.assertTrue(vol_type.is_multiattach())
def setUp(self):
    """Build a mocked HNAS NFS driver plus fake volume fixtures."""
    super(HNASNFSDriverTest, self).setUp()
    self.context = context.get_admin_context()

    self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
    self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
    self.volume_type = fake_volume.fake_volume_type_obj(None,
                                                        name='silver')
    self.clone = fake_volume.fake_volume_obj(
        None,
        id=fake.VOLUME2_ID,
        size=128,
        host='host1@hnas-nfs-backend#default',
        volume_type='default',
        provider_location='hnas')

    # Configuration as hnas_utils.read_cinder_conf would parse it.
    self.parsed_xml = {
        'username': '******',
        'password': '******',
        'hnas_cmd': 'ssc',
        'ssh_port': '22',
        'services': {
            'default': {
                'hdp': '172.24.49.21:/fs-cinder',
                'pool_name': 'default',
                'label': 'svc_0',
                'ctl': '1',
                'export': {
                    'fs': 'fs-cinder',
                    'path': '/export-cinder/volume'
                }
            },
        },
        'cluster_admin_ip0': None,
        'ssh_private_key': None,
        'chap_enabled': 'True',
        'mgmt_ip0': '172.17.44.15',
        'ssh_enabled': None
    }

    # NOTE(review): this first Configuration mock is immediately
    # replaced below and appears redundant; kept to preserve the
    # original setup order.
    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.hds_hnas_nfs_config_file = 'fake.xml'

    self.mock_object(hnas_utils, 'read_cinder_conf',
                     mock.Mock(return_value=self.parsed_xml))

    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.max_over_subscription_ratio = 20.0
    self.configuration.reserved_percentage = 0
    self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml'
    self.configuration.nfs_shares_config = 'fake_nfs_share.xml'
    self.configuration.num_shell_tries = 2

    self.driver = nfs.HNASNFSDriver(configuration=self.configuration)
def test_is_replicated_true(self, enabled):
    """A truthy replication_enabled spec marks the type as replicated."""
    vol_type = fake_volume.fake_volume_type_obj(
        self.context, extra_specs={'replication_enabled': enabled})
    self.assertTrue(vol_type.is_replicated())
def setUp(self):
    """Build a mocked HNAS iSCSI driver plus fake volume fixtures."""
    super(HNASiSCSIDriverTest, self).setUp()
    self.context = context.get_admin_context()

    self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
    self.volume_clone = fake_volume.fake_volume_obj(self.context,
                                                    **_VOLUME2)
    self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
    self.volume_type = fake_volume.fake_volume_type_obj(None,
                                                        name='silver')

    # Configuration as hnas_utils.read_cinder_conf would parse it:
    # two services ('default' and 'silver'), each with its own file
    # system, iSCSI endpoint and CHAP target credentials.
    self.parsed_xml = {
        'username': '******',
        'password': '******',
        'hnas_cmd': 'ssc',
        'fs': {'fs2': 'fs2'},
        'ssh_port': '22',
        'port': '3260',
        'services': {
            'default': {
                'hdp': 'fs2',
                'iscsi_ip': '172.17.39.132',
                'iscsi_port': '3260',
                'port': '22',
                'volume_type': 'default',
                'label': 'svc_0',
                'evs': '1',
                'tgt': {
                    'alias': 'test',
                    'secret': 'itEpgB5gPefGhW2'
                }
            },
            'silver': {
                'hdp': 'fs3',
                'iscsi_ip': '172.17.39.133',
                'iscsi_port': '3260',
                'port': '22',
                'volume_type': 'silver',
                'label': 'svc_1',
                'evs': '2',
                'tgt': {
                    'alias': 'iscsi-test',
                    'secret': 'itEpgB5gPefGhW2'
                }
            }
        },
        'cluster_admin_ip0': None,
        'ssh_private_key': None,
        'chap_enabled': True,
        'mgmt_ip0': '172.17.44.15',
        'ssh_enabled': None
    }

    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.hds_hnas_iscsi_config_file = 'fake.xml'
    self.mock_object(hnas_utils, 'read_cinder_conf',
                     mock.Mock(return_value=self.parsed_xml))

    self.driver = iscsi.HNASISCSIDriver(configuration=self.configuration)
def test_is_replicated_no_specs(self):
    """A type with no extra specs is not considered replicated."""
    vol_type = fake_volume.fake_volume_type_obj(self.context,
                                                extra_specs={})
    # assertFalse already truth-tests, so no explicit bool() is needed.
    self.assertFalse(vol_type.is_replicated())
class PowerMaxData(object):
    """Shared canned data for the PowerMax/VMAX driver unit tests.

    Every attribute is a class-level constant: identifiers, canned REST
    payloads, fake cinder objects (volumes, snapshots, groups) and
    extra-specs dictionaries reused across the PowerMax test modules.
    NOTE(review): attribute names are public test API referenced from
    sibling modules, so misspelled names (``inititiatorgroup``,
    ``rest_iterator_resonse_*``) are kept as-is.
    """

    # array info
    array = '000197800123'
    uni_array = u'000197800123'
    array_herc = '000197900123'
    srp = 'SRP_1'
    srp2 = 'SRP_2'
    slo = 'Diamond'
    workload = 'DSS'
    port_group_name_f = 'OS-fibre-PG'
    port_group_name_i = 'OS-iscsi-PG'
    masking_view_name_f = 'OS-HostX-F-OS-fibre-PG-MV'
    masking_view_name_i = 'OS-HostX-SRP_1-I-OS-iscsi-PG-MV'
    initiatorgroup_name_f = 'OS-HostX-F-IG'
    initiatorgroup_name_i = 'OS-HostX-I-IG'
    parent_sg_f = 'OS-HostX-F-OS-fibre-PG-SG'
    parent_sg_i = 'OS-HostX-I-OS-iscsi-PG-SG'
    storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'
    storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG'
    defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG'
    storagegroup_list = [defaultstoragegroup_name]
    default_sg_no_slo = 'OS-no_SLO-SG'
    default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG'
    default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG'
    failed_resource = 'OS-failed-resource'
    fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123'
    new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123'
    none_host = 'HostX@Backend#Diamond+None+SRP_1+000197800123'
    version = '3.1.0'
    volume_wwn = '600000345'
    remote_array = '000197800124'
    device_id = '00001'
    device_id2 = '00002'
    device_id3 = '00003'
    device_id4 = '00004'
    rdf_group_name = '23_24_007'
    rdf_group_no = '70'
    u4v_version = '91'
    storagegroup_name_source = 'Grp_source_sg'
    storagegroup_name_target = 'Grp_target_sg'
    group_snapshot_name = 'Grp_snapshot'
    target_group_name = 'Grp_target'
    storagegroup_name_with_id = 'GrpId_group_name'
    rdf_managed_async_grp = 'OS-%s-Asynchronous-rdf-sg' % rdf_group_name
    volume_id = '2b06255d-f5f0-4520-a953-b029196add6a'
    no_slo_sg_name = 'OS-HostX-No_SLO-OS-fibre-PG'
    temp_snapvx = 'temp-00001-snapshot_for_clone'

    # connector info
    wwpn1 = '123456789012345'
    wwpn2 = '123456789054321'
    wwnn1 = '223456789012345'
    initiator = 'iqn.1993-08.org.debian: 01: 222'
    ip, ip2 = u'123.456.7.8', u'123.456.7.9'
    iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001'
    iqn2 = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000002,t,0x0001'
    connector = {
        'ip': ip,
        'initiator': initiator,
        'wwpns': [wwpn1, wwpn2],
        'wwnns': [wwnn1],
        'host': 'HostX'
    }

    fabric_name_prefix = 'fakeFabric'
    end_point_map = {
        connector['wwpns'][0]: [wwnn1],
        connector['wwpns'][1]: [wwnn1]
    }
    target_wwns = [wwnn1]
    zoning_mappings = {
        'array': u'000197800123',
        'init_targ_map': end_point_map,
        'initiator_group': initiatorgroup_name_f,
        'port_group': port_group_name_f,
        'target_wwns': target_wwns
    }
    zoning_mappings_metro = deepcopy(zoning_mappings)
    zoning_mappings_metro.update({
        'metro_port_group': port_group_name_f,
        'metro_ig': initiatorgroup_name_f,
        'metro_array': remote_array
    })

    # One fake fabric entry per connector WWPN; the target WWN is just the
    # initiator WWN reversed so each mapping is distinct but deterministic.
    device_map = {}
    for wwn in connector['wwpns']:
        fabric_name = ''.join([fabric_name_prefix, wwn[-2:]])
        target_wwn = wwn[::-1]
        fabric_map = {
            'initiator_port_wwn_list': [wwn],
            'target_port_wwn_list': [target_wwn]
        }
        device_map[fabric_name] = fabric_map

    iscsi_device_info = {
        'maskingview': masking_view_name_i,
        'ip_and_iqn': [{
            'ip': ip,
            'iqn': initiator
        }],
        'is_multipath': True,
        'array': array,
        'controller': {
            'host': '10.00.00.00'
        },
        'hostlunid': 3
    }
    iscsi_device_info_metro = deepcopy(iscsi_device_info)
    iscsi_device_info_metro['metro_ip_and_iqn'] = [{'ip': ip2, 'iqn': iqn2}]
    iscsi_device_info_metro['metro_hostlunid'] = 2

    fc_device_info = {
        'maskingview': masking_view_name_f,
        'array': array,
        'controller': {
            'host': '10.00.00.00'
        },
        'hostlunid': 3
    }

    # snapshot info
    snapshot_id = '390eeb4d-0f56-4a02-ba14-167167967014'
    snapshot_display_id = 'my_snap'
    managed_snap_id = 'OS-390eeb4d-0f56-4a02-ba14-167167967014'
    test_snapshot_snap_name = 'OS-' + snapshot_id[:6] + snapshot_id[-9:]
    snap_location = {
        'snap_name': test_snapshot_snap_name,
        'source_id': device_id
    }

    # cinder volume info
    ctx = context.RequestContext('admin', 'fake', True)
    provider_location = {'array': array, 'device_id': device_id}
    provider_location2 = {
        'array': six.text_type(array),
        'device_id': device_id2
    }
    provider_location3 = {
        'array': six.text_type(remote_array),
        'device_id': device_id2
    }
    provider_location4 = {
        'array': six.text_type(uni_array),
        'device_id': device_id
    }
    provider_location_clone = {
        'array': array,
        'device_id': device_id,
        'snap_name': temp_snapvx,
        'source_device_id': device_id
    }
    provider_location_snapshot = {
        'array': array,
        'device_id': device_id,
        'snap_name': test_snapshot_snap_name,
        'source_device_id': device_id
    }
    provider_location5 = {'array': remote_array, 'device_id': device_id}

    # Legacy (SMI-S era) provider_location formats kept for upgrade tests.
    legacy_provider_location = {
        'classname': 'Symm_StorageVolume',
        'keybindings': {
            'CreationClassName': u'Symm_StorageVolume',
            'SystemName': u'SYMMETRIX+000197800123',
            'DeviceID': device_id,
            'SystemCreationClassName': u'Symm_StorageSystem'
        }
    }
    legacy_provider_location2 = {
        'classname': 'Symm_StorageVolume',
        'keybindings': {
            'CreationClassName': u'Symm_StorageVolume',
            'SystemName': u'SYMMETRIX+000197800123',
            'DeviceID': device_id2,
            'SystemCreationClassName': u'Symm_StorageSystem'
        }
    }

    test_volume_type = fake_volume.fake_volume_type_obj(context=ctx)

    test_volume = fake_volume.fake_volume_obj(
        context=ctx, name='vol1', size=2, provider_auth=None,
        provider_location=six.text_type(provider_location),
        volume_type=test_volume_type, host=fake_host,
        replication_driver_data=six.text_type(provider_location3))

    test_attached_volume = fake_volume.fake_volume_obj(
        id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220', context=ctx, name='vol1',
        size=0, provider_auth=None, attach_status='attached',
        provider_location=six.text_type(provider_location), host=fake_host,
        volume_type=test_volume_type,
        replication_driver_data=six.text_type(provider_location3))

    test_legacy_vol = fake_volume.fake_volume_obj(
        context=ctx, name='vol1', size=2, provider_auth=None,
        provider_location=six.text_type(legacy_provider_location),
        replication_driver_data=six.text_type(legacy_provider_location2),
        host=fake_host, volume_type=test_volume_type)

    test_clone_volume = fake_volume.fake_volume_obj(
        context=ctx, name='vol1', size=2, provider_auth=None,
        provider_location=six.text_type(provider_location2),
        host=fake_host, source_volid=test_volume.id,
        snapshot_id=snapshot_id, _name_id=test_volume.id)

    test_volume_snap_manage = fake_volume.fake_volume_obj(
        context=ctx, name='vol1', size=2, provider_auth=None,
        display_name='vol1',
        provider_location=six.text_type(provider_location),
        volume_type=test_volume_type, host=fake_host,
        replication_driver_data=six.text_type(provider_location4))

    test_snapshot = fake_snapshot.fake_snapshot_obj(
        context=ctx, id=snapshot_id, name='my_snap', size=2,
        provider_location=six.text_type(snap_location),
        host=fake_host, volume=test_volume)

    test_legacy_snapshot = fake_snapshot.fake_snapshot_obj(
        context=ctx, id=test_volume.id, name='my_snap', size=2,
        provider_location=six.text_type(legacy_provider_location),
        host=fake_host, volume=test_volume)

    test_failed_snap = fake_snapshot.fake_snapshot_obj(
        context=ctx, id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220',
        name=failed_resource, size=2,
        provider_location=six.text_type(snap_location),
        host=fake_host, volume=test_volume)

    test_snapshot_manage = fake_snapshot.fake_snapshot_obj(
        context=ctx, id=snapshot_id, name='my_snap', size=2,
        provider_location=six.text_type(snap_location),
        host=fake_host, volume=test_volume_snap_manage,
        display_name='my_snap')

    test_volume_attachment = volume_attachment.VolumeAttachment(
        id='2b06255d-f5f0-4520-a953-b029196add6b', volume_id=test_volume.id,
        connector=connector)

    location_info = {
        'location_info': '000197800123#SRP_1#Diamond#DSS',
        'storage_protocol': 'FC'
    }
    test_host = {'capabilities': location_info, 'host': fake_host}

    # extra-specs
    vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'}
    vol_type_extra_specs_compr_disabled = {
        'pool_name': u'Diamond+DSS+SRP_1+000197800123',
        'storagetype:disablecompression': 'true'
    }
    vol_type_extra_specs_rep_enabled = {
        'pool_name': u'Diamond+DSS+SRP_1+000197800123',
        'replication_enabled': '<is> True'
    }
    extra_specs = {
        'pool_name': u'Diamond+DSS+SRP_1+000197800123',
        'slo': slo,
        'workload': workload,
        'srp': srp,
        'array': array,
        'interval': 3,
        'retries': 120
    }
    # Variants of extra_specs built up by deepcopy + targeted overrides.
    extra_specs_migrate = deepcopy(extra_specs)
    extra_specs_migrate[utils.PORTGROUPNAME] = port_group_name_f
    extra_specs_disable_compression = deepcopy(extra_specs)
    extra_specs_disable_compression[utils.DISABLECOMPRESSION] = 'true'
    extra_specs_intervals_set = deepcopy(extra_specs)
    extra_specs_intervals_set['interval'] = 1
    extra_specs_intervals_set['retries'] = 1
    extra_specs_rep_enabled = deepcopy(extra_specs)
    extra_specs_rep_enabled['replication_enabled'] = True
    rep_extra_specs = deepcopy(extra_specs_rep_enabled)
    rep_extra_specs['array'] = remote_array
    rep_extra_specs['interval'] = 1
    rep_extra_specs['retries'] = 1
    rep_extra_specs['srp'] = srp2
    rep_extra_specs['rep_mode'] = 'Synchronous'
    rep_extra_specs2 = deepcopy(rep_extra_specs)
    rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f
    rep_extra_specs3 = deepcopy(rep_extra_specs)
    rep_extra_specs3['slo'] = slo
    rep_extra_specs3['workload'] = workload
    rep_extra_specs4 = deepcopy(rep_extra_specs3)
    rep_extra_specs4['rdf_group_label'] = rdf_group_name
    rep_extra_specs5 = deepcopy(rep_extra_specs2)
    rep_extra_specs5['target_array_model'] = 'VMAX250F'

    test_volume_type_1 = volume_type.VolumeType(
        id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc',
        extra_specs=extra_specs)
    test_volume_type_list = volume_type.VolumeTypeList(
        objects=[test_volume_type_1])

    # group fixtures
    test_vol_grp_name_id_only = 'ec870a2f-6bf7-4152-aa41-75aad8e2ea96'
    test_vol_grp_name = 'Grp_source_sg_%s' % test_vol_grp_name_id_only
    test_fo_vol_group = 'fo_vol_group_%s' % test_vol_grp_name_id_only

    test_group_1 = group.Group(
        context=None, name=storagegroup_name_source,
        group_id='abc', size=1,
        id=test_vol_grp_name_id_only, status='available',
        provider_auth=None, volume_type_ids=['abc'],
        group_type_id='grptypeid',
        volume_types=test_volume_type_list,
        host=fake_host, provider_location=six.text_type(provider_location))

    test_group_failed = group.Group(
        context=None, name=failed_resource,
        group_id='14b8894e-54ec-450a-b168-c172a16ed166', size=1,
        id='318c721c-51ad-4160-bfe1-ebde2273836f', status='available',
        provider_auth=None, volume_type_ids=['abc'],
        group_type_id='grptypeid',
        volume_types=test_volume_type_list,
        host=fake_host, provider_location=six.text_type(provider_location),
        replication_status=fields.ReplicationStatus.DISABLED)

    test_rep_group = fake_group.fake_group_obj(
        context=ctx, name=storagegroup_name_source,
        id=test_vol_grp_name_id_only, host=fake_host,
        replication_status=fields.ReplicationStatus.ENABLED)

    test_group = fake_group.fake_group_obj(context=ctx,
                                           name=storagegroup_name_source,
                                           id=test_vol_grp_name_id_only,
                                           host=fake_host)

    test_group_without_name = fake_group.fake_group_obj(
        context=ctx, name=None, id=test_vol_grp_name_id_only,
        host=fake_host)

    test_group_snapshot_1 = group_snapshot.GroupSnapshot(
        context=None, id='6560405d-b89a-4f79-9e81-ad1752f5a139',
        group_id='876d9fbb-de48-4948-9f82-15c913ed05e7',
        name=group_snapshot_name,
        group_type_id='c6934c26-dde8-4bf8-a765-82b3d0130e9f',
        status='available',
        group=test_group_1)

    test_group_snapshot_failed = group_snapshot.GroupSnapshot(
        context=None, id='0819dd5e-9aa1-4ec7-9dda-c78e51b2ad76',
        group_id='1fc735cb-d36c-4352-8aa6-dc1e16b5a0a7',
        name=failed_resource,
        group_type_id='6b70de13-98c5-46b2-8f24-e4e96a8988fa',
        status='available',
        group=test_group_failed)

    test_volume_group_member = fake_volume.fake_volume_obj(
        context=ctx, name='vol1', size=2, provider_auth=None,
        provider_location=six.text_type(provider_location),
        volume_type=test_volume_type, host=fake_host,
        replication_driver_data=six.text_type(provider_location3),
        group_id=test_vol_grp_name_id_only)

    # masking view dict
    masking_view_dict = {
        'array': array,
        'connector': connector,
        'device_id': device_id,
        'init_group_name': initiatorgroup_name_f,
        'initiator_check': None,
        'maskingview_name': masking_view_name_f,
        'parent_sg_name': parent_sg_f,
        'srp': srp,
        'storagetype:disablecompression': False,
        utils.PORTGROUPNAME: port_group_name_f,
        'slo': slo,
        'storagegroup_name': storagegroup_name_f,
        'volume_name': test_volume.name,
        'workload': workload,
        'replication_enabled': False
    }
    masking_view_dict_no_slo = deepcopy(masking_view_dict)
    masking_view_dict_no_slo.update({
        'slo': None,
        'workload': None,
        'storagegroup_name': no_slo_sg_name
    })
    masking_view_dict_compression_disabled = deepcopy(masking_view_dict)
    masking_view_dict_compression_disabled.update({
        'storagetype:disablecompression': True,
        'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-CD'
    })
    masking_view_dict_replication_enabled = deepcopy(masking_view_dict)
    masking_view_dict_replication_enabled.update({
        'replication_enabled': True,
        'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE'
    })
    masking_view_dict_multiattach = deepcopy(masking_view_dict)
    masking_view_dict_multiattach.update({
        utils.EXTRA_SPECS: extra_specs,
        utils.IS_MULTIATTACH: True,
        utils.OTHER_PARENT_SG: parent_sg_i,
        utils.FAST_SG: storagegroup_name_i,
        utils.NO_SLO_SG: no_slo_sg_name
    })

    # vmax data
    # sloprovisioning
    compression_info = {'symmetrixId': ['000197800128']}
    # NOTE(review): attribute name misspelled ('inititiatorgroup') but kept,
    # as other test modules reference it by this exact name.
    inititiatorgroup = [{
        'initiator': [wwpn1],
        'hostId': initiatorgroup_name_f,
        'maskingview': [masking_view_name_f]
    }, {
        'initiator': [initiator],
        'hostId': initiatorgroup_name_i,
        'maskingview': [masking_view_name_i]
    }]

    initiator_list = [{
        'host': initiatorgroup_name_f,
        'initiatorId': wwpn1,
        'maskingview': [masking_view_name_f]
    }, {
        'host': initiatorgroup_name_i,
        'initiatorId': initiator,
        'maskingview': [masking_view_name_i]
    }, {
        'initiatorId': ['FA-1D:4:' + wwpn1, 'SE-4E:0:' + initiator]
    }]

    maskingview = [{
        'maskingViewId': masking_view_name_f,
        'portGroupId': port_group_name_f,
        'storageGroupId': storagegroup_name_f,
        'hostId': initiatorgroup_name_f,
        'maskingViewConnection': [{
            'host_lun_address': '0003'
        }]
    }, {
        'maskingViewId': masking_view_name_i,
        'portGroupId': port_group_name_i,
        'storageGroupId': storagegroup_name_i,
        'hostId': initiatorgroup_name_i,
        'maskingViewConnection': [{
            'host_lun_address': '0003'
        }]
    }, {}]

    portgroup = [{
        'portGroupId': port_group_name_f,
        'symmetrixPortKey': [{
            'directorId': 'FA-1D',
            'portId': '4'
        }],
        'maskingview': [masking_view_name_f]
    }, {
        'portGroupId': port_group_name_i,
        'symmetrixPortKey': [{
            'directorId': 'SE-4E',
            'portId': '0'
        }],
        'maskingview': [masking_view_name_i]
    }]

    port_list = [{
        'symmetrixPort': {
            'num_of_masking_views': 1,
            'maskingview': [masking_view_name_f],
            'identifier': wwnn1,
            'symmetrixPortKey': {
                'directorId': 'FA-1D',
                'portId': '4'
            },
            'portgroup': [port_group_name_f]
        }
    }, {
        'symmetrixPort': {
            'identifier': initiator,
            'symmetrixPortKey': {
                'directorId': 'SE-4E',
                'portId': '0'
            },
            'ip_addresses': [ip],
            'num_of_masking_views': 1,
            'maskingview': [masking_view_name_i],
            'portgroup': [port_group_name_i]
        }
    }]

    sg_details = [{
        'srp': srp,
        'num_of_vols': 2,
        'cap_gb': 2,
        'storageGroupId': defaultstoragegroup_name,
        'slo': slo,
        'workload': workload
    }, {
        'srp': srp,
        'num_of_vols': 2,
        'cap_gb': 2,
        'storageGroupId': storagegroup_name_f,
        'slo': slo,
        'workload': workload,
        'maskingview': [masking_view_name_f],
        'parent_storage_group': [parent_sg_f]
    }, {
        'srp': srp,
        'num_of_vols': 2,
        'cap_gb': 2,
        'storageGroupId': storagegroup_name_i,
        'slo': slo,
        'workload': workload,
        'maskingview': [masking_view_name_i],
        'parent_storage_group': [parent_sg_i]
    }, {
        'num_of_vols': 2,
        'cap_gb': 2,
        'storageGroupId': parent_sg_f,
        'num_of_child_sgs': 1,
        'child_storage_group': [storagegroup_name_f],
        'maskingview': [masking_view_name_f]
    }, {
        'num_of_vols': 2,
        'cap_gb': 2,
        'storageGroupId': parent_sg_i,
        'num_of_child_sgs': 1,
        'child_storage_group': [storagegroup_name_i],
        'maskingview': [masking_view_name_i],
    }, {
        'srp': srp,
        'num_of_vols': 2,
        'cap_gb': 2,
        'storageGroupId': no_slo_sg_name,
        'slo': None,
        'workload': None,
        'maskingview': [masking_view_name_i],
        'parent_storage_group': [parent_sg_i]
    }]

    sg_details_rep = [{
        'childNames': [],
        'numDevicesNonGk': 2,
        'isLinkTarget': False,
        'rdf': True,
        'capacityGB': 2.0,
        'name': storagegroup_name_source,
        'snapVXSnapshots': ['6560405d-752f5a139'],
        'symmetrixId': array,
        'numSnapVXSnapshots': 1
    }]

    sg_rdf_details = [{
        'storageGroupName': test_vol_grp_name,
        'symmetrixId': array,
        'modes': ['Synchronous'],
        'rdfGroupNumber': rdf_group_no,
        'states': ['Synchronized']
    }, {
        'storageGroupName': test_fo_vol_group,
        'symmetrixId': array,
        'modes': ['Synchronous'],
        'rdfGroupNumber': rdf_group_no,
        'states': ['Failed Over']
    }]

    sg_list = {
        'storageGroupId': [storagegroup_name_f,
                           defaultstoragegroup_name]
    }
    sg_list_rep = [storagegroup_name_with_id]

    srp_details = {
        'srp_capacity': {
            u'subscribed_total_tb': 93.52,
            u'usable_used_tb': 8.62,
            u'usable_total_tb': 24.45,
            u'snapshot_modified_tb': 0.0,
            u'subscribed_allocated_tb': 18.77,
            u'snapshot_total_tb': 1.58
        },
        'srpId': srp,
        'reserved_cap_percent': 10
    }

    array_info_wl = {
        'RestServerIp': '1.1.1.1',
        'RestServerPort': 3448,
        'RestUserName': '******',
        'RestPassword': '******',
        'SSLVerify': False,
        'SerialNumber': array,
        'srpName': 'SRP_1',
        'PortGroup': port_group_name_i,
        'SLO': 'Diamond',
        'Workload': 'OLTP'
    }

    array_info_no_wl = {
        'RestServerIp': '1.1.1.1',
        'RestServerPort': 3448,
        'RestUserName': '******',
        'RestPassword': '******',
        'SSLVerify': False,
        'SerialNumber': array,
        'srpName': 'SRP_1',
        'PortGroup': port_group_name_i,
        'SLO': 'Diamond'
    }

    volume_details = [
        {
            'cap_gb': 2,
            'num_of_storage_groups': 1,
            'volumeId': device_id,
            'volume_identifier': 'OS-%s' % test_volume.id,
            'wwn': volume_wwn,
            'snapvx_target': 'false',
            'snapvx_source': 'false',
            'storageGroupId': [defaultstoragegroup_name,
                               storagegroup_name_f]
        },
        {
            'cap_gb': 1,
            'num_of_storage_groups': 1,
            'volumeId': device_id2,
            'volume_identifier': 'OS-%s' % test_volume.id,
            'wwn': '600012345',
            'storageGroupId': [defaultstoragegroup_name,
                               storagegroup_name_f]
        },
        {
            'cap_gb': 1,
            'num_of_storage_groups': 0,
            'volumeId': device_id3,
            'volume_identifier': '123',
            'wwn': '600012345'
        },
        {
            'cap_gb': 1,
            'num_of_storage_groups': 1,
            'volumeId': device_id4,
            'volume_identifier': 'random_name',
            'wwn': '600012345',
            'storageGroupId': ['random_sg_1', 'random_sg_2']
        },
    ]

    volume_details_attached = {
        'cap_gb': 2,
        'num_of_storage_groups': 1,
        'volumeId': device_id,
        'volume_identifier': 'OS-%s' % test_volume.id,
        'wwn': volume_wwn,
        'snapvx_target': 'false',
        'snapvx_source': 'false',
        'storageGroupId': [storagegroup_name_f]
    }

    volume_details_no_sg = {
        'cap_gb': 2,
        'num_of_storage_groups': 1,
        'volumeId': device_id,
        'volume_identifier': 'OS-%s' % test_volume.id,
        'wwn': volume_wwn,
        'snapvx_target': 'false',
        'snapvx_source': 'false',
        'storageGroupId': []
    }

    volume_list = [{
        'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa',
        'count': 2,
        'maxPageSize': 1,
        'resultList': {
            'result': [{
                'volumeId': device_id
            }],
            'from': 0,
            'to': 1
        }
    }, {
        'resultList': {
            'result': [{
                'volumeId': device_id2
            }]
        }
    }, {
        'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa',
        'count': 2,
        'maxPageSize': 1,
        'resultList': {
            'result': [{
                'volumeId': device_id
            }, {
                'volumeId': device_id2
            }],
            'from': 0,
            'to': 1
        }
    }]

    private_vol_details = {
        'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa',
        'count': 2,
        'maxPageSize': 1,
        'resultList': {
            'result': [{
                'timeFinderInfo': {
                    'snapVXSession': [{
                        'srcSnapshotGenInfo': [{
                            'snapshotHeader': {
                                'snapshotName': 'temp-1',
                                'device': device_id,
                                'generation': '0'
                            },
                            'lnkSnapshotGenInfo': [{
                                'targetDevice': device_id2,
                                'state': 'Copied'
                            }]
                        }]
                    }, {
                        'tgtSrcSnapshotGenInfo': {
                            'snapshotName': 'temp-1',
                            'targetDevice': device_id2,
                            'sourceDevice': device_id,
                            'generation': '0',
                            'state': 'Copied'
                        }
                    }],
                    'snapVXSrc': 'true',
                    'snapVXTgt': 'true'
                },
                'rdfInfo': {
                    'RDFSession': [{
                        'SRDFStatus': 'Ready',
                        'pairState': 'Synchronized',
                        'remoteDeviceID': device_id2,
                        'remoteSymmetrixID': remote_array
                    }]
                }
            }],
            'from': 0,
            'to': 1
        }
    }

    # Service Levels / Workloads
    workloadtype = {'workloadId': ['OLTP', 'OLTP_REP', 'DSS', 'DSS_REP']}
    srp_slo_details = {
        'serviceLevelDemand': [{
            'serviceLevelId': 'None'
        }, {
            'serviceLevelId': 'Diamond'
        }, {
            'serviceLevelId': 'Gold'
        }, {
            'serviceLevelId': 'Optimized'
        }]
    }
    slo_details = ['None', 'Diamond', 'Gold', 'Optimized']
    powermax_slo_details = {
        'sloId': ['Bronze', 'Diamond', 'Gold', 'Optimized',
                  'Platinum', 'Silver']
    }
    powermax_model_details = {
        'symmetrixId': array,
        'model': 'PowerMax_2000',
        'ucode': '5978.1091.1092'
    }
    vmax_slo_details = {'sloId': ['Diamond', 'Optimized']}
    vmax_model_details = {'model': 'VMAX450F'}

    # replication
    volume_snap_vx = {
        'snapshotLnks': [],
        'snapshotSrcs': [{
            'generation': 0,
            'linkedDevices': [{
                'targetDevice': device_id2,
                'percentageCopied': 100,
                'state': 'Copied',
                'copy': True,
                'defined': True,
                'linked': True
            }],
            'snapshotName': test_snapshot_snap_name,
            'state': 'Established'
        }]
    }
    capabilities = {
        'symmetrixCapability': [{
            'rdfCapable': True,
            'snapVxCapable': True,
            'symmetrixId': '0001111111'
        }, {
            'symmetrixId': array,
            'snapVxCapable': True,
            'rdfCapable': True
        }]
    }
    group_snap_vx = {
        'generation': 0,
        'isLinked': False,
        'numUniqueTracks': 0,
        'isRestored': False,
        'name': group_snapshot_name,
        'numStorageGroupVolumes': 1,
        'state': ['Established'],
        'timeToLiveExpiryDate': 'N/A',
        'isExpired': False,
        'numSharedTracks': 0,
        'timestamp': '00:30:50 Fri, 02 Jun 2017 IST +0100',
        'numSourceVolumes': 1
    }
    group_snap_vx_1 = {
        'generation': 0,
        'isLinked': False,
        'numUniqueTracks': 0,
        'isRestored': False,
        'name': group_snapshot_name,
        'numStorageGroupVolumes': 1,
        'state': ['Copied'],
        'timeToLiveExpiryDate': 'N/A',
        'isExpired': False,
        'numSharedTracks': 0,
        'timestamp': '00:30:50 Fri, 02 Jun 2017 IST +0100',
        'numSourceVolumes': 1,
        'linkedStorageGroup': {
            'name': target_group_name,
            'percentageCopied': 100
        },
    }
    grp_snapvx_links = [{
        'name': target_group_name,
        'percentageCopied': 100
    }, {
        'name': 'another-target',
        'percentageCopied': 90
    }]

    rdf_group_list = {
        'rdfGroupID': [{
            'rdfgNumber': rdf_group_no,
            'label': rdf_group_name
        }]
    }
    rdf_group_details = {
        'modes': ['Synchronous'],
        'remoteSymmetrix': remote_array,
        'label': rdf_group_name,
        'type': 'Dynamic',
        'numDevices': 1,
        'remoteRdfgNumber': rdf_group_no,
        'rdfgNumber': rdf_group_no
    }
    rdf_group_vol_details = {
        'remoteRdfGroupNumber': rdf_group_no,
        'localSymmetrixId': array,
        'volumeConfig': 'RDF1+TDEV',
        'localRdfGroupNumber': rdf_group_no,
        'localVolumeName': device_id,
        'rdfpairState': 'Synchronized',
        'remoteVolumeName': device_id2,
        'localVolumeState': 'Ready',
        'rdfMode': 'Synchronous',
        'remoteVolumeState': 'Write Disabled',
        'remoteSymmetrixId': remote_array
    }

    # system
    job_list = [{
        'status': 'SUCCEEDED',
        'jobId': '12345',
        'result': 'created',
        'resourceLink': 'storagegroup/%s' % storagegroup_name_f
    }, {
        'status': 'RUNNING',
        'jobId': '55555'
    }, {
        'status': 'FAILED',
        'jobId': '09999'
    }]
    symmetrix = [{
        'symmetrixId': array,
        'model': 'VMAX250F',
        'ucode': '5977.1091.1092'
    }, {
        'symmetrixId': array_herc,
        'model': 'PowerMax 2000',
        'ucode': '5978.1091.1092'
    }]
    version_details = {'version': 'V9.1.0.1'}

    headroom = {'headroom': [{'headroomCapacity': 20348.29}]}

    ucode_5978_foxtail = {'ucode': '5978.435.435'}

    # Canned paginated REST responses for private volume queries.
    p_vol_rest_response_single = {
        'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0',
        'count': 1,
        'expirationTime': 1521650650793,
        'maxPageSize': 1000,
        'resultList': {
            'to': 1,
            'from': 1,
            'result': [{
                'volumeHeader': {
                    'capGB': 1.0,
                    'capMB': 1026.0,
                    'volumeId': '00001',
                    'status': 'Ready',
                    'configuration': 'TDEV'
                }
            }]
        }
    }
    p_vol_rest_response_none = {
        'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0',
        'count': 0,
        'expirationTime': 1521650650793,
        'maxPageSize': 1000,
        'resultList': {
            'to': 0,
            'from': 0,
            'result': []
        }
    }
    p_vol_rest_response_iterator_1 = {
        'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0',
        'count': 1500,
        'expirationTime': 1521650650793,
        'maxPageSize': 1000,
        'resultList': {
            'to': 1,
            'from': 1,
            'result': [{
                'volumeHeader': {
                    'capGB': 1.0,
                    'capMB': 1026.0,
                    'volumeId': '00002',
                    'status': 'Ready',
                    'configuration': 'TDEV'
                }
            }]
        }
    }
    p_vol_rest_response_iterator_2 = {
        'to': 2000,
        'from': 1001,
        'result': [{
            'volumeHeader': {
                'capGB': 1.0,
                'capMB': 1026.0,
                'volumeId': '00001',
                'status': 'Ready',
                'configuration': 'TDEV'
            }
        }]
    }
    # NOTE(review): 'resonse' misspelling kept — name is referenced
    # elsewhere in the test suite.
    rest_iterator_resonse_one = {
        'to': 1000,
        'from': 1,
        'result': [{
            'volumeHeader': {
                'capGB': 1.0,
                'capMB': 1026.0,
                'volumeId': '00001',
                'status': 'Ready',
                'configuration': 'TDEV'
            }
        }]
    }
    rest_iterator_resonse_two = {
        'to': 1500,
        'from': 1001,
        'result': [{
            'volumeHeader': {
                'capGB': 1.0,
                'capMB': 1026.0,
                'volumeId': '00002',
                'status': 'Ready',
                'configuration': 'TDEV'
            }
        }]
    }

    # COMMON.PY
    priv_vol_func_response_single = [{
        'volumeHeader': {
            'private': False,
            'capGB': 1.0,
            'capMB': 1026.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00001',
            'status': 'Ready',
            'mapped': False,
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'encapsulated': False,
            'formattedName': '00001',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'maskingInfo': {
            'masked': False
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'timeFinderInfo': {
            'mirror': False,
            'snapVXTgt': False,
            'cloneTarget': False,
            'cloneSrc': False,
            'snapVXSrc': True,
            'snapVXSession': [{
                'srcSnapshotGenInfo': [{
                    'snapshotHeader': {
                        'timestamp': 1512763278000,
                        'expired': False,
                        'secured': False,
                        'snapshotName': 'testSnap1',
                        'device': '00001',
                        'generation': 0,
                        'timeToLive': 0
                    }
                }]
            }]
        }
    }]

    priv_vol_func_response_multi = [{
        'volumeHeader': {
            'private': False,
            'capGB': 100.0,
            'capMB': 102400.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00001',
            'status': 'Ready',
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'mapped': False,
            'encapsulated': False,
            'formattedName': '00001',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'maskingInfo': {
            'masked': False
        },
        'timeFinderInfo': {
            'mirror': False,
            'snapVXTgt': False,
            'cloneTarget': False,
            'cloneSrc': False,
            'snapVXSrc': True,
            'snapVXSession': [{
                'srcSnapshotGenInfo': [{
                    'snapshotHeader': {
                        'timestamp': 1512763278000,
                        'expired': False,
                        'secured': False,
                        'snapshotName': 'testSnap1',
                        'device': '00001',
                        'generation': 0,
                        'timeToLive': 0
                    }
                }]
            }]
        }
    }, {
        'volumeHeader': {
            'private': False,
            'capGB': 200.0,
            'capMB': 204800.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00002',
            'status': 'Ready',
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'mapped': False,
            'encapsulated': False,
            'formattedName': '00002',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'maskingInfo': {
            'masked': False
        },
        'timeFinderInfo': {
            'mirror': False,
            'snapVXTgt': False,
            'cloneTarget': False,
            'cloneSrc': False,
            'snapVXSrc': True,
            'snapVXSession': [{
                'srcSnapshotGenInfo': [{
                    'snapshotHeader': {
                        'timestamp': 1512763278000,
                        'expired': False,
                        'secured': False,
                        'snapshotName': 'testSnap2',
                        'device': '00002',
                        'generation': 0,
                        'timeToLive': 0
                    }
                }]
            }]
        }
    }, {
        'volumeHeader': {
            'private': False,
            'capGB': 300.0,
            'capMB': 307200.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00003',
            'status': 'Ready',
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'mapped': False,
            'encapsulated': False,
            'formattedName': '00003',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'maskingInfo': {
            'masked': False
        },
        'timeFinderInfo': {
            'mirror': False,
            'snapVXTgt': False,
            'cloneTarget': False,
            'cloneSrc': False,
            'snapVXSrc': True,
            'snapVXSession': [{
                'srcSnapshotGenInfo': [{
                    'snapshotHeader': {
                        'timestamp': 1512763278000,
                        'expired': False,
                        'secured': False,
                        'snapshotName': 'testSnap3',
                        'device': '00003',
                        'generation': 0,
                        'timeToLive': 0
                    }
                }]
            }]
        }
    }, {
        'volumeHeader': {
            'private': False,
            'capGB': 400.0,
            'capMB': 409600.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00004',
            'status': 'Ready',
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'mapped': False,
            'encapsulated': False,
            'formattedName': '00004',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'maskingInfo': {
            'masked': False
        },
        'timeFinderInfo': {
            'mirror': False,
            'snapVXTgt': False,
            'cloneTarget': False,
            'cloneSrc': False,
            'snapVXSrc': True,
            'snapVXSession': [{
                'srcSnapshotGenInfo': [{
                    'snapshotHeader': {
                        'timestamp': 1512763278000,
                        'expired': False,
                        'secured': False,
                        'snapshotName': 'testSnap4',
                        'device': '00004',
                        'generation': 0,
                        'timeToLive': 0
                    }
                }]
            }]
        }
    }]

    priv_vol_func_response_multi_invalid = [{
        'volumeHeader': {
            'private': False,
            'capGB': 1.0,
            'capMB': 10.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00001',
            'status': 'Ready',
            'mapped': False,
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'encapsulated': False,
            'formattedName': '00001',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'maskingInfo': {
            'masked': False
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'timeFinderInfo': {
            'snapVXTgt': False,
            'snapVXSrc': False
        }
    }, {
        'volumeHeader': {
            'private': False,
            'capGB': 1.0,
            'capMB': 1026.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00002',
            'status': 'Ready',
            'mapped': False,
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'encapsulated': False,
            'formattedName': '00002',
            'system_resource': False,
            'numSymDevMaskingViews': 1,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'maskingInfo': {
            'masked': False
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'timeFinderInfo': {
            'snapVXTgt': False,
            'snapVXSrc': False
        }
    }, {
        'volumeHeader': {
            'private': False,
            'capGB': 1.0,
            'capMB': 1026.0,
            'serviceState': 'Normal',
            'emulationType': 'CKD',
            'volumeId': '00003',
            'status': 'Ready',
            'mapped': False,
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'encapsulated': False,
            'formattedName': '00003',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'maskingInfo': {
            'masked': False
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'timeFinderInfo': {
            'snapVXTgt': False,
            'snapVXSrc': False
        }
    }, {
        'volumeHeader': {
            'private': False,
            'capGB': 1.0,
            'capMB': 1026.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00004',
            'status': 'Ready',
            'mapped': False,
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'encapsulated': False,
            'formattedName': '00004',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': "",
            'configuration': 'TDEV'
        },
        'maskingInfo': {
            'masked': False
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'timeFinderInfo': {
            'snapVXTgt': True,
            'snapVXSrc': False
        }
    }, {
        'volumeHeader': {
            'private': False,
            'capGB': 1.0,
            'capMB': 1026.0,
            'serviceState': 'Normal',
            'emulationType': 'FBA',
            'volumeId': '00005',
            'status': 'Ready',
            'mapped': False,
            'numStorageGroups': 0,
            'reservationInfo': {
                'reserved': False
            },
            'encapsulated': False,
            'formattedName': '00005',
            'system_resource': False,
            'numSymDevMaskingViews': 0,
            'nameModifier': 'OS-vol',
            'configuration': 'TDEV'
        },
        'maskingInfo': {
            'masked': False
        },
        'rdfInfo': {
            'dynamicRDF': False,
            'RDF': False,
            'concurrentRDF': False,
            'getDynamicRDFCapability': 'RDF1_Capable',
            'RDFA': False
        },
        'timeFinderInfo': {
            'snapVXTgt': False,
            'snapVXSrc': False
        }
    }]

    volume_info_dict = {
        'volume_id': volume_id,
        'service_level': 'Diamond',
        'masking_view': 'OS-HostX-F-OS-fibre-PG-MV',
        'host': fake_host,
        'display_name': 'attach_vol_name',
        'volume_updated_time': '2018-03-05 20:32:41',
        'port_group': 'OS-fibre-PG',
        'operation': 'attach',
        'srp': 'SRP_1',
        'initiator_group': 'OS-HostX-F-IG',
        'serial_number': '000197800123',
        'parent_storage_group': 'OS-HostX-F-OS-fibre-PG-SG',
        'workload': 'DSS',
        'child_storage_group': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'
    }

    data_dict = {volume_id: volume_info_dict}
    platform = 'Linux-4.4.0-104-generic-x86_64-with-Ubuntu-16.04-xenial'
    unisphere_version = u'V9.1.0.1'
    openstack_release = '12.0.0.0b3.dev401'
    openstack_version = '12.0.0'
    python_version = '2.7.12'
    vmax_driver_version = '4.1'
    vmax_firmware_version = u'5977.1125.1125'
    vmax_model = u'VMAX250F'

    version_dict = {
        'unisphere_for_powermax_version': unisphere_version,
        'openstack_release': openstack_release,
        'openstack_version': openstack_version,
        'python_version': python_version,
        'powermax_cinder_driver_version': vmax_driver_version,
        'openstack_platform': platform,
        'storage_firmware_version': vmax_firmware_version,
        'serial_number': array,
        'storage_model': vmax_model
    }

    # Unisphere for PowerMax failover (u4p) target configuration.
    u4p_failover_config = {
        'u4p_failover_backoff_factor': '2',
        'u4p_failover_retries': '3',
        'u4p_failover_timeout': '10',
        'u4p_primary': '10.10.10.10',
        'u4p_failover_autofailback': 'True',
        'u4p_failover_targets': [{
            'san_ip': '10.10.10.11',
            'san_api_port': '8443',
            'san_login': '******',
            'san_password': '******',
            'driver_ssl_cert_verify': '/path/to/cert',
            'driver_ssl_cert_path': 'True'
        }, {
            'san_ip': '10.10.10.12',
            'san_api_port': '8443',
            'san_login': '******',
            'san_password': '******',
            'driver_ssl_cert_verify': 'True'
        }, {
            'san_ip': '10.10.10.11',
            'san_api_port': '8443',
            'san_login': '******',
            'san_password': '******',
            'driver_ssl_cert_verify': '/path/to/cert',
            'driver_ssl_cert_path': 'False'
        }]
    }

    u4p_failover_target = [{
        'RestServerIp': '10.10.10.11',
        'RestServerPort': '8443',
        'RestUserName': '******',
        'RestPassword': '******',
        'SSLVerify': '/path/to/cert'
    }, {
        'RestServerIp': '10.10.10.12',
        'RestServerPort': '8443',
        'RestUserName': '******',
        'RestPassword': '******',
        'SSLVerify': 'True'
    }]
# Build four canned volumes with deterministic zero-padded ids.
# Volume 3 has no provider_location; volume 2 is 'in-use'; the rest
# are 'available'.
for i in range(4):
    volume = {}
    volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
    volume['name'] = 'test-volume{0:d}'.format(i)
    volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
    if i == 3:
        volume['provider_location'] = None
    else:
        volume['provider_location'] = '{0:d}'.format(i)
    volume['size'] = 128
    if i == 2:
        volume['status'] = 'in-use'
    else:
        volume['status'] = 'available'
    volume = fake_volume.fake_volume_obj(CTXT, **volume)
    volume.volume_type = fake_volume.fake_volume_type_obj(CTXT)
    TEST_VOLUME.append(volume)


def _volume_get(context, volume_id):
    """Return predefined volume info.

    The index into TEST_VOLUME is recovered by stripping the dashes
    from the zero-padded UUID and converting to int.
    """
    return TEST_VOLUME[int(volume_id.replace("-", ""))]


# A single canned snapshot pointing at volume 0.
TEST_SNAPSHOT = []
snapshot = {}
snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0)
snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0)
snapshot['provider_location'] = '{0:d}'.format(1)
snapshot['status'] = 'available'
snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0)
def test_is_replicated_specs_false(self, not_enabled):
    """A falsy 'replication_enabled' extra spec means not replicated."""
    vtype = fake_volume.fake_volume_type_obj(
        self.context,
        extra_specs={'replication_enabled': not_enabled})

    self.assertFalse(vtype.is_replicated())
def test_is_multiattach_specs_false(self, false):
    """A falsy 'multiattach' extra spec means not multiattach."""
    vtype = fake_volume.fake_volume_type_obj(
        self.context,
        extra_specs={'multiattach': false})

    self.assertFalse(vtype.is_multiattach())
def test_is_replicated_no_specs(self):
    """With no extra specs at all, the type is not replicated."""
    volume_type = fake_volume.fake_volume_type_obj(
        self.context, extra_specs={})
    # assertFalse already evaluates truthiness, so the bool() wrapper the
    # original used was redundant; this also matches the sibling
    # test_is_replicated_specs_false assertion style.
    self.assertFalse(volume_type.is_replicated())
def setUp(self):
    """Build fake volumes/snapshot and an HNAS iSCSI driver under test."""
    super(HNASiSCSIDriverTest, self).setUp()
    self.context = context.get_admin_context()

    self.volume = fake_volume.fake_volume_obj(
        self.context, **_VOLUME)
    self.volume_clone = fake_volume.fake_volume_obj(
        self.context, **_VOLUME2)
    # Fixed typo: the original read
    # "self.snapshot = self.snapshot = self.instantiate_snapshot(...)",
    # a harmless but clearly accidental duplicated assignment.
    self.snapshot = self.instantiate_snapshot(_SNAPSHOT)

    self.volume_type = fake_volume.fake_volume_type_obj(
        None,
        **{'name': 'silver'}
    )

    # Canned result of parsing the driver's XML config: two iSCSI
    # services ('default' and 'silver') on different EVSes/filesystems.
    self.parsed_xml = {
        'username': '******',
        'password': '******',
        'hnas_cmd': 'ssc',
        'fs': {'fs2': 'fs2'},
        'ssh_port': '22',
        'port': '3260',
        'services': {
            'default': {
                'hdp': 'fs2',
                'iscsi_ip': '172.17.39.132',
                'iscsi_port': '3260',
                'port': '22',
                'volume_type': 'default',
                'label': 'svc_0',
                'evs': '1',
                'tgt': {
                    'alias': 'test',
                    'secret': 'itEpgB5gPefGhW2'
                }
            },
            'silver': {
                'hdp': 'fs3',
                'iscsi_ip': '172.17.39.133',
                'iscsi_port': '3260',
                'port': '22',
                'volume_type': 'silver',
                'label': 'svc_1',
                'evs': '2',
                'tgt': {
                    'alias': 'iscsi-test',
                    'secret': 'itEpgB5gPefGhW2'
                }
            }
        },
        'cluster_admin_ip0': None,
        'ssh_private_key': None,
        'chap_enabled': 'True',
        'mgmt_ip0': '172.17.44.15',
        'ssh_enabled': None
    }

    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.hds_hnas_iscsi_config_file = 'fake.xml'

    # Short-circuit config parsing so the driver sees parsed_xml above.
    self.mock_object(hnas_utils, 'read_config',
                     mock.Mock(return_value=self.parsed_xml))

    self.driver = iscsi.HNASISCSIDriver(configuration=self.configuration)
def test_create_group_from_snap(self, mock_volume_get_all,
                                mock_rpc_create_group_from_src,
                                mock_snap_get_all, mock_group_snap_get,
                                mock_volume_api_create, mock_mapping_create,
                                mock_get_volume_type):
    """Creating a group from a group snapshot delegates correctly.

    Verifies that _create_group_from_group_snapshot issues one
    volume-create per snapshot member and then a single
    create_group_from_src RPC for the new group.
    """
    # --- Fixtures: type, source group snapshot, member snapshot ---
    vol_type = fake_volume.fake_volume_type_obj(self.ctxt,
                                                id=fake.VOLUME_TYPE_ID,
                                                name='fake_volume_type')
    mock_get_volume_type.return_value = vol_type

    grp_snap = utils.create_group_snapshot(
        self.ctxt, fake.GROUP_ID,
        group_type_id=fake.GROUP_TYPE_ID,
        status=fields.GroupStatus.CREATING)
    mock_group_snap_get.return_value = grp_snap

    vol1 = utils.create_volume(self.ctxt,
                               availability_zone='nova',
                               volume_type_id=vol_type['id'],
                               group_id=fake.GROUP_ID)
    # NOTE(review): a GroupStatus value is passed as the snapshot status
    # here; it happens to be the string 'creating' — confirm intent.
    snap = utils.create_snapshot(self.ctxt, vol1.id,
                                 volume_type_id=vol_type['id'],
                                 status=fields.GroupStatus.CREATING)
    mock_snap_get_all.return_value = [snap]

    # --- Destination group built from the group snapshot ---
    name = "test_group"
    description = "this is a test group"
    grp = utils.create_group(self.ctxt,
                             group_type_id=fake.GROUP_TYPE_ID,
                             volume_type_ids=[vol_type['id']],
                             availability_zone='nova',
                             name=name, description=description,
                             group_snapshot_id=grp_snap.id,
                             status=fields.GroupStatus.CREATING)

    # Volume that the API is expected to report as the group's member.
    vol2 = utils.create_volume(self.ctxt,
                               availability_zone=grp.availability_zone,
                               volume_type_id=vol_type['id'],
                               group_id=grp.id,
                               snapshot_id=snap.id)
    mock_volume_get_all.return_value = [vol2]

    # --- Exercise ---
    self.group_api._create_group_from_group_snapshot(
        self.ctxt, grp, grp_snap.id)

    # --- Verify: one volume create from the member snapshot, one RPC ---
    mock_volume_api_create.assert_called_once_with(
        self.ctxt, 1, None, None,
        availability_zone=grp.availability_zone,
        group_snapshot=grp_snap,
        group=grp,
        snapshot=snap,
        volume_type=vol_type)
    mock_rpc_create_group_from_src.assert_called_once_with(
        self.ctxt, grp, grp_snap)

    # Clean up DB fixtures in reverse-dependency order.
    vol2.destroy()
    grp.destroy()
    snap.destroy()
    vol1.destroy()
    grp_snap.destroy()
def setUp(self):
    """Build fake volumes/snapshot and an HNAS NFS driver under test."""
    super(HNASNFSDriverTest, self).setUp()
    self.context = context.get_admin_context()

    self.volume = fake_volume.fake_volume_obj(
        self.context,
        **_VOLUME)

    self.snapshot = self.instantiate_snapshot(_SNAPSHOT)

    self.volume_type = fake_volume.fake_volume_type_obj(
        None,
        **{'name': 'silver'}
    )
    self.clone = fake_volume.fake_volume_obj(
        None,
        **{'id': fake.VOLUME2_ID,
           'size': 128,
           'host': 'host1@hnas-nfs-backend#default',
           'volume_type': 'default',
           'provider_location': 'hnas'})

    # xml parsed from utils
    self.parsed_xml = {
        'username': '******',
        'password': '******',
        'hnas_cmd': 'ssc',
        'ssh_port': '22',
        'services': {
            'default': {
                'hdp': '172.24.49.21:/fs-cinder',
                'volume_type': 'default',
                'label': 'svc_0',
                'ctl': '1',
                'export': {
                    'fs': 'fs-cinder',
                    'path': '/export-cinder/volume'
                }
            },
        },
        'cluster_admin_ip0': None,
        'ssh_private_key': None,
        'chap_enabled': 'True',
        'mgmt_ip0': '172.17.44.15',
        'ssh_enabled': None
    }

    # Short-circuit config parsing so the driver sees parsed_xml above.
    self.mock_object(hnas_utils, 'read_cinder_conf',
                     mock.Mock(return_value=self.parsed_xml))

    # Dead code removed: the original created a first configuration Mock
    # (with hds_hnas_nfs_config_file = 'fake.xml') that was immediately
    # overwritten by this one, so only the second mock ever reached the
    # driver.
    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.max_over_subscription_ratio = 20.0
    self.configuration.reserved_percentage = 0
    self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml'
    self.configuration.nfs_shares_config = 'fake_nfs_share.xml'
    self.configuration.num_shell_tries = 2

    self.driver = nfs.HNASNFSDriver(configuration=self.configuration)
def test_create_group_from_snap(self, mock_volume_get_all,
                                mock_rpc_create_group_from_src,
                                mock_snap_get_all, mock_group_snap_get,
                                mock_volume_api_create, mock_mapping_create,
                                mock_get_volume_type):
    """Group creation from a group snapshot issues the expected calls."""
    # Fixtures: volume type, the source group snapshot and its member
    # snapshot taken from a volume in the source group.
    volume_type = fake_volume.fake_volume_type_obj(
        self.ctxt, id=fake.VOLUME_TYPE_ID, name='fake_volume_type')
    mock_get_volume_type.return_value = volume_type

    group_snapshot = utils.create_group_snapshot(
        self.ctxt, fake.GROUP_ID, group_type_id=fake.GROUP_TYPE_ID,
        status=fields.GroupStatus.CREATING)
    mock_group_snap_get.return_value = group_snapshot

    source_volume = utils.create_volume(
        self.ctxt, availability_zone='nova',
        volume_type_id=volume_type['id'], group_id=fake.GROUP_ID)
    member_snapshot = utils.create_snapshot(
        self.ctxt, source_volume.id, volume_type_id=volume_type['id'],
        status=fields.GroupStatus.CREATING)
    mock_snap_get_all.return_value = [member_snapshot]

    # Destination group, built from the group snapshot.
    group = utils.create_group(
        self.ctxt, group_type_id=fake.GROUP_TYPE_ID,
        volume_type_ids=[volume_type['id']], availability_zone='nova',
        name="test_group", description="this is a test group",
        group_snapshot_id=group_snapshot.id,
        status=fields.GroupStatus.CREATING)

    # The volume the API should report as the new group's member.
    member_volume = utils.create_volume(
        self.ctxt, availability_zone=group.availability_zone,
        volume_type_id=volume_type['id'], group_id=group.id,
        snapshot_id=member_snapshot.id)
    mock_volume_get_all.return_value = [member_volume]

    # Exercise the code under test.
    self.group_api._create_group_from_group_snapshot(self.ctxt, group,
                                                     group_snapshot.id)

    # One volume create per member snapshot, then a single RPC.
    mock_volume_api_create.assert_called_once_with(
        self.ctxt, 1, None, None,
        availability_zone=group.availability_zone,
        group_snapshot=group_snapshot, group=group,
        snapshot=member_snapshot, volume_type=volume_type)
    mock_rpc_create_group_from_src.assert_called_once_with(
        self.ctxt, group, group_snapshot)

    # Tear down DB fixtures in reverse-dependency order.
    member_volume.destroy()
    group.destroy()
    member_snapshot.destroy()
    source_volume.destroy()
    group_snapshot.destroy()