def test_lvm_max_over_subscription_ratio(self, global_value, lvm_value,
                                         expected_value):
    """The driver must expose the expected over-subscription ratio.

    Exercises the interaction between the global
    ``max_over_subscription_ratio`` and the LVM-specific
    ``lvm_max_over_subscription_ratio`` options.
    """
    cfg = conf.Configuration(fake_opt, 'fake_group')
    cfg.max_over_subscription_ratio = global_value
    cfg.lvm_max_over_subscription_ratio = lvm_value

    vg_mock = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False,
                                              None, 'default'))
    driver = lvm.LVMVolumeDriver(configuration=cfg, vg_obj=vg_mock, db=db)

    self.assertEqual(expected_value,
                     driver.configuration.max_over_subscription_ratio)
def setUp(self):
    """Prepare the ScaleIO test environment.

    Builds a ``ScaleIODriver`` backed by the shared configuration group
    and redirects ``requests.get``/``requests.post`` to ``do_request`` so
    no real HTTP traffic occurs.
    """
    super(TestScaleIODriver, self).setUp()
    self.configuration = conf.Configuration(driver.scaleio_opts,
                                            conf.SHARED_CONF_GROUP)
    self._set_overrides()
    self.driver = mocks.ScaleIODriver(configuration=self.configuration)

    # Intercept both HTTP verbs the driver uses.
    self.mock_object(requests, 'get', self.do_request)
    self.mock_object(requests, 'post', self.do_request)
def __init__(self):
    """Init conf client pool."""
    # Empty Configuration; all option values below are set directly
    # as fake test data rather than loaded from a config file.
    self.configuration = config.Configuration(None)
    self.client = None
    # Fake management endpoint and credentials.
    self.address = "192.168.200.100"
    self.user = "******"
    self.password = "******"
    # Fake storage pool identity.
    self.pool = "fake_pool_name"
    self.poolid = 1
    # Maps an initiator IQN to its comma-separated iSCSI portal IPs.
    self.iscsi_info = {
        "iqn.1994-05.com.redhat:899c5f9d15d": "1.1.1.1,1.1.1.2,1.1.1.3"
    }
    self.default_target_ips = ["1.1.1.1", "1.1.1.2", "1.1.1.3"]
    # Fake CHAP credentials.
    self.chap_username = "******"
    self.chap_password = "******"
def test_revert_thin_snapshot(self):
    """Reverting a thin-provisioned volume to a snapshot is unsupported."""
    cfg = conf.Configuration(fake_opt, 'fake_group')
    cfg.lvm_type = 'thin'
    driver = lvm.LVMVolumeDriver(configuration=cfg, db=db)

    volume = tests_utils.create_volume(self.context,
                                       display_name='fake_volume')
    snapshot = tests_utils.create_snapshot(self.context, volume.id)

    self.assertRaises(NotImplementedError,
                      driver.revert_to_snapshot,
                      self.context, volume, snapshot)
def get_fake_cmode_config(backend_name):
    """Return a Configuration for *backend_name* with NetApp opts registered.

    :param backend_name: config group name to attach the options to
    :returns: a ``configuration.Configuration`` with every NetApp cmode
        option family appended
    """
    config = configuration.Configuration(driver.volume_opts,
                                         config_group=backend_name)
    # Every NetApp option family the cmode driver consumes.
    netapp_opt_groups = (
        na_opts.netapp_proxy_opts,
        na_opts.netapp_connection_opts,
        na_opts.netapp_transport_opts,
        na_opts.netapp_basicauth_opts,
        na_opts.netapp_provisioning_opts,
        na_opts.netapp_cluster_opts,
        na_opts.netapp_san_opts,
        na_opts.netapp_replication_opts,
        na_opts.netapp_support_opts,
    )
    for opt_group in netapp_opt_groups:
        config.append_config_values(opt_group)
    return config
def create_configuration(self):
    """Register a fake Brocade fabric config under group 'BRCD_FAB_2'."""
    fc_fabric_opts = [
        cfg.StrOpt('fc_fabric_address', default='10.24.49.100', help=''),
        cfg.StrOpt('fc_fabric_user', default='admin', help=''),
        cfg.StrOpt('fc_fabric_password', default='password', help='',
                   secret=True),
        cfg.PortOpt('fc_fabric_port', default=22, help=''),
        cfg.StrOpt('principal_switch_wwn', default='100000051e55a100',
                   help=''),
    ]
    config = conf.Configuration(fc_fabric_opts, 'BRCD_FAB_2')
    self.fabric_configs = {'BRCD_FAB_2': config}
def test_auto_max_subscription_ratio_options(self, cfg_value, valid):
    """Validate regex checking of max_over_subscription_ratio values.

    Valid values must round-trip through ``set_override``; invalid ones
    must raise ``ValueError``.
    """

    def _apply_override(config, value):
        config.set_override('max_over_subscription_ratio', value)

    config = conf.Configuration(None)
    config.append_config_values(driver.volume_opts)

    if not valid:
        self.assertRaises(ValueError, _apply_override, config, cfg_value)
        return

    _apply_override(config, cfg_value)
    self.assertEqual(cfg_value,
                     config.safe_get('max_over_subscription_ratio'))
def __init__(self, **kwargs):
    """Load the driver from the one specified in args, or from flags.

    :param kwargs: may include ``configuration``; when present, zone
        manager options are appended to it and the configured
        ``zone_driver`` is instantiated against the 'fc-zone-manager'
        option group.
    """
    super(ZoneManager, self).__init__(**kwargs)
    self.configuration = kwargs.get('configuration', None)
    if self.configuration:
        self.configuration.append_config_values(zone_manager_opts)
        zone_driver = self.configuration.zone_driver
        # Debug-level log messages must not be translated per the
        # OpenStack i18n guidelines, so the _() wrapper is not used here.
        LOG.debug("Zone Driver from config: {%s}", zone_driver)
        zm_config = config.Configuration(zone_manager_opts,
                                         'fc-zone-manager')
        # Initialize vendor specific implementation of FCZoneDriver
        self.driver = importutils.import_object(zone_driver,
                                                configuration=zm_config)
def test_lvm_type_auto_no_lvs(self, *_unused_mocks):
    """With lvm_type='auto' and no LVs, setup must settle on 'thin'."""
    cfg = conf.Configuration(fake_opt, 'fake_group')
    cfg.lvm_type = 'auto'

    fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None,
                                    'default')
    driver = lvm.LVMVolumeDriver(configuration=cfg, vg_obj=fake_vg)
    driver.check_for_setup_error()

    self.assertEqual('thin', driver.configuration.lvm_type)
def setUp(self):
    """Create temp volume/image dirs, build a GPFS driver, stub helpers."""
    super(GPFSDriverTestCase, self).setUp()
    self.volumes_path = tempfile.mkdtemp(prefix="gpfs_")
    self.images_dir = '%s/images' % self.volumes_path
    # NOTE(review): mkdtemp already created volumes_path; the existence
    # checks are kept as defensive no-ops.
    if not os.path.exists(self.volumes_path):
        os.mkdir(self.volumes_path)
    if not os.path.exists(self.images_dir):
        os.mkdir(self.images_dir)
    self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
    self.driver = gpfs.GPFSDriver(configuration=conf.Configuration(None))
    self.driver.set_execute(self._execute_wrapper)
    # Fake cluster/device identity so no real GPFS commands are issued.
    self.driver._cluster_id = '123456'
    self.driver._gpfs_device = '/dev/gpfs'
    self.driver._storage_pool = 'system'
    self.flags(volume_driver=self.driver_name,
               gpfs_mount_point_base=self.volumes_path)
    # Stub out every GPFS filesystem operation with local fakes.
    self.stubs.Set(gpfs.GPFSDriver, '_create_gpfs_snap',
                   self._fake_gpfs_snap)
    self.stubs.Set(gpfs.GPFSDriver, '_create_gpfs_copy',
                   self._fake_gpfs_copy)
    self.stubs.Set(gpfs.GPFSDriver, '_gpfs_redirect',
                   self._fake_gpfs_redirect)
    self.stubs.Set(gpfs.GPFSDriver, '_is_gpfs_parent_file',
                   self._fake_is_gpfs_parent)
    self.stubs.Set(gpfs.GPFSDriver, '_is_gpfs_path',
                   self._fake_is_gpfs_path)
    self.stubs.Set(gpfs.GPFSDriver, '_delete_gpfs_file',
                   self._fake_delete_gpfs_file)
    self.stubs.Set(gpfs.GPFSDriver, '_create_sparse_file',
                   self._fake_create_sparse_file)
    self.stubs.Set(gpfs.GPFSDriver, '_allocate_file_blocks',
                   self._fake_allocate_file_blocks)
    self.stubs.Set(gpfs.GPFSDriver, '_get_available_capacity',
                   self._fake_get_available_capacity)
    # Stub image utilities so no qemu binaries are required.
    self.stubs.Set(image_utils, 'qemu_img_info',
                   self._fake_qemu_qcow2_image_info)
    self.stubs.Set(image_utils, 'convert_image',
                   self._fake_convert_image)
    self.stubs.Set(image_utils, 'resize_image',
                   self._fake_qemu_image_resize)
    self.context = context.get_admin_context()
    self.context.user_id = 'fake'
    self.context.project_id = 'fake'
    # Point the images directory at the per-test temp location.
    CONF.gpfs_images_dir = self.images_dir
def vmax_from_configuration(cluster_id=None, protocol=u'iSCSI',
                            config_file=u'/etc/flocker/vmax3.conf',
                            hosts=None, profiles=None,
                            compute_instance=None):
    """Build an ``EMCVmaxBlockDeviceAPI`` from an oslo-style config file.

    :param cluster_id: Flocker cluster identifier, passed through
    :param protocol: storage protocol name (default ``iSCSI``)
    :param config_file: path to the VMAX configuration file
    :param hosts: host connectivity data, passed through
    :param profiles: optional list of dicts with ``backend``/``name`` keys
        used to rename a backend in the resulting common map
    :param compute_instance: compute instance identifier, passed through
    :return: configured ``EMCVmaxBlockDeviceAPI`` instance
    """
    # NOTE(review): Python 2 only — relies on unicode() and the
    # deprecated inspect.getargspec().
    CONF(default_config_files=[config_file], args=[])
    CONF.register_opts(flocker_opts)
    # Register one option group per enabled backend before reading them.
    for backend in CONF.enabled_backends:
        CONF.register_group(cfg.OptGroup(name=backend))
        CONF.register_opts(backend_opts, group=backend)
    oslo_logging.setup(CONF, __name__)
    LOG.info(u'Logging to directory ' + unicode(CONF.log_dir))
    vmax_common = {}
    for backend in CONF.enabled_backends:
        local_conf = conf.Configuration(backend_opts, config_group=backend)
        local_conf.config_group = backend
        args = [protocol, '2.0.0', local_conf]
        # Older EMCVMAXCommon signatures did not accept a version string.
        if 'version' not in inspect.getargspec(EMCVMAXCommon.__init__).args:
            args = [protocol, local_conf]
        try:
            # A matching profile renames the key used for this backend.
            for p in (profiles if profiles is not None else []):
                if unicode(p['backend']) == unicode(backend):
                    backend = unicode(p['name'])
                    break
            vmax_common[backend] = EMCVMAXCommon(*args)
        except TypeError as e:
            # Constructor signature mismatch: log and skip this backend.
            LOG.error(unicode(e))
    return EMCVmaxBlockDeviceAPI(cluster_id, vmax_common=vmax_common,
                                 vmax_hosts=hosts,
                                 compute_instance=compute_instance,
                                 lock_path=CONF.get('lock_path'))
def test_unsupported_driver_disabled(self):
    """An unsupported zone driver must leave the manager uninitialized."""
    config = conf.Configuration(fc_zone_manager.zone_manager_opts,
                                'fc-zone-manager')
    config.fc_fabric_names = fabric_name
    config.enable_unsupported_driver = False

    def _import_stub(self, *args, **kwargs):
        driver_mock = mock.Mock(fc_zone_driver.FCZoneDriver)
        driver_mock.supported = False
        return driver_mock

    self.patch('oslo_utils.importutils.import_object', _import_stub)
    zm = fc_zone_manager.ZoneManager(configuration=config)

    self.assertFalse(zm.driver.supported)
    self.assertFalse(zm.initialized)
def test_default_initialize(self):
    """A default-constructed Unity driver exposes the expected defaults."""
    config = conf.Configuration(None)
    unity_driver = driver.UnityDriver(configuration=config)

    self.assertIsNone(config.unity_storage_pool_names)
    self.assertTrue(config.san_thin_provision)
    # SAN connection defaults.
    self.assertEqual('', config.san_ip)
    self.assertEqual('admin', config.san_login)
    self.assertEqual('', config.san_password)
    self.assertEqual('', config.san_private_key)
    self.assertEqual('', config.san_clustername)
    self.assertEqual(22, config.san_ssh_port)
    self.assertEqual(False, config.san_is_local)
    # SSH pool defaults.
    self.assertEqual(30, config.ssh_conn_timeout)
    self.assertEqual(1, config.ssh_min_pool_conn)
    self.assertEqual(5, config.ssh_max_pool_conn)
    self.assertEqual('iSCSI', unity_driver.protocol)
def setUp(self):
    """Wire up a fabric-mode zone manager over mocked driver and DB."""
    super(TestVolumeManager, self).setUp()
    self.configuration = conf.Configuration(None)
    self.configuration.set_default('fc_fabric_names', 'BRCD_FAB_4',
                                   'fc-zone-manager')
    self.configuration.zoning_mode = 'fabric'

    # Volume driver mock: both connection calls yield conn_info.
    self.driver = mock.Mock(driver.VolumeDriver)
    self.driver.initialize_connection.return_value = conn_info
    self.driver.terminate_connection.return_value = conn_info
    self.driver.create_export.return_value = None

    # DB mock returning minimal volume records.
    self.db = mock.Mock()
    self.db.volume_get.return_value = {'volume_type_id': None}
    self.db.volume_admin_metadata_get.return_value = {}

    self.context_mock = mock.Mock()
    self.context_mock.elevated.return_value = None

    self.zonemanager = fc_zone_manager.ZoneManager(
        configuration=self.configuration)
def setUp(self, mock_fcfcntr, mock_fcfd, mock_fcfc):
    """Initialise a test setup with a HyperScale driver on a fake DB."""
    mock_fcfcntr.return_value = None
    mock_fcfd.return_value = None
    mock_fcfc.return_value = None
    super(VRTSHyperScaleDriverTestCase, self).setUp()

    self.configuration = mock.Mock(conf.Configuration(None))
    self.configuration.reserved_percentage = 0
    self.context = context.get_admin_context()
    self.driver = vrts.HyperScaleDriver(db=FakeDb(),
                                        configuration=self.configuration)
    # Fake data-node identity used by the driver under test.
    self.driver.dn_routing_key = '{1234}'
    self.driver.datanode_ip = '192.0.2.1'

    self.volume = _stub_volume()
    self.snapshot = _stub_snapshot()
def _do_partner_setup(self):
    """Create a ZAPI client for the configured partner backend, if any."""
    partner_backend = self.configuration.netapp_partner_backend_name
    if not partner_backend:
        return

    partner_config = configuration.Configuration(
        na_opts.netapp_7mode_opts, partner_backend)
    partner_config.append_config_values(na_opts.netapp_connection_opts)
    partner_config.append_config_values(na_opts.netapp_basicauth_opts)
    partner_config.append_config_values(na_opts.netapp_transport_opts)

    self.partner_zapi_client = client_7mode.Client(
        None,
        transport_type=partner_config.netapp_transport_type,
        username=partner_config.netapp_login,
        password=partner_config.netapp_password,
        hostname=partner_config.netapp_server_hostname,
        port=partner_config.netapp_server_port,
        vfiler=None)
def setUp(self):
    """Prepare an HNAS configuration plus fake volume objects."""
    super(HNASUtilsTest, self).setUp()
    self.fake_conf = conf.Configuration(hnas_utils.CONF)

    # HNAS option overrides used by all tests in this case.
    overrides = (
        ('hnas_username', 'supervisor'),
        ('hnas_password', 'supervisor'),
        ('hnas_mgmt_ip0', '172.24.44.15'),
        ('hnas_svc0_pool_name', 'default'),
        ('hnas_svc0_hdp', 'easy-stack'),
        ('hnas_svc1_pool_name', 'FS-CinderDev1'),
        ('hnas_svc1_hdp', 'silver'),
    )
    for opt_name, opt_value in overrides:
        self.override_config(opt_name, opt_value)

    self.context = context.get_admin_context()
    self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
    self.volume_type = fake_volume.fake_volume_type_obj(
        None, id=fake_constants.VOLUME_TYPE_ID, name='silver')
def __init__(self, *args, **kwargs):
    """Set up fake volumes, iSCSI properties, and a CxtAdm target helper."""
    super(TestCxtAdmDriver, self).__init__(*args, **kwargs)
    # Bare configuration; append_config_values is stubbed to a no-op.
    self.configuration = conf.Configuration(None)
    self.configuration.append_config_values = mock.Mock(return_value=0)
    self.configuration.iscsi_ip_address = '10.9.8.7'
    self.cxt_subdir = cxt.CxtAdm.cxt_subdir
    # Fixed UUIDs reused across fixtures below.
    self.fake_id_1 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
    self.fake_id_2 = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
    self.target = cxt.CxtAdm(root_helper=utils.get_root_helper(),
                             configuration=self.configuration)
    self.fake_volume = 'volume-83c2e877-feed-46be-8435-77884fe55b45'
    # Fake volume record as stored in the DB.
    self.testvol_1 =\
        {'project_id': self.fake_id_1,
         'name': 'testvol',
         'size': 1,
         'id': self.fake_id_2,
         'volume_type_id': None,
         'provider_location': '10.9.8.7:3260 '
                              'iqn.2010-10.org.openstack:'
                              'volume-%s 0' % self.fake_id_2,
         'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                          'c76370d66b 2FE0CQ8J196R',
         'provider_geometry': '512 512',
         'created_at': timeutils.utcnow(),
         'host': 'fake_host@lvm#lvm'}
    # Connection properties the driver is expected to return.
    self.expected_iscsi_properties = \
        {'auth_method': 'CHAP',
         'auth_password': '******',
         'auth_username': '******',
         'encrypted': False,
         'logical_block_size': '512',
         'physical_block_size': '512',
         'target_discovered': False,
         'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                       self.fake_id_2,
         'target_lun': 0,
         'target_portal': '10.10.7.1:3260',
         'volume_id': self.fake_id_2}
    # Canned output mimicking a CXT iSCSI target scan.
    self.fake_iscsi_scan =\
        ('\n'
         'TARGET: iqn.2010-10.org.openstack:%s, id=1, login_ip=0\n'  # noqa
         ' [email protected]:3260,timeout=0\n'
         ' TargetDevice=/dev/stack-volumes-lvmdriver-1/%s,BLK,PROD=CHISCSI Target,SN=0N0743000000000,ID=0D074300000000000000000,WWN=:W00743000000000\n'  # noqa
         % (self.fake_volume, self.fake_volume))
def __init__(self):
    """Init conf client pool sds_client."""
    self.configuration = config.Configuration(None)
    # Register the SDS driver and SAN option families.
    self.configuration.append_config_values(sds_driver.sds_opts)
    self.configuration.append_config_values(san.san_opts)
    self.configuration.suppress_requests_ssl_warnings = True
    self.client = None
    self.poolid = 1
    self.VERSION = '1.0'
    # Fake management endpoint and credentials.
    self.address = "192.168.200.100"
    self.user = "******"
    self.password = "******"
    # Fake storage pool identity.
    self.pool = "fake_pool_name"
    # Maps an initiator IQN to its comma-separated iSCSI portal IPs.
    self.iscsi_info = {
        "iqn.1994-05.com.redhat:899c5f9d15d": "1.1.1.1,1.1.1.2,1.1.1.3"
    }
    self.default_target_ips = ["1.1.1.1", "1.1.1.2", "1.1.1.3"]
    # Fake CHAP credentials as "username,password".
    self.default_chap_info = "1234567891234,123456789123"
def create_zone_manager():
    """If zoning is enabled, build the Zone Manager.

    :returns: a ``ZoneManager`` when ``zoning_mode`` is 'fabric',
        otherwise ``None``
    """
    config = configuration.Configuration(manager.volume_manager_opts)
    LOG.debug("Zoning mode: %s", config.safe_get('zoning_mode'))

    # Guard clause: zoning disabled means no manager at all.
    if config.safe_get('zoning_mode') != 'fabric':
        LOG.debug("FC Zone Manager not enabled in cinder.conf.")
        return None

    LOG.debug("FC Zone Manager enabled.")
    zone_mgr = fc_zone_manager.ZoneManager()
    LOG.info(
        _LI("Using FC Zone Manager %(zm_version)s,"
            " Driver %(drv_name)s %(drv_version)s."),
        {'zm_version': zone_mgr.get_version(),
         'drv_name': zone_mgr.driver.__class__.__name__,
         'drv_version': zone_mgr.driver.get_version()})
    return zone_mgr
def setUp(self):
    """Reset runtime state and build a Zadara VPSA driver on fake HTTP."""
    LOG.debug('Enter: setUp')
    super(ZadaraVPSADriverTestCase, self).setUp()

    # Reset the shared runtime state before every test.
    global RUNTIME_VARS
    RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS)

    self.configuration = conf.Configuration(None)
    self.configuration.append_config_values(zadara_opts)
    self.configuration.reserved_percentage = 10
    self.configuration.zadara_user = '******'
    self.configuration.zadara_password = '******'
    self.configuration.zadara_vpsa_poolname = 'pool-0001'

    self.driver = ZadaraVPSAISCSIDriver(configuration=self.configuration)
    # Replace real HTTP(S) connections with fakes.
    self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection)
    self.stubs.Set(httplib, 'HTTPSConnection', FakeHTTPSConnection)
    self.driver.do_setup(None)
def test_unsupported_driver_enabled(self):
    """With unsupported drivers allowed, the manager still initializes."""
    config = conf.Configuration(None)
    config.fc_fabric_names = fabric_name

    def _import_stub(self, *args, **kwargs):
        driver_mock = mock.Mock(fc_zone_driver.FCZoneDriver)
        driver_mock.supported = False
        return driver_mock

    self.patch('oslo_utils.importutils.import_object', _import_stub)

    with mock.patch(
            'cinder.volume.configuration.Configuration') as mock_config:
        mock_config.return_value.zone_driver = 'test'
        mock_config.return_value.enable_unsupported_driver = True
        zm = fc_zone_manager.ZoneManager(configuration=config)

        self.assertFalse(zm.driver.supported)
        self.assertTrue(zm.initialized)
def _set_unique_fqdn_override(self, value, in_shared):
    """Override the unique_fqdn_network configuration option.

    Meant for driver tests that use a Mock for their driver
    configuration instead of a real Oslo Conf.

    :param value: value to set for ``unique_fqdn_network``
    :param in_shared: when True, override in the shared config group
        instead of the per-driver group
    :returns: a fresh ``Configuration`` with the option registered
    """
    # No real oslo config exists for the driver, so no default
    # initialization happened: create the group and register the option.
    cfg.CONF.register_group(cfg.OptGroup('driver_cfg'))
    driver_config = configuration.Configuration([],
                                                config_group='driver_cfg')
    driver_config.append_config_values(vol_driver.fqdn_opts)

    # Apply the override for this test and undo it on cleanup.
    group = configuration.SHARED_CONF_GROUP if in_shared else 'driver_cfg'
    self.addCleanup(CONF.clear_override, 'unique_fqdn_network',
                    group=group)
    cfg.CONF.set_override('unique_fqdn_network', value, group=group)
    return driver_config
def convert(cinder_source, yaml_dest=None):
    """Convert a Cinder config file into a YAML backend description.

    :param cinder_source: path to the cinder.conf to read
    :param yaml_dest: optional path to write the resulting YAML to
    :returns: dict with a 'backends' list of {option: value} mappings,
        one per enabled backend
    """
    result_cfgs = []

    if not path.exists(cinder_source):
        raise Exception("Cinder config file %s doesn't exist" %
                        cinder_source)

    # Manually parse the Cinder configuration file so we know which
    # options are set.
    parser = configparser.ConfigParser()
    parser.read(cinder_source)
    enabled_backends = parser.get('DEFAULT', 'enabled_backends')
    backends = [name.strip() for name in enabled_backends.split(',')
                if name]

    # Load the same file through oslo so option values are resolved.
    volume.CONF(('--config-file', cinder_source), project='cinder')

    for backend in backends:
        options_present = parser.options(backend)

        # Dynamically loading the driver triggers adding the specific
        # configuration options to the backend_defaults section
        cfg = config.Configuration(manager.volume_backend_opts,
                                   config_group=backend)
        driver_ns = cfg.volume_driver.rsplit('.', 1)[0]
        __import__(driver_ns)

        # Use the backend_defaults section to extract the configuration
        # for options that are present in the backend section and add
        # them to the backend section.
        opts = volume.CONF._groups['backend_defaults']._opts
        known_present_options = [opt for opt in options_present
                                 if opt in opts]
        volume_opts = [opts[option]['opt']
                       for option in known_present_options]
        cfg.append_config_values(volume_opts)

        # Now retrieve the options that are set in the configuration file.
        result_cfgs.append({option: cfg.safe_get(option)
                            for option in known_present_options})

    result = {'backends': result_cfgs}
    if yaml_dest:
        # Write the YAML to the destination
        with open(yaml_dest, 'w') as f:
            yaml.dump(result, f)
    return result
def create_configuration(self):
    """Register a fake Cisco fabric config under group 'CISCO_FAB_2'."""
    fc_fabric_opts = [
        cfg.StrOpt('cisco_fc_fabric_address', default='172.24.173.142',
                   help=''),
        cfg.StrOpt('cisco_fc_fabric_user', default='admin', help=''),
        cfg.StrOpt('cisco_fc_fabric_password', default='admin1234',
                   help='', secret=True),
        cfg.IntOpt('cisco_fc_fabric_port', default=22, help=''),
        cfg.StrOpt('cisco_zoning_vsan', default='304', help=''),
    ]
    config = conf.Configuration(fc_fabric_opts, 'CISCO_FAB_2')
    self.fabric_configs = {'CISCO_FAB_2': config}
def test_check_for_setup_error(self):
    """check_for_setup_error passes when the configured VG exists.

    Also verifies the check succeeds while a backup record referencing
    a volume on this host is present in the database.
    """

    def get_all_volume_groups(vg):
        return [{'name': 'cinder-volumes'}]

    # Stub VG discovery so the driver sees its configured group.
    # (The original installed this stub twice; once is sufficient.)
    self.stubs.Set(volutils, 'get_all_volume_groups',
                   get_all_volume_groups)

    vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None,
                                   'default')
    configuration = conf.Configuration(fake_opt, 'fake_group')
    lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
                                     vg_obj=vg_obj, db=db)
    lvm_driver.delete_snapshot = mock.Mock()

    volume = tests_utils.create_volume(self.context,
                                       host=socket.gethostname())
    volume_id = volume['id']

    # Backup record referencing the volume above.
    backup = {
        'volume_id': volume_id,
        'user_id': fake.USER_ID,
        'project_id': fake.PROJECT_ID,
        'host': socket.gethostname(),
        'availability_zone': '1',
        'display_name': 'test_check_for_setup_error',
        'display_description': 'test_check_for_setup_error',
        'container': 'fake',
        'status': fields.BackupStatus.CREATING,
        'fail_reason': '',
        'service': 'fake',
        'parent_id': None,
        'size': 5 * 1024 * 1024,
        'object_count': 22,
    }
    db.backup_create(self.context, backup)

    lvm_driver.check_for_setup_error()
def __init__(self, *args, **kwargs):
    """Build a ScaleIO driver wired to fake connection settings."""
    configuration = conf.Configuration([
        cfg.StrOpt('fake'),
    ], None)

    # Replace the defaults with values pointing at the fake backend.
    fake_overrides = (
        ('san_ip', '127.0.0.1'),
        ('sio_rest_server_port', '8888'),
        ('san_login', 'test'),
        ('san_password', 'pass'),
        ('sio_storage_pool_id', 'test_pool'),
        ('sio_protection_domain_id', 'test_domain'),
        ('sio_storage_pools', 'test_domain:test_pool'),
    )
    for opt_name, opt_value in fake_overrides:
        configuration.set_override(opt_name, override=opt_value)

    super(ScaleIODriver, self).__init__(configuration=configuration,
                                        *args, **kwargs)
def create_configuration_eseries():
    """Return a Configuration pre-populated for an E-Series backend."""
    config = conf.Configuration(None)

    # Register the NetApp option families the E-Series driver reads.
    for opt_group in (na_opts.netapp_connection_opts,
                      na_opts.netapp_transport_opts,
                      na_opts.netapp_basicauth_opts,
                      na_opts.netapp_provisioning_opts,
                      na_opts.netapp_eseries_opts):
        config.append_config_values(opt_group)

    # Fake connection values for the test backend.
    config.netapp_storage_protocol = 'iscsi'
    config.netapp_login = '******'
    config.netapp_password = '******'
    config.netapp_server_hostname = '127.0.0.1'
    config.netapp_transport_type = 'http'
    config.netapp_server_port = '8080'
    config.netapp_storage_pools = 'DDP'
    config.netapp_storage_family = 'eseries'
    config.netapp_sa_password = '******'
    config.netapp_controller_ips = '10.11.12.13,10.11.12.14'
    config.netapp_webservice_path = '/devmgr/v2'
    config.netapp_enable_multiattach = False
    return config
def setUp(self):
    """Build a volume manager whose driver executes a fake command."""
    super(BaseDriverTestCase, self).setUp()
    vol_tmpdir = tempfile.mkdtemp()
    self.flags(volume_driver=self.driver_name, volumes_dir=vol_tmpdir)
    self.volume = importutils.import_object(CONF.volume_manager)
    self.context = context.get_admin_context()
    self.output = ""
    self.configuration = conf.Configuration(None)
    # Pretend the volume group always exists.
    self.mock_object(brick_lvm.LVM, '_vg_exists', lambda x: True)

    def _execute_stub(_command, *_args, **_kwargs):
        """Fake _execute."""
        return self.output, None

    exec_patcher = mock.patch.object(self.volume.driver, '_execute',
                                     _execute_stub)
    exec_patcher.start()
    self.addCleanup(exec_patcher.stop)

    self.volume.driver.set_initialized()
    self.addCleanup(self._cleanup)
def get_backend_configuration(backend_name, backend_opts=None):
    """Get a configuration object for a specific backend.

    :param backend_name: name of the backend stanza in the config file
    :param backend_opts: optional extra options to register
    :returns: a ``Configuration`` bound to the backend's config group
    :raises exception.ConfigNotFound: if no such stanza exists
    """
    config_stanzas = CONF.list_all_sections()
    if backend_name not in config_stanzas:
        msg = _("Could not find backend stanza %(backend_name)s in "
                "configuration. Available stanzas are %(stanzas)s")
        raise exception.ConfigNotFound(
            message=msg % {"stanzas": config_stanzas,
                           "backend_name": backend_name})

    backend_config = configuration.Configuration(
        driver.volume_opts, config_group=backend_name)
    if backend_opts:
        backend_config.append_config_values(backend_opts)
    return backend_config