def __init__(self, volume_driver=None, *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    driver_path = volume_driver or FLAGS.volume_driver
    # Translate legacy driver paths to their current locations.
    driver_path = MAPPING.get(driver_path, driver_path)
    self.driver = importutils.import_object(driver_path)
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    # NOTE(vish): Implementation specific db handling is done
    #             by the driver.
    self.driver.db = self.db
    self._last_volume_stats = []
def __init__(self, volume_driver=None, *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    driver_path = volume_driver or FLAGS.volume_driver
    if driver_path in MAPPING:
        # Legacy path: warn the operator and translate it.
        LOG.warn(_("Driver path %s is deprecated, update your "
                   "configuration to the new path."), driver_path)
        self.driver = importutils.import_object(MAPPING[driver_path])
    else:
        self.driver = importutils.import_object(driver_path)
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    # NOTE(vish): Implementation specific db handling is done
    #             by the driver.
    self.driver.db = self.db
def test_backup_manager_driver_name(self):
    """Test mapping between backup services and backup drivers."""
    # The docstring previously opened with four quotes, leaking a stray
    # '"' into the rendered docstring; fixed to a normal triple quote.
    cfg.CONF.set_override('backup_driver', "cinder.backup.services.swift")
    backup_mgr = \
        importutils.import_object(CONF.backup_manager)
    self.assertEqual('cinder.backup.drivers.swift',
                     backup_mgr.driver_name)
def __init__(self, volume_driver=None, service_name=None, *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" # update_service_capabilities needs service_name to be volume super(VolumeManager, self).__init__(service_name='volume', *args, **kwargs) self.configuration = Configuration(volume_manager_opts, config_group=service_name) self._tp = GreenPool() if not volume_driver: # Get from configuration, which will get the default # if its not using the multi backend volume_driver = self.configuration.volume_driver if volume_driver in MAPPING: LOG.warn( _("Driver path %s is deprecated, update your " "configuration to the new path."), volume_driver) volume_driver = MAPPING[volume_driver] if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver': # Deprecated in Havana # Not handled in MAPPING because it requires setting a conf option LOG.warn( _("ThinLVMVolumeDriver is deprecated, please configure " "LVMISCSIDriver and lvm_type=thin. Continuing with " "those settings.")) volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver' self.configuration.lvm_type = 'thin' self.driver = importutils.import_object( volume_driver, configuration=self.configuration, db=self.db)
def __init__(self, volume_driver=None, service_name=None, *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" # update_service_capabilities needs service_name to be volume super(VolumeManager, self).__init__(service_name='volume', *args, **kwargs) self.configuration = Configuration(volume_manager_opts, config_group=service_name) self._tp = GreenPool() if not volume_driver: # Get from configuration, which will get the default # if its not using the multi backend volume_driver = self.configuration.volume_driver if volume_driver in MAPPING: LOG.warn(_("Driver path %s is deprecated, update your " "configuration to the new path."), volume_driver) volume_driver = MAPPING[volume_driver] if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver': # Deprecated in Havana # Not handled in MAPPING because it requires setting a conf option LOG.warn(_("ThinLVMVolumeDriver is deprecated, please configure " "LVMISCSIDriver and lvm_type=thin. Continuing with " "those settings.")) volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver' self.configuration.lvm_type = 'thin' self.driver = importutils.import_object( volume_driver, configuration=self.configuration, db=self.db)
def __init__(self, service_name=None, *args, **kwargs):
    """Load the backup service and borrow the volume manager's driver."""
    self.az = CONF.storage_availability_zone
    # Backup service module named by the class-level driver_name.
    self.service = importutils.import_module(self.driver_name)
    # Reuse the volume manager's driver for volume attach/detach.
    self.volume_manager = importutils.import_object(CONF.volume_manager)
    self.driver = self.volume_manager.driver
    super(BackupManager, self).__init__(service_name="backup",
                                        *args, **kwargs)
    self.driver.db = self.db
def _setup_volume_drivers(self):
    """Create one volume manager per enabled backend, or a single default."""
    if not CONF.enabled_backends:
        default = importutils.import_object(CONF.volume_manager)
        LOG.debug(_("Registering default backend %s.") % (default))
        self.volume_managers["default"] = default
        return
    for backend in CONF.enabled_backends:
        host = "%s@%s" % (CONF.host, backend)
        mgr = importutils.import_object(CONF.volume_manager,
                                        host=host,
                                        service_name=backend)
        backend_name = mgr.configuration.safe_get("volume_backend_name")
        LOG.debug(
            _("Registering backend %(backend)s (host=%(host)s "
              "backend_name=%(backend_name)s).") %
            {"backend": backend, "host": host,
             "backend_name": backend_name})
        self.volume_managers[backend] = mgr
def setUp(self):
    """Build the volume manager under test with a throwaway volumes dir."""
    super(VolumeTestCase, self).setUp()
    self.flags(connection_type='fake',
               volumes_dir=tempfile.mkdtemp())
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
def create_driver(
        storage_family, storage_protocol, *args, **kwargs):
    """Create an appropriate driver based on family and protocol.

    Fixes the original docstring, which opened with four quotes and so
    leaked a stray '"' into the rendered text.

    :param storage_family: NetApp storage family (matched case-insensitively)
    :param storage_protocol: protocol within the family; when None the
        family default from netapp_family_default is used
    :raises exception.InvalidInput: if the family, the protocol, or a
        default protocol cannot be resolved
    :returns: instantiated NetApp driver, forced into 'proxy' mode
    """
    fmt = {'storage_family': storage_family,
           'storage_protocol': storage_protocol}
    LOG.info(_('Requested unified config: %(storage_family)s and '
               '%(storage_protocol)s') % fmt)
    storage_family = storage_family.lower()
    family_meta = netapp_unified_plugin_registry.get(storage_family)
    if family_meta is None:
        raise exception.InvalidInput(
            reason=_('Storage family %s is not supported')
            % storage_family)
    if storage_protocol is None:
        # No protocol given: fall back to the family's default, which
        # may itself be missing.
        storage_protocol = netapp_family_default.get(storage_family)
        fmt['storage_protocol'] = storage_protocol
    if storage_protocol is None:
        raise exception.InvalidInput(
            reason=_('No default storage protocol found'
                     ' for storage family %(storage_family)s') % fmt)
    storage_protocol = storage_protocol.lower()
    driver_loc = family_meta.get(storage_protocol)
    if driver_loc is None:
        raise exception.InvalidInput(
            reason=_('Protocol %(storage_protocol)s is not supported'
                     ' for storage family %(storage_family)s') % fmt)
    NetAppDriverFactory.check_netapp_driver(driver_loc)
    kwargs = kwargs or {}
    # Unified drivers always run in proxy mode.
    kwargs['netapp_mode'] = 'proxy'
    driver = importutils.import_object(driver_loc, *args, **kwargs)
    LOG.info(_('NetApp driver of family %(storage_family)s and protocol'
               ' %(storage_protocol)s loaded') % fmt)
    return driver
def get_active_zone_set(self, fabric_ip, fabric_user, fabric_pwd,
                        fabric_port):
    """Gets active zone config from fabric."""
    cfgmap = {}
    conn = None
    try:
        LOG.debug("Southbound connector:"
                  " %s", self.configuration.brcd_sb_connector)
        # Build the configured Brocade southbound connector for this
        # fabric's credentials.
        conn = importutils.import_object(
            self.configuration.brcd_sb_connector,
            ipaddress=fabric_ip, username=fabric_user,
            password=fabric_pwd, port=fabric_port)
        if not conn.is_supported_firmware():
            msg = _("Unsupported firmware on switch %s. Make sure "
                    "switch is running firmware v6.4 or higher"
                    ) % fabric_ip
            LOG.error(msg)
            raise exception.FCZoneDriverException(msg)
        cfgmap = conn.get_active_zone_set()
        conn.cleanup()
    except exception.BrocadeZoningCliException as brocade_ex:
        # Re-wrap vendor CLI failures in the generic zone-driver error.
        raise exception.FCZoneDriverException(brocade_ex)
    except Exception as e:
        msg = (_("Failed to access active zoning configuration:%s") % e)
        LOG.error(msg)
        raise exception.FCZoneDriverException(msg)
    # NOTE(review): conn.cleanup() is skipped when an exception fires
    # above -- confirm the connector tolerates not being cleaned up.
    LOG.debug("Active zone set from fabric: %s", cfgmap)
    return cfgmap
def setUp(self):
    """Build a backup manager bound to a fake host for each test."""
    super(BackupTestCase, self).setUp()
    self.flags(connection_type="fake",
               volumes_dir=tempfile.mkdtemp())
    self.backup_mgr = importutils.import_object(FLAGS.backup_manager)
    self.backup_mgr.host = "testhost"
    self.ctxt = context.get_admin_context()
def __init__(self, service_name=None, *args, **kwargs):
    """Load the backup service and share the volume manager's driver."""
    self.az = FLAGS.storage_availability_zone
    self.service = importutils.import_module(FLAGS.backup_service)
    # Piggyback on the volume manager's driver for volume operations.
    self.volume_manager = importutils.import_object(FLAGS.volume_manager)
    self.driver = self.volume_manager.driver
    super(BackupManager, self).__init__(service_name='backup',
                                        *args, **kwargs)
    self.driver.db = self.db
def setUp(self):
    """Create the backup manager fixture on a fake test host."""
    super(BackupTestCase, self).setUp()
    self.flags(connection_type='fake',
               volumes_dir=tempfile.mkdtemp())
    self.backup_mgr = importutils.import_object(FLAGS.backup_manager)
    self.backup_mgr.host = 'testhost'
    self.ctxt = context.get_admin_context()
def setUp(self):
    """Create an initialized backup manager for each test."""
    super(BackupTestCase, self).setUp()
    self.flags(volumes_dir=tempfile.mkdtemp())
    self.backup_mgr = importutils.import_object(CONF.backup_manager)
    self.backup_mgr.host = "testhost"
    self.ctxt = context.get_admin_context()
    # Mark the driver initialized so manager calls are not rejected.
    self.backup_mgr.driver.set_initialized()
def test_backup_manager_driver_name(self):
    """Test mapping between backup services and backup drivers."""
    # Fixes: (1) the docstring opened with four quotes, leaking a stray
    # '"' into the docstring; (2) the old setting was only restored when
    # the assertion passed -- restore it in a finally block so a failure
    # here cannot poison later tests.
    old_setting = CONF.backup_driver
    setattr(cfg.CONF, "backup_driver", "cinder.backup.services.swift")
    try:
        backup_mgr = importutils.import_object(CONF.backup_manager)
        self.assertEqual("cinder.backup.drivers.swift",
                         backup_mgr.driver_name)
    finally:
        setattr(cfg.CONF, "backup_driver", old_setting)
def _build_driver(self):
    """Instantiate the configured vendor-specific FC zone driver."""
    driver_path = self.configuration.zone_driver
    LOG.debug("Zone Driver from config: {%s}", driver_path)
    # Initialize vendor specific implementation of FCZoneDriver
    self.driver = importutils.import_object(
        driver_path,
        configuration=config.Configuration(zone_manager_opts,
                                           'fc-zone-manager'))
def setUp(self):
    """Set up a volume manager whose driver is replaced by a mock."""
    super(VolumeReplicationTestCase, self).setUp()
    # Plain and admin request contexts for exercising policy paths.
    self.ctxt = context.RequestContext('user', 'fake', False)
    self.adm_ctxt = context.RequestContext('admin', 'fake', True)
    self.manager = importutils.import_object(CONF.volume_manager)
    self.manager.host = 'test_host'
    self.manager.stats = {'allocated_capacity_gb': 0}
    # Swap the real driver for a mock so replication calls can be
    # asserted without a backend.
    # NOTE(review): the patcher is started but not stopped here --
    # confirm tearDown or base-class cleanup stops it.
    self.driver_patcher = mock.patch.object(self.manager, 'driver')
    self.driver = self.driver_patcher.start()
def _setup_volume_drivers(self):
    """Register a volume manager per enabled backend, or one default."""
    if not CONF.enabled_backends:
        default = importutils.import_object(CONF.volume_manager)
        LOG.debug("Registering default backend %s." % (default))
        self.volume_managers['default'] = default
        return
    for backend in CONF.enabled_backends:
        host = "%s@%s" % (CONF.host, backend)
        mgr = importutils.import_object(CONF.volume_manager,
                                        host=host,
                                        service_name=backend)
        backend_name = mgr.configuration.safe_get('volume_backend_name')
        LOG.debug("Registering backend %(backend)s (host=%(host)s "
                  "backend_name=%(backend_name)s)." %
                  {'backend': backend, 'host': host,
                   'backend_name': backend_name})
        self.volume_managers[backend] = mgr
def setUp(self):
    """Wire up a volume manager with fake image and notifier services."""
    super(VolumeTestCase, self).setUp()
    self.flags(connection_type="fake",
               volumes_dir=tempfile.mkdtemp())
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    # Route notifications to the in-memory test notifier.
    self.stubs.Set(cinder.flags.FLAGS, "notification_driver",
                   "cinder.openstack.common.notifier.test_notifier")
    fake_image.stub_out_image_service(self.stubs)
    test_notifier.NOTIFICATIONS = []
def setUp(self):
    """Create an initialized backup manager fixture."""
    super(BackupTestCase, self).setUp()
    self.flags(volumes_dir=tempfile.mkdtemp())
    self.backup_mgr = importutils.import_object(CONF.backup_manager)
    self.backup_mgr.host = 'testhost'
    self.ctxt = context.get_admin_context()
    # Mark the driver ready so manager entry points do not refuse calls.
    self.backup_mgr.driver.set_initialized()
def setUp(self):
    """Prepare a volume manager plus fake identifiers for usage tests."""
    super(UsageInfoTestCase, self).setUp()
    self.flags(host='fake', notification_driver=["test"])
    self.volume = importutils.import_object(CONF.volume_manager)
    self.volume_size = 0
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.snapshot_id = 'fake'
    self.context = context.RequestContext(self.user_id, self.project_id)
def __init__(self, scheduler_driver=None, service_name=None,
             *args, **kwargs):
    """Load the scheduler driver named by the caller or by config."""
    driver_path = scheduler_driver or CONF.scheduler_driver
    self.driver = importutils.import_object(driver_path)
    super(SchedulerManager, self).__init__(*args, **kwargs)
def _build_driver(self):
    """Load the vendor-specific FC zone driver from configuration."""
    driver_path = self.configuration.zone_driver
    LOG.debug("Zone Driver from config: {%s}", driver_path)
    zone_config = config.Configuration(zone_manager_opts,
                                       'fc-zone-manager')
    # Initialize vendor specific implementation of FCZoneDriver
    self.driver = importutils.import_object(driver_path,
                                            configuration=zone_config)
def setUp(self):
    """Build a volume manager with stubbed iSCSI target and notifier."""
    super(VolumeTestCase, self).setUp()
    self.flags(connection_type="fake",
               volumes_dir=tempfile.mkdtemp(),
               notification_driver=[test_notifier.__name__])
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    self.stubs.Set(iscsi.TgtAdm, "_get_target", self.fake_get_target)
    fake_image.stub_out_image_service(self.stubs)
    test_notifier.NOTIFICATIONS = []
def _setup_logging_from_conf():
    """(Re)build the root logger's handlers and levels from CONF."""
    log_root = getLogger(None).logger
    # Iterate over a copy: removing from the live handlers list while
    # iterating it skips every other handler, leaving stale ones behind.
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)
    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not CONF.log_file:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        handler = importutils.import_object(
            "cinder.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))
    # Root level from debug/verbose flags.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Per-module overrides such as "amqplib=WARN".
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
def setUp(self):
    """Create a volume manager and reset captured notifications."""
    super(UsageInfoTestCase, self).setUp()
    self.flags(connection_type="fake",
               host="fake",
               notification_driver=[test_notifier.__name__])
    self.volume = importutils.import_object(CONF.volume_manager)
    self.user_id = self.project_id = self.snapshot_id = "fake"
    self.volume_size = 0
    self.context = context.RequestContext(self.user_id, self.project_id)
    test_notifier.NOTIFICATIONS = []
def __init__(self, quota_driver_class=None):
    """Initialize a Quota object."""
    driver = quota_driver_class or CONF.quota_driver
    # A string is a driver path; anything else is already an instance.
    if isinstance(driver, basestring):
        driver = importutils.import_object(driver)
    self._resources = {}
    self._driver = driver
def __init__(self, volume_driver=None, *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    driver_path = volume_driver or FLAGS.volume_driver
    self.driver = importutils.import_object(driver_path)
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    # NOTE(vish): Implementation specific db handling is done
    #             by the driver.
    self.driver.db = self.db
    self._last_volume_stats = []
def _get_matchmaker(*args, **kwargs):
    """Return the global matchmaker, creating it on first use.

    Translates the deprecated matchmaker.MatchMakerRing driver path to
    its matchmaker_ring replacement before importing.
    """
    global matchmaker
    if not matchmaker:
        mm = CONF.rpc_zmq_matchmaker
        if mm.endswith('matchmaker.MatchMakerRing'):
            # str.replace returns a new string; the original code
            # discarded the result, so the deprecated path was imported
            # anyway. Rebind mm to the translated path.
            mm = mm.replace('matchmaker', 'matchmaker_ring')
            LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
                       ' %(new)s instead') % dict(
                orig=CONF.rpc_zmq_matchmaker, new=mm))
        matchmaker = importutils.import_object(mm, *args, **kwargs)
    return matchmaker
def __init__(self, quota_driver_class=None):
    """Initialize a Quota object."""
    driver = quota_driver_class or FLAGS.quota_driver
    # A string names a driver class to import; otherwise it is already
    # an instantiated driver.
    if isinstance(driver, basestring):
        driver = importutils.import_object(driver)
    self._resources = {}
    self._driver = driver
def setUp(self):
    """Build the volume manager with stubbed target lookup and notifier."""
    super(VolumeTestCase, self).setUp()
    self.flags(connection_type='fake',
               volumes_dir=tempfile.mkdtemp(),
               notification_driver=[test_notifier.__name__])
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
    fake_image.stub_out_image_service(self.stubs)
    test_notifier.NOTIFICATIONS = []
def setUp(self):
    """Create the usage-info volume manager fixture."""
    super(UsageInfoTestCase, self).setUp()
    self.flags(connection_type='fake',
               host='fake',
               notification_driver=[test_notifier.__name__])
    self.volume = importutils.import_object(CONF.volume_manager)
    self.user_id = self.project_id = self.snapshot_id = 'fake'
    self.volume_size = 0
    self.context = context.RequestContext(self.user_id, self.project_id)
    test_notifier.NOTIFICATIONS = []
def setUp(self):
    """Point notifications at the test notifier and build a manager."""
    super(UsageInfoTestCase, self).setUp()
    self.flags(
        connection_type="fake",
        host="fake",
        notification_driver="cinder.openstack.common.notifier."
                            "test_notifier")
    self.stubs.Set(flags.FLAGS, "notification_driver",
                   "cinder.openstack.common.notifier.test_notifier")
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.user_id = self.project_id = self.snapshot_id = "fake"
    self.volume_size = 0
    self.context = context.RequestContext(self.user_id, self.project_id)
    test_notifier.NOTIFICATIONS = []
def setUp(self):
    """Build the backup manager with osprofiler tracing disabled."""
    super(BaseBackupTest, self).setUp()
    self.flags(volumes_dir=tempfile.mkdtemp())
    # Make the profiler's class decorator a pass-through while the
    # manager class is imported.
    with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
        mock_decorator = mock.MagicMock(side_effect=lambda value: value)
        mock_trace_cls.return_value = mock_decorator
        self.backup_mgr = importutils.import_object(CONF.backup_manager)
    self.backup_mgr.host = 'testhost'
    self.ctxt = context.get_admin_context()
    self.backup_mgr.driver.set_initialized()
def setUp(self):
    """Load the driver under test and stub out command execution."""
    super(DriverTestCase, self).setUp()
    self.flags(volume_driver=self.driver_name,
               volumes_dir=tempfile.mkdtemp())
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    self.output = ""
    # Fake _execute: return whatever self.output currently holds.
    self.volume.driver.set_execute(
        lambda _command, *_args, **_kwargs: (self.output, None))
def __init__(self, volume_driver=None, service_name=None,
             *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    # update_service_capabilities needs service_name to be volume
    super(VolumeManager, self).__init__(service_name="volume",
                                        *args, **kwargs)
    self.configuration = Configuration(volume_manager_opts,
                                       config_group=service_name)
    # Fall back to the (possibly multi-backend) configured driver.
    driver_path = volume_driver or self.configuration.volume_driver
    if driver_path in MAPPING:
        LOG.warn(_("Driver path %s is deprecated, update your "
                   "configuration to the new path."), driver_path)
        driver_path = MAPPING[driver_path]
    self.driver = importutils.import_object(
        driver_path,
        configuration=self.configuration,
        db=self.db)
def setUp(self):
    """Load the driver under test with fake execute and iSCSI target."""
    super(DriverTestCase, self).setUp()
    self.flags(volume_driver=self.driver_name,
               volumes_dir=tempfile.mkdtemp())
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    self.output = ""
    self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
    # Fake _execute: echo back whatever self.output currently holds.
    self.volume.driver.set_execute(
        lambda _command, *_args, **_kwargs: (self.output, None))
def __init__(self, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    self.configuration = kwargs.get('configuration', None)
    if self.configuration:
        self.configuration.append_config_values(zone_manager_opts)
    # NOTE(review): assumes a configuration object was supplied; with
    # configuration=None the next attribute access would fail -- confirm
    # callers always pass one.
    zone_driver = self.configuration.zone_driver
    LOG.debug(_("Zone Driver from config: {%s}"), zone_driver)
    zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager')
    # Initialize vendor specific implementation of FCZoneDriver
    self.driver = importutils.import_object(zone_driver,
                                            configuration=zm_config)
def setUp(self):
    """Build a GPFS-backed volume manager with all shell-outs faked."""
    super(GPFSDriverTestCase, self).setUp()
    # Scratch directories standing in for the GPFS mount point and the
    # image cache beneath it.
    self.volumes_path = tempfile.mkdtemp(prefix="gpfs_")
    self.images_dir = '%s/images' % self.volumes_path
    if not os.path.exists(self.volumes_path):
        os.mkdir(self.volumes_path)
    if not os.path.exists(self.images_dir):
        os.mkdir(self.images_dir)
    self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
    self.driver = GPFSDriver(configuration=conf.Configuration(None))
    self.driver.set_execute(self._execute_wrapper)
    self.flags(volume_driver=self.driver_name,
               gpfs_mount_point_base=self.volumes_path)
    self.volume = importutils.import_object(CONF.volume_manager)
    self.volume.driver.set_execute(self._execute_wrapper)
    self.volume.driver.set_initialized()
    self.volume.stats = dict(allocated_capacity_gb=0)
    # Stub every GPFS command helper so no real mmfs tooling is needed.
    self.stubs.Set(GPFSDriver, '_create_gpfs_snap',
                   self._fake_gpfs_snap)
    self.stubs.Set(GPFSDriver, '_create_gpfs_copy',
                   self._fake_gpfs_copy)
    self.stubs.Set(GPFSDriver, '_gpfs_redirect',
                   self._fake_gpfs_redirect)
    self.stubs.Set(GPFSDriver, '_is_gpfs_parent_file',
                   self._fake_is_gpfs_parent)
    self.stubs.Set(GPFSDriver, '_is_gpfs_path',
                   self._fake_is_gpfs_path)
    self.stubs.Set(GPFSDriver, '_delete_gpfs_file',
                   self._fake_delete_gpfs_file)
    self.stubs.Set(GPFSDriver, '_create_sparse_file',
                   self._fake_create_sparse_file)
    self.stubs.Set(GPFSDriver, '_allocate_file_blocks',
                   self._fake_allocate_file_blocks)
    self.stubs.Set(GPFSDriver, '_get_available_capacity',
                   self._fake_get_available_capacity)
    # Stub image conversion/inspection so no qemu binaries are invoked.
    self.stubs.Set(image_utils, 'qemu_img_info',
                   self._fake_qemu_qcow2_image_info)
    self.stubs.Set(image_utils, 'convert_image',
                   self._fake_convert_image)
    self.stubs.Set(image_utils, 'resize_image',
                   self._fake_qemu_image_resize)
    self.context = context.get_admin_context()
    self.context.user_id = 'fake'
    self.context.project_id = 'fake'
    CONF.gpfs_images_dir = self.images_dir
def __init__(self, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    self.configuration = kwargs.get('configuration', None)
    if self.configuration:
        self.configuration.append_config_values(zone_manager_opts)
    # NOTE(review): presumes configuration is always provided; a None
    # value would make the next line fail -- verify against callers.
    zone_driver = self.configuration.zone_driver
    LOG.debug(_("Zone Driver from config: {%s}"), zone_driver)
    zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager')
    # Initialize vendor specific implementation of FCZoneDriver
    self.driver = importutils.import_object(
        zone_driver,
        configuration=zm_config)
def setUp(self):
    """Capture driver log output into an in-memory stream."""
    super(DriverTestCase, self).setUp()
    self.flags(volume_driver=self.driver_name,
               logging_default_format_string="%(message)s")
    self.volume = importutils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    self.output = ""
    # Fake _execute: return whatever self.output currently holds.
    self.volume.driver.set_execute(
        lambda _command, *_args, **_kwargs: (self.output, None))
    self.stream = cStringIO.StringIO()
    logging.getLogger().logger.addHandler(
        logging.logging.StreamHandler(self.stream))
def __init__(self, scheduler_driver=None, service_name=None,
             *args, **kwargs):
    """Load the scheduler driver, upgrading deprecated choices."""
    driver_path = scheduler_driver or CONF.scheduler_driver
    legacy_drivers = ('cinder.scheduler.chance.ChanceScheduler',
                      'cinder.scheduler.simple.SimpleScheduler')
    if driver_path in legacy_drivers:
        # Both legacy schedulers are superseded by FilterScheduler.
        driver_path = ('cinder.scheduler.filter_scheduler.'
                       'FilterScheduler')
        LOG.deprecated(_('ChanceScheduler and SimpleScheduler have been '
                         'deprecated due to lack of support for advanced '
                         'features like: volume types, volume encryption,'
                         ' QoS etc. These two schedulers can be fully '
                         'replaced by FilterScheduler with certain '
                         'combination of filters and weighers.'))
    self.driver = importutils.import_object(driver_path)
    super(SchedulerManager, self).__init__(*args, **kwargs)
def setUp(self):
    """Build a GPFS-backed volume manager with all shell-outs faked."""
    super(GPFSDriverTestCase, self).setUp()
    # Scratch mount point and image cache directory for the fake GPFS.
    self.volumes_path = tempfile.mkdtemp(prefix="gpfs_")
    self.images_dir = '%s/images' % self.volumes_path
    if not os.path.exists(self.volumes_path):
        os.mkdir(self.volumes_path)
    if not os.path.exists(self.images_dir):
        os.mkdir(self.images_dir)
    self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
    self.driver = GPFSDriver(configuration=conf.Configuration(None))
    self.driver.set_execute(self._execute_wrapper)
    self.flags(volume_driver=self.driver_name,
               gpfs_mount_point_base=self.volumes_path)
    self.volume = importutils.import_object(CONF.volume_manager)
    self.volume.driver.set_execute(self._execute_wrapper)
    self.volume.driver.set_initialized()
    # Stub all GPFS command helpers so no real mmfs tooling is required.
    self.stubs.Set(GPFSDriver, '_create_gpfs_snap',
                   self._fake_gpfs_snap)
    self.stubs.Set(GPFSDriver, '_create_gpfs_copy',
                   self._fake_gpfs_copy)
    self.stubs.Set(GPFSDriver, '_gpfs_redirect',
                   self._fake_gpfs_redirect)
    self.stubs.Set(GPFSDriver, '_is_gpfs_parent_file',
                   self._fake_is_gpfs_parent)
    self.stubs.Set(GPFSDriver, '_is_gpfs_path',
                   self._fake_is_gpfs_path)
    self.stubs.Set(GPFSDriver, '_delete_gpfs_file',
                   self._fake_delete_gpfs_file)
    self.stubs.Set(GPFSDriver, '_create_sparse_file',
                   self._fake_create_sparse_file)
    self.stubs.Set(GPFSDriver, '_allocate_file_blocks',
                   self._fake_allocate_file_blocks)
    self.stubs.Set(GPFSDriver, '_get_available_capacity',
                   self._fake_get_available_capacity)
    # Stub image inspection/conversion so no qemu binaries are invoked.
    self.stubs.Set(image_utils, 'qemu_img_info',
                   self._fake_qemu_qcow2_image_info)
    self.stubs.Set(image_utils, 'convert_image',
                   self._fake_convert_image)
    self.stubs.Set(image_utils, 'resize_image',
                   self._fake_qemu_image_resize)
    self.context = context.get_admin_context()
    self.context.user_id = 'fake'
    self.context.project_id = 'fake'
    CONF.gpfs_images_dir = self.images_dir
def __init__(self, volume_driver=None, service_name=None,
             *args, **kwargs):
    """Load the driver from the one specified in args, or from flags."""
    # update_service_capabilities needs service_name to be volume
    super(VolumeManager, self).__init__(service_name='volume',
                                        *args, **kwargs)
    self.configuration = Configuration(volume_manager_opts,
                                       config_group=service_name)
    # Get from configuration, which will get the default
    # if its not using the multi backend
    chosen_driver = volume_driver or self.configuration.volume_driver
    if chosen_driver in MAPPING:
        LOG.warn(_("Driver path %s is deprecated, update your "
                   "configuration to the new path."), chosen_driver)
        chosen_driver = MAPPING[chosen_driver]
    self.driver = importutils.import_object(
        chosen_driver,
        configuration=self.configuration,
        db=self.db)
def get_device_mapping_from_network(self, initiator_list, target_list): """Get device mapping from FC network. Gets a filtered list of initiator ports and target ports for each SAN available. :param initiator_list list of initiator port WWN :param target_list list of target port WWN :return device wwn map in following format { <San name>: { 'initiator_port_wwn_list': ('200000051E55A100', '200000051E55A121'..) 'target_port_wwn_list': ('100000051E55A100', '100000051E55A121'..) } } :raise Exception when a lookup service implementation is not specified in cinder.conf:fc_san_lookup_service """ # Initialize vendor specific implementation of FCZoneDriver if (self.configuration.fc_san_lookup_service): lookup_service = self.configuration.fc_san_lookup_service LOG.debug("Lookup service to invoke: " "%s", lookup_service) self.lookup_service = importutils.import_object( lookup_service, configuration=self.configuration) else: msg = _("Lookup service not configured. Config option for " "fc_san_lookup_service need to specify a concrete " "implementation of lookup service") LOG.error(msg) raise exception.FCSanLookupServiceException(msg) try: device_map = self.lookup_service.get_device_mapping_from_network( initiator_list, target_list) except Exception as e: LOG.error(e) raise exception.FCSanLookupServiceException(e) return device_map
def _get_cisco_nsinfo(self, fabric):
    """Fetch the nameserver database from one Cisco fabric."""
    (fabric_ip, fabric_user, fabric_pwd, fabric_port,
     zoning_vsan) = (self._get_cisco_config(fabric))
    try:
        # Build the Cisco southbound connector scoped to this VSAN.
        conn = importutils.import_object(
            self.zm.driver.configuration.cisco_sb_connector,
            ipaddress=fabric_ip, username=fabric_user,
            password=fabric_pwd, port=fabric_port, vsan=zoning_vsan)
        nsinfo = conn.get_nameserver_info()
        LOG.debug("name server info from fabric: %s", nsinfo)
        conn.cleanup()
    except exception.CiscoZoningCliException:
        # CLI-level failures are logged but re-raised unchanged.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error getting show fcns database "
                              "info."))
    except Exception:
        # Anything else is wrapped in the generic zone-driver error.
        msg = ("Failed to get show fcns database info.")
        LOG.exception(msg)
        raise exception.FCZoneDriverException(msg)
    return nsinfo
def _get_cli_client(self, fabric):
    """Return the cached CLI client for *fabric*, creating it if absent."""
    fabric_conf = self.fabric_configs[fabric]
    fabric_ip = fabric_conf.safe_get('fc_fabric_address')
    fabric_user = fabric_conf.safe_get('fc_fabric_user')
    fabric_pwd = fabric_conf.safe_get('fc_fabric_password')
    fabric_port = fabric_conf.safe_get('fc_fabric_port')
    cli_client = None
    try:
        cli_client = self.sb_conn_map.get(fabric_ip)
        if not cli_client:
            LOG.debug("CLI client not found, creating for %s", fabric_ip)
            cli_client = importutils.import_object(
                self.configuration.brcd_sb_connector,
                ipaddress=fabric_ip,
                username=fabric_user,
                password=fabric_pwd,
                port=fabric_port)
            # Cache per fabric IP for subsequent calls.
            self.sb_conn_map[fabric_ip] = cli_client
    except Exception as e:
        LOG.error(e)
        msg = _("Failed to create sb connector for %s") % fabric_ip
        raise exception.FCZoneDriverException(msg)
    return cli_client
def get_zoning_status(self, fabric_ip, fabric_user, fabric_pwd,
                      fabric_port, zoning_vsan):
    """Gets zoneset status and mode."""
    statusmap = {}
    conn = None
    try:
        LOG.debug("Southbound connector: %s",
                  self.configuration.cisco_sb_connector)
        # Build the Cisco southbound connector scoped to this VSAN.
        conn = importutils.import_object(
            self.configuration.cisco_sb_connector,
            ipaddress=fabric_ip, username=fabric_user,
            password=fabric_pwd, port=fabric_port, vsan=zoning_vsan)
        statusmap = conn.get_zoning_status()
        conn.cleanup()
    except Exception as e:
        # All failures surface as the generic zone-driver error.
        msg = (_("Failed to access zoneset status:%s")
               % six.text_type(e))
        LOG.error(msg)
        raise exception.FCZoneDriverException(msg)
    LOG.debug("Zoneset status from fabric: %s", statusmap)
    return statusmap
def get_active_zone_set(self, fabric_ip, fabric_user, fabric_pwd,
                        fabric_port, zoning_vsan):
    """Gets active zoneset config for vsan."""
    cfgmap = {}
    conn = None
    try:
        LOG.debug("Southbound connector: %s",
                  self.configuration.cisco_sb_connector)
        # Build the Cisco southbound connector scoped to this VSAN.
        conn = importutils.import_object(
            self.configuration.cisco_sb_connector,
            ipaddress=fabric_ip, username=fabric_user,
            password=fabric_pwd, port=fabric_port, vsan=zoning_vsan)
        cfgmap = conn.get_active_zone_set()
        conn.cleanup()
    except Exception as e:
        # All failures surface as the generic zone-driver error.
        msg = (_("Failed to access active zoning configuration:%s")
               % six.text_type(e))
        LOG.error(msg)
        raise exception.FCZoneDriverException(msg)
    LOG.debug("Active zone set from fabric: %s", cfgmap)
    return cfgmap
def _get_matchmaker(*args, **kwargs):
    """Return the process-wide matchmaker, creating it lazily."""
    global matchmaker
    if not matchmaker:
        matchmaker = importutils.import_object(
            CONF.rpc_zmq_matchmaker, *args, **kwargs)
    return matchmaker