def __init__(self):
    self.queryclient = LazyLoader(importutils.import_class(
        'nova.scheduler.client.query.SchedulerQueryClient'))
    self.reportclient = LazyLoader(importutils.import_class(
        'nova.scheduler.client.report.SchedulerReportClient'))
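# Every snippet in this collection resolves a dotted path with
# importutils.import_class. A minimal sketch of what that helper does,
# modeled on the oslo importutils behaviour (simplified, not the exact
# upstream implementation): split on the last dot, import the module,
# then fetch the named attribute.
import sys


def import_class(import_str):
    """Return a class given a dotted 'module.path.ClassName' string."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    __import__(mod_str)
    try:
        return getattr(sys.modules[mod_str], class_str)
    except AttributeError:
        raise ImportError("Class %s cannot be found in module %s" %
                          (class_str, mod_str))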
def _get_response_code(self, req):
    req_method = req.environ['REQUEST_METHOD']
    controller = importutils.import_class('keystone.common.controller')
    code = None
    if isinstance(self, controller.V3Controller) and req_method == 'POST':
        code = (201, 'Created')
    return code
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_interval=None, periodic_fuzzy_delay=None,
             service_name=None, *args, **kwargs):
    super(Service, self).__init__()
    if not rpc.initialized():
        rpc.init(CONF)
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    manager_class = importutils.import_class(self.manager_class_name)
    manager_class = profiler.trace_cls("rpc")(manager_class)
    self.manager = manager_class(host=self.host,
                                 service_name=service_name,
                                 *args, **kwargs)
    self.report_interval = report_interval
    self.periodic_interval = periodic_interval
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.basic_config_check()
    self.saved_args, self.saved_kwargs = args, kwargs
    self.timers = []
    setup_profiler(binary, host)
def __init__(self, *args, **kwargs):
    """Initialize the driver."""

    super(XIVDS8KDriver, self).__init__(*args, **kwargs)

    self.configuration.append_config_values(xiv_ds8k_opts)

    proxy = importutils.import_class(self.configuration.xiv_ds8k_proxy)

    # NOTE: All Array specific configurations are prefixed with:
    # "xiv_ds8k_array_"
    # These additional flags should be specified in the cinder.conf
    # preferably in each backend configuration.

    self.xiv_ds8k_proxy = proxy(
        {
            "xiv_ds8k_user": self.configuration.san_login,
            "xiv_ds8k_pass": self.configuration.san_password,
            "xiv_ds8k_address": self.configuration.san_ip,
            "xiv_ds8k_vol_pool": self.configuration.san_clustername,
            "xiv_ds8k_connection_type":
                self.configuration.xiv_ds8k_connection_type,
            "xiv_chap": self.configuration.xiv_chap
        },
        LOG,
        exception,
        driver=self)
def start(self):
    LOG.debug('IPABackend start')
    self.request = requests.Session()
    authclassname = cfg.CONF[self.name].ipa_auth_driver_class
    authclass = importutils.import_class(authclassname)
    self.request.auth = \
        authclass(cfg.CONF[self.name].ipa_client_keytab,
                  cfg.CONF[self.name].ipa_host)
    ipa_base_url = cfg.CONF[self.name].ipa_base_url
    if ipa_base_url.startswith("http"):  # full URL
        self.baseurl = ipa_base_url
    else:  # assume relative to https://host[:port]
        self.baseurl = "https://" + cfg.CONF[self.name].ipa_host
        ipa_port = cfg.CONF[self.name].ipa_port
        if ipa_port != IPA_DEFAULT_PORT:
            self.baseurl += ":" + str(ipa_port)
        self.baseurl += ipa_base_url
    ipa_json_url = cfg.CONF[self.name].ipa_json_url
    if ipa_json_url.startswith("http"):  # full URL
        self.jsonurl = ipa_json_url
    else:  # assume relative to https://host[:port]
        self.jsonurl = self.baseurl + ipa_json_url
    xtra_hdrs = {'Content-Type': 'application/json',
                 'Referer': self.baseurl}
    self.request.headers.update(xtra_hdrs)
    self.request.verify = cfg.CONF[self.name].ipa_ca_cert
    self.ntries = cfg.CONF[self.name].ipa_connect_retries
    self.force = cfg.CONF[self.name].ipa_force_ns_use
def __init__(self, application, limits=None, limiter=None, **kwargs):
    """Initialize new `RateLimitingMiddleware`.

    `RateLimitingMiddleware` wraps the given WSGI application and
    sets up the given limits.

    @param application: WSGI application to wrap
    @param limits: String describing limits
    @param limiter: String identifying class for representing limits

    Other parameters are passed to the constructor for the limiter.
    """
    base_wsgi.Middleware.__init__(self, application)

    # Select the limiter class
    if limiter is None:
        limiter = Limiter
    else:
        limiter = importutils.import_class(limiter)

    # Parse the limits, if any are provided
    if limits is not None:
        limits = limiter.parse_limits(limits)

    self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
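# Hedged usage sketch for the constructor above. The dotted limiter path and
# the limits string are placeholders, not values from the source; the real
# limits format depends on the chosen limiter's parse_limits().
# middleware = RateLimitingMiddleware(
#     wsgi_app,
#     limits="<limits spec understood by CustomLimiter.parse_limits>",
#     limiter="mypackage.limits.CustomLimiter")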
def do_request(self, method, **kwargs):
    """Simple do_request override.

    This method serializes the outgoing body and builds the command
    that will be sent.

    :params method: The remote python method to call
    :params kwargs: Dynamic parameters that will be passed to the remote
        method.
    """
    content = self.bulk_request([{'command': method, 'kwargs': kwargs}])

    # NOTE(flaper87): Return the first result if
    # a single command was executed.
    content = content[0]

    # NOTE(flaper87): Check if content is an error
    # and re-raise it if raise_exc is True. Before
    # checking if content contains the '_error' key,
    # verify if it is an instance of dict - since the
    # RPC call may have returned something different.
    if self.raise_exc and (isinstance(content, dict) and
                           '_error' in content):
        error = content['_error']
        try:
            exc_cls = imp.import_class(error['cls'])
            raise exc_cls(error['val'])
        except ImportError:
            # NOTE(flaper87): The exception
            # class couldn't be imported, using
            # a generic exception.
            raise exception.RPCError(**error)

    return content
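# For illustration only (inferred from the error-handling branch above, not a
# documented contract): a failed command is expected to come back as a dict
# shaped roughly like
#
#   {'_error': {'cls': 'glance.common.exception.NotFound',
#               'val': 'image 1234 not found'}}
#
# do_request() re-imports error['cls'] and re-raises it locally, falling back
# to exception.RPCError when the class cannot be imported.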
def __init__(self, app, conf, **local_conf):
    # Determine the context class to use
    self.ctxcls = RequestContext
    if 'context_class' in local_conf:
        self.ctxcls = importutils.import_class(local_conf['context_class'])

    super(ContextMiddleware, self).__init__(app)
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, db_allowed=True,
             *args, **kwargs):
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    # NOTE(russellb) We want to make sure to create the servicegroup API
    # instance early, before creating other things such as the manager,
    # that will also create a servicegroup API instance. Internally, the
    # servicegroup only allocates a single instance of the driver API and
    # we want to make sure that our value of db_allowed is there when it
    # gets created. For that to happen, this has to be the first instance
    # of the servicegroup API.
    self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.rpcserver = None
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    self.conductor_api = conductor.API(use_local=db_allowed)
    self.conductor_api.wait_until_ready(context.get_admin_context())
def __init__(self, plugin):
    self._resource_name = RESOURCE_NAME
    self._plugin = plugin
    self._driver = importutils.import_class(
        cfg.CONF.QUOTAS.quota_driver)
    self._update_extended_attributes = True
def __init__(self, virtapi):
    super(DockerDriver, self).__init__(virtapi)
    self._docker = None
    vif_class = importutils.import_class(CONF.docker.vif_driver)
    self.vif_driver = vif_class()
    self.firewall_driver = firewall.load_driver(
        default='nova.virt.firewall.NoopFirewallDriver')
def load_driver(plugin, ofc_manager):

    if (PROVIDER_OPENFLOW in ROUTER_DRIVER_MAP and
            not ofc_manager.driver.router_supported):
        LOG.warning(
            _('OFC does not support router with provider=%(provider)s, '
              'so removed it from supported provider '
              '(new router driver map=%(driver_map)s)'),
            {'provider': PROVIDER_OPENFLOW,
             'driver_map': ROUTER_DRIVER_MAP})
        del ROUTER_DRIVER_MAP[PROVIDER_OPENFLOW]

    if config.PROVIDER.default_router_provider not in ROUTER_DRIVER_MAP:
        LOG.error(_('default_router_provider %(default)s is not supported. '
                    'Please specify one of %(supported)s'),
                  {'default': config.PROVIDER.default_router_provider,
                   'supported': ROUTER_DRIVER_MAP.keys()})
        raise SystemExit(1)

    enabled_providers = (set(config.PROVIDER.router_providers +
                             [config.PROVIDER.default_router_provider]) &
                         set(ROUTER_DRIVER_MAP.keys()))

    for driver in enabled_providers:
        driver_klass = importutils.import_class(ROUTER_DRIVER_MAP[driver])
        ROUTER_DRIVERS[driver] = driver_klass(plugin, ofc_manager)

    LOG.info(_('Enabled router drivers: %s'), ROUTER_DRIVERS.keys())

    if not ROUTER_DRIVERS:
        LOG.error(_('No router provider is enabled. neutron-server '
                    'terminated! (supported=%(supported)s, '
                    'configured=%(config)s)'),
                  {'supported': ROUTER_DRIVER_MAP.keys(),
                   'config': config.PROVIDER.router_providers})
        raise SystemExit(1)
def _get_response_code(self, req): req_method = req.environ["REQUEST_METHOD"] controller = importutils.import_class("keystone.common.controller") code = None if isinstance(self, controller.V3Controller) and req_method == "POST": code = (201, "Created") return code
def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" for helper_str in self.configuration.share_helpers: share_proto, __, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper( self._execute, self._ssh_exec, self.configuration)
def _lookup_extension(self):
    try:
        return importutils.import_class(
            "powervc.common.client.extensions.%s.Client" % self.base_name)
    except ImportError:
        return None
def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" self._helpers = {} for helper_str in self.configuration.gpfs_share_helpers: share_proto, _, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper(self._gpfs_execute, self.configuration)
def InstanceActionAPI(*args, **kwargs):
    """Returns the 'InstanceActionAPI' class from the same module as the
    configured compute api.
    """
    compute_api_class_name = _get_compute_api_class_name()
    compute_api_class = importutils.import_class(compute_api_class_name)
    class_name = compute_api_class.__module__ + ".InstanceActionAPI"
    return importutils.import_object(class_name, *args, **kwargs)
def from_primitive(primitive):
    """Construct an object from primitive types.

    This is used while deserializing the object.
    """
    cls = importutils.import_class(primitive['chimera_object.name'])
    return cls._obj_from_primitive(primitive)
def get_store_location_class(self):
    """Returns the store location class that is used by this store."""
    if not self.store_location_class:
        class_name = "%s.StoreLocation" % (self.__module__)
        LOG.debug("Late loading location class %s", class_name)
        self.store_location_class = importutils.import_class(class_name)
    return self.store_location_class
def __init__(self, app, conf, **local_conf):
    cfg.CONF.register_opts(self.opts)

    # Determine the context class to use
    self.ctxcls = RequestContext
    if "context_class" in local_conf:
        self.ctxcls = importutils.import_class(local_conf["context_class"])

    super(ContextMiddleware, self).__init__(app)
def _get_sample_path(cls, name, dirname, suffix=''):
    parts = [dirname]
    parts.append('api_samples')
    if cls.all_extensions:
        parts.append('all_extensions')
    if cls.extension_name:
        alias = importutils.import_class(cls.extension_name).alias
        parts.append(alias)
    parts.append(name + "." + cls.ctype + suffix)
    return os.path.join(*parts)
def get_datasource_schema(cls, datasource_id):
    datasource = datasources_db.get_datasource(datasource_id)
    if not datasource:
        raise DatasourceNotFound(id=datasource_id)
    driver = cls.get_driver_info(datasource.driver)
    if driver:
        # NOTE(arosen): raises if not found
        driver = cls.get_driver_info(driver['id'])
        obj = importutils.import_class(driver['module'])
        return obj.get_schema()
def monkey_patch():
    """Patch decorator.

    If CONF.monkey_patch is set to True, this function patches a decorator
    onto all functions and class methods in the specified modules.
    You can set decorators for each module using CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'manila.api.ec2.cloud:' \
        'manila.openstack.common.notifier.api.notify_decorator'

    Parameters of the decorator are as follows.
    (See manila.openstack.common.notifier.api.notify_decorator)

    name - name of the function
    function - object of the function
    """
    # If CONF.monkey_patch is not True, this function does nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz,
                                                       inspect.ismethod):
                    setattr(
                        clz, method,
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
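# A hedged illustration of how the option format described in the docstring
# above would be consumed (the decorated module is taken from the docstring
# example; the config section layout is assumed):
#
#   [DEFAULT]
#   monkey_patch = True
#   monkey_patch_modules = manila.api.ec2.cloud:manila.openstack.common.notifier.api.notify_decorator
#
# With that setting, every function and class method defined in
# manila.api.ec2.cloud gets wrapped by notify_decorator at startup.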
def get_datasource_schema(cls, datasource_id):
    datasource = datasources_db.get_datasource(datasource_id)
    if not datasource:
        raise DatasourceNotFound(id=datasource_id)
    driver = cls.get_driver_info(datasource.driver)
    if driver:
        # NOTE(arosen): raises if not found
        driver = cls.get_driver_info(driver['id'])
        obj = importutils.import_class(driver['module'])
        return obj.get_schema()
def init_driver(self):
    """Create the driver for the cache."""
    driver_name = CONF.image_cache_driver
    driver_module = (__name__ + '.drivers.' + driver_name + '.Driver')
    try:
        self.driver_class = importutils.import_class(driver_module)
        LOG.info(_LI("Image cache loaded driver '%s'.") % driver_name)
    except ImportError as import_err:
        LOG.warn(_LW("Image cache driver "
                     "'%(driver_name)s' failed to load. "
                     "Got error: '%(import_err)s'."),
                 {'driver_name': driver_name,
                  'import_err': import_err})
        driver_module = __name__ + '.drivers.sqlite.Driver'
        LOG.info(_LI("Defaulting to SQLite driver."))
        self.driver_class = importutils.import_class(driver_module)
    self.configure_driver()
def validate_configured_drivers(cls):
    result = {}
    for driver_path in cfg.CONF.drivers:
        obj = importutils.import_class(driver_path)
        driver = obj.get_datasource_info()
        if driver['id'] in result:
            raise BadConfig(_("There is a driver loaded already with the "
                              "driver name of %s") % driver['id'])
        driver['module'] = driver_path
        result[driver['id']] = driver
    cls.loaded_drivers = result
def get_client_class(version):
    version_map = {
        '1': 'madclient.v1.client.Client',
    }
    try:
        client_path = version_map[str(version)]
    except (KeyError, ValueError):
        msg = "Invalid client version '%s'. must be one of: %s" % (
            version, ', '.join(version_map))
        raise exceptions.UnsupportedVersion(msg)

    return importutils.import_class(client_path)
def validate_configured_drivers(cls):
    result = {}
    for driver_path in cfg.CONF.drivers:
        obj = importutils.import_class(driver_path)
        driver = obj.get_datasource_info()
        if driver['id'] in result:
            raise BadConfig(
                _("There is a driver loaded already with the "
                  "driver name of %s") % driver['id'])
        driver['module'] = driver_path
        result[driver['id']] = driver
    cls.loaded_drivers = result
def new_task_executor(self, context): try: executor_cls = ('glance.async.%s_executor.' 'TaskExecutor' % CONF.task.task_executor) LOG.debug("Loading %s executor" % CONF.task.task_executor) executor = importutils.import_class(executor_cls) return executor(context, self.task_repo, self.image_repo, self.image_factory) except ImportError: with excutils.save_and_reraise_exception(): LOG.exception( _LE("Failed to load the %s executor provided " "in the config.") % CONF.task.task_executor)
def get_client_class(version):
    version_map = {
        '1.0': 'fsgatewayclient.v1_0.client.Client',
    }
    try:
        client_path = version_map[str(version)]
    except (KeyError, ValueError):
        msg = _("Invalid client version '%(version)s'. must be one of: "
                "%(keys)s") % {'version': version,
                               'keys': ', '.join(version_map.keys())}
        raise exceptions.UnsupportedVersion(msg)

    return importutils.import_class(client_path)
def API(config_group_name=None):
    """Selects class and config group of network plugin.

    :param config_group_name: name of config group to be used for
        registration of networking opts.
    :returns: instance of networking plugin class
    """
    CONF.register_opts(network_opts, group=config_group_name)
    if config_group_name:
        network_api_class = getattr(CONF,
                                    config_group_name).network_api_class
    else:
        network_api_class = CONF.network_api_class
    cls = importutils.import_class(network_api_class)
    return cls(config_group_name=config_group_name)
def _get_plugin_instance(self, namespace, plugin_provider):
    try:
        # Try to resolve plugin by name
        mgr = driver.DriverManager(namespace, plugin_provider)
        plugin_class = mgr.driver
    except RuntimeError as e1:
        # fallback to class name
        try:
            plugin_class = importutils.import_class(plugin_provider)
        except ImportError as e2:
            LOG.exception(_LE("Error loading plugin by name, %s"), e1)
            LOG.exception(_LE("Error loading plugin by class, %s"), e2)
            raise ImportError(_("Plugin not found."))
    return plugin_class()
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_interval=None, periodic_fuzzy_delay=None,
             *args, **kwargs):
    self.binary = binary
    self.manager_class_name = manager
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=host, *args, **kwargs)
    self.report_interval = report_interval
    self.periodic_interval = periodic_interval
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.saved_args, self.saved_kwargs = args, kwargs
    self.timers = []
    super(Service, self).__init__(host, topic, manager=self.manager)
def new_task_executor(self, context): try: executor_cls = ('glance.async.%s_executor.' 'TaskExecutor' % CONF.task.task_executor) LOG.debug("Loading %s executor" % CONF.task.task_executor) executor = importutils.import_class(executor_cls) return executor(context, self.task_repo, self.image_repo, self.image_factory) except ImportError: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to load the %s executor provided " "in the config.") % CONF.task.task_executor)
def get_client_class(api_name, version, version_map): """Returns the client class for the requested API version :param api_name: the name of the API, e.g. 'compute', 'image', etc :param version: the requested API version :param version_map: a dict of client classes keyed by version :rtype: a client class for the requested API version """ try: client_path = version_map[str(version)] except (KeyError, ValueError): msg = "Invalid %s client version '%s'. must be one of: %s" % ( (api_name, version, ', '.join(version_map.keys()))) raise exceptions.UnsupportedVersion(msg) return importutils.import_class(client_path)
def __init__(self, host=None):
    super(DhcpAgent, self).__init__(host=host)
    self.needs_resync_reasons = collections.defaultdict(list)
    self.conf = cfg.CONF
    self.cache = NetworkCache()
    self.root_helper = config.get_root_helper(self.conf)
    self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
    ctx = context.get_admin_context_without_session()
    self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
                                    ctx, self.conf.use_namespaces)
    # create dhcp dir to store dhcp info
    dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
    if not os.path.isdir(dhcp_dir):
        os.makedirs(dhcp_dir, 0o755)
    self.dhcp_version = self.dhcp_driver_cls.check_version()
    self._populate_networks_cache()
def load_extension(self, ext_factory):
    """Execute an extension factory.

    Loads an extension.  The 'ext_factory' is the name of a callable
    that will be imported and called with one argument--the extension
    manager.  The factory callable is expected to call the register()
    method at least once.
    """
    LOG.debug("Loading extension %s", ext_factory)

    # Load the factory
    factory = importutils.import_class(ext_factory)

    # Call it
    LOG.debug("Calling extension factory %s", ext_factory)
    factory(self)
def get_client_class(api_name, version, version_map): """Returns the client class for the requested API version. :param api_name: the name of the API, e.g. 'compute', 'image', etc :param version: the requested API version :param version_map: a dict of client classes keyed by version :rtype: a client class for the requested API version """ try: client_path = version_map[str(version)] except (KeyError, ValueError): msg = _("Invalid %(api_name)s client version '%(version)s'. must be " "one of: %(map_keys)s") msg = msg % {'api_name': api_name, 'version': version, 'map_keys': ', '.join(version_map.keys())} raise exceptions.UnsupportedVersion(msg) return importutils.import_class(client_path)
def configure_cache_region(region):
    """Configure a cache region.

    :param region: optional CacheRegion object, if not provided a new region
                   will be instantiated
    :raises: exception.ValidationError
    :returns: dogpile.cache.CacheRegion
    """
    if not isinstance(region, dogpile.cache.CacheRegion):
        raise exception.ValidationError(
            _('region not type dogpile.cache.CacheRegion'))

    if not region.is_configured:
        # NOTE(morganfainberg): this is how you tell if a region is
        # configured. There is a request logged with dogpile.cache upstream
        # to make this easier / less ugly.

        config_dict = build_cache_config()
        region.configure_from_config(config_dict,
                                     '%s.' % CONF.cache.config_prefix)

        if CONF.cache.debug_cache_backend:
            region.wrap(DebugProxy)

        # NOTE(morganfainberg): if the backend requests the use of a
        # key_mangler, we should respect that key_mangler function. If a
        # key_mangler is not defined by the backend, use the sha1_mangle_key
        # mangler provided by dogpile.cache. This ensures we always use a
        # fixed size cache-key.
        if region.key_mangler is None:
            region.key_mangler = util.sha1_mangle_key

        for class_path in CONF.cache.proxies:
            # NOTE(morganfainberg): if we have any proxy wrappers, we should
            # ensure they are added to the cache region's backend. Since
            # configure_from_config doesn't handle the wrap argument, we need
            # to manually add the Proxies. For information on how the
            # ProxyBackends work, see the dogpile.cache documents on
            # "changing-backend-behavior"
            cls = importutils.import_class(class_path)
            LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
            region.wrap(cls)

    return region
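# A minimal sketch (assumed, not from the source) of a proxy class that could
# be listed in CONF.cache.proxies and picked up by the wrap() loop above. It
# follows the dogpile.cache ProxyBackend interface: override backend methods
# and delegate to self.proxied.
import logging

from dogpile.cache import proxy

LOG = logging.getLogger(__name__)


class LoggingProxy(proxy.ProxyBackend):
    """Log cache reads and writes before delegating to the real backend."""

    def get(self, key):
        LOG.debug("cache get: %s", key)
        return self.proxied.get(key)

    def set(self, key, value):
        LOG.debug("cache set: %s", key)
        self.proxied.set(key, value)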
def is_neutron():
    global _IS_NEUTRON

    if _IS_NEUTRON is not None:
        return _IS_NEUTRON

    try:
        # compatibility with Folsom/Grizzly configs
        cls_name = CONF.network_api_class
        if cls_name == 'nova.network.quantumv2.api.API':
            cls_name = 'nova.network.neutronv2.api.API'

        from nova.network.neutronv2 import api as neutron_api
        _IS_NEUTRON = issubclass(importutils.import_class(cls_name),
                                 neutron_api.API)
    except ImportError:
        _IS_NEUTRON = False

    return _IS_NEUTRON