def _ssl_cert_req_type(self, req_type):
    try:
        import ssl
    except ImportError:
        raise exception.ConfigurationError(_('no ssl support available'))
    req_type = req_type.upper()
    try:
        return {
            'NONE': ssl.CERT_NONE,
            'OPTIONAL': ssl.CERT_OPTIONAL,
            'REQUIRED': ssl.CERT_REQUIRED
        }[req_type]
    except KeyError:
        msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
                '"NONE", "OPTIONAL", "REQUIRED"') % req_type
        raise exception.ConfigurationError(msg)
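
# Illustrative only (not part of the backend API): the mapping above turns a
# case-insensitive config string into the corresponding ssl constant, e.g.
#
#   self._ssl_cert_req_type('required')  # -> ssl.CERT_REQUIRED
#   self._ssl_cert_req_type('none')      # -> ssl.CERT_NONE
#   self._ssl_cert_req_type('bogus')     # raises ConfigurationError
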
def configure_cache_region(conf, region):
    """Configure a cache region.

    If the cache region is already configured, this function does nothing.
    Otherwise, the region is configured.

    :param conf: config object, must have had :func:`configure` called on it.
    :type conf: oslo_config.cfg.ConfigOpts
    :param region: Cache region to configure (see :func:`create_region`).
    :type region: dogpile.cache.region.CacheRegion
    :raises oslo_cache.exception.ConfigurationError: If the region parameter
        is not a dogpile.cache.CacheRegion.
    :returns: The region.
    :rtype: :class:`dogpile.cache.region.CacheRegion`
    """
    if not isinstance(region, dogpile.cache.CacheRegion):
        raise exception.ConfigurationError(
            _('region not type dogpile.cache.CacheRegion'))

    if not region.is_configured:
        # NOTE(morganfainberg): this is how you tell if a region is
        # configured. There is a request logged with dogpile.cache upstream
        # to make this easier / less ugly.
        config_dict = _build_cache_config(conf)
        region.configure_from_config(
            config_dict, '%s.' % conf.cache.config_prefix)

        if conf.cache.debug_cache_backend:
            region.wrap(_DebugProxy)

        # NOTE(morganfainberg): if the backend requests the use of a
        # key_mangler, we should respect that key_mangler function. If a
        # key_mangler is not defined by the backend, use the sha1_mangle_key
        # mangler provided by dogpile.cache. This ensures we always use a
        # fixed size cache-key.
        if region.key_mangler is None:
            region.key_mangler = _sha1_mangle_key

        for class_path in conf.cache.proxies:
            # NOTE(morganfainberg): if we have any proxy wrappers, we should
            # ensure they are added to the cache region's backend. Since
            # configure_from_config doesn't handle the wrap argument, we
            # need to manually add the Proxies. For information on how the
            # ProxyBackends work, see the dogpile.cache documents on
            # "changing-backend-behavior"
            cls = importutils.import_class(class_path)
            _LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
            region.wrap(cls)

    return region
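
# A minimal usage sketch for the function above, assuming the `configure` and
# `create_region` helpers referenced in its docstring are exposed alongside it
# (as in oslo_cache.core):
#
#   from oslo_config import cfg
#
#   CONF = cfg.CONF
#   configure(CONF)                       # register the [cache] options
#   region = create_region()              # unconfigured CacheRegion
#   configure_cache_region(CONF, region)  # no-op if already configured
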
def _init_args(self, arguments):
    """Helper logic for collecting and parsing MongoDB specific arguments.

    The arguments passed in are separated into connection-specific settings;
    the rest are passed through to the create/update/delete db operations.
    """
    self.conn_kwargs = {}  # connection specific arguments

    self.hosts = arguments.pop('db_hosts', None)
    if self.hosts is None:
        msg = _('db_hosts value is required')
        raise exception.ConfigurationError(msg)

    self.db_name = arguments.pop('db_name', None)
    if self.db_name is None:
        msg = _('database db_name is required')
        raise exception.ConfigurationError(msg)

    self.cache_collection = arguments.pop('cache_collection', None)
    if self.cache_collection is None:
        msg = _('cache_collection name is required')
        raise exception.ConfigurationError(msg)

    self.username = arguments.pop('username', None)
    self.password = arguments.pop('password', None)
    self.max_pool_size = arguments.pop('max_pool_size', 10)

    self.w = arguments.pop('w', -1)
    try:
        self.w = int(self.w)
    except ValueError:
        msg = _('integer value expected for w (write concern attribute)')
        raise exception.ConfigurationError(msg)

    self.read_preference = arguments.pop('read_preference', None)

    self.use_replica = arguments.pop('use_replica', False)
    if self.use_replica:
        if arguments.get('replicaset_name') is None:
            msg = _('replicaset_name required when use_replica is True')
            raise exception.ConfigurationError(msg)
        self.replicaset_name = arguments.get('replicaset_name')

    self.son_manipulator = arguments.pop('son_manipulator', None)

    # Set if the mongo collection needs to be a TTL collection; the value is
    # the maximum TTL for any cache entry. The default of -1 means a TTL
    # collection is not used. When a TTL is set, the related index is created
    # and each document carries a doc_date field with the needed expiration
    # interval.
    self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
    try:
        self.ttl_seconds = int(self.ttl_seconds)
    except ValueError:
        msg = _('integer value expected for mongo_ttl_seconds')
        raise exception.ConfigurationError(msg)

    self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
    if self.conn_kwargs['ssl']:
        ssl_keyfile = arguments.pop('ssl_keyfile', None)
        ssl_certfile = arguments.pop('ssl_certfile', None)
        ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
        ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
        if ssl_keyfile:
            self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
        if ssl_certfile:
            self.conn_kwargs['ssl_certfile'] = ssl_certfile
        if ssl_ca_certs:
            self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
        if ssl_cert_reqs:
            self.conn_kwargs['ssl_cert_reqs'] = (
                self._ssl_cert_req_type(ssl_cert_reqs))

    # The rest of the arguments are passed to the mongo crud calls.
    self.meth_kwargs = arguments
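
# For illustration, a hedged example of the `arguments` dict this helper
# consumes (keys as popped above; the values are placeholders, not defaults).
# Anything left over lands in self.meth_kwargs and is forwarded untouched to
# the MongoDB CRUD calls:
#
#   arguments = {
#       'db_hosts': 'localhost:27017',
#       'db_name': 'ks_cache',
#       'cache_collection': 'cache',
#       'mongo_ttl_seconds': 300,
#       'ssl': False,
#   }
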
def _build_cache_config(conf):
    """Build the cache region dictionary configuration.

    :returns: dict
    """
    prefix = conf.cache.config_prefix
    conf_dict = {}
    conf_dict['%s.backend' % prefix] = _opts._DEFAULT_BACKEND
    if conf.cache.enabled is True:
        conf_dict['%s.backend' % prefix] = conf.cache.backend
    conf_dict['%s.expiration_time' % prefix] = conf.cache.expiration_time
    for argument in conf.cache.backend_argument:
        try:
            (argname, argvalue) = argument.split(':', 1)
        except ValueError:
            msg = ('Unable to build cache config-key. Expected format '
                   '"<argname>:<value>". Skipping unknown format: %s')
            _LOG.error(msg, argument)
            continue

        arg_key = '.'.join([prefix, 'arguments', argname])
        # NOTE(morgan): The handling of the URL data in memcache is bad and
        # only takes cases where the values are a list. This explicitly
        # checks for the base dogpile.cache.memcached backend and does the
        # split if needed. Other backends such as redis get the same
        # previous behavior. Overall the fact that the backends opaquely
        # take data and do not handle processing/validation as expected
        # directly makes for odd behaviors when wrapping dogpile.cache in
        # a library like oslo.cache
        if (conf.cache.backend in ('dogpile.cache.memcached',
                                   'oslo_cache.memcache_pool')
                and argname == 'url'):
            argvalue = argvalue.split(',')
        conf_dict[arg_key] = argvalue

        _LOG.debug('Oslo Cache Config: %s', conf_dict)

    # NOTE(yorik-sar): these arguments will be used for memcache-related
    # backends. Use setdefault for url to support old-style setting through
    # backend_argument=url:127.0.0.1:11211
    #
    # NOTE(morgan): If requested by config, 'flush_on_reconnect' will be set
    # for pooled connections. This can ensure that stale data is never
    # consumed from a server that pops in/out due to a network partition
    # or disconnect.
    #
    # See the help from python-memcached:
    #
    # param flush_on_reconnect: optional flag which prevents a
    #       scenario that can cause stale data to be read: If there's more
    #       than one memcached server and the connection to one is
    #       interrupted, keys that mapped to that server will get
    #       reassigned to another. If the first server comes back, those
    #       keys will map to it again. If it still has its data, get()s
    #       can read stale data that was overwritten on another
    #       server. This flag is off by default for backwards
    #       compatibility.
    #
    # The normal non-pooled clients connect explicitly on each use and
    # do not need the explicit flush_on_reconnect.
    conf_dict.setdefault('%s.arguments.url' % prefix,
                         conf.cache.memcache_servers)
    for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
                'pool_unused_timeout', 'pool_connection_get_timeout',
                'pool_flush_on_reconnect'):
        value = getattr(conf.cache, 'memcache_' + arg)
        conf_dict['%s.arguments.%s' % (prefix, arg)] = value

    if conf.cache.tls_enabled:
        _LOG.debug('Oslo Cache TLS - CA: %s', conf.cache.tls_cafile)
        tls_context = ssl.create_default_context(cafile=conf.cache.tls_cafile)

        if conf.cache.tls_certfile is not None:
            _LOG.debug('Oslo Cache TLS - cert: %s', conf.cache.tls_certfile)
            _LOG.debug('Oslo Cache TLS - key: %s', conf.cache.tls_keyfile)
            tls_context.load_cert_chain(
                conf.cache.tls_certfile,
                conf.cache.tls_keyfile,
            )

        if conf.cache.tls_allowed_ciphers is not None:
            _LOG.debug(
                'Oslo Cache TLS - ciphers: %s',
                conf.cache.tls_allowed_ciphers,
            )
            tls_context.set_ciphers(conf.cache.tls_allowed_ciphers)

        conf_dict['%s.arguments.tls_context' % prefix] = tls_context

    # NOTE(hberaud): pymemcache supports socket keepalive. If it is enabled
    # in our config, configure the client to use this feature.
    # The socket keepalive feature means that pymemcache will be able to
    # check your connected socket and determine whether the connection is
    # still up and running or if it has broken.
    # This could be used by users who want to handle fine-grained failures.
    if conf.cache.enable_socket_keepalive:
        if conf.cache.backend != 'dogpile.cache.pymemcache':
            msg = _("Socket keepalive is only supported by the "
                    "'dogpile.cache.pymemcache' backend.")
            raise exception.ConfigurationError(msg)
        import pymemcache
        socket_keepalive = pymemcache.KeepaliveOpts(
            idle=conf.cache.socket_keepalive_idle,
            intvl=conf.cache.socket_keepalive_interval,
            cnt=conf.cache.socket_keepalive_count)
        # As with the TLS context above, the config dict below will be
        # consumed by dogpile.cache that will be used as a proxy between
        # oslo.cache and pymemcache.
        conf_dict['%s.arguments.socket_keepalive' % prefix] = socket_keepalive

    # NOTE(hberaud): The pymemcache library comes with retry mechanisms that
    # can be used to wrap all kinds of pymemcache clients. The retry wrapper
    # allows you to define how many attempts to make and how long to wait
    # between attempts. The section below passes our config to dogpile.cache
    # to set up the pymemcache retry client wrapper.
    if conf.cache.enable_retry_client:
        if conf.cache.backend != 'dogpile.cache.pymemcache':
            msg = _("Retry client is only supported by the "
                    "'dogpile.cache.pymemcache' backend.")
            raise exception.ConfigurationError(msg)
        import pymemcache
        conf_dict['%s.arguments.enable_retry_client' % prefix] = True
        conf_dict['%s.arguments.retry_attempts' % prefix] = \
            conf.cache.retry_attempts
        conf_dict['%s.arguments.retry_delay' % prefix] = \
            conf.cache.retry_delay
        conf_dict['%s.arguments.hashclient_retry_attempts' % prefix] = \
            conf.cache.hashclient_retry_attempts
        conf_dict['%s.arguments.hashclient_retry_delay' % prefix] = \
            conf.cache.hashclient_retry_delay
        conf_dict['%s.arguments.dead_timeout' % prefix] = \
            conf.cache.dead_timeout

    return conf_dict
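
# A hedged sketch of the dict this function can produce for a plain memcached
# setup (the 'cache.oslo' prefix and the values shown are illustrative; they
# depend on conf.cache.config_prefix and the rest of the [cache] options):
#
#   {
#       'cache.oslo.backend': 'dogpile.cache.memcached',
#       'cache.oslo.expiration_time': 600,
#       'cache.oslo.arguments.url': ['127.0.0.1:11211'],
#       'cache.oslo.arguments.dead_retry': 300,
#       ...
#   }
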