def _sentinel_managed_pool(self, asynchronous=False):
    connparams = self._connparams(asynchronous)

    additional_params = connparams.copy()
    additional_params.pop("host", None)
    additional_params.pop("port", None)

    sentinels = []
    for url in self.connection.client.alt:
        url = _parse_url(url)
        if url.scheme == "sentinel":
            sentinels.append((url.hostname, url.port))

    # Fallback for when only one sentinel is provided.
    if not sentinels:
        sentinels.append((connparams["host"], connparams["port"]))

    sentinel_inst = sentinel.Sentinel(
        sentinels,
        min_other_sentinels=getattr(self, "min_other_sentinels", 0),
        sentinel_kwargs=getattr(self, "sentinel_kwargs", None),
        **additional_params)

    master_name = getattr(self, "master_name", None)

    return sentinel_inst.master_for(
        master_name,
        self.Client,
    ).connection_pool
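Every snippet in this listing unpacks the same seven-part result from kombu's `_parse_url`. The sketch below illustrates that contract as inferred from the call sites themselves (seven positional fields, namedtuple attributes such as `.scheme`/`.hostname`/`.port`, and a query dict); the exact path and quoting normalisation differs between kombu versions, which is why the backends call `strip('/')` defensively.

# Sketch only: the 7-part result (scheme, host, port, user, password, path,
# query) is inferred from the call sites in this listing; attribute names come
# from the sentinel snippets above. Normalisation details (leading '/' on
# path, percent-decoding) vary across kombu versions.
from kombu.utils.url import _parse_url

parts = _parse_url('redis://:secret@localhost:6379/0?socket_timeout=5')
scheme, host, port, user, password, path, query = parts

print(scheme)    # 'redis'
print(host)      # 'localhost'
print(port)      # 6379
print(password)  # 'secret'
print(path)      # '0' or '/0', depending on the kombu version
print(query)     # {'socket_timeout': '5'} -- query values arrive as strings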
def __init__(self, host=None, port=None, db=None, password=None,
             expires=None, max_connections=None, url=None, **kwargs):
    super(RedisBackend, self).__init__(**kwargs)
    conf = self.app.conf
    if self.redis is None:
        raise ImproperlyConfigured(REDIS_MISSING)

    # For compatibility with the old REDIS_* configuration keys.
    def _get(key):
        for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
            try:
                return conf[prefix.format(key)]
            except KeyError:
                pass

    if host and '://' in host:
        url, host = host, None
    self.url = url
    uhost = uport = upass = udb = None
    if url:
        _, uhost, uport, _, upass, udb, _ = _parse_url(url)
        udb = udb.strip('/') if udb else 0
    self.host = uhost or host or _get('HOST') or self.host
    self.port = int(uport or port or _get('PORT') or self.port)
    self.db = udb or db or _get('DB') or self.db
    self.password = upass or password or _get('PASSWORD') or self.password
    self.expires = self.prepare_expires(expires, type=int)
    self.max_connections = (max_connections or
                            _get('MAX_CONNECTIONS') or
                            self.max_connections)
def __init__(self, url=None, *args, **kwargs):
    super(IronCacheBackend, self).__init__(*args, **kwargs)

    _, self._host, _, self._project_id, self._token, self._ncache, _ = \
        _parse_url(url)
    if self._ncache is None:
        self._ncache = "Celery"
def _params_from_url(self, url, defaults):
    scheme, host, port, user, password, path, query = _parse_url(url)
    connparams = dict(
        defaults, **dictfilter({
            'host': host, 'port': port, 'password': password,
            'db': query.pop('virtual_host', None)})
    )

    if scheme == 'socket':
        # use 'path' as path to the socket… in this case
        # the database number should be given in 'query'
        connparams.update({
            'connection_class': self.redis.UnixDomainSocketConnection,
            'path': '/' + path,
        })
        # host+port are invalid options when using this connection type.
        connparams.pop('host', None)
        connparams.pop('port', None)
    else:
        connparams['db'] = path

    # db may be string and start with / like in kombu.
    db = connparams.get('db') or 0
    db = db.strip('/') if isinstance(db, string_t) else db
    connparams['db'] = int(db)

    # Query parameters override other parameters
    connparams.update(query)
    return connparams
def __init__(self, url=None, *args, **kwargs):
    """Initialize CouchDB backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`pycouchdb` is not available.

    """
    super(CouchBackend, self).__init__(*args, **kwargs)

    if pycouchdb is None:
        raise ImproperlyConfigured(ERR_LIB_MISSING)

    uscheme = uhost = uport = uname = upass = ucontainer = None
    if url:
        _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url)  # noqa
        ucontainer = ucontainer.strip('/') if ucontainer else None

    self.scheme = uscheme or self.scheme
    self.host = uhost or self.host
    self.port = int(uport or self.port)
    self.container = ucontainer or self.container
    self.username = uname or self.username
    self.password = upass or self.password

    self._connection = None
def __init__(self, url=None, *args, **kwargs):
    super(ElasticsearchBackend, self).__init__(*args, **kwargs)

    self.url = url
    _get = self.app.conf.get

    if elasticsearch is None:
        raise ImproperlyConfigured(E_LIB_MISSING)

    index = doc_type = scheme = host = port = None

    if url:
        scheme, host, port, _, _, path, _ = _parse_url(url)  # noqa
        if path:
            path = path.strip('/')
            index, _, doc_type = path.partition('/')

    self.index = index or self.index
    self.doc_type = doc_type or self.doc_type
    self.scheme = scheme or self.scheme
    self.host = host or self.host
    self.port = port or self.port

    self.es_retry_on_timeout = (
        _get('elasticsearch_retry_on_timeout') or self.es_retry_on_timeout
    )

    es_timeout = _get('elasticsearch_timeout')
    if es_timeout is not None:
        self.es_timeout = es_timeout

    es_max_retries = _get('elasticsearch_max_retries')
    if es_max_retries is not None:
        self.es_max_retries = es_max_retries

    self._server = None
def __init__(self, url=None, *args, **kwargs):
    """Initialize CouchBase backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`couchbase` is not available.

    """
    super(CouchBaseBackend, self).__init__(*args, **kwargs)

    if Couchbase is None:
        raise ImproperlyConfigured(
            "You need to install the couchbase library to use the "
            "CouchBase backend.")

    uhost = uport = uname = upass = ubucket = None
    if url:
        _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        ubucket = ubucket.strip("/") if ubucket else None

    config = self.app.conf.get("CELERY_COUCHBASE_BACKEND_SETTINGS", None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                "Couchbase backend settings should be grouped in a dict")
    else:
        config = {}

    self.host = uhost or config.get("host", self.host)
    self.port = int(uport or config.get("port", self.port))
    self.bucket = ubucket or config.get("bucket", self.bucket)
    self.username = uname or config.get("username", self.username)
    self.password = upass or config.get("password", self.password)

    self._connection = None
def __init__(self, url=None, *args, **kwargs):
    super(CouchBaseBackend, self).__init__(*args, **kwargs)
    self.url = url

    self.expires = kwargs.get('expires') or maybe_timedelta(
        self.app.conf.CELERY_TASK_RESULT_EXPIRES)

    if Couchbase is None:
        raise ImproperlyConfigured(
            'You need to install the couchbase library to use the '
            'CouchBase backend.',
        )

    uhost = uport = uname = upass = ubucket = None
    if url:
        _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        ubucket = ubucket.strip('/') if ubucket else None

    config = self.app.conf.get('CELERY_COUCHBASE_BACKEND_SETTINGS', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Couchbase backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket = ubucket or config.get('bucket', self.bucket)
    self.username = uname or config.get('username', self.username)
    self.password = upass or config.get('password', self.password)

    self._connection = None
def _connparams(self):
    conninfo = self.connection.client
    connparams = {'host': conninfo.hostname or '127.0.0.1',
                  'port': conninfo.port or DEFAULT_PORT,
                  'virtual_host': conninfo.virtual_host,
                  'password': conninfo.password,
                  'max_connections': self.max_connections,
                  'socket_timeout': self.socket_timeout}
    host = connparams['host']
    if '://' in host:
        scheme, _, _, _, password, path, query = _parse_url(host)
        if scheme == 'socket':
            connparams.update({
                'connection_class': redis.UnixDomainSocketConnection,
                'path': '/' + path,
                'password': password}, **query)

            connparams.pop('host', None)
            connparams.pop('port', None)
    connparams['db'] = self._prepare_virtual_host(
        connparams.pop('virtual_host', None))

    channel = self
    connection_cls = (
        connparams.get('connection_class') or
        redis.Connection
    )

    class Connection(connection_cls):
        def disconnect(self):
            channel._on_connection_disconnect(self)
            super(Connection, self).disconnect()
    connparams['connection_class'] = Connection

    return connparams
def __init__(self, url=None, *args, **kwargs):
    """Initialize CouchDB backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`pycouchdb` is not available.

    """
    super(CouchBackend, self).__init__(*args, **kwargs)

    if pycouchdb is None:
        raise ImproperlyConfigured(ERR_LIB_MISSING)

    uscheme = uhost = uport = uname = upass = ucontainer = None
    if url:
        _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url)  # noqa
        ucontainer = ucontainer.strip('/') if ucontainer else None

    self.scheme = uscheme or self.scheme
    self.host = uhost or self.host
    self.port = int(uport or self.port)
    self.container = ucontainer or self.container
    self.username = uname or self.username
    self.password = upass or self.password

    self._connection = None
def __init__(self, host=None, port=None, db=None, password=None,
             expires=None, max_connections=None, url=None, **kwargs):
    super(RedisBackend, self).__init__(**kwargs)
    conf = self.app.conf
    if self.redis is None:
        raise ImproperlyConfigured(
            "You need to install the redis library in order to use " +
            "the Redis result store backend.")

    # For compatibility with the old REDIS_* configuration keys.
    def _get(key):
        for prefix in "CELERY_REDIS_%s", "REDIS_%s":
            try:
                return conf[prefix % key]
            except KeyError:
                pass

    if host and '://' in host:
        url, host = host, None
    self.url = url
    uhost = uport = upass = udb = None
    if url:
        _, uhost, uport, _, upass, udb, _ = _parse_url(url)
    self.host = uhost or host or _get("HOST") or self.host
    self.port = int(uport or port or _get("PORT") or self.port)
    self.db = udb or db or _get("DB") or self.db
    self.password = upass or password or _get("PASSWORD") or self.password
    self.expires = self.prepare_expires(expires, type=int)
    self.max_connections = (max_connections or
                            _get("MAX_CONNECTIONS") or
                            self.max_connections)
def _params_from_url(self, url, defaults):
    scheme, host, port, _, password, path, query = _parse_url(url)
    connparams = dict(
        defaults, **dictfilter({
            "host": host,
            "port": port,
            "password": password,
            "db": query.pop("virtual_host", None),
        }))

    if scheme == "socket":
        # use 'path' as path to the socket… in this case
        # the database number should be given in 'query'
        connparams.update({
            "connection_class": self.redis.UnixDomainSocketConnection,
            "path": "/" + path,
        })
        # host+port are invalid options when using this connection type.
        connparams.pop("host", None)
        connparams.pop("port", None)
        connparams.pop("socket_connect_timeout")
    else:
        connparams["db"] = path

    ssl_param_keys = [
        "ssl_ca_certs",
        "ssl_certfile",
        "ssl_keyfile",
        "ssl_cert_reqs",
    ]

    if scheme == "redis":
        # If connparams or query string contain ssl params, raise error
        if any(key in connparams for key in ssl_param_keys) or any(
                key in query for key in ssl_param_keys):
            raise ValueError(E_REDIS_SSL_PARAMS_AND_SCHEME_MISMATCH)

    if scheme == "rediss":
        connparams["connection_class"] = redis.SSLConnection
        # The following parameters, if present in the URL, are encoded. We
        # must add the decoded values to connparams.
        for ssl_setting in ssl_param_keys:
            ssl_val = query.pop(ssl_setting, None)
            if ssl_val:
                connparams[ssl_setting] = unquote(ssl_val)

    # db may be string and start with / like in kombu.
    db = connparams.get("db") or 0
    db = db.strip("/") if isinstance(db, string_t) else db
    connparams["db"] = int(db)

    for key, value in query.items():
        if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS:
            query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key](
                value)

    # Query parameters override other parameters
    connparams.update(query)
    return connparams
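The `rediss://` branch above decodes its SSL query parameters before use. A tiny standalone illustration of why that `unquote` step is needed: certificate paths have to be percent-encoded to survive inside a URL query string.

# Illustrative only: shows the percent-decoding the rediss:// branch relies on.
from urllib.parse import quote, unquote

ca_path = '/etc/ssl/ca.pem'
encoded = quote(ca_path, safe='')      # '%2Fetc%2Fssl%2Fca.pem', as it appears in the URL
print(unquote(encoded))                # '/etc/ssl/ca.pem', as connparams needs it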
def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
             url=None, *args, **kwargs):
    super(RiakBackend, self).__init__(*args, **kwargs)
    self.url = url

    if not riak:
        raise ImproperlyConfigured(
            'You need to install the riak library to use the '
            'Riak backend.')

    uhost = uport = uname = upass = ubucket = None
    if url:
        uprot, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        if ubucket:
            ubucket = ubucket.strip('/')

    config = self.app.conf.get('riak_backend_settings', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Riak backend settings should be grouped in a dict')
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket_name = ubucket or config.get('bucket', self.bucket_name)
    self.protocol = protocol or config.get('protocol', self.protocol)

    # riak bucket must be ascii letters or numbers only
    if not is_ascii(self.bucket_name):
        raise ValueError(E_BUCKET_NAME.format(self.bucket_name))

    self._client = None
def __init__(self, url=None, *args, **kwargs):
    super(CouchbaseBackend, self).__init__(*args, **kwargs)
    self.url = url

    if Couchbase is None:
        raise ImproperlyConfigured(
            'You need to install the couchbase library to use the '
            'Couchbase backend.',
        )

    uhost = uport = uname = upass = ubucket = None
    if url:
        _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        ubucket = ubucket.strip('/') if ubucket else None

    config = self.app.conf.get('couchbase_backend_settings', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Couchbase backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket = ubucket or config.get('bucket', self.bucket)
    self.username = uname or config.get('username', self.username)
    self.password = upass or config.get('password', self.password)

    self._connection = None
def _sentinel_managed_pool(self, asynchronous=False):
    connparams = self._connparams(asynchronous)

    additional_params = connparams.copy()
    additional_params.pop('host', None)
    additional_params.pop('port', None)

    sentinels = []
    for url in self.connection.client.alt:
        url = _parse_url(url)
        if url.scheme == 'sentinel':
            port = url.port or self.connection.default_port
            sentinels.append((url.hostname, port))

    # Fallback for when only one sentinel is provided.
    if not sentinels:
        sentinels.append((connparams['host'], connparams['port']))

    sentinel_inst = sentinel.Sentinel(
        sentinels,
        min_other_sentinels=getattr(self, 'min_other_sentinels', 0),
        sentinel_kwargs=getattr(self, 'sentinel_kwargs', None),
        **additional_params)

    master_name = getattr(self, 'master_name', None)

    if master_name is None:
        raise ValueError(
            "'master_name' transport option must be specified.")

    return sentinel_inst.master_for(
        master_name,
        self.Client,
    ).connection_pool
def __init__(self, url=None, *args, **kwargs):
    super(ElasticsearchBackend, self).__init__(url=url, *args, **kwargs)
    self.url = url
    _get = self.app.conf.get

    if url:
        _, host, port, username, password, path, _ = _parse_url(url)
        path = path.strip('/')
        index, _, doc_type = path.partition('/')

        self.username = username
        self.password = password
        self.index = index or self.index
        self.doc_type = doc_type or self.doc_type
        self.scheme = None
        self.host = host or self.host
        self.port = port or self.port

    self.es_retry_on_timeout = (
        _get('elasticsearch_retry_on_timeout') or self.es_retry_on_timeout
    )

    es_timeout = _get('elasticsearch_timeout')
    if es_timeout is not None:
        self.es_timeout = es_timeout

    es_max_retries = _get('elasticsearch_max_retries')
    if es_max_retries is not None:
        self.es_max_retries = es_max_retries

    self._server = None
def _connparams(self):
    conninfo = self.connection.client
    connparams = {
        "host": conninfo.hostname or "127.0.0.1",
        "port": conninfo.port or DEFAULT_PORT,
        "virtual_host": conninfo.virtual_host,
        "password": conninfo.password,
        "max_connections": self.max_connections,
        "socket_timeout": self.socket_timeout,
    }
    host = connparams["host"]
    if "://" in host:
        scheme, _, _, _, password, path, query = _parse_url(host)
        if scheme == "socket":
            connparams.update(
                {"connection_class": redis.UnixDomainSocketConnection,
                 "path": "/" + path,
                 "password": password},
                **query
            )

            connparams.pop("host", None)
            connparams.pop("port", None)
    connparams["db"] = self._prepare_virtual_host(
        connparams.pop("virtual_host", None))

    channel = self
    connection_cls = connparams.get("connection_class") or redis.Connection

    class Connection(connection_cls):
        def disconnect(self):
            channel._on_connection_disconnect(self)
            super(Connection, self).disconnect()
    connparams["connection_class"] = Connection

    return connparams
def __init__(self, url=None, *args, **kwargs):
    """Initialize CouchBase backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`couchbase` is not available.

    """
    super(CouchBaseBackend, self).__init__(*args, **kwargs)

    if Couchbase is None:
        raise ImproperlyConfigured(
            'You need to install the couchbase library to use the '
            'CouchBase backend.',
        )

    uhost = uport = uname = upass = ubucket = None
    if url:
        _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        ubucket = ubucket.strip('/') if ubucket else None

    config = self.app.conf.get('CELERY_COUCHBASE_BACKEND_SETTINGS', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Couchbase backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket = ubucket or config.get('bucket', self.bucket)
    self.username = uname or config.get('username', self.username)
    self.password = upass or config.get('password', self.password)

    self._connection = None
def __init__(self, url=None, *args, **kwargs):
    super(ElasticsearchBackend, self).__init__(*args, **kwargs)

    self.url = url
    _get = self.app.conf.get

    if elasticsearch is None:
        raise ImproperlyConfigured(E_LIB_MISSING)

    index = doc_type = scheme = host = port = None

    if url:
        scheme, host, port, _, _, path, _ = _parse_url(url)  # noqa
        if path:
            path = path.strip('/')
            index, _, doc_type = path.partition('/')

    self.index = index or self.index
    self.doc_type = doc_type or self.doc_type
    self.scheme = scheme or self.scheme
    self.host = host or self.host
    self.port = port or self.port

    self.es_retry_on_timeout = (_get('elasticsearch_retry_on_timeout') or
                                self.es_retry_on_timeout)

    es_timeout = _get('elasticsearch_timeout')
    if es_timeout is not None:
        self.es_timeout = es_timeout

    es_max_retries = _get('elasticsearch_max_retries')
    if es_max_retries is not None:
        self.es_max_retries = es_max_retries

    self._server = None
def _params_from_url(self, url, defaults):
    scheme, host, port, user, password, path, query = _parse_url(url)
    connparams = dict(
        defaults, **dictfilter({
            'host': host, 'port': port, 'password': password,
            'db': query.pop('virtual_host', None)})
    )

    if scheme == 'socket':
        # use 'path' as path to the socket… in this case
        # the database number should be given in 'query'
        connparams.update({
            'connection_class': self.redis.UnixDomainSocketConnection,
            'path': '/' + path,
        })
        # host+port are invalid options when using this connection type.
        connparams.pop('host', None)
        connparams.pop('port', None)
    else:
        connparams['db'] = path

    # db may be string and start with / like in kombu.
    db = connparams.get('db') or 0
    db = db.strip('/') if isinstance(db, string_t) else db
    connparams['db'] = int(db)

    # Query parameters override other parameters
    connparams.update(query)
    return connparams
def __init__(self, url=None, *args, **kwargs):
    kwargs.setdefault('expires_type', int)
    super(CouchbaseBackend, self).__init__(*args, **kwargs)
    self.url = url

    if Couchbase is None:
        raise ImproperlyConfigured(
            'You need to install the couchbase library to use the '
            'Couchbase backend.',
        )

    uhost = uport = uname = upass = ubucket = None
    if url:
        _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        ubucket = ubucket.strip('/') if ubucket else None

    config = self.app.conf.get('couchbase_backend_settings', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Couchbase backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket = ubucket or config.get('bucket', self.bucket)
    self.username = uname or config.get('username', self.username)
    self.password = upass or config.get('password', self.password)

    self._connection = None
def __init__(self, url=None, *args, **kwargs):
    kwargs.setdefault("expires_type", int)
    super(CouchbaseBackend, self).__init__(*args, **kwargs)
    self.url = url

    if Couchbase is None:
        raise ImproperlyConfigured(
            "You need to install the couchbase library to use the "
            "Couchbase backend.",
        )

    uhost = uport = uname = upass = ubucket = None
    if url:
        _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        ubucket = ubucket.strip("/") if ubucket else None

    config = self.app.conf.get("couchbase_backend_settings", None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                "Couchbase backend settings should be grouped in a dict",
            )
    else:
        config = {}

    self.host = uhost or config.get("host", self.host)
    self.port = int(uport or config.get("port", self.port))
    self.bucket = ubucket or config.get("bucket", self.bucket)
    self.username = uname or config.get("username", self.username)
    self.password = upass or config.get("password", self.password)

    self._connection = None
def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
             url=None, *args, **kwargs):
    super(RiakBackend, self).__init__(*args, **kwargs)
    self.url = url

    if not riak:
        raise ImproperlyConfigured(
            'You need to install the riak library to use the '
            'Riak backend.')

    uhost = uport = upass = ubucket = None
    if url:
        _, uhost, uport, _, upass, ubucket, _ = _parse_url(url)
        if ubucket:
            ubucket = ubucket.strip('/')

    config = self.app.conf.get('riak_backend_settings', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Riak backend settings should be grouped in a dict')
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket_name = ubucket or config.get('bucket', self.bucket_name)
    self.protocol = protocol or config.get('protocol', self.protocol)

    # riak bucket must be ascii letters or numbers only
    if not is_ascii(self.bucket_name):
        raise ValueError(E_BUCKET_NAME.format(self.bucket_name))

    self._client = None
def _params_from_url(self, url, defaults):
    scheme, host, port, username, password, path, query = _parse_url(url)
    connparams = dict(
        defaults, **dictfilter({
            'host': host, 'port': port, 'username': username,
            'password': password,
            'db': query.pop('virtual_host', None)})
    )

    if scheme == 'socket':
        # use 'path' as path to the socket… in this case
        # the database number should be given in 'query'
        connparams.update({
            'connection_class': self.redis.UnixDomainSocketConnection,
            'path': '/' + path,
        })
        # host+port are invalid options when using this connection type.
        connparams.pop('host', None)
        connparams.pop('port', None)
        connparams.pop('socket_connect_timeout')
    else:
        connparams['db'] = path

    ssl_param_keys = ['ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile',
                      'ssl_cert_reqs']

    if scheme == 'redis':
        # If connparams or query string contain ssl params, raise error
        if (any(key in connparams for key in ssl_param_keys) or
                any(key in query for key in ssl_param_keys)):
            raise ValueError(E_REDIS_SSL_PARAMS_AND_SCHEME_MISMATCH)

    if scheme == 'rediss':
        connparams['connection_class'] = redis.SSLConnection
        # The following parameters, if present in the URL, are encoded. We
        # must add the decoded values to connparams.
        for ssl_setting in ssl_param_keys:
            ssl_val = query.pop(ssl_setting, None)
            if ssl_val:
                connparams[ssl_setting] = unquote(ssl_val)

    # db may be string and start with / like in kombu.
    db = connparams.get('db') or 0
    db = db.strip('/') if isinstance(db, str) else db
    connparams['db'] = int(db)

    for key, value in query.items():
        if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS:
            query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key](
                value)

    # Query parameters override other parameters
    connparams.update(query)
    return connparams
def _params_from_url(self, url, defaults):
    scheme, host, port, _, password, path, query = _parse_url(url)
    connparams = dict(
        defaults, **dictfilter({
            'host': host, 'port': port, 'password': password,
            'db': query.pop('virtual_host', None)})
    )

    if scheme == 'socket':
        # use 'path' as path to the socket… in this case
        # the database number should be given in 'query'
        connparams.update({
            'connection_class': self.redis.UnixDomainSocketConnection,
            'path': '/' + path,
        })
        # host+port are invalid options when using this connection type.
        connparams.pop('host', None)
        connparams.pop('port', None)
        connparams.pop('socket_connect_timeout')
    else:
        connparams['db'] = path

    if scheme == 'rediss':
        connparams['connection_class'] = redis.SSLConnection
        # The following parameters, if present in the URL, are encoded. We
        # must add the decoded values to connparams.
        for ssl_setting in ['ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile']:
            ssl_val = query.pop(ssl_setting, None)
            if ssl_val:
                connparams[ssl_setting] = unquote(ssl_val)
        ssl_cert_reqs = query.pop('ssl_cert_reqs', 'MISSING')
        if ssl_cert_reqs == 'CERT_REQUIRED':
            connparams['ssl_cert_reqs'] = CERT_REQUIRED
        elif ssl_cert_reqs == 'CERT_OPTIONAL':
            logger.warn(W_REDIS_SSL_CERT_OPTIONAL)
            connparams['ssl_cert_reqs'] = CERT_OPTIONAL
        elif ssl_cert_reqs == 'CERT_NONE':
            logger.warn(W_REDIS_SSL_CERT_NONE)
            connparams['ssl_cert_reqs'] = CERT_NONE
        else:
            raise ValueError(E_REDIS_SSL_CERT_REQS_MISSING)

    # db may be string and start with / like in kombu.
    db = connparams.get('db') or 0
    db = db.strip('/') if isinstance(db, string_t) else db
    connparams['db'] = int(db)

    for key, value in query.items():
        if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS:
            query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key](
                value)

    # Query parameters override other parameters
    connparams.update(query)
    return connparams
def _connparams(self, asynchronous=False):
    conninfo = self.connection.client
    connparams = {
        'host': conninfo.hostname or '127.0.0.1',
        'port': conninfo.port or self.connection.default_port,
        'virtual_host': conninfo.virtual_host,
        'password': conninfo.password,
        'max_connections': self.max_connections,
        'socket_timeout': self.socket_timeout,
        'socket_connect_timeout': self.socket_connect_timeout,
        'socket_keepalive': self.socket_keepalive,
        'socket_keepalive_options': self.socket_keepalive_options,
    }

    if conninfo.ssl:
        # Connection(ssl={}) must be a dict containing the keys:
        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
        try:
            connparams.update(conninfo.ssl)
            connparams['connection_class'] = redis.SSLConnection
        except TypeError:
            pass
    host = connparams['host']
    if '://' in host:
        scheme, _, _, _, password, path, query = _parse_url(host)
        if scheme == 'socket':
            connparams = self._filter_tcp_connparams(**connparams)
            connparams.update(
                {
                    'connection_class': redis.UnixDomainSocketConnection,
                    'path': '/' + path,
                },
                **query)

            connparams.pop('socket_connect_timeout', None)
            connparams.pop('socket_keepalive', None)
            connparams.pop('socket_keepalive_options', None)

        connparams['password'] = password

        connparams.pop('host', None)
        connparams.pop('port', None)
    connparams['db'] = self._prepare_virtual_host(
        connparams.pop('virtual_host', None))

    channel = self
    connection_cls = (connparams.get('connection_class') or
                      self.connection_class)

    if asynchronous:
        class Connection(connection_cls):
            def disconnect(self):
                super(Connection, self).disconnect()
                channel._on_connection_disconnect(self)
        connection_cls = Connection

    connparams['connection_class'] = connection_cls

    return connparams
def _params_from_url(self, url, defaults):
    scheme, host, port, _, password, path, query = _parse_url(url)
    connparams = dict(
        defaults, **dictfilter({
            'host': host, 'port': port, 'password': password,
            'db': query.pop('virtual_host', None)})
    )

    if scheme == 'socket':
        # use 'path' as path to the socket… in this case
        # the database number should be given in 'query'
        connparams.update({
            'connection_class': self.redis.UnixDomainSocketConnection,
            'path': '/' + path,
        })
        # host+port are invalid options when using this connection type.
        connparams.pop('host', None)
        connparams.pop('port', None)
        connparams.pop('socket_connect_timeout')
    else:
        connparams['db'] = path

    if scheme == 'rediss':
        connparams['connection_class'] = redis.SSLConnection
        # The following parameters, if present in the URL, are encoded. We
        # must add the decoded values to connparams.
        for ssl_setting in ['ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile']:
            ssl_val = query.pop(ssl_setting, None)
            if ssl_val:
                connparams[ssl_setting] = unquote(ssl_val)
        ssl_cert_reqs = query.pop('ssl_cert_reqs', 'MISSING')
        if ssl_cert_reqs == 'CERT_REQUIRED':
            connparams['ssl_cert_reqs'] = CERT_REQUIRED
        elif ssl_cert_reqs == 'CERT_OPTIONAL':
            logger.warn(W_REDIS_SSL_CERT_OPTIONAL)
            connparams['ssl_cert_reqs'] = CERT_OPTIONAL
        elif ssl_cert_reqs == 'CERT_NONE':
            logger.warn(W_REDIS_SSL_CERT_NONE)
            connparams['ssl_cert_reqs'] = CERT_NONE
        else:
            raise ValueError(E_REDIS_SSL_CERT_REQS_MISSING)

    # db may be string and start with / like in kombu.
    db = connparams.get('db') or 0
    db = db.strip('/') if isinstance(db, string_t) else db
    connparams['db'] = int(db)

    if redis:
        for key, value in query.items():
            if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS:
                query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key](
                    value
                )

    # Query parameters override other parameters
    connparams.update(query)
    return connparams
def get_backend_by_url(backend=None, loader=None):
    url = None
    if backend and '://' in backend:
        url = backend
        if '+' in url[:url.index('://')]:
            backend, url = url.split('+', 1)
        else:
            backend, _, _, _, _, _, _ = _parse_url(url)
    return get_backend_cls(backend, loader), url
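The split rule above supports result-backend URLs such as `db+postgresql://...` or `cache+memcached://...`, where the part before `+` names the backend and the rest is handed to it unchanged; a plain URL keeps its scheme as the backend name. A small standalone sketch of just that splitting logic (the helper name is illustrative, not part of Celery):

# Standalone sketch of the URL-splitting rule used by get_backend_by_url.
def split_backend_url(backend_url):
    prefix = backend_url[:backend_url.index('://')]
    if '+' in prefix:
        # 'db+postgresql://...' -> ('db', 'postgresql://...')
        return tuple(backend_url.split('+', 1))
    # 'redis://...' -> ('redis', unchanged URL)
    return prefix, backend_url

print(split_backend_url('db+postgresql://scott:tiger@localhost/celerydb'))
print(split_backend_url('redis://localhost:6379/1'))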
def get_backend_by_url(backend=None, loader=None):
    url = None
    if backend and "://" in backend:
        url = backend
        if "+" in url[:url.index("://")]:
            backend, url = url.split("+", 1)
        else:
            backend, _, _, _, _, _, _ = _parse_url(url)
    return get_backend_cls(backend, loader), url
def _connparams(self, asynchronous=False):
    conninfo = self.connection.client
    connparams = {
        'host': conninfo.hostname or '127.0.0.1',
        'port': conninfo.port or self.connection.default_port,
        'virtual_host': conninfo.virtual_host,
        'password': conninfo.password,
        'max_connections': self.max_connections,
        'socket_timeout': self.socket_timeout,
        'socket_connect_timeout': self.socket_connect_timeout,
        'socket_keepalive': self.socket_keepalive,
        'socket_keepalive_options': self.socket_keepalive_options,
    }

    if conninfo.ssl:
        # Connection(ssl={}) must be a dict containing the keys:
        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
        try:
            connparams.update(conninfo.ssl)
            connparams['connection_class'] = redis.SSLConnection
        except TypeError:
            pass
    host = connparams['host']
    if '://' in host:
        scheme, _, _, _, password, path, query = _parse_url(host)
        if scheme == 'socket':
            connparams = self._filter_tcp_connparams(**connparams)
            connparams.update({
                'connection_class': redis.UnixDomainSocketConnection,
                'path': '/' + path},
                **query)

            connparams.pop('socket_connect_timeout', None)
            connparams.pop('socket_keepalive', None)
            connparams.pop('socket_keepalive_options', None)

        connparams['password'] = password

        connparams.pop('host', None)
        connparams.pop('port', None)
    connparams['db'] = self._prepare_virtual_host(
        connparams.pop('virtual_host', None))

    channel = self
    connection_cls = (
        connparams.get('connection_class') or
        self.connection_class
    )

    if asynchronous:
        class Connection(connection_cls):
            def disconnect(self):
                super(Connection, self).disconnect()
                channel._on_connection_disconnect(self)
        connection_cls = Connection

    connparams['connection_class'] = connection_cls

    return connparams
def _connparams(self, asynchronous=False, _r210_options=(
        'socket_connect_timeout', 'socket_keepalive',
        'socket_keepalive_options')):
    conninfo = self.connection.client
    connparams = {
        'host': conninfo.hostname or '127.0.0.1',
        'port': conninfo.port or DEFAULT_PORT,
        'virtual_host': conninfo.virtual_host,
        'password': conninfo.password,
        'max_connections': self.max_connections,
        'socket_timeout': self.socket_timeout,
        'socket_connect_timeout': self.socket_connect_timeout,
        'socket_keepalive': self.socket_keepalive,
        'socket_keepalive_options': self.socket_keepalive_options,
    }
    if redis.VERSION < (2, 10):
        for param in _r210_options:
            val = connparams.pop(param, None)
            if val is not None:
                raise VersionMismatch(
                    'redis: {0!r} requires redis 2.10.0 or higher'.format(
                        param))
    host = connparams['host']
    if '://' in host:
        scheme, _, _, _, password, path, query = _parse_url(host)
        if scheme == 'socket':
            connparams = self._filter_tcp_connparams(**connparams)
            connparams.update({
                'connection_class': redis.UnixDomainSocketConnection,
                'path': '/' + path,
                'password': password}, **query)

            connparams.pop('socket_connect_timeout', None)
            connparams.pop('socket_keepalive', None)
            connparams.pop('socket_keepalive_options', None)

        connparams.pop('host', None)
        connparams.pop('port', None)
    connparams['db'] = self._prepare_virtual_host(
        connparams.pop('virtual_host', None))

    channel = self
    connection_cls = (
        connparams.get('connection_class') or
        redis.Connection
    )

    if asynchronous:
        class Connection(connection_cls):
            def disconnect(self):
                super(Connection, self).disconnect()
                channel._on_connection_disconnect(self)
        connparams['connection_class'] = Connection

    return connparams
def _parse_url(cls, url):
    _, host, port, _, password, _, _ = _parse_url(url)

    if not host or not password:
        raise ImproperlyConfigured("Invalid URL")

    if not port:
        port = 443
    scheme = "https" if port == 443 else "http"
    endpoint = "%s://%s:%s" % (scheme, host, port)
    return endpoint, password
def _parse_hosts(self, hostname):
    """
    hostname: redis-cluster://username:password@127.0.0.1:30001/0?alts=127.0.0.1:30002
    """
    _, host, port, _, password, path, query = _parse_url(hostname)
    hosts = [{
        'host': host or '127.0.0.1',
        'port': port or DEFAULT_PORT,
    }]

    if query.get('alts'):
        alts = query['alts'].split(',')
        for url in alts:
            _, host, port, _, _, _, _ = _parse_url('://' + url)
            hosts.append({
                'host': host or '127.0.0.1',
                'port': port or DEFAULT_PORT,
            })

    return hosts, password, path
def __init__(self, url=None, *args, **kwargs):
    """Parse the url or load the settings from settings object."""
    super(ArangoDbBackend, self).__init__(*args, **kwargs)

    if py_arango_connection is None:
        raise ImproperlyConfigured(
            "You need to install the pyArango library to use the "
            "ArangoDb backend.",
        )

    self.url = url

    if url is None:
        host = port = database = collection = username = password = None
    else:
        (
            _schema, host, port, username, password,
            database_collection, _query,
        ) = _parse_url(url)
        if database_collection is None:
            database = collection = None
        else:
            database, collection = database_collection.split("/")

    config = self.app.conf.get("arangodb_backend_settings", None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                "ArangoDb backend settings should be grouped in a dict",
            )
    else:
        config = {}

    self.host = host or config.get("host", self.host)
    self.port = int(port or config.get("port", self.port))
    self.http_protocol = config.get("http_protocol", self.http_protocol)
    self.database = database or config.get("database", self.database)
    self.collection = collection or config.get("collection", self.collection)
    self.username = username or config.get("username", self.username)
    self.password = password or config.get("password", self.password)
    self.arangodb_url = "{http_protocol}://{host}:{port}".format(
        http_protocol=self.http_protocol, host=self.host, port=self.port)
    self._connection = None
def __init__(self, url=None, *args, **kwargs):
    """Parse the url or load the settings from settings object."""
    super(ArangoDbBackend, self).__init__(*args, **kwargs)

    if py_arango_connection is None:
        raise ImproperlyConfigured(
            'You need to install the pyArango library to use the '
            'ArangoDb backend.',
        )

    self.url = url

    if url is None:
        host = port = database = collection = username = password = None
    else:
        (
            _schema, host, port, username, password,
            database_collection, _query
        ) = _parse_url(url)
        if database_collection is None:
            database = collection = None
        else:
            database, collection = database_collection.split('/')

    config = self.app.conf.get('arangodb_backend_settings', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'ArangoDb backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.host = host or config.get('host', self.host)
    self.port = int(port or config.get('port', self.port))
    self.http_protocol = config.get('http_protocol', self.http_protocol)
    self.database = database or config.get('database', self.database)
    self.collection = \
        collection or config.get('collection', self.collection)
    self.username = username or config.get('username', self.username)
    self.password = password or config.get('password', self.password)
    self.arangodb_url = "{http_protocol}://{host}:{port}".format(
        http_protocol=self.http_protocol, host=self.host, port=self.port
    )
    self._connection = None
def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
             url=None, *args, **kwargs):
    """Initialize Riak backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`riak` is not available.

    """
    super(RiakBackend, self).__init__(*args, **kwargs)

    if not riak:
        raise ImproperlyConfigured(
            'You need to install the riak library to use the '
            'Riak backend.')

    uhost = uport = uname = upass = ubucket = None
    if url:
        uprot, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        if ubucket:
            ubucket = ubucket.strip('/')

    config = self.app.conf.get('CELERY_RIAK_BACKEND_SETTINGS', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Riak backend settings should be grouped in a dict')
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket_name = ubucket or config.get('bucket', self.bucket_name)
    self.protocol = protocol or config.get('protocol', self.protocol)

    # riak bucket must be ascii letters or numbers only
    if not is_ascii(self.bucket_name):
        raise ValueError(E_BUCKET_NAME.format(self.bucket_name))

    self._client = None
def _connparams(self):
    conninfo = self.connection.client
    connparams = {
        "host": conninfo.hostname or "127.0.0.1",
        "port": conninfo.port or DEFAULT_PORT,
        "virtual_host": conninfo.virtual_host,
        "password": conninfo.password,
        "max_connections": self.max_connections,
        "socket_timeout": self.socket_timeout,
    }
    host = connparams["host"]
    if "://" in host:
        scheme, _, _, _, _, path, query = _parse_url(host)
        if scheme == "socket":
            connparams.update(
                {"connection_class": redis.UnixDomainSocketConnection,
                 "path": "/" + path},
                **query)

            connparams.pop("host", None)
            connparams.pop("port", None)
    connparams["db"] = self._prepare_virtual_host(
        connparams.pop("virtual_host", None))
    return connparams
def _connparams(self):
    conninfo = self.connection.client
    connparams = {'host': conninfo.hostname or '127.0.0.1',
                  'port': conninfo.port or DEFAULT_PORT,
                  'virtual_host': conninfo.virtual_host,
                  'password': conninfo.password,
                  'max_connections': self.max_connections,
                  'socket_timeout': self.socket_timeout}
    host = connparams['host']
    if '://' in host:
        scheme, _, _, _, _, path, query = _parse_url(host)
        if scheme == 'socket':
            connparams.update({
                'connection_class': redis.UnixDomainSocketConnection,
                'path': '/' + path},
                **query)

            connparams.pop('host', None)
            connparams.pop('port', None)
    connparams['db'] = self._prepare_virtual_host(
        connparams.pop('virtual_host', None))
    return connparams
def __init__(self, url=None, *args, **kwargs):
    super(CouchBackend, self).__init__(*args, **kwargs)
    self.url = url

    if pycouchdb is None:
        raise ImproperlyConfigured(ERR_LIB_MISSING)

    uscheme = uhost = uport = uname = upass = ucontainer = None
    if url:
        _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url)  # noqa
        ucontainer = ucontainer.strip('/') if ucontainer else None

    self.scheme = uscheme or self.scheme
    self.host = uhost or self.host
    self.port = int(uport or self.port)
    self.container = ucontainer or self.container
    self.username = uname or self.username
    self.password = upass or self.password

    self._connection = None
def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
             url=None, *args, **kwargs):
    """Initialize Riak backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`riak` is not available.

    """
    super(RiakBackend, self).__init__(*args, **kwargs)

    self.expires = kwargs.get('expires') or maybe_timedelta(
        self.app.conf.CELERY_TASK_RESULT_EXPIRES)

    if not riak:
        raise ImproperlyConfigured(
            'You need to install the riak library to use the '
            'Riak backend.')

    uhost = uport = uname = upass = ubucket = None
    if url:
        uprot, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        if ubucket:
            ubucket = ubucket.strip('/')

    config = self.app.conf.get('CELERY_RIAK_BACKEND_SETTINGS', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Riak backend settings should be grouped in a dict')
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket_name = ubucket or config.get('bucket', self.bucket_name)
    self.protocol = protocol or config.get('protocol', self.protocol)

    # riak bucket must be ascii letters or numbers only
    if not is_ascii(self.bucket_name):
        raise ValueError(E_BUCKET_NAME.format(self.bucket_name))

    self._client = None
def __init__(self, url=None, *args, **kwargs):
    """Parse the url or load the settings from settings object."""
    super().__init__(*args, **kwargs)

    if py_arango_connection is None:
        raise ImproperlyConfigured(
            'You need to install the pyArango library to use the '
            'ArangoDb backend.',
        )

    self.url = url

    if url is None:
        host = port = database = collection = username = password = None
    else:
        (_schema, host, port, username, password,
         database_collection, _query) = _parse_url(url)
        if database_collection is None:
            database = collection = None
        else:
            database, collection = database_collection.split('/')

    config = self.app.conf.get('arangodb_backend_settings', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'ArangoDb backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.host = host or config.get('host', self.host)
    self.port = int(port or config.get('port', self.port))
    self.http_protocol = config.get('http_protocol', self.http_protocol)
    self.verify = config.get('verify', self.verify)
    self.database = database or config.get('database', self.database)
    self.collection = \
        collection or config.get('collection', self.collection)
    self.username = username or config.get('username', self.username)
    self.password = password or config.get('password', self.password)
    self.arangodb_url = "{http_protocol}://{host}:{port}".format(
        http_protocol=self.http_protocol, host=self.host, port=self.port)
    self._connection = None
def __init__(self, url=None, *args, **kwargs):
    super(ElasticsearchBackend, self).__init__(*args, **kwargs)

    if elasticsearch is None:
        raise ImproperlyConfigured(E_LIB_MISSING)

    index = doc_type = scheme = host = port = None

    if url:
        scheme, host, port, _, _, path, _ = _parse_url(url)  # noqa
        if path:
            path = path.strip('/')
            index, _, doc_type = path.partition('/')

    self.index = index or self.index
    self.doc_type = doc_type or self.doc_type
    self.scheme = scheme or self.scheme
    self.host = host or self.host
    self.port = port or self.port

    self._server = None
def __init__(self, url=None, *args, **kwargs):
    super(ElasticsearchBackend, self).__init__(*args, **kwargs)

    self.url = url
    _get = self.app.conf.get

    if elasticsearch is None:
        raise ImproperlyConfigured(E_LIB_MISSING)

    index = doc_type = scheme = host = port = username = password = None

    if url:
        scheme, host, port, username, password, path, _ = _parse_url(url)  # noqa
        if scheme == "elasticsearch":
            scheme = None
        if path:
            path = path.strip("/")
            index, _, doc_type = path.partition("/")

    self.index = index or self.index
    self.doc_type = doc_type or self.doc_type
    self.scheme = scheme or self.scheme
    self.host = host or self.host
    self.port = port or self.port
    self.username = username or self.username
    self.password = password or self.password

    self.es_retry_on_timeout = (
        _get("elasticsearch_retry_on_timeout") or self.es_retry_on_timeout
    )

    es_timeout = _get("elasticsearch_timeout")
    if es_timeout is not None:
        self.es_timeout = es_timeout

    es_max_retries = _get("elasticsearch_max_retries")
    if es_max_retries is not None:
        self.es_max_retries = es_max_retries

    self.es_save_meta_as_text = _get("elasticsearch_save_meta_as_text", True)
    self._server = None
def __init__(self, url=None, *args, **kwargs):
    """Initialize CouchDB backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`pycouchdb` is not available.

    """
    super(CouchDBBackend, self).__init__(*args, **kwargs)

    self.expires = kwargs.get('expires') or maybe_timedelta(
        self.app.conf.CELERY_TASK_RESULT_EXPIRES)

    if pycouchdb is None:
        raise ImproperlyConfigured(
            'You need to install the pycouchdb library to use the '
            'CouchDB backend.',
        )

    uscheme = uhost = uport = uname = upass = ucontainer = None
    if url:
        _, uhost, uport, uname, upass, ucontainer, _ = _parse_url(url)  # noqa
        ucontainer = ucontainer.strip('/') if ucontainer else None

    config = self.app.conf.get('CELERY_COUCHDB_BACKEND_SETTINGS', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'CouchDB backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.scheme = uscheme or config.get('scheme', self.scheme)
    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.container = ucontainer or config.get('container', self.container)
    self.username = uname or config.get('username', self.username)
    self.password = upass or config.get('password', self.password)

    self._connection = None
def __init__(self, url=None, *args, **kwargs):
    super(ElasticsearchBackend, self).__init__(*args, **kwargs)

    self.url = url

    if elasticsearch is None:
        raise ImproperlyConfigured(E_LIB_MISSING)

    index = doc_type = scheme = host = port = None

    if url:
        scheme, host, port, _, _, path, _ = _parse_url(url)  # noqa
        if path:
            path = path.strip('/')
            index, _, doc_type = path.partition('/')

    self.index = index or self.index
    self.doc_type = doc_type or self.doc_type
    self.scheme = scheme or self.scheme
    self.host = host or self.host
    self.port = port or self.port

    self._server = None
def __init__(self, host=None, port=None, db=None, password=None,
             expires=None, max_connections=None, url=None, **kwargs):
    super(RedisBackend, self).__init__(**kwargs)
    conf = self.app.conf
    if self.redis is None:
        raise ImproperlyConfigured(
            'You need to install the redis library in order to use ' +
            'the Redis result store backend.')

    # For compatibility with the old REDIS_* configuration keys.
    def _get(key):
        for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
            try:
                return conf[prefix.format(key)]
            except KeyError:
                pass

    if host and '://' in host:
        url, host = host, None
    self.url = url
    uhost = uport = upass = udb = None
    if url:
        _, uhost, uport, _, upass, udb, _ = _parse_url(url)
        udb = udb.strip('/') if udb else 0
    self.host = uhost or host or _get('HOST') or self.host
    self.port = int(uport or port or _get('PORT') or self.port)
    self.db = udb or db or _get('DB') or self.db
    self.password = upass or password or _get('PASSWORD') or self.password
    self.expires = self.prepare_expires(expires, type=int)
    self.max_connections = (max_connections or
                            _get('MAX_CONNECTIONS') or
                            self.max_connections)
def __init__(self, url=None, *args, **kwargs):
    """Initialize CouchBase backend instance.

    :raises celery.exceptions.ImproperlyConfigured: if
        module :mod:`couchbase` is not available.

    """
    super(CouchBaseBackend, self).__init__(*args, **kwargs)

    if Couchbase is None:
        raise ImproperlyConfigured(
            'You need to install the couchbase library to use the '
            'CouchBase backend.',
        )

    uhost = uport = uname = upass = ubucket = None
    if url:
        _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
        ubucket = ubucket.strip('/') if ubucket else None

    config = self.app.conf.get('couchbase_backend_settings', None)
    if config is not None:
        if not isinstance(config, dict):
            raise ImproperlyConfigured(
                'Couchbase backend settings should be grouped in a dict',
            )
    else:
        config = {}

    self.host = uhost or config.get('host', self.host)
    self.port = int(uport or config.get('port', self.port))
    self.bucket = ubucket or config.get('bucket', self.bucket)
    self.username = uname or config.get('username', self.username)
    self.password = upass or config.get('password', self.password)

    self._connection = None
def _params_from_url(self, url, defaults):
    scheme, host, port, user, password, path, query = _parse_url(url)
    connparams = dict(
        defaults,
        host=host,
        port=port,
        user=user,
        password=password,
        db=int(query.pop('virtual_host', None) or 0),
    )

    if scheme == 'socket':
        # Use 'path' as path to the socket... in this case
        # the database number should be given in 'query'
        connparams.update({
            'connection_class': self.redis.UnixDomainSocketConnection,
            'path': '/' + path,
        })
        connparams.pop('host', None)
        connparams.pop('port', None)
    else:
        path = path.strip('/') if isinstance(path, string_t) else path
        if path:
            connparams['db'] = int(path)

    # Query parameters override other parameters
    connparams.update(query)
    return connparams
    'max_connections': self.max_connections,
    'socket_timeout': self.socket_timeout,
    'socket_connect_timeout': self.socket_connect_timeout,
    'socket_keepalive': self.socket_keepalive,
    'socket_keepalive_options': self.socket_keepalive_options,
}
if redis.VERSION < (2, 10):
    for param in _r210_options:
        val = connparams.pop(param, None)
        if val is not None:
            raise VersionMismatch(
                'redis: {0!r} requires redis 2.10.0 or higher'.format(
                    param))
host = connparams['host']
if '://' in host:
    scheme, _, _, _, password, path, query = _parse_url(host)
    if scheme == 'socket':
        connparams = self._filter_tcp_connparams(**connparams)
        connparams.update({
            'connection_class': redis.UnixDomainSocketConnection,
            'path': '/' + path,
            'password': password}, **query)

        connparams.pop('socket_connect_timeout', None)
        connparams.pop('socket_keepalive', None)
        connparams.pop('socket_keepalive_options', None)

    connparams.pop('host', None)
    connparams.pop('port', None)
connparams['db'] = self._prepare_virtual_host(
    connparams.pop('virtual_host', None))
def get_backend_by_url(backend=None, loader=None):
    url = None
    if backend and "://" in backend:
        url = backend
        backend, _, _, _, _, _, _ = _parse_url(url)
    return get_backend_cls(backend, loader), url
def _connparams(self, asynchronous=False):
    conninfo = self.connection.client
    connparams = {
        "host": conninfo.hostname or "127.0.0.1",
        "port": conninfo.port or self.connection.default_port,
        "virtual_host": conninfo.virtual_host,
        "password": conninfo.password,
        "max_connections": self.max_connections,
        "socket_timeout": self.socket_timeout,
        "socket_connect_timeout": self.socket_connect_timeout,
        "socket_keepalive": self.socket_keepalive,
        "socket_keepalive_options": self.socket_keepalive_options,
        "health_check_interval": self.health_check_interval,
        "retry_on_timeout": self.retry_on_timeout,
    }

    conn_class = self.connection_class

    # If the connection class does not support the `health_check_interval`
    # argument then remove it.
    if hasattr(conn_class, "__init__") and not accepts_argument(
            conn_class.__init__, "health_check_interval"):
        connparams.pop("health_check_interval")

    if conninfo.ssl:
        # Connection(ssl={}) must be a dict containing the keys:
        # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile'
        try:
            connparams.update(conninfo.ssl)
            connparams["connection_class"] = redis.SSLConnection
        except TypeError:
            pass
    host = connparams["host"]
    if "://" in host:
        scheme, _, _, _, password, path, query = _parse_url(host)
        if scheme == "socket":
            connparams = self._filter_tcp_connparams(**connparams)
            connparams.update(
                {
                    "connection_class": redis.UnixDomainSocketConnection,
                    "path": "/" + path,
                },
                **query)

            connparams.pop("socket_connect_timeout", None)
            connparams.pop("socket_keepalive", None)
            connparams.pop("socket_keepalive_options", None)

        connparams["password"] = password

        connparams.pop("host", None)
        connparams.pop("port", None)
    connparams["db"] = self._prepare_virtual_host(
        connparams.pop("virtual_host", None))

    channel = self
    connection_cls = connparams.get(
        "connection_class") or self.connection_class

    if asynchronous:
        class Connection(connection_cls):
            def disconnect(self):
                super(Connection, self).disconnect()
                channel._on_connection_disconnect(self)
        connection_cls = Connection

    connparams["connection_class"] = connection_cls

    return connparams
def __init__(self, url=None, *args, **kwargs):
    super(TarantoolBackend, self).__init__(*args, **kwargs)

    if url is not None:
        _, self._host, self._port, _, _, _, _ = _parse_url(url)