Example #1
    def _open(self, scheme='mongodb://'):
        hostname, dbname, conf = self._parse_uri(scheme=scheme)

        conf['host'] = hostname

        env = _detect_environment()
        if env == 'gevent':
            from gevent import monkey
            monkey.patch_all()
        elif env == 'eventlet':
            from eventlet import monkey_patch
            monkey_patch()

        mongoconn = MongoClient(**conf)
        database = mongoconn[dbname]

        version_str = mongoconn.server_info()['version']
        version_str = version_str.split('-')[0]
        version = tuple(map(int, version_str.split('.')))

        if version < (1, 3):
            raise VersionMismatch(E_SERVER_VERSION.format(version_str))
        elif self.ttl and version < (2, 2):
            raise VersionMismatch(E_NO_TTL_INDEXES.format(version_str))

        return database
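
The check above compares a parsed version tuple, not the raw string, so a build suffix such as "-ent" has to be stripped first. A minimal standalone sketch of that parsing step (the sample version strings below are illustrative, not from the original):

# Hypothetical standalone version of the parsing done in _open() above.
def parse_server_version(version_str):
    version_str = version_str.split('-')[0]          # drop a "-ent"/"-rc" build suffix
    return tuple(map(int, version_str.split('.')))

assert parse_server_version('4.4.6-ent') == (4, 4, 6)
assert parse_server_version('2.1.0') < (2, 2)        # tuples compare element-wise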
Example #2
    def _get_client(self):
        if redis.VERSION < (2, 4, 4):
            raise VersionMismatch(
                'Redis transport requires redis-py versions 2.4.4 or later. '
                'You have {0.__version__}'.format(redis))

        # KombuRedis maintains a connection attribute on its instance and
        # uses that when executing commands.
        # This was added after redis-py was changed.
        class KombuRedis(redis.Redis):  # pragma: no cover
            def __init__(self, *args, **kwargs):
                super(KombuRedis, self).__init__(*args, **kwargs)
                self.connection = self.connection_pool.get_connection('_')

            def execute_command(self, *args, **options):
                conn = self.connection
                command_name = args[0]
                try:
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)
                except redis.ConnectionError:
                    conn.disconnect()
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)

        return KombuRedis
Example #3
    def _get_client(self):
        version = getattr(redis, '__version__', '0.0.0')
        version = tuple(map(int, version.split('.')))
        if version < (2, 4, 4):
            raise VersionMismatch(
                'Redis transport requires redis-py versions 2.4.4 or later. '
                'You have %r' % ('.'.join(map(str_t, version)), ))

        # KombuRedis maintains a connection attribute on its instance and
        # uses that when executing commands.
        # This was added after redis-py was changed.
        class KombuRedis(redis.Redis):  # pragma: no cover

            def __init__(self, *args, **kwargs):
                super(KombuRedis, self).__init__(*args, **kwargs)
                self.connection = self.connection_pool.get_connection('_')

            def execute_command(self, *args, **options):
                conn = self.connection
                command_name = args[0]
                try:
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)
                except redis.ConnectionError:
                    conn.disconnect()
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)

        return KombuRedis
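
Examples #2 and #3 differ only in whether they compare redis-py's VERSION tuple or parse its __version__ string. The conversion to an int tuple matters because plain string comparison orders versions lexically; a hedged illustration:

# Illustration only: why the examples convert the version string to an int tuple.
assert '2.10.0' < '2.4.4'            # lexical string comparison is misleading
assert (2, 10, 0) > (2, 4, 4)        # element-wise tuple comparison is correct

def as_version_tuple(version_str):
    return tuple(map(int, version_str.split('.')))

assert as_version_tuple('2.4.3') < (2, 4, 4)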
Example #4
    def _get_client(self):
        import redis

        version = getattr(redis, "__version__", "0.0.0")
        if version:
            version = tuple(map(int, version.split(".")))
        if version < (2, 4, 4):
            raise VersionMismatch(
                "Redis transport requires redis-py versions 2.4.4 or later. "
                "You have %r" % (".".join(version), ))

        # KombuRedis maintains a connection attribute on its instance and
        # uses that when executing commands.
        class KombuRedis(redis.Redis):
            def __init__(self, *args, **kwargs):
                super(KombuRedis, self).__init__(*args, **kwargs)
                self.connection = self.connection_pool.get_connection('_')

            def execute_command(self, *args, **options):
                conn = self.connection
                command_name = args[0]
                try:
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)
                except redis.ConnectionError:
                    conn.disconnect()
                    conn.send_command(*args)
                    return self.parse_response(conn, command_name, **options)

        return KombuRedis
Example #5
    def _connparams(self, asynchronous=False, _r210_options=(
            'socket_connect_timeout', 'socket_keepalive',
            'socket_keepalive_options')):
        conninfo = self.connection.client
        connparams = {
            'host': conninfo.hostname or '127.0.0.1',
            'port': conninfo.port or DEFAULT_PORT,
            'virtual_host': conninfo.virtual_host,
            'password': conninfo.password,
            'max_connections': self.max_connections,
            'socket_timeout': self.socket_timeout,
            'socket_connect_timeout': self.socket_connect_timeout,
            'socket_keepalive': self.socket_keepalive,
            'socket_keepalive_options': self.socket_keepalive_options,
        }
        if redis.VERSION < (2, 10):
            for param in _r210_options:
                val = connparams.pop(param, None)
                if val is not None:
                    raise VersionMismatch(
                        'redis: {0!r} requires redis 2.10.0 or higher'.format(
                            param))
        host = connparams['host']
        if '://' in host:
            scheme, _, _, _, password, path, query = _parse_url(host)
            if scheme == 'socket':
                connparams = self._filter_tcp_connparams(**connparams)
                connparams.update({
                    'connection_class': redis.UnixDomainSocketConnection,
                    'path': '/' + path,
                    'password': password}, **query)

                connparams.pop('socket_connect_timeout', None)
                connparams.pop('socket_keepalive', None)
                connparams.pop('socket_keepalive_options', None)

            connparams.pop('host', None)
            connparams.pop('port', None)
        connparams['db'] = self._prepare_virtual_host(
            connparams.pop('virtual_host', None))

        channel = self
        connection_cls = (
            connparams.get('connection_class') or
            redis.Connection
        )

        if asynchronous:
            class Connection(connection_cls):
                def disconnect(self):
                    super(Connection, self).disconnect()
                    channel._on_connection_disconnect(self)
            connparams['connection_class'] = Connection

        return connparams
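
When asynchronous=True, the method swaps in a Connection subclass whose disconnect() also notifies the channel. A minimal sketch of that hook pattern under assumed names (BaseConnection and the callback below are illustrative, not kombu's API):

# Sketch of the disconnect-hook pattern used above; names here are made up.
class BaseConnection:
    def disconnect(self):
        print('socket closed')

def with_disconnect_hook(base_cls, on_disconnect):
    class Connection(base_cls):
        def disconnect(self):
            super().disconnect()          # do the real disconnect first
            on_disconnect(self)           # then notify the owning channel
    return Connection

Conn = with_disconnect_hook(BaseConnection, lambda conn: print('channel notified'))
Conn().disconnect()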
Example #6
    def _get_client(self):
        if redis.VERSION < (3, 2, 0):
            raise VersionMismatch(
                'Redis transport requires redis-py versions 3.2.0 or later. '
                'You have {0.__version__}'.format(redis))

        if self.global_keyprefix:
            return functools.partial(
                PrefixedStrictRedis,
                global_keyprefix=self.global_keyprefix,
            )

        return redis.StrictRedis
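
functools.partial is used here so callers can keep treating the return value as a client class while the configured key prefix is always passed in. A small sketch with a stand-in class (PrefixedClient below is illustrative, not kombu's PrefixedStrictRedis):

import functools

class PrefixedClient:                     # stand-in; only the keyword binding is shown
    def __init__(self, *args, global_keyprefix='', **kwargs):
        self.global_keyprefix = global_keyprefix

Client = functools.partial(PrefixedClient, global_keyprefix='myapp:')
client = Client()                         # instantiated like a normal class
assert client.global_keyprefix == 'myapp:'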
Example #7
    def _get_client(self):
        if redis.VERSION < (2, 10, 0):
            raise VersionMismatch(
                'Redis transport requires redis-py versions 2.10.0 or later. '
                'You have {0.__version__}'.format(redis))

        # KombuRedis maintains a connection attribute on its instance and
        # uses that when executing commands.
        # This was added after redis-py was changed.
        class KombuRedis(redis.StrictRedis):  # pragma: no cover
            def __init__(self, *args, **kwargs):
                super(KombuRedis, self).__init__(*args, **kwargs)
                self.connection = self.connection_pool.get_connection('_')

        return KombuRedis
Example #8
    def _get_async_client(self):
        if redis.VERSION < (2, 4, 4):
            raise VersionMismatch(
                "Redis transport requires redis-py versions 2.4.4 or later. "
                "You have {0.__version__}".format(redis))

        # AsyncRedis maintains a connection attribute on its instance and
        # uses that when executing commands.
        # This was added after redis-py was changed.
        class AsyncRedis(redis.Redis):  # pragma: no cover
            def __init__(self, *args, **kwargs):
                super(AsyncRedis, self).__init__(*args, **kwargs)
                self.connection = self.connection_pool.get_connection("_")

        return AsyncRedis
Example #9
    def _connparams(self,
                    _r210_options=('socket_connect_timeout',
                                   'socket_keepalive',
                                   'socket_keepalive_options')):
        conninfo = self.connection.client

        hosts, password, virtual_host = self._parse_hosts(conninfo.hostname)

        connparams = {
            'startup_nodes': hosts,
            'virtual_host': virtual_host,
            'password': password,
            'max_connections': self.max_connections,
            'socket_timeout': self.socket_timeout,
            'socket_connect_timeout': self.socket_connect_timeout,
            'socket_keepalive': self.socket_keepalive,
            'socket_keepalive_options': self.socket_keepalive_options,
        }

        if redis.VERSION < (2, 10):
            for param in _r210_options:
                val = connparams.pop(param, None)
                if val is not None:
                    raise VersionMismatch(
                        'redis: {0!r} requires redis 2.10.0 or higher'.format(
                            param))

        connparams['db'] = self._prepare_virtual_host(
            connparams.pop('virtual_host', None))

        channel = self
        connection_cls = (connparams.get('connection_class')
                          or ClusterConnection)

        class Connection(connection_cls):
            def disconnect(self):
                super(Connection, self).disconnect()
                channel._on_connection_disconnect(self)

        connparams['connection_class'] = Connection

        return connparams
Example #10
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
import socket

from operator import attrgetter

from kombu.exceptions import VersionMismatch
from kombu.transport import base

from pika import channel  # must be here so the import below raises ImportError.
try:
    from pika import asyncore_adapter
except ImportError:
    raise VersionMismatch("Kombu only works with pika version 0.5.2")
from pika import blocking_adapter
from pika import connection
from pika import exceptions
from pika.spec import Basic, BasicProperties

DEFAULT_PORT = 5672

BASIC_PROPERTIES = ("content_type", "content_encoding", "headers",
                    "delivery_mode", "priority", "correlation_id", "reply_to",
                    "expiration", "message_id", "timestamp", "type", "user_id",
                    "app_id", "cluster_id")


class Message(base.Message):
    def __init__(self, channel, amqp_message, **kwargs):
Example #11
    def _get_client(self):
        if redis.VERSION < (3, 2, 0):
            raise VersionMismatch(
                "Redis transport requires redis-py versions 3.2.0 or later. "
                "You have {0.__version__}".format(redis))
        return redis.StrictRedis
Example #12
    def _connparams(
        self,
        asynchronous=False,
        _r210_options=(
            "socket_connect_timeout",
            "socket_keepalive",
            "socket_keepalive_options",
        ),
    ):
        conninfo = self.connection.client
        connparams = {
            "host": conninfo.hostname or "127.0.0.1",
            "port": conninfo.port or DEFAULT_PORT,
            "virtual_host": conninfo.virtual_host,
            "password": conninfo.password,
            "max_connections": self.max_connections,
            "socket_timeout": self.socket_timeout,
            "socket_connect_timeout": self.socket_connect_timeout,
            "socket_keepalive": self.socket_keepalive,
            "socket_keepalive_options": self.socket_keepalive_options,
        }
        if redis.VERSION < (2, 10):
            for param in _r210_options:
                val = connparams.pop(param, None)
                if val is not None:
                    raise VersionMismatch(
                        "redis: {0!r} requires redis 2.10.0 or higher".format(
                            param))
        host = connparams["host"]
        if "://" in host:
            scheme, _, _, _, password, path, query = _parse_url(host)
            if scheme == "socket":
                connparams = self._filter_tcp_connparams(**connparams)
                connparams.update(
                    {
                        "connection_class": redis.UnixDomainSocketConnection,
                        "path": "/" + path,
                        "password": password,
                    }, **query)

                connparams.pop("socket_connect_timeout", None)
                connparams.pop("socket_keepalive", None)
                connparams.pop("socket_keepalive_options", None)

            connparams.pop("host", None)
            connparams.pop("port", None)
        connparams["db"] = self._prepare_virtual_host(
            connparams.pop("virtual_host", None))

        channel = self
        connection_cls = connparams.get("connection_class") or redis.Connection

        if asynchronous:

            class Connection(connection_cls):
                def disconnect(self):
                    super(Connection, self).disconnect()
                    channel._on_connection_disconnect(self)

            connparams["connection_class"] = Connection

        return connparams
Example #13
    def _get_client(self):
        if redis.VERSION < (2, 10, 5):
            raise VersionMismatch(
                'Redis transport requires redis-py versions 2.10.5 or later. '
                'You have {0.__version__}'.format(redis))
        return redis.StrictRedis
Example #14
            'host': conninfo.hostname or '127.0.0.1',
            'port': conninfo.port or DEFAULT_PORT,
            'virtual_host': conninfo.virtual_host,
            'password': conninfo.password,
            'max_connections': self.max_connections,
            'socket_timeout': self.socket_timeout,
            'socket_connect_timeout': self.socket_connect_timeout,
            'socket_keepalive': self.socket_keepalive,
            'socket_keepalive_options': self.socket_keepalive_options,
        }
        if redis.VERSION < (2, 10):
            for param in _r210_options:
                val = connparams.pop(param, None)
                if val is not None:
                    raise VersionMismatch(
                        'redis: {0!r} requires redis 2.10.0 or higher'.format(
                            param))
        host = connparams['host']
        if '://' in host:
            scheme, _, _, _, password, path, query = _parse_url(host)
            if scheme == 'socket':
                connparams = self._filter_tcp_connparams(**connparams)
                connparams.update(
                    {
                        'connection_class': redis.UnixDomainSocketConnection,
                        'path': '/' + path,
                        'password': password
                    }, **query)

                connparams.pop('socket_connect_timeout', None)
                connparams.pop('socket_keepalive', None)
Example #15
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import

import amqp

from kombu.exceptions import StdChannelError, VersionMismatch
from kombu.utils.amq_manager import get_manager

from . import base

DEFAULT_PORT = 5672

if amqp.VERSION < (0, 9, 3):
    raise VersionMismatch('Please install amqp version 0.9.3 or higher.')


class Message(base.Message):
    def __init__(self, channel, msg, **kwargs):
        props = msg.properties
        super(Message,
              self).__init__(channel,
                             body=msg.body,
                             delivery_tag=msg.delivery_tag,
                             content_type=props.get('content_type'),
                             content_encoding=props.get('content_encoding'),
                             delivery_info=msg.delivery_info,
                             properties=msg.properties,
                             headers=props.get('application_headers') or {},
                             **kwargs)
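
Unlike the per-call checks in the other examples, this transport runs its version guard at import time, so an incompatible py-amqp fails as soon as the module is loaded. A hedged sketch of the same pattern (the (1, 0, 0) minimum below is an arbitrary placeholder, not the transport's real requirement):

# Import-time guard sketch; the minimum version here is a placeholder.
import amqp
from kombu.exceptions import VersionMismatch

REQUIRED = (1, 0, 0)
if amqp.VERSION < REQUIRED:
    raise VersionMismatch(
        'Please install amqp version %s or higher.' % '.'.join(map(str, REQUIRED)))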
Example #16
class Channel(virtual.Channel):
    QoS = QoS

    _client = None
    _subclient = None
    _closing = False
    supports_fanout = True
    keyprefix_queue = '_kombu.binding.%s'
    keyprefix_fanout = '/{db}.'
    sep = '\x06\x16'
    _in_poll = False
    _in_listen = False
    _fanout_queues = {}
    ack_emulation = True
    unacked_key = 'unacked'
    unacked_index_key = 'unacked_index'
    unacked_mutex_key = 'unacked_mutex'
    unacked_mutex_expire = 300  # 5 minutes
    unacked_restore_limit = None
    visibility_timeout = 3600  # 1 hour
    priority_steps = PRIORITY_STEPS
    socket_timeout = None
    socket_connect_timeout = None
    socket_keepalive = None
    socket_keepalive_options = None
    max_connections = 10
    #: Transport option to enable/disable the fanout keyprefix.
    #: Should be enabled by default, but that is not
    #: backwards compatible.  Can also be a string, in which
    #: case it changes the default prefix ('/{db}.') into something
    #: else.  The prefix must include a leading slash and a trailing dot.
    fanout_prefix = False

    #: If enabled the fanout exchange will support patterns in routing
    #: and binding keys (like a topic exchange but using PUB/SUB).
    #: This will be enabled by default in a future version.
    fanout_patterns = False

    _async_pool = None
    _pool = None
    _disconnecting_pools = False

    from_transport_options = (
        virtual.Channel.from_transport_options +
        ('ack_emulation', 'unacked_key', 'unacked_index_key',
         'unacked_mutex_key', 'unacked_mutex_expire', 'visibility_timeout',
         'unacked_restore_limit', 'fanout_prefix', 'fanout_patterns',
         'socket_timeout', 'socket_connect_timeout', 'socket_keepalive',
         'socket_keepalive_options', 'queue_order_strategy', 'max_connections',
         'priority_steps')  # <-- do not add comma here!
    )

    def __init__(self, *args, **kwargs):
        super_ = super(Channel, self)
        super_.__init__(*args, **kwargs)

        if not self.ack_emulation:  # disable visibility timeout
            self.QoS = virtual.QoS

        self._queue_cycle = []
        self.AsyncClient = self._get_async_client()
        self.Client = redis.Redis
        self.ResponseError = self._get_response_error()
        self.active_fanout_queues = set()
        self.auto_delete_queues = set()
        self._fanout_to_queue = {}
        self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}

        if self.fanout_prefix:
            if isinstance(self.fanout_prefix, string_t):
                self.keyprefix_fanout = self.fanout_prefix
        else:
            # previous versions did not set a fanout prefix, so it cannot
            # be enabled by default.
            self.keyprefix_fanout = ''

        # Evaluate connection.
        try:
            self.client.info()
        except Exception:
            self._disconnect_pools()
            raise

        self.connection.cycle.add(self)  # add to channel poller.
        # copy errors, in case the channel is closed but threads
        # are still waiting for data.
        self.connection_errors = self.connection.connection_errors

        register_after_fork(self, self._after_fork)

    def _after_fork(self):
        self._disconnect_pools()

    def _disconnect_pools(self):
        if not self._disconnecting_pools:
            self._disconnecting_pools = True
            try:
                if self._async_pool is not None:
                    self._async_pool.disconnect()
                if self._pool is not None:
                    self._pool.disconnect()
                self._async_pool = self._pool = None
            finally:
                self._disconnecting_pools = False

    def _on_connection_disconnect(self, connection):
        self._in_poll = False
        self._in_listen = False
        if self.connection and self.connection.cycle:
            self.connection.cycle._on_connection_disconnect(connection)
        self._disconnect_pools()
        if not self._closing:
            raise get_redis_ConnectionError()

    def _do_restore_message(self,
                            payload,
                            exchange,
                            routing_key,
                            client=None,
                            leftmost=False):
        with self.conn_or_acquire(client) as client:
            try:
                try:
                    payload['headers']['redelivered'] = True
                except KeyError:
                    pass
                for queue in self._lookup(exchange, routing_key):
                    (client.lpush if leftmost else client.rpush)(
                        queue,
                        dumps(payload),
                    )
            except Exception:
                crit('Could not restore message: %r', payload, exc_info=True)

    def _restore(self, message, leftmost=False):
        if not self.ack_emulation:
            return super(Channel, self)._restore(message)
        tag = message.delivery_tag
        with self.conn_or_acquire() as client:
            with client.pipeline() as pipe:
                P, _ = pipe.hget(self.unacked_key, tag) \
                           .hdel(self.unacked_key, tag) \
                           .execute()
            if P:
                M, EX, RK = loads(bytes_to_str(P))  # json is unicode
                self._do_restore_message(M, EX, RK, client, leftmost)

    def _restore_at_beginning(self, message):
        return self._restore(message, leftmost=True)

    def basic_consume(self, queue, *args, **kwargs):
        if queue in self._fanout_queues:
            exchange, _ = self._fanout_queues[queue]
            self.active_fanout_queues.add(queue)
            self._fanout_to_queue[exchange] = queue
        ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
        self._update_cycle()
        return ret

    def basic_cancel(self, consumer_tag):
        # If we are busy reading messages we may experience
        # a race condition where a message is consumed after
        # cancelling, so we must delay this operation until reading
        # is complete (Issue celery/celery#1773).
        connection = self.connection
        if connection:
            if connection.cycle._in_protected_read:
                return connection.cycle.after_read.add(
                    promise(self._basic_cancel, (consumer_tag, )), )
            return self._basic_cancel(consumer_tag)

    def _basic_cancel(self, consumer_tag):
        try:
            queue = self._tag_to_queue[consumer_tag]
        except KeyError:
            return
        try:
            self.active_fanout_queues.remove(queue)
        except KeyError:
            pass
        else:
            self._unsubscribe_from(queue)
        try:
            exchange, _ = self._fanout_queues[queue]
            self._fanout_to_queue.pop(exchange)
        except KeyError:
            pass
        ret = super(Channel, self).basic_cancel(consumer_tag)
        self._update_cycle()
        return ret

    def _get_publish_topic(self, exchange, routing_key):
        if routing_key and self.fanout_patterns:
            return ''.join([self.keyprefix_fanout, exchange, '/', routing_key])
        return ''.join([self.keyprefix_fanout, exchange])

    def _get_subscribe_topic(self, queue):
        exchange, routing_key = self._fanout_queues[queue]
        return self._get_publish_topic(exchange, routing_key)

    def _subscribe(self):
        keys = [
            self._get_subscribe_topic(queue)
            for queue in self.active_fanout_queues
        ]
        if not keys:
            return
        c = self.subclient
        if c.connection._sock is None:
            c.connection.connect()
        self._in_listen = True
        c.psubscribe(keys)

    def _unsubscribe_from(self, queue):
        topic = self._get_subscribe_topic(queue)
        c = self.subclient
        should_disconnect = False
        if c.connection._sock is None:
            c.connection.connect()
            should_disconnect = True
        try:
            c.unsubscribe([topic])
        finally:
            if should_disconnect and c.connection:
                c.connection.disconnect()

    def _handle_message(self, client, r):
        if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0:
            client.subscribed = False
        elif bytes_to_str(r[0]) == 'pmessage':
            return {
                'type': r[0],
                'pattern': r[1],
                'channel': r[2],
                'data': r[3]
            }
        else:
            return {
                'type': r[0],
                'pattern': None,
                'channel': r[1],
                'data': r[2]
            }

    def _receive(self):
        c = self.subclient
        response = None
        try:
            response = c.parse_response()
        except self.connection_errors:
            self._in_listen = False
            raise Empty()
        if response is not None:
            payload = self._handle_message(c, response)
            if bytes_to_str(payload['type']).endswith('message'):
                channel = bytes_to_str(payload['channel'])
                if payload['data']:
                    if channel[0] == '/':
                        _, _, channel = channel.partition('.')
                    try:
                        message = loads(bytes_to_str(payload['data']))
                    except (TypeError, ValueError):
                        warn('Cannot process event on channel %r: %s',
                             channel,
                             repr(payload)[:4096],
                             exc_info=1)
                        raise Empty()
                    exchange = channel.split('/', 1)[0]
                    return message, self._fanout_to_queue[exchange]
        raise Empty()

    def _brpop_start(self, timeout=1):
        queues = self._consume_cycle()
        if not queues:
            return
        keys = [
            self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS
            for queue in queues
        ] + [timeout or 0]
        self._in_poll = True
        self.client.connection.send_command('BRPOP', *keys)

    def _brpop_read(self, **options):
        try:
            try:
                dest__item = self.client.parse_response(
                    self.client.connection, 'BRPOP', **options)
            except self.connection_errors:
                # if there's a ConnectionError, disconnect so the next
                # iteration will reconnect automatically.
                self.client.connection.disconnect()
                raise Empty()
            if dest__item:
                dest, item = dest__item
                dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
                self._rotate_cycle(dest)
                return loads(bytes_to_str(item)), dest
            else:
                raise Empty()
        finally:
            self._in_poll = False

    def _poll_error(self, type, **options):
        if type == 'LISTEN':
            self.subclient.parse_response()
        else:
            self.client.parse_response(self.client.connection, type)

    def _get(self, queue):
        with self.conn_or_acquire() as client:
            for pri in PRIORITY_STEPS:
                item = client.rpop(self._q_for_pri(queue, pri))
                if item:
                    return loads(bytes_to_str(item))
            raise Empty()

    def _size(self, queue):
        with self.conn_or_acquire() as client:
            with client.pipeline() as pipe:
                for pri in PRIORITY_STEPS:
                    pipe = pipe.llen(self._q_for_pri(queue, pri))
                sizes = pipe.execute()
                return sum(size for size in sizes
                           if isinstance(size, numbers.Integral))

    def _q_for_pri(self, queue, pri):
        pri = self.priority(pri)
        return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', ''))

    def priority(self, n):
        steps = self.priority_steps
        return steps[bisect(steps, n) - 1]

    def _put(self, queue, message, **kwargs):
        """Deliver message."""
        try:
            pri = max(
                min(int(message['properties']['delivery_info']['priority']),
                    9), 0)
        except (TypeError, ValueError, KeyError):
            pri = 0
        with self.conn_or_acquire() as client:
            client.lpush(self._q_for_pri(queue, pri), dumps(message))

    def _put_fanout(self, exchange, message, routing_key, **kwargs):
        """Deliver fanout message."""
        with self.conn_or_acquire() as client:
            client.publish(
                self._get_publish_topic(exchange, routing_key),
                dumps(message),
            )

    def _new_queue(self, queue, auto_delete=False, **kwargs):
        if auto_delete:
            self.auto_delete_queues.add(queue)

    def _queue_bind(self, exchange, routing_key, pattern, queue):
        if self.typeof(exchange).type == 'fanout':
            # Mark exchange as fanout.
            self._fanout_queues[queue] = (
                exchange,
                routing_key.replace('#', '*'),
            )
        with self.conn_or_acquire() as client:
            client.sadd(
                self.keyprefix_queue % (exchange, ),
                self.sep.join([routing_key or '', pattern or '', queue or '']))

    def _delete(self, queue, exchange, routing_key, pattern, *args):
        self.auto_delete_queues.discard(queue)
        with self.conn_or_acquire() as client:
            client.srem(
                self.keyprefix_queue % (exchange, ),
                self.sep.join([routing_key or '', pattern or '', queue or '']))
            with client.pipeline() as pipe:
                for pri in PRIORITY_STEPS:
                    pipe = pipe.delete(self._q_for_pri(queue, pri))
                pipe.execute()

    def _has_queue(self, queue, **kwargs):
        with self.conn_or_acquire() as client:
            with client.pipeline() as pipe:
                for pri in PRIORITY_STEPS:
                    pipe = pipe.exists(self._q_for_pri(queue, pri))
                return any(pipe.execute())

    def get_table(self, exchange):
        key = self.keyprefix_queue % exchange
        with self.conn_or_acquire() as client:
            values = client.smembers(key)
            if not values:
                raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key))
            return [tuple(bytes_to_str(val).split(self.sep)) for val in values]

    def _purge(self, queue):
        with self.conn_or_acquire() as client:
            with client.pipeline() as pipe:
                for pri in PRIORITY_STEPS:
                    priq = self._q_for_pri(queue, pri)
                    pipe = pipe.llen(priq).delete(priq)
                sizes = pipe.execute()
                return sum(sizes[::2])

    def close(self):
        self._closing = True
        self._disconnect_pools()
        if not self.closed:
            # remove from channel poller.
            self.connection.cycle.discard(self)

            # delete fanout bindings
            for queue in self._fanout_queues:
                if queue in self.auto_delete_queues:
                    self.queue_delete(queue)

            self._close_clients()

        super(Channel, self).close()

    def _close_clients(self):
        # Close connections
        for attr in 'client', 'subclient':
            try:
                self.__dict__[attr].connection.disconnect()
            except (KeyError, AttributeError, self.ResponseError):
                pass

    def _prepare_virtual_host(self, vhost):
        if not isinstance(vhost, numbers.Integral):
            if not vhost or vhost == '/':
                vhost = DEFAULT_DB
            elif vhost.startswith('/'):
                vhost = vhost[1:]
            try:
                vhost = int(vhost)
            except ValueError:
                raise ValueError(
                    'Database is int between 0 and limit - 1, not {0}'.format(
                        vhost, ))
        return vhost

    def _connparams(self, asynchronous=False):
        conninfo = self.connection.client
        connparams = {
            'host': conninfo.hostname or '127.0.0.1',
            'port': conninfo.port or DEFAULT_PORT,
            'virtual_host': conninfo.virtual_host,
            'password': conninfo.password,
            'max_connections': self.max_connections,
            'socket_timeout': self.socket_timeout,
            'socket_connect_timeout': self.socket_connect_timeout,
            'socket_keepalive': self.socket_keepalive,
            'socket_keepalive_options': self.socket_keepalive_options,
        }
        if redis.VERSION < (2, 10):
            for param in ('socket_keepalive', 'socket_keepalive_options'):
                val = connparams.pop(param, None)
                if val is not None:
                    raise VersionMismatch(
                        'redis: {0!r} requires redis 2.10.0 or higher'.format(
                            param))
        host = connparams['host']
        if '://' in host:
            scheme, _, _, _, password, path, query = _parse_url(host)
            if scheme == 'socket':
                connparams.update(
                    {
                        'connection_class': redis.UnixDomainSocketConnection,
                        'path': '/' + path,
                        'password': password
                    }, **query)
            connparams.pop('host', None)
            connparams.pop('port', None)
        connparams['db'] = self._prepare_virtual_host(
            connparams.pop('virtual_host', None))

        channel = self
        connection_cls = (connparams.get('connection_class')
                          or redis.Connection)

        if asynchronous:

            class Connection(connection_cls):
                def disconnect(self):
                    super(Connection, self).disconnect()
                    channel._on_connection_disconnect(self)

            connparams['connection_class'] = Connection

        return connparams
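
In this channel, priority() maps an arbitrary 0-9 message priority onto the nearest configured step with bisect, and _q_for_pri() appends that step to the queue name using the sep marker. A standalone sketch, assuming PRIORITY_STEPS is [0, 3, 6, 9] (an assumption; the transport imports its own constant):

from bisect import bisect

PRIORITY_STEPS = [0, 3, 6, 9]   # assumed value; the transport defines its own
SEP = '\x06\x16'

def priority(n, steps=PRIORITY_STEPS):
    # Pick the highest configured step that is <= n.
    return steps[bisect(steps, n) - 1]

def q_for_pri(queue, pri):
    pri = priority(pri)
    return '%s%s%s' % ((queue, SEP, pri) if pri else (queue, '', ''))

assert priority(4) == 3
assert q_for_pri('celery', 0) == 'celery'                # priority 0 keeps the bare name
assert q_for_pri('celery', 7) == 'celery' + SEP + '6'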