Example #1
def create_task_queues(binding_list):
    binding_map = {}
    exchange = Exchange('DouYin', type='topic')

    _queues = [
        Queue(
            'ocean:debug',
            [binding(exchange, routing_key='douyin.debug.#')],
            queue_arguments={'x-queue-mode': 'lazy'}
        )
    ]

    for routing_key, queue_name in binding_list:
        binding_map.setdefault(queue_name, [])
        binding_map[queue_name].append(routing_key)

    for queue_name, routing_keys in binding_map.items():
        _queues.append(
            Queue(
                queue_name,
                [binding(exchange, routing_key=routing_key)
                 for routing_key in routing_keys],
                queue_arguments={'x-queue-mode': 'lazy'}
            )
        )
    return _queues
Example #2
    def _generate_task_queues(self, **_: Any) -> None:
        """
        Create a list of queues for the celery worker from the registered
        handlers list.

        Each handler has its own queue named `service_name.event_name`.
        This queue is bound to the `events` exchange with the event routing key
        and receives initial events from it. If broker-side retry is enabled,
        the queue is also bound to the `recover` exchange with the same routing
        key, from which it receives retried events.
        `recover` also defines a `dead-letter-exchange` which re-routes
        rejected messages to the `retry` queue with a 1-second delay (in case
        of failed republish).

        Finally, there is the `archived` exchange/queue pair. The `archived`
        queue has a limited message TTL and queue length. When a message
        exceeds the max retries it is republished to the `archived` queue.

        `events` -> (event routing key) -> `demo.my_event` -> Celery worker
            | (message is rejected by worker and message rerouted via DLX)
            V
        `demo:retry` -> `demo:retry` -> ... 1 second elapsed
            | (message ttl expires)
            V
        `recover` -> (routing key) -> `demo.my_event_queue` -> Celery worker
            | (MaxTaskRetriesExceeded)
            V
        `archived` -> `demo.archived`

        """
        queues = self.conf.task_queues or []
        if queues:
            return
        exchange = Exchange(name=self.conf.task_default_exchange,
                            type=self.conf.task_default_exchange_type)

        if defaults.AMQP_EVENTS_MAX_RETRIES > 0:
            # Bind same routing key to "recover" exchange if broker-side delays
            # are enabled
            recover = Exchange(name=self.recover_exchange_name,
                               type=EXCHANGE_TYPE_TOPIC)
        else:
            recover = None

        queue_arguments = None
        if defaults.AMQP_EVENTS_MAX_RETRIES:
            queue_arguments = {
                X_DEAD_LETTER_EXCHANGE: self.get_retry_exchange_name(),
            }

        for name in self._handlers:
            bindings = [binding(exchange=exchange, routing_key=name)]
            if recover:
                bindings.append(binding(exchange=recover, routing_key=name))
            queue = Queue(name=f'{self.main}.{name}',
                          bindings=bindings,
                          queue_arguments=queue_arguments)
            queues.append(queue)
        self.conf.task_queues = queues
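The docstring above describes the broker-side retry topology in prose. Below is a minimal standalone sketch of that topology using plain kombu objects; the exchange/queue names and the 1-second TTL come from the docstring, while the concrete queue arguments ('x-dead-letter-exchange', 'x-message-ttl') are standard RabbitMQ arguments and are an assumption about how the project wires things together (the `archived` pair is omitted).
from kombu import Exchange, Queue, binding

events = Exchange('events', type='topic')     # initial events
recover = Exchange('recover', type='topic')   # retried events re-enter here
retry = Exchange('demo:retry', type='topic')  # dead-letter target for rejects

# Handler queue: receives events and retried events; rejected messages are
# dead-lettered to the retry exchange.
handler_queue = Queue(
    'demo.my_event',
    bindings=[
        binding(events, routing_key='my_event'),
        binding(recover, routing_key='my_event'),
    ],
    queue_arguments={'x-dead-letter-exchange': 'demo:retry'},
)

# Retry queue: parks messages for 1 second, then dead-letters them to
# `recover`, which routes them back to the handler queue.
retry_queue = Queue(
    'demo:retry',
    exchange=retry,
    routing_key='#',
    queue_arguments={
        'x-message-ttl': 1000,
        'x-dead-letter-exchange': 'recover',
    },
)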
Example #3
    def __init__(self, host: str = AMQP_HOST, port: int = AMQP_PORT, binding: Bindings = None, callbacks: Callbacks = None, debug: bool = False, **kwargs):
        """
        Consume message from the given bindings
        :param host: host running RabbitMQ
        :param port: port which handles AMQP (default 5672)
        :param binding: Queue/Exchange bindings to listen on
            Dict[
                str,  # Queue Name
                List[Union[  # Exchange
                    str,  # Exchange Name, type is direct
                    List[Tuple[
                        str,  # Exchange Name
                        Literal['direct', 'fanout', 'headers', 'topic']  # exchange type
                    ]]
                ]]
            ]
        :param callbacks: list of callback functions which are called upon receiving a message
        :param debug: print debugging messages
        :param **kwargs: extra args
        - Backwards compatibility:
            :param exchange: specifies where to read messages from
            :param routing_key:
        """
        Thread.__init__(self, daemon=True)
        self._exit = Event()

        self._url = f"amqp://{host}:{port}"
        self._debug = debug
        self._queues = []

        if isinstance(callbacks, (list, tuple)):
            self._callbacks = [f for f in callbacks if isFunction(f)]
        else:
            self._callbacks = []

        # Initialize connection we are consuming from based on defaults/passed params
        self._conn = kombu.Connection(hostname=host, port=port, userid="guest", password="******", virtual_host="/")
        if binding:
            for queue, exchanges in binding.items():
                queue_bindings = []
                for exchange in exchanges:
                    name, _type, key = (exchange, 'direct', queue) if isinstance(exchange, str) else exchange
                    queue_bindings.append(kombu.binding(exchange=kombu.Exchange(name, type=_type), routing_key=key))
                self._queues.append(kombu.Queue(name=queue, bindings=queue_bindings))

        elif 'exchange' in kwargs and 'routing_key' in kwargs:
            exchange = kombu.Exchange(kwargs['exchange'], type="direct")
            key = kwargs['routing_key']
            # At this point, consumers read messages regardless of the queue name,
            # so the queue name is simply set to the routing key.
            self._queues = [kombu.Queue(name=key, bindings=[kombu.binding(exchange=exchange, routing_key=key)])]

        # Start consumer as an independent process
        self.start()
        if self._debug:
            print(f"Connected to {self._url}", flush=True)
Example #4
def run():
    setup_loggers()

    exchange = Exchange(config.EXCHANGE_NAME, type=config.EXCHANGE_TYPE)
    queues = [
        Queue(config.QUEUE_NAME,
              exchange,
              bindings=[
                  binding(exchange, routing_key=key)
                  for key in config.ROUTING_KEYS
              ])
    ]

    rpc_exchange = Exchange(config.RPC_EXCHANGE_NAME, type='direct')
    rpc_queues = [
        Queue(config.RPC_QUEUE_NAME,
              rpc_exchange,
              bindings=[
                  binding(rpc_exchange, routing_key=config.RPC_ROUTING_KEY)
              ])
    ]

    message_processor = MessageProcessor(logger)
    rpc_message_processor = RpcMessageProcessor(
        logger, config.APP_NAME, integrity_check, config.RABBIT_URL,
        config.QUEUE_NAME, config.RPC_QUEUE_NAME, config.ERROR_QUEUE_NAME,
        config.REGISTER_URL, config.ROUTING_KEYS[0])

    try:
        result = rpc_message_processor.startup_integrity_check(requests)
        if result:
            logger.error(
                "Entries were requested from the Register '{}'".format(result))
        else:
            logger.info("No integrity issues detected")
    except Exception as e:
        logger.exception(
            'Unhandled Exception while attempting integrity fixing: %s',
            repr(e))

    with Connection(config.RABBIT_URL, heartbeat=4) as conn:
        try:
            worker = Worker(logger, conn, queues, rpc_queues,
                            message_processor.process_message,
                            rpc_message_processor.process_rpc_message)
            logger.info("Running worker...")
            worker.run()
        except KeyboardInterrupt:
            logger.debug('KeyboardInterrupt')
        except Exception as e:
            logger.exception('Unhandled Exception: %s', repr(e))
Example #5
 def get_consumers(self, Consumer, channel):
     bindings = []
     if self.routing_key:
         print('Use routing key binding')
         exchange = kombu.Exchange('xivo', type='topic')
     if self.bindings:
         print('Use headers binding')
         exchange = kombu.Exchange('wazo-headers', type='headers')
         for event in self.bindings.split(','):
             arguments = {'name': event}
             if self.tenant is not None:
                 arguments.update(tenant_uuid=self.tenant)
             bindings.append(
                 kombu.binding(
                     exchange=exchange,
                     routing_key=None,
                     arguments=arguments,
                 )
             )
     return [
         Consumer(
             kombu.Queue(
                 exchange=exchange,
                 routing_key=self.routing_key,
                 bindings=bindings,
                 exclusive=True,
             ),
             callbacks=[self.on_message],
         )
     ]
Example #6
def create_task_queues(exchange_name, binding_list):
    """
    批量创建Queue
    :param exchange_name:exchange名称
    :param binding_list:(routing_keys,queue)列表
    :return:[Queue(),Queue(),...]
    """
    binding_map = {}
    exchange = Exchange(exchange_name, type="topic")
    queues = []
    for routing_key, queue_name in binding_list:
        binding_map.setdefault(queue_name, [])
        binding_map[queue_name].append(routing_key)

    for queue_name, routing_keys in binding_map.items():
        queues.append(
            Queue(
                queue_name,
                [
                    binding(exchange, routing_key=routing_key)
                    for routing_key in routing_keys
                ],
                queue_arguments={
                    "x-queue-mode": "lazy",
                    "x-max-priority": 10
                },
            ))
    return queues
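A short usage sketch for the helper above; the exchange name, routing keys and queue names are made up for illustration, and the kombu imports are the ones the snippet relies on.
from kombu import Exchange, Queue, binding  # imports assumed by the snippet above

task_queues = create_task_queues(
    'orders',  # topic exchange name
    [
        ('order.created', 'orders:created'),
        ('order.updated', 'orders:updates'),
        ('order.deleted', 'orders:updates'),  # two routing keys, one queue
    ],
)
# Result: two lazy, priority-enabled queues bound to the 'orders' topic
# exchange -- 'orders:created' with one binding, 'orders:updates' with two.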
Example #7
 def test_anonymous(self):
     chan = Mock()
     x = Queue(bindings=[binding(Exchange('foo'), 'rkey')])
     chan.queue_declare.return_value = 'generated', 0, 0
     xx = x(chan)
     xx.declare()
     assert xx.name == 'generated'
Example #8
 def _create_queue_for(self, topics):
     exchange = Exchange(self._exchange, type="topic")
     queue = Queue(
         self._queue,
         exchange,
         bindings=[binding(exchange, routing_key=t) for t in topics])
     return queue
Example #9
 def test_anonymous(self):
     chan = Mock()
     x = Queue(bindings=[binding(Exchange('foo'), 'rkey')])
     chan.queue_declare.return_value = 'generated', 0, 0
     xx = x(chan)
     xx.declare()
     self.assertEqual(xx.name, 'generated')
Example #10
 def test_multiple_bindings(self):
     chan = Mock()
     q = Queue('mul', [
         binding(Exchange('mul1'), 'rkey1'),
         binding(Exchange('mul2'), 'rkey2'),
         binding(Exchange('mul3'), 'rkey3'),
     ])
     q(chan).declare()
     assert call(
         nowait=False,
         exchange='mul1',
         auto_delete=False,
         passive=False,
         arguments=None,
         type='direct',
         durable=True,
     ) in chan.exchange_declare.call_args_list
Example #11
 def test_multiple_bindings(self):
     chan = Mock()
     q = Queue('mul', [
         binding(Exchange('mul1'), 'rkey1'),
         binding(Exchange('mul2'), 'rkey2'),
         binding(Exchange('mul3'), 'rkey3'),
     ])
     q(chan).declare()
     assert call(
         nowait=False,
         exchange='mul1',
         auto_delete=False,
         passive=False,
         arguments=None,
         type='direct',
         durable=True,
     ) in chan.exchange_declare.call_args_list
Example #12
 def test_constructor(self):
     x = binding(Exchange('foo'), 'rkey',
         arguments={'barg': 'bval'},
         unbind_arguments={'uarg': 'uval'},
     )
     self.assertEqual(x.exchange, Exchange('foo'))
     self.assertEqual(x.routing_key, 'rkey')
     self.assertDictEqual(x.arguments, {'barg': 'bval'})
     self.assertDictEqual(x.unbind_arguments, {'uarg': 'uval'})
Example #13
 def on_event(self, event_name, callback):
     logger.debug('Added callback on event "%s"', event_name)
     self._queue.bindings.add(
         kombu.binding(self._exchange,
                       arguments={
                           'x-match': 'all',
                           'name': event_name
                       }))
     self._events_pubsub.subscribe(event_name, callback)
Example #14
 def init_queue(self, topic_bindings):
     #bind topics to queue
     bindings = []
     for topic in topic_bindings:
         bindings.append(
             binding(exchange=self.topic_log_exchange, routing_key=topic))
     self.log_queue = Queue(exchange=self.topic_log_exchange,
                            exclusive=True,
                            bindings=bindings)
Example #15
 def get_consumers(self, consumer, channel):
     exchange = Exchange(EXCHANGE, type="topic")
     queue = Queue(
         'my_queue',
         exchange,
         bindings=[binding(exchange, routing_key='user_logged_in')])
     return [
         consumer([queue], callbacks=[self.on_message]),
     ]
Example #16
 def test_constructor(self):
     x = binding(
         Exchange('foo'), 'rkey',
         arguments={'barg': 'bval'},
         unbind_arguments={'uarg': 'uval'},
     )
     self.assertEqual(x.exchange, Exchange('foo'))
     self.assertEqual(x.routing_key, 'rkey')
     self.assertDictEqual(x.arguments, {'barg': 'bval'})
     self.assertDictEqual(x.unbind_arguments, {'uarg': 'uval'})
Example #17
 def test_constructor(self):
     x = binding(
         Exchange('foo'), 'rkey',
         arguments={'barg': 'bval'},
         unbind_arguments={'uarg': 'uval'},
     )
     assert x.exchange == Exchange('foo')
     assert x.routing_key == 'rkey'
     assert x.arguments == {'barg': 'bval'}
     assert x.unbind_arguments == {'uarg': 'uval'}
Example #18
 def test_constructor(self) -> None:
     x = binding(
         Exchange('foo'), 'rkey',
         arguments={'barg': 'bval'},
         unbind_arguments={'uarg': 'uval'},
     )
     assert x.exchange == Exchange('foo')
     assert x.routing_key == 'rkey'
     assert x.arguments == {'barg': 'bval'}
     assert x.unbind_arguments == {'uarg': 'uval'}
Example #19
    def get_consumers(self, Consumer, channel):
        job_upd_exchange = Exchange('vnc_config.object-update',
                                    'topic',
                                    durable=False)

        default_bindings = [
            kombu.binding(job_upd_exchange, routing_key='ems-central.#'),
            kombu.binding(job_upd_exchange, routing_key='fmpm-provider.#'),
            kombu.binding(job_upd_exchange, routing_key='topology-service.#'),
            kombu.binding(job_upd_exchange, routing_key='tssm.#'),
            kombu.binding(job_upd_exchange, routing_key='vim.#'),
            kombu.binding(job_upd_exchange, routing_key='data-view-central.#')
        ]
        qname = "default-hapi-consume"
        queue_arguments = None
        task_queues = Queue(qname,
                            job_upd_exchange,
                            bindings=default_bindings,
                            durable=True,
                            queue_arguments=queue_arguments)
        return [
            Consumer(queues=task_queues,
                     accept=['pickle', 'json'],
                     callbacks=[self.process_task])
        ]
Example #20
    def default(config):
        """
        Declare two exchanges, one for RPCs and one for notifications.

        RPCs return results and should therefore only be answered by a single
        agent. Notifications have no results and are processed by potentially
        multiple agents.

        Each agent/site node has a single queue specific to this node. This
        queue is bound to the RPC exchange with a node-specific routing key and
        to the notify exchange with the site-specific, node-specific, and empty
        routing key. The agent on a site node, where the root VRRP instance has
        become MASTER, will also bind its queue to the RPC exchange with the
        site-specific routing key and remove this binding as soon as the site
        leaves the MASTER state.

        This setup ensures that RPC messages can be sent to a specific
        agent/node, by using the node-specific routing key and to the agent on
        the master by using the site-specific routing key.
        Notifications can be sent to all agents/nodes by using the empty routing
        key, to all agents/nodes of a site by using the site-specific routing
        key, and to a specific node by using the node-specific routing key.
        """
        rpc_exchange = kombu.Exchange(config.HADES_CELERY_RPC_EXCHANGE,
                                      config.HADES_CELERY_RPC_EXCHANGE_TYPE)
        notify_exchange = kombu.Exchange(
            config.HADES_CELERY_NOTIFY_EXCHANGE,
            config.HADES_CELERY_NOTIFY_EXCHANGE_TYPE)
        node_key = config.HADES_CELERY_NODE_ROUTING_KEY
        site_key = config.HADES_CELERY_SITE_ROUTING_KEY
        return (kombu.Queue(config.HADES_CELERY_NODE_QUEUE, (
            kombu.binding(rpc_exchange, routing_key=node_key),
            kombu.binding(notify_exchange, routing_key=node_key),
            kombu.binding(notify_exchange, routing_key=site_key),
            kombu.binding(notify_exchange, routing_key=''),
        ),
                            auto_delete=True,
                            durable=False), )
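The docstring above also says the agent on the MASTER node additionally binds its node queue to the RPC exchange with the site-specific routing key and drops that binding on failover. Below is a minimal sketch of how that could be done with kombu's `Queue.bind_to`/`Queue.unbind_from`; the broker URL and the literal names stand in for the `config.HADES_CELERY_*` settings and are assumptions, not the project's actual code.
import kombu

# Placeholder names standing in for the config.HADES_CELERY_* values above.
RPC_EXCHANGE = 'hades.agent.rpc'
NODE_QUEUE = 'hades-agent-node1'
SITE_KEY = 'site-1'

with kombu.Connection('amqp://localhost//') as conn:
    # Bind the (already declared) node queue to a channel so its broker-side
    # bindings can be managed.
    node_queue = kombu.Queue(NODE_QUEUE)(conn.channel())

    # On becoming MASTER: also accept site-wide RPCs on this node's queue.
    node_queue.bind_to(exchange=RPC_EXCHANGE, routing_key=SITE_KEY)

    # On leaving the MASTER state: drop the site-specific binding again.
    node_queue.unbind_from(exchange=RPC_EXCHANGE, routing_key=SITE_KEY)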
Example #21
def listen_to_event(queue_name: str, callback: Callable):
    with get_connection() as conn:
        bindings = [binding(some_exchange, routing_key=EVENT_KEY)]
        queue = Queue(queue_name,
                      exchange=some_exchange,
                      routing_key=EVENT_KEY,
                      bindings=bindings)
        with Consumer(conn, [queue], callbacks=[callback], auto_declare=True):
            print(f'>>> [{queue_name}] Listening to "{EVENT_KEY}" routing key')
            try:
                while True:
                    conn.drain_events()
            except (KeyboardInterrupt, SystemExit):
                print(f'>>> [{queue_name}] Stopped listening to "{EVENT_KEY}"')
Example #22
    def get_consumers(self, consumer, channel):
        api_event_queue = Queue(
            "zstack.ui.api.event.%s" % self.uuid,
            exchange=self.broadcast_exchange,
            routing_key="key.event.API.API_EVENT",
            auto_delete=True)

        canonical_event_queue = Queue(
            "zstack.ui.canonical.event.%s" % self.uuid,
            exchange=self.broadcast_exchange,
            routing_key="key.event.LOCAL.canonicalEvent",
            auto_delete=True)

        # self.new_channel = channel.connection.channel()
        reply_queue_name = "zstack.ui.message.%s" % self.uuid
        reply_queue = Queue(
            reply_queue_name,
            # exchange=self.p2p_exchange,
            # routing_key="zstack.message.cloudbus.#",
            [binding(self.p2p_exchange, "zstack.message.vmInstance.#"),
             binding(self.p2p_exchange, "zstack.message.ecs.vm.#"),
             binding(self.p2p_exchange, "zstack.message.aliyun.sdk.#")
             ],
            auto_delete=True)

        return [
            consumer(
                queues=[canonical_event_queue],
                callbacks=[self.on_canonical_event]),
            consumer(
                queues=[api_event_queue],
                callbacks=[self.on_api_event]),
            consumer(
                queues=[reply_queue],
                callbacks=[self.on_message])
        ]
Example #23
    def _create_bindings(self, event_names, user_uuid, wazo_uuid):
        result = []
        for name in event_names:
            arguments = {'x-match': 'all', 'name': name}
            if user_uuid:
                arguments['user_uuid:{uuid}'.format(uuid=user_uuid)] = True
            if wazo_uuid:
                arguments['origin_uuid'] = wazo_uuid

            binding = kombu.binding(exchange=self._exchange,
                                    arguments=arguments,
                                    unbind_arguments=arguments)
            result.append(binding)

        return result
Example #24
def amqp_subscribe(exchange, callback, queue='',
                   ex_type='fanout', routing_keys=None, durable=False,
                   auto_delete=True):
    with kombu.pools.connections[kombu.Connection(config.BROKER_URL)].acquire(
            block=True, timeout=10) as connection:
        exchange = kombu.Exchange(exchange, type=ex_type, durable=durable,
                                  auto_delete=auto_delete)
        if not routing_keys:
            queue = kombu.Queue(queue, exchange, exclusive=True)
        else:
            queue = kombu.Queue(queue,
                                [kombu.binding(exchange, routing_key=key)
                                 for key in routing_keys],
                                exclusive=True)
        with connection.Consumer([queue], callbacks=[callback], no_ack=True):
            while True:
                connection.drain_events()
Example #25
def init_celery(name, tasks_pkg, routing_keys):
    """
    Initializing celery to point to a rabbitmq type of broker

    Args:
        name (str): Name for the app. Will also be used as the default queue
            name. Will be one of ['consumer', 'producer'].
        tasks_pkg (str): Package where tasks for the app being created are
            located. Will be either kubernetes_test.producer or
            kubernetes_test.consumer.
        routing_keys (list): List of routing keys to listen on for messages.

    Returns:
        Celery: Celery app object
    """

    # Create celery app
    app = Celery(
        'kubernetes_test.{}'.format(name),
        broker=environ['BROKER_CNX_STRING']
    )

    # Discover tasks appropriate to the app being created
    app.autodiscover_tasks([tasks_pkg], force=True)

    # Set the default queue name so it matches the app name for easy identification
    app.conf.task_default_queue = name

    # use ts.messaging exchange
    messaging_exchange = Exchange('ts.messaging')

    # add the default queue name to the routing keys list
    routing_keys.append(app.conf.task_default_queue)

    bindings = (
        binding(messaging_exchange, routing_key=routing_key)
        for routing_key in routing_keys
    )

    app.conf.task_queues = [
        Queue(name, list(bindings))
    ]

    return app
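A hedged usage sketch for `init_celery`; the broker URL and routing keys are illustrative, and the `kubernetes_test.consumer` package is assumed to be importable as described in the docstring.
from os import environ

environ.setdefault('BROKER_CNX_STRING', 'amqp://guest:guest@localhost:5672//')

# 'consumer' becomes both the app name suffix and the default queue name; the
# queue is bound to the 'ts.messaging' exchange once per routing key, plus once
# for the queue name itself.
app = init_celery('consumer', 'kubernetes_test.consumer',
                  ['user.created', 'user.deleted'])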
Example #26
def start_consumer():
    connection = get_amqp()

    exchange = Exchange(settings.AMQP_EXCHANGE, 'topic', durable=True)

    receivers = Topic.receivers()
    bindings = []

    for r in receivers:
        logger.info(f'Subscribing {r} to queue {settings.AMQP_QUEUE}.')

        bindings.append(binding(exchange, routing_key=r))

    queue = Queue(name=settings.AMQP_QUEUE, exchange=exchange, bindings=bindings)

    with connection.Consumer([queue],
                             callbacks=[process_event]) as consumer:
        logger.info('Started consumer. Consuming messages...')

        while True:
            connection.drain_events()
Example #27
def init_ts_auth_tasks(celery_app, datastore):
    """
    Initialize a Celery app with a queue and sync tasks for auth group models and roles.

    Args:
        celery_app (Celery): Celery app to register the sync tasks with.
        datastore (AuthStore): a datastore
    """
    messaging_exchange = Exchange('ts.messaging')
    bindings = (binding(messaging_exchange, routing_key=routing_key)
                for routing_key in
                [_complex_group_task_routing_key(),
                 _role_task_routing_key()])

    celery_app.conf.task_queues = celery_app.conf.task_queues or []

    celery_app.conf.task_queues.append(
        Queue('{}.ts_auth.group'.format(celery_app.main), list(bindings)))

    _init_group_tasks(datastore)
    _init_role_tasks(datastore)
Example #28
 def accumulator(self, routing_key=None, exchange=None, headers=None):
     exchange = exchange or self._default_exchange
     queue_name = 'test-{}'.format(str(uuid.uuid4()))
     with Connection(self._url) as conn:
         if routing_key:
             queue = Queue(
                 name=queue_name,
                 exchange=exchange,
                 routing_key=routing_key,
                 channel=conn.channel(),
             )
         elif headers:
             queue = Queue(
                 name=queue_name,
                 exchange=exchange,
                 bindings=[binding(exchange=exchange, arguments=headers)],
                 channel=conn.channel(),
             )
         else:
             raise Exception('Need a routing key or a header')
         queue.declare()
         queue.purge()
         accumulator = BusMessageAccumulator(self._url, queue)
     return accumulator
Example #29
 def test_repr(self):
     b = binding(Exchange('foo'), 'rkey')
     assert 'foo' in repr(b)
     assert 'rkey' in repr(b)
Example #30
 def test_bind(self):
     chan = get_conn().channel()
     x = binding(Exchange('foo'))
     x.bind(Exchange('bar')(chan))
     assert 'exchange_bind' in chan
Example #31
 def test_queue_dump(self):
     b = binding(self.exchange, 'rk')
     q = Queue('foo', self.exchange, 'rk', bindings=[b])
     d = q.as_dict(recurse=True)
     assert d['bindings'][0]['routing_key'] == 'rk'
     registry.dumps(d)
Example #32
 def test_unbind(self):
     chan = get_conn().channel()
     x = binding(Exchange('foo'))
     x.unbind(Exchange('bar')(chan))
     assert 'exchange_unbind' in chan
Example #33
 def test_declare_no_exchange(self):
     chan = get_conn().channel()
     x = binding()
     x.declare(chan)
     assert 'exchange_declare' not in chan
Example #34
 def test_declare_no_exchange(self):
     chan = get_conn().channel()
     x = binding()
     x.declare(chan)
     assert 'exchange_declare' not in chan
Example #35
 def test_repr(self):
     b = binding(Exchange('foo'), 'rkey')
     assert 'foo' in repr(b)
     assert 'rkey' in repr(b)
Example #36
 def test_declare(self):
     chan = get_conn().channel()
     x = binding(Exchange('foo'), 'rkey')
     x.declare(chan)
     assert 'exchange_declare' in chan
Example #37
 def test_repr(self):
     b = binding(Exchange('foo'), 'rkey')
     self.assertIn('foo', repr(b))
     self.assertIn('rkey', repr(b))
Example #38
 def test_declare(self):
     chan = get_conn().channel()
     x = binding(Exchange('foo'), 'rkey')
     x.declare(chan)
     self.assertIn('exchange_declare', chan)
Example #39
 def test_bind(self):
     chan = get_conn().channel()
     x = binding(Exchange('foo'))
     x.bind(Exchange('bar')(chan))
     self.assertIn('exchange_bind', chan)
Example #40
 def test_repr(self):
     b = binding(Exchange('foo'), 'rkey')
     self.assertIn('foo', repr(b))
     self.assertIn('rkey', repr(b))
Example #41
 def test_unbind(self):
     chan = get_conn().channel()
     x = binding(Exchange('foo'))
     x.unbind(Exchange('bar')(chan))
     self.assertIn('exchange_unbind', chan)
Example #42
 def test_queue_dump(self):
     b = binding(self.exchange, 'rk')
     q = Queue('foo', self.exchange, 'rk', bindings=[b])
     d = q.as_dict(recurse=True)
     self.assertEqual(d['bindings'][0]['routing_key'], 'rk')
     registry.dumps(d)
Example #43
 def test_declare_no_exchange(self):
     chan = get_conn().channel()
     x = binding()
     x.declare(chan)
     self.assertNotIn('exchange_declare', chan)