Example #1
    def publish(self, payload, exchange, routing_key=''):
        with Timer(key='amqp.pool_publisher.publish_with_retries.' +
                   exchange.name):
            with self.pool.acquire(block=True) as connection:
                retry_wrapper = ConnectionRetryWrapper(
                    cluster_size=self.cluster_size, logger=LOG)

                def do_publish(connection, channel):
                    # ProducerPool ends up creating its own ConnectionPool, which ends up
                    # completely invalidating this ConnectionPool. Also, a ConnectionPool for
                    # the producer does not really solve any problems for us, so it is
                    # better to create a Producer for each publish.
                    producer = Producer(channel)
                    kwargs = {
                        'body': payload,
                        'exchange': exchange,
                        'routing_key': routing_key,
                        'serializer': 'pickle',
                        'content_encoding': 'utf-8'
                    }

                    retry_wrapper.ensured(connection=connection,
                                          obj=producer,
                                          to_ensure_func=producer.publish,
                                          **kwargs)

                retry_wrapper.run(connection=connection,
                                  wrapped_callback=do_publish)
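
For context, here is a minimal sketch of how the method above might be called. It assumes the enclosing class is st2's PoolPublisher from st2common.transport.publishers and that default construction works (constructor arguments may differ across st2 versions); the exchange object comes from kombu.

from kombu import Exchange

from st2common.transport.publishers import PoolPublisher

# Hypothetical call site; PoolPublisher's constructor may take broker URLs.
publisher = PoolPublisher()
live_action_xchg = Exchange('st2.liveaction', type='topic')

# payload is pickled (serializer='pickle' above), so any picklable object works.
publisher.publish(payload={'id': 'abc123'},
                  exchange=live_action_xchg,
                  routing_key='create')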
Example #2
File: publishers.py  Project: wingiti/st2
    def publish(self, payload, exchange, routing_key="", compression=None):
        compression = compression or cfg.CONF.messaging.compression

        with Timer(key="amqp.pool_publisher.publish_with_retries." +
                   exchange.name):
            with self.pool.acquire(block=True) as connection:
                retry_wrapper = ConnectionRetryWrapper(
                    cluster_size=self.cluster_size, logger=LOG)

                def do_publish(connection, channel):
                    # ProducerPool ends up creating its own ConnectionPool, which ends up
                    # completely invalidating this ConnectionPool. Also, a ConnectionPool for
                    # the producer does not really solve any problems for us, so it is
                    # better to create a Producer for each publish.
                    producer = Producer(channel)
                    kwargs = {
                        "body": payload,
                        "exchange": exchange,
                        "routing_key": routing_key,
                        "serializer": "pickle",
                        "compression": compression,
                        "content_encoding": "utf-8",
                    }

                    retry_wrapper.ensured(
                        connection=connection,
                        obj=producer,
                        to_ensure_func=producer.publish,
                        **kwargs,
                    )

                retry_wrapper.run(connection=connection,
                                  wrapped_callback=do_publish)
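
The only difference from example #1 is the compression keyword, which this version reads from cfg.CONF.messaging.compression and forwards straight to kombu's Producer.publish. Below is a standalone kombu sketch of the same keyword arguments (not st2 code), runnable against kombu's in-memory transport; the exchange and routing key names are made up.

from kombu import Connection, Exchange, Producer

demo_xchg = Exchange('demo', type='topic')

with Connection('memory://') as conn:
    channel = conn.channel()
    producer = Producer(channel)
    # declare=[demo_xchg] ensures the exchange exists before the first publish.
    producer.publish({'id': 'abc123'},
                     exchange=demo_xchg,
                     routing_key='demo.create',
                     serializer='pickle',
                     compression='gzip',  # kombu also ships e.g. 'bzip2'
                     content_encoding='utf-8',
                     declare=[demo_xchg])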
Example #3
File: publishers.py  Project: nzlosh/st2
    def publish(self, payload, exchange, routing_key=''):
        with Timer(key='amqp.pool_publisher.publish_with_retries.' + exchange.name):
            with self.pool.acquire(block=True) as connection:
                retry_wrapper = ConnectionRetryWrapper(cluster_size=self.cluster_size, logger=LOG)

                def do_publish(connection, channel):
                    # ProducerPool ends up creating its own ConnectionPool, which ends up
                    # completely invalidating this ConnectionPool. Also, a ConnectionPool for
                    # the producer does not really solve any problems for us, so it is
                    # better to create a Producer for each publish.
                    producer = Producer(channel)
                    kwargs = {
                        'body': payload,
                        'exchange': exchange,
                        'routing_key': routing_key,
                        'serializer': 'pickle',
                        'content_encoding': 'utf-8'
                    }

                    retry_wrapper.ensured(
                        connection=connection,
                        obj=producer,
                        to_ensure_func=producer.publish,
                        **kwargs
                    )

                retry_wrapper.run(connection=connection, wrapped_callback=do_publish)
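
All three publisher examples rely on the same two-method contract: run() hands the callback a live channel and retries across cluster nodes, while ensured() guards a single operation against connection failures. The sketch below illustrates that contract using kombu's own retry helpers; it is a simplified stand-in, not st2's actual ConnectionRetryWrapper implementation.

class SimpleRetryWrapper(object):
    """Illustrative stand-in for ConnectionRetryWrapper (assumed semantics)."""

    def __init__(self, cluster_size, logger):
        # Retry once per node so a clustered broker gets a chance to fail over.
        self.max_retries = cluster_size
        self.logger = logger

    def run(self, connection, wrapped_callback):
        # Establish (or re-establish) the connection, then hand the callback a
        # live channel, mirroring the do_publish(connection, channel) signature.
        connection.ensure_connection(max_retries=self.max_retries)
        channel = connection.channel()
        try:
            wrapped_callback(connection=connection, channel=channel)
        finally:
            channel.close()

    def ensured(self, connection, obj, to_ensure_func, **kwargs):
        # kombu's Connection.ensure() re-runs a single operation (here,
        # producer.publish) if the connection drops mid-call.
        ensured_func = connection.ensure(obj, to_ensure_func,
                                         max_retries=self.max_retries)
        ensured_func(**kwargs)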
Example #4
def register_exchanges():
    LOG.debug('Registering exchanges...')
    connection_urls = transport_utils.get_messaging_urls()
    with Connection(connection_urls) as conn:
        # Use ConnectionRetryWrapper to deal with rmq clustering etc.
        retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls), logger=LOG)

        def wrapped_register_exchanges(connection, channel):
            for exchange in EXCHANGES:
                _do_register_exchange(exchange=exchange, connection=connection, channel=channel,
                                      retry_wrapper=retry_wrapper)

        retry_wrapper.run(connection=conn, wrapped_callback=wrapped_register_exchanges)
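
The helper _do_register_exchange is not shown in this listing. In plain kombu terms, registering an exchange amounts to binding it to a channel and declaring it, roughly as below (the exchange name is made up):

from kombu import Connection, Exchange

with Connection('memory://') as conn:
    channel = conn.channel()
    # Calling an Exchange with a channel returns a bound copy we can declare.
    bound = Exchange('st2.demo', type='topic', durable=True)(channel)
    bound.declare()  # re-declaring an identical exchange is a no-op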
Example #5
def register_exchanges():
    LOG.debug("Registering exchanges...")
    connection_urls = transport_utils.get_messaging_urls()

    with transport_utils.get_connection() as conn:
        # Use ConnectionRetryWrapper to deal with rmq clustering etc.
        retry_wrapper = ConnectionRetryWrapper(
            cluster_size=len(connection_urls), logger=LOG
        )

        def wrapped_register_exchanges(connection, channel):
            for exchange in EXCHANGES:
                _do_register_exchange(
                    exchange=exchange,
                    connection=connection,
                    channel=channel,
                    retry_wrapper=retry_wrapper,
                )

        retry_wrapper.run(connection=conn, wrapped_callback=wrapped_register_exchanges)

        def wrapped_predeclare_queues(connection, channel):
            for queue in QUEUES:
                _do_predeclare_queue(channel=channel, queue=queue)

        retry_wrapper.run(connection=conn, wrapped_callback=wrapped_predeclare_queues)
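
This last example additionally pre-declares queues after the exchanges are registered. _do_predeclare_queue is likewise not shown in the listing; here is a kombu-only sketch of what declaring a queue involves (the names are made up):

from kombu import Connection, Exchange, Queue

with Connection('memory://') as conn:
    channel = conn.channel()
    demo_xchg = Exchange('st2.demo', type='topic', durable=True)
    demo_q = Queue('st2.demo.work', exchange=demo_xchg, routing_key='#')
    # Declaring a bound Queue declares the queue, its exchange and the binding.
    demo_q(channel).declare()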