class PollingQueueConsumer(object):
    """ Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method to block until a message
    with the same correlation ID of the RPC-proxy call arrives.
    """

    def register_provider(self, provider):
        self.provider = provider
        self.connection = Connection(provider.container.config['AMQP_URI'])
        self.channel = self.connection.channel()
        self.queue = provider.queue
        maybe_declare(self.queue, self.channel)

    def unregister_provider(self, provider):
        self.connection.close()

    def ack_message(self, msg):
        msg.ack()

    def poll_messages(self, correlation_id):
        channel = self.channel
        conn = channel.connection

        for body, msg in itermessages(conn, channel, self.queue, limit=None):
            if correlation_id == msg.properties.get('correlation_id'):
                self.provider.handle_message(body, msg)
                break
def test_db_port(self):
    c1 = Connection(port=None, transport=Transport).channel()
    self.assertEqual(c1.client.port, Transport.default_port)
    c1.close()

    c2 = Connection(port=9999, transport=Transport).channel()
    self.assertEqual(c2.client.port, 9999)
    c2.close()
def send_task(obj, message):
    log.info('send message: %s' % message['event_type'])
    try:
        connection = Connection('amqp://%s:%s@%s:%s/%s' % (
            BROKER_USER, BROKER_PASSWORD, BROKER_HOST, BROKER_PORT,
            BROKER_VHOST_PYPO))
        simple_queue = connection.SimpleQueue(BROKER_QUEUE)
        simple_queue.put(json.dumps(message))
        simple_queue.close()
        connection.close()
    except Exception as e:
        log.error('error sending message: %s' % e)
class KombuMailbox(AckableMailbox):

    def __init__(self, address, name, transport_options,
                 ssl=False, no_ack=True,
                 queue_opts=None, exchange_opts=None):
        from kombu import Connection
        self._conn = Connection(address,
                                transport_options=transport_options,
                                ssl=ssl)
        self._queue = self._conn.SimpleQueue(name,
                                             no_ack,
                                             queue_opts,
                                             exchange_opts)
        self._no_ack = no_ack
        self._last_msg = None

    def get(self):
        self._last_msg = self._queue.get()
        return decode(unpackb(self._last_msg.body,
                              encoding='utf-8',
                              use_list=False))

    def put(self, message):
        return self._queue.put(packb(encode(message),
                                     encoding='utf-8',
                                     use_bin_type=True))

    def ack(self):
        if self._no_ack:
            return
        if self._last_msg is not None:
            self._last_msg.ack()
            self._last_msg = None

    def encode(self):
        raise NotImplementedError

    @staticmethod
    def decode(params):
        raise NotImplementedError

    def __enter__(self):
        return self

    def __exit__(self, *exc_details):
        self.__del__()

    def __del__(self):
        if hasattr(self, '_queue'):
            self._queue.close()
        if hasattr(self, '_conn'):
            self._conn.close()
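A minimal usage sketch for the class above, assuming a reachable broker and that the module-level encode/decode helpers (and msgpack's packb/unpackb) it calls are importable; the transport URL and queue name here are illustrative, not taken from the original snippet:

# Illustrative values only; any transport URL kombu accepts would work.
with KombuMailbox('redis://localhost:6379/0', 'actor-inbox',
                  transport_options={}, no_ack=False) as mailbox:
    mailbox.put({'kind': 'ping'})  # encode + packb the payload, then enqueue
    message = mailbox.get()        # blocks until a message arrives
    mailbox.ack()                  # explicit ack, since no_ack=False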
def send_task(obj, message):
    log.info('send message: %s' % message['event_type'])
    try:
        connection = Connection(PLAYOUT_BROKER_URL)
        simple_queue = connection.SimpleQueue(BROKER_QUEUE)
        simple_queue.put(json.dumps(message))
        simple_queue.close()
        connection.close()
    except Exception as e:
        log.error('error sending message: %s' % e)
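A hedged sketch of the consuming side of the same SimpleQueue; PLAYOUT_BROKER_URL and BROKER_QUEUE come from the snippet above, the function name and timeout are hypothetical:

def receive_task():
    """Sketch: fetch one JSON task from the queue the producer above fills."""
    connection = Connection(PLAYOUT_BROKER_URL)
    simple_queue = connection.SimpleQueue(BROKER_QUEUE)
    message = simple_queue.get(block=True, timeout=10)
    payload = json.loads(message.payload)  # producer put a json.dumps() string
    message.ack()
    simple_queue.close()
    connection.close()
    return payload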
def test_close_survives_connerror(self):

    class _CustomError(Exception):
        pass

    class MyTransport(Transport):
        connection_errors = (_CustomError,)

        def close_connection(self, connection):
            raise _CustomError("foo")

    conn = Connection(transport=MyTransport)
    conn.connect()
    conn.close()
    self.assertTrue(conn._closed)
def run(self):
    try:
        connection = Connection(hostname=self.host, port=self.port,
                                userid=self.usr, password=self.psw,
                                virtual_host=self.virtual_host)
        channel = connection.channel()
        self.producer = Producer(channel)
        task_queue = Queue(self.queue_name, durable=True)
        consumer = Consumer(channel, task_queue, no_ack=False)
        consumer.qos(prefetch_count=1)
        consumer.register_callback(self.RequestCallBack)
        consumer.consume()
        while True:
            connection.drain_events()
        connection.close()  # unreachable while the loop above runs forever
    except BaseException as e:
        print(e)
def start(self):
    log.info("Listening for Pulse messages")
    self.running = True

    connection = Connection(
        hostname=self.pulse_host,
        userid=self.pulse_user,
        password=self.pulse_password,
        ssl=True,
        # Kombu doesn't support the port correctly for amqp with ssl...
        port=5671,
    )

    consumers = []
    for event in self.events:
        log.debug("Setting up queue on exchange: %s with routing_key: %s",
                  event.exchange, event.routing_key)
        # Passive exchanges must be used, otherwise kombu will try to
        # create the exchange (which we don't want, we're consuming
        # an existing one!)
        e = Exchange(name=event.exchange, type="topic", passive=True)
        q = Queue(
            name=event.queue_name,
            exchange=e,
            routing_key=event.routing_key,
            durable=True,
            exclusive=False,
            auto_delete=False
        )
        c = connection.Consumer(
            queues=[q],
            callbacks=[event.callback]
        )
        c.consume()
        consumers.append(c)

    try:
        # XXX: drain_events only returns after receiving a message. Is
        # there a way we can have it return regularly to be non-blocking?
        # Its timeout parameter seems to break receiving of messages.
        # Maybe it doesn't matter if we can't shut down gracefully since
        # messages will be reprocessed next time.
        while self.running:
            connection.drain_events()
    finally:
        for c in consumers:
            c.close()
        connection.close()
def run(self):
    connection = Connection(hostname=self.host, port=self.port,
                            userid=self.usr, password=self.psw,
                            virtual_host=self.virtual_host)
    channel = connection.channel()
    self.producer = Producer(channel)
    queueargs = {}
    if self.msg_timeout:
        queueargs['x-message-ttl'] = self.msg_timeout
    task_queue = Queue(self.queue_name, durable=True,
                       queue_arguments=queueargs if queueargs else None)
    consumer = Consumer(channel, task_queue, no_ack=False)
    consumer.qos(prefetch_count=1)
    consumer.register_callback(self.RequestCallBack)
    consumer.consume()
    while self.task_count:
        connection.drain_events()
        self.task_count -= 1
    connection.close()
class AMQPSocket(object):
    def __init__(self, broker_uri, exchange, routing_key):
        self.conn = Connection(broker_uri)
        self.is_logging = False
        self.exchange = exchange
        self.routing_key = routing_key

    def sendall(self, data):
        if self.is_logging:
            return
        self.is_logging = True
        try:
            channel = self.conn.default_channel
            msg = channel.prepare_message(data)
            channel.basic_publish(msg, self.exchange, self.routing_key)
        finally:
            self.is_logging = False

    def close(self):
        self.conn.close()
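A hypothetical wiring sketch: AMQPSocket exposes sendall()/close() so it can stand in where a socket-like sink is expected (e.g. behind a logging handler); the broker URL, exchange and routing key below are illustrative assumptions:

sock = AMQPSocket('amqp://guest:guest@localhost:5672//',
                  exchange='logs', routing_key='app.info')
sock.sendall(b'hello from the log stream')  # published as a raw message body
sock.close()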
def send(self):
    try:
        # Connection
        conn = Connection(self.broker)

        # Channel
        channel = conn.channel()

        # Exchange
        task_exchange = Exchange(self._exchange_name,
                                 type=self._queue_type)

        # Queues
        if self._queue_name:
            queue = Queue(name=self._queue_name, channel=channel,
                          exchange=task_exchange,
                          routing_key=self._routing_key)
            queue.declare()

        # Producer
        producer = Producer(exchange=task_exchange, channel=channel,
                            routing_key=self._routing_key)

        # Send message
        for message in self._msgs:
            serialized_message = json.dumps(message, ensure_ascii=False)
            producer.publish(serialized_message)

        conn.close()
    except Exception as e:
        self.log.error(
            u'QueueManagerError - Error on sending objects from queue.')
        self.log.debug(e)
        raise Exception(
            'QueueManagerError - Error on sending objects to queue.')
class SimpleBase:

    def Queue(self, name, *args, **kwargs):
        q = name
        if not isinstance(q, Queue):
            q = self.__class__.__name__
            if name:
                q = f'{q}.{name}'
        return self._Queue(q, *args, **kwargs)

    def _Queue(self, *args, **kwargs):
        raise NotImplementedError()

    def setup(self):
        self.connection = Connection(transport='memory')
        self.connection.default_channel.exchange_declare('amq.direct')

    def teardown(self):
        self.connection.close()
        self.connection = None

    def test_produce__consume(self):
        q = self.Queue('test_produce__consume', no_ack=True)

        q.put({'hello': 'Simple'})

        assert q.get(timeout=1).payload == {'hello': 'Simple'}
        with pytest.raises(q.Empty):
            q.get(timeout=0.1)

    def test_produce__basic_get(self):
        q = self.Queue('test_produce__basic_get', no_ack=True)
        q.put({'hello': 'SimpleSync'})
        assert q.get_nowait().payload == {'hello': 'SimpleSync'}
        with pytest.raises(q.Empty):
            q.get_nowait()

        q.put({'hello': 'SimpleSync'})
        assert q.get(block=False).payload == {'hello': 'SimpleSync'}
        with pytest.raises(q.Empty):
            q.get(block=False)

    def test_get_nowait_accept(self):
        q = self.Queue('test_accept', serializer='pickle', accept=['json'])
        q.put({'hello': 'SimpleSync'})
        with pytest.raises(ContentDisallowed):
            q.get_nowait().payload

        q = self.Queue('test_accept1', serializer='json', accept=[])
        q.put({'hello': 'SimpleSync'})
        with pytest.raises(ContentDisallowed):
            q.get_nowait().payload

        q = self.Queue('test_accept2', serializer='pickle',
                       accept=['json', 'pickle'])
        q.put({'hello': 'SimpleSync'})
        assert q.get_nowait().payload == {'hello': 'SimpleSync'}

    def test_get_accept(self):
        q = self.Queue('test_accept', serializer='pickle', accept=['json'])
        q.put({'hello': 'SimpleSync'})
        with pytest.raises(ContentDisallowed):
            q.get().payload

        q = self.Queue('test_accept1', serializer='pickle', accept=[])
        q.put({'hello': 'SimpleSync'})
        with pytest.raises(ContentDisallowed):
            q.get().payload

        q = self.Queue('test_accept2', serializer='pickle',
                       accept=['json', 'pickle'])
        q.put({'hello': 'SimpleSync'})
        assert q.get().payload == {'hello': 'SimpleSync'}

    def test_clear(self):
        q = self.Queue('test_clear', no_ack=True)

        for i in range(10):
            q.put({'hello': 'SimplePurge%d' % (i,)})

        assert q.clear() == 10

    def test_enter_exit(self):
        q = self.Queue('test_enter_exit')
        q.close = Mock()

        with q as x:
            assert x is q
        q.close.assert_called_with()

    def test_qsize(self):
        q = self.Queue('test_clear', no_ack=True)

        for i in range(10):
            q.put({'hello': 'SimplePurge%d' % (i,)})

        assert q.qsize() == 10
        assert len(q) == 10

    def test_autoclose(self):
        channel = self.connection.channel()
        q = self.Queue('test_autoclose', no_ack=True, channel=channel)
        q.close()

    def test_custom_Queue(self):
        n = self.__class__.__name__
        exchange = Exchange(f'{n}-test.custom.Queue')
        queue = Queue(f'{n}-test.custom.Queue', exchange, 'my.routing.key')
        q = self.Queue(queue)
        assert q.consumer.queues[0] == queue
        q.close()

    def test_bool(self):
        q = self.Queue('test_nonzero')
        assert q
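A standalone sketch of what these tests exercise, kombu's SimpleQueue API over the in-memory transport; queue name and payload are illustrative:

from kombu import Connection

with Connection(transport='memory') as conn:
    q = conn.SimpleQueue('demo', no_ack=True)
    q.put({'hello': 'Simple'})                        # serialized + enqueued
    assert q.get(timeout=1).payload == {'hello': 'Simple'}
    q.close()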
class PollingQueueConsumer(object):
    """ Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method to block until a message
    with the same correlation ID of the RPC-proxy call arrives.
    """

    def __init__(self, timeout=None):
        self.timeout = timeout

    def _setup_queue(self):
        self.channel = self.connection.channel()
        # queue.bind returns a bound copy
        self.queue = self.queue.bind(self.channel)
        maybe_declare(self.queue, self.channel)

    def register_provider(self, provider):
        self.provider = provider
        amqp_uri = provider.container.config[AMQP_URI_CONFIG_KEY]
        verify_amqp_uri(amqp_uri)
        self.connection = Connection(amqp_uri)
        self.queue = provider.queue
        self._setup_queue()
        message_iterator = self._poll_messages()
        message_iterator.send(None)  # start generator
        self.get_message = message_iterator.send

    def unregister_provider(self, provider):
        self.connection.close()

    def ack_message(self, msg):
        msg.ack()

    def _poll_messages(self):
        replies = {}

        correlation_id = yield

        while True:
            try:
                for body, msg in queue_iterator(
                    self.queue, timeout=self.timeout
                ):
                    msg_correlation_id = msg.properties.get('correlation_id')

                    if msg_correlation_id not in self.provider._reply_events:
                        _logger.debug(
                            "Unknown correlation id: %s", msg_correlation_id)
                        continue

                    replies[msg_correlation_id] = (body, msg)

                    # Here, and every time we re-enter this coroutine (at the
                    # `yield` statement below) we check if we already have the
                    # data for the new correlation_id before polling for new
                    # messages.
                    while correlation_id in replies:
                        body, msg = replies.pop(correlation_id)
                        self.provider.handle_message(body, msg)
                        correlation_id = yield

            except RpcTimeout as exc:
                event = self.provider._reply_events.pop(correlation_id)
                event.send_exception(exc)

                # timeout is implemented using socket timeout, so when it
                # fires the connection is closed, causing the reply queue
                # to be deleted
                self._setup_queue()
                correlation_id = yield

            except ConnectionError as exc:
                for event in self.provider._reply_events.values():
                    rpc_connection_error = RpcConnectionError(
                        'Disconnected while waiting for reply: %s', exc)
                    event.send_exception(rpc_connection_error)
                self.provider._reply_events.clear()
                # In case this was a temporary error, attempt to reconnect.
                # If we fail, the connection error will bubble.
                self._setup_queue()
                correlation_id = yield

            except KeyboardInterrupt as exc:
                event = self.provider._reply_events.pop(correlation_id)
                event.send_exception(exc)
                # exception may have killed the connection
                self._setup_queue()
                correlation_id = yield
from kombu import Connection, Exchange, Consumer, Queue

Queue_User = "******"
Queue_PassWord = "******"
Queue_Server = '127.0.0.1'
Queue_Port = 5672
Queue_Path = '/websocketserver'


def callback(body, message):
    print(body)


connection = Connection(hostname=Queue_Server, port=Queue_Port,
                        userid=Queue_User, password=Queue_PassWord,
                        virtual_host=Queue_Path)
channel = connection.channel()
smsExchange = Exchange("sys.sms", type='topic', channel=channel,
                       durable=True, delivery_mode=2)
task_queue = Queue('test_recv', exchange=smsExchange, routing_key='sms.code',
                   durable=False, channel=channel)
consumer = Consumer(channel, task_queue, no_ack=True, callbacks=[callback])
consumer.qos(prefetch_count=1)
consumer.consume()
while True:
    connection.drain_events()
connection.close()  # unreachable while the loop above runs forever
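A hedged counterpart sketch: publish a test message to the same exchange and routing key the consumer above is bound to (connection settings reuse the names from the snippet; the payload is illustrative):

from kombu import Connection, Exchange, Producer

with Connection(hostname=Queue_Server, port=Queue_Port, userid=Queue_User,
                password=Queue_PassWord, virtual_host=Queue_Path) as conn:
    channel = conn.channel()
    exchange = Exchange('sys.sms', type='topic', durable=True)
    producer = Producer(channel, exchange=exchange, routing_key='sms.code')
    producer.publish({'code': '123456'})  # delivered to the 'test_recv' queue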
class test_Redis:

    def setup(self):
        self.connection = Connection(transport=Transport)
        self.exchange = Exchange('test_Redis', type='direct')
        self.queue = Queue('test_Redis', self.exchange, 'test_Redis')

    def teardown(self):
        self.connection.close()

    def test_publish__get(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()
        producer.publish({'hello': 'world'})

        assert self.queue(channel).get().payload == {'hello': 'world'}
        assert self.queue(channel).get() is None
        assert self.queue(channel).get() is None
        assert self.queue(channel).get() is None

    def test_publish__consume(self):
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, queues=[self.queue])

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        assert channel in channel.connection.cycle._channels
        try:
            connection.drain_events(timeout=1)
            assert _received
            with pytest.raises(socket.timeout):
                connection.drain_events(timeout=0.01)
        finally:
            channel.close()

    def test_purge(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()

        for i in range(10):
            producer.publish({'hello': 'world-%s' % (i,)})

        assert channel._size('test_Redis') == 10
        assert self.queue(channel).purge() == 10
        channel.close()

    def test_db_values(self):
        Connection(virtual_host=1, transport=Transport).channel()
        Connection(virtual_host='1', transport=Transport).channel()
        Connection(virtual_host='/1', transport=Transport).channel()
        with pytest.raises(Exception):
            Connection('redis:///foo').channel()

    def test_db_port(self):
        c1 = Connection(port=None, transport=Transport).channel()
        c1.close()

        c2 = Connection(port=9999, transport=Transport).channel()
        c2.close()

    def test_close_poller_not_active(self):
        c = Connection(transport=Transport).channel()
        cycle = c.connection.cycle
        c.client.connection
        c.close()
        assert c not in cycle._channels

    def test_close_ResponseError(self):
        c = Connection(transport=Transport).channel()
        c.client.bgsave_raises_ResponseError = True
        c.close()

    def test_close_disconnects(self):
        c = Connection(transport=Transport).channel()
        conn1 = c.client.connection
        conn2 = c.subclient.connection
        c.close()
        assert conn1.disconnected
        assert conn2.disconnected

    def test_get__Empty(self):
        channel = self.connection.channel()
        with pytest.raises(Empty):
            channel._get('does-not-exist')
        channel.close()

    def test_get_client(self):
        with mock.module_exists(*_redis_modules()):
            conn = Connection(transport=Transport)
            chan = conn.channel()
            assert chan.Client
            assert chan.ResponseError
            assert conn.transport.connection_errors
            assert conn.transport.channel_errors

    def test_check_at_least_we_try_to_connect_and_fail(self):
        import redis
        connection = Connection('redis://localhost:65534/')
        with pytest.raises(redis.exceptions.ConnectionError):
            chan = connection.channel()
            chan._size('some_queue')
class PollingQueueConsumer(object):
    """ Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method to block until a message
    with the same correlation ID of the RPC-proxy call arrives.
    """
    consumer = None

    def __init__(self, timeout=None):
        self.stopped = True
        self.timeout = timeout
        self.replies = {}

    def _setup_consumer(self):
        if self.consumer is not None:
            try:
                self.consumer.cancel()
            except (socket.error, IOError):  # pragma: no cover
                # On some systems (e.g. os x) we need to explicitly cancel the
                # consumer here. However, e.g. on ubuntu 14.04, the
                # disconnection has already closed the socket. We try to
                # cancel, and ignore any socket errors.
                # If the socket has been closed, an IOError is raised, ignore
                # it and assume the consumer is already cancelled.
                pass

        channel = self.connection.channel()
        # queue.bind returns a bound copy
        self.queue = self.queue.bind(channel)
        maybe_declare(self.queue, channel)

        consumer = Consumer(
            channel, queues=[self.queue], accept=self.accept, no_ack=False)
        consumer.callbacks = [self.on_message]
        consumer.consume()
        self.consumer = consumer

    def register_provider(self, provider):
        self.provider = provider

        self.serializer, self.accept = serialization.setup(
            provider.container.config)

        amqp_uri = provider.container.config[AMQP_URI_CONFIG_KEY]
        ssl = provider.container.config.get(AMQP_SSL_CONFIG_KEY)
        self.connection = Connection(amqp_uri, ssl=ssl)

        self.queue = provider.queue
        self._setup_consumer()
        self.stopped = False

    def unregister_provider(self, provider):
        self.connection.close()
        self.stopped = True

    def ack_message(self, msg):
        msg.ack()

    def on_message(self, body, message):
        msg_correlation_id = message.properties.get('correlation_id')
        if msg_correlation_id not in self.provider._reply_events:
            _logger.debug(
                "Unknown correlation id: %s", msg_correlation_id)
        self.replies[msg_correlation_id] = (body, message)

    def get_message(self, correlation_id):
        try:
            while correlation_id not in self.replies:
                self.consumer.connection.drain_events(
                    timeout=self.timeout
                )

            body, message = self.replies.pop(correlation_id)
            self.provider.handle_message(body, message)

        except socket.timeout:
            # TODO: this conflates an rpc timeout with a socket read timeout.
            # a better rpc proxy implementation would recover from a socket
            # timeout if the rpc timeout had not yet been reached
            timeout_error = RpcTimeout(self.timeout)
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(timeout_error)

            # timeout is implemented using socket timeout, so when it
            # fires the connection is closed and must be re-established
            self._setup_consumer()

        except (IOError, ConnectionError) as exc:
            # in case this was a temporary error, attempt to reconnect
            # and try again. if we fail to reconnect, the error will bubble
            self._setup_consumer()
            self.get_message(correlation_id)

        except KeyboardInterrupt as exc:
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(exc)
            # exception may have killed the connection
            self._setup_consumer()
class test_Redis(Case):

    def setup(self):
        self.connection = Connection(transport=Transport)
        self.exchange = Exchange('test_Redis', type='direct')
        self.queue = Queue('test_Redis', self.exchange, 'test_Redis')

    def teardown(self):
        self.connection.close()

    def test_publish__get(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()
        producer.publish({'hello': 'world'})

        self.assertDictEqual(
            self.queue(channel).get().payload, {'hello': 'world'})
        self.assertIsNone(self.queue(channel).get())
        self.assertIsNone(self.queue(channel).get())
        self.assertIsNone(self.queue(channel).get())

    def test_publish__consume(self):
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, queues=[self.queue])

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        self.assertIn(channel, channel.connection.cycle._channels)
        try:
            connection.drain_events(timeout=1)
            self.assertTrue(_received)
            with self.assertRaises(socket.timeout):
                connection.drain_events(timeout=0.01)
        finally:
            channel.close()

    def test_purge(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()

        for i in range(10):
            producer.publish({'hello': 'world-%s' % (i, )})

        self.assertEqual(channel._size('test_Redis'), 10)
        self.assertEqual(self.queue(channel).purge(), 10)
        channel.close()

    def test_db_values(self):
        Connection(virtual_host=1, transport=Transport).channel()
        Connection(virtual_host='1', transport=Transport).channel()
        Connection(virtual_host='/1', transport=Transport).channel()
        with self.assertRaises(Exception):
            Connection('redis:///foo').channel()

    def test_db_port(self):
        c1 = Connection(port=None, transport=Transport).channel()
        c1.close()

        c2 = Connection(port=9999, transport=Transport).channel()
        c2.close()

    def test_close_poller_not_active(self):
        c = Connection(transport=Transport).channel()
        cycle = c.connection.cycle
        c.client.connection
        c.close()
        self.assertNotIn(c, cycle._channels)

    def test_close_ResponseError(self):
        c = Connection(transport=Transport).channel()
        c.client.bgsave_raises_ResponseError = True
        c.close()

    def test_close_disconnects(self):
        c = Connection(transport=Transport).channel()
        conn1 = c.client.connection
        conn2 = c.subclient.connection
        c.close()
        self.assertTrue(conn1.disconnected)
        self.assertTrue(conn2.disconnected)

    def test_get__Empty(self):
        channel = self.connection.channel()
        with self.assertRaises(Empty):
            channel._get('does-not-exist')
        channel.close()

    def test_get_client(self):
        with mock.module_exists(*_redis_modules()):
            conn = Connection(transport=Transport)
            chan = conn.channel()
            self.assertTrue(chan.Client)
            self.assertTrue(chan.ResponseError)
            self.assertTrue(conn.transport.connection_errors)
            self.assertTrue(conn.transport.channel_errors)

    def test_check_at_least_we_try_to_connect_and_fail(self):
        import redis
        connection = Connection('redis://localhost:65534/')
        with self.assertRaises(redis.exceptions.ConnectionError):
            chan = connection.channel()
            chan._size('some_queue')
class SimpleBase(Case):
    abstract = True

    def Queue(self, name, *args, **kwargs):
        q = name
        if not isinstance(q, Queue):
            q = self.__class__.__name__
            if name:
                q = '%s.%s' % (q, name)
        return self._Queue(q, *args, **kwargs)

    def _Queue(self, *args, **kwargs):
        raise NotImplementedError()

    def setup(self):
        if not self.abstract:
            self.connection = Connection(transport='memory')
            with self.connection.channel() as channel:
                channel.exchange_declare('amq.direct')
            self.q = self.Queue(None, no_ack=True)

    def teardown(self):
        if not self.abstract:
            self.q.close()
            self.connection.close()

    def test_produce__consume(self):
        if self.abstract:
            return
        q = self.Queue('test_produce__consume', no_ack=True)

        q.put({'hello': 'Simple'})
        self.assertEqual(q.get(timeout=1).payload, {'hello': 'Simple'})
        with self.assertRaises(q.Empty):
            q.get(timeout=0.1)

    def test_produce__basic_get(self):
        if self.abstract:
            return
        q = self.Queue('test_produce__basic_get', no_ack=True)
        q.put({'hello': 'SimpleSync'})
        self.assertEqual(q.get_nowait().payload, {'hello': 'SimpleSync'})
        with self.assertRaises(q.Empty):
            q.get_nowait()

        q.put({'hello': 'SimpleSync'})
        self.assertEqual(q.get(block=False).payload, {'hello': 'SimpleSync'})
        with self.assertRaises(q.Empty):
            q.get(block=False)

    def test_clear(self):
        if self.abstract:
            return
        q = self.Queue('test_clear', no_ack=True)

        for i in range(10):
            q.put({'hello': 'SimplePurge%d' % (i,)})

        self.assertEqual(q.clear(), 10)

    def test_enter_exit(self):
        if self.abstract:
            return
        q = self.Queue('test_enter_exit')
        q.close = Mock()

        self.assertIs(q.__enter__(), q)
        q.__exit__()
        q.close.assert_called_with()

    def test_qsize(self):
        if self.abstract:
            return
        q = self.Queue('test_clear', no_ack=True)

        for i in range(10):
            q.put({'hello': 'SimplePurge%d' % (i,)})

        self.assertEqual(q.qsize(), 10)
        self.assertEqual(len(q), 10)

    def test_autoclose(self):
        if self.abstract:
            return
        channel = self.connection.channel()
        q = self.Queue('test_autoclose', no_ack=True, channel=channel)
        q.close()

    def test_custom_Queue(self):
        if self.abstract:
            return
        n = self.__class__.__name__
        exchange = Exchange('%s-test.custom.Queue' % (n,))
        queue = Queue('%s-test.custom.Queue' % (n,),
                      exchange, 'my.routing.key')
        q = self.Queue(queue)
        self.assertEqual(q.consumer.queues[0], queue)
        q.close()

    def test_bool(self):
        if self.abstract:
            return
        q = self.Queue('test_nonzero')
        self.assertTrue(q)
class PollingQueueConsumer(object):
    """ Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method to block until a message
    with the same correlation ID of the RPC-proxy call arrives.
    """
    consumer = None

    def __init__(self, timeout=None):
        self.stopped = False
        self.timeout = timeout
        self.replies = {}

    def _setup_consumer(self):
        if self.consumer is not None:
            try:
                self.consumer.cancel()
            except (socket.error, IOError):  # pragma: no cover
                # On some systems (e.g. os x) we need to explicitly cancel the
                # consumer here. However, e.g. on ubuntu 14.04, the
                # disconnection has already closed the socket. We try to
                # cancel, and ignore any socket errors.
                # If the socket has been closed, an IOError is raised, ignore
                # it and assume the consumer is already cancelled.
                pass

        channel = self.connection.channel()
        # queue.bind returns a bound copy
        self.queue = self.queue.bind(channel)
        maybe_declare(self.queue, channel)

        consumer = Consumer(channel, queues=[self.queue],
                            accept=self.accept, no_ack=False)
        consumer.callbacks = [self.on_message]
        consumer.consume()
        self.consumer = consumer

    def register_provider(self, provider):
        self.provider = provider

        self.serializer = provider.container.config.get(
            SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)
        self.accept = [self.serializer]

        amqp_uri = provider.container.config[AMQP_URI_CONFIG_KEY]
        verify_amqp_uri(amqp_uri)
        self.connection = Connection(amqp_uri)

        self.queue = provider.queue
        self._setup_consumer()

    def unregister_provider(self, provider):
        self.connection.close()
        self.stopped = True

    def ack_message(self, msg):
        msg.ack()

    def on_message(self, body, message):
        msg_correlation_id = message.properties.get('correlation_id')
        if msg_correlation_id not in self.provider._reply_events:
            _logger.debug("Unknown correlation id: %s", msg_correlation_id)
        self.replies[msg_correlation_id] = (body, message)

    def get_message(self, correlation_id):
        try:
            while correlation_id not in self.replies:
                self.consumer.channel.connection.client.drain_events(
                    timeout=self.timeout)

            body, message = self.replies.pop(correlation_id)
            self.provider.handle_message(body, message)

        except socket.timeout:
            timeout_error = RpcTimeout(self.timeout)
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(timeout_error)

            # timeout is implemented using socket timeout, so when it
            # fires the connection is closed, causing the reply queue
            # to be deleted
            self._setup_consumer()

        except (IOError, ConnectionError) as exc:
            for event in self.provider._reply_events.values():
                rpc_connection_error = RpcConnectionError(
                    'Disconnected while waiting for reply: %s', exc)
                event.send_exception(rpc_connection_error)
            self.provider._reply_events.clear()
            # In case this was a temporary error, attempt to reconnect. If
            # we fail, the connection error will bubble.
            self._setup_consumer()

        except KeyboardInterrupt as exc:
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(exc)
            # exception may have killed the connection
            self._setup_consumer()
""" print(("callback received msg routing_key={} " "body={}").format(message.delivery_info["routing_key"], body)) message.ack() # end of handle_message print("creating consumer") consumer = Consumer(connection, queues=queues, auto_declare=True, callbacks=[handle_message], accept=["json"]) not_done = True time_to_wait = 0.1 while not_done: not_done = True try: consumer.consume() connection.drain_events(timeout=time_to_wait) success = True except socket.timeout as t: connection.heartbeat_check() time.sleep(0.1) # while not done consuming print("shutting down") connection.close()
def do_produce(use_predef_msgs=False):
    conn = Connection(amqp_hosts, failover_strategy='round-robin')
    conn.ensure_connection(errback=on_ens_conn_err_cb)
    conn.connect()

    # bind xchg and Qs to the rmq connection, declare primary exchange
    bound_priTopicXchg = priTopicXchg(conn)
    bound_priTopicXchg.declare()

    """
    # and all explicit Qs in rmq
    for i in priTopicExplicitQs:
        _bound_q = i(conn)
        try:
            _bound_q.declare()
        except Exception as e:
            print("unable to declare, exception type [%s], [%s]"
                  % (type(e), repr(e)))
            _bound_q.delete()
            _bound_q.declare()
    """

    producer = conn.Producer(serializer='json')

    if use_predef_msgs:
        for msg in msgs:
            if msg['topic'] == PRI_TOPIC_NAME:
                xchg = priTopicXchg
                qs = priTopicExplicitQs
            else:
                print("unknown topic [%s]" % msg['topic'])
                continue  # skip messages for unknown topics
            print("sending messages[%s], xchg[%s], topic[%s], routing[%s]"
                  % (msg['msg'], xchg, msg['topic'], msg['routing']))
            producer.publish(
                msg['msg'],
                exchange=xchg,
                declare=[xchg] + qs,  # declare the exchange and explicit Qs
                # compression='zlib'
                compression='bzip2',
                routing_key=msg['routing']
                # per-message expiration doesn't really work in kombu 3.0.32:
                # expiration=10  # 60*15  # 15 minutes
                # properties=properties
                # x-message-ttl=1000
            )
        print("all predefined messages sent")

    try:
        while True:
            var = input('')
            print(var.split('route'))
            tmp = [i.strip(' ') for i in var.split('route')]
            if len(tmp) != 2:
                print("invalid msg [%s], need to be of form: "
                      "[aaa bbb route x.y.z]" % var)
                continue
            try:
                bound_priTopicXchg.publish(bound_priTopicXchg.Message(tmp[0]),
                                           routing_key=tmp[1])
            except conn.connection_errors + conn.channel_errors:
                print("connection [%s] went down, reconnecting to the "
                      "next one" % conn.info())
                conn.close()
                conn.ensure_connection(errback=on_ens_conn_err_cb)
                bound_priTopicXchg = priTopicXchg(conn)
                bound_priTopicXchg.publish(bound_priTopicXchg.Message(tmp[0]),
                                           routing_key=tmp[1])
    except (EOFError, KeyboardInterrupt):
        print("done")
class TestVerifyTask(unittest.TestCase):

    def setUp(self):
        # Open connection to RabbitMQ
        self.conn = Connection(config['broker_url'])
        self.channel = self.conn.channel()

        # Declare Verify queue
        q = config['queues']['verify']
        self.verifyQ = Queue(q['name'],
                             channel=self.channel,
                             exchange=Exchange(q['name']),
                             routing_key=q['name'],
                             max_priority=q['max_task_priority'])
        self.verifyQ.declare()

        # Declare API queue
        q = config['queues']['api']
        self.apiQ = Queue(q['name'],
                          channel=self.channel,
                          exchange=Exchange(q['name']),
                          routing_key=q['name'],
                          max_priority=q['max_task_priority'])
        self.apiQ.declare()

    def tearDown(self):
        # Delete API queue
        self.apiQ.delete()
        # Delete Verify queue
        self.verifyQ.delete()
        # Close connection
        self.conn.close()

    def test_verify(self):
        data = [{
            'filename': '/var/store/15525119098910.pdf',
            'algorithm': 'md5',
            'checksum': 'ec4e3b91d2e03fdb17db55ff46da43b2'
        }, {
            'filename': '/var/store/15525119098910.pdf',
            'algorithm': 'sha512',
            'checksum': 'bc803d8abccf18d89765d6ae9fb7d490ad07f57a48e4987acc1'
                        '73af4e65f143a4d215ffb59e9eebeb03849baab5a6e016e2806'
                        'a2cd0e84b14c778bdb84afbbf4'
        }]

        for i in data:
            self.assertTrue(path.exists(i['filename']))

            # Queues cleanup
            self.verifyQ.purge()
            self.apiQ.purge()

            # Random DFO ID
            dfo_id = randint(1, 2147483647)

            # Send task
            q = config['queues']['verify']
            producer = self.conn.Producer()
            producer.publish(
                routing_key=q['name'],
                body=[[dfo_id, i['filename'], 'test', i['algorithm']], {}, {}],
                headers={
                    'task': 'verify_dfo',
                    'id': str(uuid.uuid1())
                })

            # Wait for result message for max 5 seconds
            msg = None
            wait = 0
            while wait <= 5 and msg is None:
                msg = self.apiQ.get(no_ack=False)
                if msg is None:
                    sleep(1)
                    wait += 1

            # Tests
            self.assertFalse(msg is None)
            self.assertTrue(msg.payload[0][0] == dfo_id)
            self.assertTrue(msg.payload[0][1] == i['algorithm'])
            self.assertTrue(msg.payload[0][2] == i['checksum'])
class rpc(object):

    def __init__(self):
        self.exchange = None
        self.connection = None
        self.connection_pool = None
        self.queue = {}
        self.consumer = {}
        self.producer = {}
        self.callbacks = {}

    def init_exchange(self, name='AllChat', type='direct', channel=None,
                      durable=True, delivery_mode=2):
        if self.exchange is None:
            self.exchange = Exchange(name, type, channel=channel,
                                     durable=durable,
                                     delivery_mode=delivery_mode)
        return self.exchange

    def init_connection(self, url, ssl=False):
        if self.connection is None:
            self.connection = Connection(url, ssl=ssl)
            try:
                self.connection.connect()
            except Exception as e:
                raise e
            else:
                self.connection.close()
        return self.connection

    def get_exchange(self):
        if self.exchange is None:
            raise Exception("No exchange. Please invoke init_exchange first")
        else:
            return self.exchange

    def create_connection(self):
        if self.connection_pool is None:
            self.connection_pool = self.connection.Pool(
                allchat.app.config['RPC_POOL_NUM'])
        return self.connection_pool.acquire()

    def release_connection(self, conn):
        try:
            if conn is not None:
                conn.release()
        except Exception as e:
            raise e

    def close_connection(self, conn):
        try:
            if conn is not None:
                conn.close()
        except Exception as e:
            raise e

    # def create_channel(self, conn):
    #     if conn is not None:
    #         return conn.channel()
    #     else:
    #         return None
    #
    # def release_channel(self, channel):
    #     try:
    #         if channel is not None:
    #             channel.close()
    #     except Exception as e:
    #         raise e

    def create_consumer(self, name, channel, queues=None):
        if name in self.consumer:
            try:
                if self.callbacks[name] != self.consumer[name].callbacks:
                    self.consumer[name].callbacks = self.callbacks[name]
            except KeyError:
                raise Exception("Please invoke register_callbacks before")
            self.consumer[name].revive(channel)
        else:
            if not queues:
                queues = self.create_queue(name, name)
            try:
                self.callbacks[name]
            except KeyError:
                self.register_callbacks(name, [rpc_callbacks()])
                # raise Exception("Please invoke register_callbacks before")
            finally:
                self.consumer[name] = Consumer(
                    channel, queues, callbacks=self.callbacks[name])
                # self.consumer[name].consume()
        return self.consumer[name]

    def release_consumer(self, name):
        try:
            if name in self.consumer:
                self.consumer[name].cancel()
        except Exception as e:
            raise e

    def create_producer(self, name, channel):
        if name in self.producer:
            self.producer[name].revive(channel)
        else:
            self.producer[name] = Producer(channel, self.get_exchange())
        return self.producer[name]

    def release_producer(self, name):
        try:
            if name in self.producer:
                self.producer[name].close()
        except Exception as e:
            raise e

    def create_queue(self, name, routing_key, durable=True):
        if name not in self.queue:
            self.queue[name] = Queue(name, self.get_exchange(), routing_key,
                                     durable=durable)
        return self.queue[name]

    def del_queue(self, name):
        try:
            if name in self.callbacks:
                del self.callbacks[name]
            if name in self.consumer:
                try:
                    self.consumer[name].close()
                except Exception:
                    pass
                del self.consumer[name]
            if name in self.producer:
                try:
                    self.producer[name].close()
                except Exception:
                    pass
                del self.producer[name]
            if name in self.queue:
                tmp = self.create_connection()
                # To delete a queue in kombu it must be bound to a channel,
                # otherwise the delete fails.
                self.queue[name].maybe_bind(tmp)
                self.queue[name].delete()
                self.close_connection(tmp)
                del self.queue[name]
        except Exception as e:
            raise e

    def register_callbacks(self, name, callbacks):
        if isinstance(callbacks, list):
            self.callbacks[name] = callbacks
        else:
            raise Exception("The parameter callbacks should be a list")

    def extend_callbacks(self, name, callbacks):
        if isinstance(callbacks, list):
            if name in self.callbacks:
                self.callbacks[name].extend(callbacks)
            else:
                raise Exception("The {account} callbacks don't exist"
                                .format(account=name))
        else:
            raise Exception("The parameter callbacks should be a list")
class PollingQueueConsumer(object):
    """ Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method to block until a message
    with the same correlation ID of the RPC-proxy call arrives.
    """
    consumer = None

    def __init__(self, timeout=None):
        self.stopped = True
        self.timeout = timeout
        self.replies = {}

    def _setup_consumer(self):
        if self.consumer is not None:
            try:
                self.consumer.cancel()
            except (socket.error, IOError):  # pragma: no cover
                # On some systems (e.g. os x) we need to explicitly cancel the
                # consumer here. However, e.g. on ubuntu 14.04, the
                # disconnection has already closed the socket. We try to
                # cancel, and ignore any socket errors.
                # If the socket has been closed, an IOError is raised, ignore
                # it and assume the consumer is already cancelled.
                pass

        channel = self.connection.channel()
        # queue.bind returns a bound copy
        self.queue = self.queue.bind(channel)
        maybe_declare(self.queue, channel)

        consumer = Consumer(channel, queues=[self.queue],
                            accept=self.accept, no_ack=False)
        consumer.callbacks = [self.on_message]
        consumer.consume()
        self.consumer = consumer

    def register_provider(self, provider):
        self.provider = provider

        self.serializer, self.accept = serialization.setup(
            provider.container.config)

        amqp_uri = provider.container.config[AMQP_URI_CONFIG_KEY]
        ssl = provider.container.config.get(AMQP_SSL_CONFIG_KEY)
        verify_amqp_uri(amqp_uri, ssl=ssl)
        self.connection = Connection(amqp_uri, ssl=ssl)

        self.queue = provider.queue
        self._setup_consumer()
        self.stopped = False

    def unregister_provider(self, provider):
        self.connection.close()
        self.stopped = True

    def ack_message(self, msg):
        msg.ack()

    def on_message(self, body, message):
        msg_correlation_id = message.properties.get('correlation_id')
        if msg_correlation_id not in self.provider._reply_events:
            _logger.debug("Unknown correlation id: %s", msg_correlation_id)
        self.replies[msg_correlation_id] = (body, message)

    def get_message(self, correlation_id):
        try:
            while correlation_id not in self.replies:
                self.consumer.connection.drain_events(timeout=self.timeout)

            body, message = self.replies.pop(correlation_id)
            self.provider.handle_message(body, message)

        except socket.timeout:
            # TODO: this conflates an rpc timeout with a socket read timeout.
            # a better rpc proxy implementation would recover from a socket
            # timeout if the rpc timeout had not yet been reached
            timeout_error = RpcTimeout(self.timeout)
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(timeout_error)

            # timeout is implemented using socket timeout, so when it
            # fires the connection is closed and must be re-established
            self._setup_consumer()

        except (IOError, ConnectionError) as exc:
            # in case this was a temporary error, attempt to reconnect
            # and try again. if we fail to reconnect, the error will bubble
            self._setup_consumer()
            self.get_message(correlation_id)

        except KeyboardInterrupt as exc:
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(exc)
            # exception may have killed the connection
            self._setup_consumer()
class StreamArchiver(LogMixin, NotificationMixin, StreamListener):

    def __init__(self, archives, verbosity=1, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # A temporary storage for use in exception forensics
        self.raw_data = None

        # All archives in a stream belong to the same user
        self.user = archives[0].user
        self.verbosity = verbosity

        self.connection = Connection(settings.BROKER_URL)

        self.channels = []
        for archive in archives:
            with Connection(settings.BROKER_URL) as connection:
                consumer = ArchiveConsumer(archive, connection,
                                           verbosity=self.verbosity)
                consumer.start()

            self.channels.append({
                "archive": archive,
                "consumer": consumer,
                "queue": self.connection.SimpleQueue("archiver:{}".format(
                    archive.pk)),
            })

    def set_verbosity(self, verbosity):
        self.logger.info("Setting {} stream verbosity to {}".format(
            self.user, verbosity))
        self.verbosity = verbosity
        for channel in self.channels:
            channel["consumer"].set_verbosity(self.verbosity)

    def on_data(self, raw_data):
        self.raw_data = raw_data
        return StreamListener.on_data(self, raw_data)

    def on_status(self, status):
        for channel in self.channels:

            archive = channel["archive"]
            queue = channel["queue"]
            query = archive.query.lower()

            if query in status.text.lower():
                queue.put(status._json)
            elif hasattr(status, "retweeted_status"):
                if query in status.retweeted_status.text.lower():
                    queue.put(status._json)
                elif hasattr(status.retweeted_status, "quoted_status"):
                    if query in status.retweeted_status.quoted_status[
                            "text"].lower():  # NOQA: E501
                        queue.put(status._json)
            elif hasattr(status, "quoted_status"):
                if query in status.quoted_status["text"].lower():
                    queue.put(status._json)

    def on_exception(self, exception):
        additional = "Source: {}".format(self.raw_data)
        self._alert("Collector exception [listener]", exception, additional)
        stderr.write("\n\nEXCEPTION:\n{}\n\nSource: {}\n".format(
            exception, additional))
        self.close_log()
        return False

    def on_error(self, status_code):
        message = str(status_code)
        if status_code == 401:
            message = (
                f"Twitter issued a 401 for {self.user}, so they've been "
                f"kicked.")
            self.user.status = User.STATUS_DISABLED
            self.user.save(update_fields=("status", ))
        self._alert("Collector Twitter error", message)
        stderr.write("ERROR: Twitter responded with {}".format(status_code))
        self.close_log()
        return False

    def on_disconnect(self, notice):
        """
        This is what happens if *Twitter* sends a disconnect, not if we
        disconnect from the stream ourselves.
        """
        self._alert("Collector disconnect", str(notice))
        stderr.write("\n\nTwitter disconnect: {}\n\n\n".format(notice))
        self.close_log()
        return False

    def close_log(self):
        self.connection.close()
        self.connection.release()

        # Set `should_stop` which queues the consumer to close everything up
        for channel in self.channels:
            channel["consumer"].should_stop = True

        # Now wait until the consumer has confirmed that it's finished
        for channel in self.channels:
            while not channel["consumer"].is_stopped:
                if self.verbosity > 1:
                    self.logger.info("Waiting for {} to stop".format(
                        channel["consumer"].archive))
                sleep(0.1)
                stdout.flush()

        Archive.objects.filter(
            pk__in=[__["archive"].pk for __ in self.channels]
        ).update(is_running=False)
class PollingQueueConsumer(object):
    """ Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method to block until a message
    with the same correlation ID of the RPC-proxy call arrives.
    """
    consumer = None

    def __init__(self, timeout=None):
        self.timeout = timeout
        self.replies = {}

    def _setup_consumer(self):
        if self.consumer is not None:
            try:
                self.consumer.cancel()
            except socket.error:  # pragma: no cover
                # On some systems (e.g. os x) we need to explicitly cancel the
                # consumer here. However, e.g. on ubuntu 14.04, the
                # disconnection has already closed the socket. We try to
                # cancel, and ignore any socket errors.
                pass

        channel = self.connection.channel()
        # queue.bind returns a bound copy
        self.queue = self.queue.bind(channel)
        maybe_declare(self.queue, channel)

        consumer = Consumer(
            channel, queues=[self.queue], accept=self.accept, no_ack=False)
        consumer.callbacks = [self.on_message]
        consumer.consume()
        self.consumer = consumer

    def register_provider(self, provider):
        self.provider = provider

        self.serializer = provider.container.config.get(
            SERIALIZER_CONFIG_KEY, DEFAULT_SERIALIZER)
        self.accept = [self.serializer]

        amqp_uri = provider.container.config[AMQP_URI_CONFIG_KEY]
        verify_amqp_uri(amqp_uri)
        self.connection = Connection(amqp_uri)

        self.queue = provider.queue
        self._setup_consumer()

    def unregister_provider(self, provider):
        self.connection.close()

    def ack_message(self, msg):
        msg.ack()

    def on_message(self, body, message):
        msg_correlation_id = message.properties.get('correlation_id')
        if msg_correlation_id not in self.provider._reply_events:
            _logger.debug(
                "Unknown correlation id: %s", msg_correlation_id)
        self.replies[msg_correlation_id] = (body, message)

    def get_message(self, correlation_id):
        try:
            while correlation_id not in self.replies:
                self.consumer.channel.connection.client.drain_events(
                    timeout=self.timeout
                )

            body, message = self.replies.pop(correlation_id)
            self.provider.handle_message(body, message)

        except socket.timeout:
            timeout_error = RpcTimeout(self.timeout)
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(timeout_error)

            # timeout is implemented using socket timeout, so when it
            # fires the connection is closed, causing the reply queue
            # to be deleted
            self._setup_consumer()

        except ConnectionError as exc:
            for event in self.provider._reply_events.values():
                rpc_connection_error = RpcConnectionError(
                    'Disconnected while waiting for reply: %s', exc)
                event.send_exception(rpc_connection_error)
            self.provider._reply_events.clear()
            # In case this was a temporary error, attempt to reconnect. If
            # we fail, the connection error will bubble.
            self._setup_consumer()

        except KeyboardInterrupt as exc:
            event = self.provider._reply_events.pop(correlation_id)
            event.send_exception(exc)
            # exception may have killed the connection
            self._setup_consumer()
class PollingQueueConsumer(object):
    """ Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method to block until a message
    with the same correlation ID of the RPC-proxy call arrives.
    """

    def __init__(self, timeout=None):
        self.timeout = timeout

    def _setup_queue(self):
        self.channel = self.connection.channel()
        # queue.bind returns a bound copy
        self.queue = self.queue.bind(self.channel)
        maybe_declare(self.queue, self.channel)

    def register_provider(self, provider):
        self.provider = provider
        self.connection = Connection(provider.container.config['AMQP_URI'])
        self.queue = provider.queue
        self._setup_queue()
        message_iterator = self._poll_messages()
        message_iterator.send(None)  # start generator
        self.get_message = message_iterator.send

    def unregister_provider(self, provider):
        self.connection.close()

    def ack_message(self, msg):
        msg.ack()

    def _poll_messages(self):
        replies = {}

        correlation_id = yield

        while True:
            try:
                for body, msg in queue_iterator(
                    self.queue, timeout=self.timeout
                ):
                    msg_correlation_id = msg.properties.get('correlation_id')

                    if msg_correlation_id not in self.provider._reply_events:
                        _logger.debug(
                            "Unknown correlation id: %s", msg_correlation_id)
                        continue

                    replies[msg_correlation_id] = (body, msg)

                    # Here, and every time we re-enter this coroutine (at the
                    # `yield` statement below) we check if we already have the
                    # data for the new correlation_id before polling for new
                    # messages.
                    while correlation_id in replies:
                        body, msg = replies.pop(correlation_id)
                        self.provider.handle_message(body, msg)
                        correlation_id = yield

            except RpcTimeout as exc:
                event = self.provider._reply_events.pop(correlation_id)
                event.send_exception(exc)

                # timeout is implemented using socket timeout, so when it
                # fires the connection is closed, causing the reply queue
                # to be deleted
                self._setup_queue()
                correlation_id = yield

            except ConnectionError as exc:
                for event in self.provider._reply_events.values():
                    rpc_connection_error = RpcConnectionError(
                        'Disconnected while waiting for reply: %s', exc)
                    event.send_exception(rpc_connection_error)
                self.provider._reply_events.clear()
                # In case this was a temporary error, attempt to reconnect.
                # If we fail, the connection error will bubble.
                self._setup_queue()
                correlation_id = yield
class test_Redis(TestCase):

    def setUp(self):
        self.connection = Connection(transport=Transport)
        self.exchange = Exchange('test_Redis', type='direct')
        self.queue = Queue('test_Redis', self.exchange, 'test_Redis')

    def tearDown(self):
        self.connection.close()

    def test_publish__get(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()
        producer.publish({'hello': 'world'})

        self.assertDictEqual(self.queue(channel).get().payload,
                             {'hello': 'world'})
        self.assertIsNone(self.queue(channel).get())
        self.assertIsNone(self.queue(channel).get())
        self.assertIsNone(self.queue(channel).get())

    def test_publish__consume(self):
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, self.queue)

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        self.assertIn(channel, channel.connection.cycle._channels)
        try:
            connection.drain_events(timeout=1)
            self.assertTrue(_received)
            with self.assertRaises(socket.timeout):
                connection.drain_events(timeout=0.01)
        finally:
            channel.close()

    def test_purge(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()

        for i in range(10):
            producer.publish({'hello': 'world-%s' % (i, )})

        self.assertEqual(channel._size('test_Redis'), 10)
        self.assertEqual(self.queue(channel).purge(), 10)
        channel.close()

    def test_db_values(self):
        Connection(virtual_host=1, transport=Transport).channel()
        Connection(virtual_host='1', transport=Transport).channel()
        Connection(virtual_host='/1', transport=Transport).channel()
        with self.assertRaises(Exception):
            Connection('redis:///foo').channel()

    def test_db_port(self):
        c1 = Connection(port=None, transport=Transport).channel()
        c1.close()

        c2 = Connection(port=9999, transport=Transport).channel()
        c2.close()

    def test_close_poller_not_active(self):
        c = Connection(transport=Transport).channel()
        cycle = c.connection.cycle
        c.client.connection
        c.close()
        self.assertNotIn(c, cycle._channels)

    def test_close_ResponseError(self):
        c = Connection(transport=Transport).channel()
        c.client.bgsave_raises_ResponseError = True
        c.close()

    def test_close_disconnects(self):
        c = Connection(transport=Transport).channel()
        conn1 = c.client.connection
        conn2 = c.subclient.connection
        c.close()
        self.assertTrue(conn1.disconnected)
        self.assertTrue(conn2.disconnected)

    def test_get__Empty(self):
        channel = self.connection.channel()
        with self.assertRaises(Empty):
            channel._get('does-not-exist')
        channel.close()

    def test_get_client(self):
        myredis, exceptions = _redis_modules()

        @module_exists(myredis, exceptions)
        def _do_test():
            conn = Connection(transport=Transport)
            chan = conn.channel()
            self.assertTrue(chan.Client)
            self.assertTrue(chan.ResponseError)
            self.assertTrue(conn.transport.connection_errors)
            self.assertTrue(conn.transport.channel_errors)

        _do_test()
taskMap = {
    'process_data': dataGenerate_wrapped,
    'process_data_success': runPriceReport_wrapped,
    'process_data_error': dataGenerate_error,
    'run_report': runPriceReport_wrapped,
    'run_report_success': report_email_wrapped,
    'run_report_error': reportGenerate_error,
    'report_email': report_email_wrapped,
    'report_email_success': hello_task,
    'report_email_error': hello_task
}
#"""

if sys.argv[0].startswith("python"):
    option_index = 2
else:
    option_index = 1

option = sys.argv[option_index]

try:
    if option == "produce":
        try:
            command = sys.argv[option_index + 1]
        except IndexError:
            command = None
        run_producer(conn, taskMap, command)
    elif option == "consume":
        run_consumer(conn, taskMap)
    else:
        print("Unknown option '%s'; exiting ..." % option)
        sys.exit(1)
finally:
    conn.close()
#"""
def do_consume(user_qs):
    print("about to listen on queues [%s]"
          % ", ".join(list(map(lambda x: x, user_qs))))

    conn = Connection(amqp_hosts, failover_strategy='round-robin')

    # try to get a connection no matter what
    while True:
        try:
            conn.ensure_connection(errback=on_ens_conn_err_cb)
            conn.connect()
        except Exception as e:
            print("connection error failed on exception [%s]" % repr(e))
            conn.release()
            continue
        if conn.connected:
            break
        else:
            print("connection failed in some way, retry")

    chan = conn.channel()

    global bound_cons_Q
    cons_Q = Queue(common.uuid(), queue_arguments=q_expires)
    bound_cons_Q = cons_Q(chan)
    bound_cons_Q.declare()

    # first bind to some control route
    bound_cons_Q.bind_to(priTopicXchg, routing_key='manage.#')

    for i in user_qs:
        if '*' in i or '#' in i:
            # create the wildcard route_key bind
            bound_cons_Q.bind_to(priTopicXchg, routing_key=i)
        else:
            for j in allQs:
                if i == j.as_dict()['name']:
                    bound_cons_Q.bind_to(
                        priTopicXchg,
                        routing_key=j.as_dict()['routing_key'])

    cons = Consumer(
        chan,
        accept=['json'],
        queues=bound_cons_Q,
        callbacks=[on_msg_cb_1, on_msg_cb_2]
    )
    print("queue set to [%s]" % bound_cons_Q.as_dict(recurse=True))
    cons.consume()

    while True:
        try:
            conn.drain_events()
        except conn.connection_errors + conn.channel_errors as e:
            print("connection [%s] went down (error[%s]), trying to "
                  "connect to the next one" % (conn.info(), repr(e)))
            conn.close()
            conn.release()
            conn.ensure_connection(errback=on_ens_conn_err_cb)
            conn.connect()
            chan = conn.channel()
            cons_Q.bind(chan)
            cons = Consumer(
                chan,
                accept=['json'],
                queues=bound_cons_Q,
                callbacks=[on_msg_cb_1, on_msg_cb_2]
            )
            cons.consume()
class KomBuClient(ConsumerMixin):

    def __init__(self, hosts_conf, exchange_name='', exchange_type='',
                 exchange_arguments=None, queue_name='', routing_key='',
                 queue_arguments=None, callback=None, no_ack=True):
        self.hosts_conf = hosts_conf
        self.hosts = self.create_hosts()
        self.connection = Connection(self.hosts)
        self.task_exchange = Exchange(name=exchange_name,
                                      type=exchange_type,
                                      arguments=exchange_arguments)
        self.task_queues = [Queue(name=queue_name,
                                  exchange=self.task_exchange,
                                  routing_key=routing_key,
                                  queue_arguments=queue_arguments)]
        self.callback = callback
        self.no_ack = no_ack

    def queue_size(self, queue_list, queue_arguments=None):
        result = dict()
        for i in queue_list:
            queue_size = self.connection.SimpleQueue(
                name=Queue(name=i, queue_arguments=queue_arguments)).qsize()
            result[i] = queue_size
        return result

    def create_hosts(self):
        hosts_list = []
        for i in self.hosts_conf:
            host = i.get('host', '127.0.0.1')
            port = i.get('port', '5672')
            username = i.get('username', 'guest')
            passwd = i.get('passwd', 'guest')
            auth = "amqp://{username}:{passwd}@{host}:{port}//".format(
                username=username, passwd=passwd, host=host, port=port)
            hosts_list.append(auth)
        return hosts_list

    def get_consumers(self, Consumer, channel):
        channel.basic_qos(prefetch_size=0, prefetch_count=1, a_global=False)
        return [Consumer(queues=self.task_queues,
                         accept=['json', 'pickle', 'msgpack', 'yaml'],
                         callbacks=[self.callback],
                         no_ack=self.no_ack)]

    def process_task(self, body, message):
        print(self.hosts)
        print(body, message.properties)
        message.ack()

    def start(self):
        self.run()

    def send_task(self, payload, routing_key=None, priority=0,
                  content_type=None, content_encoding=None, serializer=None,
                  headers=None, compression=None, exchange=None, retry=False,
                  retry_policy=None, declare=[], expiration=None):
        try:
            with producers[self.connection].acquire(block=True) as producer:
                producer.publish(payload,
                                 serializer=serializer,
                                 compression=compression,
                                 exchange=exchange,
                                 declare=declare,
                                 routing_key=routing_key,
                                 priority=priority,
                                 content_type=content_type,
                                 content_encoding=content_encoding,
                                 headers=headers,
                                 retry=retry,
                                 retry_policy=retry_policy,
                                 expiration=expiration)
        except Exception as error:
            return False, error
        return True, None

    def close(self):
        self.connection.close()
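A minimal usage sketch for the class above, assuming a local broker; the exchange, queue and routing-key names and the callback are illustrative, not from the original snippet:

# Illustrative values only.
def on_task(body, message):
    print(body)
    message.ack()

client = KomBuClient(
    hosts_conf=[{'host': '127.0.0.1', 'port': '5672',
                 'username': 'guest', 'passwd': 'guest'}],
    exchange_name='tasks', exchange_type='direct',
    queue_name='tasks', routing_key='tasks',
    callback=on_task, no_ack=False,
)
client.start()  # blocks, consuming via ConsumerMixin.run()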
class test_Redis:

    def setup(self):
        self.connection = Connection(transport=Transport)
        self.exchange = Exchange('test_Redis', type='direct')
        self.queue = Queue('test_Redis', self.exchange, 'test_Redis')

    def teardown(self):
        self.connection.close()

    @mock.replace_module_value(redis.redis, 'VERSION', [3, 0, 0])
    def test_publish__get_redispyv3(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()
        producer.publish({'hello': 'world'})

        assert self.queue(channel).get().payload == {'hello': 'world'}
        assert self.queue(channel).get() is None
        assert self.queue(channel).get() is None
        assert self.queue(channel).get() is None

    @mock.replace_module_value(redis.redis, 'VERSION', [2, 5, 10])
    def test_publish__get_redispyv2(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()
        producer.publish({'hello': 'world'})

        assert self.queue(channel).get().payload == {'hello': 'world'}
        assert self.queue(channel).get() is None
        assert self.queue(channel).get() is None
        assert self.queue(channel).get() is None

    def test_publish__consume(self):
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, queues=[self.queue])

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        assert channel in channel.connection.cycle._channels
        try:
            connection.drain_events(timeout=1)
            assert _received
            with pytest.raises(socket.timeout):
                connection.drain_events(timeout=0.01)
        finally:
            channel.close()

    def test_purge(self):
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()

        for i in range(10):
            producer.publish({'hello': f'world-{i}'})

        assert channel._size('test_Redis') == 10
        assert self.queue(channel).purge() == 10
        channel.close()

    def test_db_values(self):
        Connection(virtual_host=1, transport=Transport).channel()
        Connection(virtual_host='1', transport=Transport).channel()
        Connection(virtual_host='/1', transport=Transport).channel()
        with pytest.raises(Exception):
            Connection('redis:///foo').channel()

    def test_db_port(self):
        c1 = Connection(port=None, transport=Transport).channel()
        c1.close()

        c2 = Connection(port=9999, transport=Transport).channel()
        c2.close()

    def test_close_poller_not_active(self):
        c = Connection(transport=Transport).channel()
        cycle = c.connection.cycle
        c.client.connection
        c.close()
        assert c not in cycle._channels

    def test_close_ResponseError(self):
        c = Connection(transport=Transport).channel()
        c.client.bgsave_raises_ResponseError = True
        c.close()

    def test_close_disconnects(self):
        c = Connection(transport=Transport).channel()
        conn1 = c.client.connection
        conn2 = c.subclient.connection
        c.close()
        assert conn1.disconnected
        assert conn2.disconnected

    def test_get__Empty(self):
        channel = self.connection.channel()
        with pytest.raises(Empty):
            channel._get('does-not-exist')
        channel.close()

    def test_get_client(self):
        with mock.module_exists(*_redis_modules()):
            conn = Connection(transport=Transport)
            chan = conn.channel()
            assert chan.Client
            assert chan.ResponseError
            assert conn.transport.connection_errors
            assert conn.transport.channel_errors

    def test_check_at_least_we_try_to_connect_and_fail(self):
        import redis
        connection = Connection('redis://localhost:65534/')

        with pytest.raises(redis.exceptions.ConnectionError):
            chan = connection.channel()
            chan._size('some_queue')
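The test class above depends on module-level fixtures it does not show. A sketch of a plausible module header, hedged: the real file in kombu's repository (t/unit/transport/test_redis.py) additionally builds a fake redis client and a mocked Transport subclass, and only the import shape is guessed at here:

# Plausible imports for the tests above; Transport is assumed to be a test
# double wired to an in-memory fake client rather than a live redis server.
import socket
from queue import Empty

import pytest
from case import mock  # provides replace_module_value / module_exists

from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.transport import redis  # redis.redis is the redis-py module it wraps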
class KombuPublisher(AbstractPublisher):
    """
    Uses kombu as the broker layer. kombu supports a wide range of niche
    brokers out of the box, but its performance is poor, so this publisher is
    only worth choosing for broker types the distributed function-scheduling
    framework has not implemented natively; users can also benchmark and
    compare for themselves.
    """

    def custom_init(self):
        self._kombu_broker_url_prefix = frame_config.KOMBU_URL.split(":")[0]
        logger_name = f'{self._logger_prefix}{self.__class__.__name__}--{self._kombu_broker_url_prefix}--{self._queue_name}'
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(
            self._log_level_int,
            log_filename=f'{logger_name}.log' if self._is_add_file_handler else None,
            formatter_template=frame_config.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER,
        )

        # broker initialisation (originally a separate init_broker method)
        self.exchange = Exchange('distributed_framework_exchange', 'direct',
                                 durable=True)
        self.queue = Queue(self._queue_name, exchange=self.exchange,
                           routing_key=self._queue_name, auto_delete=False)
        self.conn = Connection(frame_config.KOMBU_URL)
        self.queue(self.conn).declare()
        self.producer = self.conn.Producer(serializer='json')
        self.channel = self.producer.channel  # type: Channel
        # opt out of kombu's default base64 body encoding on virtual transports
        self.channel.body_encoding = 'no_encode'
        self.logger.warning(f'using the kombu library to connect to the '
                            f'{self._kombu_broker_url_prefix} broker')

    @deco_mq_conn_error
    def concrete_realization_of_publish(self, msg):
        self.producer.publish(json.loads(msg), exchange=self.exchange,
                              routing_key=self._queue_name, declare=[self.queue])

    @deco_mq_conn_error
    def clear(self):
        self.channel.queue_purge(self._queue_name)

    @deco_mq_conn_error
    def get_message_count(self):
        # a passive re-declare returns a queue_declare_ok_t named tuple, e.g.
        # queue_declare_ok_t(queue='...', message_count=100000, consumer_count=0).
        # For amqp, kombu tries librabbitmq and falls back to pyamqp; non-amqp
        # virtual transports would use channel._size(self._queue_name) instead.
        queue_declare_ok_t_named_tuple = self.channel.queue_declare(
            queue=self._queue_name, durable=True, auto_delete=False)
        return queue_declare_ok_t_named_tuple.message_count

    def close(self):
        self.channel.close()
        self.conn.close()
        self.logger.warning('closing the kombu connection')
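Stripped of the framework plumbing (frame_config, LogManager, the deco_mq_conn_error decorator), the publish path above reduces to plain kombu calls. A minimal standalone sketch, assuming a local RabbitMQ broker; the URL and queue name are illustrative:

import json

from kombu import Connection, Exchange, Queue

# illustrative values; the framework reads these from frame_config
KOMBU_URL = 'amqp://guest:guest@127.0.0.1:5672//'
queue_name = 'test_kombu_queue'

exchange = Exchange('distributed_framework_exchange', 'direct', durable=True)
queue = Queue(queue_name, exchange=exchange, routing_key=queue_name,
              auto_delete=False)

with Connection(KOMBU_URL) as conn:
    queue(conn).declare()  # bind to the connection's default channel, declare
    producer = conn.Producer(serializer='json')
    producer.publish(json.loads('{"x": 1}'),
                     exchange=exchange, routing_key=queue_name,
                     declare=[queue])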