def connect(self, forever=False):
    """Open a SelectConnection and drive its ioloop.

    When ``forever`` is true, failures to create the connection are
    logged and retried every 10 seconds instead of raised, and the
    connect/ioloop cycle restarts after each ioloop exit.
    """
    label = self.name
    while True:
        try:
            conn = SelectConnection(self.parameters, self.on_connected)
            log.debug('%s connected', label)
        except Exception:
            if not forever:
                raise
            log.warning('%s cannot connect', label, exc_info=True)
            time.sleep(10)
            continue
        try:
            conn.ioloop.start()
        finally:
            # Drive the ioloop once more so the close handshake can
            # complete; swallow any error during best-effort teardown.
            try:
                conn.close()
                conn.ioloop.start()
            except Exception:
                pass
        if not forever:
            break
class BaseAMQPConnection(BaseConnection):
    """ An object which does the actual job of (re-)connecting to the
    AMQP broker. Concrete subclasses implement either listening or
    publishing features.
    """

    def __init__(self, conn_params, item_name, properties=None):
        # conn_params: pika connection parameters used by _start().
        # item_name: name of the owning item (kept for subclasses).
        # properties: optional AMQP properties, stored as-is.
        super(BaseAMQPConnection, self).__init__()
        self.conn_params = conn_params
        self.item_name = item_name
        self.properties = properties
        self.conn = None      # SelectConnection once _start() runs
        self.channel = None   # AMQP channel, opened by subclasses
        # Exception types on which callers are expected to reconnect.
        self.reconnect_exceptions = (TypeError, EnvironmentError)

    def _start(self):
        """Open the async connection; blocks in the ioloop until closed."""
        self.conn = SelectConnection(self.conn_params, self._on_connected)
        self.conn.ioloop.start()

    def _close(self):
        """ Actually close the connection. """
        if self.conn:
            try:
                self.conn.close()
            except socket.error, e:
                # EBADF meant we actually had a connection but it was
                # unusable (like the credentials were incorrect)
                if e.errno != errno.EBADF:
                    raise
def __init__(self, parameters=None, on_open_callback=None,
             reconnection_strategy=None):
    """Extend SelectConnection with tracking of bad channel numbers.

    All arguments are passed straight through to
    SelectConnection.__init__.
    """
    SelectConnection.__init__(self,
                              parameters=parameters,
                              on_open_callback=on_open_callback,
                              reconnection_strategy=reconnection_strategy)
    # Channel numbers observed to be unusable on this connection.
    self._bad_channel_numbers = set()
def __init__(self, queue, request):
    """Set up RPC state, connect to the local broker, and block in the
    ioloop until it stops."""
    self.queue = queue
    self.request = request
    self.response = None
    self.channel = None
    self.corrId = str(uuid.uuid4())   # correlation id for replies
    self.callBackQueue = None
    self.connection = None
    conn_params = pika.ConnectionParameters(host='127.0.0.1')
    self.connection = SelectConnection(conn_params,
                                       self.on_response_connected)
    self.connection.ioloop.start()
def start(self):
    """Connect to the broker and run the ioloop (blocks until closed)."""
    # parameters required for the AMQP connection: user name and password
    # NOTE(review): both the username and the password are read from the
    # "gameLogicServerUserName" property — looks like a copy/paste slip;
    # confirm whether a dedicated password property was intended.
    credentials = PlainCredentials(
        RabbitMQConfiguration().getRabbitMQProperty("gameLogicServerUserName"),
        RabbitMQConfiguration().getRabbitMQProperty("gameLogicServerUserName"))
    parameters = pika.ConnectionParameters(
        host=RabbitMQConfiguration().getRabbitMQProperty("gameLogicServerBrokerHost"),
        virtual_host=self.state.vhost,
        credentials=credentials)
    # instantiate a connection
    connection = SelectConnection(parameters=parameters,
                                  on_open_callback=self.on_connected)
    # required behavior on close
    connection.add_on_close_callback(self.on_close)
    # start the connection (blocks in the ioloop)
    connection.ioloop.start()
def __init__(self, config):
    """Store broker settings from *config* and create the notifications
    connection.

    config: mapping with 'broker.*' keys; a missing key raises KeyError.
    Note: ioloop.start() is not invoked here.
    """
    self.delivery_conn = None
    self.delivery_channel = None
    self.notifs_conn = None
    self.notifs_channel = None
    # Extract configuration
    self.broker_username = config['broker.username']
    self.broker_password = config['broker.password']
    self.broker_host = config['broker.host']
    self.broker_amqp_port = config['broker.amqp_port']
    self.broker_http_port = config['broker.http_port']
    self.broker_vhost = config['broker.vhost']
    self.incoming_exchange_name = config['broker.incoming_exchange_name']
    self.notifs_queue_name = config['broker.notifications_queue_name']
    # Create connection parameters object for easy reuse
    self.conn_params = pika.ConnectionParameters(
        credentials=pika.PlainCredentials(
            self.broker_username,
            self.broker_password,
        ),
        host=self.broker_host,
        port=self.broker_amqp_port,
        virtual_host=self.broker_vhost,
    )
    # Kick off the async notifications connection.
    self.notifs_conn = SelectConnection(
        self.conn_params,
        self.on_notifs_connected,
    )
def test_handling(self):
    """Exercise the base Handler and MessageHandler lifecycle plus the
    abort/skip/error control-flow exceptions."""
    envelope = create_envelope()
    handler = Handler(envelope)
    self.assertEqual(handler.envelope, envelope)
    handler.initialize(key='value', key2='value2')
    handler.pre_handle()
    # Base handle() is abstract.
    self.assertRaises(NotImplementedError, handler.handle)
    handler.post_handle()
    envelope = create_envelope()
    consumer = Consumer('localhost', 'test')
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    consumer.connection = mock_connection
    consumer.channel = mock_channel
    handler = MessageHandler(consumer, envelope)
    self.assertEqual(handler.envelope, envelope)
    self.assertEqual(handler.consumer, consumer)
    self.assertEqual(handler.channel, mock_channel)
    handler.initialize(key='value', key2='value2')
    handler.pre_handle()
    self.assertRaises(NotImplementedError, handler.handle)
    handler.post_handle()
    msg = 'some message'
    # abort/skip/error raise dedicated exceptions carrying the reason.
    with self.assertRaises(AbortHandling) as cm:
        handler.abort(reason=msg)
    self.assertEqual(cm.exception.reason, msg)
    with self.assertRaises(SkipHandling) as cm:
        handler.skip(reason=msg)
    self.assertEqual(cm.exception.reason, msg)
    with self.assertRaises(HandlingError) as cm:
        handler.error(error_msg=msg)
    self.assertEqual(cm.exception.error_msg, msg)
def post_init(self):
    """Open the AMQP connection and run the ioloop until it stops.

    KeyboardInterrupt triggers a graceful close (close + one more ioloop
    run so the handshake completes); any other error is printed with its
    traceback instead of propagating.
    """
    try:
        LOGGER.info('Opening a connection')
        self.connection = SelectConnection(
            parameters=settings.pika_parameters,
            on_open_callback=self.on_connection_open)
        try:
            LOGGER.info('Starting ioloop')
            self.connection.ioloop.start()
        except KeyboardInterrupt:
            # Gracefully close the connection
            self.connection.close()
            # Loop until we're fully closed, will stop on its own
            self.connection.ioloop.start()
    except Exception:
        # Was a bare ``except:`` that printed the *list* returned by
        # traceback.format_exception(); print the formatted traceback
        # instead, and stop swallowing SystemExit.
        traceback.print_exc()
def connect(self):
    """Start an async connection to the broker (no-op while a connect is
    already in flight).

    Exits the process when the connection object cannot be created,
    since nothing useful can run without the broker.
    """
    if self.connecting:
        return
    self.connecting = True
    credentials = pika.PlainCredentials('guest', 'guest')
    params = pika.ConnectionParameters(host="hackinista.com",
                                       port=5672,
                                       virtual_host="/",
                                       credentials=credentials)
    # (removed: unused `host` local computed from sys.argv)
    try:
        self.connection = SelectConnection(params, self.on_connected)
    except Exception:
        # Error connecting to rabbitmq — bail out.  (Narrowed from a
        # bare ``except:`` so KeyboardInterrupt/SystemExit propagate.)
        sys.exit(-1)
def start(self):
    """Connect to the broker and run the ioloop (blocks until closed)."""
    # parameters required for the AMQP connection: user name and password
    # NOTE(review): the password argument reuses the
    # "gameLogicServerUserName" property — confirm whether a dedicated
    # password property was intended.
    credentials = PlainCredentials(
        RabbitMQConfiguration().getRabbitMQProperty(
            "gameLogicServerUserName"),
        RabbitMQConfiguration().getRabbitMQProperty(
            "gameLogicServerUserName"))
    parameters = pika.ConnectionParameters(
        host=RabbitMQConfiguration().getRabbitMQProperty(
            "gameLogicServerBrokerHost"),
        virtual_host=self.state.vhost,
        credentials=credentials)
    # instantiate a connection
    connection = SelectConnection(parameters=parameters,
                                  on_open_callback=self.on_connected)
    # required behavior on close
    connection.add_on_close_callback(self.on_close)
    # start the connection (blocks in the ioloop)
    connection.ioloop.start()
def testnb():
    """Wire a TestServer channel onto a NodeNB and run the ioloop until
    interrupted; CTRL-C closes the connection cleanly."""
    node = NodeNB()
    node.channel(TestServer)
    params = ConnectionParameters()
    conn = SelectConnection(params, node.on_connection_open)
    try:
        conn.ioloop.start()
    except KeyboardInterrupt:
        conn.close()
        conn.ioloop.start()  # run until the close completes
class QueuePublisherClient(object):
    """Blocking helper that publishes *request* to *queue* over AMQP.

    The constructor connects and runs the ioloop; publishing happens in
    the declare/publish callback chain, which then closes the
    connection.
    """

    def __init__(self, queue, request):
        self.queue = queue
        self.response = None
        self.channel = None
        self.request = request
        # Correlation id for matching RPC responses.
        self.corrId = str(uuid.uuid4())
        self.callBackQueue = None
        self.connection = None
        parameters = pika.ConnectionParameters(host="0.0.0.0")
        self.connection = SelectConnection(
            parameters,
            self.on_response_connected
        )
        # Blocks here until the connection's ioloop stops.
        self.connection.ioloop.start()

    def on_response(self, ch, method, props, body):
        # Store the reply matching our correlation id and shut down.
        if self.corrId == props.correlation_id:
            self.response = body
            self.connection.close()
            self.connection.ioloop.start()

    def on_response_connected(self, connection):
        _logger.info("Connected...\t(%s)" % self.queue)
        self.connection = connection
        self.connection.channel(self.on_channel_open)

    def on_response_channel_open(self, channel):
        # NOTE(review): not referenced anywhere in this class —
        # on_response_connected opens the channel with on_channel_open.
        self.responseChannel = channel
        result = self.responseChannel.queue_declare(
            exclusive=True,
            callback=self.on_response_queue_declared
        )

    def on_connected(self, connection):
        self.connection = connection
        self.connection.channel(self.on_channel_open)

    def on_channel_open(self, channel):
        _logger.info("Channel Opened...\t(%s)" % self.queue)
        self.channel = channel
        self.channel.queue_declare(queue=self.queue,
                                   durable=True,
                                   exclusive=False,
                                   auto_delete=False,
                                   callback=self.on_queue_declared)

    def on_queue_declared(self, frame):
        # Publish the request and tear the connection down.
        self.channel.basic_publish(exchange="",
                                   routing_key=self.queue,
                                   properties=pika.BasicProperties(),
                                   body=str(self.request))
        self.connection.close()
        _logger.info("Message Published...\t(%s)" % self.queue)
def test_retry_policy(self):
    """Base RetryPolicy.retry must be abstract: raises NotImplementedError."""
    env = create_envelope()
    consumer = create_consumer()
    conn = SelectConnection()
    consumer.connection = conn
    consumer.channel = Channel(conn, 10, None)
    policy = RetryPolicy()
    with self.assertRaises(NotImplementedError):
        policy.retry(envelope=env)
def post_init(self):
    """Create the Dispatcher and open the pika connection, then run the
    ioloop until interrupted."""
    # Initialize the dispatch-task object.
    # Asynchronously open the RabbitMQ connection, create a channel,
    # declare the exchange and queue, bind them, then run the run
    # method — all driven by the ioloop started below.
    self.dispatcher = Dispatcher(self.options)
    try:
        LOGGER.info('Opening a pika connection')
        self.connection = SelectConnection(
            parameters=settings.pika_parameters,
            on_open_callback=self.on_connection_open,
            on_open_error_callback=self.on_connection_open_error)
        try:
            LOGGER.info('Starting ioloop')
            self.connection.ioloop.start()
        except KeyboardInterrupt:
            # Gracefully close the connection
            self.connection.close()
            # Loop until we're fully closed, will stop on its own
            self.connection.ioloop.start()
    except:
        # NOTE(review): bare except also swallows SystemExit, and this
        # prints the *list* returned by format_exception.
        (etype, eobj, etb) = sys.exc_info()
        print traceback.format_exception(etype, eobj, etb)
class DataProvisionClient(object):
    """Blocking helper that publishes *request* to *queue* on the local
    broker via the async declare/publish callback chain."""

    def __init__(self, queue, request):
        self.queue = queue
        self.response = None
        self.channel = None
        self.request = request
        # Correlation id for matching RPC responses.
        self.corrId = str(uuid.uuid4())
        self.callBackQueue = None
        self.connection = None
        parameters = pika.ConnectionParameters(host='127.0.0.1')
        self.connection = SelectConnection(parameters,
                                           self.on_response_connected)
        # Blocks here until the connection's ioloop stops.
        self.connection.ioloop.start()

    def on_response(self, ch, method, props, body):
        # Store the reply matching our correlation id and shut down.
        if self.corrId == props.correlation_id:
            self.response = body
            self.connection.close()
            self.connection.ioloop.start()

    def on_response_connected(self, connection):
        print('connected ..')
        self.connection = connection
        self.connection.channel(self.on_channel_open)

    def on_response_channel_open(self, channel):
        # NOTE(review): not referenced anywhere in this class —
        # on_response_connected opens the channel with on_channel_open.
        self.responseChannel = channel
        result = self.responseChannel.queue_declare(
            exclusive=True,
            callback=self.on_response_queue_declared)

    def on_connected(self, connection):
        self.connection = connection
        self.connection.channel(self.on_channel_open)

    def on_channel_open(self, channel):
        print('channel opened')
        self.channel = channel
        self.channel.queue_declare(queue=self.queue,
                                   durable=True,
                                   exclusive=False,
                                   auto_delete=False,
                                   callback=self.on_queue_declared)

    def on_queue_declared(self, frame):
        # Publish the request and tear the connection down.
        self.channel.basic_publish(exchange='',
                                   routing_key=self.queue,
                                   properties=pika.BasicProperties(),
                                   body=str(self.request))
        self.connection.close()
        print('message delivered')
def test_fixed_delay_limited_retries_policy(self):
    """FixedDelayLimitedRetriesPolicy is a LimitedRetriesPolicy whose
    delay schedule repeats one fixed delay."""
    consumer = create_consumer()
    conn = SelectConnection()
    consumer.connection = conn
    consumer.channel = Channel(conn, 10, None)
    policy = FixedDelayLimitedRetriesPolicy(consumer,
                                            delay=10,
                                            retries_limit=7,
                                            retry_queue_suffix='s')
    self.assertTrue(isinstance(policy, LimitedRetriesPolicy))
    self.assertEqual(policy.retry_delays, (10,) * 7)
    self.assertEqual(policy.retry_queue_suffix, 's')
def post_init(self):
    """Open the connection and run the ioloop until interrupted."""
    # Initialization: create the RabbitMQ queues and exchanges and bind
    # them (two of each).
    LOGGER.info('Initializing a FeedProcessor')
    #self.feedprocessor.post_init()
    LOGGER.info('Initializing a FeedProcessor completed')
    try:
        LOGGER.info('Opening a connection')
        #self.dispatcher = Dispatcher(self.options)
        self.connection = SelectConnection(
            parameters=settings.pika_parameters,
            on_open_callback=self.on_connection_open)
        try:
            LOGGER.info('Starting ioloop')
            self.connection.ioloop.start()
        except KeyboardInterrupt:
            # Gracefully close the connection
            self.connection.close()
            # Loop until we're fully closed, will stop on its own
            self.connection.ioloop.start()
    except:
        # NOTE(review): bare except also swallows SystemExit, and this
        # prints the *list* returned by format_exception.
        (etype, eobj, etb) = sys.exc_info()
        print traceback.format_exception(etype, eobj, etb)
def test_fixed_delay_unlimited_retries_policy(self):
    """FixedDelayUnlimitedRetriesPolicy is an UnlimitedRetriesPolicy
    with a constant delay: initial == max and a zero increment."""
    consumer = create_consumer()
    conn = SelectConnection()
    consumer.connection = conn
    consumer.channel = Channel(conn, 10, None)
    policy = FixedDelayUnlimitedRetriesPolicy(consumer, 10,
                                              retry_queue_suffix='h')
    self.assertTrue(isinstance(policy, UnlimitedRetriesPolicy))
    self.assertEqual(policy.initial_delay, 10)
    self.assertEqual(policy.max_delay, 10)
    self.assertEqual(policy.delay_incremented_by, 0)
    self.assertEqual(policy.retry_queue_suffix, 'h')
def simple_server(ca_certs, keyfile, certfile, host_name, use_ssl):
    """Run a pika SelectConnection against *host_name* until CTRL-C.

    With use_ssl, connects on 5671 and requires a client certificate;
    otherwise plain AMQP on 5672.
    """
    if use_ssl:
        port = 5671
        ssl_opts = {
            'ca_certs': ca_certs,
            'keyfile': keyfile,
            'certfile': certfile,
            'cert_reqs': ssl.CERT_REQUIRED,
        }
    else:
        port = 5672
        ssl_opts = None
    conn = SelectConnection(
        ConnectionParameters(host_name, port, ssl=use_ssl,
                             ssl_options=ssl_opts),
        on_connected)
    try:
        conn.ioloop.start()
    except KeyboardInterrupt:
        conn.close()
        conn.ioloop.start()  # run until the close completes
def test_consumer_restart(self):
    """restart() must schedule _stop_consuming immediately (delay 0)
    via the connection's add_timeout."""
    consumer = create_consumer()
    conn = SelectConnection()
    consumer.connection = conn
    conn.ioloop.stop = MagicMock()
    conn.ioloop.start = MagicMock()

    def check_schedule(delay, callback):
        self.assertEqual(delay, 0)
        self.assertEqual(callback, consumer._stop_consuming)

    conn.add_timeout = MagicMock(side_effect=check_schedule)
    consumer.restart()
    conn.add_timeout.assert_called_once()
def test_consumer_reconnect(self):
    """_reconnect() stops the old ioloop, reconnects, and starts the
    new connection's ioloop."""
    consumer = create_consumer()
    conn = SelectConnection()
    consumer.connection = conn
    consumer._connect = MagicMock(side_effect=lambda: consumer.connection)
    conn.ioloop.stop = MagicMock()
    conn.ioloop.start = MagicMock()
    consumer._reconnect()
    consumer._connect.assert_called_once()
    conn.ioloop.stop.assert_called_once()
    conn.ioloop.start.assert_called_once()
def connect(self):
    """Connect to RabbitMQ (no-op while a connect is already in flight)."""
    print self
    if self.connecting:
        print ('1...PikaClient: Already connecting to RabbitMQ')
        return
    print ('1...PikaClient: Connecting to RabbitMQ on localhost:5672')
    self.connecting = True
    credentials = pika.PlainCredentials('guest', 'guest')
    # NOTE(review): `param` (with credentials) is built but never used;
    # the connection below uses default ConnectionParameters(host).
    param = pika.ConnectionParameters(host='localhost',
                                      port=5672,
                                      virtual_host="/",
                                      credentials=credentials)
    # Host may be overridden by the first CLI argument.
    host = (len(sys.argv) > 1) and sys.argv[1] or '127.0.0.1'
    self.connection = SelectConnection(ConnectionParameters(host),
                                       self.on_connected)
    if self.connection != None:
        print self.connection
        print 'connection'
def run(self):
    """ Run thread"""
    # Invoke any target passed to the Thread constructor first.
    threading.Thread.run(self)
    try:
        # get amqp connection params
        #self.mqConnection = self.serviceContext.get_object("mqConnection")
        parameters = self.server.serviceContext.get_object(
            "amqpServer_connection_pars")
        # Step #1: Connect to RabbitMQ
        self.server.mqConnection = SelectConnection(
            parameters, self.on_connected)
        # Blocks here until the connection's ioloop stops.
        self.server.mqConnection.ioloop.start()
        #channel.start_consuming()
    except Exception as ex:
        # Gracefully close the connection
        if self.server.mqConnection != None:
            self.server.mqConnection.close()
        # Loop until we're fully closed, will stop on its own
        #connection.ioloop.start()
        self.server.logger.error(traceback.format_exc())
def test_consumer_auto_reconnect_error(self):
    """A failed connection open schedules _reconnect after
    auto_reconnect_delay; with auto_reconnect off it stops the ioloop."""
    consumer = create_consumer()
    conn = SelectConnection()
    consumer.connection = conn
    conn.ioloop.stop = MagicMock()
    conn.ioloop.start = MagicMock()

    def check_schedule(delay, callback):
        self.assertEqual(delay, consumer.auto_reconnect_delay)
        self.assertEqual(callback, consumer._reconnect)

    conn.add_timeout = MagicMock(side_effect=check_schedule)

    # Simulate a reconnection failure
    consumer._on_open_connection_error(conn, Exception())
    conn.add_timeout.assert_called_once()

    # Shutdown path when auto-reconnect is disabled
    consumer.auto_reconnect = False
    consumer._on_open_connection_error(conn, Exception())
    conn.ioloop.stop.assert_called_once()
class dataChannel(object):
    """ The dataChannel is the base class of all our datasource.
    It's purpose is to: a). Setup the queues"""

    def __init__(self, server_name='test', mq_exchange='', mq_queue='',
                 mq_host=''):
        self.channel = None
        self.id = server_name
        self.queue_counter = 0
        self.queue = mq_queue
        self.routing_key = ''
        self.exchange = mq_exchange
        self.connection = None
        self.connected = False
        self.connecting = False
        self.rabbithost = mq_host
        logging.getLogger('pika').setLevel(logging.DEBUG)

    def get_connection(self):
        return self.connection

    def connect(self):
        # No-op while a connect is already in flight.
        if self.connecting:
            return
        self.connecting = True
        credentials = pika.PlainCredentials('guest', 'guest')
        params = pika.ConnectionParameters(host="hackinista.com",
                                           port=5672,
                                           virtual_host="/",
                                           credentials=credentials)
        # NOTE(review): `host` is computed from argv but never used;
        # the connection always targets hackinista.com.
        host = (len(sys.argv) > 1) and sys.argv[1] or '127.0.0.1'
        try:
            self.connection = SelectConnection(params, self.on_connected)
        except:
            # self.L.critical("Error connecting to rabbitmq on host =
            # "+self.host);
            sys.exit(-1)

    ###
    def on_connected(self, connection):
        self.connection = connection
        self.connection.channel(self.on_channel_open)
        self.connected = True

    def on_channel_open(self, channel):
        self.channel = channel
        try:
            self.channel.queue_declare(queue=self.queue,
                                       auto_delete=False,
                                       durable=True,
                                       exclusive=False,
                                       callback=self.on_queue_declared)
        except:
            print "Error declaring queue = " + self.queue
            sys.exit(-1)

    def on_queue_declared(self, frame):
        try:
            self.channel.queue_bind(exchange=self.exchange,
                                    queue=self.queue,
                                    routing_key=self.routing_key,
                                    callback=self.on_queue_bound)
        except:
            # NOTE(review): a bind failure is only printed, then ignored.
            print "Binding to queue = " + self.queue
            pass

    def on_queue_bound(self, frame):
        # Start consuming with manual acknowledgements.
        self.channel.basic_consume(consumer_callback=self.handle_delivery,
                                   queue=self.queue,
                                   no_ack=False)

    def handle_delivery(self, channel, method_frame, header_frame, body):
        print "7...Basic.Deliver %s delivery-tag %i: %s" %\
            (header_frame.content_type,
             method_frame.delivery_tag,
             body)
        self.data_op(body)
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)

    def data_op(self, args):
        # Subclasses override this with their payload handling.
        print "Please implement get_data"
class dataChannel(object):
    """ The dataChannel is the base class of all our datasource.
    It's purpose is to: a). Setup the queues"""

    def __init__(self, server_name='test', mq_exchange='', mq_queue='',
                 mq_host=''):
        self.channel = None
        self.id = server_name
        self.queue_counter = 0
        self.queue = mq_queue
        self.routing_key = ''
        self.exchange = mq_exchange
        self.connection = None
        self.connected = False
        self.connecting = False
        self.rabbithost = mq_host
        logging.getLogger('pika').setLevel(logging.DEBUG)

    def get_connection(self):
        return self.connection

    def connect(self):
        # No-op while a connect is already in flight.
        if self.connecting:
            return
        self.connecting = True
        credentials = pika.PlainCredentials('guest', 'guest')
        params = pika.ConnectionParameters(host="hackinista.com",
                                           port=5672,
                                           virtual_host="/",
                                           credentials=credentials)
        # NOTE(review): `host` is computed from argv but never used;
        # the connection always targets hackinista.com.
        host = (len(sys.argv) > 1) and sys.argv[1] or '127.0.0.1'
        try:
            self.connection = SelectConnection(params, self.on_connected)
        except:
            # self.L.critical("Error connecting to rabbitmq on host =
            # "+self.host);
            sys.exit(-1)

    ###
    def on_connected(self, connection):
        self.connection = connection
        self.connection.channel(self.on_channel_open)
        self.connected = True

    def on_channel_open(self, channel):
        self.channel = channel
        try:
            self.channel.queue_declare(queue=self.queue,
                                       auto_delete=False,
                                       durable=True,
                                       exclusive=False,
                                       callback=self.on_queue_declared)
        except:
            print "Error declaring queue = " + self.queue
            sys.exit(-1)

    def on_queue_declared(self, frame):
        try:
            self.channel.queue_bind(exchange=self.exchange,
                                    queue=self.queue,
                                    routing_key=self.routing_key,
                                    callback=self.on_queue_bound)
        except:
            # NOTE(review): a bind failure is only printed, then ignored.
            print "Binding to queue = " + self.queue
            pass

    def on_queue_bound(self, frame):
        # Start consuming with manual acknowledgements.
        self.channel.basic_consume(consumer_callback=self.handle_delivery,
                                   queue=self.queue,
                                   no_ack=False)

    def handle_delivery(self, channel, method_frame, header_frame, body):
        print "7...Basic.Deliver %s delivery-tag %i: %s" %\
            (header_frame.content_type,
             method_frame.delivery_tag,
             body)
        self.data_op(body)
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)

    def data_op(self, args):
        # Subclasses override this with their payload handling.
        print "Please implement get_data"
ssl_options = {} # Uncomment this to test client certs, change to your cert paths # Uses certs as generated from http://www.rabbitmq.com/ssl.html ssl_options = { "ca_certs": "/etc/rabbitmq/new/server/chain.pem", "certfile": "/etc/rabbitmq/new/client/cert.pem", "keyfile": "/etc/rabbitmq/new/client/key.pem", "cert_reqs": CERT_REQUIRED } # Connect to RabbitMQ host = (len(sys.argv) > 1) and sys.argv[1] or '127.0.0.1' parameters = ConnectionParameters(host, 5671, ssl=True, ssl_options=ssl_options) connection = SelectConnection(parameters, on_connected) # Loop until CTRL-C try: # Start our blocking loop connection.ioloop.start() except KeyboardInterrupt: # Close the connection connection.close() # Loop until the conneciton is closed connection.ioloop.start()
def connect(self):
    """Lower the select-poller timeout for responsiveness, then connect
    and block in the ioloop until it stops."""
    SelectPoller.TIMEOUT = .1
    conn = SelectConnection(self.parameters, self.on_connected)
    self.connection = conn
    conn.ioloop.start()
def connect(self):
    """Create and return a SelectConnection.

    Sets the SelectPoller timeout to ``self.poller_delay`` seconds and
    keeps the ioloop running after close (stop_ioloop_on_close=False).
    """
    SelectPoller.TIMEOUT = float(self.poller_delay)
    return SelectConnection(self.parameters,
                            self.on_connection_open,
                            stop_ioloop_on_close=False)
def test_consumer(self):
    """End-to-end unit test of Consumer: constructor attributes, the
    abort/skip/error helpers, exchange binds, handler registration,
    retry-policy wiring, and _on_message ack/reject behaviour for
    every handler outcome."""
    amqp_url = r'amqp://*****:*****@localhost:5672/%2f'
    queue_name = 'queue1'
    durable = True
    exclusive = True
    dlx_name = 'dead_exchange'
    auto_reconnect = True
    auto_reconnect_delay = 10
    consumer = Consumer(amqp_url=amqp_url,
                        queue_name=queue_name,
                        durable=durable,
                        exclusive=exclusive,
                        dlx_name=dlx_name,
                        auto_reconnect=auto_reconnect,
                        auto_reconnect_delay=auto_reconnect_delay)
    self.assertEqual(consumer.amqp_url, amqp_url)
    self.assertEqual(consumer.queue_name, queue_name)
    self.assertEqual(consumer.durable, durable)
    self.assertEqual(consumer.exclusive, exclusive)
    self.assertEqual(consumer.dlx_name, dlx_name)
    self.assertEqual(consumer.auto_reconnect, auto_reconnect)
    self.assertEqual(consumer.auto_reconnect_delay, auto_reconnect_delay)
    # Test abort
    with self.assertRaises(AbortHandling) as cm:
        consumer.abort('some reason')
    self.assertEqual(cm.exception.reason, 'some reason')
    # Test skip
    with self.assertRaises(SkipHandling) as cm:
        consumer.skip('some reason')
    self.assertEqual(cm.exception.reason, 'some reason')
    # Test error
    with self.assertRaises(HandlingError) as cm:
        consumer.error('some error')
    self.assertEqual(cm.exception.error_msg, 'some error')
    # Test bind to exchange
    exchange_name = 'exchange1'
    routing_key = 'some_routing_key'
    declare_exchange = True
    declare_kwargs = {'type': 'topic'}
    consumer.add_exchange_bind(exchange_name=exchange_name,
                               routing_key=routing_key,
                               declare_exchange=declare_exchange,
                               declare_kwargs=declare_kwargs)
    self.assertEqual(
        consumer._exchange_binds[0],
        ((exchange_name, routing_key, declare_exchange, declare_kwargs)))
    # declare_kwargs is mandatory when declare_exchange is requested.
    with self.assertRaises(AssertionError) as cm:
        consumer.add_exchange_bind(exchange_name=exchange_name,
                                   routing_key=routing_key,
                                   declare_exchange=declare_exchange,
                                   declare_kwargs=None)
    with self.assertRaises(AssertionError) as cm:
        consumer.add_exchange_bind(exchange_name=exchange_name,
                                   routing_key=routing_key,
                                   declare_exchange=declare_exchange,
                                   declare_kwargs={})
    # Test add handler
    consumer.add_handler('some_pattern', Handler, None)
    self.assertEqual(len(consumer.rules), 2)
    self.assertEqual(
        isinstance(consumer.rules[-2].matcher, MessageTypeMatches), True)
    self.assertEqual(consumer.rules[-2].matcher.message_type_pattern,
                     'some_pattern')
    consumer.add_handler(AnyMatches(), Handler, None)
    self.assertEqual(len(consumer.rules), 3)
    self.assertEqual(isinstance(consumer.rules[-2].matcher, AnyMatches),
                     True)
    # Test set default handler
    consumer.set_default_handler(Handler)
    self.assertEqual(isinstance(consumer.rules[-1].matcher, AnyMatches),
                     True)
    consumer.set_default_handler(None)
    self.assertEqual(isinstance(consumer.rules[-1].matcher, NoneMatches),
                     True)
    # Test set/unset policy
    policy = FixedDelayLimitedRetriesPolicy(consumer=consumer, delay=5,
                                            retries_limit=15)
    consumer.set_retry_policy(policy)
    self.assertEqual(consumer._retry_policy, policy)
    self.assertRaises(AssertionError, consumer.set_retry_policy,
                      'non policy object')
    consumer.unset_retry_policy()
    self.assertIsNone(consumer._retry_policy)
    # Test handling message
    envelope = create_envelope()
    # Create fresh consumer
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    # Test no handler, should reject message
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    consumer.channel.basic_reject.assert_called_with(envelope.delivery_tag,
                                                     requeue=False)
    # Test positive message handling
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()

    class SuccessHandler(Handler):
        def __init__(self, envelope, **kwargs):
            super(SuccessHandler, self).__init__(envelope, **kwargs)
            self.pre_handle = pre_handle_mock
            self.handle = handle_mock
            self.post_handle = post_handle_mock

    consumer.add_handler(AnyMatches(), SuccessHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    pre_handle_mock.assert_called_once()
    handle_mock.assert_called_once()
    post_handle_mock.assert_called_once()
    consumer.channel.basic_ack.assert_called_with(envelope.delivery_tag)
    # Test skip message handling
    # Create fresh consumer
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()

    class SkipHandler(MessageHandler):
        def __init__(self, consumer, envelope, **kwargs):
            super(SkipHandler, self).__init__(consumer, envelope, **kwargs)
            self.handle = handle_mock
            self.post_handle = post_handle_mock

        def pre_handle(self):
            self.skip(reason='some reason')

    consumer.add_handler(AnyMatches(), SkipHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    # Skipped messages are still acknowledged.
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_ack.assert_called_with(envelope.delivery_tag)
    # Test abort message handling
    # Create fresh consumer
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()

    class AbortHandler(MessageHandler):
        def __init__(self, consumer, envelope, **kwargs):
            super(AbortHandler, self).__init__(consumer, envelope,
                                               **kwargs)
            self.handle = handle_mock
            self.post_handle = post_handle_mock

        def pre_handle(self):
            self.abort(reason='some reason')

    consumer.add_handler(AnyMatches(), AbortHandler)
    consumer._on_message(mock_channel,
                         envelope.delivery_info, envelope.properties,
                         envelope.payload)
    # Aborted messages are rejected without requeueing.
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_reject.assert_called_with(envelope.delivery_tag,
                                                     requeue=False)
    consumer.channel.basic_ack.assert_not_called()
    # Test error message handling
    # Create fresh consumer
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()

    class ErrorHandler(MessageHandler):
        def __init__(self, consumer, envelope, **kwargs):
            super(ErrorHandler, self).__init__(consumer, envelope,
                                               **kwargs)
            self.handle = handle_mock
            self.post_handle = post_handle_mock

        def pre_handle(self):
            self.error(error_msg='some reason')

    consumer.add_handler(AnyMatches(), ErrorHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_reject.assert_called_with(envelope.delivery_tag,
                                                     requeue=False)
    consumer.channel.basic_ack.assert_not_called()
    # Test error message handling with retry policy
    # Create fresh consumer
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()
    retry_policy = RetryPolicy()
    retry_policy.retry = MagicMock()
    consumer.set_retry_policy(retry_policy)
    consumer.add_handler(AnyMatches(), ErrorHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    # With a retry policy the message is neither acked nor rejected;
    # the policy's retry() takes over.
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_reject.assert_not_called()
    consumer.channel.basic_ack.assert_not_called()
    retry_policy.retry.assert_called_once()
    # Test error message handling; general exception
    # Create fresh consumer
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()

    class ExceHandler(MessageHandler):
        def __init__(self, consumer, envelope, **kwargs):
            super(ExceHandler, self).__init__(consumer, envelope,
                                              **kwargs)
            self.handle = handle_mock
            self.post_handle = post_handle_mock

        def pre_handle(self):
            raise Exception()

    consumer.add_handler(AnyMatches(), ExceHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_reject.assert_called_with(envelope.delivery_tag,
                                                     requeue=False)
    consumer.channel.basic_ack.assert_not_called()
    # Test error message handling; general exception with retry policy
    # Create fresh consumer
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()
    retry_policy = RetryPolicy()
    retry_policy.retry = MagicMock()
    consumer.set_retry_policy(retry_policy)
    consumer.add_handler(AnyMatches(), ExceHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_reject.assert_not_called()
    consumer.channel.basic_ack.assert_not_called()
    retry_policy.retry.assert_called_once()
    # Should handle AMQPConnectionError
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()

    class AMQPConnectionErrorHandler(MessageHandler):
        def __init__(self, consumer, envelope, **kwargs):
            super(AMQPConnectionErrorHandler, self).__init__(consumer,
                                                             envelope,
                                                             **kwargs)
            self.handle = handle_mock
            self.post_handle = post_handle_mock

        def pre_handle(self):
            raise AMQPConnectionError()

    consumer.add_handler(AnyMatches(), AMQPConnectionErrorHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    # Connection errors leave the message untouched (no ack/reject).
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_reject.assert_not_called()
    consumer.channel.basic_ack.assert_not_called()
    # Should handle AMQPChannelError
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    mock_channel.basic_nack = MagicMock()
    mock_channel.basic_reject = MagicMock()
    consumer.channel = mock_channel
    consumer.channel.basic_ack = MagicMock()
    pre_handle_mock = MagicMock()
    handle_mock = MagicMock()
    post_handle_mock = MagicMock()

    class AMQPChannelErrorHandler(MessageHandler):
        def __init__(self, consumer, envelope, **kwargs):
            super(AMQPChannelErrorHandler, self).__init__(consumer,
                                                          envelope,
                                                          **kwargs)
            self.handle = handle_mock
            self.post_handle = post_handle_mock

        def pre_handle(self):
            raise AMQPChannelError()

    consumer.add_handler(AnyMatches(), AMQPChannelErrorHandler)
    consumer._on_message(mock_channel, envelope.delivery_info,
                         envelope.properties, envelope.payload)
    handle_mock.assert_not_called()
    post_handle_mock.assert_not_called()
    consumer.channel.basic_reject.assert_not_called()
    consumer.channel.basic_ack.assert_not_called()
def _start(self):
    """Open the asynchronous AMQP connection and run its I/O loop.

    Creates a pika ``SelectConnection`` using the parameters stored on
    this instance and blocks in the ioloop; ``_on_connected`` fires once
    the broker handshake completes.
    """
    connection = SelectConnection(self.conn_params, self._on_connected)
    self.conn = connection
    connection.ioloop.start()
def test_consumer_start_up_process(self):
    """Walk the consumer through its full start-up choreography
    (connect -> open channel -> declare exchange -> declare queue ->
    bind -> consume) and then through an orderly stop, with every pika
    call replaced by a MagicMock whose side_effect immediately invokes
    the success callback.
    """
    consumer = create_consumer()
    envelope = create_envelope()
    consumer.connection = SelectConnection()
    mock_channel = Channel(consumer.connection, 10, None)

    # Mock exchange_declare
    mock_channel.exchange_declare = MagicMock()

    def on_exchange_declareok(callback, exchange, exchange_type, durable,
                              auto_delete, internal, arguments):
        # Verify the declare arguments, then simulate Exchange.DeclareOk.
        self.assertEqual(exchange, 'exchange1')
        self.assertEqual(exchange_type, 'topic')
        self.assertEqual(durable, False)
        self.assertEqual(auto_delete, False)
        self.assertEqual(internal, False)
        self.assertEqual(arguments, None)
        callback(None)

    mock_channel.exchange_declare.side_effect = on_exchange_declareok

    # Mock queue_declare
    mock_channel.queue_declare = MagicMock()

    def on_consumer_queue_declareok(callback, queue, durable, exclusive,
                                    arguments):
        # The consumer queue must carry the dead-letter-exchange argument.
        self.assertEqual(queue, consumer.queue_name)
        self.assertEqual(durable, consumer.durable)
        self.assertEqual(exclusive, consumer.exclusive)
        self.assertEqual(arguments,
                         {'x-dead-letter-exchange': consumer.dlx_name})
        callback(None)

    mock_channel.queue_declare.side_effect = on_consumer_queue_declareok

    # Mock queue_bind
    mock_channel.queue_bind = MagicMock()

    def on_bindok(callback, queue, exchange, routing_key):
        self.assertEqual(queue, consumer.queue_name)
        self.assertEqual(routing_key, 'key1')
        callback(None)

    mock_channel.queue_bind.side_effect = on_bindok
    mock_channel.add_on_close_callback = MagicMock()

    # Mock basic_consume
    mock_channel.basic_consume = MagicMock()

    def on_message(callback, queue):
        # Once consuming starts, deliver one message straight away.
        self.assertEqual(queue, consumer.queue_name)
        mock_channel.basic_reject = MagicMock()
        callback(mock_channel, envelope.delivery_info, envelope.properties,
                 envelope.payload)

    mock_channel.basic_consume.side_effect = on_message

    # connection.channel method used to open a new channel
    # mock method is used to return the created mock_channel
    consumer.connection.channel = MagicMock()

    def on_channel_open(on_open_callback):
        on_open_callback(mock_channel)

    consumer.connection.channel.side_effect = on_channel_open
    consumer.connection.ioloop.start = MagicMock()
    consumer.connection.ioloop.stop = MagicMock()

    # Same exchange/key registered twice: once with declaration, once
    # bind-only — together they should produce exactly two queue_bind calls.
    consumer.add_exchange_bind(exchange_name='exchange1',
                               routing_key='key1',
                               declare_exchange=True,
                               declare_kwargs={'type': 'topic'})
    consumer.add_exchange_bind(exchange_name='exchange1', routing_key='key1')

    # Initiate open connection manually
    consumer._on_connection_open(None)

    mock_channel.add_on_close_callback.assert_called_once()
    mock_channel.queue_declare.assert_called_once()
    # The delivered message had no matching handler, so it was rejected.
    mock_channel.basic_reject.assert_called_once()
    mock_channel.queue_declare.assert_called_once()
    queue_bind_count = len(mock_channel.queue_bind.call_args_list)
    self.assertEqual(queue_bind_count, 2)
    mock_channel.basic_consume.assert_called_once()

    # Stopping consumer
    # Mock add_timeout
    consumer.connection.add_timeout = MagicMock()

    def add_timeout_side_effect(delay, callback):
        # Run scheduled callbacks synchronously instead of via the ioloop.
        callback()

    consumer.connection.add_timeout.side_effect = add_timeout_side_effect

    # Mock basic_cancel
    consumer.channel.basic_cancel = MagicMock()

    def on_cancelok(callback, consumer_tag):
        callback(None)

    consumer.channel.basic_cancel.side_effect = on_cancelok

    # Mock channel close
    consumer.channel.close = MagicMock()

    def channel_close_side_effect():
        consumer._on_channel_closed(consumer.channel, '', '')

    consumer.channel.close.side_effect = channel_close_side_effect

    # Mock connection close
    consumer.connection.close = MagicMock()

    def on_connection_closed():
        consumer._on_connection_closed(consumer.connection, '', '')

    consumer.connection.close.side_effect = on_connection_closed

    # Simulate stop
    consumer.stop()
    consumer.connection.add_timeout.assert_called_once()
    consumer.connection.close.assert_called_once()
    self.assertIsNone(consumer.channel)
    consumer.connection.ioloop.stop.assert_called_once()
def test_limited_retries_policy(self):
    """Exercise LimitedRetriesPolicy: each retry republishes the message
    to a per-delay TTL'd retry queue that dead-letters back to the
    consumer queue; the retry count is derived from the 'x-death'
    header, and once the configured delays are exhausted the message is
    rejected without requeue.
    """
    envelope = create_envelope()
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    consumer.connection = mock_connection
    consumer.channel = mock_channel
    retry_policy = LimitedRetriesPolicy(consumer=consumer,
                                        retry_delays=(1, 5, 10, 50, 5 * 60),
                                        retry_queue_suffix='r')
    mock_channel.queue_declare = MagicMock()
    mock_channel.basic_publish = MagicMock()
    mock_channel.basic_ack = MagicMock()
    mock_channel.basic_reject = MagicMock()

    # First retry (no x-death header yet): uses the first delay, 1s = 1000ms.
    retry_policy.retry(envelope)
    mock_channel.queue_declare.assert_called_with(
        callback=None,
        queue='{}.{}.{}'.format(consumer.queue_name,
                                retry_policy.retry_queue_suffix, 1000),
        durable=consumer.durable,
        nowait=True,
        arguments={
            'x-dead-letter-exchange': '',
            'x-dead-letter-routing-key': consumer.queue_name,
            'x-message-ttl': 1000,
            'x-expires': retry_policy.min_retry_queue_ttl
        })
    mock_channel.basic_publish.assert_called_with(
        exchange='',
        routing_key='{}.{}.{}'.format(consumer.queue_name,
                                      retry_policy.retry_queue_suffix, 1000),
        properties=envelope.properties,
        body=envelope.payload)
    # Original delivery is acked after the republish.
    mock_channel.basic_ack.assert_called_with(envelope.delivery_tag)
    # Original delivery info is preserved in a header for later handlers.
    self.assertEqual(
        envelope.get_header('x-original-delivery-info'), {
            'consumer_tag': envelope.consumer_tag,
            'delivery_tag': envelope.delivery_tag,
            'redelivered': envelope.redelivered,
            'exchange': envelope.exchange,
            'routing_key': envelope.routing_key
        })

    # Simulate 3 prior deaths in a retry queue -> next delay is the 5th
    # entry (5 minutes); entries for unrelated queues must be ignored.
    envelope.set_header('x-death', [{
        'queue': consumer.queue_name,
        'count': 1
    }, {
        'queue': '{}.{}.{}'.format(consumer.queue_name,
                                   retry_policy.retry_queue_suffix, 10000),
        'count': 3
    }, {
        'queue': 'some-queue',
        'count': 1
    }])
    retry_policy.retry(envelope)
    delay = 5 * 60
    retry_queue_name = '{}.{}.{}'.format(consumer.queue_name,
                                         retry_policy.retry_queue_suffix,
                                         delay * 1000)
    mock_channel.queue_declare.assert_called_with(
        callback=None,
        queue=retry_queue_name,
        durable=consumer.durable,
        nowait=True,
        arguments={
            'x-dead-letter-exchange': '',
            'x-dead-letter-routing-key': consumer.queue_name,
            'x-message-ttl': delay * 1000,
            # Retry queue expires after twice the delay.
            'x-expires': delay * 2 * 1000
        })
    mock_channel.basic_publish.assert_called_with(
        exchange='',
        routing_key=retry_queue_name,
        properties=envelope.properties,
        body=envelope.payload)
    mock_channel.basic_ack.assert_called_with(envelope.delivery_tag)
    self.assertEqual(
        envelope.get_header('x-original-delivery-info'), {
            'consumer_tag': envelope.consumer_tag,
            'delivery_tag': envelope.delivery_tag,
            'redelivered': envelope.redelivered,
            'exchange': envelope.exchange,
            'routing_key': envelope.routing_key
        })

    # 4 deaths: all configured delays consumed -> message is rejected.
    envelope.set_header('x-death', [{
        'queue': consumer.queue_name,
        'count': 1
    }, {
        'queue': '{}.{}.{}'.format(consumer.queue_name,
                                   retry_policy.retry_queue_suffix, 10000),
        'count': 4
    }, {
        'queue': 'some-queue',
        'count': 1
    }])
    retry_policy.retry(envelope)
    mock_channel.basic_reject.assert_called_with(envelope.delivery_tag,
                                                 requeue=False)

    # Far past the limit: still rejected without requeue.
    envelope.set_header('x-death', [{
        'queue': consumer.queue_name,
        'count': 1
    }, {
        'queue': '{}.{}.{}'.format(consumer.queue_name,
                                   retry_policy.retry_queue_suffix, 10000),
        'count': 46
    }, {
        'queue': 'some-queue',
        'count': 1
    }])
    retry_policy.retry(envelope)
    mock_channel.basic_reject.assert_called_with(envelope.delivery_tag,
                                                 requeue=False)
class Amqp(object):
    """Asynchronous AMQP client built on pika's SelectConnection.

    Builds ConnectionParameters from a config dict (with optional SSL /
    external-certificate auth) and falls back to a reduced option set
    when the installed pika version rejects the modern keyword
    arguments.

    NOTE(review): methods use ``self.logger`` and ``self.on_channel_open``,
    neither of which is defined here — presumably provided by a subclass
    or mixin; confirm before reuse.
    """

    def __init__(self, conf):
        # RabbitMQ general options
        self.cacertfile = conf['cacertfile']
        self.certfile = conf['certfile']
        self.exchange = conf['exchange']
        self.status_exchange = conf['status_exchange']
        self.fail_if_no_peer_cert = conf['fail_if_no_peer_cert']
        self.heartbeat = conf['heartbeat']
        self.host = conf['host']
        self.keyfile = conf['keyfile']
        self.password = conf['password']
        self.port = conf['port']
        self.ssl_port = conf['ssl_port']
        self.queue = conf['uuid']
        self.retry_timeout = conf['retry_timeout']
        self.ssl_auth = conf['ssl_auth']
        self.use_ssl = conf['use_ssl']
        self.username = conf['username']
        self.vhost = conf['vhost']
        # Connection and channel initialization
        self.connection = None
        self.channel = None
        # Plain credentials
        credentials = PlainCredentials(self.username, self.password)
        pika_options = {'host': self.host,
                        'port': self.port,
                        'virtual_host': self.vhost,
                        'credentials': credentials}
        # SSL options
        if self.use_ssl:
            pika_options['ssl'] = True
            pika_options['port'] = self.ssl_port
            if self.ssl_auth:
                # Client-certificate auth: the broker identifies us by cert.
                pika_options['credentials'] = ExternalCredentials()
                pika_options['ssl_options'] = {
                    'ca_certs': self.cacertfile,
                    'certfile': self.certfile,
                    'keyfile': self.keyfile,
                    'cert_reqs': CERT_REQUIRED
                }
        if self.heartbeat:
            pika_options['heartbeat'] = self.heartbeat
        self.parameters = None
        try:
            self.parameters = pika.ConnectionParameters(**pika_options)
        except TypeError as err:
            self.logger.debug(err)
            # Let's be compatible with original pika version (no integer for
            # heartbeats and no ssl).
            self.logger.warning("Wrong pika lib version, won't use ssl.")
            pika_options['heartbeat'] = True
            if self.use_ssl:
                self.use_ssl = False
                pika_options['port'] = self.port
                del pika_options['ssl']
                if self.ssl_auth:
                    self.ssl_auth = False
                    del pika_options['ssl_options']
            self.parameters = pika.ConnectionParameters(**pika_options)

    def connect(self):
        """Open the connection and block in the pika ioloop."""
        # Shorten the poller timeout so callbacks fire promptly.
        SelectPoller.TIMEOUT = .1
        self.connection = SelectConnection(self.parameters, self.on_connected)
        self.connection.ioloop.start()

    def close(self, amqperror=False):
        """Close the connection if it is open and not already closing."""
        if (self.connection and not self.connection.closing
                and not self.connection.closed):
            self.logger.debug("Closing connection")
            self.connection.close()
            #self.connection.ioloop.start()

    def on_remote_close(self, code, text):
        """Handle a channel closed by the broker; non-200 codes are errors."""
        self.logger.debug("Remote channel close, code %d" % code)
        time.sleep(2)
        if code != 200:
            self.close()
            raise AmqpError(text)

    def on_connection_closed(self, frame):
        # Stop the ioloop so connect() returns.
        self.connection.ioloop.stop()

    def on_connected(self, connection):
        """Connection-open callback: register close handler, open a channel."""
        self.connection = connection
        self.connection.add_on_close_callback(self.on_connection_closed)
        self.connection.channel(self.on_channel_open)
class NewsFeedBee(Thread):
    """Worker thread that fetches feeds (via a Dispatcher) and publishes
    the parsed feed data as JSON to a RabbitMQ exchange.

    The pika connection is opened asynchronously in post_init(); the
    thread itself is only started once the queue binding completes
    (see on_queue_binded).
    """

    def __init__(self, options):
        super(NewsFeedBee, self).__init__()
        self.options = options
        self.dispatcher = None
        self.feeds = []
        self.update_feeds()
        # RabbitMQ stuffs
        self.channel = None
        self.connection = None
        self.alive = False
        self.stop_flag = False
        #self.r = None
        #self.r_last_touched = datetime.now()

    def update_feeds(self):
        """Load the active feeds to process from the PostgreSQL database."""
        global db_conn
        try:
            LOGGER.info("Obtaining feeds to process from database")
            if db_conn is None:
                connect_pg()
            cursor = db_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            cursor.execute("SELECT id, name, feed_url, etag, last_modified " \
                           "FROM feedjack_feed " \
                           "WHERE is_active=%s " "ORDER BY id", (True,))
            results = cursor.fetchall()
            for result in results:
                #if result['last_modified'] == None:
                #    last_modified = None
                #else:
                #    last_modified = str(result['last_modified'])
                self.feeds.append({
                    'id': result['id'],
                    'name': result['name'],
                    'feed_url': str(result['feed_url']),
                    'etag': str(result['etag']),
                    'last_modified': result['last_modified']
                })
            LOGGER.info("Obtaining feeds to process from database completed")
        except:
            # NOTE(review): bare except swallows all errors after rollback.
            db_conn.rollback()
            LOGGER.info(sys.exc_info())

    def post_init(self):
        # Initialize the Dispatcher task object.
        # Asynchronously open the RabbitMQ connection, create a channel,
        # declare the exchange and the queue, bind them together, and then
        # run the thread's run() method; the ioloop is started first.
        self.dispatcher = Dispatcher(self.options)
        try:
            LOGGER.info('Opening a pika connection')
            self.connection = SelectConnection(
                parameters=settings.pika_parameters,
                on_open_callback=self.on_connection_open,
                on_open_error_callback=self.on_connection_open_error)
            try:
                LOGGER.info('Starting ioloop')
                self.connection.ioloop.start()
            except KeyboardInterrupt:
                # Gracefully close the connection
                self.connection.close()
                # Loop until we're fully closed, will stop on its own
                self.connection.ioloop.start()
        except:
            (etype, eobj, etb) = sys.exc_info()
            print traceback.format_exception(etype, eobj, etb)

    def on_connection_open(self, unused_connection):
        LOGGER.info('Opening a connection completed')
        self.open_channel()

    def on_connection_open_error(self, connection):
        LOGGER.info('Opening a connection failed')

    def open_channel(self):
        LOGGER.info('Opening a channel')
        self.connection.channel(on_open_callback=self.on_channel_open)

    def on_channel_open(self, new_channel):
        LOGGER.info('Opening a channel completed')
        self.channel = new_channel
        self.declare_exchange()

    def declare_exchange(self):
        """Declare the raw-feed exchange using the settings constants."""
        LOGGER.info('Declaring an exchange')
        self.channel.exchange_declare(
            exchange=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_NAME,
            exchange_type=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_TYPE,
            passive=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_PASSIVE,
            durable=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_DURABLE,
            auto_delete=settings.
            RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_AUTO_DELETE,
            internal=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_INTERNAL,
            nowait=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_NOWAIT,
            arguments=None,  # Custom key/value pair arguments for the exchange
            callback=self.on_exchange_declared
        )  # Call this method on Exchange.DeclareOk

    def on_exchange_declared(self, frame):
        LOGGER.info('Declaring an exchange completed')
        self.declare_queue()

    def declare_queue(self):
        LOGGER.info('Declaring a queue')
        self.channel.queue_declare(
            self.on_queue_declared,
            settings.RABBITMQ_NEWSFEED_RAW_FEED_QUEUE_NAME)

    def on_queue_declared(self, method_frame):
        LOGGER.info('Declaring a queue completed')
        self.bind_queue()

    def bind_queue(self):
        LOGGER.info('Binding a queue')
        self.channel.queue_bind(
            callback=self.on_queue_binded,
            queue=settings.RABBITMQ_NEWSFEED_RAW_FEED_QUEUE_NAME,
            exchange=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_NAME,
            routing_key=settings.RABBITMQ_NEWSFEED_RAW_FEED_ROUTING_KEY)

    def on_queue_binded(self, frame):
        LOGGER.info('Binding a queue completed')
        self.start()  # Start the thread's activity

    def run(self):
        """Main loop: fetch every feed, strip unserializable *_parsed
        fields, JSON-encode the result and publish it, then sleep."""
        while True:
            for feed in self.feeds:
                if not self.dispatcher:
                    self.dispatcher = Dispatcher(self.options)
                #if not self.channel:
                #    self.on_connected()
                # Data returned by the dispatcher after processing the feed
                # (i.e. the data fetched by feedparser).
                data = self.dispatcher.add_job(feed)
                if data is not None and isinstance(data, dict):
                    if settings.DEBUG:
                        filename = 'tmp/feed_' + str(feed['id']) + '_raw.txt'
                        with open(filename, 'w') as f:
                            f.write((pformat(data)).decode('utf-8'))
                    # The *_parsed entries are time.struct_time values and
                    # cannot be JSON-encoded, so drop them.
                    if hasattr(data, 'updated_parsed'):
                        del data['updated_parsed']
                    if hasattr(data['feed'], 'published_parsed'):
                        #data['feed']['published'] = datetime.fromtimestamp(
                        #    mktime(data['feed']['published_parsed']))
                        del data['feed']['published_parsed']
                    if hasattr(data['feed'], 'updated_parsed'):
                        #data['feed']['updated'] = datetime.fromtimestamp(
                        #    mktime(data['feed']['updated_parsed']))
                        del data['feed']['updated_parsed']
                    # "data['entries']" is a list of dictionaries.
                    # Each dictionary contains data from a different entry.
                    for idx, val in enumerate(data['entries']):
                        if hasattr(val, 'created_parsed'):
                            #data['entries'][idx]['created'] = datetime.fromtimestamp(
                            #    mktime(val['created_parsed']))
                            del data['entries'][idx]['created_parsed']
                        if hasattr(val, 'expired_parsed'):
                            del data['entries'][idx]['expired_parsed']
                        if hasattr(val, 'published_parsed'):
                            del data['entries'][idx]['published_parsed']
                        if hasattr(val, 'updated_parsed'):
                            del data['entries'][idx]['updated_parsed']
                    data['feed_id'] = feed['id']
                    #data['last_modified'] = feed['last_modified']
                    try:
                        # If this throws an error, don't put message in queue
                        json_data = JSON_ENCODER.encode(data)
                        if settings.DEBUG:
                            filename = 'tmp/feed_' + str(feed['id']) + '.json'
                            with open(filename, 'w') as f:
                                #simplejson.dump(json_data, f)
                                f.write(json_data)
                        LOGGER.info('Publishing data to queue')
                        self.channel.basic_publish(
                            exchange=settings.
                            RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_NAME,  # The exchange to publish to
                            routing_key=settings.
                            RABBITMQ_NEWSFEED_RAW_FEED_ROUTING_KEY,  # The routing key to bind on
                            body=json_data)  # The message body
                        LOGGER.info('Publishing data to queue completed')
                    except simplejson.JSONDecodeError:
                        LOGGER.info(sys.exc_info())
                    except KeyboardInterrupt:
                        LOGGER.info("KeyboardInterrupt, so quitting! Bye!")
                        quit()
                    except SystemExit:
                        quit()
                    except:
                        LOGGER.info(sys.exc_info())
            # Suspend execution for the given number of seconds.
            # NOTE(review): sleep placed after the full feed pass —
            # original formatting was ambiguous; confirm it is not per-feed.
            LOGGER.info('Sleeping ' + str(self.options.sleeptime) +
                        ' seconds. Zzz...')
            sleep(self.options.sleeptime)

    def stop(self):
        pass
class dataChannel:
    """Base class for data sources: sets up the AMQP queues and consumes
    messages from them.

    The exchange/queue/routing-key particulars are currently hard-coded
    test values; the intent (per the comments below) is to look them up
    in MongoDB per data channel.
    """

    def __init__(self, ds_name):
        self.channel = None
        self.dc_id = "eek"  #dc_id
        ## query mongoDB to find all the particulars about this
        ## data channel including: which queue is listening to,
        ## which exchange, the routing key..etc.
        self.ret_queue = "ret_queue"
        self.connection = None
        self.channel = None
        self.connected = False;
        self.connecting = False;
        self.exchange = "test_x";
        self.queue = "test_q"
        self.routing_key = "test_q"
        ## use the ds to the find which exchange and which queue this
        ## datachannel listens

    def mongo_db(self):
        ## connect to the mongodb
        mongo_conn = Connection('localhost', 27017)
        db = mongo_conn['data_channels']
        coll = db['bbox_pts']

    def connect(self):
        """Open the async connection to RabbitMQ (idempotent while a
        connection attempt is already in flight)."""
        print self
        if self.connecting:
            print ('1...PikaClient: Already connecting to RabbitMQ')
            return
        print ('1...PikaClient: Connecting to RabbitMQ on localhost:5672')
        self.connecting = True
        credentials = pika.PlainCredentials('guest', 'guest')
        # NOTE(review): `param` (and its credentials) is built but never
        # used — the connection below uses ConnectionParameters(host)
        # with default credentials instead; confirm which was intended.
        param = pika.ConnectionParameters(host='localhost',
                                          port=5672,
                                          virtual_host="/",
                                          credentials=credentials)
        host = (len(sys.argv) > 1) and sys.argv[1] or '127.0.0.1'
        self.connection = SelectConnection(ConnectionParameters(host),
                                           self.on_connected)
        if self.connection != None:
            print self.connection
            print 'connection'

    def on_connected(self, connection):
        """Connection-open callback: remember the connection, open channel."""
        print '2...PikaClient: Connected to RabbitMQ on localhost:5672'
        self.connection = connection
        self.connection.channel(self.on_channel_open)
        self.connected = True

    def on_channel_open(self, channel):
        print ('3...PikaClient: Channel Open, Declaring Exchange')
        self.channel = channel
        self.channel.exchange_declare(exchange=self.exchange,
                                      type="direct",
                                      auto_delete=False,
                                      durable=True,
                                      callback=self.on_exchange_declared)

    def on_exchange_declared(self, frame):
        print ('4...PikaClient: Exchange Declared, Declaring Queue')
        self.channel.queue_declare(queue=self.queue,
                                   auto_delete=False,
                                   durable=True,
                                   exclusive=False,
                                   callback=self.on_queue_declared)

    def on_queue_declared(self, frame):
        print('5...PikaClient: Queue Declared, Binding Queue')
        print "demo_receive: Queue Declared"
        # self.channel.basic_consume(self.handle_delivery, queue='test_q')
        self.channel.queue_bind(exchange=self.exchange,
                                queue=self.queue,
                                routing_key=self.routing_key,
                                callback=self.on_queue_bound)

    def on_queue_bound(self, frame):
        print('6...PikaClient: Queue Bound, Issuing Basic Consume')
        self.channel.basic_consume(consumer_callback=self.handle_delivery,
                                   queue=self.queue)

    def handle_delivery(self, channel, method_frame, header_frame, body):
        """Consumer callback: print the delivery and ack it."""
        print "7...Basic.Deliver %s delivery-tag %i: %s" %\
            (header_frame.content_type, method_frame.delivery_tag, body)
        print body
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)

    def get_data(self, args):
        # Subclasses must override this hook.
        print "Please implement get_data"
def __init__(self, parameters=None, on_open_callback=None,
             reconnection_strategy=None):
    """Initialise the connection wrapper.

    Delegates all connection setup to ``SelectConnection.__init__`` and
    creates two empty bookkeeping sets: channel numbers known to be bad,
    and pending operations.
    """
    SelectConnection.__init__(
        self,
        parameters=parameters,
        on_open_callback=on_open_callback,
        reconnection_strategy=reconnection_strategy,
    )
    # Independent tracking state; both start empty.
    self._pending = set()
    self._bad_channel_numbers = set()
class RouterServer(object):
    """Validates and routes notifications.

    This acts as a separate server used only for production systems. In a
    development environment, the post office performs the validation
    itself, however this does not allow it to scale in situations where a
    large spike of requests come in (as the requests take longer to
    complete when validation is performed). Thus in production the post
    office simply dumps messages into queue which are then consumed,
    validated, and then routed to their destination.
    """

    def __init__(self, config):
        self.delivery_conn = None
        self.delivery_channel = None
        self.notifs_conn = None
        self.notifs_channel = None
        # Extract configuration
        self.broker_username = config['broker.username']
        self.broker_password = config['broker.password']
        self.broker_host = config['broker.host']
        self.broker_amqp_port = config['broker.amqp_port']
        self.broker_http_port = config['broker.http_port']
        self.broker_vhost = config['broker.vhost']
        self.incoming_exchange_name = config['broker.incoming_exchange_name']
        self.notifs_queue_name = config['broker.notifications_queue_name']
        # Create connection parameters object for easy reuse
        self.conn_params = pika.ConnectionParameters(
            credentials=pika.PlainCredentials(
                self.broker_username,
                self.broker_password,
            ),
            host=self.broker_host,
            port=self.broker_amqp_port,
            virtual_host=self.broker_vhost,
        )
        self.notifs_conn = SelectConnection(
            self.conn_params,
            self.on_notifs_connected,
        )

    @wsgify
    def __call__(self, request):
        """Allows router to be called directly by POST Office to perform
        validation. Intended to simplify development -- SHOULD NOT be used
        in a production system.
        """
        try:
            self.process_notification(request.body)
        except KeyError as kerr:
            raise HTTPBadRequest()
        except Exception as ex:
            raise HTTPInternalServerError()
        return HTTPAccepted()

    # XXX: Ugh...why must Pika be so difficult with multiple connections?
    def start(self, blocking=True):
        """Run the notifications ioloop, inline or on a daemon thread."""
        #Thread(target=self.delivery_conn.ioloop.start).start()
        if blocking:
            self.notifs_conn.ioloop.start()
        else:
            Thread(target=self.notifs_conn.ioloop.start).start()

    def shutdown(self):
        """Close channels and connections, then pump the ioloop until done."""
        self.delivery_channel.close()
        self.notifs_channel.close()
        self.delivery_conn.close()
        self.notifs_conn.close()
        # Loop until everything shuts down
        self.notifs_conn.ioloop.start()

    def on_delivery_connected(self, connection):
        connection.channel(self.on_delivery_channel_open)

    def on_delivery_channel_open(self, channel):
        self.delivery_channel = channel

    def on_notifs_connected(self, connection):
        connection.channel(self.on_notifications_channel_open)
        # TODO: Figure out how to get 2 connections working in Pika.
        # This is a hack for now, since we know we only have one broker.
        self.on_delivery_connected(connection)

    def on_notifications_channel_open(self, channel):
        self.notifs_channel = channel
        channel.queue_declare(
            queue=self.notifs_queue_name,
            durable=False,
            exclusive=False,
            auto_delete=False,
            callback=self.on_notifications_queue_declared,
        )

    def on_notifications_queue_declared(self, frame):
        # no_ack=True: broker will not wait for acknowledgements.
        self.notifs_channel.basic_consume(
            self.handle_notification,
            queue=self.notifs_queue_name,
            no_ack=True,
        )

    def handle_notification(self, channel, method, properties, body):
        print "Received notification"
        try:
            self.process_notification(body)
            print "Processed notification"
        except Exception as ex:
            print "Error processing notification: %r" % ex

    def process_notification(self, message):
        """Processes a message consumed from the incoming queue."""
        print " [x] %s" % message
        # Make sure JSON is valid
        notif = json.loads(message)
        if 'token' not in notif:
            raise KeyError('Notification key "token" not found.')
        if 'type' not in notif:
            # No type specified; use default
            notif['type'] = "text"
        if 'timestamp' not in notif:
            # No timestamp specified; create one
            notif['timestamp'] = int(time.time())
        if 'ttl' not in notif:
            # No TTL specified; create one (30 days)
            notif['ttl'] = 30*24*60*60
        if 'payload' not in notif:
            raise KeyError('Notification key "payload" not found.')
        if 'ciphertext' not in notif['payload']:
            raise KeyError('Notification key "payload.ciphertext" not found.')
        try:
            # Assert exchange is declared
            self.delivery_channel.exchange_declare(
                exchange=self.incoming_exchange_name,
                durable=True,
                type='fanout'
            )
            # Python's JSON parser assumes everything is Unicode, and Pika
            # uses the "+" operator when it shouldn't.
            token = notif['token'].encode('ascii')
            self.delivery_channel.basic_publish(
                exchange=self.incoming_exchange_name,
                routing_key=token,
                body=message
            )
            print "Notification routed to user exchange"
        except Exception as ex:
            # TODO: Either put the message back in the incoming queue, or send
            # a NACK to the broker if we're going to do ACK/NACK crap
            # NOTE(review): `raise ex` discards the original traceback in
            # Python 2; a bare `raise` would preserve it.
            raise ex
routing_key="krqueue", body=jsonmsg, properties=properties) print "demo_send:" + jsonmsg # Close our connection print "Closing client" connection.close() if __name__ == '__main__': pika.log.setup(level=pika.log.INFO) # Connect to RabbitMQ host = (len(sys.argv) > 1) and sys.argv[1] or '127.0.0.1' connection = SelectConnection(ConnectionParameters(host), on_connected) # Loop until CTRL-C try: # Start our blocking loop connection.ioloop.start() except KeyboardInterrupt: # Close the connection connection.close() # Loop until the connection is closed connection.ioloop.stop()
def test_rule_router(self):
    """Verify RuleRouter handler resolution: constructor validation,
    default-rule fallback, matcher-based rules with handler kwargs, and
    nested routers (a router used as another router's default target).
    """
    consumer = create_consumer()
    mock_connection = SelectConnection()
    mock_channel = Channel(mock_connection, 10, None)
    consumer.connection = mock_connection
    consumer.channel = mock_channel
    # A non-Rule default must be rejected.
    with self.assertRaises(AssertionError):
        RuleRouter(consumer, 'None rule object')
    router = RuleRouter(consumer)
    self.assertEqual(router.consumer, consumer)
    envelope = create_envelope()
    # No rules and no default -> no handler.
    self.assertIsNone(router.find_handler(envelope))
    default_rule = Rule(AnyMatches(), MessageHandler, None)
    router.set_default_rule(default_rule)
    self.assertIsNotNone(router.find_handler(envelope))
    handler = router.find_handler(envelope)
    self.assertEqual(isinstance(handler, Handler), True)
    # Clearing the default restores the no-handler behavior.
    router.set_default_rule(None)
    self.assertIsNone(router.find_handler(envelope))
    router = RuleRouter(consumer, default_rule)
    self.assertIsNotNone(router.find_handler(envelope))

    class CustomHandler(MessageHandler):
        # Handler that records the kwargs it was initialized with.
        def initialize(self, arg1, arg2, **kwargs):
            super(CustomHandler, self).initialize(**kwargs)
            self.arg1 = arg1
            self.arg2 = arg2

    message_type_matcher = MessageTypeMatches(r'message_a')
    rule = Rule(message_type_matcher, CustomHandler, {
        'arg1': 1,
        'arg2': 2
    })
    router.add_rule(rule)
    envelope.properties.type = 'message_a'
    handler = router.find_handler(envelope)
    # Matching rule wins and its kwargs reach the handler.
    self.assertEqual(isinstance(handler, CustomHandler), True)
    self.assertEqual(handler.arg1, 1)
    self.assertEqual(handler.arg2, 2)
    self.assertEqual(handler.consumer, consumer)
    router.set_default_rule(default_rule)
    handler = router.find_handler(envelope)
    # Specific rule still takes precedence over the default.
    self.assertEqual(isinstance(handler, CustomHandler), True)
    envelope = create_envelope()
    handler = router.find_handler(envelope)
    self.assertEqual(isinstance(handler, Handler), True)
    # test subrouting
    default_subrouter_rule = Rule(message_type_matcher, CustomHandler, {
        'arg1': 1,
        'arg2': 2
    })
    subrouter = RuleRouter(consumer, default_subrouter_rule)
    # default rule for main router is the subrouter
    main_default_rule = Rule(AnyMatches(), subrouter, None)
    main_router = RuleRouter(consumer, main_default_rule)
    handler = main_router.find_handler(envelope)
    # Subrouter's matcher does not match a typeless envelope.
    self.assertIsNone(handler)
    envelope.properties.type = 'message_a'
    handler = main_router.find_handler(envelope)
    self.assertEqual(isinstance(handler, CustomHandler), True)
    self.assertEqual(handler.arg1, 1)
    self.assertEqual(handler.arg2, 2)
    self.assertEqual(handler.consumer, consumer)
def on_queue_declared(frame):
    """Queue.DeclareOk callback: start consuming from the 'test' queue.

    NOTE(review): relies on a module-level ``channel`` set elsewhere
    (presumably in an on_connected/on_channel_open callback) — confirm.
    """
    print "demo_receive: Queue Declared"
    channel.basic_consume(handle_delivery, queue='test')


def handle_delivery(channel, method_frame, header_frame, body):
    """Consumer callback: print the delivery details and acknowledge it."""
    print "Basic.Deliver %s delivery-tag %i: %s" %\
        (header_frame.content_type, method_frame.delivery_tag, body)
    channel.basic_ack(delivery_tag=method_frame.delivery_tag)


if __name__ == '__main__':
    # Connect to RabbitMQ (broker host from argv[1], default localhost)
    host = (len(sys.argv) > 1) and sys.argv[1] or '127.0.0.1'
    connection = SelectConnection(ConnectionParameters(host), on_connected)
    # Loop until CTRL-C
    try:
        # Start our blocking loop
        connection.ioloop.start()
    except KeyboardInterrupt:
        # Close the connection
        connection.close()
        # Loop until the connection is closed
        connection.ioloop.start()
class NewsFeedOpinion(Thread):
    """Worker thread that consumes feed-entry messages from RabbitMQ and
    runs sentiment/opinion processing (ProcessOpinion) on each one.

    The pika connection is opened asynchronously in post_init(); the
    thread is only started once the queue binding completes (see
    on_queue_binded).
    """

    def __init__(self):
        super(NewsFeedOpinion, self).__init__()
        self.feeds = []
        self.update_sentimentwords()
        self.channel = None
        self.connection = None
        self.processopinion = ProcessOpinion()
        self.alive = False
        self.stop_flag = False
        self.r = None
        self.r_last_touched = datetime.now()

    def update_sentimentwords(self):
        """Populate the global NEG_WORDS list from PostgreSQL."""
        global NEG_WORDS
        global DB_CONN
        try:
            LOGGER.info("Obtaining sentiment word list from postgre")
            if DB_CONN is None:
                connect_pg()
            cursor = DB_CONN.cursor()
            cursor.execute(
                "SELECT name FROM sentidict_sentimentword WHERE mode='2'")
            results = cursor.fetchall()
            for result in results:
                NEG_WORDS.append(str(result[0]))
            LOGGER.info("Obtaining sentiment word list from postgre completed")
        except:
            DB_CONN.rollback()
            # NOTE(review): `logger(...)` looks like a typo for
            # LOGGER.info(...) — confirm; as written it needs a callable
            # named `logger` in scope.
            logger(sys.exc_info())

    def post_init(self):
        """Open the pika connection and run the ioloop (blocking)."""
        try:
            LOGGER.info('Opening a connection')
            self.connection = SelectConnection(
                parameters=settings.pika_parameters,
                on_open_callback=self.on_connection_open)
            try:
                LOGGER.info('Starting ioloop')
                self.connection.ioloop.start()
            except KeyboardInterrupt:
                # Gracefully close the connection
                self.connection.close()
                # Loop until we're fully closed, will stop on its own
                self.connection.ioloop.start()
        except:
            (etype, eobj, etb) = sys.exc_info()
            print traceback.format_exception(etype, eobj, etb)

    def on_connection_open(self, unused_connection):
        LOGGER.info('Opening a connection completed')
        self.open_channel()

    def open_channel(self):
        LOGGER.info('Opening a channel')
        self.connection.channel(on_open_callback=self.on_channel_open)

    def on_channel_open(self, new_channel):
        LOGGER.info('Opening a channel completed')
        self.channel = new_channel
        self.declare_exchange()

    def declare_exchange(self):
        """Declare the feed-entry exchange from the settings constants."""
        LOGGER.info('Declaring an exchange')
        self.channel.exchange_declare(
            exchange=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NAME,
            exchange_type=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_TYPE,
            passive=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_PASSIVE,
            durable=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_DURABLE,
            auto_delete=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_AUTO_DELETE,
            internal=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_INTERNAL,
            nowait=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NOWAIT,
            arguments=None,  # Custom key/value pair arguments for the exchange
            callback=self.on_exchange_declared
        )  # Call this method on Exchange.DeclareOk

    def on_exchange_declared(self, unused_frame):
        LOGGER.info('Declaring an exchange completed')
        self.declare_queue()

    def declare_queue(self):
        LOGGER.info('Declaring a queue')
        self.channel.queue_declare(
            callback=self.on_queue_declared,
            queue=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_NAME,
            passive=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_PASSIVE,
            durable=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_DURABLE,
            exclusive=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_EXCLUSIVE,
            auto_delete=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_AUTO_DELETE,
            nowait=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_NOWAIT,
            arguments=None)

    def on_queue_declared(self, method_frame):
        LOGGER.info('Declaring a queue completed')
        self.bind_queue()

    def bind_queue(self):
        LOGGER.info('Binding a queue')
        self.channel.queue_bind(
            callback=self.on_queue_binded,
            queue=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_NAME,
            exchange=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NAME,
            routing_key=settings.RABBITMQ_NEWSFEED_ENTRY_ROUTING_KEY)

    def on_queue_binded(self, frame):
        LOGGER.info('Binding a queue completed')
        # Start the thread's activity
        self.start()

    def on_message(self, channel, basic_deliver, properties, body):
        """Invoked by pika when a message is delivered from RabbitMQ. The
        channel is passed for your convenience. The basic_deliver object
        that is passed in carries the exchange, routing key, delivery tag
        and a redelivered flag for the message. The properties passed in
        is an instance of BasicProperties with the message properties and
        the body is the message that was sent.

        :param pika.channel.Channel channel: The channel object
        :param pika.Spec.Basic.Deliver: basic_deliver method
        :param pika.Spec.BasicProperties: properties
        :param str|unicode body: The message body
        """
        LOGGER.info('Received message # %s from %s',
                    basic_deliver.delivery_tag, properties.app_id)
        # Ack before processing: a processing failure will not requeue.
        self.ack_message(basic_deliver.delivery_tag)
        try:
            data = JSON_DECODER.decode(body)
            #pprint(data)
            self.processopinion.process(data)
        except simplejson.JSONDecodeError:
            LOGGER.info(sys.exc_info())

    def ack_message(self, delivery_tag):
        """Acknowledge the message delivery from RabbitMQ by sending a
        Basic.Ack RPC method for the delivery tag.

        :param int delivery_tag: The delivery tag from the Basic.Deliver frame
        """
        LOGGER.info('Acknowledging message %s', delivery_tag)
        self.channel.basic_ack(delivery_tag)

    def run(self):
        """Thread body: register the consumer callback on the entry queue."""
        LOGGER.info('Start to consume message from queue')
        self.channel.basic_consume(
            consumer_callback=self.on_message,
            queue=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_NAME,
            no_ack=False,  # Set to True means tell the broker to not expect a response
            exclusive=False,  # Set to True means don't allow other consumers on the queue
            consumer_tag=None)  # Specify your own consumer tag
class NewsFeedMaggot(Thread): def __init__(self): super(NewsFeedMaggot, self).__init__() self.feeds = [] self.update_track_dict() self.processfeed = ProcessFeed() self.channel = None self.connection = None self.alive = False self.stop_flag = False self.r = None self.r_last_touched = datetime.now() def update_track_dict(self): global DB_CONN try: LOGGER.info("Obtaining track from postgre") if DB_CONN is None: connect_pg() # Get all ids from "feedjack_track" track_ids = [] cursor = DB_CONN.cursor(cursor_factory=psycopg2.extras.DictCursor) cursor.execute("SELECT id FROM feedjack_track") results = cursor.fetchall() for result in results: track_ids.append(result['id']) # Get "trackedphrase" for each "track" for track_id in track_ids: TRACK_DICT[track_id] = [] cursor.execute("SELECT id, name, mode " \ "FROM feedjack_trackedphrase " \ "WHERE track_id=%s", (track_id,)) results = cursor.fetchall() for result in results: TRACK_DICT[track_id].append({ 'trackedphrase_id': result['id'], 'name': result['name'], 'mode': result['mode'] }) LOGGER.info("Obtaining trackedphrase completed") #pprint(TRACK_DICT) except: DB_CONN.rollback() LOGGER.info(sys.exc_info()) def post_init(self): #初始化 新建RabbitMQ 队列和exchange并绑定(2个) LOGGER.info('Initializing a FeedProcessor') #self.feedprocessor.post_init() LOGGER.info('Initializing a FeedProcessor completed') try: LOGGER.info('Opening a connection') #self.dispatcher = Dispatcher(self.options) self.connection = SelectConnection( parameters=settings.pika_parameters, on_open_callback=self.on_connection_open) try: LOGGER.info('Starting ioloop') self.connection.ioloop.start() except KeyboardInterrupt: # Gracefully close the connection self.connection.close() # Loop until we're fully closed, will stop on its own self.connection.ioloop.start() except: (etype, eobj, etb) = sys.exc_info() print traceback.format_exception(etype, eobj, etb) def on_connection_open(self, unused_connection): LOGGER.info('Opening a connection completed') self.open_channel() def 
open_channel(self): LOGGER.info('Opening a channel') self.connection.channel(on_open_callback=self.on_channel_open) def on_channel_open(self, new_channel): LOGGER.info('Opening a channel completed') self.channel = new_channel self.declare_exchange() self.declare_exchange2() def declare_exchange(self): LOGGER.info('Declaring an exchange') self.channel.exchange_declare( exchange=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_NAME, exchange_type=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_TYPE, passive=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_PASSIVE, durable=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_DURABLE, auto_delete=settings. RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_AUTO_DELETE, internal=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_INTERNAL, nowait=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_NOWAIT, arguments=None, # Custom key/value pair arguments for the exchange callback=self.on_exchange_declared ) # Call this method on Exchange.DeclareOk def on_exchange_declared(self, unused_frame): LOGGER.info('Declaring an exchange completed') self.declare_queue() def declare_queue(self): LOGGER.info('Declaring a queue') self.channel.queue_declare( self.on_queue_declared, settings.RABBITMQ_NEWSFEED_RAW_FEED_QUEUE_NAME) def on_queue_declared(self, method_frame): LOGGER.info('Declaring a queue completed') self.bind_queue() def bind_queue(self): LOGGER.info('Binding a queue') self.channel.queue_bind( callback=self.on_queue_binded, queue=settings.RABBITMQ_NEWSFEED_RAW_FEED_QUEUE_NAME, exchange=settings.RABBITMQ_NEWSFEED_RAW_FEED_EXCHANGE_NAME, routing_key=settings.RABBITMQ_NEWSFEED_RAW_FEED_ROUTING_KEY) def on_queue_binded(self, frame): LOGGER.info('Binding a queue completed') # Start the thread's activity self.start() ############################### def declare_exchange2(self): LOGGER.info('Declaring an exchange2') self.channel.exchange_declare( exchange=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NAME, exchange_type=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_TYPE, 
passive=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_PASSIVE, durable=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_DURABLE, auto_delete=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_AUTO_DELETE, internal=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_INTERNAL, nowait=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NOWAIT, arguments=None, # Custom key/value pair arguments for the exchange callback=self.on_exchange_declared2 ) # Call this method on Exchange.DeclareOk def on_exchange_declared2(self, frame): LOGGER.info('Declaring an exchange2 completed') self.declare_queue2() def declare_queue2(self): LOGGER.info('Declaring a queue2') self.channel.queue_declare( callback=self.on_queue_declared2, queue=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_NAME, passive=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_PASSIVE, durable=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_DURABLE, exclusive=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_EXCLUSIVE, auto_delete=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_AUTO_DELETE, nowait=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_NOWAIT, arguments=None) def on_queue_declared2(self, method_frame): LOGGER.info('Declaring a queue2 completed') self.bind_queue2() def bind_queue2(self): LOGGER.info('Binding a queue2') self.channel.queue_bind( callback=self.on_queue_binded2, queue=settings.RABBITMQ_NEWSFEED_ENTRY_QUEUE_NAME, exchange=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NAME, routing_key=settings.RABBITMQ_NEWSFEED_ENTRY_ROUTING_KEY) def on_queue_binded2(self, frame): LOGGER.info('Binding a queue completed on 2') def on_message(self, channel, basic_deliver, properties, body): """Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. 
The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent 收到消息message,processfeed 类处理成单个postlist并发送给websocket :param pika.channel.Channel channel: The channel object :param pika.Spec.Basic.Deliver: basic_deliver method :param pika.Spec.BasicProperties: properties :param str|unicode body: The message body """ LOGGER.info('Received message # %s from %s', basic_deliver.delivery_tag, properties.app_id) self.ack_message(basic_deliver.delivery_tag) try: data = JSON_DECODER.decode(body) #pprint(data) (entries_status, post_list) = self.processfeed.process(data) for post in post_list: LOGGER.info('Publishing data (post id: %d) to %s', post['post_id'], settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NAME) json_data = JSON_ENCODER.encode(post) self.channel.basic_publish( exchange=settings.RABBITMQ_NEWSFEED_ENTRY_EXCHANGE_NAME, routing_key=settings.RABBITMQ_NEWSFEED_ENTRY_ROUTING_KEY, body=json_data) LOGGER.info('Publishing data completed') except simplejson.JSONDecodeError: LOGGER.info(sys.exc_info()) #except simplejson.JSONEncodeError: # LOGGER.info(sys.exc_info()) def ack_message(self, delivery_tag): """Acknowledge the message delivery from RabbitMQ by sending a Basic.Ack RPC method for the delivery tag. :param int delivery_tag: The delivery tag from the Basic.Deliver frame """ LOGGER.info('Acknowledging message %s', delivery_tag) self.channel.basic_ack(delivery_tag) def run(self): #设置channel.basic_consume MQ消息接收 LOGGER.info('Start to consume message from queue') self.channel.basic_consume( consumer_callback=self.on_message, queue=settings.RABBITMQ_NEWSFEED_RAW_FEED_QUEUE_NAME, no_ack= False, # Set to True means tell the broker to not expect a response exclusive= False, # Set to True means don't allow other consumers on the queue consumer_tag=None) # Specify your own consumer tag
class Amqp(object):
    """Wrapper around a pika SelectConnection configured from a conf dict.

    Supports plain-credential connections, optional SSL, optional
    EXTERNAL (client-certificate) authentication, and falls back to a
    reduced configuration for old pika versions whose
    ConnectionParameters accept neither ``ssl`` nor integer heartbeats.

    NOTE(review): self.logger is never assigned in the visible part of
    this class -- presumably provided by a subclass or mixin; confirm.
    """

    def __init__(self, conf):
        """Read connection settings from *conf* and pre-build
        pika.ConnectionParameters.

        :param dict conf: connection options; see the keys read below.
        """
        # RabbitMQ general options
        self.cacertfile = conf['cacertfile']
        self.certfile = conf['certfile']
        self.exchange = conf['exchange']
        self.status_exchange = conf['status_exchange']
        self.fail_if_no_peer_cert = conf['fail_if_no_peer_cert']
        self.heartbeat = conf['heartbeat']
        self.host = conf['host']
        self.keyfile = conf['keyfile']
        self.password = conf['password']
        self.port = conf['port']
        self.ssl_port = conf['ssl_port']
        self.queue = conf['uuid']  # queue name is the node's uuid
        self.retry_timeout = conf['retry_timeout']
        self.ssl_auth = conf['ssl_auth']
        self.use_ssl = conf['use_ssl']
        self.username = conf['username']
        self.vhost = conf['vhost']

        # Connection and channel initialization
        self.connection = None
        self.channel = None

        # Plain credentials
        credentials = PlainCredentials(self.username, self.password)
        pika_options = {'host': self.host,
                        'port': self.port,
                        'virtual_host': self.vhost,
                        'credentials': credentials}

        # SSL options
        if self.use_ssl:
            pika_options['ssl'] = True
            pika_options['port'] = self.ssl_port
            if self.ssl_auth:
                # EXTERNAL auth: the client certificate identifies us.
                pika_options['credentials'] = ExternalCredentials()
                pika_options['ssl_options'] = {
                    'ca_certs': self.cacertfile,
                    'certfile': self.certfile,
                    'keyfile': self.keyfile,
                    'cert_reqs': CERT_REQUIRED
                }

        if self.heartbeat:
            pika_options['heartbeat'] = self.heartbeat

        self.parameters = None
        try:
            self.parameters = pika.ConnectionParameters(**pika_options)
        except TypeError as err:
            self.logger.debug(err)
            # Let's be compatible with original pika version (no integer for
            # heartbeats and no ssl.
            self.logger.warning("Wrong pika lib version, won't use ssl.")
            pika_options['heartbeat'] = True
            if self.use_ssl:
                self.use_ssl = False
                pika_options['port'] = self.port
                del pika_options['ssl']
                # Bug fix: 'ssl_options' is only ever added when use_ssl is
                # set, so only remove it in that case. The old code checked
                # ssl_auth independently, raising KeyError when ssl_auth was
                # set without use_ssl.
                if self.ssl_auth:
                    self.ssl_auth = False
                    del pika_options['ssl_options']
            self.parameters = pika.ConnectionParameters(**pika_options)

    def connect(self):
        """Open the SelectConnection and run its ioloop (blocks)."""
        # Shorten the poll interval; this mutates a pika module-level
        # setting, so it affects every SelectConnection in the process.
        SelectPoller.TIMEOUT = .1
        self.connection = SelectConnection(self.parameters, self.on_connected)
        self.connection.ioloop.start()

    def close(self, amqperror=False):
        """Close the connection unless it is already closing/closed."""
        if (self.connection and not self.connection.closing
                and not self.connection.closed):
            self.logger.debug("Closing connection")
            self.connection.close()
            #self.connection.ioloop.start()

    def on_remote_close(self, code, text):
        """Handle a remote channel close; raise AmqpError for any
        non-200 (abnormal) close code.

        :param int code: AMQP reply code from the broker
        :param str text: reply text from the broker
        """
        self.logger.debug("Remote channel close, code %d" % code)
        time.sleep(2)  # give the broker a moment before reacting
        if code != 200:
            self.close()
            raise AmqpError(text)

    def on_connection_closed(self, frame):
        """Connection is gone: stop the ioloop so connect() returns."""
        self.connection.ioloop.stop()

    def on_connected(self, connection):
        """Connection established: register the close callback and open a
        channel (on_channel_open is defined elsewhere in this class)."""
        self.connection = connection
        self.connection.add_on_close_callback(self.on_connection_closed)
        self.connection.channel(self.on_channel_open)