def test_maybe_switch_next(self):
    c = Connection('amqp://foo;redis://example.com//3')
    c.maybe_switch_next()
    self.assertFalse(c._closed)
    self.assertEqual(c.hostname, 'example.com')
    self.assertEqual(c.transport_cls, 'redis')
    self.assertEqual(c.virtual_host, '/3')
def _connect(self):
    result = None
    if not self.connected():
        self.logger.info(
            "Connect to RabbitMQ Broker %s" % (self.uri))
        uri = self.uri
        parsed_url = urlparse(uri)
        uri = 'amqp://%s/%s' % (parsed_url.netloc, parsed_url.path)
        result = Connection(uri)
        try:
            result.connect()
            self.logger.info("Connected to AMQP Broker.")
        except Exception as e:
            result.release()
            self.logger.error(
                "Connection failure to %s: %s" % (uri, e))
    else:
        self.logger.debug("Already connected")
    return result
def test_sentinel_pool(self, mock_get_redis_via_sentinel):
    connection = Connection()
    connection.transport_options = BROKER_TRANSPORT_OPTIONS
    transport = SentinelTransport(app=app, client=connection)
    mock_get_redis_via_sentinel.return_value.connection_pool.get_master_address.return_value = (
        '192.168.1.128',
        '6379',
    )
    channel = SentinelChannel(connection=transport)
    channel.sentinels = BROKER_TRANSPORT_OPTIONS['sentinels']
    channel.service_name = BROKER_TRANSPORT_OPTIONS['service_name']
    channel.socket_timeout = BROKER_TRANSPORT_OPTIONS['socket_timeout']
    del channel.sentinel_pool
    mock_get_redis_via_sentinel.reset_mock()

    pool = channel.sentinel_pool

    assert pool == mock_get_redis_via_sentinel.return_value.connection_pool
    mock_get_redis_via_sentinel.assert_called_once_with(
        host=mock.ANY,
        max_connections=mock.ANY,
        password=mock.ANY,
        port=mock.ANY,
        connection_pool_class=CelerySentinelConnectionPool,
        redis_class=channel.Client,
        db=0,
        sentinels=[('192.168.1.1', 26379),
                   ('192.168.1.2', 26379),
                   ('192.168.1.3', 26379)],
        service_name='master',
        socket_timeout=1,
    )

def test_close_disconnects(self):
    c = Connection(transport=Transport).channel()
    conn1 = c.client.connection
    conn2 = c.subclient.connection
    c.close()
    self.assertTrue(conn1.disconnected)
    self.assertTrue(conn2.disconnected)

def test_redis_info_raises(self):
    pool = Mock(name='pool')
    pool_at_init = [pool]
    client = Mock(name='client')

    class XChannel(Channel):

        def __init__(self, *args, **kwargs):
            self._pool = pool_at_init[0]
            super(XChannel, self).__init__(*args, **kwargs)

        def _get_client(self):
            return lambda *_, **__: client

    class XTransport(Transport):
        Channel = XChannel

    conn = Connection(transport=XTransport)
    client.info.side_effect = RuntimeError()
    with self.assertRaises(RuntimeError):
        conn.channel()
    pool.disconnect.assert_called_with()
    pool.disconnect.reset_mock()

    pool_at_init = [None]
    with self.assertRaises(RuntimeError):
        conn.channel()
    self.assertFalse(pool.disconnect.called)

def _do_test():
    conn = Connection(transport=Transport)
    chan = conn.channel()
    self.assertTrue(chan.Client)
    self.assertTrue(chan.ResponseError)
    self.assertTrue(conn.transport.connection_errors)
    self.assertTrue(conn.transport.channel_errors)

def setup(self):
    if sys.platform == 'win32':
        raise SkipTest('Needs win32con module')
    try:
        data_folder_in = tempfile.mkdtemp()
        data_folder_out = tempfile.mkdtemp()
    except Exception:
        raise SkipTest('filesystem transport: cannot create tempfiles')
    self.c = Connection(transport='filesystem',
                        transport_options={
                            'data_folder_in': data_folder_in,
                            'data_folder_out': data_folder_out,
                        })
    self.p = Connection(transport='filesystem',
                        transport_options={
                            'data_folder_in': data_folder_out,
                            'data_folder_out': data_folder_in,
                        })
    self.e = Exchange('test_transport_filesystem')
    self.q = Queue('test_transport_filesystem',
                   exchange=self.e,
                   routing_key='test_transport_filesystem')
    self.q2 = Queue('test_transport_filesystem2',
                    exchange=self.e,
                    routing_key='test_transport_filesystem2')
class PollingQueueConsumer(object):
    """Implements a minimum interface of the
    :class:`~messaging.QueueConsumer`. Instead of processing messages in a
    separate thread it provides a polling method that blocks until a message
    with the same correlation ID as the RPC-proxy call arrives.
    """

    def register_provider(self, provider):
        self.provider = provider
        self.connection = Connection(provider.container.config['AMQP_URI'])
        self.channel = self.connection.channel()
        self.queue = provider.queue
        maybe_declare(self.queue, self.channel)

    def unregister_provider(self, provider):
        self.connection.close()

    def ack_message(self, msg):
        msg.ack()

    def poll_messages(self, correlation_id):
        channel = self.channel
        conn = channel.connection

        for body, msg in itermessages(conn, channel, self.queue, limit=None):
            if correlation_id == msg.properties.get('correlation_id'):
                self.provider.handle_message(body, msg)
                break
def test_maybe_switch_next(self):
    c = Connection('amqp://foo;redis://example.com//3')
    c.maybe_switch_next()
    assert not c._closed
    assert c.hostname == 'example.com'
    assert c.transport_cls == 'redis'
    assert c.virtual_host == '/3'

def test_socket_connection(self):
    connection = Connection('redis+socket:///tmp/redis.sock',
                            transport=Transport)
    connparams = connection.channel()._connparams()
    self.assertEqual(connparams['connection_class'],
                     redis.redis.UnixDomainSocketConnection)
    self.assertEqual(connparams['path'], '/tmp/redis.sock')

def setup(self):
    self.channels = set()
    try:
        data_folder_in = tempfile.mkdtemp()
        data_folder_out = tempfile.mkdtemp()
    except Exception:
        raise SkipTest('filesystem transport: cannot create tempfiles')
    self.c = Connection(transport='filesystem',
                        transport_options={
                            'data_folder_in': data_folder_in,
                            'data_folder_out': data_folder_out,
                        })
    self.channels.add(self.c.default_channel)
    self.p = Connection(transport='filesystem',
                        transport_options={
                            'data_folder_in': data_folder_out,
                            'data_folder_out': data_folder_in,
                        })
    self.channels.add(self.p.default_channel)
    self.e = Exchange('test_transport_filesystem')
    self.q = Queue('test_transport_filesystem',
                   exchange=self.e,
                   routing_key='test_transport_filesystem')
    self.q2 = Queue('test_transport_filesystem2',
                    exchange=self.e,
                    routing_key='test_transport_filesystem2')
def __setup_rackhd_style_amqp(self):
    """
    Create the exchanges and named queues needed to make this look like
    a RackHD instance's AMQP setup.
    """
    # A freshly spun up on-demand docker likes to say it's there, but will
    # then reset the connection. So, catch that scenario w/ a few retries.
    con = None
    done_time = time.time() + 30.0
    while con is None:
        con = Connection(hostname=self.host, port=self.ssl_port, ssl=False)
        try:
            con.connect()
        except Exception as ex:
            if time.time() > done_time:
                raise ex
            con = None
        if con is None:
            time.sleep(0.1)

    on_task = self.__assure_exchange(con, 'on.task', 'topic')
    self.__assure_named_queue(con, on_task, 'ipmi.command.sel.result')
    self.__assure_named_queue(con, on_task, 'ipmi.command.sdr.result')
    self.__assure_named_queue(con, on_task, 'ipmi.command.chassis.result')

    on_events = self.__assure_exchange(con, 'on.events', 'topic')
    self.__assure_named_queue(con, on_events, 'graph.finished')
    self.__assure_named_queue(con, on_events, 'polleralert.sel.updated', '#')
def test_register_with_event_loop(self):
    c = Connection(transport=Mock)
    loop = Mock(name='loop')
    c.register_with_event_loop(loop)
    c.transport.register_with_event_loop.assert_called_with(
        c.connection, loop,
    )

def test_check_at_least_we_try_to_connect_and_fail(self):
    import redis
    connection = Connection('redis://localhost:65534/')
    with pytest.raises(redis.exceptions.ConnectionError):
        chan = connection.channel()
        chan._size('some_queue')

def test_maybe_switch_next(self):
    c = Connection("amqp://foo;redis://example.com//3")
    c.maybe_switch_next()
    self.assertFalse(c._closed)
    self.assertEqual(c.hostname, "example.com")
    self.assertEqual(c.transport_cls, "redis")
    self.assertEqual(c.virtual_host, "/3")

def test_disable_ack_emulation(self):
    conn = Connection(transport=Transport, transport_options={
        'ack_emulation': False,
    })
    chan = conn.channel()
    assert not chan.ack_emulation
    assert chan.QoS == virtual.QoS

def test_switch(self):
    c = Connection('amqp://foo')
    c._closed = True
    c.switch('redis://example.com//3')
    assert not c._closed
    assert c.hostname == 'example.com'
    assert c.transport_cls == 'redis'
    assert c.virtual_host == '/3'

def test_parse_generated_as_uri(self):
    conn = Connection(self.url)
    info = conn.info()
    for k, v in self.expected.items():
        assert info[k] == v
    # by default almost the same - no password
    assert conn.as_uri() == self.nopass
    assert conn.as_uri(include_password=True) == self.url

def test_clone(self):
    hostname = 'sqlite:///celerydb.sqlite'
    x = Connection('+'.join(['sqla', hostname]))
    self.assertEqual(x.uri_prefix, 'sqla')
    self.assertEqual(x.hostname, hostname)

    clone = x.clone()
    self.assertEqual(clone.hostname, hostname)
    self.assertEqual(clone.uri_prefix, 'sqla')
def test_can_create_connection(self):
    from redis.exceptions import ConnectionError

    with self.assertRaises(ConnectionError):
        connection = Connection('sentinel://localhost:65534/',
                                transport_options={
                                    'master_name': 'not_important'
                                })
        connection.channel()

def test_get_client(self):
    with mock.module_exists(*_redis_modules()):
        conn = Connection(transport=Transport)
        chan = conn.channel()
        assert chan.Client
        assert chan.ResponseError
        assert conn.transport.connection_errors
        assert conn.transport.channel_errors

def test_disable_ack_emulation(self):
    conn = Connection(transport=Transport, transport_options={
        'ack_emulation': False,
    })
    chan = conn.channel()
    self.assertFalse(chan.ack_emulation)
    self.assertEqual(chan.QoS, virtual.QoS)

def test_parse_generated_as_uri(self):
    conn = Connection(self.url)
    info = conn.info()
    for k, v in self.expected.items():
        self.assertEqual(info[k], v)
    # by default almost the same - no password
    self.assertEqual(conn.as_uri(), self.nopass)
    self.assertEqual(conn.as_uri(include_password=True), self.url)

def test_switch(self):
    c = Connection("amqp://foo")
    c._closed = True
    c.switch("redis://example.com//3")
    self.assertFalse(c._closed)
    self.assertEqual(c.hostname, "example.com")
    self.assertEqual(c.transport_cls, "redis")
    self.assertEqual(c.virtual_host, "/3")

def test_switch(self):
    c = Connection('amqp://foo')
    c._closed = True
    c.switch('redis://example.com//3')
    self.assertFalse(c._closed)
    self.assertEqual(c.hostname, 'example.com')
    self.assertEqual(c.transport_cls, 'redis')
    self.assertEqual(c.virtual_host, '/3')
class _AMQPServerWrapper(object):

    def __init__(self, amqp_url, logs):
        self.__logs = logs
        self.__amqp_url = amqp_url
        self.__monitors = {}
        self.__connection = Connection(self.__amqp_url)
        self.__connection.connect()
        self.__running = True
        self.__consumer_gl = gevent.spawn(self.__consumer_greenlet_main)
        self.__consumer_gl.greenlet_name = 'amqp-consumer-gl'  # allowing flogging to print a nice name
        gevent.sleep(0.0)

    def __consumer_greenlet_main(self):
        gevent.sleep(0)
        while self.__running:
            try:
                self.__connection.drain_events(timeout=0.5)
            except Exception as ex:  # NOQA: assigned but not used (left in for super-duper-low-level-debug)
                # print("was woken because {}".format(ex))
                pass
            gevent.sleep(0.1)  # make -sure- to yield cpu...
            # print("---loop")

    def stop_greenlet(self):
        self.__running = False

    @property
    def connected(self):
        return self.__connection.connected

    def create_add_tracker(self, exchange, routing_key, event_cb, queue_name=None):
        self.__logs.irl.debug("AMQPServerWrapper: create_add_tracker ex=%s, rk=%s, event_cb=%s",
                              exchange, routing_key, event_cb)
        mon = _KeyedConsumerHandler.get_keyed_consumer(
            self.__logs, self.__connection, exchange, routing_key,
            queue_name, event_cb)
        return mon.exchange

    def inject(self, exchange, routing_key, payload):
        self.__logs.irl.debug("Injecting a test AMQP message: ex=%s, rk=%s, payload=%s",
                              exchange, routing_key, payload)
        if not isinstance(exchange, Exchange):
            exchange = Exchange(exchange, 'topic')
        prod = Producer(self.__connection, exchange=exchange, routing_key=routing_key)
        prod.publish(payload)

    def test_helper_sync_send_msg(self, exchange, ex_rk, send_rk, payload):
        ex = Exchange(exchange, 'topic')
        queue = Queue(exchange=ex, routing_key=ex_rk + '.*', exclusive=True,
                      channel=self.__connection)
        queue.declare()
        prod = Producer(self.__connection, exchange=ex, routing_key=send_rk)
        prod.publish(payload)
        return queue

    def test_helper_sync_recv_msg(self, queue):
        for tick in range(10):
            msg = queue.get()
            if msg is not None:
                break
        return msg
def init(host, port, virtual_host, usr, psw, queue_name):
    global connection, channel, producer, task_queue, consumer
    connection = Connection(hostname=host, port=port, userid=usr,
                            password=psw, virtual_host=virtual_host)
    channel = connection.channel()
    producer = Producer(channel)
    task_queue = Queue(queue_name, durable=True)
    consumer = Consumer(channel, task_queue, no_ack=False)
    consumer.qos(prefetch_count=1)
    consumer.register_callback(RequestCallBack)

def worker(mq_url):
    connection = Connection(mq_url)
    channel = connection.channel()
    consumer_json = Consumer(channel, task_json_queue,
                             callbacks=[process_json], accept=["json"])
    consumer_json.consume()
    consumer_pickle = Consumer(channel, task_pickle_queue,
                               callbacks=[process_pickle], accept=["pickle"])
    consumer_pickle.consume()
    while True:
        connection.drain_events()
def send_task(obj, message):
    log.info('send message: %s' % message['event_type'])
    try:
        connection = Connection('amqp://%s:%s@%s:%s/%s' % (
            BROKER_USER, BROKER_PASSWORD, BROKER_HOST,
            BROKER_PORT, BROKER_VHOST_PYPO))
        simple_queue = connection.SimpleQueue(BROKER_QUEUE)
        simple_queue.put(json.dumps(message))
        simple_queue.close()
        connection.close()
    except Exception as e:
        log.error('error sending message: %s' % e)
def test_method_called(self):
    from kombu.transport.redis import SentinelChannel

    with patch.object(SentinelChannel, '_sentinel_managed_pool') as patched:
        connection = Connection('sentinel://localhost:65534/',
                                transport_options={
                                    'master_name': 'not_important'
                                })
        connection.channel()
        self.assertTrue(patched.called)

def __init__(self):
    self.connection = Connection(settings.CELERY_LOG_BROKER_URL)

def test_bogus_scheme(self):
    with pytest.raises(KeyError):
        Connection('bogus://localhost:7421').transport

def create_resource(self, limit):
    return Connection(port=5672, transport=Transport).ChannelPool(limit)

def get_worker():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return ExecutionsExporter(conn, [EXPORTER_WORK_QUEUE])
from kombu import Connection
from kombu.messaging import Producer
from kombu.transport.base import Message

from kombu_queues import task_exchange
from kombu_tasks import echo_task

rabbitmq_url = 'amqp://*****:*****@localhost:5672//'
message_num = 400

if __name__ == '__main__':
    connection = Connection(rabbitmq_url)
    channel = connection.channel()

    body_json = {'url': 'http://127.0.0.1', 'delay': 5}
    message_json = Message(channel, body=body_json)

    body_pickle = {
        'func': echo_task,
        'args': ('Hello Rabbit', 5),
        'kwargs': {}
    }
    message_pickle = Message(channel, body=body_pickle)

    producer_json = Producer(channel, exchange=task_exchange)
    producer_pickle = Producer(channel, exchange=task_exchange,
                               serializer='pickle')

    for i in range(message_num):
        producer_json.publish(message_json.body, routing_key='json_queue')
        producer_pickle.publish(message_pickle.body, routing_key='pickle_queue')
from kombu import Connection, Exchange, Queue, Consumer

rabbit_url = "amqp://localhost:5672/"

conn = Connection(rabbit_url)

exchange = Exchange("example-exchange", type="direct")
queue = Queue(name="example-queue", exchange=exchange, routing_key="BOB")


def process_message(body, message):
    print("The body is {}".format(body))
    message.ack()


with Consumer(conn, queues=queue, callbacks=[process_message],
              accept=["text/plain"]):
    conn.drain_events(timeout=2)
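# A minimal producer counterpart for the consumer example above (a sketch,
# not part of the original snippet): it publishes a text/plain message to
# the same "example-exchange" with routing key "BOB", so the consumer's
# drain_events() call has something to deliver. The consumer should already
# be running (or have declared its queue) so the message is not dropped.
from kombu import Connection, Exchange, Producer

rabbit_url = "amqp://localhost:5672/"
exchange = Exchange("example-exchange", type="direct")

with Connection(rabbit_url) as conn:
    producer = Producer(conn, exchange=exchange, routing_key="BOB")
    producer.publish("Hello there!",
                     content_type="text/plain",
                     content_encoding="utf-8")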
def setup(self):
    self.conn = Connection(port=5672, transport=Transport,
                           transport_options=self.transport_options)
class test_Channel(TestCase): def setUp(self): self.connection = Connection(transport=Transport) self.channel = self.connection.channel() def test_basic_consume_when_fanout_queue(self): self.channel.exchange_declare(exchange='txconfan', type='fanout') self.channel.queue_declare(queue='txconfanq') self.channel.queue_bind(queue='txconfanq', exchange='txconfan') self.assertIn('txconfanq', self.channel._fanout_queues) self.channel.basic_consume('txconfanq', False, None, 1) self.assertIn('txconfanq', self.channel.active_fanout_queues) self.assertEqual(self.channel._fanout_to_queue.get('txconfan'), 'txconfanq') def test_basic_cancel_unknown_delivery_tag(self): self.assertIsNone(self.channel.basic_cancel('txaseqwewq')) def test_subscribe_no_queues(self): self.channel.subclient = Mock() self.channel.active_fanout_queues.clear() self.channel._subscribe() self.assertFalse(self.channel.subclient.subscribe.called) def test_subscribe(self): self.channel.subclient = Mock() self.channel.active_fanout_queues.add('a') self.channel.active_fanout_queues.add('b') self.channel._fanout_queues.update(a='a', b='b') self.channel._subscribe() self.assertTrue(self.channel.subclient.subscribe.called) s_args, _ = self.channel.subclient.subscribe.call_args self.assertItemsEqual(s_args[0], ['a', 'b']) self.channel.subclient.connection._sock = None self.channel._subscribe() self.channel.subclient.connection.connect.assert_called_with() def test_handle_unsubscribe_message(self): s = self.channel.subclient s.subscribed = True self.channel._handle_message(s, ['unsubscribe', 'a', 0]) self.assertFalse(s.subscribed) def test_handle_pmessage_message(self): self.assertDictEqual( self.channel._handle_message( self.channel.subclient, ['pmessage', 'pattern', 'channel', 'data'], ), { 'type': 'pmessage', 'pattern': 'pattern', 'channel': 'channel', 'data': 'data', }, ) def test_handle_message(self): self.assertDictEqual( self.channel._handle_message( self.channel.subclient, ['type', 'channel', 'data'], ), { 'type': 'type', 'pattern': None, 'channel': 'channel', 'data': 'data', }, ) def test_brpop_start_but_no_queues(self): self.assertIsNone(self.channel._brpop_start()) def test_receive(self): s = self.channel.subclient = Mock() self.channel._fanout_to_queue['a'] = 'b' s.parse_response.return_value = [ 'message', 'a', dumps({'hello': 'world'}) ] payload, queue = self.channel._receive() self.assertDictEqual(payload, {'hello': 'world'}) self.assertEqual(queue, 'b') def test_receive_raises(self): self.channel._in_listen = True s = self.channel.subclient = Mock() s.parse_response.side_effect = KeyError('foo') with self.assertRaises(redis.Empty): self.channel._receive() self.assertFalse(self.channel._in_listen) def test_receive_empty(self): s = self.channel.subclient = Mock() s.parse_response.return_value = None with self.assertRaises(redis.Empty): self.channel._receive() def test_receive_different_message_Type(self): s = self.channel.subclient = Mock() s.parse_response.return_value = ['pmessage', '/foo/', 0, 'data'] with self.assertRaises(redis.Empty): self.channel._receive() def test_brpop_read_raises(self): c = self.channel.client = Mock() c.parse_response.side_effect = KeyError('foo') with self.assertRaises(redis.Empty): self.channel._brpop_read() c.connection.disconnect.assert_called_with() def test_brpop_read_gives_None(self): c = self.channel.client = Mock() c.parse_response.return_value = None with self.assertRaises(redis.Empty): self.channel._brpop_read() def test_poll_error(self): c = self.channel.client = Mock() c.parse_response 
= Mock() self.channel._poll_error('BRPOP') c.parse_response.assert_called_with('BRPOP') c.parse_response.side_effect = KeyError('foo') self.assertIsNone(self.channel._poll_error('BRPOP')) def test_put_fanout(self): self.channel._in_poll = False c = self.channel.client = Mock() body = {'hello': 'world'} self.channel._put_fanout('exchange', body) c.publish.assert_called_with('exchange', dumps(body)) def test_delete(self): x = self.channel self.channel._in_poll = False delete = x.client.delete = Mock() srem = x.client.srem = Mock() x._delete('queue', 'exchange', 'routing_key', None) delete.assert_has_call('queue') srem.assert_has_call(x.keyprefix_queue % ('exchange', ), x.sep.join(['routing_key', '', 'queue'])) def test_has_queue(self): self.channel._in_poll = False exists = self.channel.client.exists = Mock() exists.return_value = True self.assertTrue(self.channel._has_queue('foo')) exists.assert_has_call('foo') exists.return_value = False self.assertFalse(self.channel._has_queue('foo')) def test_close_when_closed(self): self.channel.closed = True self.channel.close() def test_close_client_close_raises(self): c = self.channel.client = Mock() c.connection.disconnect.side_effect = self.channel.ResponseError() self.channel.close() c.connection.disconnect.assert_called_with() def test_invalid_database_raises_ValueError(self): with self.assertRaises(ValueError): self.channel.connection.client.virtual_host = 'dwqeq' self.channel._connparams() @skip_if_not_module('redis') def test_get_client(self): import redis as R KombuRedis = redis.Channel._get_client(self.channel) self.assertTrue(KombuRedis) Rv = getattr(R, 'VERSION', None) try: R.VERSION = (2, 4, 0) with self.assertRaises(VersionMismatch): redis.Channel._get_client(self.channel) finally: if Rv is not None: R.VERSION = Rv @skip_if_not_module('redis') def test_get_response_error(self): from redis.exceptions import ResponseError self.assertIs(redis.Channel._get_response_error(self.channel), ResponseError) def test_avail_client_when_not_in_poll(self): self.channel._in_poll = False c = self.channel.client = Mock() with self.channel.conn_or_acquire() as client: self.assertIs(client, c) def test_avail_client_when_in_poll(self): self.channel._in_poll = True self.channel._pool = Mock() cc = self.channel._create_client = Mock() client = cc.return_value = Mock() with self.channel.conn_or_acquire(): pass self.channel.pool.release.assert_called_with(client.connection) cc.assert_called_with() @skip_if_not_module('redis') def test_transport_get_errors(self): self.assertTrue(redis.Transport._get_errors(self.connection.transport)) @skip_if_not_module('redis') def test_transport_get_errors_when_InvalidData_used(self): from redis import exceptions class ID(Exception): pass DataError = getattr(exceptions, 'DataError', None) InvalidData = getattr(exceptions, 'InvalidData', None) exceptions.InvalidData = ID exceptions.DataError = None try: errors = redis.Transport._get_errors(self.connection.transport) self.assertTrue(errors) self.assertIn(ID, errors[1]) finally: if DataError is not None: exceptions.DataError = DataError if InvalidData is not None: exceptions.InvalidData = InvalidData def test_empty_queues_key(self): channel = self.channel channel._in_poll = False key = channel.keyprefix_queue % 'celery' # Everything is fine, there is a list of queues. channel.client.sadd(key, 'celery\x06\x16\x06\x16celery') self.assertListEqual(channel.get_table('celery'), [('celery', '', 'celery')]) # ... 
then for some reason, the _kombu.binding.celery key gets lost channel.client.srem(key) # which raises a channel error so that the consumer/publisher # can recover by redeclaring the required entities. with self.assertRaises(InconsistencyError): self.channel.get_table('celery')
def test_close_ResponseError(self):
    c = Connection(transport=Transport).channel()
    c.client.bgsave_raises_ResponseError = True
    c.close()
class Collector:

    def __init__(self, broker_cloud, mode, time_collect):
        self.mode = mode
        self.time_collect = time_collect
        self.producer_connection = Connection(broker_cloud)
        self.consumer_connection = Connection(broker_cloud)
        self.exchange = Exchange("IoT", type="direct")
        self.list_platform_id = []

    def collect(self):
        print("Collect the states of the devices")
        for platform_id in self.list_platform_id:
            self.collect_by_platform_id(platform_id)
        threading.Timer(self.time_collect, self.collect).start()

    def collect_by_platform_id(self, platform_id):
        print('Collect data from platform_id: ', str(platform_id))
        message_request = {
            'reply_to': 'driver.response.collector.api_get_states',
            'platform_id': platform_id
        }
        request_queue = Queue(name='driver.request.api_get_states',
                              exchange=self.exchange,
                              routing_key='driver.request.api_get_states')
        request_routing_key = 'driver.request.api_get_states'
        self.producer_connection.ensure_connection()
        with Producer(self.producer_connection) as producer:
            producer.publish(
                json.dumps(message_request),
                exchange=self.exchange.name,
                routing_key=request_routing_key,
                declare=[request_queue],
                retry=True
            )

    def handle_collect_by_platform_id(self, body, message):
        print('Received state from platform_id: ', json.loads(body)['platform_id'])
        # print(msg.payload.decode('utf-8'))
        # print(ast.literal_eval(msg.payload.decode('utf-8')))
        list_things = json.loads(body)
        # print(list_things)
        request_queue = Queue(name='dbwriter.request.api_write_db',
                              exchange=self.exchange,
                              routing_key='dbwriter.request.api_write_db')
        request_routing_key = 'dbwriter.request.api_write_db'
        self.producer_connection.ensure_connection()
        with Producer(self.producer_connection) as producer:
            producer.publish(
                json.dumps(list_things),
                exchange=self.exchange.name,
                routing_key=request_routing_key,
                declare=[request_queue],
                retry=True
            )
        print('Send new state to Dbwriter')

    def get_list_platforms(self):
        print("Get list platforms from Registry")
        message = {
            'reply_to': 'registry.response.collector.api_get_list_platforms',
            'platform_status': "active"
        }
        queue = Queue(name='registry.request.api_get_list_platforms',
                      exchange=self.exchange,
                      routing_key='registry.request.api_get_list_platforms')
        routing_key = 'registry.request.api_get_list_platforms'
        self.producer_connection.ensure_connection()
        with Producer(self.producer_connection) as producer:
            producer.publish(
                json.dumps(message),
                exchange=self.exchange.name,
                routing_key=routing_key,
                declare=[queue],
                retry=True
            )

    def handle_get_list(self, body, message):
        list_platforms = json.loads(body)['list_platforms']
        temp = []
        for platform in list_platforms:
            temp.append(platform['platform_id'])
        self.list_platform_id = temp
        print('Updated list of platform_id: ', str(self.list_platform_id))

    def handle_notification(self, body, message):
        print('Have Notification')
        if json.loads(body)['notification'] == 'Have Platform_id change':
            self.get_list_platforms()

    def run(self):
        queue_notification = Queue(name='collector.request.notification',
                                   exchange=self.exchange,
                                   routing_key='collector.request.notification')
        queue_list_platforms = Queue(name='registry.response.collector.api_get_list_platforms',
                                     exchange=self.exchange,
                                     routing_key='registry.response.collector.api_get_list_platforms')
        queue_get_states = Queue(name='driver.response.collector.api_get_states',
                                 exchange=self.exchange,
                                 routing_key='driver.response.collector.api_get_states')
        if self.mode == 'PULL':
            print("Collector use Mode: PULL Data")
            self.get_list_platforms()
            self.collect()
        while 1:
            try:
                self.consumer_connection.ensure_connection(max_retries=1)
                with nested(Consumer(self.consumer_connection,
                                     queues=queue_notification,
                                     callbacks=[self.handle_notification],
                                     no_ack=True),
                            Consumer(self.consumer_connection,
                                     queues=queue_list_platforms,
                                     callbacks=[self.handle_get_list],
                                     no_ack=True),
                            Consumer(self.consumer_connection,
                                     queues=queue_get_states,
                                     callbacks=[self.handle_collect_by_platform_id],
                                     no_ack=True)):
                    while True:
                        self.consumer_connection.drain_events()
            except (ConnectionRefusedError, exceptions.OperationalError):
                print('Connection lost')
            except self.consumer_connection.connection_errors:
                print('Connection error')
def task_publish_to_core(self=None, publish_node=None): """task_publish_to_core :param self: parent task object for bind=True :param publish_node: dictionary to send to the AntiNex Core Worker """ if settings.ANTINEX_WORKER_ENABLED: conn = None dataset = publish_node["body"].get("dataset", None) predict_rows = publish_node["body"].get("predict_rows", None) if not dataset and not predict_rows: log.info( ("skipping antinex core publish body={} - " "is missing dataset and predict_rows").format(publish_node)) return None # end of checking for supported requests to the core log.info(("task_publish_to_core - start req={}").format( str(publish_node)[0:32])) if not predict_rows: log.info(("building predict_rows from dataset={}").format(dataset)) predict_rows = [] predict_rows_df = pd.read_csv(dataset) for idx, org_row in predict_rows_df.iterrows(): new_row = json.loads(org_row.to_json()) new_row["idx"] = len(predict_rows) + 1 predict_rows.append(new_row) # end of building predict rows publish_node["body"]["apply_scaler"] = True publish_node["body"]["predict_rows"] = pd.DataFrame( predict_rows).to_json() # end of validating publish_node["body"]["ml_type"] = \ publish_node["body"]["manifest"]["ml_type"] log.debug(("NEXCORE - ssl={} exchange={} routing_key={}").format( settings.ANTINEX_SSL_OPTIONS, settings.ANTINEX_EXCHANGE_NAME, settings.ANTINEX_ROUTING_KEY)) try: if settings.ANTINEX_WORKER_SSL_ENABLED: log.debug("connecting with ssl") conn = Connection(settings.ANTINEX_AUTH_URL, login_method="EXTERNAL", ssl=settings.ANTINEX_SSL_OPTIONS) else: log.debug("connecting without ssl") conn = Connection(settings.ANTINEX_AUTH_URL) # end of connecting conn.connect() log.debug("getting channel") channel = conn.channel() core_exchange = Exchange(settings.ANTINEX_EXCHANGE_NAME, type=settings.ANTINEX_EXCHANGE_TYPE, durable=True) log.debug("creating producer") producer = Producer(channel=channel, auto_declare=True, serializer="json") try: log.debug("declaring exchange") producer.declare() except Exception as k: log.error(("declare exchange failed with ex={}").format(k)) # end of try to declare exchange which can fail if it exists core_queue = Queue(settings.ANTINEX_QUEUE_NAME, core_exchange, routing_key=settings.ANTINEX_ROUTING_KEY, durable=True) try: log.debug("declaring queue") core_queue.maybe_bind(conn) core_queue.declare() except Exception as k: log.error(("declare queue={} routing_key={} failed with ex={}" ).format(settings.ANTINEX_QUEUE_NAME, settings.ANTINEX_ROUTING_KEY, k)) # end of try to declare queue which can fail if it exists log.info( ("publishing exchange={} routing_key={} persist={}").format( core_exchange.name, settings.ANTINEX_ROUTING_KEY, settings.ANTINEX_DELIVERY_MODE)) producer.publish(body=publish_node["body"], exchange=core_exchange.name, routing_key=settings.ANTINEX_ROUTING_KEY, auto_declare=True, serializer="json", delivery_mode=settings.ANTINEX_DELIVERY_MODE) except Exception as e: log.info(("Failed to publish to core req={} with ex={}").format( publish_node, e)) # try/ex if conn: conn.release() log.info(("task_publish_to_core - done")) else: log.debug("core - disabled") # publish to the core if enabled return None
import paho.mqtt.client as mqtt
from influxdb import InfluxDBClient
import ast
import json
import threading
from kombu import Producer, Connection, Consumer, exceptions, Exchange, Queue, uuid
from kombu.utils.compat import nested

BROKER_CLOUD = "localhost"
producer_connection = Connection(BROKER_CLOUD)
consumer_connection = Connection(BROKER_CLOUD)
MODE = "PULL"  # PUSH or PULL
exchange = Exchange("IoT", type="direct")
TIME_COLLECT = 5
list_platform_id = []


def collect():
    print("Collect the states of the devices")
    for platform_id in list_platform_id:
        collect_by_platform_id(platform_id)
    threading.Timer(TIME_COLLECT, collect).start()


def collect_by_platform_id(platform_id):
    print('Collect data from platform_id: ', str(platform_id))
    message_request = {
def setUp(self):
    self.connection = Connection(transport=Transport)
    self.channel = self.connection.channel()

def test_custom_port(self):

    class Transport(pyamqp.Transport):
        Connection = MockConnection

    c = Connection(port=1337, transport=Transport).connect()
    assert c['host'] == '127.0.0.1:1337'

def test_close_poller_not_active(self):
    c = Connection(transport=Transport).channel()
    cycle = c.connection.cycle
    c.client.connection
    c.close()
    assert c not in cycle._channels
class Worker(ConsumerMixin):

    def __init__(self, connection):
        self.connection = connection

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=task_queues,
                         callbacks=[self.process_task])]

    def process_task(self, body, message):
        fun = body['fun']
        args = body['args']
        kwargs = body['kwargs']
        logger.info('Got task: %s', reprcall(fun.__name__, args, kwargs))
        try:
            fun(*args, **kwdict(kwargs))
        except Exception as exc:
            logger.error('task raised exception: %r', exc)
        message.ack()


if __name__ == '__main__':
    from kombu import Connection
    from kombu.utils.debug import setup_logging

    setup_logging(loglevel='INFO')

    with Connection('amqp://*****:*****@localhost:5672//') as conn:
        try:
            Worker(conn).run()
        except KeyboardInterrupt:
            print('bye bye')
def setup(self):
    self.connection = Connection('pyamqp://')
    self.transport = self.connection.transport

def test_db_port(self):
    c1 = Connection(port=None, transport=Transport).channel()
    c1.close()

    c2 = Connection(port=9999, transport=Transport).channel()
    c2.close()

def test_default_port(self):

    class Transport(pyamqp.Transport):
        Connection = MockConnection

    c = Connection(port=None, transport=Transport).connect()
    assert c['host'] == '127.0.0.1:%s' % (Transport.default_port, )

def setup(self):
    self.connection = Connection(transport=Transport)
    self.exchange = Exchange('test_Redis', type='direct')
    self.queue = Queue('test_Redis', self.exchange, 'test_Redis')

def get_notifier():
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return Notifier(conn, [ACTIONUPDATE_WORK_Q],
                        trigger_dispatcher=TriggerDispatcher(LOG))
class test_Redis: def setup(self): self.connection = Connection(transport=Transport) self.exchange = Exchange('test_Redis', type='direct') self.queue = Queue('test_Redis', self.exchange, 'test_Redis') def teardown(self): self.connection.close() def test_publish__get(self): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() producer.publish({'hello': 'world'}) assert self.queue(channel).get().payload == {'hello': 'world'} assert self.queue(channel).get() is None assert self.queue(channel).get() is None assert self.queue(channel).get() is None def test_publish__consume(self): connection = Connection(transport=Transport) channel = connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') consumer = Consumer(channel, queues=[self.queue]) producer.publish({'hello2': 'world2'}) _received = [] def callback(message_data, message): _received.append(message_data) message.ack() consumer.register_callback(callback) consumer.consume() assert channel in channel.connection.cycle._channels try: connection.drain_events(timeout=1) assert _received with pytest.raises(socket.timeout): connection.drain_events(timeout=0.01) finally: channel.close() def test_purge(self): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() for i in range(10): producer.publish({'hello': 'world-%s' % (i, )}) assert channel._size('test_Redis') == 10 assert self.queue(channel).purge() == 10 channel.close() def test_db_values(self): Connection(virtual_host=1, transport=Transport).channel() Connection(virtual_host='1', transport=Transport).channel() Connection(virtual_host='/1', transport=Transport).channel() with pytest.raises(Exception): Connection('redis:///foo').channel() def test_db_port(self): c1 = Connection(port=None, transport=Transport).channel() c1.close() c2 = Connection(port=9999, transport=Transport).channel() c2.close() def test_close_poller_not_active(self): c = Connection(transport=Transport).channel() cycle = c.connection.cycle c.client.connection c.close() assert c not in cycle._channels def test_close_ResponseError(self): c = Connection(transport=Transport).channel() c.client.bgsave_raises_ResponseError = True c.close() def test_close_disconnects(self): c = Connection(transport=Transport).channel() conn1 = c.client.connection conn2 = c.subclient.connection c.close() assert conn1.disconnected assert conn2.disconnected def test_get__Empty(self): channel = self.connection.channel() with pytest.raises(Empty): channel._get('does-not-exist') channel.close() def test_get_client(self): with mock.module_exists(*_redis_modules()): conn = Connection(transport=Transport) chan = conn.channel() assert chan.Client assert chan.ResponseError assert conn.transport.connection_errors assert conn.transport.channel_errors def test_check_at_least_we_try_to_connect_and_fail(self): import redis connection = Connection('redis://localhost:65534/') with pytest.raises(redis.exceptions.ConnectionError): chan = connection.channel() chan._size('some_queue')
from typing import List, AnyStr, Callable

from kombu import Connection, Exchange, Queue, Message, Consumer
from kombu.mixins import ConsumerMixin

rabbit_url = "amqp://"


class Worker(ConsumerMixin):

    def __init__(self, queues: List[Queue], connection: Connection) -> None:
        self.connection = connection
        self.queues = queues

    def get_consumers(self, consumer: Callable, channel) -> List[Consumer]:
        return [consumer(
            queues=self.queues,
            callbacks=[self.on_message],
        )]

    def on_message(self, body: AnyStr, message: Message) -> None:
        print('Got message: {0}'.format(body))
        message.ack()


ex = Exchange('exchange-1', type='direct')
q = Queue(name='q-1', exchange=ex, routing_key='BOB')

with Connection(rabbit_url, heartbeat=4) as conn:
    worker = Worker([q], conn)
    worker.run()
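# A small publisher sketch to exercise the ConsumerMixin worker above (an
# illustrative addition, not from the original snippet): it sends a few JSON
# messages to the 'exchange-1' exchange with routing key 'BOB', which the
# worker's queue 'q-1' is bound to, so on_message() is invoked for each one.
from kombu import Connection, Exchange, Producer

ex = Exchange('exchange-1', type='direct')

with Connection("amqp://") as conn:
    producer = Producer(conn, exchange=ex, routing_key='BOB')
    for i in range(3):
        # declare the exchange on first publish so routing succeeds
        producer.publish({'msg': 'hello {0}'.format(i)}, declare=[ex])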
def create_connection(self, **kwargs):
    kwargs.setdefault('transport_options', {'fanout_patterns': True})
    return Connection(transport=Transport, **kwargs)

def test_prepare_not_callable(self):
    P = self.create_resource(10)
    conn = Connection('memory://')
    chan = conn.default_channel
    assert P.prepare(chan) is chan

def test_start_shutdown(self):
    with Connection(cfg.CONF.messaging.url) as conn:
        tracker = results_tracker.ResultsTracker(q_connection=conn)
        eventlet.spawn(tracker.start)
        eventlet.sleep(0.1)
        eventlet.spawn(tracker.shutdown)

def test_prepare_not_callable(self):
    P = self.create_resource(None)
    conn = Connection('memory://')
    assert P.prepare(conn) is conn
class test_Redis(TestCase): def setUp(self): self.connection = Connection(transport=Transport) self.exchange = Exchange('test_Redis', type='direct') self.queue = Queue('test_Redis', self.exchange, 'test_Redis') def tearDown(self): self.connection.close() def test_publish__get(self): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() producer.publish({'hello': 'world'}) self.assertDictEqual( self.queue(channel).get().payload, {'hello': 'world'}) self.assertIsNone(self.queue(channel).get()) self.assertIsNone(self.queue(channel).get()) self.assertIsNone(self.queue(channel).get()) def test_publish__consume(self): connection = Connection(transport=Transport) channel = connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') consumer = Consumer(channel, self.queue) producer.publish({'hello2': 'world2'}) _received = [] def callback(message_data, message): _received.append(message_data) message.ack() consumer.register_callback(callback) consumer.consume() self.assertIn(channel, channel.connection.cycle._channels) try: connection.drain_events(timeout=1) self.assertTrue(_received) with self.assertRaises(socket.timeout): connection.drain_events(timeout=0.01) finally: channel.close() def test_purge(self): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() for i in range(10): producer.publish({'hello': 'world-%s' % (i, )}) self.assertEqual(channel._size('test_Redis'), 10) self.assertEqual(self.queue(channel).purge(), 10) channel.close() def test_db_values(self): Connection(virtual_host=1, transport=Transport).channel() Connection(virtual_host='1', transport=Transport).channel() Connection(virtual_host='/1', transport=Transport).channel() with self.assertRaises(Exception): Connection('redis:///foo').channel() def test_db_port(self): c1 = Connection(port=None, transport=Transport).channel() c1.close() c2 = Connection(port=9999, transport=Transport).channel() c2.close() def test_close_poller_not_active(self): c = Connection(transport=Transport).channel() cycle = c.connection.cycle c.client.connection c.close() self.assertNotIn(c, cycle._channels) def test_close_ResponseError(self): c = Connection(transport=Transport).channel() c.client.bgsave_raises_ResponseError = True c.close() def test_close_disconnects(self): c = Connection(transport=Transport).channel() conn1 = c.client.connection conn2 = c.subclient.connection c.close() self.assertTrue(conn1.disconnected) self.assertTrue(conn2.disconnected) def test_get__Empty(self): channel = self.connection.channel() with self.assertRaises(Empty): channel._get('does-not-exist') channel.close() def test_get_client(self): myredis, exceptions = _redis_modules() @module_exists(myredis, exceptions) def _do_test(): conn = Connection(transport=Transport) chan = conn.channel() self.assertTrue(chan.Client) self.assertTrue(chan.ResponseError) self.assertTrue(conn.transport.connection_errors) self.assertTrue(conn.transport.channel_errors) _do_test()
def test(self): """ Loads a test file that includes crafted bgp updates as input and expected messages as output. """ RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest") RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest") RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "rabbitmq") RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672) RABBITMQ_URI = "amqp://{}:{}@{}:{}//".format(RABBITMQ_USER, RABBITMQ_PASS, RABBITMQ_HOST, RABBITMQ_PORT) RPKI_VALIDATOR_HOST = os.getenv("RPKI_VALIDATOR_HOST", "routinator") RPKI_VALIDATOR_PORT = os.getenv("RPKI_VALIDATOR_PORT", 3323) # check RPKI RTR manager connectivity while True: try: rtrmanager = RTRManager(RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT) rtrmanager.start() print("Connected to RPKI VALIDATOR '{}:{}'".format( RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT)) rtrmanager.stop() break except Exception: print("Could not connect to RPKI VALIDATOR '{}:{}'".format( RPKI_VALIDATOR_HOST, RPKI_VALIDATOR_PORT)) print("Retrying in 5 seconds...") time.sleep(5) # exchanges self.update_exchange = Exchange("bgp-update", type="direct", durable=False, delivery_mode=1) self.hijack_exchange = Exchange("hijack-update", type="direct", durable=False, delivery_mode=1) self.pg_amq_bridge = Exchange("amq.direct", type="direct", durable=True, delivery_mode=1) # queues self.update_queue = Queue( "detection-testing", exchange=self.pg_amq_bridge, routing_key="update-update", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) self.hijack_queue = Queue( "hijack-testing", exchange=self.hijack_exchange, routing_key="update", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) self.hijack_db_queue = Queue( "hijack-db-testing", exchange=self.pg_amq_bridge, routing_key="hijack-update", durable=False, auto_delete=True, max_priority=1, consumer_arguments={"x-priority": 1}, ) with Connection(RABBITMQ_URI) as connection: print("Waiting for pg_amq exchange..") Tester.waitExchange(self.pg_amq_bridge, connection.default_channel) print("Waiting for hijack exchange..") Tester.waitExchange(self.hijack_exchange, connection.default_channel) print("Waiting for update exchange..") Tester.waitExchange(self.update_exchange, connection.default_channel) self.supervisor.supervisor.startAllProcesses() # print( # "Sleeping for 60 seconds to allow the RTR server to populate its db..." 
# ) # time.sleep(60) # query database for the states of the processes db_con = self.getDbConnection() db_cur = db_con.cursor() query = "SELECT name FROM process_states WHERE running=True" running_modules = set() # wait until all 6 modules are running while len(running_modules) < 6: db_cur.execute(query) entries = db_cur.fetchall() for entry in entries: running_modules.add(entry[0]) db_con.commit() print("Running modules: {}".format(running_modules)) print("{}/6 modules are running.".format(len(running_modules))) time.sleep(1) Tester.config_request_rpc(connection) time.sleep(10) for testfile in os.listdir("testfiles/"): self.clear() self.curr_test = testfile self.messages = {} # load test with open("testfiles/{}".format(testfile), "r") as f: self.messages = json.load(f) send_len = len(self.messages) with nested( connection.Consumer( self.hijack_queue, callbacks=[self.validate_message], accept=["ujson"], ), connection.Consumer( self.update_queue, callbacks=[self.validate_message], accept=["ujson", "txtjson"], ), connection.Consumer( self.hijack_db_queue, callbacks=[self.validate_message], accept=["ujson", "txtjson"], ), ): send_cnt = 0 # send and validate all messages in the messages.json file while send_cnt < send_len: self.curr_idx = send_cnt self.send_next_message(connection) send_cnt += 1 # sleep until we receive all expected messages while self.curr_idx != send_cnt: time.sleep(0.1) try: connection.drain_events(timeout=10) except socket.timeout: # avoid infinite loop by timeout assert False, "Consumer timeout" connection.close() time.sleep(5) self.supervisor.supervisor.stopAllProcesses() self.waitProcess("listener", 0) # 0 STOPPED self.waitProcess("clock", 0) # 0 STOPPED self.waitProcess("detection", 0) # 0 STOPPED self.waitProcess("mitigation", 0) # 0 STOPPED self.waitProcess("configuration", 0) # 0 STOPPED self.waitProcess("database", 0) # 0 STOPPED self.waitProcess("observer", 0) # 0 STOPPED
class test_FilesystemTransport: def setup(self): self.channels = set() try: data_folder_in = tempfile.mkdtemp() data_folder_out = tempfile.mkdtemp() except Exception: raise SkipTest('filesystem transport: cannot create tempfiles') self.c = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_in, 'data_folder_out': data_folder_out, }) self.channels.add(self.c.default_channel) self.p = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_out, 'data_folder_out': data_folder_in, }) self.channels.add(self.p.default_channel) self.e = Exchange('test_transport_filesystem') self.q = Queue('test_transport_filesystem', exchange=self.e, routing_key='test_transport_filesystem') self.q2 = Queue('test_transport_filesystem2', exchange=self.e, routing_key='test_transport_filesystem2') def teardown(self): # make sure we don't attempt to restore messages at shutdown. for channel in self.channels: try: channel._qos._dirty.clear() except AttributeError: pass try: channel._qos._delivered.clear() except AttributeError: pass def _add_channel(self, channel): self.channels.add(channel) return channel def test_produce_consume_noack(self): producer = Producer(self._add_channel(self.p.channel()), self.e) consumer = Consumer(self._add_channel(self.c.channel()), self.q, no_ack=True) for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') _received = [] def callback(message_data, message): _received.append(message) consumer.register_callback(callback) consumer.consume() while 1: if len(_received) == 10: break self.c.drain_events() assert len(_received) == 10 def test_produce_consume(self): producer_channel = self._add_channel(self.p.channel()) consumer_channel = self._add_channel(self.c.channel()) producer = Producer(producer_channel, self.e) consumer1 = Consumer(consumer_channel, self.q) consumer2 = Consumer(consumer_channel, self.q2) self.q2(consumer_channel).declare() for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') _received1 = [] _received2 = [] def callback1(message_data, message): _received1.append(message) message.ack() def callback2(message_data, message): _received2.append(message) message.ack() consumer1.register_callback(callback1) consumer2.register_callback(callback2) consumer1.consume() consumer2.consume() while 1: if len(_received1) + len(_received2) == 20: break self.c.drain_events() assert len(_received1) + len(_received2) == 20 # compression producer.publish({'compressed': True}, routing_key='test_transport_filesystem', compression='zlib') m = self.q(consumer_channel).get() assert m.payload == {'compressed': True} # queue.delete for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') assert self.q(consumer_channel).get() self.q(consumer_channel).delete() self.q(consumer_channel).declare() assert self.q(consumer_channel).get() is None # queue.purge for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') assert self.q2(consumer_channel).get() self.q2(consumer_channel).purge() assert self.q2(consumer_channel).get() is None