def _do_test():
    """Verify the transport exposes client/error attributes on connect."""
    connection = Connection(transport=Transport)
    channel = connection.channel()
    self.assertTrue(channel.Client)
    self.assertTrue(channel.ResponseError)
    self.assertTrue(connection.transport.connection_errors)
    self.assertTrue(connection.transport.channel_errors)
def __init__(self, host, port, user_id, password, virt_host, exchange_name,
             routing_key, consumer_queue_name):
    """Set up an RPC listener bound to a topic exchange.

    Declares (lazily) a non-durable topic exchange and a durable consumer
    queue, attaches a consumer whose messages are funneled through a
    thread-safe queue, and registers that queue's selectable object so
    ``_dispatch_messages`` runs when messages arrive.

    @param host: broker host
    @param port: broker port
    @param user_id: broker login
    @param password: broker password
    @param virt_host: broker virtual host
    @param exchange_name: name of the topic exchange to use
    @param routing_key: binding key for the consumer queue
    @param consumer_queue_name: name of the durable consumer queue
    """
    super(RPCListener, self).__init__()
    # Event used to signal the listener to stop (set elsewhere).
    self._exit = threading.Event()
    self._exchange_name = exchange_name
    self._routing_key = routing_key
    self._consumer_queue_name = consumer_queue_name
    self._exchange = Exchange(self._exchange_name, type='topic',
                              durable=False)
    self._connection = Connection(host, user_id, password, virt_host, port)
    self._rpc_receive_queue = Queue(self._consumer_queue_name, durable=True,
                                    exchange=self._exchange,
                                    routing_key=self._routing_key)
    self._consumer = Consumer(self._connection, self._rpc_receive_queue)
    # All deliveries go through _callback, which is expected to feed
    # self._message_queue.
    self._consumer.register_callback(self._callback)
    self._message_queue = selectable.ThreadQueue(consumer_queue_name)
    # RLock: handlers/filters may be mutated from callback context too.
    self._message_filters_lock = threading.RLock()
    self._message_filters = dict()
    self._message_handlers = dict()
    # Dispatch pending messages whenever the queue's selobj is readable.
    selobj.selobj_add_read_obj(self._message_queue.selobj,
                               self._dispatch_messages)
def test_close_disconnects(self):
    """Closing a channel must disconnect both underlying redis clients."""
    channel = Connection(transport=Transport).channel()
    client_conn = channel.client.connection
    sub_conn = channel.subclient.connection
    channel.close()
    self.assertTrue(client_conn.disconnected)
    self.assertTrue(sub_conn.disconnected)
def test_clone(self):
    """A cloned connection keeps both the uri prefix and the hostname."""
    hostname = 'sqlite:///celerydb.sqlite'
    original = Connection('+'.join(['sqla', hostname]))
    self.assertEqual(original.uri_prefix, 'sqla')
    self.assertEqual(original.hostname, hostname)

    copy = original.clone()
    self.assertEqual(copy.hostname, hostname)
    self.assertEqual(copy.uri_prefix, 'sqla')
def setUp(self):
    """Create a memory-transport connection, one exchange and two queues."""
    self.c = Connection(transport='memory')
    self.e = Exchange('test_transport_memory')

    def bound_queue(name):
        # Each queue uses its own name as the routing key.
        return Queue(name, exchange=self.e, routing_key=name)

    self.q = bound_queue('test_transport_memory')
    self.q2 = bound_queue('test_transport_memory2')
def __init__(self, name, uri, exchange, durable=False, auto_delete=False,
             serializer=None, transport_options=None, ssl=False,
             heartbeat=DEFAULT_HEARTBEAT, sysname=None, retry=None,
             errback=None):
    """Set up a Dashi connection

    @param name: name of destination service queue used by consumers
    @param uri: broker URI (e.g. 'amqp://*****:*****@localhost:5672//')
    @param exchange: name of exchange to create and use
    @param durable: if True, destination service queue and exchange will be
    created as durable
    @param auto_delete: if True, destination service queue and exchange
    will be deleted when all consumers are gone
    @param serializer: specify a serializer for message encoding
    @param transport_options: custom parameter dict for the transport backend
    @param ssl: if True, connect to the broker over SSL
    @param heartbeat: amqp heartbeat interval
    @param sysname: a prefix for exchanges and queues for namespacing
    @param retry: a RetryBackoff object, or None to use defaults
    @param errback: callback called within except block of connection failures
    """
    self._heartbeat_interval = heartbeat
    self._conn = Connection(uri, transport_options=transport_options,
                            ssl=ssl, heartbeat=self._heartbeat_interval)
    if heartbeat:
        # create a connection template for pooled connections. These cannot
        # have heartbeat enabled.
        self._pool_conn = Connection(uri,
                                     transport_options=transport_options,
                                     ssl=ssl)
    else:
        self._pool_conn = self._conn

    self._name = name
    self._sysname = sysname
    # Namespace the exchange name with the sysname prefix, when given.
    if self._sysname is not None:
        self._exchange_name = "%s.%s" % (self._sysname, exchange)
    else:
        self._exchange_name = exchange
    self._exchange = Exchange(name=self._exchange_name, type='direct',
                              durable=durable, auto_delete=auto_delete)

    # visible attributes
    self.durable = durable
    self.auto_delete = auto_delete

    self._consumer = None
    # Maps exception names linked by consumers -- populated elsewhere.
    self._linked_exceptions = {}
    self._serializer = serializer

    if retry is None:
        self.retry = RetryBackoff()
    else:
        self.retry = retry

    self._errback = errback
def main():
    """Publish MSG to the 'neutron' topic exchange under key 'q-plugin'."""
    connection = Connection('amqp://*****:*****@localhost:5672//')
    channel = connection.channel()
    exchange = Exchange('neutron', type='topic')
    producer = Producer(channel=channel, exchange=exchange,
                        routing_key='q-plugin')
    producer.publish(MSG)
def __init__(self, host_name, port, userid, password, virtual_host,
             encoder_class):
    """Open the broker connection and hook spider open/close signals."""
    self.connection = Connection(
        hostname=host_name,
        port=port,
        userid=userid,
        password=password,
        virtual_host=virtual_host,
    )
    self.encoder = encoder_class()
    for handler, signal in ((self.spider_opened, signals.spider_opened),
                            (self.spider_closed, signals.spider_closed)):
        dispatcher.connect(handler, signal)
def test_url_parser(self):
    """Both 'sqlalchemy+' and 'sqla+' URL prefixes work; unknown ones fail."""
    with patch('kombu.transport.sqlalchemy.Channel._open'):
        for scheme in ('sqlalchemy', 'sqla'):
            Connection('%s+sqlite:///celerydb.sqlite' % scheme).connect()

        # Should prevent regression fixed by f187ccd
        with self.assertRaises(KeyError):
            Connection('sqlb+sqlite:///celerydb.sqlite').connect()
def test_db_port(self):
    """A None port falls back to the transport default; explicit ports stick."""
    default_chan = Connection(port=None, transport=Transport).channel()
    self.assertEqual(default_chan.client.port, Transport.default_port)
    default_chan.close()

    explicit_chan = Connection(port=9999, transport=Transport).channel()
    self.assertEqual(explicit_chan.client.port, 9999)
    explicit_chan.close()
def setUpClass(cls):
    """Bring up the player fixture, eager celery, a broker queue, mock logger."""
    Unittest_with_player.setUpClass()
    celery_settings = {'migrate.celery.CELERY_ALWAYS_EAGER': True}
    setup_celery(celery_settings)
    Test._connection = Connection(get_broker_url())
    Test._queue = Test._connection.SimpleQueue(
        unittest_with_player.queue, no_ack=True)
    conductor.logger = MockLogger('test')
def get(self):
    """ Get subscriptions """
    with open(SUBSCRIPTIONS_FILE) as s:
        subscriptions = json.load(s)

    app_subscriptions = []
    for subscription in subscriptions:
        # Only expose subscriptions belonging to the caller's app and org.
        if (subscription.get("app_name") != g.app_name
                or subscription.get("org_name") != g.org_name):
            continue
        backend = subscription.get("backend")
        app_subscriptions.append({
            "name": utils.infer_subscription_name(subscription),
            **subscription,
            # as_uri() masks credentials in the rendered URI.
            "broker": Connection(subscription.get("broker")).as_uri(),
            "backend": Connection(backend).as_uri() if backend else None,
        })
    return app_subscriptions, 200
def __init__(self, settings):
    """Wire up the redis-backed queue used for ES synchronisation."""
    redis_url = settings['redis.url']
    self.connection = Connection(redis_url,
                                 virtual_host=settings['redis.db_queue'])
    self.exchange = Exchange(settings['redis.exchange'], type='direct')
    self.queue = Queue(settings['redis.queue_es_sync'], self.exchange)
def test_db_values(self):
    """virtual_host values 1, '1' and '/1' all map to db 1; bad names raise."""
    for vhost in (1, '1', '/1'):
        channel = Connection(virtual_host=vhost,
                             transport=Transport).channel()
        self.assertEqual(channel.client.db, 1)

    with self.assertRaises(Exception):
        Connection(virtual_host='/foo', transport=Transport).channel()
def test_default_port(self):
    """With port=None the transport's default port is used for the host."""
    class Transport(pyamqp.Transport):
        Connection = MockConnection

    info = Connection(port=None, transport=Transport).connect()
    expected_host = '127.0.0.1:%s' % (Transport.default_port, )
    self.assertEqual(info['host'], expected_host)
def __init__(self, config: ConfigDict) -> None:
    """Connect to the broker and open the configured SimpleQueue.

    Raises whatever ensure_connection raises if the broker is not
    reachable within the timeout.
    """
    self.config: ConfigDict = config
    self.name: str = config["QUEUE_NAME"]
    self.broker_url: str = config["BROKER_URL"]
    # NOTE(review): `connection_timeout` and `serializer` are read from
    # the instance but not set here -- presumably class attributes; confirm.
    self.conn: Connection = Connection(
        self.broker_url, connect_timeout=self.connection_timeout)
    # Fail fast if the broker cannot be reached.
    self.conn.ensure_connection(timeout=self.connection_timeout)
    self.queue: SimpleQueue = self.conn.SimpleQueue(
        self.name, serializer=self.serializer)
def get(self):
    """ Get subscriptions """
    app_name = request.headers["x-leek-app-name"]
    with open(SUBSCRIPTIONS_FILE) as s:
        subscriptions = json.load(s)

    app_subscriptions = []
    for subscription_name, subscription in subscriptions.items():
        # Only expose subscriptions belonging to the caller's app and org.
        if (subscription.get("app_name") != app_name
                or subscription.get("org_name") != g.org_name):
            continue
        backend = subscription.get("backend")
        app_subscriptions.append({
            "name": subscription_name,
            **subscription,
            # as_uri() masks credentials in the rendered URI.
            "broker": Connection(subscription.get("broker")).as_uri(),
            "backend": Connection(backend).as_uri() if backend else None,
        })
    return app_subscriptions, 200
def __init__(self):
    """Connect to CloudAMQP and declare a direct exchange with three queues."""
    self.connection = Connection(
        'amqp://*****:*****@fish.rmq.cloudamqp.com/kgmcrbkn')
    # exchange queue - Direct connection
    self.direct_exchange = Exchange(name='test1', type='direct')
    # task_queue1..task_queue3 bound to queue1..queue3 / tasks_queue1..3.
    for index in (1, 2, 3):
        queue = Queue(name='queue%d' % index,
                      exchange=self.direct_exchange,
                      routing_key='tasks_queue%d' % index)
        setattr(self, 'task_queue%d' % index, queue)
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application. """
    config = Configurator(settings=settings)
    config.include('pyramid_chameleon')
    config.add_static_view('static', 'static', cache_max_age=3600)
    config.add_route('home', '/')
    config.scan()
    # Shared kombu connection for background queueing, stashed on the
    # registry so views can reach it via request.registry.
    config.registry.queue_connection = Connection('redis://localhost:6379/')
    return config.make_wsgi_app()
def post_to_archived_queue(payload):
    """Publish `payload` to the processed fanout exchange, if enabled."""
    if not settings.PROCESSED_EXCHANGE_ENABLED:
        return

    # Same policy is used for connecting and for publishing.
    retry_policy = {
        'interval_start': 0,
        'interval_step': 1,
        'interval_max': 4,
        'max_retries': 5,
    }
    processed_exchange = Exchange(settings.PROCESSED_EXCHANGE_NAME,
                                  type='fanout')
    with Connection(settings.QUEUE_BROKER_URL,
                    transport_options=retry_policy) as conn:
        conn.Producer(exchange=processed_exchange).publish(
            payload,
            delivery_mode='persistent',
            retry=True,
            retry_policy=retry_policy,
        )
def post(self):
    """ Add subscription """
    data = request.get_json()
    app_name = request.headers["x-leek-app-name"]
    subscription = SubscriptionSchema.validate(data)
    subscription.update({
        "org_name": g.org_name,
        "app_name": app_name,
        "app_key": settings.LEEK_AGENT_API_SECRET,
        "api_url": settings.LEEK_API_URL,
    })
    name = subscription.pop("name")

    # Reject a duplicate subscription name.
    with open(SUBSCRIPTIONS_FILE) as subscriptions_file:
        subscriptions = json.load(subscriptions_file)
    if subscriptions.get(name):
        return responses.subscription_already_exist

    # Make sure the broker is reachable before persisting anything.
    try:
        connection = Connection(subscription["broker"])
        connection.ensure_connection(max_retries=2)
        connection.release()
    except AccessRefused:
        return responses.wrong_access_refused
    except Exception:
        return responses.broker_not_reachable

    # Persist the new subscription.
    subscriptions[name] = subscription
    with open(SUBSCRIPTIONS_FILE, 'w') as f:
        json.dump(subscriptions, f, indent=4, sort_keys=False)
    return {"name": name, **subscription}, 200
def connection_thread(url, results, hide_password=False):
    """Probe a messaging broker URL and append the outcome to `results`.

    Each result is a dict with the probed 'url' and, on failure, an
    'exception' entry describing what went wrong. With hide_password=True
    the credentials in the reported URL are masked.
    """
    # Imported here so the module loads without these optional deps.
    from oslo_config import cfg
    from oslo_messaging.transport import TransportURL
    from pika import exceptions as pika_exceptions
    from pika import URLParameters as PikaUrlParameters
    from pika import BlockingConnection as PikaBlockingConnection
    try:
        parsed_url = TransportURL.parse(cfg.CONF, url)
        if hide_password:
            # Mask the password portion (between ':' and '@') when reporting.
            url = re.sub(':+[^:@]+@', ':******@', url)
    except Exception as e:
        results.append({'url': url, 'exception': e})
    else:
        test_url, driver = parse_test_url(parsed_url)
        try:
            if driver == 'kombu':
                connection = Connection(test_url)
                connection.connect()
                connection.close()
            elif driver == 'pika':
                params = PikaUrlParameters(test_url)
                params.socket_timeout = 5
                conn = PikaBlockingConnection(params)
                conn.close()
        except (OSError, pika_exceptions.ConnectionClosed):
            results.append({'url': url,
                            'exception': _('Url not reachable')})
        except (AccessRefused, pika_exceptions.ProbableAuthenticationError):
            results.append({
                'url': url,
                'exception': _('Credentials incorrect')
            })
        except Exception as e:
            # Anything else: report the stringified error.
            results.append({'url': url, 'exception': force_text(e)})
        else:
            # Success: no 'exception' key.
            results.append({'url': url})
def setUp(self):
    """Create two filesystem-transport connections wired back-to-back."""
    try:
        data_folder_in = tempfile.mkdtemp()
        data_folder_out = tempfile.mkdtemp()
    except Exception:
        raise SkipTest('filesystem transport: cannot create tempfiles')

    def fs_connection(folder_in, folder_out):
        return Connection(transport='filesystem', transport_options={
            'data_folder_in': folder_in,
            'data_folder_out': folder_out,
        })

    # Consumer (c) reads what the producer (p) writes, and vice versa.
    self.c = fs_connection(data_folder_in, data_folder_out)
    self.p = fs_connection(data_folder_out, data_folder_in)

    self.e = Exchange('test_transport_filesystem')
    self.q = Queue('test_transport_filesystem',
                   exchange=self.e,
                   routing_key='test_transport_filesystem')
    self.q2 = Queue('test_transport_filesystem2',
                    exchange=self.e,
                    routing_key='test_transport_filesystem2')
def main():
    """Consume 'video' messages from the 'media' direct exchange forever.

    Connects to the broker, declares exchange/queue, and drains events in
    a loop.  The consumer is cancelled on the way out (including on
    socket.timeout from an idle broker).
    """
    # BUG FIX: the original used a Python 2 `print` statement (a syntax
    # error under Python 3, which the rest of this file targets), and
    # wrapped Connection() in a no-op `try/except Exception: raise`.
    connection = Connection('amqp://*****:*****@192.168.8.108:5672//')
    print('connecting to amqp server succeed!')
    channel = connection.channel()
    _exchange = Exchange('media', type='direct', channel=channel)
    video_queue = Queue('video', exchange=_exchange, routing_key='video',
                        channel=channel)
    consumer = Consumer(channel, queues=[video_queue],
                        callbacks=[process_data])
    consumer.consume()
    try:
        while True:
            # Raises socket.timeout after 10 idle seconds.
            connection.drain_events(timeout=10)
    finally:
        # BUG FIX: consumer.cancel() was dead code after the infinite
        # loop; run it on exit/error instead.
        consumer.cancel()
class MessageQueuePipeline(object):
    """Emit processed items to a RabbitMQ exchange/queue"""

    def __init__(self, host_name, port, userid, password, virtual_host,
                 encoder_class):
        """Open the broker connection and register spider lifecycle hooks."""
        self.connection = Connection(hostname=host_name, port=port,
                                     userid=userid, password=password,
                                     virtual_host=virtual_host)
        self.encoder = encoder_class()
        dispatcher.connect(self.spider_opened, signals.spider_opened)
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline from scrapy settings."""
        return cls(
            settings.get('BROKER_HOST'),
            settings.get('BROKER_PORT'),
            settings.get('BROKER_USERID'),
            settings.get('BROKER_PASSWORD'),
            settings.get('BROKER_VIRTUAL_HOST'),
            settings.get('MESSAGE_Q_SERIALIZER', ScrapyJSONEncoder),
        )

    def spider_opened(self, spider):
        """Create the per-spider queue, named after the spider."""
        self.queue = self.connection.SimpleQueue(spider.name)

    def spider_closed(self, spider):
        """Tear down the queue and the broker connection."""
        self.queue.close()
        self.connection.close()

    def process_item(self, item, spider):
        """Serialize and enqueue the item off the reactor thread."""
        return deferToThread(self._process_item, item, spider)

    def _process_item(self, item, spider):
        # Runs in a worker thread; SimpleQueue.put publishes the payload.
        self.queue.put(self.encoder.encode(dict(item)))
        return item
def test_url_parser(self):
    """Check mongodb URL parsing: default db name, hosts, replica nodes,
    and credential failure.

    NOTE: the unconditional SkipTest below makes everything after it
    unreachable unless the skip is removed for functional runs.
    """
    from kombu.transport import mongodb
    from pymongo.errors import ConfigurationError

    raise SkipTest('Test is functional: it actually connects to mongod')

    class Transport(mongodb.Transport):
        Connection = MockConnection

    # Bare scheme: falls back to default db and localhost.
    url = 'mongodb://'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    self.assertEquals(client.name, 'kombu_default')
    self.assertEquals(client.connection.host, '127.0.0.1')

    url = 'mongodb://localhost'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    self.assertEquals(client.name, 'kombu_default')

    # Explicit database name in the path.
    url = 'mongodb://localhost/dbname'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    self.assertEquals(client.name, 'dbname')

    # Multiple hosts become connection nodes.
    url = 'mongodb://localhost,example.org:29017/dbname'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    nodes = client.connection.nodes
    self.assertEquals(len(nodes), 2)
    self.assertTrue(('example.org', 29017) in nodes)
    self.assertEquals(client.name, 'dbname')

    # Passing options breaks kombu's _init_params method
    # url = 'mongodb://localhost,localhost2:29017/dbname?safe=true'
    # c = Connection(url, transport=Transport).connect()
    # client = c.channels[0].client

    url = 'mongodb://*****:*****@localhost/dbname'
    c = Connection(url, transport=Transport).connect()

    # Assuming there's no user 'username' with password 'password'
    # configured in mongodb
    # Needed, otherwise the error would be rose before
    # the assertRaises is called
    def get_client():
        # Attribute access alone triggers the (failing) authentication.
        c.channels[0].client

    self.assertRaises(ConfigurationError, get_client)
def init_rabbit_mq(self):
    """Declare the pypo exchange/queue and run the blocking consumer.

    Any failure is logged rather than propagated.
    """
    self.logger.info("Initializing RabbitMQ stuff")
    try:
        schedule_exchange = Exchange("airtime-pypo", "direct",
                                     durable=True, auto_delete=True)
        schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange,
                               key="foo")
        connection = Connection(self.config["host"],
                                self.config["user"],
                                self.config["password"],
                                self.config["vhost"],
                                heartbeat=5)
        with connection:
            RabbitConsumer(connection, [schedule_queue], self).run()
    except Exception as e:
        self.logger.error(e)
def init_rabbit_mq(self):
    """Declare the pypo exchange/queue and run the blocking consumer.

    Any failure is logged rather than propagated.
    """
    logger.info("Initializing RabbitMQ stuff")
    try:
        schedule_exchange = Exchange("airtime-pypo", "direct",
                                     durable=True, auto_delete=True)
        schedule_queue = Queue("pypo-fetch", exchange=schedule_exchange,
                               key="foo")
        broker_url = (
            f"amqp://{self.config.user}:{self.config.password}"
            f"@{self.config.host}:{self.config.port}"
            f"/{self.config.vhost}"
        )
        with Connection(broker_url, heartbeat=5) as connection:
            RabbitConsumer(connection, [schedule_queue], self).run()
    except Exception as e:
        logger.error(e)
def post(self):
    """ Add subscription """
    data = request.get_json()
    subscription = SubscriptionSchema.validate(data)
    if subscription["batch_max_number_of_messages"] > subscription[
            "prefetch_count"]:
        raise SchemaError(
            "Batch max number of messages should be <= prefetch count!")
    subscription.update({
        "org_name": g.org_name,
        "app_name": g.app_name,
        "app_key": settings.LEEK_AGENT_API_SECRET,
        "api_url": settings.LEEK_API_URL
    })
    # Check subscription already exist
    exist, _ = utils.lookup_subscription(subscription["app_name"],
                                         subscription["app_env"])
    if exist:
        return responses.subscription_already_exist
    # Ensure connection
    try:
        connection = Connection(subscription["broker"])
        connection.ensure_connection(max_retries=2)
        connection.release()
    except AccessRefused:
        return responses.wrong_access_refused
    except Exception:
        return responses.broker_not_reachable
    # Add subscription
    with open(SUBSCRIPTIONS_FILE, "r+") as subscriptions_file:
        subscriptions = json.load(subscriptions_file)
        subscriptions.append(subscription)
        subscriptions_file.seek(0)
        json.dump(subscriptions, subscriptions_file)
        # BUG FIX: after seek(0) + dump, any leftover bytes from longer
        # previous content would trail the new JSON and corrupt the file;
        # truncate to the freshly written length.
        subscriptions_file.truncate()
    return {
        "name": utils.infer_subscription_name(subscription),
        **subscription
    }, 200
def test_connection(host, port, user_id, password, virt_host, exchange_name,
                    queue_name):
    """ Test a connection to an exchange on a virtual host

    Returns True only when the broker is reachable AND the named topic
    exchange and durable queue already exist (checked via passive
    declares); any failure is logged and yields False.
    """
    connection = None
    connected = False
    success = False
    try:
        # Connect to the virtual host - will raise exception if it fails.
        connection = Connection(host, user_id, password, virt_host, port)
        connection.connect()
        connected = connection.connected
        if connected:
            # Check whether exchange exists - will raise exception if it
            # fails. (passive=True: declare fails unless it already exists)
            exchange = Exchange(exchange_name, channel=connection,
                                type='topic', durable=False, passive=True)
            exchange.declare()
            # Check whether the queue exists - will raise exception if it
            # fails.
            rpc_receive_queue = Queue(queue_name, durable=True,
                                      exchange=exchange, channel=connection)
            rpc_receive_queue.queue_declare(passive=True)
            success = True
    except Exception as e:
        DLOG.info("Unable to connect to virt_host %s, exchange %s, "
                  "error: %s" % (virt_host, exchange_name, e))
    finally:
        # Only close if the connect() above actually succeeded.
        if connected:
            connection.close()
    return success
def test_publish__consume(self):
    """A published message is delivered via drain_events and acked."""
    connection = Connection(transport=Transport)
    channel = connection.channel()
    producer = Producer(channel, self.exchange, routing_key='test_Redis')
    consumer = Consumer(channel, self.queue)
    producer.publish({'hello2': 'world2'})

    received = []

    def on_message(body, message):
        received.append(body)
        message.ack()

    consumer.register_callback(on_message)
    consumer.consume()
    self.assertIn(channel, channel.connection.cycle._channels)
    try:
        connection.drain_events(timeout=1)
        self.assertTrue(received)
        # Queue is now empty: a second drain must time out.
        with self.assertRaises(socket.timeout):
            connection.drain_events(timeout=0.01)
    finally:
        channel.close()
def setUp(self):
    # Fresh redis-transport connection plus a direct exchange and a queue
    # bound with routing key 'test_Redis' for each test.
    self.connection = Connection(transport=Transport)
    self.exchange = Exchange('test_Redis', type='direct')
    self.queue = Queue('test_Redis', self.exchange, 'test_Redis')
class test_MemoryTransport(TestCase):
    """Exercises the in-memory transport end to end: publish/consume with
    and without acks, compression, queue delete/purge, event draining,
    and internal queue bookkeeping."""

    def setUp(self):
        self.c = Connection(transport='memory')
        self.e = Exchange('test_transport_memory')
        self.q = Queue('test_transport_memory',
                       exchange=self.e,
                       routing_key='test_transport_memory')
        self.q2 = Queue('test_transport_memory2',
                        exchange=self.e,
                        routing_key='test_transport_memory2')

    def test_produce_consume_noack(self):
        # With no_ack=True all 10 messages arrive without explicit acks.
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer = Consumer(channel, self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume(self):
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer1 = Consumer(channel, self.q)
        consumer2 = Consumer(channel, self.q2)
        self.q2(channel).declare()

        # 10 messages to each of the two queues.
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)
        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_memory',
                         compression='zlib')
        m = self.q(channel).get()
        self.assertDictEqual(m.payload, {'compressed': True})

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory')
        self.assertTrue(self.q(channel).get())
        self.q(channel).delete()
        self.q(channel).declare()
        self.assertIsNone(self.q(channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory2')
        self.assertTrue(self.q2(channel).get())
        self.q2(channel).purge()
        self.assertIsNone(self.q2(channel).get())

    def test_drain_events(self):
        # Draining with no messages pending must time out, with or
        # without open channels.
        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        c1 = self.c.channel()
        c2 = self.c.channel()

        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)
        del(c1)  # so pyflakes doesn't complain.
        del(c2)

    def test_drain_events_unregistered_queue(self):
        # A delivery for a queue no channel knows about raises KeyError.
        c1 = self.c.channel()

        class Cycle(object):

            def get(self, timeout=None):
                return ('foo', 'foo'), c1

        self.c.transport.cycle = Cycle()
        with self.assertRaises(KeyError):
            self.c.drain_events()

    def test_queue_for(self):
        # _queue_for creates the queue on first use and caches it.
        chan = self.c.channel()
        chan.queues.clear()
        x = chan._queue_for('foo')
        self.assertTrue(x)
        self.assertIs(chan._queue_for('foo'), x)
class test_FilesystemTransport(TestCase):
    """Exercises the filesystem transport with two connections wired
    back-to-back (producer's out-folder is the consumer's in-folder):
    publish/consume with and without acks, compression, delete, purge."""

    def setUp(self):
        try:
            data_folder_in = tempfile.mkdtemp()
            data_folder_out = tempfile.mkdtemp()
        except Exception:
            raise SkipTest('filesystem transport: cannot create tempfiles')
        # self.c consumes what self.p produces, and vice versa.
        self.c = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_in,
                                'data_folder_out': data_folder_out,
                            })
        self.p = Connection(transport='filesystem',
                            transport_options={
                                'data_folder_in': data_folder_out,
                                'data_folder_out': data_folder_in,
                            })
        self.e = Exchange('test_transport_filesystem')
        self.q = Queue('test_transport_filesystem',
                       exchange=self.e,
                       routing_key='test_transport_filesystem')
        self.q2 = Queue('test_transport_filesystem2',
                        exchange=self.e,
                        routing_key='test_transport_filesystem2')

    def test_produce_consume_noack(self):
        # With no_ack=True all 10 messages arrive without explicit acks.
        producer = Producer(self.p.channel(), self.e)
        consumer = Consumer(self.c.channel(), self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        self.assertEqual(len(_received), 10)

    def test_produce_consume(self):
        producer_channel = self.p.channel()
        consumer_channel = self.c.channel()
        producer = Producer(producer_channel, self.e)
        consumer1 = Consumer(consumer_channel, self.q)
        consumer2 = Consumer(consumer_channel, self.q2)
        self.q2(consumer_channel).declare()

        # 10 messages to each of the two queues.
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)
        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        self.assertEqual(len(_received1) + len(_received2), 20)

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_filesystem',
                         compression='zlib')
        m = self.q(consumer_channel).get()
        self.assertDictEqual(m.payload, {'compressed': True})

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem')
        self.assertTrue(self.q(consumer_channel).get())
        self.q(consumer_channel).delete()
        self.q(consumer_channel).declare()
        self.assertIsNone(self.q(consumer_channel).get())

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_filesystem2')
        self.assertTrue(self.q2(consumer_channel).get())
        self.q2(consumer_channel).purge()
        self.assertIsNone(self.q2(consumer_channel).get())
class test_Channel(TestCase):
    """Unit tests for the redis transport Channel internals (fanout
    bookkeeping, pub/sub message handling, BRPOP polling, client
    creation and error mapping)."""

    def setUp(self):
        self.connection = Connection(transport=Transport)
        self.channel = self.connection.channel()

    def test_basic_consume_when_fanout_queue(self):
        self.channel.exchange_declare(exchange='txconfan', type='fanout')
        self.channel.queue_declare(queue='txconfanq')
        self.channel.queue_bind(queue='txconfanq', exchange='txconfan')

        self.assertIn('txconfanq', self.channel._fanout_queues)
        self.channel.basic_consume('txconfanq', False, None, 1)
        self.assertIn('txconfanq', self.channel.active_fanout_queues)
        self.assertEqual(self.channel._fanout_to_queue.get('txconfan'),
                         'txconfanq')

    def test_basic_cancel_unknown_delivery_tag(self):
        self.assertIsNone(self.channel.basic_cancel('txaseqwewq'))

    def test_subscribe_no_queues(self):
        self.channel.subclient = Mock()
        self.channel.active_fanout_queues.clear()
        self.channel._subscribe()

        self.assertFalse(self.channel.subclient.subscribe.called)

    def test_subscribe(self):
        self.channel.subclient = Mock()
        self.channel.active_fanout_queues.add('a')
        self.channel.active_fanout_queues.add('b')
        self.channel._fanout_queues.update(a='a', b='b')

        self.channel._subscribe()
        self.assertTrue(self.channel.subclient.subscribe.called)
        s_args, _ = self.channel.subclient.subscribe.call_args
        self.assertItemsEqual(s_args[0], ['a', 'b'])

        # A dropped socket must trigger a reconnect before subscribing.
        self.channel.subclient.connection._sock = None
        self.channel._subscribe()
        self.channel.subclient.connection.connect.assert_called_with()

    def test_handle_unsubscribe_message(self):
        s = self.channel.subclient
        s.subscribed = True
        self.channel._handle_message(s, ['unsubscribe', 'a', 0])
        self.assertFalse(s.subscribed)

    def test_handle_pmessage_message(self):
        self.assertDictEqual(
            self.channel._handle_message(
                self.channel.subclient,
                ['pmessage', 'pattern', 'channel', 'data']),
            {'type': 'pmessage',
             'pattern': 'pattern',
             'channel': 'channel',
             'data': 'data'})

    def test_handle_message(self):
        self.assertDictEqual(
            self.channel._handle_message(
                self.channel.subclient,
                ['type', 'channel', 'data']),
            {'type': 'type',
             'pattern': None,
             'channel': 'channel',
             'data': 'data'})

    def test_brpop_start_but_no_queues(self):
        self.assertIsNone(self.channel._brpop_start())

    def test_receive(self):
        s = self.channel.subclient = Mock()
        self.channel._fanout_to_queue['a'] = 'b'
        s.parse_response.return_value = ['message', 'a',
                                         dumps({'hello': 'world'})]
        payload, queue = self.channel._receive()
        self.assertDictEqual(payload, {'hello': 'world'})
        self.assertEqual(queue, 'b')

    def test_receive_raises(self):
        self.channel._in_listen = True
        s = self.channel.subclient = Mock()
        s.parse_response.side_effect = KeyError('foo')

        with self.assertRaises(redis.Empty):
            self.channel._receive()
        self.assertFalse(self.channel._in_listen)

    def test_receive_empty(self):
        s = self.channel.subclient = Mock()
        s.parse_response.return_value = None

        with self.assertRaises(redis.Empty):
            self.channel._receive()

    def test_receive_different_message_Type(self):
        s = self.channel.subclient = Mock()
        s.parse_response.return_value = ['pmessage', '/foo/', 0, 'data']

        with self.assertRaises(redis.Empty):
            self.channel._receive()

    def test_brpop_read_raises(self):
        c = self.channel.client = Mock()
        c.parse_response.side_effect = KeyError('foo')

        with self.assertRaises(redis.Empty):
            self.channel._brpop_read()

        c.connection.disconnect.assert_called_with()

    def test_brpop_read_gives_None(self):
        c = self.channel.client = Mock()
        c.parse_response.return_value = None

        with self.assertRaises(redis.Empty):
            self.channel._brpop_read()

    def test_poll_error(self):
        c = self.channel.client = Mock()
        c.parse_response = Mock()
        self.channel._poll_error('BRPOP')

        c.parse_response.assert_called_with('BRPOP')

        c.parse_response.side_effect = KeyError('foo')
        self.assertIsNone(self.channel._poll_error('BRPOP'))

    def test_put_fanout(self):
        self.channel._in_poll = False
        c = self.channel.client = Mock()

        body = {'hello': 'world'}
        self.channel._put_fanout('exchange', body)
        c.publish.assert_called_with('exchange', dumps(body))

    def test_delete(self):
        x = self.channel
        self.channel._in_poll = False
        delete = x.client.delete = Mock()
        srem = x.client.srem = Mock()

        x._delete('queue', 'exchange', 'routing_key', None)
        # BUG FIX: `assert_has_call` is not a Mock assertion -- Mock
        # auto-creates it and the call silently passes no matter what.
        # `assert_any_call` actually verifies the recorded calls.
        delete.assert_any_call('queue')
        srem.assert_any_call(x.keyprefix_queue % ('exchange', ),
                             x.sep.join(['routing_key', '', 'queue']))

    def test_has_queue(self):
        self.channel._in_poll = False
        exists = self.channel.client.exists = Mock()
        exists.return_value = True
        self.assertTrue(self.channel._has_queue('foo'))
        # BUG FIX: was the no-op `assert_has_call` (see test_delete).
        exists.assert_any_call('foo')

        exists.return_value = False
        self.assertFalse(self.channel._has_queue('foo'))

    def test_close_when_closed(self):
        self.channel.closed = True
        self.channel.close()

    def test_close_client_close_raises(self):
        c = self.channel.client = Mock()
        c.connection.disconnect.side_effect = self.channel.ResponseError()

        self.channel.close()
        c.connection.disconnect.assert_called_with()

    def test_invalid_database_raises_ValueError(self):
        self.channel.connection.client.virtual_host = 'xfeqwewkfk'
        with self.assertRaises(ValueError):
            self.channel._create_client()

    @skip_if_not_module('redis')
    def test_get_client(self):
        import redis as R
        KombuRedis = redis.Channel._get_client(self.channel)
        self.assertTrue(KombuRedis)

        # Too-old redis-py versions must be rejected.
        Rv = getattr(R, 'VERSION')
        try:
            R.VERSION = (2, 4, 0)
            with self.assertRaises(VersionMismatch):
                redis.Channel._get_client(self.channel)
        finally:
            if Rv is not None:
                R.VERSION = Rv

    @skip_if_not_module('redis')
    def test_get_response_error(self):
        from redis.exceptions import ResponseError
        self.assertIs(redis.Channel._get_response_error(self.channel),
                      ResponseError)

    def test_avail_client_when_not_in_poll(self):
        self.channel._in_poll = False
        c = self.channel.client = Mock()
        self.assertIs(self.channel._avail_client, c)

    def test_avail_client_when_in_poll(self):
        # While polling, a dedicated client must be created instead of
        # reusing the poll connection.
        self.channel._in_poll = True
        cc = self.channel._create_client = Mock()

        self.assertTrue(self.channel._avail_client)
        cc.assert_called_with()

    @skip_if_not_module('redis')
    def test_transport_get_errors(self):
        self.assertTrue(
            redis.Transport._get_errors(self.connection.transport))

    @skip_if_not_module('redis')
    def test_transport_get_errors_when_InvalidData_used(self):
        from redis import exceptions

        class ID(Exception):
            pass

        DataError = getattr(exceptions, 'DataError', None)
        InvalidData = getattr(exceptions, 'InvalidData', None)
        exceptions.InvalidData = ID
        exceptions.DataError = None
        try:
            errors = redis.Transport._get_errors(self.connection.transport)
            self.assertTrue(errors)
            self.assertIn(ID, errors[1])
        finally:
            if DataError is not None:
                exceptions.DataError = DataError
            if InvalidData is not None:
                exceptions.InvalidData = InvalidData

    def test_empty_queues_key(self):
        channel = self.channel
        channel._in_poll = False
        key = channel.keyprefix_queue % 'celery'

        # Everything is fine, there is a list of queues.
        channel.client.sadd(key, 'celery\x06\x16\x06\x16celery')
        self.assertListEqual(channel.get_table('celery'),
                             [('celery', '', 'celery')])

        # ... then for some reason, the _kombu.binding.celery key gets lost
        channel.client.srem(key)

        # which raises a channel error so that the consumer/publisher
        # can recover by redeclaring the required entities.
        with self.assertRaises(InconsistencyError):
            self.channel.get_table('celery')
def setUp(self): self.connection = Connection(transport=Transport) self.channel = self.connection.channel()
class test_Redis(TestCase):
    """End-to-end tests of the redis transport via Connection/Channel."""

    def setUp(self):
        self.connection = Connection(transport=Transport)
        self.exchange = Exchange('test_Redis', type='direct')
        self.queue = Queue('test_Redis', self.exchange, 'test_Redis')

    def tearDown(self):
        self.connection.close()

    def test_publish__get(self):
        """A published message is returned once by get(), then None."""
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()

        producer.publish({'hello': 'world'})

        self.assertDictEqual(self.queue(channel).get().payload,
                             {'hello': 'world'})
        # The queue is drained after the first get.
        self.assertIsNone(self.queue(channel).get())
        self.assertIsNone(self.queue(channel).get())
        self.assertIsNone(self.queue(channel).get())

    def test_publish__consume(self):
        """drain_events delivers to a registered consumer callback."""
        connection = Connection(transport=Transport)
        channel = connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        consumer = Consumer(channel, self.queue)

        producer.publish({'hello2': 'world2'})
        _received = []

        def callback(message_data, message):
            _received.append(message_data)
            message.ack()

        consumer.register_callback(callback)
        consumer.consume()

        # Consuming registers the channel with the poller cycle.
        self.assertIn(channel, channel.connection.cycle._channels)
        try:
            connection.drain_events(timeout=1)
            self.assertTrue(_received)
            # Nothing left to drain: a short timeout must fire.
            with self.assertRaises(socket.timeout):
                connection.drain_events(timeout=0.01)
        finally:
            channel.close()

    def test_purge(self):
        """purge() removes and reports every pending message."""
        channel = self.connection.channel()
        producer = Producer(channel, self.exchange, routing_key='test_Redis')
        self.queue(channel).declare()

        for i in range(10):
            producer.publish({'hello': 'world-%s' % (i, )})

        self.assertEqual(channel._size('test_Redis'), 10)
        self.assertEqual(self.queue(channel).purge(), 10)
        channel.close()

    def test_db_values(self):
        """virtual_host in int, str and '/n' forms maps to a db index."""
        channel_int = Connection(virtual_host=1,
                                 transport=Transport).channel()
        self.assertEqual(channel_int.client.db, 1)

        channel_str = Connection(virtual_host='1',
                                 transport=Transport).channel()
        self.assertEqual(channel_str.client.db, 1)

        channel_slash = Connection(virtual_host='/1',
                                   transport=Transport).channel()
        self.assertEqual(channel_slash.client.db, 1)

        # Non-numeric vhosts are rejected.
        with self.assertRaises(Exception):
            Connection(virtual_host='/foo',
                       transport=Transport).channel()

    def test_db_port(self):
        """No port falls back to the transport default; explicit wins."""
        default_chan = Connection(port=None, transport=Transport).channel()
        self.assertEqual(default_chan.client.port, Transport.default_port)
        default_chan.close()

        custom_chan = Connection(port=9999, transport=Transport).channel()
        self.assertEqual(custom_chan.client.port, 9999)
        custom_chan.close()

    def test_close_poller_not_active(self):
        """Closing a channel deregisters it from the poller cycle."""
        channel = Connection(transport=Transport).channel()
        cycle = channel.connection.cycle
        channel.client.connection  # touch to instantiate the lazy client
        channel.close()
        self.assertNotIn(channel, cycle._channels)

    def test_close_ResponseError(self):
        """close() completes even if bgsave raises ResponseError."""
        channel = Connection(transport=Transport).channel()
        channel.client.bgsave_raises_ResponseError = True
        channel.close()

    def test_close_disconnects(self):
        """close() disconnects both the main and the pub/sub client."""
        channel = Connection(transport=Transport).channel()
        main_conn = channel.client.connection
        sub_conn = channel.subclient.connection
        channel.close()
        self.assertTrue(main_conn.disconnected)
        self.assertTrue(sub_conn.disconnected)

    def test_get__Empty(self):
        """_get on a missing queue raises Empty."""
        channel = self.connection.channel()
        with self.assertRaises(Empty):
            channel._get('does-not-exist')
        channel.close()

    def test_get_client(self):
        """With fake redis modules installed, channel wiring is complete."""
        myredis, exceptions = _redis_modules()

        @module_exists(myredis, exceptions)
        def _do_test():
            conn = Connection(transport=Transport)
            chan = conn.channel()
            self.assertTrue(chan.Client)
            self.assertTrue(chan.ResponseError)
            self.assertTrue(conn.transport.connection_errors)
            self.assertTrue(conn.transport.channel_errors)

        _do_test()
def test_close_poller_not_active(self): c = Connection(transport=Transport).channel() cycle = c.connection.cycle c.client.connection c.close() self.assertNotIn(c, cycle._channels)
def test_close_ResponseError(self): c = Connection(transport=Transport).channel() c.client.bgsave_raises_ResponseError = True c.close()