def test_db_port(self):
    """A channel built with port=None must use the transport's default port."""
    default_chan = Connection(port=None, transport=Transport).channel()
    self.assertEqual(default_chan.client.port, Transport.default_port)
    default_chan.close()

    # An explicit port must be passed through untouched.
    explicit_chan = Connection(port=9999, transport=Transport).channel()
    self.assertEqual(explicit_chan.client.port, 9999)
    explicit_chan.close()
def __init__(self, name, uri, exchange, durable=False, auto_delete=False,
             serializer=None, transport_options=None, ssl=False,
             heartbeat=DEFAULT_HEARTBEAT, sysname=None, retry=None,
             errback=None):
    """Set up a Dashi connection.

    @param name: name of destination service queue used by consumers
    @param uri: broker URI (e.g. 'amqp://*****:*****@localhost:5672//')
    @param exchange: name of exchange to create and use
    @param durable: if True, destination service queue and exchange will be
    created as durable
    @param auto_delete: if True, destination service queue and exchange
    will be deleted when all consumers are gone
    @param serializer: specify a serializer for message encoding
    @param transport_options: custom parameter dict for the transport backend
    @param heartbeat: amqp heartbeat interval
    @param sysname: a prefix for exchanges and queues for namespacing
    @param retry: a RetryBackoff object, or None to use defaults
    @param errback: callback called within except block of connection failures
    """
    self._heartbeat_interval = heartbeat
    self._conn = Connection(uri, transport_options=transport_options,
                            ssl=ssl, heartbeat=self._heartbeat_interval)

    # Pooled connections cannot have heartbeat enabled, so keep a
    # separate heartbeat-free template when heartbeats are in use;
    # otherwise the main connection doubles as the pool template.
    if heartbeat:
        self._pool_conn = Connection(uri, transport_options=transport_options,
                                     ssl=ssl)
    else:
        self._pool_conn = self._conn

    self._name = name
    self._sysname = sysname
    # Namespace the exchange with the sysname prefix when one is given.
    self._exchange_name = (exchange if self._sysname is None
                           else "%s.%s" % (self._sysname, exchange))
    self._exchange = Exchange(name=self._exchange_name, type='direct',
                              durable=durable, auto_delete=auto_delete)

    # visible attributes
    self.durable = durable
    self.auto_delete = auto_delete

    self._consumer = None
    self._linked_exceptions = {}
    self._serializer = serializer
    self.retry = RetryBackoff() if retry is None else retry
    self._errback = errback
def test_url_parser(self):
    """Both 'sqlalchemy+' and 'sqla+' scheme aliases must resolve a transport."""
    with patch('kombu.transport.sqlalchemy.Channel._open'):
        Connection('sqlalchemy+sqlite:///celerydb.sqlite').connect()
        Connection('sqla+sqlite:///celerydb.sqlite').connect()

        # Should prevent regression fixed by f187ccd: an unknown alias
        # must raise rather than silently picking a transport.
        with self.assertRaises(KeyError):
            Connection('sqlb+sqlite:///celerydb.sqlite').connect()
def setUpClass(cls):
    """Bootstrap an eager Celery app and a shared no-ack test queue once per class."""
    Unittest_with_player.setUpClass()
    # CELERY_ALWAYS_EAGER makes tasks run inline instead of via a worker.
    setup_celery({'migrate.celery.CELERY_ALWAYS_EAGER': True})
    Test._connection = Connection(get_broker_url())
    Test._queue = Test._connection.SimpleQueue(unittest_with_player.queue,
                                               no_ack=True)
    conductor.logger = MockLogger('test')
def __init__(self, settings):
    """Wire a Redis-backed kombu connection, direct exchange and ES-sync queue."""
    redis_url = settings['redis.url']
    self.connection = Connection(redis_url,
                                 virtual_host=settings['redis.db_queue'])
    self.exchange = Exchange(settings['redis.exchange'], type='direct')
    self.queue = Queue(settings['redis.queue_es_sync'], self.exchange)
def _do_test():
    # A fresh channel must expose its client/error classes, and the
    # transport must publish non-empty error tuples.
    connection = Connection(transport=Transport)
    channel = connection.channel()
    self.assertTrue(channel.Client)
    self.assertTrue(channel.ResponseError)
    self.assertTrue(connection.transport.connection_errors)
    self.assertTrue(connection.transport.channel_errors)
def test_close_disconnects(self):
    """Closing the channel must disconnect both underlying connections."""
    channel = Connection(transport=Transport).channel()
    # Grab both connections before closing so we can inspect them after.
    client_conn = channel.client.connection
    sub_conn = channel.subclient.connection
    channel.close()
    self.assertTrue(client_conn.disconnected)
    self.assertTrue(sub_conn.disconnected)
def test_db_values(self):
    """virtual_host values 1, '1' and '/1' all normalize to db 1; junk fails."""
    for vhost in (1, '1', '/1'):
        chan = Connection(virtual_host=vhost, transport=Transport).channel()
        self.assertEqual(chan.client.db, 1)

    # A non-numeric virtual host cannot be mapped to a database number.
    with self.assertRaises(Exception):
        Connection(virtual_host='/foo', transport=Transport).channel()
def __init__(self, host, port, user_id, password, virt_host, exchange_name,
             routing_key, consumer_queue_name):
    """Build a topic-exchange RPC consumer that feeds a selectable message queue."""
    super(RPCListener, self).__init__()
    self._exit = threading.Event()

    # AMQP plumbing: topic exchange, durable receive queue, consumer.
    self._exchange_name = exchange_name
    self._routing_key = routing_key
    self._consumer_queue_name = consumer_queue_name
    self._exchange = Exchange(self._exchange_name, type='topic', durable=False)
    self._connection = Connection(host, user_id, password, virt_host, port)
    self._rpc_receive_queue = Queue(self._consumer_queue_name,
                                    durable=True,
                                    exchange=self._exchange,
                                    routing_key=self._routing_key)
    self._consumer = Consumer(self._connection, self._rpc_receive_queue)
    self._consumer.register_callback(self._callback)

    # Local dispatch state: incoming messages are parked on a thread queue
    # and dispatched when its selobj becomes readable.
    self._message_queue = selectable.ThreadQueue(consumer_queue_name)
    self._message_filters_lock = threading.RLock()
    self._message_filters = {}
    self._message_handlers = {}
    selobj.selobj_add_read_obj(self._message_queue.selobj,
                               self._dispatch_messages)
def post(self):
    """
    Add subscription.

    Validates the payload, rejects duplicate names, verifies the broker is
    reachable, then persists the subscription to SUBSCRIPTIONS_FILE.
    Returns the stored subscription (with its name) and HTTP 200.
    """
    data = request.get_json()
    app_name = request.headers["x-leek-app-name"]
    subscription = SubscriptionSchema.validate(data)
    subscription.update({
        "org_name": g.org_name,
        "app_name": app_name,
        "app_key": settings.LEEK_AGENT_API_SECRET,
        "api_url": settings.LEEK_API_URL
    })
    name = subscription.pop("name")

    # Check if there is already a subscription with the same name.
    # FIX: the file handle and the looked-up entry previously both used the
    # name `s`, shadowing the handle; use distinct names.
    with open(SUBSCRIPTIONS_FILE) as subscriptions_file:
        subscriptions = json.load(subscriptions_file)
    if subscriptions.get(name):
        return responses.subscription_already_exist

    # Ensure connection.
    # FIX: release the probe connection on every path (the original leaked
    # it when ensure_connection raised).
    connection = None
    try:
        connection = Connection(subscription["broker"])
        connection.ensure_connection(max_retries=2)
    except AccessRefused:
        return responses.wrong_access_refused
    except Exception:
        return responses.broker_not_reachable
    finally:
        if connection is not None:
            connection.release()

    # Add subscription
    subscriptions[name] = subscription
    with open(SUBSCRIPTIONS_FILE, 'w') as f:
        json.dump(subscriptions, f, indent=4, sort_keys=False)
    return {"name": name, **subscription}, 200
def test_default_port(self):
    """With port=None the connect info must carry the transport's default port."""
    class Transport(pyamqp.Transport):
        Connection = MockConnection

    established = Connection(port=None, transport=Transport).connect()
    expected_host = '127.0.0.1:%s' % (Transport.default_port, )
    self.assertEqual(established['host'], expected_host)
def get(self):
    """
    Get subscriptions belonging to the caller's app and org, with broker
    and backend URLs rendered as sanitized URIs.
    """
    with open(SUBSCRIPTIONS_FILE) as subscriptions_file:
        subscriptions = json.load(subscriptions_file)

    app_subscriptions = []
    for subscription in subscriptions:
        # Only expose subscriptions owned by the requesting app/org.
        if subscription.get("app_name") != g.app_name:
            continue
        if subscription.get("org_name") != g.org_name:
            continue
        backend = subscription.get("backend")
        app_subscriptions.append({
            "name": utils.infer_subscription_name(subscription),
            **subscription,
            "broker": Connection(subscription.get("broker")).as_uri(),
            "backend": Connection(backend).as_uri() if backend else None,
        })
    return app_subscriptions, 200
def connection_thread(url, results, hide_password=False):
    """Probe a messaging *url* and append an outcome dict to *results*.

    On success the dict is ``{'url': url}``; on failure it also carries an
    ``'exception'`` entry. With *hide_password* the password portion of the
    reported URL is masked. Intended to run in a worker thread.
    """
    # Deferred imports keep these heavy dependencies out of module import time.
    from oslo_config import cfg
    from oslo_messaging.transport import TransportURL
    from pika import exceptions as pika_exceptions
    from pika import URLParameters as PikaUrlParameters
    from pika import BlockingConnection as PikaBlockingConnection
    try:
        parsed_url = TransportURL.parse(cfg.CONF, url)
        if hide_password:
            # Mask the credential segment before the URL is echoed back.
            url = re.sub(':+[^:@]+@', ':******@', url)
    except Exception as e:
        # URL failed to parse at all; report the raw exception object.
        results.append({'url': url, 'exception': e})
    else:
        test_url, driver = parse_test_url(parsed_url)
        try:
            if driver == 'kombu':
                connection = Connection(test_url)
                connection.connect()
                connection.close()
            elif driver == 'pika':
                params = PikaUrlParameters(test_url)
                params.socket_timeout = 5
                conn = PikaBlockingConnection(params)
                conn.close()
            # NOTE(review): any other driver value falls through silently and
            # is reported as reachable -- presumably intentional; confirm.
        except (OSError, pika_exceptions.ConnectionClosed):
            # Network-level failure: host/port unreachable.
            results.append({'url': url, 'exception': _('Url not reachable')})
        except (AccessRefused, pika_exceptions.ProbableAuthenticationError):
            # Reached the broker but the credentials were rejected.
            results.append({
                'url': url,
                'exception': _('Credentials incorrect')
            })
        except Exception as e:
            # Catch-all must come last; ordering of these clauses matters.
            results.append({'url': url, 'exception': force_text(e)})
        else:
            results.append({'url': url})
def test_clone(self):
    """clone() must preserve both the URI prefix and the hostname."""
    hostname = 'sqlite:///celerydb.sqlite'
    original = Connection('+'.join(['sqla', hostname]))
    duplicate = original.clone()
    # Both the source and the clone must expose identical addressing info.
    for conn in (original, duplicate):
        self.assertEqual(conn.uri_prefix, 'sqla')
        self.assertEqual(conn.hostname, hostname)
def setUp(self):
    """Create an in-memory connection plus one exchange and two bound queues."""
    self.c = Connection(transport='memory')
    self.e = Exchange('test_transport_memory')

    def bound_queue(name):
        # Queue name doubles as its routing key in these tests.
        return Queue(name, exchange=self.e, routing_key=name)

    self.q = bound_queue('test_transport_memory')
    self.q2 = bound_queue('test_transport_memory2')
def __init__(self, config: ConfigDict) -> None:
    """Open a broker connection and a SimpleQueue as described by *config*."""
    self.config: ConfigDict = config
    self.name: str = config["QUEUE_NAME"]
    self.broker_url: str = config["BROKER_URL"]
    # connection_timeout / serializer are class-level settings.
    timeout = self.connection_timeout
    self.conn: Connection = Connection(self.broker_url,
                                       connect_timeout=timeout)
    # Fail fast if the broker is unreachable.
    self.conn.ensure_connection(timeout=timeout)
    self.queue: SimpleQueue = self.conn.SimpleQueue(
        self.name, serializer=self.serializer)
def get(self):
    """
    Get subscriptions for the requesting app/org, keyed entries flattened
    with their name and sanitized broker/backend URIs.
    """
    app_name = request.headers["x-leek-app-name"]
    with open(SUBSCRIPTIONS_FILE) as subscriptions_file:
        subscriptions = json.load(subscriptions_file)

    app_subscriptions = []
    for subscription_name, subscription in subscriptions.items():
        # Skip entries owned by other apps or organizations.
        if (subscription.get("app_name") != app_name
                or subscription.get("org_name") != g.org_name):
            continue
        backend = subscription.get("backend")
        app_subscriptions.append({
            "name": subscription_name,
            **subscription,
            "broker": Connection(subscription.get("broker")).as_uri(),
            "backend": Connection(backend).as_uri() if backend else None,
        })
    return app_subscriptions, 200
def __init__(self):
    """Connect to CloudAMQP and declare a direct exchange with three task queues."""
    self.connection = Connection(
        'amqp://*****:*****@fish.rmq.cloudamqp.com/kgmcrbkn')
    # exchange queue - Direct connection
    self.direct_exchange = Exchange(name='test1', type='direct')
    # Declare task_queue1..task_queue3, each bound with its own routing key.
    for queue_name, key in (('queue1', 'tasks_queue1'),
                            ('queue2', 'tasks_queue2'),
                            ('queue3', 'tasks_queue3')):
        setattr(self, 'task_' + queue_name,
                Queue(name=queue_name,
                      exchange=self.direct_exchange,
                      routing_key=key))
def __init__(self, host_name, port, userid, password, virtual_host,
             encoder_class):
    """Open an AMQP connection and hook the spider open/close signals."""
    self.connection = Connection(hostname=host_name,
                                 port=port,
                                 userid=userid,
                                 password=password,
                                 virtual_host=virtual_host)
    self.encoder = encoder_class()
    # Mirror the spider lifecycle onto this pipeline instance.
    for handler, signal in ((self.spider_opened, signals.spider_opened),
                            (self.spider_closed, signals.spider_closed)):
        dispatcher.connect(handler, signal)
def main():
    """Publish MSG to the 'neutron' topic exchange under routing key 'q-plugin'."""
    connection = Connection('amqp://*****:*****@localhost:5672//')
    channel = connection.channel()
    neutron_exchange = Exchange('neutron', type='topic')
    producer = Producer(channel=channel,
                        exchange=neutron_exchange,
                        routing_key='q-plugin')
    producer.publish(MSG)
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application. """
    # Standard Pyramid bootstrap driven by the deployment ini settings.
    config = Configurator(settings=settings)
    config.include('pyramid_chameleon')
    config.add_static_view('static', 'static', cache_max_age=3600)
    config.add_route('home', '/')
    # Pick up @view_config-decorated views from this package.
    config.scan()
    # Shared kombu connection for queue publishers; NOTE(review): the Redis
    # URL is hard-coded -- presumably it should come from settings; confirm.
    config.registry.queue_connection = Connection('redis://localhost:6379/')
    return config.make_wsgi_app()
def post_to_archived_queue(payload):
    """Publish *payload* persistently to the processed fanout exchange.

    No-op unless PROCESSED_EXCHANGE_ENABLED is set. The same backoff policy
    is used both as transport options and as the publish retry policy.
    """
    # Guard clause: archiving disabled means nothing to do.
    if not settings.PROCESSED_EXCHANGE_ENABLED:
        return

    retry_policy = {
        'interval_start': 0,
        'interval_step': 1,
        'interval_max': 4,
        'max_retries': 5,
    }
    processed_exchange = Exchange(settings.PROCESSED_EXCHANGE_NAME,
                                  type='fanout')
    with Connection(settings.QUEUE_BROKER_URL,
                    transport_options=retry_policy) as conn:
        conn.Producer(exchange=processed_exchange).publish(
            payload,
            delivery_mode='persistent',
            retry=True,
            retry_policy=retry_policy)
def setUp(self):
    """Create two filesystem-transport connections wired back-to-back via tempdirs."""
    try:
        data_folder_in = tempfile.mkdtemp()
        data_folder_out = tempfile.mkdtemp()
    except Exception:
        raise SkipTest('filesystem transport: cannot create tempfiles')

    def fs_connection(in_dir, out_dir):
        # Consumer and producer sides swap their in/out folders.
        return Connection(transport='filesystem',
                          transport_options={'data_folder_in': in_dir,
                                             'data_folder_out': out_dir})

    self.c = fs_connection(data_folder_in, data_folder_out)
    self.p = fs_connection(data_folder_out, data_folder_in)
    self.e = Exchange('test_transport_filesystem')
    self.q = Queue('test_transport_filesystem',
                   exchange=self.e,
                   routing_key='test_transport_filesystem')
    self.q2 = Queue('test_transport_filesystem2',
                    exchange=self.e,
                    routing_key='test_transport_filesystem2')
def test_url_parser(self):
    """Exercise MongoDB URL parsing: default db name, host lists, bad auth."""
    from kombu.transport import mongodb
    from pymongo.errors import ConfigurationError

    # Functional-test guard: everything below needs a live mongod, so the
    # test is skipped unconditionally; the body is kept for manual runs.
    raise SkipTest('Test is functional: it actually connects to mongod')

    class Transport(mongodb.Transport):
        Connection = MockConnection

    # Bare scheme: falls back to localhost and the 'kombu_default' database.
    url = 'mongodb://'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    self.assertEquals(client.name, 'kombu_default')
    self.assertEquals(client.connection.host, '127.0.0.1')

    url = 'mongodb://localhost'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    self.assertEquals(client.name, 'kombu_default')

    # Explicit database name in the URL path.
    url = 'mongodb://localhost/dbname'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    self.assertEquals(client.name, 'dbname')

    # Multiple hosts: both must show up in the client's node list.
    url = 'mongodb://localhost,example.org:29017/dbname'
    c = Connection(url, transport=Transport).connect()
    client = c.channels[0].client
    nodes = client.connection.nodes
    self.assertEquals(len(nodes), 2)
    self.assertTrue(('example.org', 29017) in nodes)
    self.assertEquals(client.name, 'dbname')

    # Passing options breaks kombu's _init_params method
    # url = 'mongodb://localhost,localhost2:29017/dbname?safe=true'
    # c = Connection(url, transport=Transport).connect()
    # client = c.channels[0].client

    url = 'mongodb://*****:*****@localhost/dbname'
    c = Connection(url, transport=Transport).connect()

    # Assuming there's no user 'username' with password 'password'
    # configured in mongodb
    # Needed, otherwise the error would be rose before
    # the assertRaises is called
    def get_client():
        c.channels[0].client

    self.assertRaises(ConfigurationError, get_client)
def post(self):
    """
    Add subscription.

    Validates the payload (batch size must not exceed prefetch count),
    rejects duplicates per (app_name, app_env), verifies the broker is
    reachable, then appends the subscription to SUBSCRIPTIONS_FILE.
    Returns the stored subscription and HTTP 200.
    """
    data = request.get_json()
    subscription = SubscriptionSchema.validate(data)
    if subscription["batch_max_number_of_messages"] > subscription[
            "prefetch_count"]:
        raise SchemaError(
            "Batch max number of messages should be <= prefetch count!")
    subscription.update({
        "org_name": g.org_name,
        "app_name": g.app_name,
        "app_key": settings.LEEK_AGENT_API_SECRET,
        "api_url": settings.LEEK_API_URL
    })

    # Check subscription already exist
    exist, _ = utils.lookup_subscription(subscription["app_name"],
                                         subscription["app_env"])
    if exist:
        return responses.subscription_already_exist

    # Ensure connection.
    # FIX: release the probe connection on every path (previously leaked
    # when ensure_connection raised).
    connection = None
    try:
        connection = Connection(subscription["broker"])
        connection.ensure_connection(max_retries=2)
    except AccessRefused:
        return responses.wrong_access_refused
    except Exception:
        return responses.broker_not_reachable
    finally:
        if connection is not None:
            connection.release()

    # Add subscription
    with open(SUBSCRIPTIONS_FILE, "r+") as subscriptions_file:
        subscriptions = json.load(subscriptions_file)
        subscriptions.append(subscription)
        subscriptions_file.seek(0)
        json.dump(subscriptions, subscriptions_file)
        # FIX: drop any trailing bytes from the previous content; without
        # truncate() a shorter dump leaves stale JSON behind, corrupting
        # the file on the next read.
        subscriptions_file.truncate()
    return {
        "name": utils.infer_subscription_name(subscription),
        **subscription
    }, 200
def init_rabbit_mq(self):
    """Run the pypo-fetch consumer loop; any failure is logged, not raised."""
    self.logger.info("Initializing RabbitMQ stuff")
    try:
        schedule_exchange = Exchange("airtime-pypo", "direct",
                                     durable=True, auto_delete=True)
        schedule_queue = Queue("pypo-fetch",
                               exchange=schedule_exchange,
                               key="foo")
        # Positional args follow kombu's (hostname, userid, password, vhost).
        broker = (self.config["host"], self.config["user"],
                  self.config["password"], self.config["vhost"])
        with Connection(*broker, heartbeat=5) as connection:
            RabbitConsumer(connection, [schedule_queue], self).run()
    except Exception as e:
        self.logger.error(e)
def init_rabbit_mq(self):
    """Run the pypo-fetch consumer over an AMQP URI; failures are logged only."""
    logger.info("Initializing RabbitMQ stuff")
    try:
        schedule_exchange = Exchange("airtime-pypo", "direct",
                                     durable=True, auto_delete=True)
        schedule_queue = Queue("pypo-fetch",
                               exchange=schedule_exchange,
                               key="foo")
        broker_uri = (
            f"amqp://{self.config.user}:{self.config.password}"
            f"@{self.config.host}:{self.config.port}"
            f"/{self.config.vhost}"
        )
        with Connection(broker_uri, heartbeat=5) as connection:
            RabbitConsumer(connection, [schedule_queue], self).run()
    except Exception as e:
        logger.error(e)
def test_connection(host, port, user_id, password, virt_host, exchange_name,
                    queue_name):
    """ Test a connection to an exchange on a virtual host """
    # Returns True only if connecting, declaring the exchange (passively)
    # and declaring the queue (passively) all succeed.
    connection = None
    connected = False
    success = False
    try:
        # Connect to the virtual host - will raise exception if it fails.
        connection = Connection(host, user_id, password, virt_host, port)
        connection.connect()
        connected = connection.connected
        if connected:
            # Check whether exchange exists - will raise exception if it
            # fails. passive=True means "verify only, do not create".
            exchange = Exchange(exchange_name, channel=connection,
                                type='topic', durable=False, passive=True)
            exchange.declare()

            # Check whether the queue exists - will raise exception if it
            # fails.
            rpc_receive_queue = Queue(queue_name, durable=True,
                                      exchange=exchange, channel=connection)
            rpc_receive_queue.queue_declare(passive=True)

            success = True

    except Exception as e:
        DLOG.info("Unable to connect to virt_host %s, exchange %s, error: %s"
                  % (virt_host, exchange_name, e))

    finally:
        # Close only when the initial connect succeeded; `connected` guards
        # against closing a half-constructed connection.
        if connected:
            connection.close()

    return success
def main():
    """Consume 'video' messages from the 'media' exchange and process them.

    Drains events in an endless loop (drain_events raises socket.timeout
    after 10 idle seconds, which terminates the loop); the consumer is
    cancelled on the way out.
    """
    connection = Connection('amqp://*****:*****@192.168.8.108:5672//')
    # FIX: was the Python-2-only statement `print '...'`; the call form
    # works on both Python 2 and 3. (The original also wrapped Connection
    # in `try/except: raise`, which is a no-op and was removed.)
    print('connecting to amqp server succeed!')
    channel = connection.channel()
    _exchange = Exchange('media', type='direct', channel=channel)
    video_queue = Queue('video', exchange=_exchange, routing_key='video',
                        channel=channel)
    consumer = Consumer(channel, queues=[video_queue],
                        callbacks=[process_data])
    consumer.consume()
    try:
        while True:
            connection.drain_events(timeout=10)
    finally:
        # FIX: consumer.cancel() was unreachable after `while True`; run it
        # even when drain_events times out or the loop is interrupted.
        consumer.cancel()
def test_publish__consume(self):
    """A published message is delivered exactly once; further drains time out."""
    connection = Connection(transport=Transport)
    channel = connection.channel()
    producer = Producer(channel, self.exchange, routing_key='test_Redis')
    consumer = Consumer(channel, self.queue)
    producer.publish({'hello2': 'world2'})

    received = []

    def on_message(body, message):
        received.append(body)
        message.ack()

    consumer.register_callback(on_message)
    consumer.consume()
    # The channel must be registered with the transport's poll cycle.
    self.assertIn(channel, channel.connection.cycle._channels)
    try:
        connection.drain_events(timeout=1)
        self.assertTrue(received)
        # No second message: a very short drain must hit the socket timeout.
        with self.assertRaises(socket.timeout):
            connection.drain_events(timeout=0.01)
    finally:
        channel.close()