def test_assert_is_bound(self):
    exchange = Exchange("foo", "direct")
    self.assertRaises(NotBoundError, exchange.declare)

    chan = Channel()
    exchange.bind(chan).declare()
    self.assertIn("exchange_declare", chan)
def test_eq(self):
    q1 = Queue("xxx", Exchange("xxx", "direct"), "xxx")
    q2 = Queue("xxx", Exchange("xxx", "direct"), "xxx")
    self.assertEqual(q1, q2)
    self.assertFalse(q1.__eq__(True))

    q3 = Queue("yyy", Exchange("xxx", "direct"), "xxx")
    self.assertNotEqual(q1, q3)
def test_eq(self):
    q1 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
    q2 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
    self.assertEqual(q1, q2)
    self.assertFalse(q1.__eq__(True))

    q3 = Queue('yyy', Exchange('xxx', 'direct'), 'xxx')
    self.assertNotEqual(q1, q3)
def test_eq(self):
    e1 = Exchange('foo', 'direct')
    e2 = Exchange('foo', 'direct')
    self.assertEqual(e1, e2)

    e3 = Exchange('foo', 'topic')
    self.assertNotEqual(e1, e3)

    self.assertFalse(e1.__eq__(True))
def test_assert_is_bound(self):
    exchange = Exchange("foo", "direct")
    with self.assertRaises(NotBoundError):
        exchange.declare()

    conn = get_conn()
    chan = conn.channel()
    exchange.bind(chan).declare()
    self.assertIn("exchange_declare", chan)
def test_eq(self):
    e1 = Exchange("foo", "direct")
    e2 = Exchange("foo", "direct")
    self.assertEqual(e1, e2)

    e3 = Exchange("foo", "topic")
    self.assertNotEqual(e1, e3)

    self.assertFalse(e1.__eq__(True))
def test_assert_is_bound(self):
    exchange = Exchange('foo', 'direct')
    with self.assertRaises(NotBoundError):
        exchange.declare()

    conn = get_conn()
    chan = conn.channel()
    exchange.bind(chan).declare()
    self.assertIn('exchange_declare', chan)
def test_bound(self):
    exchange = Exchange("foo", "direct")
    self.assertFalse(exchange.is_bound)
    self.assertIn("<unbound", repr(exchange))

    chan = get_conn().channel()
    bound = exchange.bind(chan)
    self.assertTrue(bound.is_bound)
    self.assertIs(bound.channel, chan)
    self.assertIn("<bound", repr(bound))
def test_bound(self):
    exchange = Exchange('foo', 'direct')
    self.assertFalse(exchange.is_bound)
    self.assertIn('<unbound', repr(exchange))

    chan = get_conn().channel()
    bound = exchange.bind(chan)
    self.assertTrue(bound.is_bound)
    self.assertIs(bound.channel, chan)
    self.assertIn('<bound', repr(bound))
def test_bound(self):
    exchange = Exchange("foo", "direct")
    self.assertFalse(exchange.is_bound)
    self.assertIn("<unbound", repr(exchange))

    chan = Channel()
    bound = exchange.bind(chan)
    self.assertTrue(bound.is_bound)
    self.assertIs(bound.channel, chan)
    self.assertIn("<bound", repr(bound))
def test_publish_retry_with_declare(self):
    p = self.connection.Producer()
    p.maybe_declare = Mock()
    p.connection.ensure = Mock()
    ex = Exchange('foo')
    p._publish('hello', 'rk', 0, 0, ex, declare=[ex])
    p.maybe_declare.assert_called_with(ex)
def test_add_remove_binding_when_routing_key_is_empty(self, conn):
    a = A(conn)
    routing_key, mock_entity_type = "", ACTOR_TYPE.SCATTER
    source_ex = Exchange('bar.foo.bar', mock_entity_type)
    exchange = self.mock_exchange(a, mock_entity_type)

    a._add_binding(source_ex.as_dict(), routing_key, mock_entity_type)
    exchange.bind_to.assert_called_with(exchange=source_ex,
                                        routing_key=routing_key)

    a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
    exchange.exchange_unbind.assert_called_with(exchange=source_ex,
                                                routing_key=routing_key)
def test_revive(self):
    exchange = Exchange("foo", "direct")
    chan = Channel()

    # reviving an unbound exchange is a no-op.
    exchange.revive(chan)
    self.assertFalse(exchange.is_bound)
    self.assertIsNone(exchange._channel)

    bound = exchange.bind(chan)
    self.assertTrue(bound.is_bound)
    self.assertIs(bound.channel, chan)

    chan2 = Channel()
    bound.revive(chan2)
    self.assertTrue(bound.is_bound)
    self.assertIs(bound._channel, chan2)
def setUp(self):
    self.__conn = Connection(transport=Transport)
    self.__conn.connect()
    self.__exchange = Exchange('test_exchange')
    self.__routing_key = 'test.routing'
    self.__queue = Queue('test_queue', self.__exchange,
                         routing_key=self.__routing_key)
def setUp(self):
    self.c = BrokerConnection(transport="memory")
    self.e = Exchange("test_transport_memory")
    self.q = Queue("test_transport_memory",
                   exchange=self.e,
                   routing_key="test_transport_memory")
    self.q2 = Queue("test_transport_memory2",
                    exchange=self.e,
                    routing_key="test_transport_memory2")
def setUp(self):
    self.c = Connection(transport='memory')
    self.e = Exchange('test_transport_memory')
    self.q = Queue('test_transport_memory',
                   exchange=self.e,
                   routing_key='test_transport_memory')
    self.q2 = Queue('test_transport_memory2',
                    exchange=self.e,
                    routing_key='test_transport_memory2')
def test_revive(self):
    exchange = Exchange('foo', 'direct')
    conn = get_conn()
    chan = conn.channel()

    # reviving an unbound exchange is a no-op.
    exchange.revive(chan)
    self.assertFalse(exchange.is_bound)
    self.assertIsNone(exchange._channel)

    bound = exchange.bind(chan)
    self.assertTrue(bound.is_bound)
    self.assertIs(bound.channel, chan)

    chan2 = conn.channel()
    bound.revive(chan2)
    self.assertTrue(bound.is_bound)
    self.assertIs(bound._channel, chan2)
def __init__(self, name, uri, exchange, durable=False, auto_delete=False,
             serializer=None, transport_options=None, ssl=False,
             heartbeat=DEFAULT_HEARTBEAT, sysname=None, retry=None,
             errback=None):
    """Set up a Dashi connection

    @param name: name of destination service queue used by consumers
    @param uri: broker URI (e.g. 'amqp://*****:*****@localhost:5672//')
    @param exchange: name of exchange to create and use
    @param durable: if True, the destination service queue and exchange
        will be created as durable
    @param auto_delete: if True, the destination service queue and exchange
        will be deleted when all consumers are gone
    @param serializer: serializer to use for message encoding
    @param transport_options: custom parameter dict for the transport backend
    @param ssl: use SSL for the broker connection
    @param heartbeat: AMQP heartbeat interval
    @param sysname: prefix for exchanges and queues, used for namespacing
    @param retry: a RetryBackoff object, or None to use the defaults
    @param errback: callback invoked in the except block of connection failures
    """
    self._heartbeat_interval = heartbeat
    self._conn = Connection(uri, transport_options=transport_options,
                            ssl=ssl, heartbeat=self._heartbeat_interval)
    if heartbeat:
        # create a connection template for pooled connections.
        # These cannot have heartbeat enabled.
        self._pool_conn = Connection(uri,
                                     transport_options=transport_options,
                                     ssl=ssl)
    else:
        self._pool_conn = self._conn

    self._name = name
    self._sysname = sysname
    if self._sysname is not None:
        self._exchange_name = "%s.%s" % (self._sysname, exchange)
    else:
        self._exchange_name = exchange
    self._exchange = Exchange(name=self._exchange_name, type='direct',
                              durable=durable, auto_delete=auto_delete)

    # visible attributes
    self.durable = durable
    self.auto_delete = auto_delete

    self._consumer = None
    self._linked_exceptions = {}
    self._serializer = serializer

    if retry is None:
        self.retry = RetryBackoff()
    else:
        self.retry = retry

    self._errback = errback
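# For orientation, a minimal usage sketch of the constructor above.
# Assumptions (not shown in the snippet): the __init__ belongs to a class
# named DashiConnection, and the URI and service names are invented.
conn = DashiConnection(
    name='provisioner',                         # destination service queue
    uri='amqp://guest:guest@localhost:5672//',
    exchange='services',
    durable=True,                               # durable queue and exchange
    sysname='dev1',                             # exchange becomes 'dev1.services'
)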
def setup(self):
    self.mock_chaos_connection = BrokerConnection(
        "pyamqp://*****:*****@localhost:5672")
    self._connections = {self.mock_chaos_connection}
    self._exchange = Exchange('navitia', durable=True,
                              delivery_mode=2, type='topic')
    self.mock_chaos_connection.connect()
def setup(self):
    # Note: not a setup_class method, so it does not conflict with
    # AbstractTestFixture's setup.
    self._mock_rabbit_connection = BrokerConnection(
        "pyamqp://*****:*****@localhost:5672")
    self._connections = {self._mock_rabbit_connection}
    self._exchange = Exchange('navitia', durable=True,
                              delivery_mode=2, type='topic')
    self._mock_rabbit_connection.connect()

    # wait for the connection before running the test
    self._wait_for_rabbitmq_cnx()
def setUp(self):
    # clean up the singleton during testing
    Player._instances.clear()

    self.hostname = 'localhost'
    self.cluster = 'test_cluster'
    self.routing_key = Constants.CONDUCTOR_ROUTING_KEY
    self.connection = Connection(get_broker_url())
    self.exchange = Exchange(Constants.CONDUCTOR_EXCHANGE)

    # turn on mocket
    Mocket.enable()
def main():
    connection = Connection('amqp://*****:*****@localhost:5672//')
    _channel = connection.channel()
    _exchange = Exchange('neutron', type='topic')
    pro = Producer(channel=_channel, exchange=_exchange,
                   routing_key='q-plugin')
    pro.publish(MSG)
def __init__(self, conf):
    super(Heartbeat, self).__init__()
    self.conf = conf
    exchange = Exchange('zenoss.openstack.heartbeats', type='topic')
    self.connection = AMQPConnection(self.conf, exchange)
    self.hostname = socket.gethostname()
    self.processname = os.path.basename(sys.argv[0])
    self.processid = os.getpid()
def _publish_reply(self, reply, exchange, routing_key, channel=None):
    chan = channel or self.connection.channel()
    try:
        exchange = Exchange(exchange, exchange_type="direct",
                            delivery_mode="transient",
                            durable=False, auto_delete=True)
        producer = Producer(chan, exchange=exchange, auto_declare=True)
        producer.publish(reply, routing_key=routing_key)
    finally:
        # only close the channel if it was created here.
        channel or chan.close()
def test_publish_retry_with_declare(self):
    p = self.connection.Producer()
    p.maybe_declare = Mock()
    ensure = p.connection.ensure = Mock()
    ex = Exchange("foo")
    p.publish("hello", exchange=ex, declare=[ex], retry=True,
              retry_policy={"step": 4})
    p.maybe_declare.assert_called_with(ex, True, step=4)
    ensure.assert_called_with(p, p.exchange.publish, step=4)
def getCabbage(self):
    connectUri = CabbageHolder._getConnectUri()
    work = CacheHolder.getCache().get(HOST_NAME, WORKS)
    if work.queues and len(work.queues) > 0:
        queues = []
        for queueName in work.queues:
            brokerQueue = StoreHolder.getStore().getQueue(queueName)
            queues.append(Queue(name=brokerQueue.queueName,
                                exchange=Exchange(brokerQueue.exchangeName),
                                routing_key=brokerQueue.routingKey))
        celeryconfig.CELERY_QUEUES = tuple(queues)
    return Cabbage(broker=str(connectUri))
class Worker(ConsumerMixin):
    task_queue = Queue('xadmin-notify',
                       Exchange(name='xnotify', type='fanout'),
                       routing_key='xnotify')

    def __init__(self, app):
        self._app = app
        self.logger = app.logger
        self.connection = celery_app.connection_for_read()

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[self.task_queue],
                         callbacks=[self.on_event])]

    def on_event(self, body, message):
        # print('Got task: {0!r}'.format(body))
        # type_, data = json.loads(body)
        type_, data = body
        if type_.startswith('job-'):
            self._on_job(type_, data)
        message.ack()

    def _on_job(self, type_, data):
        self.logger.info('Received: %s - %s', type_, data)
        obj = Job.objects.filter(alias=data['job']).first()
        # print('Job =>', obj)
        if obj.last_batch_id != data['batch_id']:
            # print('!!last_batch_id==', obj.last_batch_id)
            if obj.type == 0:
                return
            elif obj.type == 1:
                obj.last_batch_id = data['batch_id']
        job = JobAction(obj.project.alias, obj.alias, obj.last_batch_id)
        if type_ == 'job-finished':
            r = job.stop()
            obj.status = obj.status & 0xf0 | STATUS_STOP
        elif type_ == 'job-pause':
            r = job.pause()
            obj.status = obj.status & 0xf0 | STATUS_PAUSE
        elif type_ == 'job-started':
            if obj.type == 0:
                return
            elif obj.type == 1:
                obj.status = obj.status & 0xf0 | STATUS_START
        obj.save()

    def stop(self):
        self.should_stop = True
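# A minimal usage sketch of the consumer above, not part of the original
# code. ConsumerMixin.run() (from kombu.mixins) blocks and drains events
# until should_stop is set, which Worker.stop() does; `app` is assumed to
# provide the `.logger` attribute required by Worker.__init__.
worker = Worker(app)
worker.run()   # stop from another thread with worker.stop()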
def testUrl(self):
    client = KombuClient(
        url="amqp://*****:*****@172.16.4.134:5672/cabbage_vhost")
    conn = client.conn
    conn.connect()
    video_queue = Queue('hdfs', exchange=Exchange("hdfs"),
                        routing_key='hdfs')
    with conn.Consumer(video_queue, callbacks=[process_media],
                       accept=['json', 'pickle', 'msgpack',
                               'yaml']) as consumer:
        # Process messages and handle events on all channels
        while True:
            conn.drain_events()
def __init__(self, channel, exchange=None, routing_key=None,
             serializer=None, auto_declare=None, compression=None,
             on_return=None):
    self.channel = channel
    self.exchange = exchange or self.exchange
    if self.exchange is None:
        self.exchange = Exchange("")
    self.routing_key = routing_key or self.routing_key
    self.serializer = serializer or self.serializer
    self.compression = compression or self.compression
    self.on_return = on_return or self.on_return
    if auto_declare is not None:
        self.auto_declare = auto_declare

    self.exchange = self.exchange(self.channel)
    if self.auto_declare:
        self.declare()

    if self.on_return:
        self.channel.events["basic_return"].append(self.on_return)
def _initJobs(self, cabbage):
    store = StoreHolder.getRetryStore()
    jobs = store.getJobs()
    work = store.getWork(HOST_NAME)
    queues = work.queues

    routes = {}
    queues_celery = []
    for que in queues:
        que = store.getQueue(que)
        queues_celery.append(
            Queue(que.queueName, Exchange(que.queueName),
                  routing_key=que.queueName,
                  queue_arguments={'x-max-priority': int(que.priority)}))

    for job in jobs:
        if job.status != JOB_DELETE and job.brokerQueue in queues:
            # fixbug: cache the job when scaling out dynamically
            if not CacheHolder.getCache().hasKey(job.jobId, JOBS):
                CacheHolder.getCache().put(job.jobId, job, JOBS)
                clientDir = ConfigHolder.getConfig().getProperty(
                    BASE, CLIENT_FILE_DIRECTORY)
                path = clientDir + "/" + job.jobId
                if not os.path.isdir(path):
                    # @FIX BUG: files not in sync
                    syncJob(job.jobId, store)
            self.addScriptJobId(job.jobId, cabbage)
            for taskName in job.tasks:
                que = store.getQueue(job.brokerQueue)
                routes[taskName] = {'queue': que.queueName,
                                    'routing_key': que.routingKey}

    log.info(routes)
    celeryconfig.CELERY_QUEUES = tuple(queues_celery)
    celeryconfig.CELERY_ROUTES = routes
def setUp(self):
    try:
        data_folder_in = tempfile.mkdtemp()
        data_folder_out = tempfile.mkdtemp()
    except Exception:
        raise SkipTest('filesystem transport: cannot create tempfiles')
    self.c = Connection(transport='filesystem',
                        transport_options={
                            'data_folder_in': data_folder_in,
                            'data_folder_out': data_folder_out,
                        })
    self.p = Connection(transport='filesystem',
                        transport_options={
                            'data_folder_in': data_folder_out,
                            'data_folder_out': data_folder_in,
                        })
    self.e = Exchange('test_transport_filesystem')
    self.q = Queue('test_transport_filesystem',
                   exchange=self.e,
                   routing_key='test_transport_filesystem')
    self.q2 = Queue('test_transport_filesystem2',
                    exchange=self.e,
                    routing_key='test_transport_filesystem2')
""" UNKNOWN_SIMPLE_FORMAT_KEY = """ Unknown format %{0} in string {1!r}. Possible causes: Did you forget to escape the expand sign (use '%%{0!r}'), or did you escape and the value was expanded twice? (%%N -> %N -> %hostname)? """.strip() #: Billiard sets this when execv is enabled. #: We use it to find out the name of the original ``__main__`` #: module, so that we can properly rewrite the name of the #: task to be that of ``App.main``. MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') #: Exchange for worker direct queues. WORKER_DIRECT_EXCHANGE = Exchange('C.dq2') #: Format for worker direct queue names. WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq2' #: Separator for worker node name and hostname. NODENAME_SEP = '@' NODENAME_DEFAULT = 'celery' RE_FORMAT = re.compile(r'%(\w)') gethostname = memoize(1, Cache=dict)(socket.gethostname) def worker_direct(hostname): """Return :class:`kombu.Queue` that is a direct route to
def test_exchange_unbind(self):
    chan = get_conn().channel()
    foo = Exchange('foo', 'topic')
    bar = Exchange('bar', 'topic')
    # the exchange must be bound to a channel before it can be
    # unbound from another exchange.
    foo(chan).unbind_from(bar)
    self.assertIn('exchange_unbind', chan)
class Producer(object):
    """Message Producer.

    :param channel: Connection channel.
    :keyword exchange: Default exchange.
    :keyword routing_key: Default routing key.
    :keyword serializer: Default serializer. Default is `"json"`.
    :keyword compression: Default compression method.
        Default is no compression.
    :keyword auto_declare: Automatically declare the exchange
        at instantiation. Default is :const:`True`.
    :keyword on_return: Callback to call for undeliverable messages,
        when the `mandatory` or `immediate` arguments to
        :meth:`publish` is used. This callback needs the following
        signature: `(exception, exchange, routing_key, message)`.
        Note that the producer needs to drain events to use this feature.

    """

    #: The connection channel used.
    channel = None

    #: Default exchange.
    exchange = None

    #: Default routing key.
    routing_key = ""

    #: Default serializer to use. Default is JSON.
    serializer = None

    #: Default compression method. Disabled by default.
    compression = None

    #: By default the exchange is declared at instantiation.
    #: If you want to declare manually then you can set this
    #: to :const:`False`.
    auto_declare = True

    #: Basic return callback.
    on_return = None

    def __init__(self, channel, exchange=None, routing_key=None,
                 serializer=None, auto_declare=None, compression=None,
                 on_return=None):
        self.channel = channel
        self.exchange = exchange or self.exchange
        if self.exchange is None:
            self.exchange = Exchange("")
        self.routing_key = routing_key or self.routing_key
        self.serializer = serializer or self.serializer
        self.compression = compression or self.compression
        self.on_return = on_return or self.on_return
        if auto_declare is not None:
            self.auto_declare = auto_declare

        self.exchange = self.exchange(self.channel)
        if self.auto_declare:
            self.declare()

        if self.on_return:
            self.channel.events["basic_return"].append(self.on_return)

    def declare(self):
        """Declare the exchange.

        This is done automatically at instantiation if :attr:`auto_declare`
        is set to :const:`True`.

        """
        if self.exchange.name:
            self.exchange.declare()

    def publish(self, body, routing_key=None, delivery_mode=None,
                mandatory=False, immediate=False, priority=0,
                content_type=None, content_encoding=None, serializer=None,
                headers=None, compression=None, exchange=None):
        """Publish message to the specified exchange.

        :param body: Message body.
        :keyword routing_key: Message routing key.
        :keyword delivery_mode: See :attr:`delivery_mode`.
        :keyword mandatory: Currently not supported.
        :keyword immediate: Currently not supported.
        :keyword priority: Message priority. A number between 0 and 9.
        :keyword content_type: Content type. Default is autodetect.
        :keyword content_encoding: Content encoding. Default is autodetect.
        :keyword serializer: Serializer to use. Default is autodetect.
        :keyword headers: Mapping of arbitrary headers to pass along
            with the message body.
        :keyword exchange: Override the exchange. Note that this exchange
            must have been declared.

        """
        headers = headers or {}
        if routing_key is None:
            routing_key = self.routing_key
        if compression is None:
            compression = self.compression

        body, content_type, content_encoding = self._prepare(
            body, serializer, content_type, content_encoding,
            compression, headers)
        message = self.exchange.Message(body,
                                        delivery_mode,
                                        priority,
                                        content_type,
                                        content_encoding,
                                        headers=headers)
        return self.exchange.publish(message, routing_key, mandatory,
                                     immediate, exchange=exchange)

    def revive(self, channel):
        """Revive the producer after connection loss."""
        self.channel = channel
        self.exchange.revive(channel)

    def _prepare(self, body, serializer=None,
                 content_type=None, content_encoding=None,
                 compression=None, headers=None):

        # No content_type? Then we're serializing the data internally.
        if not content_type:
            serializer = serializer or self.serializer
            (content_type, content_encoding,
             body) = encode(body, serializer=serializer)
        else:
            # If the programmer doesn't want us to serialize,
            # make sure content_encoding is set.
            if isinstance(body, unicode):
                if not content_encoding:
                    content_encoding = 'utf-8'
                body = body.encode(content_encoding)

            # If they passed in a string, we can't know anything
            # about it. So assume it's binary data.
            elif not content_encoding:
                content_encoding = 'binary'

        if compression:
            body, headers["compression"] = compress(body, compression)

        return body, content_type, content_encoding