def setup(self):
    """Prepare per-run ES index names, the ES client, and a RabbitMQ
    alerts consumer; optionally reset indexes/queues per pytest options."""
    now = datetime.now()
    self.event_index_name = now.strftime("events-%Y%m%d")
    self.previous_event_index_name = (now - timedelta(days=1)).strftime("events-%Y%m%d")
    self.alert_index_name = now.strftime("alerts-%Y%m")

    self.parse_config()

    # Elasticsearch
    self.es_client = ElasticsearchClient(list('{0}'.format(s) for s in self.options.esservers))

    # RabbitMQ
    broker_url = 'amqp://{0}:{1}@{2}:{3}//'.format(
        self.options.mquser,
        self.options.mqpassword,
        self.options.mqalertserver,
        self.options.mqport)
    broker_connection = Connection(broker_url)
    alert_exchange = Exchange(name=self.options.alertExchange, type='topic',
                              durable=True, delivery_mode=1)
    alert_exchange(broker_connection).declare()
    alert_queue = Queue(self.options.queueName,
                        exchange=alert_exchange,
                        routing_key=self.options.alerttopic,
                        durable=False,
                        no_ack=(not self.options.mqack))
    alert_queue(broker_connection).declare()
    self.rabbitmq_alerts_consumer = broker_connection.Consumer(alert_queue, accept=['json'])

    # Honor pytest command-line switches for a clean slate.
    if pytest.config.option.delete_indexes:
        self.reset_elasticsearch()
        self.setup_elasticsearch()
    if pytest.config.option.delete_queues:
        self.reset_rabbitmq()
class Subscriber(ConsumerProducerMixin):
    """ Consumes messages from messaging server. """

    def __init__(self, rabbitmq_url: str, rabbitmq_queue: str):
        """ Initializes connection to messaging server.

        :param rabbitmq_url: RabbitMQ URL
        :param rabbitmq_queue: RabbitMQ queue name
        """
        self.connection = Connection(rabbitmq_url, connect_timeout=10)
        # Routing key mirrors the queue name (direct-style routing).
        self.queue = Queue(rabbitmq_queue, routing_key=rabbitmq_queue)
        self.consumer = self.connection.Consumer(queues=self.queue)
        self.dashboard = SocketIO(message_queue=rabbitmq_url)

    def get_consumers(self, consumer, channel):
        """ Accepts incoming messages from RabbitMQ server. """
        incoming = consumer(self.queue, callbacks=[self.analyze])
        return [incoming]

    @abstractmethod
    def analyze(self, body: Dict, message: Message):
        """ Analyzes a message. To be implemented by consumer. """
        pass
class KombuConsumer(AbstractConsumer, ):
    """
    Consumer that uses kombu as the middleware layer.  This buys one-shot
    support for many niche brokers, but performance is poor; use it only for
    broker types the framework has not implemented natively (users can
    benchmark and compare for themselves).
    """

    BROKER_KIND = 15

    def custom_init(self):
        # The broker type is the scheme of the kombu URL (e.g. 'redis', 'amqp').
        self._middware_name = frame_config.KOMBU_URL.split(":")[0]
        logger_name = f'{self._logger_prefix}{self.__class__.__name__}--{self._middware_name}--{self._queue_name}'
        self.logger = LogManager(logger_name).get_logger_and_add_handlers(
            self._log_level,
            log_filename=f'{logger_name}.log' if self._create_logger_file else None,
            formatter_template=frame_config.NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER, )
        # patch_kombu_redis()

    # noinspection DuplicatedCode
    def _shedual_task(self):
        # Launched under an outer `while 1` loop, so it reconnects automatically.

        def callback(body: dict, message: kombu.transport.virtual.base.Message):
            # print(type(body), body, type(message), message)
            self._print_message_get_from_broker('kombu', body)
            # self.logger.debug(f'message pulled from kombu {self._middware_name}: {body}')
            kw = {'body': body, 'message': message, }
            self._submit_task(kw)

        self.exchange = Exchange('distributed_framework_exchange', 'direct', durable=True)
        self.queue = Queue(self._queue_name, exchange=self.exchange, routing_key=self._queue_name, auto_delete=False)
        # visibility_timeout: seconds before an unacked message returns to the
        # queue (transport default is 3600).
        self.conn = Connection(frame_config.KOMBU_URL, transport_options={"visibility_timeout": 600})
        self.queue(self.conn).declare()
        with self.conn.Consumer(self.queue, callbacks=[callback], no_ack=False, prefetch_count=100) as consumer:
            # Process messages and handle events on all channels.
            channel = consumer.channel  # type:Channel
            # Disable the default base64 body encoding so message payloads are
            # stored as plain text in the broker and easy to inspect.
            channel.body_encoding = 'no_encode'
            while True:
                self.conn.drain_events()

    def _confirm_consume(self, kw):
        # NOTE(review): the `pass` plus a stale comment ("redis has no
        # consume-acknowledge feature") looks like a leftover — the ack()
        # below still runs.
        pass
        kw['message'].ack()

    def _requeue(self, kw):
        # Return the message to the broker for redelivery.
        kw['message'].requeue()
def run_pulse_listener(username, password, timeout, no_send):
    """Run a Pulse message queue listener."""
    connection = Connection(
        hostname='pulse.mozilla.org',
        port=5671,
        ssl=True,
        userid=username,
        password=password,
    )

    # Fail fast if the broker is unreachable.  Retries must be >=1 or kombu
    # will retry forever.
    connection.ensure_connection(max_retries=1)

    with closing(connection):
        exchange = Exchange(config.PULSE_EXCHANGE, 'topic', channel=connection)

        # Pulse requires queue names to be namespaced by username.
        queue = Queue(
            f'queue/{username}/{config.PULSE_QUEUE_NAME}',
            exchange=exchange,
            routing_key=config.PULSE_QUEUE_ROUTING_KEY,
            durable=True,
            exclusive=False,
            auto_delete=False,
            channel=connection,
        )

        # The Pulse server forbids declaring exchanges, so only assert that
        # the exchange exists (passive=True).
        exchange.declare(passive=True)

        # Queue.declare() would also declare the exchange, which Pulse
        # forbids.  Use the low-level Queue API to declare and bind only the
        # queue itself.
        queue.queue_declare()
        queue.queue_bind()

        callback = partial(process_push_message, no_send=no_send)

        # auto_declare=False keeps the Consumer from trying to declare the
        # exchange, which the Pulse server does not allow.
        with connection.Consumer(queue, callbacks=[callback], auto_declare=False):
            if no_send:
                log.info('transmission of ping data has been disabled')
                log.info('message acks has been disabled')

            log.info('reading messages')
            try:
                connection.drain_events(timeout=timeout)
            except socket.timeout:
                log.info('message queue is empty, nothing to do')

    log.info('done')
def test_accept__content_disallowed(self):
    """A consumer whose accept list excludes 'pickle' must reject a
    pickled payload and never invoke its callback."""
    connection = Connection('memory://')
    queue = Queue('foo', exchange=self.exchange)

    producer = connection.Producer()
    producer.publish(
        {'complex': object()},
        declare=[queue],
        exchange=self.exchange,
        serializer='pickle',
    )

    on_message = Mock(name='callback')
    with connection.Consumer(queues=[queue], callbacks=[on_message]) as consumer:
        with self.assertRaises(consumer.ContentDisallowed):
            connection.drain_events(timeout=1)
    on_message.assert_not_called()
def test_accept__content_allowed(self):
    """A consumer that accepts 'pickle' delivers the deserialized body to
    its callback."""
    connection = Connection('memory://')
    queue = Queue('foo', exchange=self.exchange)

    producer = connection.Producer()
    producer.publish(
        {'complex': object()},
        declare=[queue],
        exchange=self.exchange,
        serializer='pickle',
    )

    on_message = Mock(name='callback')
    with connection.Consumer(queues=[queue], accept=['pickle'], callbacks=[on_message]):
        connection.drain_events(timeout=1)

    on_message.assert_called()
    body, message = on_message.call_args[0]
    self.assertTrue(body['complex'])
def start(self):
    """Consume configured Pulse events until self.running is cleared."""
    log.info("Listening for Pulse messages")
    self.running = True
    connection = Connection(
        hostname=self.pulse_host,
        userid=self.pulse_user,
        password=self.pulse_password,
        ssl=True,
        # Kombu doesn't support the port correctly for amqp with ssl...
        port=5671,
    )

    consumers = []
    for event in self.events:
        log.debug("Setting up queue on exchange: %s with routing_key: %s",
                  event.exchange, event.routing_key)

        # Passive exchanges must be used, otherwise kombu will try to
        # create the exchange (which we don't want, we're consuming
        # an existing one!)
        exchange = Exchange(name=event.exchange, type="topic", passive=True)
        queue = Queue(
            name=event.queue_name,
            exchange=exchange,
            routing_key=event.routing_key,
            durable=True,
            exclusive=False,
            auto_delete=False,
        )
        consumer = connection.Consumer(queues=[queue],
                                       callbacks=[event.callback])
        consumer.consume()
        consumers.append(consumer)

    try:
        # XXX: drain_events only returns after receiving a message. Is
        # there a way we can have it return regularly to be non-blocking?
        # Its timeout parameter seems to break receiving of messages.
        # Maybe it doesn't matter if we can't shut down gracefully since
        # messages will be reprocessed next time.
        while self.running:
            connection.drain_events()
    finally:
        for consumer in consumers:
            consumer.close()
        connection.close()
class Amqp(object):
    """Thin kombu wrapper pairing one connection with a single direct
    exchange and queue; producer/consumer are created lazily."""

    def __init__(self, url, exchange, queue, routing_key):
        self.conn = Connection(url)
        self.exchange = Exchange(exchange, 'direct')
        self.routing_key = routing_key
        self.queue = Queue(queue, self.exchange, self.routing_key)
        self.producer = None
        self.consumer = None

    def send(self, obj):
        """Publish obj as zlib-compressed JSON, declaring the queue."""
        if not self.producer:
            self.producer = self.conn.Producer()
        self.producer.publish(
            obj,
            exchange=self.exchange,
            routing_key=self.routing_key,
            declare=[self.queue],
            serializer='json',
            compression='zlib',
        )

    def poll(self, cb_func):
        """Consume forever, delivering each message to cb_func."""
        if not self.consumer:
            self.consumer = self.conn.Consumer(self.queue,
                                               callbacks=[cb_func])
            # One unacked message at a time.
            self.consumer.qos(prefetch_count=1)
            self.consumer.consume()
        while True:
            self.conn.drain_events()

    def _release(self):
        # Tear down consumer, producer, then the connection itself.
        if self.consumer:
            self.consumer.close()
            self.consumer = None
        if self.producer:
            self.producer.close()
            self.producer = None
        if self.conn:
            self.conn.release()
            self.conn = None

    def __enter__(self):
        return self

    def __exit__(self, exec_type, exc_value, traceback):
        self._release()
def listen(self):
    # Consume Pulse messages until self._stopping is set.  Python 2 syntax
    # (`except socket.error, e`).  NOTE(review): this chunk is truncated —
    # the final bare `except:` handler has no visible body.
    logger = utils.getLogger()
    connect_timeout = 5
    # NOTE(review): `wait` is unused in the visible code; presumably a retry
    # delay used by the truncated exception handler — confirm upstream.
    wait = 30
    connection = None
    restart = True
    while restart:
        restart = False
        try:
            # connection does not connect to the server until
            # either the connection.connect() method is called
            # explicitly or until kombu calls it implicitly as
            # needed.
            logger.debug('AutophonePulseMonitor: Connection()')
            connection = Connection(hostname=self.hostname,
                                    userid=self.userid,
                                    password=self.password,
                                    virtual_host=self.virtual_host,
                                    port=DEFAULT_SSL_PORT,
                                    ssl=True,
                                    connect_timeout=connect_timeout)
            logger.debug('AutophonePulseMonitor: connection.Consumer()')
            consumer = connection.Consumer(self.queues,
                                           callbacks=[self.handle_message],
                                           accept=['json'],
                                           auto_declare=False)
            logger.debug('AutophonePulseMonitor: bind queues')
            # auto_declare is off, so declare and bind each queue by hand.
            for queue in self.queues:
                queue(connection).queue_declare(passive=False)
                queue(connection).queue_bind()
            with consumer:
                while not self._stopping.is_set():
                    try:
                        connection.drain_events(timeout=self.timeout)
                    except socket.timeout:
                        # Expected when idle; loop to re-check _stopping.
                        pass
                    except socket.error, e:
                        # Only swallow read timeouts surfaced as socket.error.
                        if "timed out" not in str(e):
                            raise
            logger.debug('AutophonePulseMonitor.listen: stopping')
        except:
def setup_rabbitmq_client(options):
    """Create (once) and return the module-level RabbitMQ alerts consumer."""
    global RABBITMQ_CLIENT
    try:
        # Fast path: already initialized on a previous call.
        RABBITMQ_CLIENT
    except NameError:
        # First call: connect, declare exchange + queue, cache a JSON consumer.
        broker_url = 'amqp://{0}:{1}@{2}:{3}//'.format(
            options.mquser,
            options.mqpassword,
            options.mqalertserver,
            options.mqport)
        broker_connection = Connection(broker_url)
        alert_exchange = Exchange(name=options.alertExchange, type='topic',
                                  durable=True, delivery_mode=1)
        alert_exchange(broker_connection).declare()
        alert_queue = Queue(options.queueName,
                            exchange=alert_exchange,
                            routing_key=options.alerttopic,
                            durable=False,
                            no_ack=(not options.mqack))
        alert_queue(broker_connection).declare()
        RABBITMQ_CLIENT = broker_connection.Consumer(alert_queue, accept=['json'])
    return RABBITMQ_CLIENT
# NOTE(review): script fragment (Python 2).  Drains events forever, followed
# by a triple-quoted block of dead consumer code whose closing quotes are not
# visible in this chunk — it may be unterminated.  Line structure inside the
# string is reconstructed; confirm against the original file.
while True:
    connection.drain_events()
'''
def handle_message(body, message):
    try:
        print " message is: ", body
        message.ack()
    except Exception, e:
        print 'handle message error'

connection = Connection('amqp://*****:*****@192.168.3.82:5672//')
consumer = connection.Consumer(queue, callbacks=[handle_message])
consumer.qos(prefetch_count=1)
consumer.consume()

for t in range(10000):
    try:
        connection.drain_events()
        #eventloop(connection)
    except Exception, e:
        print e
        pass

# with Consumer(connection, queue, callbacks=[handle_message]) as consumer:
#     for i in range(3):
#         print 'consumer start'
#         connection.drain_events(timeout=1)
def listen(self):
    # Consume Pulse messages until self._stopping is set, reconnecting after
    # failures.  self.shared_lock is held except while blocked inside
    # drain_events(), so other threads can coordinate with handle_message().
    connect_timeout = 5
    wait = 30  # seconds to sleep before attempting a reconnect
    connection = None
    restart = True
    while restart:
        restart = False
        if self.verbose:
            LOGGER.debug(
                'AutophonePulseMonitor: start shared_lock.acquire')
        self.shared_lock.acquire()
        try:
            # connection does not connect to the server until
            # either the connection.connect() method is called
            # explicitly or until kombu calls it implicitly as
            # needed.
            LOGGER.debug('AutophonePulseMonitor: Connection()')
            connection = Connection(hostname=self.hostname,
                                    userid=self.userid,
                                    password=self.password,
                                    virtual_host=self.virtual_host,
                                    port=DEFAULT_SSL_PORT,
                                    ssl=True,
                                    connect_timeout=connect_timeout)
            LOGGER.debug('AutophonePulseMonitor: connection.Consumer()')
            consumer = connection.Consumer(self.queues,
                                           callbacks=[self.handle_message],
                                           accept=['json'],
                                           auto_declare=False)
            LOGGER.debug('AutophonePulseMonitor: bind queues')
            # auto_declare is off, so declare and bind each queue by hand.
            for queue in self.queues:
                queue(connection).queue_declare(passive=False)
                queue(connection).queue_bind()
            with consumer:
                while not self._stopping.is_set():
                    try:
                        if self.verbose:
                            LOGGER.debug(
                                'AutophonePulseMonitor shared_lock.release'
                            )
                        # Drop the lock while blocked waiting for messages.
                        self.shared_lock.release()
                        connection.drain_events(timeout=self.timeout)
                    except socket.timeout:
                        # Expected when idle; loop to re-check _stopping.
                        pass
                    finally:
                        if self.verbose:
                            LOGGER.debug(
                                'AutophonePulseMonitor shared_lock.acquire'
                            )
                        # Re-take the lock before touching shared state.
                        self.shared_lock.acquire()
            LOGGER.debug('AutophonePulseMonitor.listen: stopping')
        except:
            # NOTE(review): bare except — logs everything and schedules a
            # restart unless we are stopping.
            LOGGER.exception('AutophonePulseMonitor Exception')
            if connection:
                connection.release()
            if self.verbose:
                LOGGER.debug(
                    'AutophonePulseMonitor exit shared_lock.release')
            # Release while sleeping so other threads are not starved.
            self.shared_lock.release()
            if not self._stopping.is_set():
                restart = True
                time.sleep(wait)
            if self.verbose:
                LOGGER.debug('AutophonePulseMonitor shared_lock.acquire')
            self.shared_lock.acquire()
        finally:
            if self.verbose:
                LOGGER.debug(
                    'AutophonePulseMonitor exit shared_lock.release')
            # On a clean (non-restarting) exit, release the connection too.
            if connection and not restart:
                connection.release()
            self.shared_lock.release()
class test_PyroTransport:
    # Tests for kombu's Pyro virtual transport.  Most cases are skipped
    # unless a Pyro nameserver and a Kombu broker object are running.

    def setup(self):
        # virtual_host names the Pyro broker object to look up.
        self.c = Connection(transport='pyro', virtual_host="kombu.broker")
        self.e = Exchange('test_transport_pyro')
        self.q = Queue('test_transport_pyro',
                       exchange=self.e,
                       routing_key='test_transport_pyro')
        self.q2 = Queue('test_transport_pyro2',
                        exchange=self.e,
                        routing_key='test_transport_pyro2')
        self.fanout = Exchange('test_transport_pyro_fanout', type='fanout')
        self.q3 = Queue('test_transport_pyro_fanout1', exchange=self.fanout)
        self.q4 = Queue('test_transport_pyro_fanout2', exchange=self.fanout)

    def test_driver_version(self):
        assert self.c.transport.driver_version()

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_produce_consume_noack(self):
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer = Consumer(channel, self.q, no_ack=True)

        # Publish ten messages, then drain until the callback saw them all.
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_pyro')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        assert len(_received) == 10

    def test_drain_events(self):
        # With no channels, draining must time out.
        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        c1 = self.c.channel()
        c2 = self.c.channel()

        # Still nothing to deliver with idle channels open.
        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)
        del (c1)  # so pyflakes doesn't complain.
        del (c2)

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_drain_events_unregistered_queue(self):
        c1 = self.c.channel()
        producer = self.c.Producer()
        consumer = self.c.Consumer([self.q2])

        producer.publish(
            {'hello': 'world'},
            declare=consumer.queues,
            routing_key=self.q2.routing_key,
            exchange=self.q2.exchange,
        )
        message = consumer.queues[0].get()._raw

        # Stub cycle that always hands back the captured raw message on c1.
        class Cycle(object):

            def get(self, callback, timeout=None):
                return (message, 'foo'), c1

        self.c.transport.cycle = Cycle()
        self.c.drain_events()

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_queue_for(self):
        chan = self.c.channel()
        x = chan._queue_for('foo')
        assert x
        # Repeated lookups must return the same cached queue object.
        assert chan._queue_for('foo') is x
class MozReviewBot(object):
    # Pulse-driven bot base class: consumes MozReview commit messages from a
    # Pulse queue and hands each commit to process_commit(), which subclasses
    # override.  Configuration precedence: __init__ args, then config file,
    # then environment variables.

    def __init__(self, config_path=None, reviewboard_url=None,
                 reviewboard_user=None, reviewboard_password=None,
                 pulse_host=None, pulse_port=None, pulse_userid=None,
                 pulse_password=None, exchange=None, queue=None,
                 routing_key=None, pulse_timeout=None, pulse_ssl=False,
                 repo_root=None, logger=None):
        if logger is None:
            self.logger = logging.getLogger('mozreviewbot')
        else:
            self.logger = logger

        # We use options passed into __init__ preferentially. If any of these
        # are not specified, we next check the configuration file, if any.
        # Finally, we use environment variables.
        if config_path and not os.path.isfile(config_path):
            # ConfigParser doesn't seem to throw if it is unable to find the
            # config file so we'll explicitly check that it exists.
            self.logger.error('could not locate config file: %s' % (
                config_path))
            config_path = None

        if config_path:
            try:
                config = ConfigParser()
                config.read(config_path)
                reviewboard_url = (reviewboard_url or
                                   config.get('reviewboard', 'url'))
                reviewboard_user = (reviewboard_user or
                                    config.get('reviewboard', 'user'))
                reviewboard_password = (reviewboard_password or
                                        config.get('reviewboard', 'password'))
                pulse_host = pulse_host or config.get('pulse', 'host')
                pulse_port = pulse_port or config.get('pulse', 'port')
                pulse_userid = pulse_userid or config.get('pulse', 'userid')
                pulse_password = pulse_password or config.get('pulse',
                                                              'password')
                exchange = exchange or config.get('pulse', 'exchange')
                queue = queue or config.get('pulse', 'queue')
                routing_key = routing_key or config.get('pulse',
                                                        'routing_key')
                pulse_timeout = pulse_timeout or config.get('pulse',
                                                            'timeout')
                if pulse_ssl is None:
                    pulse_ssl = config.get('pulse', 'ssl')
            except NoSectionError as e:
                self.logger.error('configuration file missing section: %s' %
                                  e.section)
            try:
                repo_root = repo_root or config.get('hg', 'repo_root')
            except (NoOptionError, NoSectionError):
                # Subclasses do not need to define repo root if they do not
                # plan on using the hg functionality.
                pass

            # keep config around in case any subclasses would like to extract
            # options from it.
            self.config = config
        else:
            self.config = None

        reviewboard_url = reviewboard_url or os.environ.get('REVIEWBOARD_URL')
        pulse_host = pulse_host or os.environ.get('PULSE_HOST')
        pulse_port = pulse_port or os.environ.get('PULSE_PORT')

        self.rbclient = RBClient(reviewboard_url, username=reviewboard_user,
                                 password=reviewboard_password)
        self.api_root = self.rbclient.get_root()

        self.conn = Connection(hostname=pulse_host, port=pulse_port,
                               userid=pulse_userid, password=pulse_password,
                               ssl=pulse_ssl)

        self.exchange = Exchange(exchange, type='topic', durable=True)
        self.queue = Queue(name=queue, exchange=self.exchange, durable=True,
                           routing_key=routing_key, exclusive=False,
                           auto_delete=False)

        self.pulse_timeout = float(pulse_timeout)
        self.repo_root = repo_root

        # Locate an 'hg' executable on PATH, if any.
        self.hg = None
        for DIR in os.environ['PATH'].split(os.pathsep):
            p = os.path.join(DIR, 'hg')
            if os.path.exists(p):
                self.hg = p

    def _get_available_messages(self):
        # Drain whatever is currently queued (until pulse_timeout elapses)
        # and return it as a list of (body, message) pairs.
        messages = []

        def onmessage(body, message):
            messages.append((body, message))

        consumer = self.conn.Consumer([self.queue], callbacks=[onmessage],
                                      auto_declare=True)
        with consumer:
            try:
                self.conn.drain_events(timeout=self.pulse_timeout)
            except socket.timeout:
                # No more messages within the timeout; return what we have.
                pass

        return messages

    def _run_hg(self, hg_args):
        # Run hg with the given arguments and return its stdout.
        # TODO: Use hgtool.
        args = [self.hg] + hg_args
        env = dict(os.environ)
        env['HGENCODING'] = 'utf-8'

        null = open(os.devnull, 'w')

        # Execute at / to prevent Mercurial's path traversal logic from
        # kicking in and picking up unwanted config files.
        return subprocess.check_output(args, stdin=null, stderr=null,
                                       env=env, cwd='/')

    def ensure_hg_repo_exists(self, landing_repo_url, repo_url, pull_rev=None):
        # Clone (if needed) and pull the repository, returning its local path.
        # TODO: Use the root changeset in each repository as an identifier.
        # This will enable "forks" to share the same local clone.
        # The "share" extension now has support for this.
        # Read hg help -e share for details about "pooled storage."
        # We should probably deploy that.
        url = landing_repo_url or repo_url

        # Local clones are keyed by the SHA-1 of the repo URL.
        sha1 = hashlib.sha1(url).hexdigest()
        repo_path = os.path.join(self.repo_root, sha1)

        if not os.path.exists(repo_path):
            args = ['clone', url, repo_path]
            self.logger.debug('cloning %s' % url)
            self._run_hg(args)
            self.logger.debug('finished cloning %s' % url)

        args = ['-R', repo_path, 'pull', repo_url]
        if pull_rev:
            args.extend(['-r', pull_rev])
        self.logger.debug('pulling %s' % repo_url)
        self._run_hg(args)
        self.logger.debug('finished pulling %s' % repo_url)

        return repo_path

    def hg_commit_changes(self, repo_path, node, diff_context=None):
        """Obtain information about what changed in a Mercurial commit.

        The return value is a tuple of:

          (set(adds), set(dels), set(mods), None, diff)

        The first 4 items list what files changed in the changeset. The
        last item is a unified diff of the changeset.

        File copies are currently not returned. ``None`` is being used as
        a placeholder until support is needed.
        """
        # Random UUIDs keep the delimiters from colliding with file names.
        part_delim = str(uuid.uuid4())
        item_delim = str(uuid.uuid4())

        parts = [
            '{join(file_adds, "%s")}' % item_delim,
            '{join(file_dels, "%s")}' % item_delim,
            '{join(file_mods, "%s")}' % item_delim,
            '{join(file_copies, "%s")}' % item_delim,
        ]

        template = part_delim.join(parts)

        # Update to the target node, then render the template for it.
        self._run_hg(['-R', repo_path, 'up', '-C', node])

        res = self._run_hg(
            ['-R', repo_path, 'log', '-r', node, '-T', template])

        diff_args = ['-R', repo_path, 'diff', '-c', node]
        if diff_context is not None:
            diff_args.extend(['-U', str(diff_context)])
        diff = self._run_hg(diff_args)

        adds, dels, mods, copies = res.split(part_delim)
        adds = set(f for f in adds.split(item_delim) if f)
        dels = set(f for f in dels.split(item_delim) if f)
        mods = set(f for f in mods.split(item_delim) if f)
        # TODO parse the copies.

        return adds, dels, mods, None, diff

    def strip_nonpublic_changesets(self, repo_path):
        """Strip non-public changesets from a repository.

        Pulling changesets over and over results in many heads in a
        repository. This makes Mercurial slow. So, we prune non-public
        changesets/heads to keep repositories fast.
        """
        self._run_hg([
            '-R', repo_path, '--config', 'extensions.strip=', 'strip',
            '--no-backup', '-r', 'not public()'
        ])

    def get_commit_files(self, commit):
        """Fetches a list of files that were changed by this commit."""
        rrid = commit['review_request_id']
        diff_revision = commit['diffset_revision']

        start = 0
        files = []
        while True:
            # Page through the Review Board files API.
            result = self.api_root.get_files(review_request_id=rrid,
                                             diff_revision=diff_revision,
                                             start=start)
            files.extend(result)
            start += result.num_items
            if result.num_items == 0 or start >= result.total_results:
                break
        return files

    def handle_available_messages(self):
        # Process each queued Pulse message, commit by commit, acking
        # unconditionally (see finally below).
        for body, message in self._get_available_messages():
            payload = body['payload']
            repo_url = payload['repository_url']
            landing_repo_url = payload['landing_repository_url']
            commits = payload['commits']
            # TODO: should we allow process commits to signal that we should
            # skip acknowledging the message?
            try:
                for commit in commits:
                    rrid = commit['review_request_id']
                    diff_revision = commit['diffset_revision']

                    review = BatchReview(self.api_root, rrid, diff_revision)
                    self.process_commit(review, landing_repo_url, repo_url,
                                        commit)
            finally:
                # This prevents the queue from growing indefinitely but
                # prevents us from fixing whatever caused the exception
                # and restarting the bot to handle the message.
                message.ack()

    def listen_forever(self):
        # Poll for and process new messages indefinitely.
        while True:
            self.handle_available_messages()

    def process_commit(self, review, repo_url, commits):
        # Subclass hook.  NOTE(review): handle_available_messages() calls
        # this with four arguments (review, landing_repo_url, repo_url,
        # commit) while this default signature only accepts three — invoking
        # the base implementation through that path would raise TypeError.
        # Confirm against subclass overrides before changing.
        pass
#!/usr/bin/env python from kombu import Connection, Exchange, Queue def process_message(body, message): print body message.ack() nova_exchange = Exchange('nova', 'topic', durable=False) notifications_queue = Queue('notification-listener', exchange=nova_exchange, routing_key='notifications.info') conn = Connection('amqp://*****:*****@192.168.0.10//') consumer = conn.Consumer(notifications_queue, callbacks=[process_message]) consumer.consume() while True: conn.drain_events()
#!/usr/bin/env python from kombu import Connection, Exchange, Queue def process_message(body, message): print body message.ack() nova_exchange = Exchange('nova', 'topic', durable=False) nova_queue = Queue('listener', exchange = nova_exchange, routing_key='#') conn = Connection('amqp://*****:*****@192.168.0.10//') consumer = conn.Consumer(nova_queue, callbacks=[process_message]) consumer.consume() while True: conn.drain_events()
class GenericConsumer(object):
    """Generic publisher class that specific consumers inherit from.

    FIXME: Mandatory properties, like "topic", should not be set from
    generic functions like configure() but should instead be explicitly
    required somewhere, e.g. the constructor.
    """

    def __init__(self, config, exchange=None, connect=True, heartbeat=False,
                 timeout=None, **kwargs):
        self.config = config
        self.exchange = exchange
        self.connection = None
        self.durable = False
        self.applabel = ''
        self.heartbeat = heartbeat
        self.timeout = timeout
        # Pull recognized options out of kwargs so leftovers can be detected.
        for x in ['applabel', 'topic', 'callback', 'durable']:
            if x in kwargs:
                setattr(self, x, kwargs[x])
                del kwargs[x]

        # Only used if there is no applabel.
        self.queue_gen_name = None

        if connect:
            self.connect()

    @property
    def queue_name(self):
        # This is a property instead of being set in the constructor since
        # applabel can be set later via configure().
        queue_name = 'queue/%s/' % self.config.user
        if self.applabel:
            queue_name += self.applabel
        else:
            # No applabel: generate a stable random suffix once per instance.
            if not self.queue_gen_name:
                self.queue_gen_name = str(uuid.uuid1())
            queue_name += self.queue_gen_name
        return queue_name

    def configure(self, **kwargs):
        """Sets variables."""
        for x in kwargs:
            setattr(self, x, kwargs[x])

    def connect(self):
        # Idempotent: only builds a Connection when none exists.
        if not self.connection:
            self.connection = Connection(hostname=self.config.host,
                                         port=self.config.port,
                                         userid=self.config.user,
                                         password=self.config.password,
                                         virtual_host=self.config.vhost,
                                         ssl=self.config.ssl)

    def disconnect(self):
        if self.connection:
            self.connection.release()
            self.connection = None

    def purge_existing_messages(self):
        """Purge messages that are already in the queue on the broker.

        TODO: I think this is only supported by the amqp backend.
        """
        if self.durable and not self.applabel:
            raise InvalidAppLabel('Durable consumers must have an applabel')
        if not self.connection:
            self.connect()
        queue = self._create_queue()
        try:
            queue(self.connection).purge()
        except ChannelError as e:
            # NOTE(review): the `pass` below is dead code — every
            # ChannelError is re-raised, including the 404 this branch
            # appears meant to swallow.  Compare delete_queue() below,
            # which only re-raises non-404 errors.  Confirm intent.
            if e.message == 404:
                pass
            raise

    def queue_exists(self):
        self._check_params()
        if not self.connection:
            self.connect()
        queue = self._create_queue()
        try:
            # passive=True only checks for existence; it will not create.
            queue(self.connection).queue_declare(passive=True)
        except ChannelError as e:
            if e.message == 404 or e.reply_code == 404:
                return False
            raise
        return True

    def delete_queue(self):
        self._check_params()
        if not self.connection:
            self.connect()
        queue = self._create_queue()
        try:
            queue(self.connection).delete()
        except ChannelError as e:
            # A missing queue (404) is fine; anything else propagates.
            if e.message != 404:
                raise

    def listen(self, callback=None, on_connect_callback=None):
        """Blocks and calls the callback when a message comes into the queue.

        For info on one script listening to multiple channels, see
        http://ask.github.com/carrot/changelog.html#id1.
        """
        while True:
            consumer = self._build_consumer(
                callback=callback,
                on_connect_callback=on_connect_callback)
            with consumer:
                self._drain_events_loop()

    def _build_consumer(self, callback=None, on_connect_callback=None):
        # One can optionally provide a callback to listen (if it wasn't already)
        if callback:
            self.callback = callback

        self._check_params()

        if not self.connection:
            self.connect()

        exchange = Exchange(self.exchange[0], type='topic')

        # Raise an error if the exchange doesn't exist.
        exchange(self.connection).declare(passive=True)

        # Create a queue.
        queue = self._create_queue(exchange, self.topic[0])

        if on_connect_callback:
            on_connect_callback()

        # Don't autodeclare, since we don't want consumers trying to
        # declare exchanges.
        consumer = self.connection.Consumer(queue, auto_declare=False,
                                            callbacks=[self.callback])
        consumer.queues[0].queue_declare()
        # Bind to the first key.
        consumer.queues[0].queue_bind()

        # Bind to any additional keys.
        for new_exchange, new_topic in zip(self.exchange[1:],
                                           self.topic[1:]):
            exchange = Exchange(new_exchange, type='topic')
            exchange(self.connection).declare(passive=True)
            consumer.queues[0].bind_to(new_exchange, new_topic)

        if self.heartbeat:
            hb_exchange = Exchange('exchange/pulse/test', type='topic')
            consumer.queues[0].bind_to(hb_exchange, 'heartbeat')

        return consumer

    def _drain_events_loop(self):
        # Drain until a timeout fires, then disconnect and let listen()
        # rebuild the consumer.
        while True:
            try:
                self.connection.drain_events(timeout=self.timeout)
            except socket_timeout:
                logging.warning("Timeout! Restarting pulse consumer.")
                try:
                    self.disconnect()
                except Exception:
                    logging.warning("Problem with disconnect().")
                break

    def _check_params(self):
        # Validate exchange/topic/callback settings and normalize
        # topic/exchange into parallel lists.
        if not self.exchange:
            raise InvalidExchange(self.exchange)

        if not self.topic:
            raise InvalidTopic(self.topic)

        if self.durable and not self.applabel:
            raise InvalidAppLabel('Durable consumers must have an applabel')

        if not self.callback or not hasattr(self.callback, '__call__'):
            raise InvalidCallback(self.callback)

        # We support multiple bindings if we were given an array for the topic.
        if not isinstance(self.topic, list):
            self.topic = [self.topic]

        # We support multiple exchanges if we were given an array for
        # the exchange. In this case, the exchange list and the topic
        # list must have the same length.
        if not isinstance(self.exchange, list):
            self.exchange = [self.exchange] * len(self.topic)
        elif len(self.exchange) != len(self.topic):
            raise InvalidExchange(
                "The list of exchanges must have the same length as the list of"
                " topics.")

    def _create_queue(self, exchange=None, routing_key=''):
        # Non-durable consumers get auto-deleted queues.
        return Queue(name=self.queue_name,
                     exchange=exchange,
                     routing_key=routing_key,
                     durable=self.durable,
                     exclusive=False,
                     auto_delete=not self.durable)
class camqp(threading.Thread):
    """Threaded AMQP client (canopsis): owns one connection/channel, a set of
    exchanges and consumer queues, and a publish helper.

    NOTE(review): this class is truncated at the end of this chunk — the
    publish() method's final `else:` branch is cut off.  It also uses
    Python 2-only `except Exception, err:` syntax in publish(); confirm the
    target interpreter before touching runtime behavior.
    """

    def __init__(self, host="localhost", port=5672, userid="guest",
                 password="******", virtual_host="canopsis",
                 exchange_name="canopsis", logging_name="camqp",
                 logging_level=logging.INFO, read_config_file=True,
                 auto_connect=True, on_ready=None):
        """Set up state, pre-create the exchange objects and optionally connect.

        :param read_config_file: when true, read_config("amqp") may override
            the connection parameters passed above.
        :param on_ready: callable invoked once queues are consuming.
        """
        threading.Thread.__init__(self)

        self.logger = logging.getLogger(logging_name)

        self.host = host
        self.port = port
        self.userid = userid
        self.password = password
        self.virtual_host = virtual_host
        self.exchange_name = exchange_name
        self.logging_level = logging_level

        if (read_config_file):
            self.read_config("amqp")

        self.amqp_uri = "amqp://%s:%s@%s:%s/%s" % (self.userid, self.password, self.host, self.port, self.virtual_host)

        self.logger.setLevel(logging_level)

        # Derived exchange names used by canopsis for event routing.
        self.exchange_name_events = exchange_name + ".events"
        self.exchange_name_alerts = exchange_name + ".alerts"
        self.exchange_name_incidents = exchange_name + ".incidents"

        self.chan = None          # single channel opened by connect()
        self.conn = None          # kombu Connection
        self.connected = False
        self.on_ready = on_ready

        self.RUN = True           # cleared by stop() to end the run() loop

        self.exchanges = {}       # name -> kombu Exchange (unbound)
        self.queues = {}          # name -> settings dict built by add_queue()

        self.paused = False       # when true, run() idles instead of draining

        self.connection_errors = (
            ConnectionError,
            socket.error,
            IOError,
            OSError,
        )
        #AttributeError)

        ## create exchange
        self.logger.debug("Create exchanges object")
        for exchange_name in [self.exchange_name, self.exchange_name_events, self.exchange_name_alerts, self.exchange_name_incidents]:
            self.logger.debug(" + %s" % exchange_name)
            self.get_exchange(exchange_name)

        if auto_connect:
            self.connect()

        self.logger.debug("Object canamqp initialized")

    def run(self):
        """Thread main loop: connect, consume, and reconnect on any failure."""
        self.logger.debug("Start thread ...")
        reconnect = False

        while self.RUN:
            self.connect()
            #self.wait_connection()

            if self.connected:
                self.init_queue(reconnect=reconnect)

                self.logger.debug("Drain events ...")
                while self.RUN:
                    try:
                        if not self.paused:
                            # Short timeout so stop()/pause are noticed quickly.
                            self.conn.drain_events(timeout=0.5)
                        else:
                            time.sleep(0.5)
                    except socket.timeout:
                        # Expected when no message arrived within the timeout.
                        pass
                    except self.connection_errors as err:
                        self.logger.error("Connection error ! (%s)" % err)
                        break
                    except Exception as err:
                        self.logger.error("Unknown error: %s (%s)" % (err, type(err)))
                        traceback.print_exc(file=sys.stdout)
                        break

                self.disconnect()

            if self.RUN:
                self.logger.error("Connection loss, try to reconnect in few seconds ...")
                reconnect = True
                self.wait_connection(timeout=5)

        self.logger.debug("End of thread ...")

    def stop(self):
        """Ask the run() loop to exit; takes effect within ~0.5s."""
        self.logger.debug("Stop thread ...")
        self.RUN = False

    def connect(self):
        """Open the connection and channel, then declare all known exchanges.

        Sets self.connected; errors are logged, never raised (the run() loop
        retries).
        """
        if not self.connected:
            self.logger.info("Connect to AMQP Broker (%s:%s)" % (self.host, self.port))

            self.conn = Connection(self.amqp_uri)

            try:
                self.logger.debug(" + Connect")
                self.conn.connect()
                self.logger.info("Connected to AMQP Broker.")
                # Producer pool used by publish().
                self.producers = kombu.pools.Producers(limit=10)
                self.connected = True
            except Exception as err:
                self.conn.release()
                self.logger.error("Impossible to connect (%s)" % err)

            if self.connected:
                self.logger.debug(" + Open channel")
                try:
                    self.chan = self.conn.channel()
                    self.logger.debug("Channel openned. Ready to send messages")
                    try:
                        ## declare exchange
                        self.logger.debug("Declare exchanges")
                        for exchange_name in self.exchanges:
                            self.logger.debug(" + %s" % exchange_name)
                            self.exchanges[exchange_name](self.chan).declare()
                    except Exception as err:
                        self.logger.error("Impossible to declare exchange (%s)" % err)
                except Exception as err:
                    self.logger.error(err)
        else:
            self.logger.debug("Allready connected")

    def get_exchange(self, name):
        """Return the cached Exchange for *name*, creating it on first use.

        "amq.direct" gets a durable direct exchange; anything else a durable
        topic exchange.  Returns None for a falsy name.
        """
        if name:
            # NOTE(review): bare `except:` also swallows KeyboardInterrupt /
            # SystemExit; the intent here is clearly a cache miss, so this
            # should be `except KeyError:`.
            try:
                return self.exchanges[name]
            except:
                if name == "amq.direct":
                    self.exchanges[name] = Exchange(name, "direct", durable=True)
                else:
                    self.exchanges[name] = Exchange(name, "topic", durable=True, auto_delete=False)
                return self.exchanges[name]
        else:
            return None

    def init_queue(self, reconnect=False):
        """Create, bind and start consuming every queue registered via add_queue().

        Idempotent per queue: already-created queues/consumers are reused.
        Calls self.on_ready (if set) once all queues are consuming.
        """
        if self.queues:
            self.logger.debug("Init queues")
            for queue_name in self.queues.keys():
                self.logger.debug(" + %s" % queue_name)
                qsettings = self.queues[queue_name]

                if not qsettings['queue']:
                    self.logger.debug(" + Create queue")

                    # copy list
                    routing_keys = list(qsettings['routing_keys'])
                    routing_key = None

                    # First key is used at declare time; the rest are extra binds.
                    if len(routing_keys):
                        routing_key = routing_keys[0]
                        routing_keys = routing_keys[1:]

                    exchange = self.get_exchange(qsettings['exchange_name'])

                    # amq.direct convention: route by queue name if no key given.
                    if (qsettings['exchange_name'] == "amq.direct" and not routing_key):
                        routing_key = queue_name

                    #self.logger.debug(" + exchange: '%s', routing_key: '%s', exclusive: %s, auto_delete: %s, no_ack: %s" % (qsettings['exchange_name'], routing_key, qsettings['exclusive'], qsettings['auto_delete'], qsettings['no_ack']))
                    self.logger.debug(" + exchange: '%s', exclusive: %s, auto_delete: %s, no_ack: %s" % (qsettings['exchange_name'], qsettings['exclusive'], qsettings['auto_delete'], qsettings['no_ack']))
                    qsettings['queue'] = Queue(queue_name, exchange=exchange, routing_key=routing_key, exclusive=qsettings['exclusive'], auto_delete=qsettings['auto_delete'], no_ack=qsettings['no_ack'], channel=self.conn.channel())

                    qsettings['queue'].declare()

                    if len(routing_keys):
                        self.logger.debug(" + Bind on all routing keys")
                        for routing_key in routing_keys:
                            self.logger.debug(" + routing_key: '%s'" % routing_key)
                            try:
                                qsettings['queue'].bind_to(exchange=exchange, routing_key=routing_key)
                            except:
                                # bind_to requires a recent kombu.
                                self.logger.error("You need upgrade your Kombu version (%s)" % kombu.__version__)

                if not qsettings['consumer']:
                    self.logger.debug(" + Create Consumer")
                    qsettings['consumer'] = self.conn.Consumer(qsettings['queue'], callbacks=[qsettings['callback']])

                self.logger.debug(" + Consume queue")
                qsettings['consumer'].consume()

            if self.on_ready:
                self.on_ready()

    def add_queue(self, queue_name, routing_keys, callback, exchange_name=None, no_ack=True, exclusive=False, auto_delete=True):
        """Register a queue to be created/consumed by the next init_queue().

        :param routing_keys: str or list of str; normalized to a list.
        :param callback: kombu message callback (body, message).
        """
        #if exchange_name == "amq.direct":
        #	routing_keys = queue_name

        c_routing_keys = []

        if not isinstance(routing_keys, list):
            if isinstance(routing_keys, str):
                c_routing_keys = [routing_keys]
        else:
            c_routing_keys = routing_keys

        if not exchange_name:
            exchange_name = self.exchange_name

        self.queues[queue_name] = {
            'queue': False,
            'consumer': False,
            'queue_name': queue_name,
            'routing_keys': c_routing_keys,
            'callback': callback,
            'exchange_name': exchange_name,
            'no_ack': no_ack,
            'exclusive': exclusive,
            'auto_delete': auto_delete
        }

    def publish(self, msg, routing_key, exchange_name=None, serializer="json", compression=None, content_type=None, content_encoding=None):
        """Publish *msg* (a dict; copied and cleaned before serialization).

        Blocks via wait_connection() until connected.
        NOTE(review): truncated below — the trailing `else:` branch is cut off
        at this chunk boundary; `except Exception, err:` is Python 2 syntax.
        """
        self.wait_connection()
        if self.connected:
            if not exchange_name:
                exchange_name = self.exchange_name

            self.logger.debug("Send message to %s in %s" % (routing_key, exchange_name))
            with self.producers[self.conn].acquire(block=True) as producer:
                try:
                    _msg = msg.copy()
                    camqp._clean_msg_for_serialization(_msg)
                    producer.publish(_msg, serializer=serializer, compression=compression, routing_key=routing_key, exchange=self.get_exchange(exchange_name))
                    self.logger.debug(" + Sended")
                except Exception, err:
                    self.logger.error(" + Impossible to send (%s)" % err)
        else:
class test_MemoryTransport(Case):
    """End-to-end checks of the in-memory transport: publish/consume over
    direct and fanout exchanges, compression, queue delete/purge, event
    draining and the channel's internal queue registry."""

    def setUp(self):
        # One in-memory broker connection shared by every test method.
        self.c = Connection(transport='memory')
        self.e = Exchange('test_transport_memory')
        self.q = Queue('test_transport_memory',
                       exchange=self.e,
                       routing_key='test_transport_memory')
        self.q2 = Queue('test_transport_memory2',
                        exchange=self.e,
                        routing_key='test_transport_memory2')
        self.fanout = Exchange('test_transport_memory_fanout', type='fanout')
        self.q3 = Queue('test_transport_memory_fanout1', exchange=self.fanout)
        self.q4 = Queue('test_transport_memory_fanout2', exchange=self.fanout)

    def test_driver_version(self):
        # The transport must report some non-empty driver version.
        self.assertTrue(self.c.transport.driver_version())

    def test_produce_consume_noack(self):
        chan = self.c.channel()
        pub = Producer(chan, self.e)
        sub = Consumer(chan, self.q, no_ack=True)

        for seq in range(10):
            pub.publish({'foo': seq}, routing_key='test_transport_memory')

        got = []

        def on_message(message_data, message):
            got.append(message)

        sub.register_callback(on_message)
        sub.consume()

        # Drain until every delivery arrived; no_ack means no acks required.
        while len(got) != 10:
            self.c.drain_events()

        self.assertEqual(len(got), 10)

    def test_produce_consume_fanout(self):
        pub = self.c.Producer()
        sub = self.c.Consumer([self.q3, self.q4])

        pub.publish(
            {'hello': 'world'},
            declare=sub.queues,
            exchange=self.fanout,
        )

        # Every bound queue receives exactly one copy of the message.
        self.assertEqual(self.q3(self.c).get().payload, {'hello': 'world'})
        self.assertEqual(self.q4(self.c).get().payload, {'hello': 'world'})
        self.assertIsNone(self.q3(self.c).get())
        self.assertIsNone(self.q4(self.c).get())

    def test_produce_consume(self):
        chan = self.c.channel()
        pub = Producer(chan, self.e)
        sub1 = Consumer(chan, self.q)
        sub2 = Consumer(chan, self.q2)
        self.q2(chan).declare()

        for seq in range(10):
            pub.publish({'foo': seq}, routing_key='test_transport_memory')
        for seq in range(10):
            pub.publish({'foo': seq}, routing_key='test_transport_memory2')

        got1 = []
        got2 = []

        def on_message1(message_data, message):
            got1.append(message)
            message.ack()

        def on_message2(message_data, message):
            got2.append(message)
            message.ack()

        sub1.register_callback(on_message1)
        sub2.register_callback(on_message2)
        sub1.consume()
        sub2.consume()

        # Routing keys split the 20 messages between the two consumers.
        while len(got1) + len(got2) != 20:
            self.c.drain_events()

        self.assertEqual(len(got1) + len(got2), 20)

        # Compressed payloads are transparently decompressed on retrieval.
        pub.publish({'compressed': True},
                    routing_key='test_transport_memory',
                    compression='zlib')
        msg = self.q(chan).get()
        self.assertDictEqual(msg.payload, {'compressed': True})

        # Deleting a queue drops whatever it still holds.
        for seq in range(10):
            pub.publish({'foo': seq}, routing_key='test_transport_memory')
        self.assertTrue(self.q(chan).get())
        self.q(chan).delete()
        self.q(chan).declare()
        self.assertIsNone(self.q(chan).get())

        # Purging empties the queue without deleting it.
        for seq in range(10):
            pub.publish({'foo': seq}, routing_key='test_transport_memory2')
        self.assertTrue(self.q2(chan).get())
        self.q2(chan).purge()
        self.assertIsNone(self.q2(chan).get())

    def test_drain_events(self):
        # With nothing queued, draining must time out...
        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        ch_a = self.c.channel()
        ch_b = self.c.channel()

        # ...even when idle channels exist.
        with self.assertRaises(socket.timeout):
            self.c.drain_events(timeout=0.1)
        del(ch_a)  # so pyflakes doesn't complain.
        del(ch_b)

    def test_drain_events_unregistered_queue(self):
        ch = self.c.channel()

        class Cycle(object):
            # Fakes a delivery for a queue no channel has registered.

            def get(self, timeout=None):
                return ('foo', 'foo'), ch

        self.c.transport.cycle = Cycle()
        with self.assertRaises(KeyError):
            self.c.drain_events()

    def test_queue_for(self):
        chan = self.c.channel()
        chan.queues.clear()

        registry_entry = chan._queue_for('foo')
        self.assertTrue(registry_entry)
        # Repeated lookups of the same name return the identical object.
        self.assertIs(chan._queue_for('foo'), registry_entry)
class test_Consumer:
    """pytest-style unit tests for kombu's Consumer: accept/content-type
    policy, queue declaration modes, callback dispatch, ack/reject state
    machine, QoS/flow, and channel revival."""

    def setup(self):
        # Uses the mock Transport: the channel records each method call so
        # tests can assert on call names and counts via `in channel` /
        # channel.called.count(...).
        self.connection = Connection(transport=Transport)
        self.connection.connect()
        assert self.connection.connection.connected
        self.exchange = Exchange('foo', 'direct')

    def test_accept(self):
        # No accept argument -> accept anything (None).
        a = Consumer(self.connection)
        assert a.accept is None
        # Serializer aliases are expanded to full content types.
        b = Consumer(self.connection, accept=['json', 'pickle'])
        assert b.accept == {
            'application/json', 'application/x-python-serialize',
        }
        # Already-expanded sets pass through unchanged.
        c = Consumer(self.connection, accept=b.accept)
        assert b.accept == c.accept

    def test_enter_exit_cancel_raises(self):
        # __exit__ must swallow errors raised by cancel().
        c = Consumer(self.connection)
        c.cancel = Mock(name='Consumer.cancel')
        c.cancel.side_effect = KeyError('foo')
        with c:
            pass
        c.cancel.assert_called_with()

    def test_receive_callback_accept(self):
        # on_message receives the raw message, stamped with the accept set.
        message = Mock(name='Message')
        message.errors = []
        callback = Mock(name='on_message')
        c = Consumer(self.connection, accept=['json'], on_message=callback)
        c.on_decode_error = None
        c.channel = Mock(name='channel')
        c.channel.message_to_python = None

        c._receive_callback(message)

        callback.assert_called_with(message)
        assert message.accept == c.accept

    def test_accept__content_disallowed(self):
        # A pickled payload must be rejected by a default-accept consumer.
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q],
            exchange=self.exchange,
            serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], callbacks=[callback]) as consumer:
            with pytest.raises(consumer.ContentDisallowed):
                conn.drain_events(timeout=1)
        callback.assert_not_called()

    def test_accept__content_allowed(self):
        # The same pickled payload is delivered once 'pickle' is accepted.
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q],
            exchange=self.exchange,
            serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], accept=['pickle'],
                           callbacks=[callback]):
            conn.drain_events(timeout=1)
        callback.assert_called()
        body, message = callback.call_args[0]
        assert body['complex']

    def test_set_no_channel(self):
        c = Consumer(None)
        assert c.channel is None
        c.revive(Mock())
        assert c.channel

    def test_set_no_ack(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True, no_ack=True)
        assert consumer.no_ack

    def test_add_queue_when_auto_declare(self):
        # auto_declare=True declares the queue as soon as it is added.
        consumer = self.connection.Consumer(auto_declare=True)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        assert q in consumer.queues
        q.declare.assert_called_with()

    def test_add_queue_when_not_auto_declare(self):
        consumer = self.connection.Consumer(auto_declare=False)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        assert q in consumer.queues
        assert not q.declare.call_count

    def test_consume_without_queues_returns(self):
        consumer = self.connection.Consumer()
        consumer.queues[:] = []
        assert consumer.consume() is None

    def test_consuming_from(self):
        # consuming_from needs both: queue present AND an active consumer tag.
        consumer = self.connection.Consumer()
        consumer.queues[:] = [Queue('a'), Queue('b'), Queue('d')]
        consumer._active_tags = {'a': 1, 'b': 2}

        assert not consumer.consuming_from(Queue('c'))
        assert not consumer.consuming_from('c')
        assert not consumer.consuming_from(Queue('d'))
        assert not consumer.consuming_from('d')
        assert consumer.consuming_from(Queue('a'))
        assert consumer.consuming_from(Queue('b'))
        assert consumer.consuming_from('b')

    def test_receive_callback_without_m2p(self):
        # Without message_to_python, the decoded body is passed to receive().
        channel = self.connection.channel()
        c = channel.Consumer()
        m2p = getattr(channel, 'message_to_python')
        channel.message_to_python = None
        try:
            message = Mock()
            message.errors = []
            message.decode.return_value = 'Hello'
            recv = c.receive = Mock()
            c._receive_callback(message)
            recv.assert_called_with('Hello', message)
        finally:
            channel.message_to_python = m2p

    def test_receive_callback__message_errors(self):
        # Stored decode errors must be re-raised before dispatch.
        channel = self.connection.channel()
        channel.message_to_python = None
        c = channel.Consumer()
        message = Mock()
        try:
            raise KeyError('foo')
        except KeyError:
            message.errors = [sys.exc_info()]
        message._reraise_error.side_effect = KeyError()
        with pytest.raises(KeyError):
            c._receive_callback(message)

    def test_set_callbacks(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        callbacks = [lambda x, y: x, lambda x, y: x]
        consumer = Consumer(channel, queue, auto_declare=True,
                            callbacks=callbacks)
        assert consumer.callbacks == callbacks

    def test_auto_declare(self):
        # consume() with auto_declare binds copies of the queue/exchange and
        # declares everything exactly once.
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.consume()  # twice is a noop
        assert consumer.queues[0] is not queue
        assert consumer.queues[0].is_bound
        assert consumer.queues[0].exchange.is_bound
        assert consumer.queues[0].exchange is not self.exchange

        for meth in ('exchange_declare',
                     'queue_declare',
                     'queue_bind',
                     'basic_consume'):
            assert meth in channel
        assert channel.called.count('basic_consume') == 1
        assert consumer._active_tags

        consumer.cancel_by_queue(queue.name)
        consumer.cancel_by_queue(queue.name)
        assert not consumer._active_tags

    def test_consumer_tag_prefix(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, tag_prefix='consumer_')
        consumer.consume()
        assert consumer._active_tags[queue.name].startswith('consumer_')

    def test_manual_declare(self):
        # auto_declare=False: nothing is declared until declare()/consume().
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=False)
        assert consumer.queues[0] is not queue
        assert consumer.queues[0].is_bound
        assert consumer.queues[0].exchange.is_bound
        assert consumer.queues[0].exchange is not self.exchange

        for meth in ('exchange_declare', 'queue_declare', 'basic_consume'):
            assert meth not in channel

        consumer.declare()
        for meth in ('exchange_declare', 'queue_declare', 'queue_bind'):
            assert meth in channel
        assert 'basic_consume' not in channel
        consumer.consume()
        assert 'basic_consume' in channel

    def test_consume__cancel(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.cancel()
        assert 'basic_cancel' in channel
        assert not consumer._active_tags

    def test___enter____exit__(self):
        # Context manager: enter returns self, exit cancels consumption.
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        context = consumer.__enter__()
        assert context is consumer
        assert consumer._active_tags
        res = consumer.__exit__(None, None, None)
        assert not res
        assert 'basic_cancel' in channel
        assert not consumer._active_tags

    def test_flow(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.flow(False)
        assert 'flow' in channel

    def test_qos(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.qos(30, 10, False)
        assert 'basic_qos' in channel

    def test_purge(self):
        # purge() must hit every bound queue.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True)
        consumer.purge()
        assert channel.called.count('queue_purge') == 4

    def test_multiple_queues(self):
        # Each queue gets its own declare/bind/consume/cancel round-trip.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4])
        consumer.consume()
        assert channel.called.count('exchange_declare') == 4
        assert channel.called.count('queue_declare') == 4
        assert channel.called.count('queue_bind') == 4
        assert channel.called.count('basic_consume') == 4
        assert len(consumer._active_tags) == 4
        consumer.cancel()
        assert channel.called.count('basic_cancel') == 4
        assert not len(consumer._active_tags)

    def test_receive_callback(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        received = []

        def callback(message_data, message):
            received.append(message_data)
            message.ack()
            message.payload  # trigger cache

        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})

        assert 'basic_ack' in channel
        assert 'message_to_python' in channel
        assert received[0] == {'foo': 'bar'}

    def test_basic_ack_twice(self):
        # Double-ack is a MessageStateError.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.ack()
            message.ack()

        consumer.register_callback(callback)
        with pytest.raises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})

    def test_basic_reject(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.reject()

        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject' in channel

    def test_basic_reject_twice(self):
        # Double-reject is a MessageStateError.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.reject()
            message.reject()

        consumer.register_callback(callback)
        with pytest.raises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject' in channel

    def test_basic_reject__requeue(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.requeue()

        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject:requeue' in channel

    def test_basic_reject__requeue_twice(self):
        # Double-requeue is a MessageStateError.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.requeue()
            message.requeue()

        consumer.register_callback(callback)
        with pytest.raises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        assert 'basic_reject:requeue' in channel

    def test_receive_without_callbacks_raises(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        with pytest.raises(NotImplementedError):
            consumer.receive(1, 2)

    def test_decode_error(self):
        # Without an on_decode_error handler the error propagates.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.channel.throw_decode_error = True

        with pytest.raises(ValueError):
            consumer._receive_callback({'foo': 'bar'})

    def test_on_decode_error_callback(self):
        # With a handler, the raw message and exception are handed to it.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        thrown = []

        def on_decode_error(msg, exc):
            thrown.append((msg.body, exc))

        consumer = Consumer(channel, [b1], on_decode_error=on_decode_error)
        consumer.channel.throw_decode_error = True
        consumer._receive_callback({'foo': 'bar'})

        assert thrown
        m, exc = thrown[0]
        assert json.loads(m) == {'foo': 'bar'}
        assert isinstance(exc, ValueError)

    def test_recover(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.recover()
        assert 'basic_recover' in channel

    def test_revive(self):
        # revive() rebinds the consumer, its queues and exchanges to the
        # new channel.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        channel2 = self.connection.channel()
        consumer.revive(channel2)
        assert consumer.channel is channel2
        assert consumer.queues[0].channel is channel2
        assert consumer.queues[0].exchange.channel is channel2

    def test_revive__with_prefetch_count(self):
        # A prefetch_count at construction issues basic_qos immediately.
        channel = Mock(name='channel')
        b1 = Queue('qname1', self.exchange, 'rkey')
        Consumer(channel, [b1], prefetch_count=14)
        channel.basic_qos.assert_called_with(0, 14, False)

    def test__repr__(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        assert repr(Consumer(channel, [b1]))

    def test_connection_property_handles_AttributeError(self):
        # A channel object without a .connection must yield None, not raise.
        p = self.connection.Consumer()
        p.channel = object()
        assert p.connection is None
class test_Consumer(TestCase):
    """unittest-style Consumer tests (older kombu generation).

    NOTE(review): a pytest-style class with the same name appears earlier in
    this file; if both chunks ever land in one module, the later definition
    shadows the earlier — confirm chunk provenance before merging.
    """

    def setUp(self):
        # Mock Transport: the channel records calls for `assertIn(..., channel)`
        # style assertions.
        self.connection = Connection(transport=Transport)
        self.connection.connect()
        self.assertTrue(self.connection.connection.connected)
        self.exchange = Exchange('foo', 'direct')

    def test_set_no_ack(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True, no_ack=True)
        self.assertTrue(consumer.no_ack)

    def test_add_queue_when_auto_declare(self):
        # auto_declare=True declares a queue as soon as it is added.
        consumer = self.connection.Consumer(auto_declare=True)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        self.assertIn(q, consumer.queues)
        q.declare.assert_called_with()

    def test_add_queue_when_not_auto_declare(self):
        consumer = self.connection.Consumer(auto_declare=False)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        self.assertIn(q, consumer.queues)
        self.assertFalse(q.declare.call_count)

    def test_consume_without_queues_returns(self):
        consumer = self.connection.Consumer()
        consumer.queues[:] = []
        self.assertIsNone(consumer.consume())

    def test_consuming_from(self):
        consumer = self.connection.Consumer()
        consumer.queues[:] = [Queue('a'), Queue('b')]

        self.assertFalse(consumer.consuming_from(Queue('c')))
        self.assertFalse(consumer.consuming_from('c'))
        self.assertTrue(consumer.consuming_from(Queue('a')))
        self.assertTrue(consumer.consuming_from(Queue('b')))
        self.assertTrue(consumer.consuming_from('b'))

    def test_receive_callback_without_m2p(self):
        # Without message_to_python, the decoded body goes to receive().
        channel = self.connection.channel()
        c = channel.Consumer()
        m2p = getattr(channel, 'message_to_python')
        channel.message_to_python = None
        try:
            message = Mock()
            message.decode.return_value = 'Hello'
            recv = c.receive = Mock()
            c._receive_callback(message)
            recv.assert_called_with('Hello', message)
        finally:
            channel.message_to_python = m2p

    def test_set_callbacks(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        callbacks = [lambda x, y: x, lambda x, y: x]
        consumer = Consumer(channel, queue, auto_declare=True,
                            callbacks=callbacks)
        self.assertEqual(consumer.callbacks, callbacks)

    def test_auto_declare(self):
        # consume() with auto_declare binds copies and declares exactly once.
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.consume()  # twice is a noop
        self.assertIsNot(consumer.queues[0], queue)
        self.assertTrue(consumer.queues[0].is_bound)
        self.assertTrue(consumer.queues[0].exchange.is_bound)
        self.assertIsNot(consumer.queues[0].exchange, self.exchange)

        for meth in ('exchange_declare',
                     'queue_declare',
                     'queue_bind',
                     'basic_consume'):
            self.assertIn(meth, channel)
        self.assertEqual(channel.called.count('basic_consume'), 1)
        self.assertTrue(consumer._active_tags)

        consumer.cancel_by_queue(queue.name)
        consumer.cancel_by_queue(queue.name)
        self.assertFalse(consumer._active_tags)

    def test_manual_declare(self):
        # auto_declare=False: nothing happens until declare()/consume().
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=False)
        self.assertIsNot(consumer.queues[0], queue)
        self.assertTrue(consumer.queues[0].is_bound)
        self.assertTrue(consumer.queues[0].exchange.is_bound)
        self.assertIsNot(consumer.queues[0].exchange, self.exchange)

        for meth in ('exchange_declare', 'queue_declare', 'basic_consume'):
            self.assertNotIn(meth, channel)

        consumer.declare()
        for meth in ('exchange_declare', 'queue_declare', 'queue_bind'):
            self.assertIn(meth, channel)
        self.assertNotIn('basic_consume', channel)
        consumer.consume()
        self.assertIn('basic_consume', channel)

    def test_consume__cancel(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.cancel()
        self.assertIn('basic_cancel', channel)
        self.assertFalse(consumer._active_tags)

    def test___enter____exit__(self):
        # Context manager: enter returns self, exit cancels consumption.
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        context = consumer.__enter__()
        self.assertIs(context, consumer)
        self.assertTrue(consumer._active_tags)
        res = consumer.__exit__(None, None, None)
        self.assertFalse(res)
        self.assertIn('basic_cancel', channel)
        self.assertFalse(consumer._active_tags)

    def test_flow(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.flow(False)
        self.assertIn('flow', channel)

    def test_qos(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.qos(30, 10, False)
        self.assertIn('basic_qos', channel)

    def test_purge(self):
        # purge() must hit every bound queue.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True)
        consumer.purge()
        self.assertEqual(channel.called.count('queue_purge'), 4)

    def test_multiple_queues(self):
        # Each queue gets its own declare/bind/consume/cancel round-trip.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4])
        consumer.consume()
        self.assertEqual(channel.called.count('exchange_declare'), 4)
        self.assertEqual(channel.called.count('queue_declare'), 4)
        self.assertEqual(channel.called.count('queue_bind'), 4)
        self.assertEqual(channel.called.count('basic_consume'), 4)
        self.assertEqual(len(consumer._active_tags), 4)
        consumer.cancel()
        self.assertEqual(channel.called.count('basic_cancel'), 4)
        self.assertFalse(len(consumer._active_tags))

    def test_receive_callback(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        received = []

        def callback(message_data, message):
            received.append(message_data)
            message.ack()
            message.payload  # trigger cache

        consumer.register_callback(callback)
        consumer._receive_callback({u'foo': u'bar'})

        self.assertIn('basic_ack', channel)
        self.assertIn('message_to_python', channel)
        self.assertEqual(received[0], {u'foo': u'bar'})

    def test_basic_ack_twice(self):
        # Double-ack is a MessageStateError.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.ack()
            message.ack()

        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})

    def test_basic_reject(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.reject()

        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject', channel)

    def test_basic_reject_twice(self):
        # Double-reject is a MessageStateError.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.reject()
            message.reject()

        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject', channel)

    def test_basic_reject__requeue(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.requeue()

        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject:requeue', channel)

    def test_basic_reject__requeue_twice(self):
        # Double-requeue is a MessageStateError.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.requeue()
            message.requeue()

        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject:requeue', channel)

    def test_receive_without_callbacks_raises(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        with self.assertRaises(NotImplementedError):
            consumer.receive(1, 2)

    def test_decode_error(self):
        # Without an on_decode_error handler the error propagates.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.channel.throw_decode_error = True

        with self.assertRaises(ValueError):
            consumer._receive_callback({'foo': 'bar'})

    def test_on_decode_error_callback(self):
        # With a handler, the raw message and exception are handed to it.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        thrown = []

        def on_decode_error(msg, exc):
            thrown.append((msg.body, exc))

        consumer = Consumer(channel, [b1], on_decode_error=on_decode_error)
        consumer.channel.throw_decode_error = True
        consumer._receive_callback({'foo': 'bar'})

        self.assertTrue(thrown)
        m, exc = thrown[0]
        self.assertEqual(anyjson.loads(m), {'foo': 'bar'})
        self.assertIsInstance(exc, ValueError)

    def test_recover(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.recover()
        self.assertIn('basic_recover', channel)

    def test_revive(self):
        # revive() rebinds consumer, queues and exchanges to the new channel.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        channel2 = self.connection.channel()
        consumer.revive(channel2)
        self.assertIs(consumer.channel, channel2)
        self.assertIs(consumer.queues[0].channel, channel2)
        self.assertIs(consumer.queues[0].exchange.channel, channel2)

    def test__repr__(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        self.assertTrue(repr(Consumer(channel, [b1])))

    def test_connection_property_handles_AttributeError(self):
        # A channel object without a .connection must yield None, not raise.
        p = self.connection.Consumer()
        p.channel = object()
        self.assertIsNone(p.connection)
class Amqp(Thread):
    """Threaded AMQP (RabbitMQ) client for canopsis.

    Owns one connection/channel, a set of exchanges and queues, and runs
    its own thread (see run()) that drains broker events and reconnects
    automatically when the connection drops.
    """

    def __init__(self, host="localhost", port=5672, userid="guest",
                 password="******", virtual_host="canopsis",
                 exchange_name="canopsis", logging_name="Amqp",
                 logging_level=INFO, read_config_file=True,
                 auto_connect=True, on_ready=None, max_retries=5):
        """
        :param host: broker host (may be overridden by the config file)
        :param port: broker port
        :param userid: broker login
        :param password: broker password
        :param virtual_host: AMQP virtual host
        :param exchange_name: base exchange name; ".events", ".alerts" and
            ".incidents" variants are derived from it
        :param read_config_file: when True, read_config("amqp") overrides
            the connection parameters above
        :param auto_connect: connect immediately from the constructor
        :param on_ready: optional callable invoked once queues consume
        :param max_retries: publish attempts before an event is dropped
        """
        super(Amqp, self).__init__()
        self.logger = getLogger(logging_name)

        self.host = host
        self.port = port
        self.userid = userid
        self.password = password
        self.virtual_host = virtual_host
        self.exchange_name = exchange_name
        self.logging_level = logging_level

        if read_config_file:
            self.read_config("amqp")

        self.amqp_uri = "amqp://{0}:{1}@{2}:{3}/{4}".format(
            self.userid, self.password, self.host, self.port,
            self.virtual_host)

        self.logger.setLevel(logging_level)

        # Event sent try count before event drop in case of connection
        # problem
        if max_retries != 5:
            self.logger.info('Custom retries value : {} {}'.format(
                max_retries, type(max_retries)))
        self.max_retries = max_retries

        self.exchange_name_events = exchange_name + ".events"
        self.exchange_name_alerts = exchange_name + ".alerts"
        self.exchange_name_incidents = exchange_name + ".incidents"

        self.chan = None
        self.conn = None
        self.connected = False
        self.on_ready = on_ready

        self.RUN = True

        self.exchanges = {}
        self.queues = {}
        self.producers = None
        self.paused = False

        # NOTE(review): `error` is presumably socket.error imported at the
        # top of the file (not visible in this chunk) -- confirm.
        self.connection_errors = (ConnectionError, error, IOError, OSError)

        # Pre-create the Exchange objects; they are declared on connect().
        # (Loop variable renamed so it no longer shadows the
        # `exchange_name` parameter.)
        self.logger.debug("Create exchanges object")
        for xname in [self.exchange_name, self.exchange_name_events,
                      self.exchange_name_alerts,
                      self.exchange_name_incidents]:
            self.logger.debug(' + {}'.format(xname))
            self.get_exchange(xname)

        if auto_connect:
            self.connect()

        self.logger.debug("Object canamqp initialized")

    def run(self):
        """
        main thread loop
        - connects to rabbit
        - calls drain_events
        - disconnects on error
        """
        self.logger.debug("Start thread ...")
        reconnect = False

        while self.RUN:
            self.connect()

            if self.connected:
                self.init_queue(reconnect=reconnect)
                self.logger.debug("Drain events ...")

                while self.RUN:
                    try:
                        if not self.paused:
                            self.conn.drain_events(timeout=0.5)
                        else:
                            sleep(0.5)
                    except KombuSerializationError as exc:
                        # Bad payload: log it and keep draining.
                        # BUG FIX: a second, identical handler that was
                        # placed after the connection-errors clause was
                        # unreachable and has been removed.
                        self.logger.error(
                            "Kombu serialization error: invalid message "
                            "received: {}".format(exc))
                    except timeout:
                        # drain_events timed out: nothing to read this tick.
                        pass
                    except self.connection_errors as err:
                        self.logger.error(
                            "Connection error ! ({})".format(err))
                        break
                    except Exception as err:
                        self.logger.error("Error: {} ({})".format(
                            err, type(err)))
                        print_exc(file=stdout)
                        break

                self.disconnect()

            if self.RUN:
                self.logger.error(
                    'Connection lost, try to reconnect in few seconds ...')
                reconnect = True
                self.wait_connection(timeout=5)

        self.logger.debug("End of thread ...")

    def stop(self):
        """Ask the thread loop (run()) to terminate."""
        self.logger.debug("Stop thread ...")
        self.RUN = False

    def connect(self):
        """
        Create the connection
        Inits the producers
        Init the only channel
        Declares exchanges
        """
        if not self.connected:
            self.logger.info("Connect to AMQP Broker (%s:%s)" % (self.host,
                                                                 self.port))
            self.conn = Connection(self.amqp_uri)

            try:
                self.logger.debug(" + Connect")
                self.conn.connect()
                self.logger.info("Connected to AMQP Broker.")
                self.producers = pools.Producers(limit=10)
                self.connected = True
            except Exception as err:
                self.conn.release()
                self.logger.error("Impossible to connect ({})".format(err))

            if self.connected:
                self.logger.debug(" + Open channel")
                try:
                    self.chan = self.conn.channel()
                    self.logger.info(
                        "Channel openned. Ready to send messages")
                    try:
                        # Declare exchange
                        self.logger.debug("Declare exchanges")
                        for exchange_name in self.exchanges:
                            self.logger.debug(
                                " + {}".format(exchange_name))
                            self.exchanges[exchange_name](
                                self.chan).declare()
                    except Exception as err:
                        self.logger.error(
                            "Impossible to declare exchange ({})".format(
                                err))
                except Exception as err:
                    self.logger.error(err)
        else:
            self.logger.debug("Already connected")

    def get_exchange(self, name):
        """
        Returns an exchange if stored in self.exchanges.
        Otherwise, creates it and returns it

        :param string name: name of the exchange to get/create
        :rtype: Exchange|None
        """
        if name:
            try:
                return self.exchanges[name]
            except Exception:
                # Not cached yet: build and memoize it.  amq.direct is the
                # broker's built-in direct exchange; everything else is a
                # durable topic exchange.
                if name == "amq.direct":
                    self.exchanges[name] = Exchange(name, "direct",
                                                    durable=True)
                else:
                    self.exchanges[name] = Exchange(name, "topic",
                                                    durable=True,
                                                    auto_delete=False)
                return self.exchanges[name]
        else:
            return None

    def init_queue(self, reconnect=False):
        """
        Loads queue settings
        Creates queues and attaches the same channel to each of them
        Binds queues to exchange
        Revives or creates consumers
        calls consume
        """
        if self.queues:
            self.logger.debug("Init queues")

            for queue_name in self.queues.keys():
                self.logger.debug(" + {}".format(queue_name))
                qsettings = self.queues[queue_name]

                if not qsettings['queue']:
                    self.logger.debug(" + Create queue")

                    # copy list
                    routing_keys = list(qsettings['routing_keys'])

                    # First routing key is used at declare time; the rest
                    # are bound afterwards.
                    routing_key = None
                    if len(routing_keys):
                        routing_key = routing_keys[0]
                        routing_keys = routing_keys[1:]

                    exchange = self.get_exchange(
                        qsettings['exchange_name'])

                    if qsettings['exchange_name'] == "amq.direct" \
                            and not routing_key:
                        routing_key = queue_name

                    self.logger.debug(
                        "exchange: '{}', exclusive: {},"
                        " auto_delete: {},no_ack: {}".format(
                            qsettings['exchange_name'],
                            qsettings['exclusive'],
                            qsettings['auto_delete'],
                            qsettings['no_ack']))
                    qsettings['queue'] = Queue(
                        queue_name,
                        exchange=exchange,
                        routing_key=routing_key,
                        exclusive=qsettings['exclusive'],
                        auto_delete=qsettings['auto_delete'],
                        no_ack=qsettings['no_ack'],
                        channel=self.conn.channel())

                    qsettings['queue'].declare()

                    if len(routing_keys):
                        self.logger.debug(" + Bind on all routing keys")
                        for routing_key in routing_keys:
                            self.logger.debug(
                                " + routing_key: '{}'".format(routing_key))
                            try:
                                qsettings['queue'].bind_to(
                                    exchange=exchange,
                                    routing_key=routing_key)
                            except Exception:
                                self.logger.error(
                                    "You need upgrade your Kombu version "
                                    "({})".format(__version__))

                if qsettings['consumer'] and not reconnect:
                    qsettings['consumer'].revive(self.chan)
                elif not qsettings['consumer'] or reconnect:
                    self.logger.debug(" + Create Consumer")
                    qsettings['consumer'] = self.conn.Consumer(
                        qsettings['queue'],
                        callbacks=[qsettings['callback']])

                self.logger.debug(" + Consume queue")
                qsettings['consumer'].consume()

            if self.on_ready:
                self.on_ready()
        else:
            # NOTE(review): reached when no queue was registered; the
            # original (collapsed) source made this attachment ambiguous.
            self.logger.info('Queue already inited')

    def add_queue(self, queue_name, routing_keys, callback,
                  exchange_name=None, no_ack=True, exclusive=False,
                  auto_delete=True):
        """
        Initializes the queue configuration
        maps the callback on the queue
        """
        c_routing_keys = []

        if not isinstance(routing_keys, list):
            if isinstance(routing_keys, basestring):
                c_routing_keys = [routing_keys]
        else:
            c_routing_keys = routing_keys
        # aka if rk is nor a list nor a basetring, leave it empty

        if not exchange_name:
            exchange_name = self.exchange_name

        self.queues[queue_name] = {
            'queue': False,
            'consumer': False,
            'queue_name': queue_name,
            'routing_keys': c_routing_keys,
            'callback': callback,
            'exchange_name': exchange_name,
            'no_ack': no_ack,
            'exclusive': exclusive,
            'auto_delete': auto_delete
        }

    def publish(self, msg, routing_key, exchange_name=None,
                serializer="json", compression=None, content_type=None,
                content_encoding=None):
        """
        Tries to publish an event
        In case of failure, tries to reconnect and retry until
        (self.max_retries)

        :returns: operation success status
        :rtype: bool
        """
        self.logger.warning(
            "Publishing from old.rabbitmq.Amqp is deprecated")
        operation_success = False
        retries = 0

        while not operation_success and retries < self.max_retries:
            retries += 1

            if self.connected:
                if not exchange_name:
                    exchange_name = self.exchange_name

                with self.producers[self.conn].acquire(
                        block=True) as producer:
                    try:
                        _msg = msg.copy()
                        Amqp._clean_msg_for_serialization(_msg)
                        exchange = self.get_exchange(
                            exchange_name.encode('utf-8'))
                        producer.publish(_msg,
                                         serializer=serializer,
                                         compression=compression,
                                         routing_key=routing_key,
                                         exchange=exchange)
                        self.logger.debug(
                            'publish {} in exchange {}'.format(
                                routing_key, exchange_name))
                        operation_success = True
                    except AmqpStructError:
                        # Unrecoverable: retrying cannot shorten the key.
                        self.logger.warning(
                            'Malformed message: routing key is too long. '
                            'Cancelling message')
                        return False
                    except Exception:
                        self.logger.error(
                            ' + Impossible to send {}'.format(
                                traceback.format_exc()))
                        self.disconnect()
                        self.connect()
                        self.init_queue(reconnect=False)
            else:
                self.logger.error('Not connected ... try reconnecting')
                self.connect()

            if not operation_success:
                # Event and it's information are buffered until next send
                # retry
                self.logger.info('Retry count {}'.format(retries))

        if not operation_success:
            # Event and it's information are buffered until next send retry
            self.logger.error(
                'Too much retries for event {}, give up'.format(
                    routing_key))

        return operation_success

    @staticmethod
    def _clean_msg_for_serialization(msg):
        """Stringify BSON ObjectId values that json cannot serialize."""
        from bson import objectid
        for key in msg:
            if isinstance(msg[key], objectid.ObjectId):
                msg[key] = str(msg[key])

    def cancel_queues(self):
        """Cancel all consumers and forget the declared queue objects."""
        if self.connected:
            for queue_name in self.queues.keys():
                if self.queues[queue_name]['consumer']:
                    self.logger.debug(
                        " + Cancel consumer on {}".format(queue_name))
                    try:
                        self.queues[queue_name]['consumer'].cancel()
                    except Exception:
                        pass

                    del (self.queues[queue_name]['consumer'])
                    self.queues[queue_name]['consumer'] = False
                    del (self.queues[queue_name]['queue'])
                    self.queues[queue_name]['queue'] = False

    def disconnect(self):
        """Release producers, exchanges, pools and the connection."""
        if self.connected:
            self.logger.info("Disconnect from AMQP Broker")

            self.cancel_queues()

            # Force producers closing to permit a clean reconnect after
            # ... especially on timeout errors
            self.producers[self.conn].force_close_all()

            # BUG FIX: the original `for exchange in self.exchanges: del
            # exchange` only unbound the loop variable (a no-op);
            # rebinding the dict below is what actually drops them.
            self.exchanges = {}

            try:
                pools.reset()
            except Exception as err:
                self.logger.error(
                    "Impossible to reset kombu pools: {} ({})".format(
                        err, type(err)))

            try:
                self.conn.release()
                del self.conn
            except Exception as err:
                self.logger.error(
                    "Impossible to release connection: {} ({})".format(
                        err, type(err)))

            self.connected = False

    def wait_connection(self, timeout=5):
        """Poll every 0.5s until connected, stopped, or timeout seconds."""
        i = 0
        while self.RUN and not self.connected and i < (timeout * 2):
            sleep(0.5)
            i += 1

    def read_config(self, name):
        """Override connection settings from etc/<name>.conf ([master])."""
        filename = join(root_path, 'etc', '{0}.conf'.format(name))

        import ConfigParser
        self.config = ConfigParser.RawConfigParser()

        try:
            self.config.read(filename)

            section = 'master'

            self.host = self.config.get(section, "host")
            self.port = self.config.getint(section, "port")
            self.userid = self.config.get(section, "userid")
            self.password = self.config.get(section, "password")
            self.virtual_host = self.config.get(section, "virtual_host")
            self.exchange_name = self.config.get(section, "exchange_name")
        except Exception as err:
            self.logger.error(
                "Can't to load configurations ({}), use default ...".format(
                    err))

    def __del__(self):
        self.stop()
class AppCrawlerQueue(object): """ crawler result put into the rabbit mq """ def __init__(self, addr, exchange, routing_key, queue_name, logger=None): self._exch = Exchange(exchange) self._addr = addr self._routing_key = routing_key self._queue_name = queue_name self._queue = Queue(queue_name, self._exch, self._routing_key) self._conn = Connection(addr) self._task = None self._logger = logger self._producer = None def setConsumer(self): self._consumer = self._conn.Consumer(self._queue, callbacks=[self.processTask]) self._consumer.qos(prefetch_count=1) self._consumer.consume() def setProducer(self): if not self._producer: self._producer = Producer(self._conn) def reConn(self): self._conn.release() self._conn = Connection(self._addr) def processTask(self, body, message): try: if not body: message.ack() return self._task = body message.ack() except: pass def getTask(self): self._task = None try: self._conn.drain_events(timeout=1) except socket.timeout: pass except: self._logger.error('unknown error[%s]' % traceback.format_exc()) pass finally: time.sleep(0.1) return self._task def setTask(self, task): result = task for i in range(5): try: self._producer.publish(result, exchang=self._exch, declare=[self._queue], routing_key=self._queue_name) break except: self._logger.error('send info error[%s]' % traceback.format_exc()) time.sleep(10) self.reConn() self.setProducer() continue def __del__(self): self._conn.release()
class test_Consumer(Case):
    """Unit tests for kombu's Consumer against an in-memory transport.

    The mock channel records each AMQP method it receives, so tests can
    assert membership (``self.assertIn('basic_ack', channel)``) and call
    counts (``channel.called.count(...)``).
    """

    def setUp(self):
        # Fresh mock-transport connection and a direct exchange per test.
        self.connection = Connection(transport=Transport)
        self.connection.connect()
        self.assertTrue(self.connection.connection.connected)
        self.exchange = Exchange('foo', 'direct')

    def test_accept(self):
        # accept names are normalized to full content-types.
        a = Consumer(self.connection)
        self.assertIsNone(a.accept)
        b = Consumer(self.connection, accept=['json', 'pickle'])
        self.assertSetEqual(
            b.accept,
            set(['application/json', 'application/x-python-serialize']),
        )
        c = Consumer(self.connection, accept=b.accept)
        self.assertSetEqual(b.accept, c.accept)

    def test_enter_exit_cancel_raises(self):
        # Errors raised by cancel() during __exit__ must be swallowed.
        c = Consumer(self.connection)
        c.cancel = Mock(name='Consumer.cancel')
        c.cancel.side_effect = KeyError('foo')
        with c:
            pass
        c.cancel.assert_called_with()

    def test_receive_callback_accept(self):
        # The consumer's accept set is propagated onto each message.
        message = Mock(name='Message')
        message.errors = []
        callback = Mock(name='on_message')
        c = Consumer(self.connection, accept=['json'], on_message=callback)
        c.on_decode_error = None
        c.channel = Mock(name='channel')
        c.channel.message_to_python = None
        c._receive_callback(message)
        callback.assert_called_with(message)
        self.assertSetEqual(message.accept, c.accept)

    def test_accept__content_disallowed(self):
        # A pickle payload must be rejected when only the default
        # (json-ish) content types are accepted.
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], callbacks=[callback]) as consumer:
            with self.assertRaises(consumer.ContentDisallowed):
                conn.drain_events(timeout=1)
        self.assertFalse(callback.called)

    def test_accept__content_allowed(self):
        # The same pickle payload is delivered when pickle is accepted.
        conn = Connection('memory://')
        q = Queue('foo', exchange=self.exchange)
        p = conn.Producer()
        p.publish(
            {'complex': object()},
            declare=[q], exchange=self.exchange, serializer='pickle',
        )

        callback = Mock(name='callback')
        with conn.Consumer(queues=[q], accept=['pickle'],
                           callbacks=[callback]):
            conn.drain_events(timeout=1)
        self.assertTrue(callback.called)

        body, message = callback.call_args[0]
        self.assertTrue(body['complex'])

    def test_set_no_channel(self):
        c = Consumer(None)
        self.assertIsNone(c.channel)
        c.revive(Mock())
        self.assertTrue(c.channel)

    def test_set_no_ack(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True, no_ack=True)
        self.assertTrue(consumer.no_ack)

    def test_add_queue_when_auto_declare(self):
        # add_queue with auto_declare must declare the queue immediately.
        consumer = self.connection.Consumer(auto_declare=True)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        self.assertIn(q, consumer.queues)
        q.declare.assert_called_with()

    def test_add_queue_when_not_auto_declare(self):
        consumer = self.connection.Consumer(auto_declare=False)
        q = Mock()
        q.return_value = q
        consumer.add_queue(q)
        self.assertIn(q, consumer.queues)
        self.assertFalse(q.declare.call_count)

    def test_consume_without_queues_returns(self):
        consumer = self.connection.Consumer()
        consumer.queues[:] = []
        self.assertIsNone(consumer.consume())

    def test_consuming_from(self):
        # consuming_from matches on active consumer tags, not queue list
        # membership (queue 'd' has no tag and must not match).
        consumer = self.connection.Consumer()
        consumer.queues[:] = [Queue('a'), Queue('b'), Queue('d')]
        consumer._active_tags = {'a': 1, 'b': 2}

        self.assertFalse(consumer.consuming_from(Queue('c')))
        self.assertFalse(consumer.consuming_from('c'))
        self.assertFalse(consumer.consuming_from(Queue('d')))
        self.assertFalse(consumer.consuming_from('d'))
        self.assertTrue(consumer.consuming_from(Queue('a')))
        self.assertTrue(consumer.consuming_from(Queue('b')))
        self.assertTrue(consumer.consuming_from('b'))

    def test_receive_callback_without_m2p(self):
        # Without message_to_python, the decoded body is passed straight
        # to receive().
        channel = self.connection.channel()
        c = channel.Consumer()
        m2p = getattr(channel, 'message_to_python')
        channel.message_to_python = None
        try:
            message = Mock()
            message.errors = []
            message.decode.return_value = 'Hello'
            recv = c.receive = Mock()
            c._receive_callback(message)
            recv.assert_called_with('Hello', message)
        finally:
            channel.message_to_python = m2p

    def test_set_callbacks(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        callbacks = [lambda x, y: x, lambda x, y: x]
        consumer = Consumer(channel, queue, auto_declare=True,
                            callbacks=callbacks)
        self.assertEqual(consumer.callbacks, callbacks)

    def test_auto_declare(self):
        # auto_declare binds copies of the queue/exchange and declares
        # them exactly once even if consume() is called twice.
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.consume()  # twice is a noop
        self.assertIsNot(consumer.queues[0], queue)
        self.assertTrue(consumer.queues[0].is_bound)
        self.assertTrue(consumer.queues[0].exchange.is_bound)
        self.assertIsNot(consumer.queues[0].exchange, self.exchange)

        for meth in ('exchange_declare', 'queue_declare', 'queue_bind',
                     'basic_consume'):
            self.assertIn(meth, channel)
        self.assertEqual(channel.called.count('basic_consume'), 1)
        self.assertTrue(consumer._active_tags)

        consumer.cancel_by_queue(queue.name)
        consumer.cancel_by_queue(queue.name)
        self.assertFalse(consumer._active_tags)

    def test_consumer_tag_prefix(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, tag_prefix='consumer_')
        consumer.consume()

        self.assertTrue(
            consumer._active_tags[queue.name].startswith('consumer_'),
        )

    def test_manual_declare(self):
        # With auto_declare=False nothing is declared until declare() and
        # nothing consumed until consume().
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=False)
        self.assertIsNot(consumer.queues[0], queue)
        self.assertTrue(consumer.queues[0].is_bound)
        self.assertTrue(consumer.queues[0].exchange.is_bound)
        self.assertIsNot(consumer.queues[0].exchange, self.exchange)

        for meth in ('exchange_declare', 'queue_declare', 'basic_consume'):
            self.assertNotIn(meth, channel)

        consumer.declare()
        for meth in ('exchange_declare', 'queue_declare', 'queue_bind'):
            self.assertIn(meth, channel)
        self.assertNotIn('basic_consume', channel)

        consumer.consume()
        self.assertIn('basic_consume', channel)

    def test_consume__cancel(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.consume()
        consumer.cancel()
        self.assertIn('basic_cancel', channel)
        self.assertFalse(consumer._active_tags)

    def test___enter____exit__(self):
        # Context-manager protocol: enter returns self, exit cancels.
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        context = consumer.__enter__()
        self.assertIs(context, consumer)
        self.assertTrue(consumer._active_tags)
        res = consumer.__exit__(None, None, None)
        self.assertFalse(res)
        self.assertIn('basic_cancel', channel)
        self.assertFalse(consumer._active_tags)

    def test_flow(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.flow(False)
        self.assertIn('flow', channel)

    def test_qos(self):
        channel = self.connection.channel()
        queue = Queue('qname', self.exchange, 'rkey')
        consumer = Consumer(channel, queue, auto_declare=True)
        consumer.qos(30, 10, False)
        self.assertIn('basic_qos', channel)

    def test_purge(self):
        # purge() must issue one queue_purge per consumed queue.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True)
        consumer.purge()
        self.assertEqual(channel.called.count('queue_purge'), 4)

    def test_multiple_queues(self):
        # Each of the four queues gets its own declare/bind/consume and a
        # matching cancel.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        b2 = Queue('qname2', self.exchange, 'rkey')
        b3 = Queue('qname3', self.exchange, 'rkey')
        b4 = Queue('qname4', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1, b2, b3, b4])
        consumer.consume()
        self.assertEqual(channel.called.count('exchange_declare'), 4)
        self.assertEqual(channel.called.count('queue_declare'), 4)
        self.assertEqual(channel.called.count('queue_bind'), 4)
        self.assertEqual(channel.called.count('basic_consume'), 4)
        self.assertEqual(len(consumer._active_tags), 4)
        consumer.cancel()
        self.assertEqual(channel.called.count('basic_cancel'), 4)
        self.assertFalse(len(consumer._active_tags))

    def test_receive_callback(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        received = []

        def callback(message_data, message):
            received.append(message_data)
            message.ack()
            message.payload  # trigger cache
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_ack', channel)
        self.assertIn('message_to_python', channel)
        self.assertEqual(received[0], {'foo': 'bar'})

    def test_basic_ack_twice(self):
        # A second ack() on the same message must raise MessageStateError.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.ack()
            message.ack()
        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})

    def test_basic_reject(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.reject()
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject', channel)

    def test_basic_reject_twice(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.reject()
            message.reject()
        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject', channel)

    def test_basic_reject__requeue(self):
        # requeue() maps to basic_reject with the requeue flag set.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.requeue()
        consumer.register_callback(callback)
        consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject:requeue', channel)

    def test_basic_reject__requeue_twice(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])

        def callback(message_data, message):
            message.requeue()
            message.requeue()
        consumer.register_callback(callback)
        with self.assertRaises(MessageStateError):
            consumer._receive_callback({'foo': 'bar'})
        self.assertIn('basic_reject:requeue', channel)

    def test_receive_without_callbacks_raises(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        with self.assertRaises(NotImplementedError):
            consumer.receive(1, 2)

    def test_decode_error(self):
        # Without an on_decode_error handler, decode errors propagate.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.channel.throw_decode_error = True

        with self.assertRaises(ValueError):
            consumer._receive_callback({'foo': 'bar'})

    def test_on_decode_error_callback(self):
        # With a handler, the (message, exc) pair is delivered instead.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        thrown = []

        def on_decode_error(msg, exc):
            thrown.append((msg.body, exc))

        consumer = Consumer(channel, [b1], on_decode_error=on_decode_error)
        consumer.channel.throw_decode_error = True
        consumer._receive_callback({'foo': 'bar'})

        self.assertTrue(thrown)
        m, exc = thrown[0]
        self.assertEqual(anyjson.loads(m), {'foo': 'bar'})
        self.assertIsInstance(exc, ValueError)

    def test_recover(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        consumer.recover()
        self.assertIn('basic_recover', channel)

    def test_revive(self):
        # revive() rebinds consumer, queues and exchanges to the new
        # channel.
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        consumer = Consumer(channel, [b1])
        channel2 = self.connection.channel()
        consumer.revive(channel2)
        self.assertIs(consumer.channel, channel2)
        self.assertIs(consumer.queues[0].channel, channel2)
        self.assertIs(consumer.queues[0].exchange.channel, channel2)

    def test__repr__(self):
        channel = self.connection.channel()
        b1 = Queue('qname1', self.exchange, 'rkey')
        self.assertTrue(repr(Consumer(channel, [b1])))

    def test_connection_property_handles_AttributeError(self):
        # A channel object without .connection must yield None, not raise.
        p = self.connection.Consumer()
        p.channel = object()
        self.assertIsNone(p.connection)
class test_MemoryTransport:
    """Integration-style tests of kombu's in-memory transport.

    setup() builds two direct-routed queues (q, q2) and two fanout-bound
    queues (q3, q4) on a 'memory://' connection.
    """

    def setup(self):
        self.c = Connection(transport='memory')
        self.e = Exchange('test_transport_memory')
        self.q = Queue('test_transport_memory',
                       exchange=self.e,
                       routing_key='test_transport_memory')
        self.q2 = Queue('test_transport_memory2',
                        exchange=self.e,
                        routing_key='test_transport_memory2')
        self.fanout = Exchange('test_transport_memory_fanout', type='fanout')
        self.q3 = Queue('test_transport_memory_fanout1',
                        exchange=self.fanout)
        self.q4 = Queue('test_transport_memory_fanout2',
                        exchange=self.fanout)

    def test_driver_version(self):
        assert self.c.transport.driver_version()

    def test_produce_consume_noack(self):
        # With no_ack=True all ten messages are delivered without
        # requiring acknowledgement.
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer = Consumer(channel, self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        assert len(_received) == 10

    def test_produce_consume_fanout(self):
        # One publish to a fanout exchange reaches every bound queue.
        producer = self.c.Producer()
        consumer = self.c.Consumer([self.q3, self.q4])

        producer.publish(
            {'hello': 'world'},
            declare=consumer.queues,
            exchange=self.fanout,
        )

        assert self.q3(self.c).get().payload == {'hello': 'world'}
        assert self.q4(self.c).get().payload == {'hello': 'world'}
        assert self.q3(self.c).get() is None
        assert self.q4(self.c).get() is None

    def test_produce_consume(self):
        # Two consumers on two queues; also exercises compression,
        # queue.delete and queue.purge.
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer1 = Consumer(channel, self.q)
        consumer2 = Consumer(channel, self.q2)
        self.q2(channel).declare()

        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory')
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory2')

        _received1 = []
        _received2 = []

        def callback1(message_data, message):
            _received1.append(message)
            message.ack()

        def callback2(message_data, message):
            _received2.append(message)
            message.ack()

        consumer1.register_callback(callback1)
        consumer2.register_callback(callback2)

        consumer1.consume()
        consumer2.consume()

        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            self.c.drain_events()

        assert len(_received1) + len(_received2) == 20

        # compression
        producer.publish({'compressed': True},
                         routing_key='test_transport_memory',
                         compression='zlib')
        m = self.q(channel).get()
        assert m.payload == {'compressed': True}

        # queue.delete
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory')
        assert self.q(channel).get()
        self.q(channel).delete()
        self.q(channel).declare()
        assert self.q(channel).get() is None

        # queue.purge
        for i in range(10):
            producer.publish({'foo': i},
                             routing_key='test_transport_memory2')
        assert self.q2(channel).get()
        self.q2(channel).purge()
        assert self.q2(channel).get() is None

    def test_drain_events(self):
        # drain_events with nothing queued must time out.
        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        c1 = self.c.channel()
        c2 = self.c.channel()

        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)
        del (c1)  # so pyflakes doesn't complain.
        del (c2)

    def test_drain_events_unregistered_queue(self):
        # Messages from the transport cycle are dispatched even for a
        # queue without a registered callback consumer.
        c1 = self.c.channel()
        producer = self.c.Producer()
        consumer = self.c.Consumer([self.q2])

        producer.publish(
            {'hello': 'world'},
            declare=consumer.queues,
            routing_key=self.q2.routing_key,
            exchange=self.q2.exchange,
        )
        message = consumer.queues[0].get()._raw

        class Cycle:
            def get(self, callback, timeout=None):
                return (message, 'foo'), c1

        self.c.transport.cycle = Cycle()
        self.c.drain_events()

    def test_queue_for(self):
        # _queue_for memoizes: repeated lookups return the same object.
        chan = self.c.channel()
        chan.queues.clear()
        x = chan._queue_for('foo')
        assert x
        assert chan._queue_for('foo') is x

    # see the issue
    # https://github.com/celery/kombu/issues/1050
    def test_producer_on_return(self):
        def on_return(_exception, _exchange, _routing_key, _message):
            pass

        channel = self.c.channel()
        producer = Producer(channel, on_return=on_return)
        consumer = self.c.Consumer([self.q3])

        producer.publish(
            {'hello': 'on return'},
            declare=consumer.queues,
            exchange=self.fanout,
        )
        assert self.q3(self.c).get().payload == {'hello': 'on return'}
        assert self.q3(self.c).get() is None