def test_fanout(self, name='test_mongodb_fanout'):
    """Verify fanout delivery to two queues, then queue delete/purge.

    Publishes 10 messages per routing key, drains until all 20 are
    consumed, then checks that ``delete`` and ``purge`` both leave the
    queue empty.
    """
    if not self.verify_alive():
        return
    c = self.connection
    self.e = Exchange(name, type='fanout')
    self.q = Queue(name, exchange=self.e, routing_key=name)
    self.q2 = Queue(name + '2', exchange=self.e, routing_key=name + '2')
    channel = c.default_channel
    producer = Producer(channel, self.e)
    consumer1 = Consumer(channel, self.q)
    consumer2 = Consumer(channel, self.q2)
    self.q2(channel).declare()
    for i in range(10):
        producer.publish({'foo': i}, routing_key=name)
    for i in range(10):
        producer.publish({'foo': i}, routing_key=name + '2')

    _received1 = []
    _received2 = []

    def callback1(message_data, message):
        _received1.append(message)
        message.ack()

    def callback2(message_data, message):
        _received2.append(message)
        message.ack()

    consumer1.register_callback(callback1)
    consumer2.register_callback(callback2)
    # `contextlib.nested` was removed in Python 3; the multi-context
    # `with` statement (available since 2.7) is the exact equivalent.
    with consumer1, consumer2:
        while 1:
            if len(_received1) + len(_received2) == 20:
                break
            c.drain_events(timeout=60)
    self.assertEqual(len(_received1) + len(_received2), 20)

    # queue.delete: draining must leave nothing behind after redeclare.
    for i in range(10):
        producer.publish({'foo': i}, routing_key=name)
    self.assertTrue(self.q(channel).get())
    self.q(channel).delete()
    self.q(channel).declare()
    self.assertIsNone(self.q(channel).get())

    # queue.purge: purging removes all pending messages.
    for i in range(10):
        producer.publish({'foo': i}, routing_key=name + '2')
    self.assertTrue(self.q2(channel).get())
    self.q2(channel).purge()
    self.assertIsNone(self.q2(channel).get())
def fire_timers(self, min_delay=1, max_delay=10, max_timers=10,
                propagate=()):
    """Fire up to ``max_timers`` due timer entries.

    :param min_delay: lower bound on the returned delay.
    :param max_delay: upper bound on the returned delay.
    :param max_timers: maximum number of entries to fire per call.
    :param propagate: exception types that are re-raised to the caller.
    :returns: the next scheduler delay, clamped to the
        ``[min_delay, max_delay]`` window.

    Errors other than ``propagate``, :exc:`MemoryError`,
    :exc:`AssertionError` and ENOMEM are logged and swallowed so one
    bad timer cannot kill the loop.
    """
    timer = self.timer
    delay = None
    if timer and timer._queue:
        for i in range(max_timers):
            delay, entry = next(self.scheduler)
            if entry is None:
                # Nothing due yet; `delay` tells us how long to wait.
                break
            try:
                entry()
            except propagate:
                raise
            except (MemoryError, AssertionError):
                raise
            except OSError as exc:
                # Out-of-memory from the OS is fatal; anything else is
                # just logged.
                if exc.errno == errno.ENOMEM:
                    raise
                logger.error('Error in timer: %r', exc, exc_info=1)
            except Exception as exc:
                logger.error('Error in timer: %r', exc, exc_info=1)
    # Clamp to [min_delay, max_delay].  The previous
    # `min(delay or min_delay, max_delay)` could return a value below
    # `min_delay`, defeating the parameter's purpose and diverging from
    # the sibling fire_timers implementations.
    return min(max(delay or 0, min_delay), max_delay)
def test_acquire__release(self):
    """Acquiring drains the pool; releasing replenishes it."""
    pool = self.create_resource(10)
    self.assert_state(pool, 10, 0)
    held = [pool.acquire() for _ in range(10)]
    self.assert_state(pool, 0, 10)
    # An eleventh acquire must overflow the limit.
    with pytest.raises(pool.LimitExceeded):
        pool.acquire()
    held.pop().release()
    self.assert_state(pool, 1, 9)
    for chan in held:
        chan.release()
    self.assert_state(pool, 10, 0)
def test_produce__consume_large_messages(
        self, bytes=1048576, n=10,
        charset=string.punctuation + string.ascii_letters + string.digits):
    """Publish `n` large random payloads and verify they arrive intact."""
    if not self.verify_alive():
        return
    # Respect the transport's message size limit, if one is declared.
    bytes = min(x for x in [bytes, self.message_size_limit] if x)
    payloads = []
    for i in range(n):
        body = ''.join(random.choice(charset) for j in range(bytes))
        payloads.append(body + '--%s' % n)
    checksums = []
    chan1 = self.connection.channel()
    consumer = chan1.Consumer(self.queue)
    self.purge_consumer(consumer)
    producer = chan1.Producer(self.exchange)
    for i, payload in enumerate(payloads):
        producer.publish({'text': payload, 'i': i},
                         routing_key=self.prefix)
        checksums.append(self._digest(payload))
    received = [
        (msg['i'], msg['text'])
        for msg in consumeN(self.connection, consumer, n)
    ]
    self.assertEqual(len(received), n)
    ordering = [i for i, _ in received]
    if ordering != list(range(n)) and not self.suppress_disorder_warning:
        warnings.warn('%s did not deliver messages in FIFO order: %r' % (
            self.transport, ordering))
    for i, text in received:
        if text != payloads[i]:
            raise AssertionError('%i: %r is not %r' % (
                i, text[-100:], payloads[i][-100:]))
        self.assertEqual(self._digest(text), checksums[i])
    chan1.close()
    self.purge([self.queue.name])
def _createref():
    # Open a connection, churn through 100 channels, and hand back
    # weak references so the caller can verify they get collected.
    conn = self.get_connection()
    conn.connect()
    refs = []
    try:
        for _ in range(100):
            chan = conn.channel()
            refs.append(weakref.ref(chan))
            chan.close()
    finally:
        conn.close()
    return refs
def _purge(self, queue): """Delete all current messages in a queue.""" q = self._new_queue(queue) # SQS is slow at registering messages, so run for a few # iterations to ensure messages are detected and deleted. size = 0 for i in range(10): size += int(self._size(queue)) if not size: break self.sqs.purge_queue(QueueUrl=q) return size
def _purge(self, queue): """Delete all current messages in a queue.""" q = self._new_queue(queue) # SQS is slow at registering messages, so run for a few # iterations to ensure messages are deleted. size = 0 for i in range(10): size += q.count() if not size: break q.clear() return size
def test_acquire__release(self):
    """Acquiring exhausts the pool; releasing restores capacity."""
    if self.abstract:
        return
    pool = self.create_resource(10, 0)
    self.assertState(pool, 10, 0)
    held = [pool.acquire() for _ in range(10)]
    self.assertState(pool, 0, 10)
    # An eleventh acquire must overflow the limit.
    with self.assertRaises(pool.LimitExceeded):
        pool.acquire()
    held.pop().release()
    self.assertState(pool, 1, 9)
    for chan in held:
        chan.release()
    self.assertState(pool, 10, 0)
def fire_timers(self, min_delay=1, max_delay=10, max_timers=10,
                propagate=()):
    """Fire due timer entries, at most ``max_timers`` of them.

    Exceptions listed in ``propagate`` are re-raised; anything else is
    logged and swallowed.  Returns the next scheduler delay clamped to
    the ``[min_delay, max_delay]`` window.
    """
    timer = self.timer
    next_delay = None
    if timer and timer._queue:
        for _ in range(max_timers):
            next_delay, entry = next(self.scheduler)
            if entry is None:
                # Nothing due yet; next_delay says how long to wait.
                break
            try:
                entry()
            except propagate:
                raise
            except Exception as exc:
                logger.error("Error in timer: %r", exc, exc_info=1)
    return min(max(next_delay or 0, min_delay), max_delay)
def test_produce__consume_large_messages(
        self, bytes=1048576, n=10,
        charset=string.punctuation + string.ascii_letters + string.digits):
    """Publish `n` large random payloads and verify they arrive intact.

    NOTE: ``string.letters`` was replaced with ``string.ascii_letters``:
    the former was removed in Python 3 (AttributeError at definition
    time) and was locale-dependent on Python 2.  This matches the
    sibling implementation of this test in the same codebase.
    """
    if not self.verify_alive():
        return
    # Respect the transport's message size limit, if one is declared.
    bytes = min(x for x in [bytes, self.message_size_limit] if x)
    messages = [''.join(random.choice(charset)
                        for j in range(bytes)) + '--%s' % n
                for i in range(n)]
    digests = []
    chan1 = self.connection.channel()
    consumer = chan1.Consumer(self.queue)
    self.purge_consumer(consumer)
    producer = chan1.Producer(self.exchange)
    for i, message in enumerate(messages):
        producer.publish({'text': message, 'i': i},
                         routing_key=self.prefix)
        digests.append(self._digest(message))
    received = [(msg['i'], msg['text'])
                for msg in consumeN(self.connection, consumer, n)]
    self.assertEqual(len(received), n)
    ordering = [i for i, _ in received]
    if ordering != list(range(n)) and not self.suppress_disorder_warning:
        warnings.warn(
            '%s did not deliver messages in FIFO order: %r' % (
                self.transport, ordering))
    for i, text in received:
        if text != messages[i]:
            raise AssertionError('%i: %r is not %r' % (
                i, text[-100:], messages[i][-100:]))
        self.assertEqual(self._digest(text), digests[i])
    chan1.close()
    self.purge([self.queue.name])
def test_purge(self):
    """Purging removes every message published to the queue."""
    if not self.verify_alive():
        return
    chan1 = self.connection.channel()
    consumer = chan1.Consumer(self.queue)
    self.purge_consumer(consumer)
    producer = chan1.Producer(self.exchange)
    for _ in range(10):
        producer.publish({'foo': 'bar'}, routing_key=self.prefix)
    if self.reliable_purge:
        # A reliable purge reports the exact number removed, and a
        # second purge finds nothing.
        self.assertEqual(consumer.purge(), 10)
        self.assertEqual(consumer.purge(), 0)
    else:
        # Best-effort transports: keep purging until nearly all of the
        # published messages are gone.
        purged = 0
        while purged < 9:
            purged += self.purge_consumer(consumer)
def fire_timers(self, min_delay=1, max_delay=10, max_timers=10,
                propagate=()):
    """Fire due timer entries, at most ``max_timers`` of them.

    ``propagate`` exceptions and :exc:`MemoryError` are re-raised;
    anything else is logged and swallowed.  Returns the next scheduler
    delay clamped to the ``[min_delay, max_delay]`` window.
    """
    timer = self.timer
    next_delay = None
    if timer and timer._queue:
        for _ in range(max_timers):
            next_delay, entry = next(self.scheduler)
            if entry is None:
                # Nothing due yet; next_delay says how long to wait.
                break
            try:
                entry()
            except propagate:
                raise
            except MemoryError:
                raise
            except Exception as exc:
                logger.error('Error in timer: %r', exc, exc_info=1)
    return min(max(next_delay or 0, min_delay), max_delay)
def test_basic_get(self):
    """A published message can be fetched synchronously with get()."""
    if not self.verify_alive():
        return
    chan1 = self.connection.channel()
    producer = chan1.Producer(self.exchange)
    chan2 = self.connection.channel()
    queue = Queue(self.P('basic_get'), self.exchange, 'basic_get')
    queue = queue(chan2)
    queue.declare()
    producer.publish({'basic.get': 'this'}, routing_key='basic_get')
    chan1.close()
    # Poll: some transports take a moment before the message becomes
    # visible to get().
    for _ in range(self.event_loop_max):
        m = queue.get()
        if m:
            break
        time.sleep(0.1)
    self.assertEqual(m.payload, {'basic.get': 'this'})
    self.purge([queue.name])
    chan2.close()