def doche(onerror):
    yield self.client.dropConnection()
    yield self.clearQueue(QX)
    self.messages_cnt = 0
    sql = self.client.setupQueueConsuming(
        QX, rcv, on_error=onerror, requeue_delay=0.1)
    self.publish = True
    yield self.client.publishMessage(
        exchange='', routing_key=QX, body=("***" + onerror))
    yield sleep(0.95)
    x1 = self.messages_cnt
    self.messages_cnt = 0
    yield self.client.dropConnection()
    yield sleep(0.95)
    x2 = self.messages_cnt
    self.messages_cnt = 0
    yield self.client.dropConnection()
    yield sleep(0.95)
    x3 = self.messages_cnt
    self.messages_cnt = 0
    yield sql.stopService()
    defer.returnValue((x1, x2, x3))
def test_send_to_exchange(self):
    result = []
    sels = [self.client.setupExchangeConsuming(E1, result.append)
            for _ in range(5)]
    yield sleep(0.2)
    yield self.client.publishMessage(exchange=E1, routing_key=Q1, body="tm1")
    yield self.client.publishMessage(exchange=E1, routing_key=Q1, body="tm2")
    yield sleep(0.2)
    for sel in sels:
        yield sel.stopService()
    yield sleep(0.2)
    yield self.client.publishMessage(exchange=E1, routing_key=Q1, body="tm_after")
    yield sleep(1)
    self.assertEqual(
        sorted(["tm1", "tm2"] * len(sels)),
        sorted(result),
    )
    del result[:]
    for sel in sels:
        yield sel.startService()
    yield sleep(0.2)
    self.assertEqual([], result)
    for sel in sels:
        yield sel.stopService()
def test_auto_disconnection(self):
    self.client.disconnect_delay = 0.5
    self.client.resetReconnectDelay()
    self.disconnections = 0

    def dropConnection():
        self.disconnections += 1
        old_dropConnection()

    old_dropConnection = self.client.dropConnection
    self.client.dropConnection = dropConnection

    sql = self.client.setupQueueConsuming(QX, self.rcv)
    yield self.pushToken()
    yield sleep(5)  # expect auto disconnects here

    # disable disconnects - we want to consume the initial token
    self.client.disconnect_delay = None
    self.client.resetReconnectDelay()
    yield sleep(1)

    cnt = yield self.consumeToken(sleep_time=2)
    self.assertTrue(self.disconnections > 2, "auto disconnects were made")
    self.assertTrue(cnt > 0, "initial message was lost")
    yield sql.stopService()
def test_multi_client(self):
    m = self.memcache.multiClient(self.clientByKey)
    keys = gr(50)
    expected_vs = {}
    for i, key in enumerate(keys):
        yield m.set(key, str(i))
        expected_vs[key] = 0, str(i)

    yield self.memcache['c1'].dropConnection()
    yield timed.sleep(0.1)

    try:
        yield m.getMultiple(keys, ignoreErrors=False)
    except defer.FirstError as e:
        self.assertTrue(e.subFailure.check(error.ConnectionClosed))
    else:
        self.fail("expected ConnectionClosed")

    vs0 = yield m.getMultiple(keys)
    yield timed.sleep(0.3)
    self.assertTrue(all(
        isinstance(e.value, error.ConnectionClosed)
        for e in self.flushLoggedErrors()))

    vs = yield m.getMultiple(keys)
    self.assertEqual(expected_vs, vs)
    self.assertTrue(vs0)
    self.assertTrue(len(vs0) < len(vs))
def test_parallel_consumers(self):
    results = []

    def on_msg(m):
        results.append(m)
        return sleep(0.2)  # each message takes 0.2s to process

    # a single consuming "thread": messages are processed one at a time
    sql = self.client.setupQueueConsuming(Q1, on_msg, parallel=1)
    yield sql.stopService()
    self.assertEqual([], results)

    for i in range(5):
        self.client.publishMessage(
            exchange='', routing_key=Q1, confirm=0,
            body=str(i), content_type='plain/text')

    # at 0.2s per message, only ~2 messages fit into the 0.3s window
    yield sql.startService()
    yield sleep(0.3)
    self.assertEqual(2, len(results), "should receive exactly 2 messages")
    yield sql.stopService()

    del results[:]
    yield sleep(0.5)
    self.assertEqual(0, len(results), "no more messages (consuming cancelled)")
def test_several_messages_without_serialization(self):
    yield self.client.publishMessage(exchange='', routing_key=Q1, body="tm1")
    result = []
    sql = self.client.setupQueueConsuming(Q1, result.append)
    yield self.client.publishMessage(exchange='', routing_key=Q1, body="tm2")
    yield self.client.publishMessage(exchange='', routing_key=Q1, body="tm3")
    yield sleep(0.1)
    yield sql.stopService()

    yield self.client.publishMessage(exchange='', routing_key=Q1, body="tm_after")
    yield sleep(0.1)
    self.assertEqual(result, ["tm1", "tm2", "tm3"])

    del result[:]
    yield sql.startService()
    yield sleep(0.2)
    self.assertEqual(result, ["tm_after"])
def test_outgoing_queue(self):
    # TODO: duplication of 'test_reconnection', fix it
    self.client.reconnect_max_delay = 0.1
    processed_x = set()

    def rcv_unsafe(x):
        # some messages can be delivered several times - it's OK, just skip
        if x in processed_x:
            return
        processed_x.add(x)
        self.messages_cnt += 1
        if self.publish:
            self.client.publishMessage(
                exchange='', routing_key=QX,
                body=str(int(x) + 1),
                confirm=True,  # !!
            ).addBoth(lambda _: None)  # ignore result/errors

    sql = self.client.setupQueueConsuming(QX, rcv_unsafe)
    yield self.clearQueue(QX)
    yield self.client.publishMessage(exchange='', routing_key=QX, body='0')
    for i in range(5):
        yield self.client.dropConnection()
        yield sleep(i / 10)
    cnt = yield self.consumeToken()
    yield sql.stopService()
    self.assertTrue(cnt > 0, "initial message was lost")
def stopService(self):
    if not self.running:
        logger.debug("service %r already stopped", self)
        return

    if self._consume_deferred:
        logger.debug("waiting for previous consuming request...")
        timed.timeoutDeferred(self._consume_deferred, self.cancel_consuming_timeout)
        try:
            yield self._consume_deferred
        except Exception:
            logger.exception("error while waiting for consuming request")
        logger.debug("too quick consume-unconsume - sleep for 0.5 sec")
        yield timed.sleep(0.5)

    logger.debug("stop service %s", self)
    p1 = self._protocol_instance
    p2 = self.parent.amqp_service.getProtocol()
    if p1 is p2 and self.consumer_tag:
        logger.debug("protocol didn't change - cancel consuming")
        d = p1.cancelConsuming(self.consumer_tag)
        timed.timeoutDeferred(d, self.cancel_consuming_timeout)
        try:
            yield d
        except Exception:
            logger.exception("Can't cancel consuming")

    yield self._cancelActiveCallbacks()
    yield defer.maybeDeferred(service.Service.stopService, self)
def clearQueue(self, queue, sleep_time=0.3):
    def dropmsg(m):
        logger.info("DROP MSG %r", m)

    sql = self.client.setupQueueConsuming(queue, dropmsg)
    yield sleep(sleep_time)
    yield sql.stopService()
def test_timeout_deferred(self):
    d = timed.sleep(10000).addCallback(lambda _: 1)
    try:
        yield timed.timeoutDeferred(d, timeout=0.1)
    except timed.TimeoutError:
        pass
    else:
        self.fail("Expected TimeoutError")
def setUp(self):
    params = self.clientParams()
    self.endpoint = endpoints.TCP4ClientEndpoint(reactor, "localhost", 5672)
    self.factory = amqp.AMQPFactory(**params)
    self.client = amqp.AMQPService(self.endpoint, self.factory, **params)
    self.client.startService()
    yield sleep(1)
def consumeToken(self, sleep_time=0.3):
    logger.debug("start token consuming...")
    self.publish = False
    cnt = self.messages_cnt
    yield sleep(sleep_time)
    cnt = self.messages_cnt - cnt
    self.publish = True
    logger.debug("finish token consuming: %s", cnt)
    defer.returnValue(cnt)
def render_GET(self, request):
    t1 = time.time()
    delay = float(request.args.get('delay', [random.random()])[0])
    yield timed.sleep(delay)
    dt = (time.time() - t1) * 1000
    request.setHeader('content-type', 'text/plain')
    defer.returnValue("date: %s\nrendered in %d ms" % (time.ctime(), dt))
def test_reconnection(self):
    sql = self.client.setupQueueConsuming(QX, self.rcv)
    yield self.clearQueue(QX)
    yield self.pushToken()
    for i in range(5):
        yield self.client.dropConnection()
        yield sleep(i / 10)
    cnt = yield self.consumeToken()
    yield sql.stopService()
    self.assertTrue(cnt > 0, "initial message was lost")
def test_invalid_handler(self):
    self.client.reconnect_max_delay = 0.01
    yield self.client.dropConnection()

    def rcv(x):
        self.messages_cnt += 1
        if self.publish:
            raise Exception("some error here")

    sql = self.client.setupQueueConsuming(
        QX, rcv, on_error='requeue_hold', requeue_delay=0.2,
    )
    yield sleep(0.1)
    self.publish = True
    yield self.client.publishMessage(exchange='', routing_key=QX, body="***")
    yield sleep(0.05)
    self.assertEqual(1, self.messages_cnt, "no redelivery yet")
    yield sleep(1.0)
    self.assertEqual(2, self.messages_cnt, "just 1 redelivery")
    yield self.client.dropConnection()
    yield sleep(1.0)
    self.assertEqual(3, self.messages_cnt, "one more redelivery after disconnect")
    yield self.client.dropConnection()
    self.publish = False
    yield self.consumeToken()
    yield sql.stopService()
    self.assertEqual(4, self.messages_cnt, "message finally processed")
def setUp(self):
    self.config = {
        'MEMCACHE_SERVERS': {
            'c0': {'host': 'localhost'},
            'c1': {'host': 'localhost'},
            'c2': {'host': 'localhost'},
        },
    }
    self.config_id = conf.settings.add_config(self.config)
    self.app = Application(__name__)
    self.memcache = app.build_memcache(self.app)
    IService(self.app).startService()
    yield timed.sleep(1)
def consumeQueue(
        self, queue='', callback=None, no_ack=False, requeue_delay=None,
        on_error=None, consumer_tag=None, parallel=0, **kwargs):
    assert callback
    logger.info("consume queue '%s/%s'", self.virtual_host, queue)
    consumer_tag = consumer_tag or self._generateConsumerTag()

    ch = yield self.channel()
    if self.prefetch_count is not None:
        logger.debug("set qos prefetch_count to %d", self.prefetch_count)
        yield ch.basic_qos(prefetch_count=self.prefetch_count, all_channels=0)

    queue_obj, ct = yield ch.basic_consume(
        queue=queue, no_ack=no_ack, consumer_tag=consumer_tag, **kwargs)
    assert ct == consumer_tag
    ch.add_on_close_callback(functools.partial(self._on_consuming_channel_closed, ct))
    logger.debug("open channel (read) %r for ct %r", ch, ct)

    self._consumer_state[ct] = dict(
        channel=ch,
        queue_obj=queue_obj,
        ct=ct,
        no_ack=no_ack,
        callback=callback,
        parallel=parallel,
        kwargs=kwargs,
        queue=queue,
        requeue_delay=requeue_delay,
        on_error=on_error,
    )

    # pika doesn't wait for the 'ConsumeOk' message.
    # HACK-1: start the consuming loop a bit later to avoid races
    self.clock.callLater(
        0.05, self._queueCounsumingLoop, ct, queue_obj, callback,
        no_ack=no_ack, parallel=parallel,
    )
    # HACK-2: simulate waiting for 'ConsumeOk'
    yield timed.sleep(0.1)

    logger.debug("consumer tag is %r", consumer_tag)
    defer.returnValue(ct)
def test_reconnect(self):
    key, val = gr(2)
    c = self.memcache['c1']
    yield c.set(key, val)
    yield c.dropConnection()
    try:
        yield c.get(key, val)
    except pclient.NoPersisentClientConnection:
        pass
    else:
        self.fail("expected NoPersisentClientConnection")
    yield timed.sleep(0.3)
    fv = yield c.get(key)
    self.assertEqual((0, val), fv)
def test_ping_pong(self):
    messages_cnt = [0]

    def rcv(x):
        messages_cnt[0] += 1
        return self.client.publishMessage(
            exchange='', routing_key=Q1, body=str(int(x) + 1))

    sql = self.client.setupQueueConsuming(Q1, rcv, no_ack=True, parallel=10)
    for i in range(10):
        yield self.client.publishMessage(exchange='', routing_key=Q1, body="0")

    for i in range(20):
        yield sleep(0.1)
        self.assertTrue(messages_cnt[0] > 10, "greater than 100 messages per second")
        messages_cnt[0] = 0

    yield sql.stopService()
    yield self.clearQueue(Q1)
def render_POST_slow(self, request):
    for i in range(1000):
        request.write("TeesT" * 10)
        yield timed.sleep(0.01)
def render_GET_nocanceller(self, request):
    return timed.sleep(0.1)
def render_GET_canceller(self, request):
    def canceller(_):
        request.write("cancelled")

    d = defer.Deferred(canceller=canceller)
    return timed.sleep(0.1).addCallback(lambda _: d)
def render_GET_failure_with_deferred(self, request):
    yield timed.sleep(0.01)
    raise MyCustomError("error")
def render_GET_write_with_deferred(self, request):
    request.write("o")
    yield timed.sleep(0.01)
    request.write("k")
def render_GET_deferred(self, request):
    yield timed.sleep(0.01)
    defer.returnValue("ok")
def _sleep(self, t):
    d = timed.sleep(t)
    d.addErrback(lambda f: f.trap(defer.CancelledError) and None)
    self._sleeps.append(d)
    return d
def call():
    self.cnt += 1
    self.max_cnt = max(self.max_cnt, self.cnt)
    yield timed.sleep(0.05)
    self.cnt -= 1
def clearTestingQueue(self):
    self.publish = False  # consume & skip all messages
    yield sleep(0.1)
    self.messages_cnt = 0
    self.publish = True