def go(loop):
    """Demonstrate auto_unsubscribe, queue-group request handling and clean shutdown.

    BUG FIX: the original caught connect failures with a bare ``except: pass``
    and then carried on using a client that never connected; now the error is
    reported and the coroutine bails out early.
    """
    nc = NATS()
    try:
        yield from nc.connect(io_loop=loop)
    except Exception as e:
        # Nothing below can work without a connection.
        print("Error: could not connect to NATS: {}".format(e))
        return

    @asyncio.coroutine
    def message_handler(msg):
        print("[Received on '{}']: {}".format(msg.subject, msg.data.decode()))

    try:
        # Interested in receiving 2 messages from the 'discover' subject.
        sid = yield from nc.subscribe("discover", "", message_handler)
        yield from nc.auto_unsubscribe(sid, 2)

        yield from nc.publish("discover", b'hello')
        yield from nc.publish("discover", b'world')

        # Following 2 messages won't be received.
        yield from nc.publish("discover", b'again')
        yield from nc.publish("discover", b'!!!!!')
    except ErrConnectionClosed:
        print("Connection closed prematurely")

    @asyncio.coroutine
    def request_handler(msg):
        print("[Request on '{} {}']: {}".format(msg.subject, msg.reply,
                                                msg.data.decode()))
        yield from nc.publish(msg.reply, b'OK')

    if nc.is_connected:
        # Subscription using a 'workers' queue so that only a single subscriber
        # gets a request at a time.
        yield from nc.subscribe("help", "workers", cb=request_handler)

        try:
            # Make a request expecting a single response within 500 ms,
            # otherwise raising a timeout error.
            msg = yield from nc.timed_request("help", b'help please', 0.500)
            print("[Response]: {}".format(msg.data))

            # Make a roundtrip to the server to ensure messages
            # that sent messages have been processed already.
            yield from nc.flush(0.500)
        except ErrTimeout:
            print("[Error] Timeout!")

    # Wait a bit for message to be dispatched...
    yield from asyncio.sleep(1, loop=loop)

    # Detach from the server.
    yield from nc.close()

    if nc.last_error is not None:
        print("Last Error: {}".format(nc.last_error))

    if nc.is_closed:
        print("Disconnected.")
def test_subscribe(self):
    """A TLS-connected subscriber receives exactly the messages for its subject."""
    nc = NATS()
    received = []

    @asyncio.coroutine
    def subscription_handler(msg):
        received.append(msg)

    yield from nc.connect(io_loop=self.loop,
                          servers=['nats://localhost:4224'],
                          tls=self.ssl_ctx)
    payload = b'hello world'
    sid = yield from nc.subscribe("foo", cb=subscription_handler)
    yield from nc.publish("foo", payload)
    yield from nc.publish("bar", payload)

    # Empty subjects are rejected by the client.
    with self.assertRaises(ErrBadSubject):
        yield from nc.publish("", b'')

    # Give the server a moment to deliver.
    yield from asyncio.sleep(0.2, loop=self.loop)
    self.assertEqual(1, len(received))
    first = received[0]
    self.assertEqual('foo', first.subject)
    self.assertEqual('', first.reply)
    self.assertEqual(payload, first.data)
    self.assertEqual(1, nc._subs[sid].received)
    yield from nc.close()
def run(loop):
    """Request a config value, publish log events, and serve 'cmd.help' requests.

    BUG FIX: the client class is ``NATS`` (as everywhere else in these
    examples); ``Nats()`` raised NameError.
    """
    nc = NATS()
    yield from nc.connect(io_loop=loop)

    # Send a request and expect a single response and trigger timeout if not
    # faster than 50 ms.
    try:
        response = yield from nc.timed_request("conf.host", b'host', 0.050)
        print("Received response: {message}".format(message=response.data.decode()))
    except ErrTimeout:
        print("Request timed out")

    yield from nc.publish("log.info", b'initializing')
    yield from nc.publish("log.info", b'scraping item 1')

    @asyncio.coroutine
    def help_request(msg):
        subject = msg.subject
        reply = msg.reply
        data = msg.data.decode()
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=subject, reply=reply, data=data))
        yield from nc.publish(reply, b'I can help')

    # Use queue named 'workers' for distributing requests among subscribers.
    yield from nc.subscribe("cmd.help", "workers", help_request)

    yield from asyncio.sleep(20, loop=loop)
    yield from nc.close()
class Notification(object):
    """Publishes notification events to a NATS subject from caller threads.

    The connection lives on ``self._io_loop``; ``push()`` hands coroutines to
    that loop via ``run_coroutine_threadsafe``.
    """

    def __init__(self, nats_host):
        self._nats = NATS()
        self._io_loop = asyncio.get_event_loop()
        asyncio.run_coroutine_threadsafe(self._initialize_nats(nats_host),
                                         self._io_loop)

    async def _initialize_nats(self, nats_host):
        """Connect to NATS with lifecycle callbacks wired up; re-raise on failure."""
        options = {
            "servers": nats_host,
            "io_loop": self._io_loop,
            # BUG FIX: was misspelled "max_reconnect_attemps", which connect()
            # rejects as an unknown keyword argument.
            "max_reconnect_attempts": 60,
            "disconnected_cb": self._nats_disconnected_cb,
            "reconnected_cb": self._nats_reconnected_cb,
            "error_cb": self._nats_error_cb,
            "closed_cb": self._nats_closed_cb
        }
        try:
            await self._nats.connect(**options)
        except ErrNoServers as e:
            logging.error(str(e))
            raise

    async def _nats_disconnected_cb(self):
        logging.info("[NATS] disconnected")

    async def _nats_reconnected_cb(self):
        logging.info("[NATS] reconnected")

    async def _nats_error_cb(self, e):
        logging.error("[NATS] ERROR: {}".format(e))

    # BUG FIX: renamed from _nats_closed so the "closed_cb" option above
    # actually resolves to an existing attribute.
    async def _nats_closed_cb(self):
        logging.info("[NATS] connection is closed")

    def cleanup(self):
        # BUG FIX: the attribute is _nats (there is no _nats_client), and
        # close() is a coroutine that must run on the client's event loop.
        # Fire-and-forget to avoid blocking/deadlocking from __del__.
        asyncio.run_coroutine_threadsafe(self._nats.close(), self._io_loop)
        self._io_loop.close()

    def __del__(self):
        self.cleanup()

    def push(self, category, content, timestamp=None):
        """Publish one notification as JSON; blocks until the publish resolves."""
        import json  # local import; the top-of-file import block is outside this view

        logging.info("Pushing notification: {}: {}".format(category, content))
        if timestamp is None:
            timestamp = time.time()
        msg = {
            "timestamp": timestamp,
            "category": category,
            "content": content
        }
        try:
            # BUG FIX: publish() is a coroutine and expects bytes; the original
            # called it synchronously with a dict (never actually published).
            future = asyncio.run_coroutine_threadsafe(
                self._nats.publish(CHANNEL_NAME, json.dumps(msg).encode()),
                self._io_loop)
            future.result()
        except ErrConnectionClosed:
            logging.error("Connection closed prematurely.")
            raise
        except ErrTimeout:
            # BUG FIX: the original logged an undefined name `event`.
            logging.error("Timeout occurred when publishing"
                          " event: {}".format(msg))
            raise
def run(loop):
    """Publish two messages to 'foo.thing', flush, and disconnect."""
    nc = NATS()
    yield from nc.connect(servers=["nats://127.0.0.1:4222"], io_loop=loop)

    yield from nc.publish("foo.thing", b'Hello!')

    # Second message carries the current timestamp.
    stamp = datetime.now().isoformat()
    message = 'Current date: at {now}'.format(now=stamp)
    yield from nc.publish("foo.thing", message.encode())

    yield from nc.flush()
    yield from nc.close()
def test_async_await_messages_delivery_order(self):
    """Slow handlers must not block other subscriptions' deliveries."""
    nc = NATS()
    msgs = []
    errors = []

    async def error_handler(e):
        # BUG FIX: Python lists have no .push(); the original raised
        # AttributeError whenever an error was reported.
        errors.append(e)

    yield from nc.connect(io_loop=self.loop, error_cb=error_handler)

    @asyncio.coroutine
    def handler_foo(msg):
        msgs.append(msg)
        # Should not block other subscriptions from receiving messages.
        yield from asyncio.sleep(0.2, loop=self.loop)
        if msg.reply != "":
            yield from nc.publish(msg.reply, msg.data*2)

    yield from nc.subscribe("foo", cb=handler_foo)

    async def handler_bar(msg):
        msgs.append(msg)
        if msg.reply != "":
            await nc.publish(msg.reply, b'')

    yield from nc.subscribe("bar", cb=handler_bar)

    yield from nc.publish("foo", b'1')
    yield from nc.publish("foo", b'2')
    yield from nc.publish("foo", b'3')

    # Will be processed before the others since no head of line
    # blocking among the subscriptions.
    yield from nc.publish("bar", b'4')

    response = yield from nc.request("foo", b'hello1', 1)
    self.assertEqual(response.data, b'hello1hello1')

    with self.assertRaises(ErrTimeout):
        yield from nc.request("foo", b'hello2', 0.1)

    yield from nc.publish("bar", b'5')
    response = yield from nc.request("foo", b'hello2', 1)
    self.assertEqual(response.data, b'hello2hello2')

    self.assertEqual(msgs[0].data, b'1')
    self.assertEqual(msgs[1].data, b'4')
    self.assertEqual(msgs[2].data, b'2')
    self.assertEqual(msgs[3].data, b'3')
    self.assertEqual(msgs[4].data, b'hello1')
    self.assertEqual(msgs[5].data, b'hello2')
    self.assertEqual(len(errors), 0)
    yield from nc.close()
def run(loop):
    """TLS example: pub/sub with auto_unsubscribe plus queue-group request/reply."""
    nc = NATS()

    # Client-side TLS context using a custom CA and a client certificate.
    ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
    ssl_ctx.protocol = ssl.PROTOCOL_TLSv1_2
    ssl_ctx.load_verify_locations('../tests/certs/ca.pem')
    ssl_ctx.load_cert_chain(certfile='../tests/certs/client-cert.pem',
                            keyfile='../tests/certs/client-key.pem')
    yield from nc.connect(io_loop=loop, tls=ssl_ctx)

    @asyncio.coroutine
    def message_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    # Plain subscription, capped at two deliveries.
    sid = yield from nc.subscribe("foo", cb=message_handler)
    yield from nc.auto_unsubscribe(sid, 2)
    for chunk in (b'Hello', b'World', b'!!!!!'):
        yield from nc.publish("foo", chunk)

    @asyncio.coroutine
    def help_request(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))
        yield from nc.publish(msg.reply, b'I can help')

    # Queue group so requests are load balanced among subscribers.
    yield from nc.subscribe("help", "workers", help_request)

    # Single-response request with a 50 ms deadline.
    try:
        response = yield from nc.timed_request("help", b'help me', 0.050)
        print("Received response: {message}".format(
            message=response.data.decode()))
    except ErrTimeout:
        print("Request timed out")

    yield from asyncio.sleep(1, loop=loop)
    yield from nc.close()
def test_subscription_slow_consumer_pending_bytes_limit(self):
    """A tiny pending_bytes_limit turns a slow subscriber into slow-consumer errors."""
    nc = NATS()
    received = []
    slow_errors = []

    async def error_handler(e):
        if type(e) is ErrSlowConsumer:
            slow_errors.append(e)

    yield from nc.connect(io_loop=self.loop, error_cb=error_handler)

    @asyncio.coroutine
    def foo_cb(msg):
        # Deliberately slow so the pending buffer overflows.
        yield from asyncio.sleep(0.2, loop=self.loop)
        received.append(msg)
        if msg.reply != "":
            yield from nc.publish(msg.reply, msg.data*2)

    yield from nc.subscribe("foo", cb=foo_cb, pending_bytes_limit=10)

    async def bar_cb(msg):
        received.append(msg)
        if msg.reply != "":
            await nc.publish(msg.reply, msg.data*3)

    yield from nc.subscribe("bar", cb=bar_cb)

    for i in range(10):
        yield from nc.publish("foo", "AAA{}".format(i).encode())

    # Processed before the others: no head-of-line blocking across subscriptions.
    yield from nc.publish("bar", b'14')
    response = yield from nc.request("bar", b'hi1', 2)
    self.assertEqual(response.data, b'hi1hi1hi1')

    self.assertEqual(len(received), 2)
    self.assertEqual(received[0].data, b'14')
    self.assertEqual(received[1].data, b'hi1')

    # A couple of messages fit under the limit; the rest were slow consumers.
    self.assertTrue(7 <= len(slow_errors) <= 8)
    for err in slow_errors:
        self.assertEqual(type(err), ErrSlowConsumer)
    self.assertEqual(slow_errors[0].sid, 1)

    # After draining for a few seconds the subscription should be healthy again.
    yield from asyncio.sleep(3, loop=self.loop)
    response = yield from nc.request("foo", b'B', 1)
    self.assertEqual(response.data, b'BB')
    yield from nc.close()
def run(loop):
    """TLS example: limited pub/sub plus a queue-group request/reply roundtrip."""
    nc = NATS()

    # Mutual-TLS context: custom CA plus client certificate and key.
    ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
    ssl_ctx.protocol = ssl.PROTOCOL_TLSv1_2
    ssl_ctx.load_verify_locations('../tests/certs/ca.pem')
    ssl_ctx.load_cert_chain(certfile='../tests/certs/client-cert.pem',
                            keyfile='../tests/certs/client-key.pem')
    yield from nc.connect(io_loop=loop, tls=ssl_ctx)

    @asyncio.coroutine
    def message_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    # Simple subscription that stops after two deliveries.
    sid = yield from nc.subscribe("foo", cb=message_handler)
    yield from nc.auto_unsubscribe(sid, 2)
    for chunk in (b'Hello', b'World', b'!!!!!'):
        yield from nc.publish("foo", chunk)

    @asyncio.coroutine
    def help_request(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))
        yield from nc.publish(msg.reply, b'I can help')

    # 'workers' queue group distributes requests among subscribers.
    yield from nc.subscribe("help", "workers", help_request)

    # One-shot request that times out after 50 ms.
    try:
        response = yield from nc.timed_request("help", b'help me', 0.050)
        print("Received response: {message}".format(message=response.data.decode()))
    except ErrTimeout:
        print("Request timed out")

    yield from asyncio.sleep(1, loop=loop)
    yield from nc.close()
def run(loop):
    """Publish a count and a quit command, flushing after each publish."""
    nc = NATS()
    yield from nc.connect(
        servers=[
            "nats://tilient.org:44222",
            "nats://wiffel.space:44222",
        ],
        io_loop=loop,
    )
    yield from nc.publish("count", b"33")
    yield from nc.flush(0.500)
    yield from nc.publish("cmd", b"quit")
    yield from nc.flush(0.500)
    yield from nc.close()
def run(loop):
    """Pub/sub and request/reply against a local server, then explicit unsubscribe."""
    nc = NATS()
    yield from nc.connect("127.0.0.1:4222", loop=loop)

    @asyncio.coroutine
    def message_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    # Async subscriber, limited to two deliveries.
    sid = yield from nc.subscribe("foo", cb=message_handler)
    yield from nc.auto_unsubscribe(sid, 2)
    for payload in (b'Hello', b'World', b'!!!!!'):
        yield from nc.publish("foo", payload)

    @asyncio.coroutine
    def help_request(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))
        yield from nc.publish(msg.reply, b'I can help')

    # Queue group 'workers' load balances requests among subscribers.
    sid = yield from nc.subscribe("help", "workers", help_request)

    # One-shot request with a 50 ms deadline.
    try:
        response = yield from nc.request("help", b'help me', 0.050)
        print("Received response: {message}".format(
            message=response.data.decode()))
    except ErrTimeout:
        print("Request timed out")

    # Remove interest in the subscription before disconnecting.
    yield from nc.unsubscribe(sid)
    yield from nc.close()
def test_subscribe(self):
    """Subscription delivery plus client- and server-side stats accounting."""
    nc = NATS()
    received = []

    @asyncio.coroutine
    def subscription_handler(msg):
        received.append(msg)

    yield from nc.connect(io_loop=self.loop)
    payload = b'hello world'
    sid = yield from nc.subscribe("foo", cb=subscription_handler)
    yield from nc.publish("foo", payload)
    yield from nc.publish("bar", payload)

    # Empty subjects are invalid.
    with self.assertRaises(ErrBadSubject):
        yield from nc.publish("", b'')

    # Allow some time for delivery.
    yield from asyncio.sleep(0.2, loop=self.loop)
    self.assertEqual(1, len(received))
    first = received[0]
    self.assertEqual('foo', first.subject)
    self.assertEqual('', first.reply)
    self.assertEqual(payload, first.data)
    self.assertEqual(1, nc._subs[sid].received)
    yield from nc.close()

    # After close, the subscription record is gone.
    with self.assertRaises(KeyError):
        nc._subs[sid]

    self.assertEqual(1, nc.stats['in_msgs'])
    self.assertEqual(11, nc.stats['in_bytes'])
    self.assertEqual(2, nc.stats['out_msgs'])
    self.assertEqual(22, nc.stats['out_bytes'])

    # Cross-check against the server's /connz monitoring endpoint.
    endpoint = '127.0.0.1:{port}'.format(
        port=self.server_pool[0].http_port)
    httpclient = http.client.HTTPConnection(endpoint, timeout=5)
    httpclient.request('GET', '/connz')
    response = httpclient.getresponse()
    connz = json.loads((response.read()).decode())
    self.assertEqual(1, len(connz['connections']))
    conn = connz['connections'][0]
    self.assertEqual(2, conn['in_msgs'])
    self.assertEqual(22, conn['in_bytes'])
    self.assertEqual(1, conn['out_msgs'])
    self.assertEqual(11, conn['out_bytes'])
def run(loop):
    """Demonstrate '*' (single token) and '>' (tail) subject wildcards."""
    nc = NATS()
    yield from nc.connect(servers=["nats://127.0.0.1:4222"], io_loop=loop)

    @asyncio.coroutine
    def message_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    # "*" matches exactly one token at that position.
    yield from nc.subscribe("foo.*.baz", cb=message_handler)
    yield from nc.subscribe("foo.bar.*", cb=message_handler)

    # ">" matches any length of the tail and can only be the last token,
    # e.g. 'foo.>' matches 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22'.
    yield from nc.subscribe("foo.>", cb=message_handler)

    # This subject matches every subscription above.
    yield from nc.publish("foo.bar.baz", b'Hello World')
    yield from asyncio.sleep(1, loop=loop)
    yield from nc.close()
def run(loop):
    """Publish a timestamp every five seconds until SIGINT/SIGTERM closes NATS."""
    nc = NATS()

    @asyncio.coroutine
    def closed_cb():
        logging.info("Connection to NATS is closed.")
        yield from asyncio.sleep(0.1, loop=loop)
        loop.stop()

    yield from nc.connect(servers=[natsURL], io_loop=loop, closed_cb=closed_cb)
    logging.info("Connected to NATS at {}...".format(nc.connected_url.netloc))

    def signal_handler():
        if nc.is_closed:
            return
        logging.info("Disconnecting...")
        loop.create_task(nc.close())

    # Close gracefully on SIGINT/SIGTERM; closed_cb then stops the loop.
    for sig in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(getattr(signal, sig), signal_handler)

    while True:
        msg = str(time.time())
        logging.info("Publishing to NATS topic: " + topic)
        logging.info("Publishing msg: " + msg)
        yield from nc.publish(topic, msg.encode())
        yield from asyncio.sleep(5, loop=loop)
def test_pending_data_size_flush_on_close(self):
    """close() must flush whatever is still pending in the write buffer."""
    nc = NATS()
    events = {'disconnected': 0, 'reconnected': 0, 'closed': 0}

    @asyncio.coroutine
    def disconnected_cb():
        events['disconnected'] += 1

    @asyncio.coroutine
    def reconnected_cb():
        events['reconnected'] += 1

    @asyncio.coroutine
    def closed_cb():
        events['closed'] += 1

    options = {
        'dont_randomize': True,
        'io_loop': self.loop,
        'disconnected_cb': disconnected_cb,
        'closed_cb': closed_cb,
        'reconnected_cb': reconnected_cb,
        'reconnect_time_wait': 0.01
    }
    yield from nc.connect(**options)

    total_received = 0
    future = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def receiver_cb(msg):
        nonlocal total_received
        total_received += 1
        if total_received == 200:
            future.set_result(True)

    # Second connection subscribed to everything the first one publishes.
    nc2 = NATS()
    yield from nc2.connect(**options)
    yield from nc2.subscribe("example.*", cb=receiver_cb)
    yield from nc2.flush()

    for i in range(0, 200):
        yield from nc.publish("example.{}".format(i), b'A' * 20)

    # Closing the first connection must push out anything still buffered.
    yield from nc.close()

    # Wait for the server to deliver everything to the receiving client.
    yield from asyncio.wait_for(future, 1, loop=self.loop)
    yield from nc2.close()
    self.assertEqual(total_received, 200)
def run(loop):
    """Show how wildcard subscriptions ('*' and '>') all match one subject."""
    nc = NATS()
    yield from nc.connect(io_loop=loop)

    @asyncio.coroutine
    def message_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    # "*" matches exactly one token at that level of the subject.
    yield from nc.subscribe("foo.*.baz", cb=message_handler)
    yield from nc.subscribe("foo.bar.*", cb=message_handler)

    # ">" matches the whole remaining tail and must be the final token,
    # e.g. 'foo.>' matches 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22'.
    yield from nc.subscribe("foo.>", cb=message_handler)

    # One publish that matches all three subscriptions above.
    yield from nc.publish("foo.bar.baz", b"Hello World")
    yield from asyncio.sleep(1, loop=loop)
    yield from nc.close()
def test_unsubscribe(self):
    """After unsubscribe, later publishes are not delivered and stats agree."""
    nc = NATS()
    received = []

    @asyncio.coroutine
    def subscription_handler(msg):
        received.append(msg)

    yield from nc.connect(io_loop=self.loop)
    sid = yield from nc.subscribe("foo", cb=subscription_handler)
    yield from nc.publish("foo", b'A')
    yield from nc.publish("foo", b'B')

    # Wait a bit to receive the messages.
    yield from asyncio.sleep(0.5, loop=self.loop)
    self.assertEqual(2, len(received))
    yield from nc.unsubscribe(sid)
    yield from nc.publish("foo", b'C')
    yield from nc.publish("foo", b'D')

    # Ordering of the delivered pair is preserved.
    self.assertEqual(b'A', received[0].data)
    self.assertEqual(b'B', received[1].data)

    # The subscription record is gone after unsubscribing.
    with self.assertRaises(KeyError):
        nc._subs[sid].received

    yield from asyncio.sleep(1, loop=self.loop)

    # Cross-check counters via the server's /connz endpoint.
    endpoint = '127.0.0.1:{port}'.format(
        port=self.server_pool[0].http_port)
    httpclient = http.client.HTTPConnection(endpoint, timeout=5)
    httpclient.request('GET', '/connz')
    response = httpclient.getresponse()
    connz = json.loads((response.read()).decode())
    self.assertEqual(1, len(connz['connections']))
    conn = connz['connections'][0]
    self.assertEqual(0, conn['subscriptions'])
    self.assertEqual(4, conn['in_msgs'])
    self.assertEqual(4, conn['in_bytes'])
    self.assertEqual(2, conn['out_msgs'])
    self.assertEqual(2, conn['out_bytes'])

    yield from nc.close()
    self.assertEqual(2, nc.stats['in_msgs'])
    self.assertEqual(2, nc.stats['in_bytes'])
    self.assertEqual(4, nc.stats['out_msgs'])
    self.assertEqual(4, nc.stats['out_bytes'])
class MsgBrokerClient:
    """NATS-backed broker client: msgpack payloads validated by a trafaret schema."""

    # Schema every inbound/outbound message must satisfy.
    msg_schema_validator = t.Dict({
        t.Key('user'): t.Int(gte=0),
        t.Key('cmd'): t.String(),
        t.Key('data'): t.Type(dict),
        t.Key('priority', default=100): t.Int(gte=0)
    })

    def __init__(self, server_addr: str, service_name: str):
        self.server_addr = server_addr
        self.service_name = service_name
        self.client = Client()
        # Validated inbound Message objects, filled by _handler.
        self.subscriber_queue = asyncio.Queue()

    async def run_client(self) -> None:
        """Connect to the broker; -1 means retry reconnecting forever."""
        await self.client.connect(servers=[f'nats://{self.server_addr}'],
                                  max_reconnect_attempts=-1)

    async def subscribe(self) -> int:
        """Subscribe to this service's own subject; returns the subscription id."""
        return await self.client.subscribe_async(self.service_name, cb=self._handler)

    async def publish(self, receiver, msg: Dict[str, Any], reply=None) -> None:
        """Validate, pack and send `msg`; invalid messages are logged, not raised."""
        try:
            msg = self.pack_msg(msg)
        except t.DataError:
            logger.exception(f'Bad message: {msg}')
        else:
            if reply:
                # TODO: add a timeout check
                return await self.client.publish_request(receiver, reply, msg)
            else:
                # Fire-and-forget publish.
                asyncio.ensure_future(self.client.publish(receiver, msg))

    @classmethod
    def pack_msg(cls, data: Dict[str, Any]) -> bytes:
        """Validate against the schema and serialize with msgpack."""
        data = cls.msg_schema_validator(data)
        return msgpack.packb(data)

    @classmethod
    def unpack_msg(cls, data: bytes) -> Dict[str, Any]:
        """Deserialize an incoming payload and validate it against the schema."""
        data = msgpack.unpackb(data, encoding='utf-8')
        logger.debug(f'Received message:{data}')
        return cls.msg_schema_validator(data)

    async def _handler(self, msg: 'Msg') -> None:
        # Inbound callback: unpack, wrap, and enqueue; bad payloads are logged.
        try:
            subject = msg.subject
            reply = msg.reply
            data = self.unpack_msg(msg.data)
            msg = Message(subject, data, reply)
        except t.DataError:
            logger.exception(f'Bad message: {msg}')
        else:
            asyncio.ensure_future(self.subscriber_queue.put(msg))

    @property
    def is_connected(self) -> bool:
        # Proxy for the underlying NATS client's connection state.
        return self.client.is_connected
def main(loop):
    """Benchmark: publish `count` messages of `size` bytes in batches and report throughput."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--count', default=DEFAULT_NUM_MSGS, type=int)
    parser.add_argument('-s', '--size', default=DEFAULT_MSG_SIZE, type=int)
    parser.add_argument('-S', '--subject', default='test')
    parser.add_argument('-b', '--batch', default=DEFAULT_BATCH_SIZE, type=int)
    parser.add_argument('--servers', default=[], action='append')
    args = parser.parse_args()

    # Random hex payload of the requested size.
    payload = b''.join(
        ("%01x" % randint(0, 15)).encode() for _ in range(0, args.size))

    servers = args.servers if args.servers else ["nats://127.0.0.1:4222"]

    # Make sure we're connected to a server first..
    nc = NATS()
    try:
        yield from nc.connect(servers=servers, io_loop=loop)
    except Exception as e:
        sys.stderr.write("ERROR: {0}".format(e))
        show_usage_and_die()

    # Start the benchmark
    start = time.time()
    to_send = args.count

    print("Sending {0} messages of size {1} bytes on [{2}]".format(
        args.count, args.size, args.subject))
    while to_send > 0:
        for _ in range(0, args.batch):
            to_send -= 1
            yield from nc.publish(args.subject, payload)
            if (to_send % HASH_MODULO) == 0:
                sys.stdout.write("#")
                sys.stdout.flush()
            if to_send == 0:
                break
        # Minimal pause in between batches sent to server.
        yield from asyncio.sleep(0.00001, loop=loop)

    # Extra roundtrip to try to ensure everything reached the server.
    try:
        yield from nc.flush(DEFAULT_FLUSH_TIMEOUT)
    except ErrTimeout:
        print("Server flush timeout after {0}".format(DEFAULT_FLUSH_TIMEOUT))

    elapsed = time.time() - start
    mbytes = "%.1f" % (((args.size * args.count)/elapsed) / (1024*1024))
    print("\nTest completed : {0} msgs/sec ({1}) MB/sec".format(
        args.count/elapsed, mbytes))
    yield from nc.close()
def main(loop):
    """Throughput benchmark: batch-publish N random-hex messages, then report rates."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--count', default=DEFAULT_NUM_MSGS, type=int)
    parser.add_argument('-s', '--size', default=DEFAULT_MSG_SIZE, type=int)
    parser.add_argument('-S', '--subject', default='test')
    parser.add_argument('-b', '--batch', default=DEFAULT_BATCH_SIZE, type=int)
    parser.add_argument('--servers', default=[], action='append')
    args = parser.parse_args()

    # Build a random hex payload of the requested size.
    payload = b''.join(
        ("%01x" % randint(0, 15)).encode() for _ in range(0, args.size))

    servers = args.servers if args.servers else ["nats://127.0.0.1:4222"]

    # Make sure we're connected to a server first..
    nc = NATS()
    try:
        yield from nc.connect(servers=servers, io_loop=loop)
    except Exception as e:
        sys.stderr.write("ERROR: {0}".format(e))
        show_usage_and_die()

    # Start the benchmark
    start = time.time()
    to_send = args.count

    print("Sending {0} messages of size {1} bytes on [{2}]".format(
        args.count, args.size, args.subject))
    while to_send > 0:
        for _ in range(0, args.batch):
            to_send -= 1
            yield from nc.publish(args.subject, payload)
            if (to_send % HASH_MODULO) == 0:
                sys.stdout.write("#")
                sys.stdout.flush()
            if to_send == 0:
                break
        # Minimal pause in between batches sent to server.
        yield from asyncio.sleep(0.00001, loop=loop)

    # Additional roundtrip with the server to ensure everything has been sent.
    try:
        yield from nc.flush(DEFAULT_FLUSH_TIMEOUT)
    except ErrTimeout:
        print("Server flush timeout after {0}".format(DEFAULT_FLUSH_TIMEOUT))

    elapsed = time.time() - start
    mbytes = "%.1f" % (((args.size * args.count) / elapsed) / (1024 * 1024))
    print("\nTest completed : {0} msgs/sec ({1}) MB/sec".format(
        args.count / elapsed, mbytes))
    yield from nc.close()
def run(loop):
    """Plain-TCP example: limited pub/sub plus queue-group request/reply."""
    nc = NATS()
    yield from nc.connect(io_loop=loop)

    @asyncio.coroutine
    def message_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    # Subscription that stops after two deliveries.
    sid = yield from nc.subscribe("foo", cb=message_handler)
    yield from nc.auto_unsubscribe(sid, 2)
    for chunk in (b'Hello', b'World', b'!!!!!'):
        yield from nc.publish("foo", chunk)

    @asyncio.coroutine
    def help_request(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))
        yield from nc.publish(msg.reply, b'I can help')

    # 'workers' queue group distributes requests among subscribers.
    yield from nc.subscribe("help", "workers", help_request)

    # Single-response request with a 50 ms deadline.
    try:
        response = yield from nc.timed_request("help", b'help me', 0.050)
        print("Received response: {message}".format(message=response.data.decode()))
    except ErrTimeout:
        print("Request timed out")

    yield from asyncio.sleep(1, loop=loop)
    yield from nc.close()
def test_flush(self):
    """flush() forces the buffered publishes out and stats reflect them."""
    nc = NATS()
    yield from nc.connect(io_loop=self.loop)
    for n in range(10):
        yield from nc.publish("flush.%d" % n, b'AA')
    yield from nc.flush()
    self.assertEqual(10, nc.stats['out_msgs'])
    self.assertEqual(20, nc.stats['out_bytes'])
    yield from nc.close()
def __run(self, loop, publisher, data):
    """Connect, publish a single payload to `publisher`, and disconnect."""
    nc = NATS()
    print("before connect")
    yield from nc.connect(servers=["nats://127.0.0.1:4222"], io_loop=loop)
    print("after connect")
    yield from nc.publish(publisher, data)
    yield from nc.close()
    print("done")
def test_pending_data_size_tracking(self):
    """pending_data_size must grow while publishes sit in the write buffer."""
    nc = NATS()
    yield from nc.connect(io_loop=self.loop)
    peak = 0
    for _ in range(100):
        yield from nc.publish("example", b'A' * 100000)
        # Sample the buffer size whenever data is pending.
        if nc.pending_data_size > 0:
            peak = nc.pending_data_size
    self.assertTrue(peak > 0)
    yield from nc.close()
def test_pending_data_size_tracking(self):
    """Large publishes should be observable through pending_data_size."""
    nc = NATS()
    yield from nc.connect(io_loop=self.loop)
    biggest = 0
    for _ in range(100):
        yield from nc.publish("example", b'A' * 100000)
        # Record the pending buffer size whenever it is non-empty.
        if nc.pending_data_size > 0:
            biggest = nc.pending_data_size
    self.assertTrue(biggest > 0)
    yield from nc.close()
def test_close(self):
    """After close(), publish/subscribe/flush all raise ErrConnectionClosed."""
    nc = NATS()
    counts = {'disconnected': 0, 'reconnected': 0, 'closed': 0, 'err': 0}

    @asyncio.coroutine
    def disconnected_cb():
        counts['disconnected'] += 1

    @asyncio.coroutine
    def reconnected_cb():
        counts['reconnected'] += 1

    @asyncio.coroutine
    def closed_cb():
        counts['closed'] += 1

    @asyncio.coroutine
    def err_cb(e):
        counts['err'] += 1

    yield from nc.connect(io_loop=self.loop,
                          disconnected_cb=disconnected_cb,
                          closed_cb=closed_cb,
                          reconnected_cb=reconnected_cb,
                          error_cb=err_cb)
    yield from nc.close()

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.publish("foo", b'A')

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.subscribe("bar", "workers")

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.publish_request("bar", "inbox", b'B')

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.flush()

    # Exactly one disconnect/close cycle, no reconnects or errors.
    self.assertEqual(1, counts['closed'])
    self.assertEqual(1, counts['disconnected'])
    self.assertEqual(0, counts['reconnected'])
    self.assertEqual(0, counts['err'])
def test_close(self):
    """Verify close() semantics and that lifecycle callbacks fire exactly once."""
    nc = NATS()
    disconnected_count = 0
    reconnected_count = 0
    closed_count = 0
    err_count = 0

    @asyncio.coroutine
    def disconnected_cb():
        nonlocal disconnected_count
        disconnected_count += 1

    @asyncio.coroutine
    def reconnected_cb():
        nonlocal reconnected_count
        reconnected_count += 1

    @asyncio.coroutine
    def closed_cb():
        nonlocal closed_count
        closed_count += 1

    @asyncio.coroutine
    def err_cb(e):
        # BUG FIX: error_cb is invoked with the error as an argument; the
        # parameterless signature raised TypeError whenever an error fired
        # (the sibling test_close above already had the correct signature).
        nonlocal err_count
        err_count += 1

    options = {
        'io_loop': self.loop,
        'disconnected_cb': disconnected_cb,
        'closed_cb': closed_cb,
        'reconnected_cb': reconnected_cb,
        'error_cb': err_cb,
    }
    yield from nc.connect(**options)
    yield from nc.close()

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.publish("foo", b'A')

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.subscribe("bar", "workers")

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.publish_request("bar", "inbox", b'B')

    with self.assertRaises(ErrConnectionClosed):
        yield from nc.flush()

    self.assertEqual(1, closed_count)
    self.assertEqual(1, disconnected_count)
    self.assertEqual(0, reconnected_count)
    self.assertEqual(0, err_count)
def pub_random(loop):
    """Publish NUM_MESSAGES JSON payloads to 'p1.s0'.

    BUG FIX: the original never flushed or closed, so trailing messages could
    still be sitting in the client's write buffer when the coroutine (and
    typically the event loop) ended, silently dropping them.
    """
    nc = NATS()
    yield from nc.connect("localhost:4222", loop=loop)
    if nc.last_error:
        print("ERROR received from NATS: ", nc.last_error)
    else:
        print('Submitting random requests')
        for i in range(NUM_MESSAGES):
            jdata = {"i": i}
            yield from nc.publish('p1.s0', json.dumps(jdata).encode('utf-8'))
        # Roundtrip to make sure everything reached the server.
        yield from nc.flush()
    yield from nc.close()
def _publish_event(args):
    """Publish args['payload'] to args['subject'] on the 'nats' host, then close."""
    nc = NATS()
    yield from nc.connect(servers=["nats://nats:4222"], io_loop=loop)
    data = args['payload'].encode()
    yield from nc.publish(args['subject'], data)
    yield from nc.close()
def test_publish(self):
    """100 publishes are accounted for by client stats and the server's /varz."""
    nc = NATS()
    yield from nc.connect(io_loop=self.loop)
    for n in range(100):
        yield from nc.publish("hello.%d" % n, b'A')

    # An empty subject must be rejected.
    with self.assertRaises(ErrBadSubject):
        yield from nc.publish("", b'')

    yield from nc.flush()
    yield from nc.close()
    yield from asyncio.sleep(1, loop=self.loop)
    self.assertEqual(100, nc.stats['out_msgs'])
    self.assertEqual(100, nc.stats['out_bytes'])

    # Cross-check against the server's monitoring endpoint.
    endpoint = '127.0.0.1:{port}'.format(port=self.server_pool[0].http_port)
    httpclient = http.client.HTTPConnection(endpoint, timeout=5)
    httpclient.request('GET', '/varz')
    response = httpclient.getresponse()
    varz = json.loads((response.read()).decode())
    self.assertEqual(100, varz['in_msgs'])
    self.assertEqual(100, varz['in_bytes'])
def run(loop):
    """Publish a sample AWS event onto the 'aws.events' subject."""
    nc = NATS()
    yield from nc.connect(servers=["nats://127.0.0.1:4222"], io_loop=loop)
    event = b'[{"cloudType":"AWS","action":"Item Created", "text":"Event from AWS"}]'
    yield from nc.publish("aws.events", event)
    yield from nc.close()
def test_publish(self):
    """Publish 100 one-byte messages; client stats and server /varz must agree."""
    nc = NATS()
    yield from nc.connect(io_loop=self.loop)
    for n in range(100):
        yield from nc.publish("hello.%d" % n, b'A')

    # Publishing to an empty subject is invalid.
    with self.assertRaises(ErrBadSubject):
        yield from nc.publish("", b'')

    yield from nc.flush()
    yield from nc.close()
    yield from asyncio.sleep(1, loop=self.loop)
    self.assertEqual(100, nc.stats['out_msgs'])
    self.assertEqual(100, nc.stats['out_bytes'])

    # The server saw the same traffic from its side.
    endpoint = '127.0.0.1:{port}'.format(
        port=self.server_pool[0].http_port)
    httpclient = http.client.HTTPConnection(endpoint, timeout=5)
    httpclient.request('GET', '/varz')
    response = httpclient.getresponse()
    varz = json.loads((response.read()).decode())
    self.assertEqual(100, varz['in_msgs'])
    self.assertEqual(100, varz['in_bytes'])
def publish_msg(self, subject, msg, loop): client = NATS() servers = [self.url] options = { "io_loop": loop, "servers": servers, } try: yield from client.connect(**options) yield from client.publish(subject, msg) yield from client.flush() yield from client.close() except Exception as e: print(e) pass
class NATSProducer:
    """Light wrapper on a NATS client used to generate data and push it
    into a NATS subject."""

    def __init__(self, addr, value_serializer=None, loop=None):
        """Connect (blocking) to the NATS server at ``addr``.

        :param value_serializer: callable turning a message into bytes;
            defaults to ``str.encode``.
        :param loop: asyncio event loop; defaults to the current one.
        """
        self.nc = NATS()
        self.value_serializer = value_serializer or (
            lambda value: value.encode())
        self.loop = loop or asyncio.get_event_loop()
        self.loop.run_until_complete(self.connect(addr))

    async def connect(self, addr):
        """Open the connection to the NATS server."""
        await self.nc.connect(addr)

    def send(self, subject, msg):
        """Serialize ``msg`` and publish it on ``subject`` (blocking)."""
        coro = self.nc.publish(subject, self.value_serializer(msg))
        self.loop.run_until_complete(coro)
def run(loop):
    """nats-pub style CLI: parse args, connect and publish one message."""
    # e.g. nats-pub hello -d "world" -s nats://127.0.0.1:4222 -s nats://127.0.0.1:4223
    parser = argparse.ArgumentParser()
    parser.add_argument('subject', default='hello', nargs='?')
    parser.add_argument('-d', '--data', default="hello world")
    parser.add_argument('-s', '--servers', default=[], action='append')
    parser.add_argument('--creds', default="")
    args = parser.parse_args()

    nc = NATS()

    @asyncio.coroutine
    def error_cb(e):
        print("Error:", e)

    @asyncio.coroutine
    def closed_cb():
        print("Connection to NATS is closed.")

    @asyncio.coroutine
    def reconnected_cb():
        print("Connected to NATS at {}...".format(nc.connected_url.netloc))

    conn_opts = {
        "io_loop": loop,
        "error_cb": error_cb,
        "closed_cb": closed_cb,
        "reconnected_cb": reconnected_cb,
    }
    if args.creds:
        conn_opts["user_credentials"] = args.creds
    if args.servers:
        conn_opts['servers'] = args.servers
    try:
        yield from nc.connect(**conn_opts)
    except Exception as e:
        print(e)
        show_usage_and_die()

    print("Connected to NATS at {}...".format(nc.connected_url.netloc))
    yield from nc.publish(args.subject, args.data.encode())
    yield from nc.flush()
    yield from nc.close()
def run(loop):
    """nats-pub style CLI: parse args, connect, publish one message and close."""
    # e.g. nats-pub hello -d "world" -s nats://127.0.0.1:4222 -s nats://127.0.0.1:4223
    parser = argparse.ArgumentParser()
    parser.add_argument('subject', default='hello', nargs='?')
    parser.add_argument('-d', '--data', default="hello world")
    parser.add_argument('-s', '--servers', default=[], action='append')
    args = parser.parse_args()

    nc = NATS()

    @asyncio.coroutine
    def closed_cb():
        print("Connection to NATS is closed.")
        loop.stop()

    @asyncio.coroutine
    def reconnected_cb():
        print("Connected to NATS at {}...".format(nc.connected_url.netloc))

    @asyncio.coroutine
    def subscribe_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    conn_opts = {
        "io_loop": loop,
        "closed_cb": closed_cb,
        "reconnected_cb": reconnected_cb,
    }
    if args.servers:
        conn_opts['servers'] = args.servers
    try:
        yield from nc.connect(**conn_opts)
    except Exception as e:
        print(e)
        show_usage_and_die()

    print("Connected to NATS at {}...".format(nc.connected_url.netloc))
    yield from nc.publish(args.subject, args.data.encode())
    yield from nc.flush()
    yield from nc.close()
class NatsPublisher():
    """Publisher to send information over NATS about devices in the system."""

    def __init__(self, servers, system, event_loop, nats=None):
        """Initialize the publisher.

        :param servers: List of server URIs for connection.
        :param system: The system that we are simulating. We subscribe to
            the stream from that system.
        :param event_loop: The event loop for the asyncio.
        :param nats: If not none, use as the NATS client.
        """
        LOGGER.info("Starting NATS publisher on %s", servers)
        if nats is None:
            self.nc = Nats()
        else:
            self.nc = nats
        self.servers = servers
        self.system = system
        self.event_loop = event_loop

        # Forward every batch of profiles published by the system stream.
        self.system.subscribe(lambda profiles: self.publish_async(profiles))

    async def start(self):
        """Start the publisher."""
        await self.nc.connect(servers=self.servers, loop=self.event_loop)

    def publish_async(self, profile):
        """Handle subscriptions from the observable to send to the event loop.

        This bridges the reactive and async worlds in this application.
        """
        def schedule():
            asyncio.ensure_future(self.publish(profile[0], profile[2]))

        asyncio.get_event_loop().call_soon(schedule)

    @asyncio.coroutine
    def publish(self, device_mrid, profile):
        """Publish called from the asyncio thread to publish profiles.

        :param device_mrid: The MRID of the associated device.
        :param profile: The profile encoded as an OpenFMB protobuf object.
        """
        target_subject = profile_to_subject(str(device_mrid), profile)
        yield from self.nc.publish(target_subject, profile.SerializeToString())
def add_subscription(loop):
    """Connect to the local NATS server, publish a subscription request and
    listen for replies on 'crypto-compare'."""
    nc = NATS()

    @asyncio.coroutine
    def closed_cb():
        print("Connection to NATS is closed.")
        yield from asyncio.sleep(0.1, loop=loop)
        loop.stop()

    yield from nc.connect(servers=["nats://127.0.0.1:4222"],
                          io_loop=loop,
                          closed_cb=closed_cb)
    print("Connected to NATS at {}...".format(nc.connected_url.netloc))

    # PUBLISH
    print('Publishing some stuff.')
    yield from nc.publish('cc-subscriptions', b'5~CCCAGG~ETH~USD, 5~CCCAGG~BTC~USD')

    def signal_handler():
        if nc.is_closed:
            return
        print("Disconnecting...")
        loop.create_task(nc.close())

    # Close the connection gracefully on Ctrl-C / termination.
    for sig_name in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(getattr(signal, sig_name), signal_handler)

    @asyncio.coroutine
    def message_handler(msg):
        print("Received a message on '{subject} {reply}': {data}".format(
            subject=msg.subject, reply=msg.reply, data=msg.data.decode()))

    yield from nc.subscribe('crypto-compare', cb=message_handler)
def test_subscribe_sync_call_soon(self): nc = NATS() msgs = [] def subscription_handler(msg): msgs.append(msg) yield from nc.connect(io_loop=self.loop) sid = yield from nc.subscribe("tests.>", cb=subscription_handler) for i in range(0, 5): yield from nc.publish("tests.{}".format(i), b'bar') # Wait a bit for messages to be received. yield from asyncio.sleep(1, loop=self.loop) self.assertEqual(5, len(msgs)) # Check that they were received sequentially. self.assertEqual("tests.1", msgs[1].subject) self.assertEqual("tests.3", msgs[3].subject) yield from nc.close()
async def publishNATS(loop, msg):
    """Publish ``msg`` on the ``subject_mem`` subject of the configured NATS server.

    The server address comes from the NATS_SERVER environment variable; when
    unset we assume the server runs on the host machine and resolve its IP.

    :param loop: asyncio event loop to run the client on.
    :param msg: message payload (str); encoded as UTF-8 before publishing.
    """
    nats_server = os.getenv('NATS_SERVER')
    # If no environment variable NATS_SERVER, assume NATS server runs on host machine
    if nats_server is None:  # fixed: identity check instead of '== None'
        nats_server = get_host_ip()
    print("NATS server: {}".format(nats_server))
    nats_conn = NATS()

    async def error_cb(e):
        print("Error:", e)

    async def closed_cb():
        print("Connection to NATS is closed.")

    async def reconnected_cb():
        print("Reconnected to NATS at {} ...".format(
            nats_conn.connected_url.netloc))

    options = {
        "loop": loop,
        "error_cb": error_cb,
        "closed_cb": closed_cb,
        "reconnected_cb": reconnected_cb,
        "servers": ["nats://{}:4222".format(nats_server)]
    }
    try:
        await nats_conn.connect(**options)
    except Exception as e:
        # Fixed: bail out on connection failure. Previously execution fell
        # through and dereferenced nats_conn.connected_url.netloc, raising a
        # confusing AttributeError instead of reporting the real error.
        print(e)
        return
    print("Connected to NATS at {} ...".format(nats_conn.connected_url.netloc))
    encoded_msg = msg.encode('utf-8')
    await nats_conn.publish(subject_mem, encoded_msg)
    await nats_conn.flush()
    print("Message published with subject {}: {}".format(subject_mem, msg))
    await nats_conn.close()
def test_async_await_subscribe_sync(self): nc = NATS() msgs = [] async def subscription_handler(msg): if msg.subject == "tests.1": await asyncio.sleep(0.5, loop=self.loop) if msg.subject == "tests.3": await asyncio.sleep(0.2, loop=self.loop) msgs.append(msg) yield from nc.connect(io_loop=self.loop) sid = yield from nc.subscribe_async("tests.>", cb=subscription_handler) for i in range(0, 5): yield from nc.publish("tests.{}".format(i), b'bar') # Wait a bit for messages to be received. yield from asyncio.sleep(1, loop=self.loop) self.assertEqual(5, len(msgs)) self.assertEqual("tests.1", msgs[4].subject) self.assertEqual("tests.3", msgs[3].subject) yield from nc.close()
class Requester(pykka.ThreadingActor):
    """
    NATS requester implementation.

    Runs a private asyncio event loop on a dedicated thread and drives the
    NATS client coroutines from the actor thread via ``next(...)`` calls.
    Supports fire-and-forget publishing, RPC with timeout/retry, and
    splitting of requests larger than the server's max payload.

    :param my_args: dict like {connection, request_q}
    """
    def __init__(self, my_args=None, connection_args=None):
        """
        NATS requester constructor
        :param my_args: dict like {connection, request_q}
        :param connection_args: dict like {user, password, host[, port, client_properties]}
        :return: self
        """
        LOGGER.debug("natsd.Requester.__init__")
        if my_args is None:
            raise exceptions.ArianeConfError("requestor arguments")
        if 'request_q' not in my_args or my_args['request_q'] is None or not my_args['request_q']:
            raise exceptions.ArianeConfError("request_q")
        # fire_and_forget: when truthy, publish without waiting for replies.
        if 'fire_and_forget' not in my_args or my_args['fire_and_forget'] is None or not my_args['fire_and_forget']:
            self.fire_and_forget = False
        else:
            self.fire_and_forget = True
        if 'rpc_timeout' not in connection_args or connection_args['rpc_timeout'] is None or \
                not connection_args['rpc_timeout']:
            # default timeout = no timeout
            self.rpc_timeout = 0
        else:
            self.rpc_timeout = connection_args['rpc_timeout']
        # Max consecutive RPC timeouts tolerated before the requester restarts itself.
        if 'rpc_timeout_err_count_max' not in connection_args or connection_args['rpc_timeout_err_count_max'] is None \
                or not connection_args['rpc_timeout_err_count_max']:
            self.rpc_retry_timeout_err_count_max = 3
        else:
            self.rpc_retry_timeout_err_count_max = connection_args['rpc_timeout_err_count_max']
        self.rpc_retry_timeout_err_count = 0
        if 'rpc_retry' not in connection_args or connection_args['rpc_retry'] is None or \
                not connection_args['rpc_retry']:
            # default retry = no retry
            self.rpc_retry = 0
        else:
            self.rpc_retry = connection_args['rpc_retry']
        Driver.validate_driver_conf(connection_args)
        super(Requester, self).__init__()
        self.connection_args = copy.deepcopy(connection_args)
        self.servers = [
            "nats://" + connection_args['user'] + ":" + connection_args['password'] + "@" +
            connection_args['host']+":"+str(connection_args['port'])
        ]
        self.name = self.connection_args['client_properties']['ariane.app'] + "@" + socket.gethostname() + \
            " - requestor on " + my_args['request_q']
        self.loop = None            # private asyncio loop, created in run_event_loop
        self.options = None         # NATS connect options, built in run_event_loop
        self.service = None         # thread running the event loop
        self.nc = Client()
        self.requestQ = my_args['request_q']
        self.responseQ = None       # reply inbox subject (RPC mode only)
        self.responseQS = None      # subscription id on the reply inbox
        self.response = None        # last response, set by on_response
        self.split_responses = None       # accumulator for multi-part responses
        self.split_responses_mid = None   # split-group id currently being accumulated
        self.is_started = False
        self.trace = False
        self.max_payload = 0
        if not self.fire_and_forget:
            self.responseQ = new_inbox()
        self.response = None
        self.corr_id = None

    def connect(self):
        """Coroutine: connect to NATS and subscribe the reply inbox (RPC mode)."""
        LOGGER.debug("natsd.Requester.connect")
        try:
            yield from self.nc.connect(**self.options)
            if not self.fire_and_forget:
                self.responseQS = yield from self.nc.subscribe(self.responseQ, cb=self.on_response)
            # NOTE(review): reads the client's private _max_payload attribute — confirm
            # against the nats client version in use.
            self.max_payload = self.nc._max_payload
            self.is_started = True
        except ErrNoServers as e:
            print(e)
            return

    def run_event_loop(self):
        """Thread target: create a fresh asyncio loop, connect, and run forever."""
        LOGGER.debug("natsd.Requester.run_event_loop")
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.options = {
            "servers": self.servers,
            "name": self.name,
            # "disconnected_cb": self.disconnected_cb,
            # "reconnected_cb": self.reconnected_cb,
            # "error_cb": self.error_cb,
            # "closed_cb": self.closed_cb,
            "io_loop": self.loop,
        }
        self.loop.create_task(self.connect())
        self.loop.run_forever()

    def on_start(self):
        """
        start requester
        """
        LOGGER.debug("natsd.Requester.on_start")
        self.service = threading.Thread(target=self.run_event_loop,
                                        name=self.requestQ + " requestor thread")
        self.service.start()
        # Block until connect() has completed on the loop thread.
        while not self.is_started:
            time.sleep(0.01)

    def on_stop(self):
        """
        stop requester
        """
        LOGGER.debug("natsd.Requester.on_stop")
        self.is_started = False
        # The NATS coroutines are driven by next(...): advancing them once is
        # enough here, and StopIteration signals normal completion.
        try:
            LOGGER.debug("natsd.Requester.on_stop - unsubscribe from " + str(self.responseQS))
            next(self.nc.unsubscribe(self.responseQS))
        except StopIteration as e:
            pass
        try:
            LOGGER.debug("natsd.Requester.on_stop - close nats connection")
            next(self.nc.close())
        except StopIteration as e:
            pass
        LOGGER.debug("natsd.Requester.on_stop - nc is closed: " + str(self.nc.is_closed))
        try:
            LOGGER.debug("natsd.Requester.on_stop - cancelling aio tasks loop")
            loop_to_stop = self.loop
            for task in asyncio.Task.all_tasks(loop_to_stop):
                LOGGER.debug("natsd.Requester.on_stop - cancelling task " + str(task))
                task.cancel()
            LOGGER.debug("natsd.Requester.on_stop - stopping aio loop stop")
            loop_to_stop.stop()
            count = 0
            # Poll up to 120s for the loop thread to wind down, re-cancelling
            # surviving tasks every 10 iterations.
            while loop_to_stop.is_running():
                count += 1
                if count % 10 == 0:
                    LOGGER.debug("natsd.Requester.on_stop - waiting aio loop to be stopped (" +
                                 str(asyncio.Task.all_tasks(loop_to_stop).__len__()) + " tasks left; " +
                                 "current task: " + str(asyncio.Task.current_task(loop_to_stop)) + ")")
                    for task in asyncio.Task.all_tasks(loop_to_stop):
                        LOGGER.debug("natsd.Requester.on_stop - cancelling task " + str(task))
                        task.cancel()
                time.sleep(1)
                if count == 120:
                    LOGGER.error("natsd.Requester.on_stop - unable to stop aio loop after 120 sec (" +
                                 str(asyncio.Task.all_tasks(loop_to_stop).__len__()) + " tasks left; " +
                                 "current task: " + str(asyncio.Task.current_task(loop_to_stop)) + ")")
                    break
            if not loop_to_stop.is_running():
                LOGGER.debug("natsd.Requester.on_stop - close aio loop")
                loop_to_stop.close()
        except Exception as e:
            LOGGER.warn("natsd.Requester.on_stop - exception on aio clean : " + traceback.format_exc())

    def _restart_on_error(self):
        """Stop the requester on a worker thread, wait for the connection to
        close, then start it again. Resets the timeout error counter."""
        LOGGER.debug("natsd.Requester._restart_on_error - restart begin !")
        stop_thread = threading.Thread(target=self.on_stop,
                                       name=self.requestQ + " restarter.stop on error thread")
        stop_thread.start()
        while not self.nc.is_closed:
            LOGGER.debug("natsd.Requester._restart_on_error - waiting nc to be closed")
            time.sleep(1)
        self.on_start()
        self.rpc_retry_timeout_err_count = 0
        LOGGER.debug("natsd.Requester._restart_on_error - restart end !")

    def _restart_after_max_timeout_err_count(self):
        """Kick off _restart_on_error on its own thread (non-blocking)."""
        restarter = threading.Thread(target=self._restart_on_error,
                                     name=self.requestQ + " restarter on error thread")
        restarter.start()

    def on_failure(self, exception_type, exception_value, traceback_):
        """Pykka failure hook: tear down NATS subscription, connection and the
        private event loop after an unhandled actor exception."""
        LOGGER.error("natsd.Requester.on_failure - " + exception_type.__str__() + "/" +
                     exception_value.__str__())
        LOGGER.error("natsd.Requester.on_failure - " + traceback_.format_exc())
        self.is_started = False
        try:
            next(self.nc.unsubscribe(self.responseQS))
        except StopIteration as e:
            pass
        try:
            next(self.nc.close())
        except StopIteration as e:
            pass
        try:
            loop_to_stop = self.loop
            for task in asyncio.Task.all_tasks(loop_to_stop):
                task.cancel()
            loop_to_stop.stop()
            while loop_to_stop.is_running():
                time.sleep(1)
            loop_to_stop.close()
        except Exception as e:
            LOGGER.debug("natsd.Requester.on_failure - exception on aio clean : " + traceback.format_exc())

    def on_response(self, msg):
        """
        setup response if correlation id is the good one

        Reassembles multi-part (split) responses keyed by split group id, and
        discards messages whose correlation id does not match the in-flight
        request.
        """
        LOGGER.debug("natsd.Requester.on_response: " + str(sys.getsizeof(msg)) + " bytes received")
        working_response = json.loads(msg.data.decode())
        working_properties = DriverTools.json2properties(working_response['properties'])
        working_body = b''+bytes(working_response['body'], 'utf8') if 'body' in working_response else None
        if DriverTools.MSG_CORRELATION_ID in working_properties:
            if self.corr_id == working_properties[DriverTools.MSG_CORRELATION_ID]:
                if DriverTools.MSG_SPLIT_COUNT in working_properties and \
                        int(working_properties[DriverTools.MSG_SPLIT_COUNT]) > 1:
                    # Multi-part response: accumulate parts by split offset id.
                    working_body_decoded = base64.b64decode(working_body) if working_body is not None else None
                    if self.split_responses is None:
                        self.split_responses = []
                        self.split_responses_mid = working_properties[DriverTools.MSG_SPLIT_MID]
                    if working_properties[DriverTools.MSG_SPLIT_MID] == self.split_responses_mid:
                        response = {
                            'properties': working_properties,
                            'body': working_body_decoded
                        }
                        self.split_responses.insert(int(working_properties[DriverTools.MSG_SPLIT_OID]), response)
                        if self.split_responses.__len__() == int(working_properties[DriverTools.MSG_SPLIT_COUNT]):
                            # All parts received: merge properties and concatenate bodies.
                            properties = {}
                            body = b''
                            for num in range(0, self.split_responses.__len__()):
                                properties.update(self.split_responses[num]['properties'])
                                body += self.split_responses[num]['body']
                            self.response = {
                                'properties': properties,
                                'body': body
                            }
                            self.split_responses = None
                            self.split_responses_mid = None
                    else:
                        LOGGER.warn("natsd.Requester.on_response - discarded response : (" +
                                    str(working_properties[DriverTools.MSG_CORRELATION_ID]) + "," +
                                    str(working_properties[DriverTools.MSG_SPLIT_MID]) + ")")
                        LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({
                            'properties': working_properties,
                            'body': working_body_decoded
                        }))
                else:
                    # Single-part response; empty body defaults to serialized '{}'.
                    working_body_decoded = base64.b64decode(working_body) if working_body is not None else \
                        bytes(json.dumps({}), 'utf8')
                    self.response = {
                        'properties': working_properties,
                        'body': working_body_decoded
                    }
            else:
                working_body_decoded = base64.b64decode(working_body) if working_body is not None else None
                LOGGER.warn("natsd.Requester.on_response - discarded response : " +
                            str(working_properties[DriverTools.MSG_CORRELATION_ID]))
                LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({
                    'properties': working_properties,
                    'body': working_body_decoded
                }))
        else:
            working_body_decoded = base64.b64decode(working_body) if working_body is not None else None
            LOGGER.warn("natsd.Requester.on_response - discarded response (no correlation ID)")
            LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({
                'properties': working_properties,
                'body': working_body_decoded
            }))

    def _split_msg(self, split_mid, properties, body):
        """Split a request that exceeds the NATS max payload into several
        messages tagged with a split group id (mid), a per-message offset id
        (oid) and the total split count.

        :param split_mid: split group id shared by all parts.
        :param properties: the request properties dict.
        :param body: the (unencoded) request body string.
        :return: list of JSON-encoded message byte strings.
        """
        messages = []
        in_progress_messages = []
        msg_counter = 0
        # Properties still to be distributed over the parts; routing/tracking
        # properties are replicated on every part instead.
        in_progress_properties_field = copy.deepcopy(properties)
        if DriverTools.MSG_MESSAGE_ID in in_progress_properties_field:
            in_progress_properties_field.pop(DriverTools.MSG_MESSAGE_ID)
        if DriverTools.MSG_CORRELATION_ID in in_progress_properties_field:
            in_progress_properties_field.pop(DriverTools.MSG_CORRELATION_ID)
        if DriverTools.MSG_TRACE in in_progress_properties_field:
            in_progress_properties_field.pop(DriverTools.MSG_TRACE)
        if DriverTools.MSG_REPLY_TO in in_progress_properties_field:
            in_progress_properties_field.pop(DriverTools.MSG_REPLY_TO)
        wip_body = body
        wip_body_len = sys.getsizeof(wip_body)
        consumed_body_offset = 0
        while (wip_body_len - consumed_body_offset) > 0 or in_progress_properties_field.__len__() > 0:
            # consume properties first :
            splitted_msg_size = 0
            splitted_properties = {}
            if DriverTools.MSG_MESSAGE_ID in properties:
                splitted_properties[DriverTools.MSG_MESSAGE_ID] = properties[DriverTools.MSG_MESSAGE_ID]
            if DriverTools.MSG_CORRELATION_ID in properties:
                splitted_properties[DriverTools.MSG_CORRELATION_ID] = properties[DriverTools.MSG_CORRELATION_ID]
            if DriverTools.MSG_TRACE in properties:
                splitted_properties[DriverTools.MSG_TRACE] = properties[DriverTools.MSG_TRACE]
            if DriverTools.MSG_REPLY_TO in properties:
                splitted_properties[DriverTools.MSG_REPLY_TO] = properties[DriverTools.MSG_REPLY_TO]
            splitted_properties[DriverTools.MSG_SPLIT_MID] = split_mid
            # Placeholder count; the real count is patched in after the loop.
            splitted_properties[DriverTools.MSG_SPLIT_COUNT] = sys.maxsize
            splitted_properties[DriverTools.MSG_SPLIT_OID] = msg_counter
            splitted_typed_properties = None
            for key, value in properties.items():
                if key in in_progress_properties_field.keys():
                    # Tentatively add the property and keep it only if the
                    # serialized message still fits under max_payload.
                    splitted_properties[key] = value
                    tmp_splitted_typed_properties = []
                    for skey, svalue in splitted_properties.items():
                        tmp_splitted_typed_properties.append(DriverTools.property_params(skey, svalue))
                    msg_data = json.dumps({
                        'properties': tmp_splitted_typed_properties
                    })
                    msgb = b''+bytes(msg_data, 'utf8')
                    tmp_splitted_msg_size = sys.getsizeof(msgb)
                    if tmp_splitted_msg_size < self.max_payload:
                        splitted_typed_properties = tmp_splitted_typed_properties
                        in_progress_properties_field.pop(key)
                    else:
                        splitted_properties.pop(key)
            msg_data = json.dumps({
                'properties': splitted_typed_properties
            })
            msgb = b''+bytes(msg_data, 'utf8')
            splitted_msg_size = sys.getsizeof(msgb)
            # then body
            splitted_body = None
            if wip_body_len > 0:
                # Fill the remaining payload budget with a body chunk, then
                # shrink it until the serialized message fits.
                chunk_size = self.max_payload - splitted_msg_size
                if chunk_size > (wip_body_len - consumed_body_offset):
                    chunk_size = wip_body_len - consumed_body_offset
                splitted_body = wip_body[consumed_body_offset:consumed_body_offset+chunk_size]
                msg_data = json.dumps({
                    'properties': splitted_typed_properties,
                    'body': base64.b64encode(b''+bytes(splitted_body, 'utf8')).decode("utf-8")
                })
                msgb = b''+bytes(msg_data, 'utf8')
                tmp_splitted_msg_size = sys.getsizeof(msgb)
                while tmp_splitted_msg_size > self.max_payload:
                    chunk_size -= (tmp_splitted_msg_size - self.max_payload + 1)
                    splitted_body = wip_body[consumed_body_offset:consumed_body_offset+chunk_size]
                    msg_data = json.dumps({
                        'properties': splitted_typed_properties,
                        'body': base64.b64encode(b''+bytes(splitted_body, 'utf8')).decode("utf-8")
                    })
                    msgb = b''+bytes(msg_data, 'utf8')
                    tmp_splitted_msg_size = sys.getsizeof(msgb)
                consumed_body_offset += chunk_size
            # add splitted message into in_progress_messages
            if splitted_body is not None:
                in_progress_messages.append({
                    'properties': splitted_properties,
                    'body': base64.b64encode(b''+bytes(splitted_body, 'utf8')).decode("utf-8")
                })
            else:
                in_progress_messages.append({
                    'properties': splitted_properties,
                    'body': ''
                })
            msg_counter += 1
        # Second pass: patch the final split count into every part and serialize.
        for message in in_progress_messages:
            message['properties'][DriverTools.MSG_SPLIT_COUNT] = msg_counter
            typed_properties = []
            for skey, svalue in message['properties'].items():
                typed_properties.append(DriverTools.property_params(skey, svalue))
            if 'body' in message:
                msg_data = json.dumps({
                    'properties': typed_properties,
                    'body': message['body']
                })
            else:
                msg_data = json.dumps({
                    'properties': typed_properties,
                    'body': ''
                })
            msgb = b''+bytes(msg_data, 'utf8')
            messages.append(msgb)
        return messages

    def _init_split_msg_group(self, split_mid, msg_split_dest):
        """Tell the remote service a split message group is starting.

        Temporarily forces RPC mode (waits for acknowledgement) and restores
        the previous fire_and_forget flag and correlation id afterwards.
        """
        args = {'properties': {DriverTools.OPERATION_FDN: DriverTools.OP_MSG_SPLIT_FEED_INIT,
                               DriverTools.PARAM_MSG_SPLIT_MID: split_mid,
                               DriverTools.PARAM_MSG_SPLIT_FEED_DEST: msg_split_dest}}
        fire_and_forget_changed = False
        if self.fire_and_forget:
            fire_and_forget_changed = True
            self.fire_and_forget = False
        previous_corr_id = self.corr_id
        self.call(my_args=args)
        self.response = None
        self.corr_id = previous_corr_id
        if fire_and_forget_changed:
            self.fire_and_forget = True

    def _end_split_msg_group(self, split_mid):
        """Tell the remote service the split message group is complete.

        Same fire_and_forget / correlation id save-and-restore dance as
        _init_split_msg_group.
        """
        args = {'properties': {DriverTools.OPERATION_FDN: DriverTools.OP_MSG_SPLIT_FEED_END,
                               DriverTools.PARAM_MSG_SPLIT_MID: split_mid}}
        fire_and_forget_changed = False
        if self.fire_and_forget:
            fire_and_forget_changed = True
            self.fire_and_forget = False
        previous_corr_id = self.corr_id
        self.call(my_args=args)
        self.response = None
        self.corr_id = previous_corr_id
        if fire_and_forget_changed:
            self.fire_and_forget = True

    def call(self, my_args=None):
        """
        setup the request and call the remote service. Wait the answer (blocking call)
        :param my_args: dict like {properties, body}
        :return response
        """
        if not self.is_started:
            raise ArianeError('natsd.Requester.call',
                              'Requester not started !')
        LOGGER.debug("natsd.Requester.call")
        if my_args is None:
            raise exceptions.ArianeConfError("requestor call arguments")
        if 'properties' not in my_args or my_args['properties'] is None:
            raise exceptions.ArianeConfError('requestor call properties')
        if 'body' not in my_args or my_args['body'] is None:
            my_args['body'] = ''

        self.response = None
        # In RPC mode, (re)use or mint the correlation id matched by on_response.
        if not self.fire_and_forget:
            if DriverTools.MSG_CORRELATION_ID not in my_args['properties']:
                self.corr_id = str(uuid.uuid4())
                properties = my_args['properties']
                properties[DriverTools.MSG_CORRELATION_ID] = self.corr_id
            else:
                properties = my_args['properties']
                self.corr_id = properties[DriverTools.MSG_CORRELATION_ID]
        else:
            properties = my_args['properties']

        # Session-pinned requests go to a session-specific queue.
        if 'sessionID' in properties and properties['sessionID'] is not None and properties['sessionID']:
            request_q = str(properties['sessionID']) + '-' + self.requestQ
        else:
            request_q = self.requestQ

        if self.trace:
            properties[DriverTools.MSG_TRACE] = True

        typed_properties = []
        for key, value in properties.items():
            typed_properties.append(DriverTools.property_params(key, value))

        body = my_args['body']
        if body:
            body = base64.b64encode(b''+bytes(body, 'utf8')).decode("utf-8")

        msg_data = json.dumps({
            'properties': typed_properties,
            'body': body
        })
        msgb = b''+bytes(msg_data, 'utf8')

        # Oversized requests are split into a message group.
        split_mid = None
        messages = []
        if sys.getsizeof(msgb) > self.max_payload:
            split_mid = str(uuid.uuid4())
            messages = self._split_msg(split_mid, properties, my_args['body'])
        else:
            messages.append(msgb)

        if not self.fire_and_forget:
            if split_mid is not None and ('sessionID' not in properties or properties['sessionID'] is None or
                                          not properties['sessionID']):
                # Dedicated queue per split group, announced to the service first.
                request_q += "_" + split_mid
                self._init_split_msg_group(split_mid, request_q)
            for msgb in messages:
                try:
                    LOGGER.debug("natsd.Requester.call - publish splitted request " + str(typed_properties) +
                                 " (size: " + str(sys.getsizeof(msgb)) + " bytes) on " + request_q)
                    next(self.nc.publish_request(request_q, self.responseQ, msgb))
                except StopIteration as e:
                    pass
            LOGGER.debug("natsd.Requester.call - waiting answer from " + self.responseQ)
        else:
            try:
                LOGGER.debug("natsd.Requester.call - publish request " + str(typed_properties) +
                             " on " + request_q)
                next(self.nc.publish(request_q, b''+bytes(msg_data, 'utf8')))
            except StopIteration as e:
                pass
        try:
            next(self.nc.flush(1))
        except StopIteration as e:
            pass

        start_time = timeit.default_timer()
        if not self.fire_and_forget:
            # Wait rpc_timeout sec before raising error
            if self.rpc_timeout > 0:
                exit_count = self.rpc_timeout * 100
            else:
                exit_count = 1
            # Poll every 10ms; with no timeout configured, exit_count stays at
            # 1 and the loop spins until a response arrives.
            while self.response is None and exit_count > 0:
                time.sleep(0.01)
                if self.rpc_timeout > 0:
                    exit_count -= 1

            if self.response is None:
                if self.rpc_retry > 0:
                    if 'retry_count' not in my_args:
                        my_args['retry_count'] = 1
                        LOGGER.debug("natsd.Requester.call - Retry (" + str(my_args['retry_count']) + ")")
                        return self.call(my_args)
                    elif 'retry_count' in my_args and (self.rpc_retry - my_args['retry_count']) > 0:
                        LOGGER.warn("natsd.Requester.call - No response returned from request on " + request_q +
                                    " queue after " + str(self.rpc_timeout) + '*' + str(self.rpc_retry) +
                                    " sec ...")
                        # Enable tracing on the retried request to help diagnosis.
                        self.trace = True
                        my_args['retry_count'] += 1
                        LOGGER.warn("natsd.Requester.call - Retry (" + str(my_args['retry_count']) + ")")
                        return self.call(my_args)
                    else:
                        # Retry budget exhausted: count the failure and maybe restart.
                        self.rpc_retry_timeout_err_count += 1
                        if self.rpc_retry_timeout_err_count >= self.rpc_retry_timeout_err_count_max:
                            self._restart_after_max_timeout_err_count()
                        raise ArianeMessagingTimeoutError('natsd.Requester.call',
                                                          'Request timeout (' + str(self.rpc_timeout) + '*' +
                                                          str(self.rpc_retry) + ' sec) occured')
                else:
                    self.rpc_retry_timeout_err_count += 1
                    if self.rpc_retry_timeout_err_count >= self.rpc_retry_timeout_err_count_max:
                        self._restart_after_max_timeout_err_count()
                    raise ArianeMessagingTimeoutError('natsd.Requester.call',
                                                      'Request timeout (' + str(self.rpc_timeout) + '*' +
                                                      str(self.rpc_retry) + ' sec) occured')

            rpc_time = timeit.default_timer()-start_time
            LOGGER.debug('natsd.Requester.call - RPC time : ' + str(rpc_time))
            if self.rpc_timeout > 0 and rpc_time > self.rpc_timeout*3/5:
                LOGGER.debug('natsd.Requester.call - slow RPC time (' + str(rpc_time) + ') on request ' +
                             str(typed_properties))
            self.trace = False
            self.rpc_retry_timeout_err_count = 0

            rc_ = int(self.response['properties']['RC'])
            if rc_ != 0:
                try:
                    content = json.loads(self.response['body'].decode("UTF-8"))
                except ValueError:
                    content = self.response['body'].decode("UTF-8")
                dr = DriverResponse(
                    rc=rc_,
                    error_message=self.response['properties']['SERVER_ERROR_MESSAGE']
                    if 'SERVER_ERROR_MESSAGE' in self.response['properties'] else '',
                    response_content=content
                )
            else:
                try:
                    if DriverTools.MSG_PROPERTIES in self.response['properties']:
                        props = json.loads(self.response['properties'][DriverTools.MSG_PROPERTIES])
                    else:
                        props = None
                except ValueError:
                    # NOTE(review): self.response['props'] looks like a typo for
                    # self.response['properties'] — on this path it would raise
                    # KeyError; confirm and fix in a dedicated change.
                    if DriverTools.MSG_PROPERTIES in self.response['properties']:
                        props = self.response['props'][DriverTools.MSG_PROPERTIES]
                    else:
                        props = None
                try:
                    content = json.loads(self.response['body'].decode("UTF-8"))
                except ValueError:
                    content = self.response['body'].decode("UTF-8")
                dr = DriverResponse(
                    rc=rc_,
                    response_properties=props,
                    response_content=content
                )
            if split_mid is not None and ('sessionID' not in properties or properties['sessionID'] is None or
                                          not properties['sessionID']):
                # Close the split group and restore the original queue name.
                self._end_split_msg_group(split_mid)
                request_q = request_q.split("_" + split_mid)[0]
            return dr
    def test_custom_flush_queue_reconnect(self):
        """With a bounded flusher queue, pending data must be flushed before a
        server failover, and the client must reconnect to the next server."""
        nc = NATS()

        disconnected_count = 0
        reconnected_count = 0
        closed_count = 0
        err_count = 0  # NOTE(review): declared but never incremented/asserted

        @asyncio.coroutine
        def disconnected_cb():
            nonlocal disconnected_count
            disconnected_count += 1

        @asyncio.coroutine
        def reconnected_cb():
            nonlocal reconnected_count
            reconnected_count += 1

        @asyncio.coroutine
        def closed_cb():
            nonlocal closed_count
            closed_count += 1

        # Two-server pool, tried in order (dont_randomize), with a small
        # flusher queue so pending data builds up quickly.
        options = {
            'servers': [
                "nats://*****:*****@127.0.0.1:4223",
                "nats://*****:*****@127.0.0.1:4224"
            ],
            'dont_randomize': True,
            'io_loop': self.loop,
            'disconnected_cb': disconnected_cb,
            'closed_cb': closed_cb,
            'reconnected_cb': reconnected_cb,
            'flusher_queue_size': 100,
            'reconnect_time_wait': 0.01
        }
        yield from nc.connect(**options)
        largest_pending_data_size = 0
        post_flush_pending_data = None
        done_once = False

        @asyncio.coroutine
        def cb(msg):
            pass

        yield from nc.subscribe("example.*", cb=cb)

        for i in range(0, 500):
            yield from nc.publish("example.{}".format(i), b'A' * 20)
            if nc.pending_data_size > 0:
                largest_pending_data_size = nc.pending_data_size
            if nc.pending_data_size > 100:
                # Stop the first server and connect to another one asap.
                if not done_once:
                    yield from nc.flush(2)
                    post_flush_pending_data = nc.pending_data_size
                    yield from self.loop.run_in_executor(None, self.server_pool[0].stop)
                    done_once = True

        # Data accumulated at some point, and flush drained it completely.
        self.assertTrue(largest_pending_data_size > 0)
        self.assertTrue(post_flush_pending_data == 0)

        # Confirm we have reconnected eventually
        for i in range(0, 10):
            yield from asyncio.sleep(0, loop=self.loop)
            yield from asyncio.sleep(0.2, loop=self.loop)
            yield from asyncio.sleep(0, loop=self.loop)

        self.assertEqual(1, nc.stats['reconnects'])
        try:
            yield from nc.flush(2)
        except ErrTimeout:
            # If disconnect occurs during this flush, then we will have a timeout here
            pass
        finally:
            yield from nc.close()

        self.assertTrue(disconnected_count >= 1)
        self.assertTrue(closed_count >= 1)
def run(loop):
    """Benchmark publishing 1M messages against a NATS cluster with
    disconnect/reconnect/error/closed callbacks wired up."""
    nc = NATS()

    @asyncio.coroutine
    def disconnected_cb():
        print("Got disconnected!")

    @asyncio.coroutine
    def reconnected_cb():
        # See who we are connected to on reconnect.
        print("Got reconnected to {url}".format(url=nc.connected_url.netloc))

    @asyncio.coroutine
    def error_cb(e):
        print("There was an error: {}".format(e))

    @asyncio.coroutine
    def closed_cb():
        print("Connection is closed")

    # Pool of servers from a NATS cluster, tried in configuration order
    # (dont_randomize). 5 attempts * 2s wait = 10 seconds total per backend.
    options = {
        "servers": [
            "nats://*****:*****@127.0.0.1:4222",
            "nats://*****:*****@127.0.0.1:4223",
            "nats://*****:*****@127.0.0.1:4224",
        ],
        "io_loop": loop,
        "dont_randomize": True,
        "max_reconnect_attempts": 5,
        "reconnect_time_wait": 2,
        "disconnected_cb": disconnected_cb,
        "reconnected_cb": reconnected_cb,
        "error_cb": error_cb,
        "closed_cb": closed_cb,
    }

    try:
        yield from nc.connect(**options)
    except ErrNoServers as e:
        # Could not connect to any server in the cluster.
        print(e)
        return

    if nc.is_connected:
        yield from nc.subscribe("help.*")

        max_messages = 1000000
        start_time = datetime.now()
        print("Sending {} messages to NATS...".format(max_messages))

        for i in range(0, max_messages):
            try:
                yield from nc.publish("help.{}".format(i), b'A')
                yield from nc.flush(0.500)
            except ErrConnectionClosed as e:
                print("Connection closed prematurely.")
                break
            except ErrTimeout as e:
                print("Timeout occured when publishing msg i={}: {}".format(i, e))

        end_time = datetime.now()
        yield from nc.close()
        duration = end_time - start_time
        print("Duration: {}".format(duration))

        try:
            yield from nc.publish("help", b"hello world")
        except ErrConnectionClosed:
            print("Can't publish since no longer connected.")

        err = nc.last_error
        if err is not None:
            print("Last Error: {}".format(err))