def publisher():
    """Publish ``maxmsg`` timestamped messages on ``subject`` via NATS.

    Parses the subject and message count from the command line, sleeps to
    let subscribers attach, then publishes sequentially numbered messages.
    Relies on module-level helpers ``get_time`` and ``show_usage_and_die``.
    """
    parser = argparse.ArgumentParser()
    # e.g. nats-pub hello -d "world" -s nats://127.0.0.1:4222 -s nats://127.0.0.1:4223
    parser.add_argument("subject", default="hello", nargs="?")
    parser.add_argument("-m", "--maxmsg", default="10000")
    args = parser.parse_args()

    time.sleep(30)  # Wait for all subscribe calls to complete before publishing
    # NOTE: was a Python 2 `print` statement; converted to print() for
    # consistency with the Python 3 style used elsewhere in this file.
    print('User', args.subject, "starting to publish")

    nc = NATS()
    try:
        servers = ["nats://146.148.76.9:4222"]
        #servers = ["nats://127.0.0.1:4222"]
        opts = {"servers": servers}
        yield nc.connect(**opts)
        for i in range(int(args.maxmsg)):
            # Message payload: "<subject> <time> <1-based index>"
            message_data = args.subject + " " + get_time() + " " + str(i + 1)
            yield nc.publish(args.subject, message_data)
        yield nc.flush()
    except Exception as e:  # was Python 2 `except Exception, e`
        print(e)
        show_usage_and_die()
def test_close_connection(self):
    """A closed client must stay closed, never reconnect, and reject all ops."""
    nc = Client()
    options = {
        "dont_randomize": True,
        "servers": [
            "nats://*****:*****@127.0.0.1:4223",
            "nats://*****:*****@127.0.0.1:4224"
        ],
        "io_loop": self.io_loop
    }
    yield nc.connect(**options)
    self.assertEqual(True, nc._server_info["auth_required"])

    log = Log()
    # Subscriptions receive sequential sids starting at 1.
    for expected_sid, topic in enumerate(("foo", "bar", "quux"), start=1):
        sid = yield nc.subscribe(topic, "", log.persist)
        self.assertEqual(sid, expected_sid)

    yield nc.publish("foo", "hello")
    yield tornado.gen.sleep(1.0)

    # Done -- tear the connection down, then kill the server it was using.
    yield nc.close()
    orig_gnatsd = self.server_pool.pop(0)
    orig_gnatsd.finish()

    try:
        server_before = nc._current_server
        # Wait and assert that we don't reconnect.
        yield tornado.gen.sleep(3)
    finally:
        server_after = nc._current_server
        self.assertEqual(server_before.uri, server_after.uri)

    self.assertFalse(nc.is_connected())
    self.assertFalse(nc.is_reconnecting())
    self.assertTrue(nc.is_closed())

    # Every client operation on a closed connection must raise.
    with self.assertRaises(ErrConnectionClosed):
        yield nc.publish("hello", "world")
    with self.assertRaises(ErrConnectionClosed):
        yield nc.flush()
    with self.assertRaises(ErrConnectionClosed):
        yield nc.subscribe("hello", "worker")
    with self.assertRaises(ErrConnectionClosed):
        yield nc.publish_request("hello", "inbox", "world")
    with self.assertRaises(ErrConnectionClosed):
        yield nc.request("hello", "world")
    with self.assertRaises(ErrConnectionClosed):
        yield nc.timed_request("hello", "world")
def go(loop):
    """asyncio NATS demo: auto-unsubscribe, queue subscription, timed request.

    Connects on *loop*, shows a 2-message auto-unsubscribe on 'discover',
    then answers a 'help' request through a 'workers' queue group.
    """
    nc = NATS()
    try:
        yield from nc.connect(io_loop=loop)
    except Exception:  # was a bare except; still best-effort, but no longer
        pass           # swallows KeyboardInterrupt/SystemExit

    def message_handler(msg):
        print("[Received on '{}']: {}".format(msg.subject, msg.data.decode()))

    try:
        # Interested in receiving 2 messages from the 'discover' subject.
        sid = yield from nc.subscribe("discover", "", message_handler)
        yield from nc.auto_unsubscribe(sid, 2)
        yield from nc.publish("discover", b'hello')
        yield from nc.publish("discover", b'world')

        # Following 2 messages won't be received.
        yield from nc.publish("discover", b'again')
        yield from nc.publish("discover", b'!!!!!')
    except ErrConnectionClosed:
        print("Connection closed prematurely")

    def request_handler(msg):
        print("[Request on '{} {}']: {}".format(msg.subject, msg.reply,
                                                msg.data.decode()))

    if nc.is_connected:
        # Subscription using a 'workers' queue so that only a single subscriber
        # gets a request at a time.
        yield from nc.subscribe("help", "workers", request_handler)
        try:
            # Make a request expecting a single response within 500 ms,
            # otherwise raising a timeout error.
            response = yield from nc.timed_request("help", b'help please', 0.500)
            # BUGFIX: originally printed `msg.data`, but `msg` is not in scope
            # here (it only exists inside the handlers) -> NameError.
            print("[Response]: {}".format(response.data))

            # Make a roundtrip to the server to ensure messages
            # that sent messages have been processed already.
            yield from nc.flush(0.500)
        except ErrTimeout:
            print("[Error] Timeout!")

        # Wait a bit for message to be dispatched...
        yield from asyncio.sleep(1, loop=loop)

    # Detach from the server.
    yield from nc.close()

    if nc.last_error is not None:
        print("Last Error: {}".format(nc.last_error))
    if nc.is_closed:
        print("Disconnected.")
class MyNATSClass:
    """Bridge between a NATS subject and an MQTT client.

    Consumes outbound messages from *queue* and publishes them to
    'ericssion.iot.nats.c3engine'; forwards messages received on
    'ericssion.iot.nats.c3shadow' to the MQTT side.
    """

    def __init__(self, t_name, queue, mqtt):
        self._queue = queue
        logger.debug("I am NATS")
        try:
            tmp = os.environ["NATSSERVERURI"]
            logger.debug("nats server uri = %s", tmp)
            self._servers_uri = []
            self._servers_uri.append(tmp)
        except KeyError:  # was a bare except; only a missing env var can occur here
            logger.debug("Can not get enviro NATSSERVERURI")
            # NOTE(review): exits with status 0 even though this is a failure
            # path -- kept as-is for backward compatibility.
            sys.exit(0)
        self._nats = NATS()
        self._mqtt = mqtt

    @tornado.gen.coroutine
    def _nats_main(self):
        """Main coroutine: subscribe, then pump the outbound queue until gEnd."""
        opts = {"servers": self._servers_uri}
        yield self._nats.connect(**opts)
        logger.debug("subscribe ericssion.iot.nats.c3shadow")
        yield self._nats.subscribe("ericssion.iot.nats.c3shadow", "",
                                   self._subscribe)
        # `gEnd` is a module-level shutdown flag set elsewhere.
        while gEnd == False:
            sent = self._nats.stats["out_msgs"]
            received = self._nats.stats["in_msgs"]
            yield tornado.gen.sleep(2)
            try:
                tmp = self._queue.get(False)  # non-blocking poll
            except Queue.Empty:
                # logger.debug("send msg = %d, in_msgs = %d, queue is empty", sent, received)
                continue
            yield self._nats.publish("ericssion.iot.nats.c3engine", tmp)
            yield self._nats.flush()
            logger.debug("nats publish message = %s", tmp)
        logger.debug("I died")

    def _subscribe(self, msg):
        """NATS message callback: parse JSON payload and route to MQTT."""
        # logger.debug("get msg subject = %s, data = %s", msg.subject, str(msg.data))
        try:
            # Payload uses single quotes; normalize so json.loads accepts it.
            tmp = str(msg.data).replace("'", '"')
            mydict = json.loads(tmp)
        except (ValueError, TypeError):  # was a bare except
            logger.debug("message format is not correct, subject = %s, data = %s",
                         msg.subject, str(msg.data))
            return
        # Converted from Python 2 `print`/`has_key` for consistency with the
        # Python 3 syntax used elsewhere in this file.
        print("NATS:: received %s" % (str(mydict)))
        if "data" in mydict:
            print("hello********************")
            # need MQTT publish
            self._mqtt.mqtt_publish(mydict)
        else:
            self._mqtt.mqtt_subscribe(mydict)

    def run(self):
        """Block the current thread running the NATS main coroutine."""
        tornado.ioloop.IOLoop.instance().run_sync(self._nats_main)
def main():
    """Tornado NATS tour: pub/sub, auto-unsubscribe, request/reply, flush."""
    nc = NATS()

    # Establish connection to the server.
    options = {"verbose": True, "servers": ["nats://127.0.0.1:4222"]}
    yield nc.connect(**options)

    def on_discover(msg=None):
        print("[Received]: %s" % msg.data)

    sid = yield nc.subscribe("discover", "", on_discover)
    # Only interested in 2 messages; the remaining publishes are dropped
    # client-side by the auto-unsubscribe.
    yield nc.auto_unsubscribe(sid, 2)
    for payload in ("A", "B", "C", "D"):
        yield nc.publish("discover", payload)

    # Request/Response
    def on_help_request(msg):
        print("[Received]: %s" % msg.data)
        nc.publish(msg.reply.decode(), "OK, I can help!")

    # Subscription using distributed queue
    yield nc.subscribe("help", "workers", on_help_request)

    try:
        # Expect a single request and timeout after 500 ms
        response = yield nc.timed_request("help", "Hi, need help!", 500)
        print("[Response]: %s" % response.data)
    except tornado.gen.TimeoutError:
        print("Timeout! Need to retry...")

    # Customize number of responses to receive
    def on_many_responses(msg=None):
        print("[Response]: %s" % msg.data)

    yield nc.request("help", "please", expected=2, cb=on_many_responses)

    # Publish inbox
    my_inbox = new_inbox()
    yield nc.subscribe(my_inbox)
    yield nc.publish_request("help", my_inbox, "I can help too!")

    loop = tornado.ioloop.IOLoop.instance()
    yield tornado.gen.Task(loop.add_timeout, time.time() + 1)

    try:
        start = datetime.now()
        # Make roundtrip to the server and timeout after 1000 ms
        yield nc.flush(1000)
        end = datetime.now()
        print("Latency: %d µs" % (end.microsecond - start.microsecond))
    except tornado.gen.TimeoutError:
        print("Timeout! Roundtrip too slow...")
def test_flush_timeout(self):
    # Verifies the client's internal PING/PONG bookkeeping around flush():
    # a timed-out flush must leave its pong future registered, and a second
    # (non-yielded) flush must add another outstanding ping/pong pair.

    # Stub parser injected in place of the protocol parser: when the client
    # "reads" data it asserts exactly one ping/pong pair is in flight, then
    # simulates the PONG arriving via _process_pong().
    class Parser():
        def __init__(self, nc, t):
            self.nc = nc  # client under test
            self.t = t    # the test case, for assertions inside read()

        def read(self, data=''):
            self.t.assertEqual(1, self.nc._pings_outstanding)
            self.t.assertEqual(1, len(self.nc._pongs))
            self.nc._process_pong()
            # _process_pong() must consume the pending pong entry.
            self.t.assertEqual(0, len(self.nc._pongs))

    nc = Client()
    nc._ps = Parser(nc, self)
    yield nc.connect(io_loop=self.io_loop)
    # flush(1) sends a PING and waits up to 1s for the PONG; here it is
    # expected to time out.
    with self.assertRaises(tornado.gen.TimeoutError):
        yield nc.flush(1)
    # NOTE(review): asserts one pong is still registered after the timeout --
    # presumably the timed-out flush leaves its entry behind; confirm against
    # the client's flush implementation.
    self.assertEqual(1, len(nc._pongs))
    # Deliberately NOT yielded: schedules a second flush without waiting,
    # so its ping/pong remains outstanding for the assertions below.
    nc.flush(1)
    self.assertEqual(2, nc._pings_outstanding)
    self.assertEqual(2, len(nc._pongs))
def main(user):
    """Publish a 'userAction' message describing *user* to the message queue.

    Reads the broker host/port from the global Configuration, publishes the
    payload built by make_message(), and re-raises any failure after
    printing it.
    """
    nc = Client()
    try:
        server = Configuration('global').get('message_queue')
        opts = {"servers": ['nats://{}:{}'.format(server['host'],
                                                  server['port'])]}
        print('Connecting to {}'.format(server))
        yield nc.connect(**opts)

        data = make_message(user)
        yield nc.publish('userAction', data)
        yield nc.flush()
        # NOTE(review): this prints the payload, not the subject, despite
        # saying "Published to" -- kept as-is since it is runtime output.
        print("Published to '{0}'".format(data))
    except Exception as exc:
        print(exc)
        # BUGFIX: `raise exc` re-raised from here and destroyed the original
        # traceback; a bare `raise` preserves it for the caller.
        raise
def main():
    """CLI entry point: publish one message on the given subject.

    Mirrors `nats-pub`: optional positional subject, repeatable -s/--servers,
    and -d/--data for the payload. Falls back to localhost when no server
    is given.
    """
    parser = argparse.ArgumentParser()
    # e.g. nats-pub hello -d "world" -s nats://127.0.0.1:4222 -s nats://127.0.0.1:4223
    parser.add_argument('subject', default='hello', nargs='?')
    parser.add_argument('-d', '--data', default="hello world")
    parser.add_argument('-s', '--servers', default=[], action='append')
    args = parser.parse_args()

    nc = NATS()
    try:
        servers = args.servers
        if len(args.servers) < 1:
            servers = ["nats://127.0.0.1:4222"]
        opts = {"servers": servers}
        yield nc.connect(**opts)
        yield nc.publish(args.subject, args.data)
        yield nc.flush()
        print("Published to '{0}'".format(args.subject))
    except Exception as e:  # was Python 2 `except Exception, e`
        print(e)
        show_usage_and_die()
def _pub(self, testcase_name, subject, server, port=4222, measurement="alyvix",
         max_reconnect_attempts=5, reconnect_time_wait=2):
    # Publish the collected performance data as line-protocol-style messages
    # to a NATS server. If the server is unreachable, messages are cached in
    # a per-user/per-testcase file under %systemdrive%\ProgramData\Alyvix and
    # re-published on the next run.
    #
    # NOTE(review): indentation of this method was reconstructed from a
    # whitespace-mangled source; nesting is a best-effort reading -- verify
    # against the original Alyvix sources.
    # NOTE(review): `max_reconnect_attempts` and `reconnect_time_wait` are
    # accepted but never used in this body.

    #self._perf_manager.order_perfdata()
    #last_filled_perf = self._perf_manager.get_last_filled()
    perfdata_list = self._perf_manager.get_all_perfdata()
    keywords_timestamp_array = self._info_manager.get_info(
        'KEYWORD TIMESTAMP')
    keywords_timeout_array = self._info_manager.get_info('KEYWORD TIMEOUT')

    # Fallback timestamp (nanoseconds) for perfdata with no timestamp of its own.
    current_timestamp = str(int(time.time() * 1000 * 1000 * 1000))

    message_lines = []

    testcase_name = testcase_name.replace(" ", "_")

    nc = NATS()
    options = {"servers": ["nats://" + str(server) + ":" + str(port)]}

    exception_occurred = False

    #if we cannot contact nats server then we have to save messages to cache file
    exception_file_name = "data.txt"
    system_drive = os.environ['systemdrive']
    alyvix_programdata_path = system_drive + os.sep + "ProgramData\\Alyvix\\exception\\nats"\
        + os.sep + os.environ['username'] + os.sep + testcase_name
    exception_file_full_name = alyvix_programdata_path + os.sep + exception_file_name

    try:
        yield nc.connect(**options)
    except:
        # Connection failed: everything built below ends up in the cache file.
        exception_occurred = True

    file_to_read = None
    biggest_cnt = 0

    #read previous messages from cache. last txt file contains all messages
    # Cache files are "data.txt" then "data_1.txt", "data_2.txt", ...;
    # the highest-numbered one is the most recent complete snapshot.
    if os.path.exists(alyvix_programdata_path):
        for file in os.listdir(alyvix_programdata_path):
            if file.endswith(".txt"):
                if file == "data.txt":
                    # NOTE(review): this condition can never be true
                    # (biggest_cnt starts at 0) -- dead branch.
                    if 0 > biggest_cnt:
                        biggest_cnt = 0
                else:
                    cnt_str = file.replace("data_", "").replace(".txt", "")
                    cnt_int = int(cnt_str)
                    if cnt_int > biggest_cnt:
                        biggest_cnt = cnt_int

        if biggest_cnt == 0:
            file_to_read = os.path.join(alyvix_programdata_path, "data.txt")
        else:
            file_to_read = os.path.join(
                alyvix_programdata_path,
                "data_" + str(biggest_cnt) + ".txt")

        #read cached messages
        if os.path.exists(file_to_read):
            with open(file_to_read) as f:
                message_lines.extend(f.readlines())
                f.close()

        #delete all cache files
        for file in os.listdir(alyvix_programdata_path):
            if file.endswith(".txt"):
                try:
                    os.remove(os.path.join(alyvix_programdata_path, file))
                except:
                    pass

    # Re-publish cached messages; whatever still fails is kept for re-caching.
    tmp_message_lines = []
    for message in message_lines:
        message = message.replace("\r\n", "")
        message = message.replace("\r", "")
        message = message.replace("\n", "")
        if message == "":
            # NOTE(review): `pass` does not skip this iteration -- empty
            # messages are still published below; `continue` was probably
            # intended.
            pass
        try:
            #try to publish cached messages
            yield nc.publish(subject, message)
        except:
            tmp_message_lines.append(message)
            exception_occurred = True
    message_lines = tmp_message_lines

    cumsum_value = 0

    #publish current performance data
    # Order perfdata: entries with a known timestamp first (chronological),
    # then the ones without.
    perf_with_timestamp = []
    perf_without_timestamp = []

    for perfdata in perfdata_list:
        #check if current perf has a timestamp
        for cnt_kts in xrange(len(keywords_timestamp_array)):
            if keywords_timestamp_array[cnt_kts][0] == perfdata.name:
                perfdata.timestamp = keywords_timestamp_array[cnt_kts][1]
                perf_with_timestamp.append(perfdata)
                break
        if perfdata.timestamp == None:
            perf_without_timestamp.append(perfdata)

    perf_with_timestamp = sorted(perf_with_timestamp,
                                 key=lambda x: x.timestamp,
                                 reverse=False)
    perf_with_timestamp.extend(perf_without_timestamp)
    perfdata_list = perf_with_timestamp

    for perfdata in perfdata_list:
        #check if current perf has a timeout
        for cnt_ktout in xrange(len(keywords_timeout_array)):
            if keywords_timeout_array[cnt_ktout][0] == perfdata.name:
                perfdata.timeout_threshold = keywords_timeout_array[
                    cnt_ktout][1]
                break

        timed_out = False
        not_executed = False

        # Optional tag fragments -- empty string when the field is unset.
        msg_extra = ""
        if perfdata.extra != None and perfdata.extra != "":
            msg_extra = ",extra=" + perfdata.extra

        msg_warning = ""
        if perfdata.warning_threshold != None and perfdata.warning_threshold != "":
            msg_warning = ",warning_threshold=" + str(
                int(perfdata.warning_threshold * 1000))

        msg_critical = ""
        if perfdata.critical_threshold != None and perfdata.critical_threshold != "":
            msg_critical = ",critical_threshold=" + str(
                int(perfdata.critical_threshold * 1000))

        msg_timeout = ""
        if perfdata.timeout_threshold != None and perfdata.timeout_threshold != "":
            msg_timeout = ",timeout_threshold=" + str(
                int(perfdata.timeout_threshold * 1000))

        try:
            # Timestamps are stored in ms; convert to ns for the message.
            perf_timestamp = str(int(perfdata.timestamp * 1000 * 1000))
        except:
            # No usable timestamp: the keyword never ran.
            perf_timestamp = current_timestamp
            not_executed = True

        msg_perf = ""
        if perfdata.value != "" and perfdata.value is not None:
            msg_perf = ",performance=" + str(int(perfdata.value * 1000))
        elif not_executed is False:
            # Executed but no value recorded -> the keyword timed out.
            #msg_perf = ",performance=" + str(int(perfdata.timeout_threshold * 1000))
            timed_out = True

        # Running cumulative duration across the ordered perfdata.
        msg_cumsum = ""
        msg_cumsumpre = ",cumulative=" + str(cumsum_value)
        if perfdata.value != "" and perfdata.value is not None:
            value = int(cumsum_value + (perfdata.value * 1000))
            msg_cumsum = ",cumulative=" + str(value)
            cumsum_value = value
        elif not_executed is False:
            #timedout
            value = int(cumsum_value + (perfdata.timeout_threshold * 1000))
            msg_cumsum = ",cumulative=" + str(value)
            cumsum_value = value
        else:
            #not_executed
            msg_cumsum = ",cumulative=" + str(cumsum_value)

        # Map flags/state code to a textual state tag.
        perfdata_state = "ok"
        if timed_out is True:
            perfdata_state = "timedout"
        elif not_executed is True:
            perfdata_state = "not_executed"
        elif perfdata.state == 1:
            perfdata_state = "warning"
        elif perfdata.state == 2:
            perfdata_state = "critical"
        elif perfdata.state == 3:
            perfdata_state = "unknown"

        msg_errorlevel = ",error_level=0"
        if perfdata.value == "" or perfdata.value is None:
            msg_errorlevel = ",error_level=3"
        elif perfdata.state == 1:
            msg_errorlevel = ",error_level=1"
        elif perfdata.state == 2:
            msg_errorlevel = ",error_level=2"
        elif perfdata.state == 3:
            msg_errorlevel = ",error_level=3"

        # NOTE(review): point_pre_msg/point_start_msg are only used by the
        # commented-out "pre" message below.
        point_pre_msg = ""
        point_start_msg = ""
        if not_executed is False:
            point_pre_msg = ",point=pre"
            point_start_msg = ",point=start"

        msg_custom_tags = ""
        for tag in perfdata.custom_tags.keys():
            msg_custom_tags += "," + tag + "=" + perfdata.custom_tags[tag]

        msg_custom_fields = ""
        for field in perfdata.custom_fields.keys():
            is_string = False  # NOTE(review): never read
            field_value = perfdata.custom_fields[field]
            try:
                # Numeric field values pass through unquoted...
                int(field_value)
            except:
                # ...anything else is emitted as a quoted string.
                field_value = "\"" + field_value + "\""
            msg_custom_fields += "," + field + "=" + field_value

        # NOTE(review): this line appears garbled by credential redaction
        # ("******"); it also leaves `host_msg` (used below) undefined --
        # the original presumably built both a username tag and a host tag.
        user_msg = ",username="******",host=" + str(gethostname())

        # run_code: a short fingerprint from hostname + username + suite name
        # characters plus the suite start time, to correlate one run's points.
        unique_tag_msg = ""
        try:
            unique_tag_msg = str(gethostname())[0]
            unique_tag_msg = unique_tag_msg + str(gethostname())[1]
            unique_tag_msg = unique_tag_msg + str(gethostname())[-2]
            unique_tag_msg = unique_tag_msg + str(gethostname())[-1]
        except:
            pass

        try:
            unique_tag_msg = unique_tag_msg + os.environ['username'][0]
            unique_tag_msg = unique_tag_msg + os.environ['username'][1]
            unique_tag_msg = unique_tag_msg + os.environ['username'][-2]
            unique_tag_msg = unique_tag_msg + os.environ['username'][-1]
        except:
            pass

        if self._info_manager.get_info('SUITE NAME') is not None:
            try:
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[0]
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[1]
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[-2]
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[-1]
            except:
                pass

        unique_tag_msg = ",run_code=\"" + unique_tag_msg + str(
            self._info_manager.get_info('START TIME')) + "\""

        #unique_tag_msg = ""

        """
        if point_pre_msg != "":
            message = str(measurement) + user_msg + host_msg + ",test_name=" + str(testcase_name) \
                      + ",transaction_name=" + str(perfdata.name).replace(" ", "_") + ",state=" + perfdata_state + \
                      msg_extra + msg_custom_tags + point_pre_msg + unique_tag_msg +" " + msg_warning + msg_critical + \
                      msg_timeout + msg_perf + msg_cumsumpre + msg_errorlevel + " " + perf_timestamp

            message = message.replace(" ,", " ")

            try:
                yield nc.publish(subject, message)
            except:
                # store to cache list if we cannot publish messages
                message_lines.append(message)
                exception_occurred = True
        """

        # Main data point for this transaction.
        message = str(measurement) + user_msg + host_msg + ",test_name=" + str(testcase_name)\
            + ",transaction_name=" + str(perfdata.name).replace(" ", "_") + ",state=" + perfdata_state +\
            msg_extra + msg_custom_tags + " " + msg_warning + msg_critical +\
            msg_timeout + msg_perf + msg_cumsum + msg_errorlevel + msg_custom_fields + unique_tag_msg + " " + perf_timestamp

        message = message.replace(" ,", " ")

        try:
            yield nc.publish(subject, message)
        except:
            #store to cache list if we cannot publish messages
            message_lines.append(message)
            exception_occurred = True

        if not_executed is False:
            #alyvix saves timestamp in millisconds, so first of all we have to restore it in seconds interval, then
            #we have to add perfdata value
            if timed_out is True:
                end_timestamp_in_seconds = (
                    float(perfdata.timestamp) / 1000) + perfdata.timeout_threshold
            else:
                if perfdata.end_timestamp_only_for_summed_perf is None:
                    end_timestamp_in_seconds = (float(perfdata.timestamp) /
                                                1000) + perfdata.value
                else:
                    end_timestamp_in_seconds = (float(
                        perfdata.end_timestamp_only_for_summed_perf) / 1000)

            #convert timestamp in seconds to timestamp in nanoseconds
            end_timestamp_in_nanoseconds = int(end_timestamp_in_seconds * 1000 * 1000 * 1000)

            """
            message = str(measurement) + user_msg + host_msg + ",test_name=" + str(testcase_name) \
                      + ",transaction_name=" + str(perfdata.name).replace(" ", "_") + ",state=" + perfdata_state + \
                      msg_extra + msg_custom_tags + ",point=end" + unique_tag_msg + " " + msg_warning + msg_critical + msg_timeout + \
                      msg_perf + msg_cumsum + msg_errorlevel + " " + str(end_timestamp_in_nanoseconds)

            message = message.replace(" ,", " ")

            try:
                yield nc.publish(subject, message)
            except:
                # store to cache list if we cannot publish messages
                message_lines.append(message)
                exception_occurred = True
            """

    try:
        yield nc.flush()
    except:
        exception_occurred = True

    #store cache list to cache file
    # If anything failed to publish, persist all unsent messages. When the
    # primary cache file cannot be written, fall back to data_1.txt,
    # data_2.txt, ... until a writable name is found.
    if exception_occurred is True:
        if not os.path.exists(alyvix_programdata_path):
            os.makedirs(alyvix_programdata_path)
        try:
            with open(exception_file_full_name, 'w') as f:
                for item in message_lines:
                    f.write("%s\r\n" % item)
                f.close()
        except:
            filename = exception_file_full_name
            cnt = 0
            while True:
                if not os.path.exists(filename):
                    with open(filename, 'w') as f:
                        for item in message_lines:
                            f.write("%s\r\n" % item)
                        f.close()
                    break
                cnt += 1
                if (cnt - 1) == 0:
                    filename = filename.replace(".txt", "_" + str(cnt) + ".txt")
                else:
                    filename = filename.replace(
                        str(cnt - 1) + ".txt",
                        str(cnt) + ".txt")