def main():
    """Buy an album from the music store service over frugal RPC on NATS."""
    # Declare the protocol stack used for serialization.
    # Protocol stacks must match between clients and servers.
    protocol_factory = FProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory())

    # Open a NATS connection to send requests
    nats_client = NATS()
    yield nats_client.connect(**{"verbose": True,
                                 "servers": ["nats://127.0.0.1:4222"]})

    # Create a nats transport using the connected client.
    # The transport sends data on the music-service NATS topic.
    transport = FNatsTransport(nats_client, "music-service")
    try:
        yield transport.open()
    except TTransportException as ex:
        root.error(ex)
        raise gen.Return()

    # Using the configured transport and protocol, create a client
    # to talk to the music store service.
    store_client = FStoreClient(
        FServiceProvider(transport, protocol_factory),
        middleware=logging_middleware)

    album = yield store_client.buyAlbum(FContext(), str(uuid.uuid4()),
                                        "ACT-12345")
    root.info("Bought an album %s\n", album)

    yield store_client.enterAlbumGiveaway(FContext(), "*****@*****.**",
                                          "Kevin")

    # Tear down the transport, then the underlying NATS connection.
    yield transport.close()
    yield nats_client.close()
def main():
    """Parse benchmark options, build a filler payload, and connect to NATS.

    Falls through to show_usage_and_die() when the server is unreachable.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--count', default=DEFAULT_NUM_MSGS, type=int)
    parser.add_argument('-s', '--size', default=DEFAULT_MSG_SIZE, type=int)
    parser.add_argument('-S', '--subject', default='test')
    parser.add_argument('-b', '--batch', default=DEFAULT_BATCH_SIZE, type=int)
    parser.add_argument('--servers', default=[], action='append')
    args = parser.parse_args()

    # Build the payload once with bytes repetition instead of a
    # char-by-char list append + join.
    payload = b'W' * args.size

    servers = args.servers
    if len(args.servers) < 1:
        servers = ["nats://127.0.0.1:4222"]

    opts = {
        "servers": servers,
        "disconnected_cb": disconnected_cb,
        "close_cb": close_cb,
        "reconnected_cb": reconnected_cb,
        "allow_reconnect": False,
    }

    # Make sure we're connected to a server first...
    nc = NATS()
    try:
        yield nc.connect(**opts)
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        sys.stderr.write("ERROR: {0}".format(e))
        show_usage_and_die()
def main():
    """Subscribe to 'cmd' and print msgpack-decoded commands as they arrive."""
    nc = NATS()

    # Credentials are redacted placeholders in this sample.
    options = {
        "servers": ["nats://192.168.0.114:4222"],
        "user": "******",
        "password": "******",
        "tcp_nodelay": True
    }

    # Establish connection to the server.
    yield nc.connect(**options)

    @tornado.gen.coroutine
    def message_handler(msg):
        # Payloads are msgpack-encoded dictionaries.
        cmd = msgpack.unpackb(msg.data)
        print("[Received on '{}'] : {}".format(msg.subject, cmd))
        print(cmd["gear"])

    # Simple async subscriber
    sid = yield nc.subscribe("cmd", cb=message_handler)
    yield nc.publish("cmd", "test")
def main():
    """nats-sub style subscriber: print every message on the given subject."""
    # Parse the command line arguments,
    # e.g. nats-sub hello -s nats://127.0.0.1:4222
    parser = argparse.ArgumentParser()
    parser.add_argument('subject', default='hello', nargs='?')
    parser.add_argument('-s', '--servers', default=[], action='append')
    parser.add_argument('-q', '--queue', default="")
    args = parser.parse_args()

    # Create client and connect to server (default server if none given).
    nc = NATS()
    servers = args.servers or ["nats://127.0.0.1:4222"]
    yield nc.connect(servers=servers)

    def handler(msg):
        print("[Received: {0}] {1}".format(msg.subject, msg.data))

    print("Subscribed to '{0}'".format(args.subject))
    future = nc.subscribe(args.subject, args.queue, handler)
    sid = future.result()
def main():
    """Demonstrate auto_unsubscribe and queue-based request/response."""
    nc = NATS()

    # Establish connection to the server.
    yield nc.connect(servers=["nats://127.0.0.1:4222"])

    def discover(msg=None):
        print("[Received]: %s" % msg.data)

    sid = yield nc.subscribe("discover", "", discover)

    # Only interested in 2 messages.
    yield nc.auto_unsubscribe(sid, 2)

    yield nc.publish("discover", "A")
    yield nc.publish("discover", "B")

    # Following two messages won't be received.
    yield nc.publish("discover", "C")
    yield nc.publish("discover", "D")

    # Request/Response
    def help_request_handler(msg):
        print("[Received]: %s" % msg.data)
        nc.publish(msg.reply, "OK, I can help!")

    # Subscription using distributed queue
    yield nc.subscribe("help", "workers", help_request_handler)

    try:
        # Expect a single request and timeout after 500 ms
        response = yield nc.request("help", "Hi, need help!", timeout=0.500)
        print("[Response]: %s" % response.data)
    # 'except E, e' is Python-2-only syntax; the binding was also unused.
    except tornado.gen.TimeoutError:
        print("Timeout! Need to retry...")
def main():
    """Subscribe to AlbumWinners events and log them as they arrive."""
    # Declare the protocol stack used for serialization.
    # Protocol stacks must match between publishers and subscribers.
    prot_factory = FProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory())

    # Open a NATS connection to receive requests
    nats_client = NATS()
    yield nats_client.connect(verbose=True,
                              servers=["nats://127.0.0.1:4222"])

    # Create a pub sub scope using the configured transport and protocol
    transport_factory = FNatsSubscriberTransportFactory(nats_client)
    provider = FScopeProvider(None, transport_factory, prot_factory)
    subscriber = AlbumWinnersSubscriber(provider)

    def event_handler(ctx, req):
        root.info("You won! {}".format(req))

    def start_contest_handler(ctx, albums):
        root.info("Contest started, available albums: {}".format(albums))

    yield subscriber.subscribe_Winner(event_handler)
    yield subscriber.subscribe_ContestStart(start_contest_handler)

    root.info("Subscriber starting...")
def main():
    """Run the cross-language frugal test client over NATS or HTTP."""
    parser = argparse.ArgumentParser(description="Run a python tornado client")
    parser.add_argument('--port', dest='port', default='9090')
    parser.add_argument('--protocol', dest='protocol_type', default="binary",
                        choices="binary, compact, json")
    parser.add_argument('--transport', dest='transport_type',
                        default=NATS_NAME, choices="nats, http")
    args = parser.parse_args()

    protocol_factory = get_protocol_factory(args.protocol_type)

    nats_client = NATS()
    logging.debug("Connecting to NATS")
    yield nats_client.connect(**get_nats_options())

    transport = None
    if args.transport_type == NATS_NAME:
        transport = FNatsTransport(nats_client,
                                   "frugal.foo.bar.rpc.{}".format(args.port))
    elif args.transport_type == HTTP_NAME:
        # Set request and response capacity to 1mb
        max_size = 1048576
        transport = FHttpTransport("http://localhost:" + str(args.port),
                                   request_capacity=max_size,
                                   response_capacity=max_size)
    else:
        print("Unknown transport type: {}".format(args.transport_type))
        sys.exit(1)

    try:
        yield transport.open()
    except TTransportException as ex:
        logging.error(ex)
        raise gen.Return()

    client = FrugalTestClient(FServiceProvider(transport, protocol_factory),
                              client_middleware)
    ctx = FContext("test")

    yield test_rpc(client, ctx, args.transport_type)

    # BUG FIX: the original compared the transport *object* to the string
    # NATS_NAME ('transport == NATS_NAME'), which is never true, so the
    # pub/sub tests silently never ran. Compare the selected type instead.
    if args.transport_type == NATS_NAME:
        yield test_pub_sub(nats_client, protocol_factory, args.port)

    global middleware_called
    if not middleware_called:
        print("Client middleware never invoked")
        exit(1)

    # Cleanup after tests
    yield nats_client.close()
def main():
    """Connect to a NATS cluster with lifecycle callbacks, run ping/pong."""
    nc = NATS()

    # Error callback takes the error type as param.
    def error_cb(e):
        print("Error! ", e)

    def close_cb():
        print("Connection was closed!")

    def disconnected_cb():
        print("Disconnected!")

    def reconnected_cb():
        print("Reconnected!")

    # Set pool servers in the cluster and give a name to the client
    # each with its own auth credentials (redacted placeholders here).
    options = {
        "servers": [
            "nats://*****:*****@127.0.0.1:4222",
            "nats://*****:*****@127.0.0.1:4223",
            "nats://*****:*****@127.0.0.1:4224"
        ],
        # Dispatched whenever we get a protocol error message from the server.
        "error_cb": error_cb,
        # Called when we are not connected anymore to the NATS cluster.
        "closed_cb": close_cb,
        # Called whenever we become disconnected from a NATS server.
        "disconnected_cb": disconnected_cb,
        # Called when we connect to a node in the NATS cluster again.
        "reconnected_cb": reconnected_cb,
    }

    yield nc.connect(**options)

    @tornado.gen.coroutine
    def subscriber(msg):
        # Echo each ping back on the pong subject.
        yield nc.publish("pong", "pong:{0}".format(msg.data))

    yield nc.subscribe("ping", "", subscriber)

    for i in range(0, 100):
        yield nc.publish("ping", "ping:{0}".format(i))
        yield tornado.gen.sleep(0.1)

    yield nc.close()

    # Publishing after close raises ErrConnectionClosed.
    try:
        yield nc.publish("ping", "ping")
    except ErrConnectionClosed:
        print("No longer connected to NATS cluster.")
def main():
    """TLS-secured variant of the discover/help demo.

    The tls options parameterize the wrap_socket available from the
    ssl python package.
    """
    nc = NATS()

    # Establish secure connection to the server.
    options = {
        "verbose": True,
        "servers": ["nats://127.0.0.1:4444"],
        "tls": {
            "cert_reqs": ssl.CERT_REQUIRED,
            "ca_certs": "./tests/configs/certs/ca.pem",
            "keyfile": "./tests/configs/certs/client-key.pem",
            "certfile": "./tests/configs/certs/client-cert.pem"
        }
    }
    yield nc.connect(**options)

    def discover(msg=None):
        print("[Received]: %s" % msg.data)

    sid = yield nc.subscribe("discover", "", discover)

    # Only interested in 2 messages.
    yield nc.auto_unsubscribe(sid, 2)

    yield nc.publish("discover", "A")
    yield nc.publish("discover", "B")

    # Following two messages won't be received.
    yield nc.publish("discover", "C")
    yield nc.publish("discover", "D")

    # Request/Response
    def help_request_handler(msg):
        print("[Received]: %s" % msg.data)
        nc.publish(msg.reply, "OK, I can help!")

    # Subscription using distributed queue
    yield nc.subscribe("help", "workers", help_request_handler)

    try:
        # Expect a single request and timeout after 500 ms
        response = yield nc.timed_request("help", "Hi, need help!",
                                          timeout=0.500)
        print("[Response]: %s" % response.data)
    # 'except E, e' is Python-2-only syntax; the binding was also unused.
    except tornado.gen.TimeoutError:
        print("Timeout! Need to retry...")
def main():
    """Worker that answers help requests on 'agent_aws_1' indefinitely."""
    nc = NATS()
    yield nc.connect("nats://127.0.0.1:4222")

    exit_requested = None

    @tornado.gen.coroutine
    def help_request_handler(msg):
        print("[Received on '{}'] : {}".format(msg.subject,
                                               msg.data.decode()))
        yield nc.publish(msg.reply, b'OK, I will help!')

    try:
        yield nc.subscribe('agent_aws_1', "workers", help_request_handler)
    except Exception as e:
        print("!!! Exception : {}".format(e))
        exit_requested = True

    # Keep the coroutine alive so the subscription keeps servicing requests.
    while not exit_requested:
        yield tornado.gen.sleep(60)
def main():
    """End-to-end demo: subscribe, auto-unsubscribe, request/response."""
    nc = NATS()

    # Establish connection to the server.
    yield nc.connect("nats://demo.nats.io:4222")

    @tornado.gen.coroutine
    def message_handler(msg):
        print("[Received on '{}'] : {}".format(msg.subject,
                                               msg.data.decode()))

    # Simple async subscriber; stop receiving after 2 messages.
    sid = yield nc.subscribe("foo", cb=message_handler)
    yield nc.auto_unsubscribe(sid, 2)

    for payload in (b'Hello', b'World', b'!!!!!'):
        yield nc.publish("foo", payload)

    # Request/Response
    @tornado.gen.coroutine
    def help_request_handler(msg):
        print("[Received on '{}']: {}".format(msg.subject, msg.data))
        yield nc.publish(msg.reply, "OK, I can help!")

    # Subscription using distributed queue named 'workers'
    sid = yield nc.subscribe("help", "workers", help_request_handler)

    try:
        # Send a request and expect a single response
        # and trigger timeout if not faster than 200 ms.
        msg = yield nc.request("help", b"Hi, need help!", timeout=0.2)
        print("[Response]: %s" % msg.data)
    except tornado.gen.TimeoutError:
        print("Response Timeout!")

    # Remove interest in subscription.
    yield nc.unsubscribe(sid)

    # Terminate connection to NATS.
    yield nc.close()
def main():
    """Parse subscriber-benchmark options and connect to NATS.

    Falls through to show_usage_and_die() when the server is unreachable.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--count', default=DEFAULT_NUM_MSGS, type=int)
    parser.add_argument('-S', '--subject', default='test')
    parser.add_argument('-t', '--subtype', default='sync')
    parser.add_argument('--servers', default=[], action='append')
    args = parser.parse_args()

    servers = args.servers
    if len(args.servers) < 1:
        servers = ["nats://127.0.0.1:4222"]
    opts = {"servers": servers}

    # Make sure we're connected to a server first...
    nc = NATS()
    try:
        yield nc.connect(**opts)
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        sys.stderr.write("ERROR: {0}".format(e))
        show_usage_and_die()
def main():
    """Serve the music store service on the music-service NATS topic."""
    # Declare the protocol stack used for serialization.
    # Protocol stacks must match between clients and servers.
    prot_factory = FProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory())

    # Open a NATS connection to receive requests
    nats_client = NATS()
    yield nats_client.connect(verbose=True,
                              servers=["nats://127.0.0.1:4222"])

    # Create a new server processor.
    # Incoming requests to the processor are passed to the handler;
    # results from the handler are returned back to the client.
    processor = FStoreProcessor(StoreHandler())

    # Create a new music store server using the processor.
    # The server will listen on the music-service NATS topic.
    server = FNatsServer(nats_client, "music-service", processor,
                         prot_factory)

    root.info("Starting server...")
    yield server.serve()
def pub_sub(subject, protocol_factory):
    """Open an Events pub/sub scope on NATS and answer incoming 'call' events."""
    nats_client = NATS()
    yield nats_client.connect(**get_nats_options())

    # Build publisher/subscriber transports over the shared NATS connection.
    pub_factory = FNatsPublisherTransportFactory(nats_client)
    sub_factory = FNatsSubscriberTransportFactory(nats_client)
    provider = FScopeProvider(pub_factory, sub_factory, protocol_factory)

    publisher = EventsPublisher(provider)
    yield publisher.open()

    @gen.coroutine
    def response_handler(context, event):
        # Echo a response event back, but only when the caller supplied
        # both required headers.
        print("received {} : {}".format(context, event))
        preamble = context.get_request_header(PREAMBLE_HEADER)
        if preamble is None or preamble == "":
            logging.error("Client did not provide preamble header")
            return
        ramble = context.get_request_header(RAMBLE_HEADER)
        if ramble is None or ramble == "":
            logging.error("Client did not provide ramble header")
            return
        response_event = Event(Message="Sending Response")
        response_context = FContext("Call")
        yield publisher.publish_EventCreated(
            response_context, preamble, ramble, "response",
            "{}".format(subject), response_event)
        print("Published event={}".format(response_event))
        publisher.close()

    subscriber = EventsSubscriber(provider)
    yield subscriber.subscribe_EventCreated(
        "*", "*", "call", "{}".format(subject), response_handler)
def nats(server, subject, msg):
    """
    NATS client implemented via tornado (NATS py2 approach), see
    https://github.com/nats-io/nats.py2
    """
    nc = NATS()
    try:
        yield nc.connect(server, max_reconnect_attempts=3)
    except Exception as exp:
        print("failed to connect to server: error {}".format(str(exp)))
        traceback.print_exc()
        return

    # Publish each item individually when given a list of messages.
    items = msg if isinstance(msg, list) else [msg]
    for item in items:
        yield nc.publish(subject, item)

    # Drain gracefully closes the connection, allowing all subscribers to
    # handle any pending messages inflight that the server may have sent.
    yield nc.drain()

    # Drain works async in the background
    #yield tornado.gen.sleep(1)
    yield nc.close()
def main():
    """nats-pub style publisher: publish one message, flush, report.

    e.g. nats-pub hello -d "world" -s nats://127.0.0.1:4222 -s nats://127.0.0.1:4223
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('subject', default='hello', nargs='?')
    parser.add_argument('-d', '--data', default="hello world")
    parser.add_argument('-s', '--servers', default=[], action='append')
    args = parser.parse_args()

    nc = NATS()
    try:
        servers = args.servers
        if len(args.servers) < 1:
            servers = ["nats://127.0.0.1:4222"]
        opts = {"servers": servers}
        yield nc.connect(**opts)

        yield nc.publish(args.subject, args.data)
        yield nc.flush()
        print("Published to '{0}'".format(args.subject))
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        print(e)
        show_usage_and_die()
def main():
    """Publish AlbumWinners events: one winner, one contest start."""
    # Declare the protocol stack used for serialization.
    # Protocol stacks must match between publishers and subscribers.
    prot_factory = FProtocolFactory(TBinaryProtocol.TBinaryProtocolFactory())

    # Open a NATS connection to publish over
    nats_client = NATS()
    yield nats_client.connect(verbose=True,
                              servers=["nats://127.0.0.1:4222"])

    # Create a pub sub scope using the configured transport and protocol
    transport_factory = FNatsPublisherTransportFactory(nats_client)
    provider = FScopeProvider(transport_factory, None, prot_factory)

    # Create a publisher
    publisher = AlbumWinnersPublisher(provider)
    yield publisher.open()

    # Build the album that won the giveaway.
    album = Album()
    album.ASIN = str(uuid.uuid4())
    album.duration = 12000
    album.tracks = [
        Track(title="Comme des enfants",
              artist="Coeur de pirate",
              publisher="Grosse Boîte",
              composer="Béatrice Martin",
              duration=169,
              pro=PerfRightsOrg.ASCAP)
    ]

    # Publish the win event and the contest start event.
    yield publisher.publish_Winner(FContext(), album)
    yield publisher.publish_ContestStart(FContext(), [album, album])

    yield publisher.close()
    yield nats_client.close()
def main():
    """Run the cross-language frugal test server (NATS stateless or HTTP).

    Parses protocol/transport options, starts the requested server, then
    wires up an Events pub/sub scope that answers incoming 'call' events.
    """
    parser = argparse.ArgumentParser(description="Run a tornado python server")
    parser.add_argument('--port', dest='port', default='9090')
    parser.add_argument('--protocol', dest='protocol_type', default="binary",
                        choices="binary, compact, json")
    parser.add_argument('--transport', dest="transport_type",
                        default="stateless", choices="stateless, http")
    args = parser.parse_args()

    # Select the thrift protocol stack; must match the client's choice.
    if args.protocol_type == "binary":
        protocol_factory = FProtocolFactory(TBinaryProtocolFactory())
    elif args.protocol_type == "compact":
        protocol_factory = FProtocolFactory(TCompactProtocolFactory())
    elif args.protocol_type == "json":
        protocol_factory = FProtocolFactory(TJSONProtocolFactory())
    else:
        logging.error("Unknown protocol type: %s", args.protocol_type)
        sys.exit(1)

    nats_client = NATS()
    options = {"verbose": True, "servers": ["nats://127.0.0.1:4222"]}
    yield nats_client.connect(**options)

    # Module-level port is read later by response_handler below.
    global port
    port = args.port
    handler = FrugalTestHandler()
    subject = "frugal.*.*.rpc.{}".format(args.port)
    processor = Processor(handler)

    if args.transport_type == "stateless":
        server = FNatsServer(nats_client, [subject], processor,
                             protocol_factory)
        # start healthcheck so the test runner knows the server is running
        thread.start_new_thread(healthcheck, (port, ))
        print("Starting {} server...".format(args.transport_type))
        # NOTE(review): if serve() runs the server loop indefinitely, the
        # pub/sub setup below only runs for the "http" branch — confirm.
        yield server.serve()
    elif args.transport_type == "http":
        factories = {
            'processor': processor,
            'protocol_factory': protocol_factory
        }
        server = Application([(r'/', FHttpHandler, factories)])
        print("Starting {} server...".format(args.transport_type))
        server.listen(port)
    else:
        logging.error("Unknown transport type: %s", args.transport_type)
        sys.exit(1)

    # Setup subscriber, send response upon receipt
    pub_transport_factory = FNatsPublisherTransportFactory(nats_client)
    sub_transport_factory = FNatsSubscriberTransportFactory(nats_client)
    provider = FScopeProvider(pub_transport_factory, sub_transport_factory,
                              protocol_factory)
    global publisher
    publisher = EventsPublisher(provider)
    yield publisher.open()

    @gen.coroutine
    def response_handler(context, event):
        # Echo a response event, but only when both required headers exist.
        print("received {} : {}".format(context, event))
        preamble = context.get_request_header(PREAMBLE_HEADER)
        if preamble is None or preamble == "":
            logging.error("Client did not provide preamble header")
            return
        ramble = context.get_request_header(RAMBLE_HEADER)
        if ramble is None or ramble == "":
            logging.error("Client did not provide ramble header")
            return
        response_event = Event(Message="Sending Response")
        response_context = FContext("Call")
        global publisher
        global port
        yield publisher.publish_EventCreated(response_context, preamble,
                                             ramble, "response",
                                             "{}".format(port),
                                             response_event)
        print("Published event={}".format(response_event))

    subscriber = EventsSubscriber(provider)
    yield subscriber.subscribe_EventCreated("*", "*", "call",
                                            "{}".format(args.port),
                                            response_handler)
def _pub(self, testcase_name, subject, server, port=4222,
         measurement="alyvix", max_reconnect_attempts=5,
         reconnect_time_wait=2):
    """Publish testcase performance data to NATS as line-protocol messages.

    Flow: connect to NATS, replay any previously cached messages, then build
    and publish one measurement line per perfdata entry. Any message that
    cannot be published (or all of them if the connect failed) is written to
    a per-testcase cache file under ProgramData for the next run to replay.
    """
    #self._perf_manager.order_perfdata()
    #last_filled_perf = self._perf_manager.get_last_filled()
    perfdata_list = self._perf_manager.get_all_perfdata()
    keywords_timestamp_array = self._info_manager.get_info(
        'KEYWORD TIMESTAMP')
    keywords_timeout_array = self._info_manager.get_info('KEYWORD TIMEOUT')
    # Wall-clock "now" in nanoseconds, used for entries with no timestamp.
    current_timestamp = str(int(time.time() * 1000 * 1000 * 1000))
    message_lines = []
    testcase_name = testcase_name.replace(" ", "_")
    nc = NATS()
    options = {"servers": ["nats://" + str(server) + ":" + str(port)]}
    exception_occurred = False

    #if we cannot contact nats server then we have to save messages to cache file
    exception_file_name = "data.txt"
    system_drive = os.environ['systemdrive']
    alyvix_programdata_path = system_drive + os.sep + "ProgramData\\Alyvix\\exception\\nats"\
        + os.sep + os.environ['username'] + os.sep + testcase_name
    exception_file_full_name = alyvix_programdata_path + os.sep + exception_file_name

    try:
        yield nc.connect(**options)
    except:
        exception_occurred = True

    file_to_read = None
    biggest_cnt = 0

    #read previous messages from cache. last txt file contains all messages
    if os.path.exists(alyvix_programdata_path):
        for file in os.listdir(alyvix_programdata_path):
            if file.endswith(".txt"):
                if file == "data.txt":
                    if 0 > biggest_cnt:
                        biggest_cnt = 0
                else:
                    # Cache overflow files are named data_<n>.txt.
                    cnt_str = file.replace("data_", "").replace(".txt", "")
                    cnt_int = int(cnt_str)
                    if cnt_int > biggest_cnt:
                        biggest_cnt = cnt_int
        if biggest_cnt == 0:
            file_to_read = os.path.join(alyvix_programdata_path, "data.txt")
        else:
            file_to_read = os.path.join(
                alyvix_programdata_path,
                "data_" + str(biggest_cnt) + ".txt")

        #read cached messages
        if os.path.exists(file_to_read):
            with open(file_to_read) as f:
                message_lines.extend(f.readlines())
                f.close()

        #delete all cache files
        for file in os.listdir(alyvix_programdata_path):
            if file.endswith(".txt"):
                try:
                    os.remove(os.path.join(alyvix_programdata_path, file))
                except:
                    pass

    tmp_message_lines = []
    for message in message_lines:
        message = message.replace("\r\n", "")
        message = message.replace("\r", "")
        message = message.replace("\n", "")
        if message == "":
            # NOTE(review): 'pass' means empty lines still get published;
            # 'continue' may have been intended — confirm.
            pass
        try:
            #try to publish cached messages
            yield nc.publish(subject, message)
        except:
            tmp_message_lines.append(message)
            exception_occurred = True
    # Keep only the messages that still failed to publish.
    message_lines = tmp_message_lines

    cumsum_value = 0

    #publish current performance data
    # Order the entries: timestamped ones first (sorted), untimestamped last.
    perf_with_timestamp = []
    perf_without_timestamp = []
    for perfdata in perfdata_list:
        #check if current perf has a timestamp
        for cnt_kts in xrange(len(keywords_timestamp_array)):
            if keywords_timestamp_array[cnt_kts][0] == perfdata.name:
                perfdata.timestamp = keywords_timestamp_array[cnt_kts][1]
                perf_with_timestamp.append(perfdata)
                break
        if perfdata.timestamp == None:
            perf_without_timestamp.append(perfdata)
    perf_with_timestamp = sorted(perf_with_timestamp,
                                 key=lambda x: x.timestamp, reverse=False)
    perf_with_timestamp.extend(perf_without_timestamp)
    perfdata_list = perf_with_timestamp

    for perfdata in perfdata_list:
        #check if current perf has a timeout
        for cnt_ktout in xrange(len(keywords_timeout_array)):
            if keywords_timeout_array[cnt_ktout][0] == perfdata.name:
                perfdata.timeout_threshold = keywords_timeout_array[
                    cnt_ktout][1]
                break

        timed_out = False
        not_executed = False

        # Optional line-protocol fragments; empty string when not present.
        msg_extra = ""
        if perfdata.extra != None and perfdata.extra != "":
            msg_extra = ",extra=" + perfdata.extra
        msg_warning = ""
        if perfdata.warning_threshold != None and perfdata.warning_threshold != "":
            msg_warning = ",warning_threshold=" + str(
                int(perfdata.warning_threshold * 1000))
        msg_critical = ""
        if perfdata.critical_threshold != None and perfdata.critical_threshold != "":
            msg_critical = ",critical_threshold=" + str(
                int(perfdata.critical_threshold * 1000))
        msg_timeout = ""
        if perfdata.timeout_threshold != None and perfdata.timeout_threshold != "":
            msg_timeout = ",timeout_threshold=" + str(
                int(perfdata.timeout_threshold * 1000))

        # A missing/invalid timestamp means the keyword never executed.
        try:
            perf_timestamp = str(int(perfdata.timestamp * 1000 * 1000))
        except:
            perf_timestamp = current_timestamp
            not_executed = True

        msg_perf = ""
        if perfdata.value != "" and perfdata.value is not None:
            msg_perf = ",performance=" + str(int(perfdata.value * 1000))
        elif not_executed is False:
            # Executed but produced no value: it timed out.
            #msg_perf = ",performance=" + str(int(perfdata.timeout_threshold * 1000))
            timed_out = True

        # Running cumulative total across all perfdata entries.
        msg_cumsum = ""
        msg_cumsumpre = ",cumulative=" + str(cumsum_value)
        if perfdata.value != "" and perfdata.value is not None:
            value = int(cumsum_value + (perfdata.value * 1000))
            msg_cumsum = ",cumulative=" + str(value)
            cumsum_value = value
        elif not_executed is False:
            #timedout: count the timeout threshold instead of a value
            value = int(cumsum_value + (perfdata.timeout_threshold * 1000))
            msg_cumsum = ",cumulative=" + str(value)
            cumsum_value = value
        else:
            #not_executed: cumulative total is unchanged
            msg_cumsum = ",cumulative=" + str(cumsum_value)

        perfdata_state = "ok"
        if timed_out is True:
            perfdata_state = "timedout"
        elif not_executed is True:
            perfdata_state = "not_executed"
        elif perfdata.state == 1:
            perfdata_state = "warning"
        elif perfdata.state == 2:
            perfdata_state = "critical"
        elif perfdata.state == 3:
            perfdata_state = "unknown"

        msg_errorlevel = ",error_level=0"
        if perfdata.value == "" or perfdata.value is None:
            msg_errorlevel = ",error_level=3"
        elif perfdata.state == 1:
            msg_errorlevel = ",error_level=1"
        elif perfdata.state == 2:
            msg_errorlevel = ",error_level=2"
        elif perfdata.state == 3:
            msg_errorlevel = ",error_level=3"

        point_pre_msg = ""
        point_start_msg = ""
        if not_executed is False:
            point_pre_msg = ",point=pre"
            point_start_msg = ",point=start"

        msg_custom_tags = ""
        for tag in perfdata.custom_tags.keys():
            msg_custom_tags += "," + tag + "=" + perfdata.custom_tags[tag]

        msg_custom_fields = ""
        for field in perfdata.custom_fields.keys():
            field_value = perfdata.custom_fields[field]
            # Non-numeric field values must be quoted in line protocol.
            try:
                int(field_value)
            except:
                field_value = "\"" + field_value + "\""
            msg_custom_fields += "," + field + "=" + field_value

        # NOTE(review): the original statement here was corrupted by
        # credential redaction ('",username="******",host=" + ...') and
        # lost the host_msg assignment that is used below; reconstructed
        # as two tags — confirm the username source against the original.
        user_msg = ",username=" + os.environ['username']
        host_msg = ",host=" + str(gethostname())

        # Build a short pseudo-unique run code from hostname, username and
        # suite name fragments plus the suite start time.
        unique_tag_msg = ""
        try:
            unique_tag_msg = str(gethostname())[0]
            unique_tag_msg = unique_tag_msg + str(gethostname())[1]
            unique_tag_msg = unique_tag_msg + str(gethostname())[-2]
            unique_tag_msg = unique_tag_msg + str(gethostname())[-1]
        except:
            pass
        try:
            unique_tag_msg = unique_tag_msg + os.environ['username'][0]
            unique_tag_msg = unique_tag_msg + os.environ['username'][1]
            unique_tag_msg = unique_tag_msg + os.environ['username'][-2]
            unique_tag_msg = unique_tag_msg + os.environ['username'][-1]
        except:
            pass
        if self._info_manager.get_info('SUITE NAME') is not None:
            try:
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[0]
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[1]
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[-2]
                unique_tag_msg = unique_tag_msg + self._info_manager.get_info(
                    'SUITE NAME')[-1]
            except:
                pass
        unique_tag_msg = ",run_code=\"" + unique_tag_msg + str(
            self._info_manager.get_info('START TIME')) + "\""
        #unique_tag_msg = ""

        """
        if point_pre_msg != "":
            message = str(measurement) + user_msg + host_msg + ",test_name=" + str(testcase_name) \
                      + ",transaction_name=" + str(perfdata.name).replace(" ", "_") + ",state=" + perfdata_state + \
                      msg_extra + msg_custom_tags + point_pre_msg + unique_tag_msg + " " + msg_warning + msg_critical + \
                      msg_timeout + msg_perf + msg_cumsumpre + msg_errorlevel + " " + perf_timestamp
            message = message.replace(" ,", " ")
            try:
                yield nc.publish(subject, message)
            except:
                # store to cache list if we cannot publish messages
                message_lines.append(message)
                exception_occurred = True
        """

        message = str(measurement) + user_msg + host_msg + ",test_name=" + str(testcase_name)\
            + ",transaction_name=" + str(perfdata.name).replace(" ", "_") + ",state=" + perfdata_state +\
            msg_extra + msg_custom_tags + " " + msg_warning + msg_critical +\
            msg_timeout + msg_perf + msg_cumsum + msg_errorlevel + msg_custom_fields + unique_tag_msg + " " + perf_timestamp
        message = message.replace(" ,", " ")

        try:
            yield nc.publish(subject, message)
        except:
            #store to cache list if we cannot publish messages
            message_lines.append(message)
            exception_occurred = True

        if not_executed is False:
            #alyvix saves timestamp in milliseconds, so first of all we have
            #to restore it in seconds interval, then we have to add perfdata
            #value
            if timed_out is True:
                end_timestamp_in_seconds = (
                    float(perfdata.timestamp) / 1000) + perfdata.timeout_threshold
            else:
                if perfdata.end_timestamp_only_for_summed_perf is None:
                    end_timestamp_in_seconds = (float(perfdata.timestamp) /
                                                1000) + perfdata.value
                else:
                    end_timestamp_in_seconds = (float(
                        perfdata.end_timestamp_only_for_summed_perf) / 1000)
            #convert timestamp in seconds to timestamp in nanoseconds
            end_timestamp_in_nanoseconds = int(
                end_timestamp_in_seconds * 1000 * 1000 * 1000)

            """
            message = str(measurement) + user_msg + host_msg + ",test_name=" + str(testcase_name) \
                      + ",transaction_name=" + str(perfdata.name).replace(" ", "_") + ",state=" + perfdata_state + \
                      msg_extra + msg_custom_tags + ",point=end" + unique_tag_msg + " " + msg_warning + msg_critical + msg_timeout + \
                      msg_perf + msg_cumsum + msg_errorlevel + " " + str(end_timestamp_in_nanoseconds)
            message = message.replace(" ,", " ")
            try:
                yield nc.publish(subject, message)
            except:
                # store to cache list if we cannot publish messages
                message_lines.append(message)
                exception_occurred = True
            """

    try:
        yield nc.flush()
    except:
        exception_occurred = True

    #store cache list to cache file
    if exception_occurred is True:
        if not os.path.exists(alyvix_programdata_path):
            os.makedirs(alyvix_programdata_path)
        try:
            with open(exception_file_full_name, 'w') as f:
                for item in message_lines:
                    f.write("%s\r\n" % item)
                f.close()
        except:
            # data.txt is locked/unwritable: fall back to data_<n>.txt,
            # probing increasing n until a free name is found.
            filename = exception_file_full_name
            cnt = 0
            while True:
                if not os.path.exists(filename):
                    with open(filename, 'w') as f:
                        for item in message_lines:
                            f.write("%s\r\n" % item)
                        f.close()
                    break
                cnt += 1
                if (cnt - 1) == 0:
                    filename = filename.replace(".txt",
                                                "_" + str(cnt) + ".txt")
                else:
                    filename = filename.replace(
                        str(cnt - 1) + ".txt",
                        str(cnt) + ".txt")