def test_mqtt_logger(self):
    """
    Test the MQTT logger using the standard format.
    Note that mosquitto should be running first, e.g.:

        $ docker run -ti -p 1883:1883 -p 9001:9001 toke/mosquitto

    or

        $ brew services start mosquitto
    """
    # assert(mosquitto_is_running())
    logging.raiseExceptions = True

    # noinspection PyTypeChecker
    mqtt_logger = dict(host=mqtt_ip, port=1883, topic="topics/test",
                       loglevel=MON, qos=1)

    with HyperStream(file_logger=False, console_logger=False,
                     mqtt_logger=mqtt_logger):
        with MqttClient() as client:
            # client.client.publish("topics/test", "{} ABC".format(utcnow()))
            logging.monitor("1234567890")
            sleep(1)
            print(client.last_messages["topics/test"])
            assert client.last_messages["topics/test"][24:] == '[MON ] 1234567890'
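# A minimal sketch of how the custom MON level and the logging.monitor
# shortcut used above could be wired onto the stdlib logging module.
# HyperStream provides this for real; the level number (25) and the helper
# below are assumptions for illustration only.
import logging

MON = 25  # assumed value, between INFO (20) and WARNING (30)
logging.addLevelName(MON, "MON")

def monitor(msg, *args, **kwargs):
    # Mirror module-level helpers such as logging.info(), but at MON level.
    logging.log(MON, msg, *args, **kwargs)

logging.monitor = monitor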
def run_event(self, event, synchronous=False):
    """ Will "run" or execute an event:

        * all handlers associated with a given ``event`` are executed in
          turn and are passed the event `args` and `kwargs`. Any
          encountered exception is dumped, but the processing continues.
        * the event callback is run when all handlers have been executed,
          with the event `args` and `kwargs`.

        .. note:: Caveat: as of version 1.3, the event handlers are not
            run in parallel.

        .. versionadded:: 1.3
    """
    assert ltrace_func(TRACE_EVENTS)

    if events_collectors:
        workers.service_enqueue(priorities.HIGH,
                                self.push_event_to_collectors_thread, event)

    logging.monitor(TRACE_EVENTS, TRACELEVEL_1,
                    _('Processing event {0}'), (ST_NAME, event.name))

    self.run_methods(event, events_handlers, _('event handler'), synchronous)
    self.run_methods(event, events_callbacks, _('event callback'), synchronous)
def __enter__(self):
    """
    Entry point - called when workflow computation starts

    :return: self
    """
    if self.monitor:
        try:
            logging.monitor(self.name, extra=dict(n="workflow_start"))
        except AttributeError:
            pass
    return self
def push_event_to_collectors_thread(self, event):
    """ Meant to be used in a ServiceThread, because it can take a long time. """

    assert ltrace_func(TRACE_EVENTS)

    # Iterate over a copy, because collectors can be unregistered mid-loop.
    for collector in events_collectors[:]:
        try:
            logging.monitor(TRACE_EVENTS, TRACELEVEL_2,
                            _('Event push to collector: {0} > {1}'),
                            event.name, collector)
            collector.process_event(event)

        except:
            # The collector probably disconnected without warning us first.
            # Even without that, it produced an error; drop it.
            logging.exception(_(u'Exception while pushing event '
                                u'{0} to collector {1}'), event, collector)
            unregister_collector(collector)
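# The loop above only requires a collector to expose process_event(event);
# a minimal hypothetical collector illustrating that contract (the class
# name and printed format are assumptions, not part of the snippet's API):
class PrintCollector(object):
    def process_event(self, event):
        # A real collector would forward the event over its connection.
        print('collected event: %s' % event.name)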
def __exit__(self, exc_type, exc_val, exc_tb):
    """
    Exit point - called when workflow computation ends

    :param exc_type: exception type
    :param exc_val: exception value
    :param exc_tb: exception traceback
    :return: True if the workflow exited without an exception
    """
    if self.monitor:
        try:
            logging.monitor(self.name, extra=dict(n="workflow_end"))
        except AttributeError:
            pass
    # Returning False re-raises any error raised inside the with-block
    return exc_val is None
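# Taken together, the two methods above make a workflow usable as a context
# manager. A minimal, self-contained usage sketch: the Workflow class below
# is a hypothetical stand-in (only `name` and a `monitor` flag are assumed),
# and the AttributeError guard means it runs even where logging.monitor is
# not installed.
import logging

class Workflow(object):
    def __init__(self, name, monitor=True):
        self.name = name
        self.monitor = monitor

    def __enter__(self):
        if self.monitor:
            try:
                logging.monitor(self.name, extra=dict(n="workflow_start"))
            except AttributeError:
                pass
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.monitor:
            try:
                logging.monitor(self.name, extra=dict(n="workflow_end"))
            except AttributeError:
                pass
        return exc_val is None

with Workflow("example_workflow") as wf:
    pass  # the computation runs here, bracketed by start/end monitor records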
def test_mqtt_logger_json(self):
    """
    Test the MQTT logger using the JSON format.
    Note that mosquitto should be running first, e.g.:

        $ docker run -ti -p 1883:1883 -p 9001:9001 toke/mosquitto

    or

        $ brew services start mosquitto
    """
    # assert (mosquitto_is_running())
    logging.raiseExceptions = True

    # Make the MQTT handler raise instead of swallowing publish errors.
    def handleError(self, record):
        raise

    mqtthandler.MQTTHandler.handleError = handleError

    # noinspection PyTypeChecker
    mqtt_logger = dict(host=mqtt_ip, port=1883, topic="topics/test",
                       loglevel=MON, qos=1, formatter=SenMLFormatter())

    hs = HyperStream(file_logger=False, console_logger=False,
                     mqtt_logger=mqtt_logger)

    with MqttClient() as client:
        # client.client.publish("topics/test", "{} ABC".format(utcnow()))
        logging.monitor("1234567890", extra=dict(n="blah"))
        sleep(1)
        # print(client.last_messages["topics/test"])
        msg = json.loads(client.last_messages["topics/test"])
        assert msg['e'][0]['n'] == 'blah'
        assert msg['e'][0]['v'] == '1234567890'
        assert msg['uid'] == 'hyperstream'

        logging.monitor("1234567890")
        sleep(1)
        # print(client.last_messages["topics/test"])
        msg = json.loads(client.last_messages["topics/test"])
        assert msg['e'][0]['n'] == 'default'
        assert msg['e'][0]['v'] == '1234567890'
        assert msg['uid'] == 'hyperstream'
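# For reference, the assertions above imply a SenML-style JSON envelope.
# A minimal sketch of that payload shape, inferred from the assertions
# alone (the exact fields SenMLFormatter emits are an assumption):
import json

payload = json.dumps({
    'uid': 'hyperstream',
    'e': [{'n': 'default', 'v': '1234567890'}],
})

msg = json.loads(payload)
assert msg['e'][0]['n'] == 'default'
assert msg['e'][0]['v'] == '1234567890'
assert msg['uid'] == 'hyperstream'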
def __exit__(self, exc_type, exc_val, exc_tb):
    if self.monitor:
        try:
            logging.monitor(self.name, extra=dict(n="workflow_end"))
        except AttributeError:
            pass
def __enter__(self):
    if self.monitor:
        try:
            logging.monitor(self.name, extra=dict(n="workflow_start"))
        except AttributeError:
            pass
    return self  # required so "with workflow as w" binds the instance