def on_start(self):
    """Launch the periodic persister greenlet, subscribe to conversation
    messages and open the conversation repository."""
    interval = self.persist_interval
    # Background greenlet that periodically drains the queue to the datastore.
    self._persist_greenlet = spawn(self._trigger_func, interval)
    log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)
    # Attach the conversation subscriber.
    subscriber = ConvSubscriber(callback=self._on_message)
    subscriber.start()
    self.conv_sub = subscriber
    # Repository that receives the persisted conversations.
    self.conv_repository = ConvRepository()
def start(self):
    """Reset the in-memory message log and start both subscribers."""
    self.msg_log = []
    self.event_sub = None
    self.conv_sub = None
    # Subscribe to conversation traffic.
    conv_sub = ConvSubscriber(callback=self._msg_received)
    conv_sub.start()
    self.conv_sub = conv_sub
    # Subscribe to every event on the bus.
    event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                callback=self._event_received,
                                queue_name="event_persister")
    event_sub.start()
    self.event_sub = event_sub
    self.started = True
class InteractionObserver(object):
    """
    Observes ongoing interactions in the Exchange. Logs them to disk and makes
    them available in the local container (for development purposes) and on request.
    """

    def start(self):
        """Reset the message log and start conversation and event subscribers."""
        self.msg_log = []
        self.event_sub = None
        self.conv_sub = None
        # Conv subscription
        self.conv_sub = ConvSubscriber(callback=self._msg_received)
        self.conv_sub.start()
        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._event_received,
                                         queue_name="event_persister")
        self.event_sub.start()
        self.started = True

    def stop(self):
        """Stop both subscribers and mark the observer as stopped."""
        # Stop event subscriber
        self.event_sub.stop()
        # Stop conv subscriber
        self.conv_sub.stop()
        self.started = False

    def _msg_received(self, msg, *args, **kwargs):
        # args[0] carries the message headers dict.
        self.log_message(args[0])

    def _event_received(self, event, *args, **kwargs):
        # Copy selected event attributes into the header dict before logging.
        if 'origin' in event:
            args[0]['origin'] = event.origin
        if 'origin_type' in event:
            args[0]['origin_type'] = event.origin_type
        if 'sub_type' in event:
            args[0]['sub_type'] = event.sub_type
        self.log_message(args[0], True)

    def log_message(self, mhdrs, evmsg=False):
        """
        Append one message-header record to the in-memory log.

        @param mhdrs  Dict of message headers; '_content_type' is filled from 'format'.
        @param evmsg  This message is an event, render it as such!
        """
        mhdrs['_content_type'] = mhdrs.get('format', None)
        # TUPLE: timestamp (MS), type, boolean if its an event
        msg_rec = (get_ion_ts(), mhdrs, evmsg)
        self.msg_log.append(msg_rec)
        # Truncate if too long in increments of slice
        if len(self.msg_log) > MAX_MSGLOG + SLICE:
            self.msg_log = self.msg_log[SLICE:]

    def _get_data(self, msglog, response_msgs):
        """
        Provides msc data in python format, to be converted either to msc text
        or to json for use with msc web monitor.

        Returns a list of hashes in the form:
        { to, from, content, type, ts, error (boolean), to_raw, from_raw, topline }

        Note: pops matched conversation ids out of response_msgs and records new
        ones; the (possibly mutated) dict is returned alongside the data.
        """
        msgdata = []
        for msgtup in msglog:
            datatemp = {"to": None, "from": None, "content": None,
                        "type": None, "ts": None, "error": False}
            msg = msgtup[1]
            convid = msg.get('conv-id', None)
            if convid in response_msgs:
                # Reply leg of a known conversation: direction was swapped when recorded.
                response = response_msgs.pop(convid)
                sname = response.get('sender')
                rname = response.get('receiver')
            else:
                if msg.get('sender-type', 'unknown') == 'service':
                    sname = msg.get('sender-service',
                                    msg.get('sender-name', msg.get('sender', 'unknown')))
                else:
                    sname = msg.get('sender-name', msg.get('sender', 'unknown'))
                rname = msg.get('receiver', 'unknown')
                if convid is not None:
                    # Remember the expected reply direction for this conversation.
                    response_msgs[convid] = {'sender': rname, 'receiver': sname}
            # from_raw is displayed as the header on the webpage
            datatemp["from_raw"] = sname
            sname = self._sanitize(sname)
            datatemp["from"] = sname
            datatemp["ts"] = msg.get("ts", "Unknown")
            datatemp["to_raw"] = rname
            rname = self._sanitize(rname)
            datatemp["to"] = rname
            if msgtup[2]:
                # this is an EVENT, show it as a box!
                datatemp["type"] = "events"
                # todo: not sure if we can hard code the splitting mechanism like done below !!
                # Hoisted: the routing-key parse was duplicated four times.
                ev_source = "events," + (msg.get('routing_key').split('._._.')[0]).split('.')[1]
                datatemp["from"] = ev_source
                datatemp["from_raw"] = ev_source
                datatemp["to"] = ev_source
                datatemp["to_raw"] = ev_source
                evlabel = "%s \nOrigin: %s" % (msg.get('routing_key'), msg.get('origin'))
                datatemp["content"] = evlabel
                datatemp["topline"] = msg.get('sub_type', '') + " " + msg.get('origin_type', '')
                if datatemp['topline'] == " ":
                    datatemp['topline'] = msg.get('origin')
            else:
                mlabel = "%s\n(%s->%s)\n<%s>" % (msg.get('op', None),
                                                 sname.rsplit(",", 1)[-1],
                                                 rname.rsplit(",", 1)[-1],
                                                 msg.get('_content_type', '?content-type?'))
                datatemp["content"] = mlabel
                datatemp["topline"] = mlabel.split("\n")[0]
                if msg.get('protocol', None) == 'rpc':
                    datatemp["type"] = "rpcres"
                    performative = msg.get('performative', None)
                    if performative == 'request':
                        datatemp["type"] = "rpcreq"
                    elif performative == 'timeout':
                        pass  # timeout, unfortunately you don't see this @TODO
                    if performative in ('failure', 'error'):
                        datatemp["error"] = True
                else:
                    # non rpc -> perhaps a data message?
                    datatemp["type"] = "data"
            msgdata.append(datatemp)
        return msgdata, response_msgs

    @classmethod
    def _sanitize(cls, input):
        """Replace '.' and '-' with '_' for use as msc identifiers."""
        # str.replace instead of string.replace: the string-module function is
        # deprecated in Python 2 and removed in Python 3.
        return input.replace(".", "_").replace("-", "_")
class ConversationPersister(StandaloneProcess):
    """
    Subscribes to conversation messages and persists them to the conversation
    repository in periodic batches driven by a greenlet.
    """

    def on_init(self):
        # get the fields from the header that need to be logged;
        # an empty list means "log all headers".
        self.header_fields = CFG.get_safe('container.messaging.conversation_log.header_fields', [])
        # Time in between event persists
        self.persist_interval = 1.0
        # Holds received events FIFO
        self.conv_queue = Queue()
        # Temporarily holds list of events to persist while datastore operation not yet completed
        self.convs_to_persist = None
        # bookkeeping for timeout greenlet
        self._persist_greenlet = None
        self._terminate_persist = Event()  # when set, exits the timeout greenlet
        # The event subscriber
        self.conv_sub = None
        # The conv repository
        self.conv_repository = None

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)
        # Conv subscription to as many as it takes
        self.conv_sub = ConvSubscriber(callback=self._on_message)
        self.conv_sub.start()
        # Open repository
        self.conv_repository = ConvRepository()

    def on_quit(self):
        # Stop event subscriber
        self.conv_sub.stop()
        # tell the trigger greenlet we're done
        self._terminate_persist.set()
        # wait on the greenlet to finish cleanly
        self._persist_greenlet.join(timeout=10)
        # Close the repository
        self.conv_repository.close()

    def _on_message(self, msg, *args, **kwargs):
        """Queue an incoming conversation message (headers in args[0]) for persistence."""
        headers = args[0]
        unknown = 'unknown'
        conv = {}
        conv['sender'] = headers.get('sender', unknown)
        conv['recipient'] = headers.get('receiver', unknown)
        conv['conversation_id'] = headers.get('conv-id', unknown)
        conv['protocol'] = headers.get('protocol', unknown)
        if not self.header_fields:
            conv['headers'] = headers
        else:
            # BUG FIX: previously headers.get([key], unknown) -- a list is
            # unhashable, so dict.get raised TypeError whenever header_fields
            # was configured. The key itself must be passed.
            conv['headers'] = {}
            for key in self.header_fields:
                conv['headers'][key] = headers.get(key, unknown)
        conv_msg = bootstrap.IonObject("ConversationMessage", conv)
        self.conv_queue.put(conv_msg)

    def _trigger_func(self, persist_interval):
        """Greenlet body: drain and persist the queue every persist_interval seconds."""
        log.debug('Starting conv persister thread with persist_interval=%s', persist_interval)
        # Event.wait returns False on timeout (and True when set in on_quit),
        # so we use this to both exit cleanly and do our timeout in a loop
        while not self._terminate_persist.wait(timeout=persist_interval):
            try:
                # range instead of xrange keeps this working on Python 2 and 3.
                num_pending = self.conv_queue.qsize()
                self.convs_to_persist = [self.conv_queue.get() for _ in range(num_pending)]
                self._persist_convs(self.convs_to_persist)
                self.convs_to_persist = None
            except Exception:
                log.exception("Failed to persist received conversations")
                return False

    def _persist_convs(self, conv_list):
        """Write a batch of conversations to the repository; no-op on empty batch."""
        if conv_list:
            self.conv_repository.put_convs(conv_list)
class InteractionObserver(object):
    """
    Observes ongoing interactions in the Exchange. Logs them to disk and makes
    them available in the local container (for development purposes) and on request.
    """

    def start(self):
        """Clear the message log and subscribe to conversations and events."""
        self.msg_log = []
        self.event_sub = None
        self.conv_sub = None
        # Conv subscription
        self.conv_sub = ConvSubscriber(callback=self._msg_received)
        self.conv_sub.start()
        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._event_received,
                                         queue_name="event_persister")
        self.event_sub.start()
        self.started = True

    def stop(self):
        """Stop the event and conversation subscribers."""
        # Stop event subscriber
        self.event_sub.stop()
        # Stop conv subscriber
        self.conv_sub.stop()
        self.started = False

    def _msg_received(self, msg, *args, **kwargs):
        # Message headers arrive in args[0].
        self.log_message(args[0])

    def _event_received(self, event, *args, **kwargs):
        # Merge relevant event attributes into the headers, then log as event.
        if 'origin' in event:
            args[0]['origin'] = event.origin
        if 'origin_type' in event:
            args[0]['origin_type'] = event.origin_type
        if 'sub_type' in event:
            args[0]['sub_type'] = event.sub_type
        self.log_message(args[0], True)

    def log_message(self, mhdrs, evmsg=False):
        """
        Record one header dict in the rolling in-memory message log.

        @param mhdrs  Header dict; '_content_type' is derived from 'format'.
        @param evmsg  This message is an event, render it as such!
        """
        mhdrs['_content_type'] = mhdrs.get('format', None)
        # TUPLE: timestamp (MS), type, boolean if its an event
        msg_rec = (get_ion_ts(), mhdrs, evmsg)
        self.msg_log.append(msg_rec)
        # Truncate if too long in increments of slice
        if len(self.msg_log) > MAX_MSGLOG + SLICE:
            self.msg_log = self.msg_log[SLICE:]

    def _get_data(self, msglog, response_msgs):
        """
        Provides msc data in python format, to be converted either to msc text
        or to json for use with msc web monitor.

        Returns a list of hashes in the form:
        { to, from, content, type, ts, error (boolean), to_raw, from_raw, topline }

        Matched conversation ids are popped from response_msgs; unmatched request
        legs are added to it. The mutated dict is returned as the second value.
        """
        msgdata = []
        for msgtup in msglog:
            datatemp = {"to": None, "from": None, "content": None,
                        "type": None, "ts": None, "error": False}
            msg = msgtup[1]
            convid = msg.get('conv-id', None)
            if convid in response_msgs:
                # Second leg of a conversation we have seen: directions swap.
                response = response_msgs.pop(convid)
                sname = response.get('sender')
                rname = response.get('receiver')
            else:
                if msg.get('sender-type', 'unknown') == 'service':
                    sname = msg.get('sender-service',
                                    msg.get('sender-name', msg.get('sender', 'unknown')))
                else:
                    sname = msg.get('sender-name', msg.get('sender', 'unknown'))
                rname = msg.get('receiver', 'unknown')
                if convid is not None:
                    # Record the reply direction expected for this conv id.
                    response_msgs[convid] = {'sender': rname, 'receiver': sname}
            # from_raw is displayed as the header on the webpage
            datatemp["from_raw"] = sname
            sname = self._sanitize(sname)
            datatemp["from"] = sname
            datatemp["ts"] = msg.get("ts", "Unknown")
            datatemp["to_raw"] = rname
            rname = self._sanitize(rname)
            datatemp["to"] = rname
            if msgtup[2]:
                # this is an EVENT, show it as a box!
                datatemp["type"] = "events"
                # todo: not sure if we can hard code the splitting mechanism like done below !!
                # Compute the event source once (was duplicated four times).
                ev_source = "events," + (msg.get('routing_key').split('._._.')[0]).split('.')[1]
                datatemp["from"] = ev_source
                datatemp["from_raw"] = ev_source
                datatemp["to"] = ev_source
                datatemp["to_raw"] = ev_source
                evlabel = "%s \nOrigin: %s" % (msg.get('routing_key'), msg.get('origin'))
                datatemp["content"] = evlabel
                datatemp["topline"] = msg.get('sub_type', '') + " " + msg.get('origin_type', '')
                if datatemp['topline'] == " ":
                    datatemp['topline'] = msg.get('origin')
            else:
                mlabel = "%s\n(%s->%s)\n<%s>" % (msg.get('op', None),
                                                 sname.rsplit(",", 1)[-1],
                                                 rname.rsplit(",", 1)[-1],
                                                 msg.get('_content_type', '?content-type?'))
                datatemp["content"] = mlabel
                datatemp["topline"] = mlabel.split("\n")[0]
                if msg.get('protocol', None) == 'rpc':
                    datatemp["type"] = "rpcres"
                    performative = msg.get('performative', None)
                    if performative == 'request':
                        datatemp["type"] = "rpcreq"
                    elif performative == 'timeout':
                        pass  # timeout, unfortunately you don't see this @TODO
                    if performative in ('failure', 'error'):
                        datatemp["error"] = True
                else:
                    # non rpc -> perhaps a data message?
                    datatemp["type"] = "data"
            msgdata.append(datatemp)
        return msgdata, response_msgs

    @classmethod
    def _sanitize(cls, input):
        """Make a name msc-safe by mapping '.' and '-' to '_'."""
        # string.replace was deprecated (Py2) and removed (Py3); use the
        # str method instead.
        return input.replace(".", "_").replace("-", "_")
class ConversationPersister(StandaloneProcess):
    """
    Collects conversation messages from a subscriber into a FIFO queue and
    flushes them to the conversation repository on a fixed interval.
    """

    def on_init(self):
        # get the fields from the header that need to be logged
        # (empty list -> keep the full header dict)
        self.header_fields = CFG.get_safe(
            'container.messaging.conversation_log.header_fields', [])
        # Time in between event persists
        self.persist_interval = 1.0
        # Holds received events FIFO
        self.conv_queue = Queue()
        # Temporarily holds list of events to persist while datastore operation not yet completed
        self.convs_to_persist = None
        # bookkeeping for timeout greenlet
        self._persist_greenlet = None
        self._terminate_persist = Event()  # when set, exits the timeout greenlet
        # The event subscriber
        self.conv_sub = None
        # The conv repository
        self.conv_repository = None

    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._trigger_func, self.persist_interval)
        log.debug('Publisher Greenlet started in "%s"' % self.__class__.__name__)
        # Conv subscription to as many as it takes
        self.conv_sub = ConvSubscriber(callback=self._on_message)
        self.conv_sub.start()
        # Open repository
        self.conv_repository = ConvRepository()

    def on_quit(self):
        # Stop event subscriber
        self.conv_sub.stop()
        # tell the trigger greenlet we're done
        self._terminate_persist.set()
        # wait on the greenlet to finish cleanly
        self._persist_greenlet.join(timeout=10)
        # Close the repository
        self.conv_repository.close()

    def _on_message(self, msg, *args, **kwargs):
        """Build a ConversationMessage from the headers (args[0]) and enqueue it."""
        headers = args[0]
        unknown = 'unknown'
        conv = {}
        conv['sender'] = headers.get('sender', unknown)
        conv['recipient'] = headers.get('receiver', unknown)
        conv['conversation_id'] = headers.get('conv-id', unknown)
        conv['protocol'] = headers.get('protocol', unknown)
        if not self.header_fields:
            conv['headers'] = headers
        else:
            conv['headers'] = {}
            for key in self.header_fields:
                # BUG FIX: was headers.get([key], unknown); a list cannot be a
                # dict key, so this raised TypeError for any configured field.
                conv['headers'][key] = headers.get(key, unknown)
        conv_msg = bootstrap.IonObject("ConversationMessage", conv)
        self.conv_queue.put(conv_msg)

    def _trigger_func(self, persist_interval):
        """Greenlet loop: every persist_interval seconds, drain and persist the queue."""
        log.debug('Starting conv persister thread with persist_interval=%s',
                  persist_interval)
        # Event.wait returns False on timeout (and True when set in on_quit),
        # so we use this to both exit cleanly and do our timeout in a loop
        while not self._terminate_persist.wait(timeout=persist_interval):
            try:
                # range works on Python 2 and 3 (xrange is Py2-only).
                pending = self.conv_queue.qsize()
                self.convs_to_persist = [self.conv_queue.get() for _ in range(pending)]
                self._persist_convs(self.convs_to_persist)
                self.convs_to_persist = None
            except Exception:
                log.exception("Failed to persist received conversations")
                return False

    def _persist_convs(self, conv_list):
        """Persist a non-empty batch of conversations to the repository."""
        if conv_list:
            self.conv_repository.put_convs(conv_list)