def processMessage(self, message):
    """Consume one raw queue message: hydrate it, run it through the
    processor, publish the result, and ack/reject the original message.

    Yields deferreds from the queue consumer so the caller can drive the
    acknowledge/publish I/O.
    """
    try:
        hydrated = hydrateQueueMessage(message, self._queueSchema)
    except Exception as e:
        # A message that cannot even be deserialized is unrecoverable;
        # ack it so it is not redelivered forever.
        log.error("Failed to hydrate raw event: %s", e)
        yield self.queueConsumer.acknowledge(message)
    else:
        try:
            zepRawEvent = self.processor.processMessage(hydrated)
            if log.isEnabledFor(logging.DEBUG):
                log.debug("Publishing event: %s", to_dict(zepRawEvent))
            yield self.queueConsumer.publishMessage(
                EXCHANGE_ZEP_ZEN_EVENTS,
                self._routing_key(zepRawEvent),
                zepRawEvent,
                declareExchange=False)
            yield self.queueConsumer.acknowledge(message)
        except DropEvent as e:
            # Intentional drop: not an error, ack so it isn't redelivered.
            if log.isEnabledFor(logging.DEBUG):
                log.debug('%s - %s' % (e.message, to_dict(e.event)))
            yield self.queueConsumer.acknowledge(message)
        except ProcessingException as e:
            # Known processing failure: log and reject back to the broker.
            log.error('%s - %s' % (e.message, to_dict(e.event)))
            log.exception(e)
            yield self.queueConsumer.reject(message)
        except Exception as e:
            # Unexpected failure: log traceback and reject.
            log.exception(e)
            yield self.queueConsumer.reject(message)
def __call__(self, message, proto):
    """Process one decoded message body (*proto*) and yield Publishable
    results; ack the transport *message* on success, reject on failure.

    processMessage may return a single result or a generator of results;
    each result is either a ready-made Publishable or an
    (exchange, routing_key, msg) triple.
    """
    try:
        result = self.processMessage(proto)
        if result:
            if not inspect.isgenerator(result):
                result = (result, )
            for msg in result:
                if isinstance(msg, Publishable):
                    yield msg
                else:
                    # BUG FIX: unpack the current item, not the whole
                    # `result` sequence -- unpacking `result` here raised
                    # ValueError for every non-Publishable item.
                    exchange, routing_key, msg = msg
                    yield Publishable(
                        msg, exchange=exchange, routingKey=routing_key)
        message.ack()
    except DropEvent as e:
        # Intentional drop: ack so the broker does not redeliver.
        if log.isEnabledFor(logging.DEBUG):
            log.debug('%s - %s' % (e.message, to_dict(e.event)))
        message.ack()
    except ProcessingException as e:
        log.error('%s - %s' % (e.message, to_dict(e.event)))
        log.exception(e)
        message.reject()
    except Exception as e:
        log.exception(e)
        message.reject()
def __call__(self, message, proto):
    """Process one decoded message body (*proto*) and yield Publishable
    results; ack the transport *message* on success, reject on failure.

    processMessage may return a single result or a generator of results;
    each result is either a ready-made Publishable or an
    (exchange, routing_key, msg) triple.
    """
    try:
        result = self.processMessage(proto)
        if result:
            if not inspect.isgenerator(result):
                result = (result,)
            for msg in result:
                if isinstance(msg, Publishable):
                    yield msg
                else:
                    # BUG FIX: unpack the current item, not the whole
                    # `result` sequence -- unpacking `result` here raised
                    # ValueError for every non-Publishable item.
                    exchange, routing_key, msg = msg
                    yield Publishable(
                        msg, exchange=exchange, routingKey=routing_key)
        message.ack()
    except DropEvent as e:
        # Intentional drop: ack so the broker does not redeliver.
        if log.isEnabledFor(logging.DEBUG):
            log.debug('%s - %s' % (e.message, to_dict(e.event)))
        message.ack()
    except ProcessingException as e:
        log.error('%s - %s' % (e.message, to_dict(e.event)))
        log.exception(e)
        message.reject()
    except Exception as e:
        log.exception(e)
        message.reject()
def processMessage(self, message, retry=True):
    """
    Handles a queue message, can call "acknowledge" on the Queue Consumer
    class when it is done with the message.

    Runs the event through every pipe (under a per-pipe timer and an
    overall timeout); on failure builds and returns a wrapper exception
    event instead. Returns the resulting ZepRawEvent protobuf.
    """
    self._synchronize_with_database()
    try:
        # extract event from message body
        zepevent = ZepRawEvent()
        zepevent.event.CopyFrom(message)
        log.debug("Received event: %s", to_dict(zepevent.event))
        eventContext = EventContext(log, zepevent)
        with Timeout(
            zepevent,
            self.PROCESS_EVENT_TIMEOUT,
            error_message='while processing event'
        ):
            for pipe in self._pipes:
                with self._pipe_timers[pipe.name]:
                    eventContext = pipe(eventContext)
                log.debug(
                    'After pipe %s, event context is %s',
                    pipe.name, to_dict(eventContext.zepRawEvent)
                )
                if eventContext.event.status == STATUS_DROPPED:
                    raise DropEvent(
                        'Dropped by %s' % pipe, eventContext.event
                    )
    except AttributeError:
        # _manager throws Attribute errors
        # if connection to zope is lost - reset and retry ONE time
        if retry:
            log.debug("Resetting connection to catalogs")
            self._manager.reset()
            # BUG FIX: return the retried result. Previously the
            # recursive call's return value was discarded and the
            # stale/partial eventContext from the failed first attempt
            # was returned instead.
            return self.processMessage(message, retry=False)
        else:
            raise
    except DropEvent:
        # we want these to propagate out
        raise
    except Exception as error:
        log.info(
            "Failed to process event, forward original raw event: %s",
            to_dict(zepevent.event)
        )
        # Pipes and plugins may raise ProcessingException's for their own
        # reasons. only log unexpected exceptions of other type
        # will insert stack trace in log
        if not isinstance(error, ProcessingException):
            log.exception(error)
        eventContext = self.create_exception_event(message, error)
    log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))
    return eventContext.zepRawEvent
def processMessage(self, message, retry=True):
    """
    Handles a queue message, can call "acknowledge" on the Queue Consumer
    class when it is done with the message.

    Runs the event through every pipe (under a per-pipe timer and an
    overall timeout); on failure builds and returns a wrapper exception
    event instead. Returns the resulting ZepRawEvent protobuf.
    """
    self._synchronize_with_database()
    try:
        # extract event from message body
        zepevent = ZepRawEvent()
        zepevent.event.CopyFrom(message)
        log.debug("Received event: %s", to_dict(zepevent.event))
        eventContext = EventContext(log, zepevent)
        with Timeout(zepevent, self.PROCESS_EVENT_TIMEOUT,
                     error_message='while processing event'):
            for pipe in self._pipes:
                with self._pipe_timers[pipe.name]:
                    eventContext = pipe(eventContext)
                log.debug('After pipe %s, event context is %s',
                          pipe.name, to_dict(eventContext.zepRawEvent))
                if eventContext.event.status == STATUS_DROPPED:
                    raise DropEvent('Dropped by %s' % pipe,
                                    eventContext.event)
    except AttributeError:
        # _manager throws Attribute errors
        # if connection to zope is lost - reset and retry ONE time
        if retry:
            log.debug("Resetting connection to catalogs")
            self._manager.reset()
            # BUG FIX: return the retried result. Previously the
            # recursive call's return value was discarded and the
            # stale/partial eventContext from the failed first attempt
            # was returned instead.
            return self.processMessage(message, retry=False)
        else:
            raise
    except DropEvent:
        # we want these to propagate out
        raise
    except Exception as error:
        log.info("Failed to process event, forward original raw event: %s",
                 to_dict(zepevent.event))
        # Pipes and plugins may raise ProcessingException's for their own
        # reasons. only log unexpected exceptions of other type
        # will insert stack trace in log
        if not isinstance(error, ProcessingException):
            log.exception(error)
        eventContext = self.create_exception_event(message, error)
    log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))
    return eventContext.zepRawEvent
def create_exception_event(self, message, exception):
    """Build a wrapper event that reports a processing failure,
    embedding the content of the original event.

    The wrapper is deliberately filed under event class '/' so it can
    never re-trigger the same failing class and loop forever.
    """
    # Keep a copy of the original event so its content can be reported.
    orig_zep_event = ZepRawEvent()
    orig_zep_event.event.CopyFrom(message)
    summary = 'Internal exception processing event: %r' % exception
    detail = 'Internal exception processing event: %r/%s' % (
        exception, to_dict(orig_zep_event.event))
    failure_event = {
        'uuid': guid.generate(),
        'created_time': int(time() * 1000),
        'fingerprint':
            '|'.join(['zeneventd', 'processMessage', repr(exception)]),
        # Don't send the *same* event class or we loop endlessly
        'eventClass': '/',
        'summary': summary,
        'message': detail,
        'severity': 4,
    }
    zep_raw_event = ZepRawEvent()
    zep_raw_event.event.CopyFrom(from_dict(Event, failure_event))
    event_context = EventContext(log, zep_raw_event)
    # Attribute the failure to this daemon rather than the original device.
    event_context.eventProxy.device = 'zeneventd'
    event_context.eventProxy.component = 'processMessage'
    return event_context
def apply(self, eventProxy, dmd):
    """Record this event occurrence in redis for later replay.

    No-op unless the 'eventReplayState' dmd property is enabled, or when
    the event already carries replay info.
    """
    if not dmd.getProperty('eventReplayState'):
        return
    if hasattr(eventProxy, 'eventReplayInfo'):
        return
    rawEvent = to_dict(eventProxy._zepRawEvent)
    # NOTE(review): an 'eventReplayFilter' property check existed here
    # but was disabled; confirm whether filtering should be re-enabled.
    client = redis.StrictRedis(
        dmd.getProperty('eventReplayRedisServer'),
        dmd.getProperty('eventReplayRedisPort'),
        db=dmd.getProperty('eventReplayRedisDB'),
    )
    event = rawEvent.get('event', {})
    uuid = event.get('uuid')
    if not uuid:
        return
    # Track insertion order in a list so the oldest entry can be evicted
    # once the replay limit is exceeded.
    queueLength = client.rpush('eventKeyList', uuid)
    client.hmset(uuid, dict(event=event))
    if queueLength > dmd.getProperty('eventReplayLimit'):
        oldestKey = client.lpop('eventKeyList')
        client.delete(oldestKey)
    eventProxy.eventReplayId = uuid
def updateEventSummaries(self, update, eventFilter=None, exclusionFilter=None, limit=None, timeout=None):
    """Apply *update* to the event summaries matched by the filters.

    Dict arguments are converted to their protobuf counterparts before
    the client call; returns (status, response-as-dict).
    """
    update_pb = from_dict(EventSummaryUpdate, update)
    if eventFilter is None:
        event_filter_pb = None
    else:
        event_filter_pb = from_dict(EventFilter, eventFilter)
    if exclusionFilter is None:
        exclusion_filter_pb = None
    else:
        exclusion_filter_pb = from_dict(EventFilter, exclusionFilter)
    status, response = self.client.updateEventSummaries(
        update_pb, event_filter_pb, exclusion_filter_pb,
        limit=limit, timeout=timeout)
    return status, to_dict(response)
def _getEventSummaries(self, source, offset, limit=1000):
    """Fetch one page of event summaries from *source* and normalize it.

    'events' is a lazy generator over the page's events; 'next_offset'
    is None when the protobuf response carries no further page.
    """
    response, content = source(offset=offset, limit=limit)
    if content.HasField('next_offset'):
        next_offset = content.next_offset
    else:
        next_offset = None
    return {
        'total': content.total,
        'limit': content.limit,
        'next_offset': next_offset,
        'events': (to_dict(event) for event in content.events),
    }
def reopenEventSummaries(self, eventFilter=None, exclusionFilter=None, limit=None, userName=None, timeout=None):
    """Reopen the event summaries matched by the filters on behalf of
    the given (or current) user; returns (status, response-as-dict)."""
    args = self._processArgs(eventFilter, exclusionFilter, userName)
    status, response = self.client.reopenEventSummaries(
        args.get('userUuid'),
        args.get('userName'),
        args.get('eventFilter'),
        args.get('exclusionFilter'),
        limit,
        timeout=timeout)
    return status, to_dict(response)
def getTrigger(self, uuid):
    """Return the trigger identified by *uuid* as a dict.

    Raises if the current user is not permitted to view the trigger.
    """
    user = getSecurityManager().getUser()
    trigger = self._guidManager.getObject(uuid)
    log.debug('Trying to fetch trigger: %s' % trigger.id)
    # Guard clause: refuse access before touching the triggers service.
    if not self.triggerPermissions.userCanViewTrigger(user, trigger):
        log.warning('User not authorized to view this trigger: %s' % trigger.id)
        raise Exception('User not authorized to view this trigger: %s' % trigger.id)
    response, trigger = self.triggers_service.getTrigger(uuid)
    return to_dict(trigger)
def getTriggers(self):
    """Return the triggers visible to the current user, or [] when the
    service response carries none."""
    self.synchronize()
    user = getSecurityManager().getUser()
    response, trigger_set = self.triggers_service.getTriggers()
    trigger_set = to_dict(trigger_set)
    if 'triggers' not in trigger_set:
        return []
    return self.triggerPermissions.findTriggers(
        user, self._guidManager, trigger_set['triggers'])
def processMessage(self, message):
    """
    Handles a queue message, can call "acknowledge" on the Queue Consumer
    class when it is done with the message.

    Yields a single Publishable wrapping the processed raw event.
    """
    zepRawEvent = self.processor.processMessage(message)
    if log.isEnabledFor(logging.DEBUG):
        log.debug("Publishing event: %s", to_dict(zepRawEvent))
    yield Publishable(
        zepRawEvent,
        exchange=self._dest_exchange,
        routingKey=self._routing_key(zepRawEvent))
def updateContext(self, signal, context):
    '''
    Provide additional context to notifications.
    '''
    # Get information about the event summary.
    event = to_dict(signal)['event']
    # Get information about the event occurrence.
    # BUG FIX: the original read from an undefined name `event_dict`
    # (NameError at runtime); the hydrated signal dict is bound to
    # `event` above.
    occurrence = event['occurrence'][0]
    # Add something to the context.
    context['utcnow'] = str(datetime.datetime.utcnow())
def getTriggerList(self):
    """
    Retrieve a list of all triggers by uuid and name. This is
    used by the UI to render triggers that a user may not have
    permission to otherwise view, edit or manage.
    """
    response, trigger_set = self.triggers_service.getTriggers()
    trigger_set = to_dict(trigger_set)
    triggerList = [
        dict(uuid=t['uuid'], name=t['name'])
        for t in trigger_set.get('triggers', [])
    ]
    return sorted(triggerList, key=lambda k: k['name'])
def reopenEventSummaries(self, eventFilter=None, exclusionFilter=None, limit=None, userName=None, timeout=None):
    """Reopen matched event summaries, converting dict filters to
    protobufs and resolving the acting user; returns
    (status, response-as-dict)."""
    if eventFilter:
        eventFilter = from_dict(EventFilter, eventFilter)
    if exclusionFilter:
        exclusionFilter = from_dict(EventFilter, exclusionFilter)
    # Resolve the acting user: look up the uuid for an explicit name,
    # otherwise fall back to the currently logged-in user.
    if userName:
        userUuid = self._getUserUuid(userName)
    else:
        userUuid, userName = self._findUserInfo()
    status, response = self.client.reopenEventSummaries(
        userUuid, userName, eventFilter, exclusionFilter, limit,
        timeout=timeout)
    return status, to_dict(response)
def __call__(self, eventContext):
    """When the event's device was cleared, purge every device-derived
    field (titles, uuids, tags, details) so the event can be
    re-identified from scratch. Returns the (possibly scrubbed) context.
    """
    evtproxy = eventContext.eventProxy
    if eventContext.deviceObject is None:
        # Clear title fields
        actor = eventContext.event.actor
        actor.ClearField(EventField.Actor.ELEMENT_TITLE)
        actor.ClearField(EventField.Actor.ELEMENT_UUID)
        actor.ClearField(EventField.Actor.ELEMENT_SUB_TITLE)
        actor.ClearField(EventField.Actor.ELEMENT_SUB_UUID)
        eventContext.log.debug(
            "device was cleared, must purge references in current event: %s"
            % to_dict(eventContext._zepRawEvent))
        # Collect every device-specific detail name: the organizer type
        # names produced by the taggers, the tagger keys themselves, and
        # the fixed per-device detail keys.
        organizerTypes = list(
            type for function, type in self.DEVICE_TAGGERS.values())
        detailNames = set(
            organizerTypes + self.DEVICE_TAGGERS.keys() + [
                EventProxy.DEVICE_IP_ADDRESS_DETAIL_KEY,
                EventProxy.DEVICE_PRIORITY_DETAIL_KEY,
                EventProxy.PRODUCTION_STATE_DETAIL_KEY,
            ])
        # clear device context details
        for detail in detailNames:
            evtproxy.resetReadOnly(detail)
            if detail in evtproxy.details:
                del evtproxy.details[detail]
        # clear device-dependent tags
        evtproxy.tags.clearType(self.DEVICE_TAGGERS.keys())
        eventContext.log.debug(
            "reset device values in event before reidentifying: %s"
            % to_dict(eventContext._zepRawEvent))
    return eventContext
def getTriggerList(self):
    """
    Retrieve a list of all triggers by uuid and name. This is
    used by the UI to render triggers that a user may not have
    permission to otherwise view, edit or manage.
    """
    response, trigger_set = self.triggers_service.getTriggers()
    trigger_set = to_dict(trigger_set)
    triggerList = []
    for t in trigger_set.get('triggers', []):
        triggerList.append({'uuid': t['uuid'], 'name': t['name']})
    # Present the triggers alphabetically by name.
    return sorted(triggerList, key=lambda entry: entry['name'])
def getTriggerList(self):
    """
    Retrieve a list of all triggers by uuid and name. This is
    used by the UI to render triggers that a user may not have
    permission to otherwise view, edit or manage.
    """
    response, trigger_set = self.triggers_service.getTriggers()
    trigger_set = to_dict(trigger_set)
    user = getSecurityManager().getUser()
    triggerList = []
    for entry in trigger_set.get('triggers', []):
        # Only include triggers the current user may actually view.
        trigger = self._guidManager.getObject(entry['uuid'])
        if self.triggerPermissions.userCanViewTrigger(user, trigger):
            triggerList.append(
                {'uuid': entry['uuid'], 'name': entry['name']})
    return sorted(triggerList, key=lambda k: k['name'])
def _initDetails(self):
    """Load event-detail definitions from the config service and build
    the sort map and detail lookup tables."""
    self._sortMap = dict(ZepFacade.DEFAULT_SORT_MAP)
    response, content = self._configClient.getDetails()
    detailsResponseDict = to_dict(content)
    self._details = detailsResponseDict.get('details', [])
    self._unmappedDetails = []
    self._detailsMap = {}
    for detail in self._details:
        key = detail['key']
        sortField = {
            'field': EventSort.DETAIL,
            'detail_key': key,
        }
        # If the new key has a legacy name, register that too so sorting
        # works with either the old or the new name.
        oldName = ZepFacade.ZENOSS_DETAIL_NEW_TO_OLD_MAPPING.get(key, None)
        if oldName:
            self._sortMap[oldName.lower()] = sortField
        else:
            self._unmappedDetails.append(detail)
        self._sortMap[key.lower()] = sortField
        self._detailsMap[key] = detail
    self._initialized = True
def pickleFailedEvent(self, evt):
    """Persist a failed-transform event to a pickle file, pruning the
    oldest pickles so at most `maxpickle` files are kept."""
    obj = zope.component.getUtility(IDaemonConfig, 'zeneventd_config')
    config = obj.getConfig()
    # By default there are 100 pickle files in failed_transformed_events folder.
    # To change this value set maxpickle value in /opt/zenoss/etc/zeneventd.conf
    max_pickle = config.maxpickle - 1
    # By default the path to save pickle files is
    # $ZENHOME/var/zeneventd/failed_transformed_events.
    # To change this value set pickledir value in /opt/zenoss/etc/zeneventd.conf
    pickle_dir = config.pickledir
    if not os.path.exists(pickle_dir):
        os.makedirs(pickle_dir)
    # Collect (mtime, name) pairs and sort newest-first so everything
    # beyond the first `max_pickle` entries is the oldest and gets pruned.
    # FIX: loop variables no longer shadow the `file` builtin or the
    # `time` module name (the original `for time, file in ...` clobbered
    # them for the rest of the method).
    file_list = []
    pickles_count = 0
    for name in os.listdir(pickle_dir):
        filepath = os.path.join(pickle_dir, name)
        modified = os.stat(filepath).st_mtime
        file_list.append((modified, name))
    file_list.sort(reverse=True)
    for _mtime, name in file_list[max_pickle:]:
        filepath = os.path.join(pickle_dir, name)
        if os.path.isfile(filepath):
            if pickles_count == 0:
                log.info("Deleting old pickle files ...")
            try:
                os.remove(filepath)
                pickles_count += 1
            except Exception as e:
                log.exception("Unable to delete %s: %s", filepath, e)
    if pickles_count:
        log.info("Deleted %s old pickle files." % pickles_count)
    # FIX: build the path portably instead of string concatenation.
    filename = os.path.join(
        pickle_dir, '%s_%s.pickle' % (evt.device, evt.evid))
    try:
        with open(filename, 'w') as f:
            evtDict = to_dict(evt._event)
            pickle.dump(evtDict, f)
    except Exception as ex:
        log.exception(
            "Unable to store evt pickle data to %s: %s", filename, ex)
def pickleFailedEvent(self, evt):
    """Persist a failed-transform event to a pickle file, pruning the
    oldest pickles so at most `maxpickle` files are kept."""
    obj = zope.component.getUtility(IDaemonConfig, 'zeneventd_config')
    config = obj.getConfig()
    # By default there are 100 pickle files in failed_transformed_events folder.
    # To change this value set maxpickle value in /opt/zenoss/etc/zeneventd.conf
    max_pickle = config.maxpickle - 1
    # By default the path to save pickle files is
    # $ZENHOME/var/zeneventd/failed_transformed_events.
    # To change this value set pickledir value in /opt/zenoss/etc/zeneventd.conf
    pickle_dir = config.pickledir
    if not os.path.exists(pickle_dir):
        os.makedirs(pickle_dir)
    # Collect (mtime, name) pairs and sort newest-first so everything
    # beyond the first `max_pickle` entries is the oldest and gets pruned.
    # FIX: loop variables no longer shadow the `file` builtin or the
    # `time` module name (the original `for time, file in ...` clobbered
    # them for the rest of the method).
    file_list = []
    pickles_count = 0
    for name in os.listdir(pickle_dir):
        filepath = os.path.join(pickle_dir, name)
        modified = os.stat(filepath).st_mtime
        file_list.append((modified, name))
    file_list.sort(reverse=True)
    for _mtime, name in file_list[max_pickle:]:
        filepath = os.path.join(pickle_dir, name)
        if os.path.isfile(filepath):
            if pickles_count == 0:
                log.info("Deleting old pickle files ...")
            try:
                os.remove(filepath)
                pickles_count += 1
            except Exception as e:
                log.exception("Unable to delete %s: %s", filepath, e)
    if pickles_count:
        log.info("Deleted %s old pickle files." % pickles_count)
    # FIX: build the path portably instead of string concatenation.
    filename = os.path.join(
        pickle_dir, '%s_%s.pickle' % (evt.device, evt.evid))
    try:
        with open(filename, 'w') as f:
            evtDict = to_dict(evt._event)
            pickle.dump(evtDict, f)
    except Exception as ex:
        log.exception(
            "Unable to store evt pickle data to %s: %s", filename, ex)
def testToDict(self):
    """Round-trip check: to_dict output must mirror the protobuf message."""
    self._compareProtoDict(to_dict(self.message), self.message)
def __str__(self):
    # Render the wrapped raw-event protobuf as a dict for readability.
    return "{_zepRawEvent:%s}" % (to_dict(self._zepRawEvent),)
def nextEventSummaryUpdate(self, next_request):
    """Continue a paged summary update using the *next_request* dict
    returned by the previous call; returns (status, response-as-dict)."""
    request_pb = from_dict(EventSummaryUpdateRequest, next_request)
    status, response = self.client.nextEventSummaryUpdate(request_pb)
    return status, to_dict(response)
def getEventSummary(self, uuid):
    """Fetch one event summary by UUID and return it as a plain dict."""
    response, summary = self.client.getEventSummary(uuid)
    return to_dict(summary)
def __call__(self, eventContext):
    """When the event's device was cleared, purge every device-derived
    field (titles, uuids, tags, details) so the event can be
    re-identified from scratch. Returns the (possibly scrubbed) context.
    """
    evtproxy = eventContext.eventProxy
    if eventContext.deviceObject is None:
        # Clear title fields
        actor = eventContext.event.actor
        actor.ClearField(EventField.Actor.ELEMENT_TITLE)
        actor.ClearField(EventField.Actor.ELEMENT_UUID)
        actor.ClearField(EventField.Actor.ELEMENT_SUB_TITLE)
        actor.ClearField(EventField.Actor.ELEMENT_SUB_UUID)
        eventContext.log.debug(
            "device was cleared, must purge references in current event: %s"
            % to_dict(eventContext._zepRawEvent))
        # Collect every device-specific detail name: the organizer type
        # names produced by the taggers, the tagger keys themselves, and
        # the fixed per-device detail keys.
        organizerTypes = list(
            type for function, type in self.DEVICE_TAGGERS.values())
        detailNames = set(
            organizerTypes + self.DEVICE_TAGGERS.keys() + [
                EventProxy.DEVICE_IP_ADDRESS_DETAIL_KEY,
                EventProxy.DEVICE_PRIORITY_DETAIL_KEY,
                EventProxy.PRODUCTION_STATE_DETAIL_KEY,
            ])
        # clear device context details
        for detail in detailNames:
            evtproxy.resetReadOnly(detail)
            if detail in evtproxy.details:
                del evtproxy.details[detail]
        # clear device-dependent tags
        evtproxy.tags.clearType(self.DEVICE_TAGGERS.keys())
        eventContext.log.debug(
            "reset device values in event before reidentifying: %s"
            % to_dict(eventContext._zepRawEvent))
    return eventContext
def processMessage(self, message):
    """
    Handles a queue message, can call "acknowledge" on the Queue Consumer
    class when it is done with the message.

    Periodically syncs the database connection, runs the event through
    every pipe (retrying once on a lost zope connection), and on any
    other failure returns a wrapper exception event instead.
    """
    if self.SYNC_EVERY_EVENT:
        doSync = True
    else:
        # sync() db if it has been longer than self.syncInterval since
        # the last time
        currentTime = datetime.now()
        doSync = currentTime > self.nextSync
        self.nextSync = currentTime + self.syncInterval
    if doSync:
        self.dmd._p_jar.sync()
    try:
        retry = True
        processed = False
        while not processed:
            try:
                # extract event from message body
                zepevent = ZepRawEvent()
                zepevent.event.CopyFrom(message)
                if log.isEnabledFor(logging.DEBUG):
                    log.debug("Received event: %s", to_dict(zepevent.event))
                eventContext = EventContext(log, zepevent)
                for pipe in self._pipes:
                    eventContext = pipe(eventContext)
                    if log.isEnabledFor(logging.DEBUG):
                        log.debug('After pipe %s, event context is %s' % (
                            pipe.name, to_dict(eventContext.zepRawEvent)
                        ))
                    if eventContext.event.status == STATUS_DROPPED:
                        raise DropEvent(
                            'Dropped by %s' % pipe, eventContext.event)
                processed = True
            except AttributeError:
                # _manager throws Attribute errors if connection to zope
                # is lost - reset and retry ONE time
                if retry:
                    retry = False
                    log.debug("Resetting connection to catalogs")
                    self._manager.reset()
                else:
                    raise
    except DropEvent:
        # we want these to propagate out
        raise
    except Exception as e:
        log.info("Failed to process event, forward original raw event: %s",
                 to_dict(zepevent.event))
        # Pipes and plugins may raise ProcessingException's for their own
        # reasons - only log unexpected exceptions of other type (will
        # insert stack trace in log)
        if not isinstance(e, ProcessingException):
            log.exception(e)
        # construct wrapper event to report this event processing failure
        # (including content of the original event)
        origzepevent = ZepRawEvent()
        origzepevent.event.CopyFrom(message)
        failReportEvent = dict(
            uuid=guid.generate(),
            created_time=int(time.time() * 1000),
            fingerprint='|'.join(['zeneventd', 'processMessage', repr(e)]),
            # Don't send the *same* event class or we trash and crash
            # endlessly
            eventClass='/',
            summary='Internal exception processing event: %r' % e,
            message='Internal exception processing event: %r/%s' % (
                e, to_dict(origzepevent.event)),
            severity=4,
        )
        zepevent = ZepRawEvent()
        zepevent.event.CopyFrom(from_dict(Event, failReportEvent))
        eventContext = EventContext(log, zepevent)
        eventContext.eventProxy.device = 'zeneventd'
        eventContext.eventProxy.component = 'processMessage'
    if log.isEnabledFor(logging.DEBUG):
        log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))
    return eventContext.zepRawEvent
def getHeartbeats(self, monitor=None):
    """Return the list of daemon heartbeats, optionally restricted to a
    single *monitor*; [] when the response carries none."""
    response, heartbeats = self.heartbeatClient.getHeartbeats(monitor=monitor)
    return to_dict(heartbeats).get('heartbeats', [])