Example #1
class EventPipelineProcessor(object):

    SYNC_EVERY_EVENT = False
    PROCESS_EVENT_TIMEOUT = 0

    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (EventPluginPipe(self._manager, IPreEventPlugin,
                                       'PreEventPluginPipe'),
                       CheckInputPipe(self._manager),
                       IdentifierPipe(self._manager),
                       AddDeviceContextAndTagsPipe(self._manager),
                       TransformAndReidentPipe(
                           self._manager, TransformPipe(self._manager), [
                               UpdateDeviceContextAndTagsPipe(self._manager),
                               IdentifierPipe(self._manager),
                               AddDeviceContextAndTagsPipe(self._manager),
                           ]),
                       AssignDefaultEventClassAndTagPipe(self._manager),
                       FingerprintPipe(self._manager),
                       SerializeContextPipe(self._manager),
                       EventPluginPipe(self._manager, IPostEventPlugin,
                                       'PostEventPluginPipe'),
                       ClearClassRefreshPipe(self._manager),
                       CheckHeartBeatPipe(self._manager))
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec;
            # this helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5

    def processMessage(self, message, retry=True):
        """
        Handles a queue message; can call "acknowledge" on the Queue Consumer
        class when it is done with the message.
        """
        self._synchronize_with_database()

        try:
            # extract event from message body
            zepevent = ZepRawEvent()
            zepevent.event.CopyFrom(message)
            log.debug("Received event: %s", to_dict(zepevent.event))
            eventContext = EventContext(log, zepevent)

            with Timeout(zepevent,
                         self.PROCESS_EVENT_TIMEOUT,
                         error_message='while processing event'):
                for pipe in self._pipes:
                    with self._pipe_timers[pipe.name]:
                        eventContext = pipe(eventContext)
                    log.debug('After pipe %s, event context is %s', pipe.name,
                              to_dict(eventContext.zepRawEvent))
                    if eventContext.event.status == STATUS_DROPPED:
                        raise DropEvent('Dropped by %s' % pipe,
                                        eventContext.event)

        except AttributeError:
            # _manager raises AttributeError
            # if the connection to Zope is lost - reset and retry ONE time
            if retry:
                log.debug("Resetting connection to catalogs")
                self._manager.reset()
                self.processMessage(message, retry=False)
            else:
                raise

        except DropEvent:
            # we want these to propagate out
            raise

        except Exception as error:
            log.info("Failed to process event, forward original raw event: %s",
                     to_dict(zepevent.event))
            # Pipes and plugins may raise ProcessingExceptions for their own
            # reasons; only log unexpected exceptions of other types
            # (this inserts a stack trace into the log)
            if not isinstance(error, ProcessingException):
                log.exception(error)

            eventContext = self.create_exception_event(message, error)

        log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))
        return eventContext.zepRawEvent

    def _synchronize_with_database(self):
        '''sync() the db, but only if this method has not been
        called within the last self.syncInterval seconds.
        KNOWN ISSUE: ZEN-29884
        '''
        if self.SYNC_EVERY_EVENT:
            doSync = True
        else:
            current_time = time()
            doSync = current_time > self.nextSync
            self.nextSync = current_time + self.syncInterval

        if doSync:
            self.dmd._p_jar.sync()

    def create_exception_event(self, message, exception):
        # construct wrapper event to report this event processing failure
        # including content of the original event
        orig_zep_event = ZepRawEvent()
        orig_zep_event.event.CopyFrom(message)
        failure_event = {
            'uuid': guid.generate(),
            'created_time': int(time() * 1000),
            'fingerprint':
                '|'.join(['zeneventd', 'processMessage', repr(exception)]),
            # Don't send the *same* event class or we loop endlessly
            'eventClass': '/',
            'summary': 'Internal exception processing event: %r' % exception,
            'message':
                'Internal exception processing event: %r/%s' %
                (exception, to_dict(orig_zep_event.event)),
            'severity': 4,
        }
        zep_raw_event = ZepRawEvent()
        zep_raw_event.event.CopyFrom(from_dict(Event, failure_event))
        event_context = EventContext(log, zep_raw_event)
        event_context.eventProxy.device = 'zeneventd'
        event_context.eventProxy.component = 'processMessage'
        return event_context
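
A minimal usage sketch to go with Example #1: a consumer loop hands each decoded protobuf Event to processMessage and publishes the returned ZepRawEvent, treating DropEvent as "skip publishing". The `publisher` object and its `publish` method are hypothetical placeholders, not part of the Zenoss API; DropEvent and log are assumed to come from the same module context as the class above.

def handle_message(processor, publisher, message):
    # Hypothetical driver: process one decoded Event protobuf and publish
    # the resulting ZepRawEvent; DropEvent simply means "do not publish".
    try:
        raw_event = processor.processMessage(message)
    except DropEvent as drop:
        log.debug("Event dropped: %s", drop)
        return None
    publisher.publish(raw_event)  # assumed publisher interface
    return raw_event
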
Example #2
class EventPipelineProcessor(object):

    SYNC_EVERY_EVENT = False

    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (
            EventPluginPipe(self._manager, IPreEventPlugin, 'PreEventPluginPipe'),
            CheckInputPipe(self._manager),
            IdentifierPipe(self._manager),
            AddDeviceContextAndTagsPipe(self._manager),
            TransformAndReidentPipe(self._manager,
                TransformPipe(self._manager),
                [
                UpdateDeviceContextAndTagsPipe(self._manager),
                IdentifierPipe(self._manager),
                AddDeviceContextAndTagsPipe(self._manager),
                ]),
            AssignDefaultEventClassAndTagPipe(self._manager),
            FingerprintPipe(self._manager),
            SerializeContextPipe(self._manager),
            EventPluginPipe(self._manager, IPostEventPlugin, 'PostEventPluginPipe'),
            ClearClassRefreshPipe(self._manager),
            CheckHeartBeatPipe(self._manager)
        )

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec - helps throughput
            # when receiving events in bursts
            self.nextSync = datetime.now()
            self.syncInterval = timedelta(microseconds=500000)  # 0.5 seconds

    def processMessage(self, message):
        """
        Handles a queue message; can call "acknowledge" on the Queue Consumer
        class when it is done with the message.
        """

        if self.SYNC_EVERY_EVENT:
            doSync = True
        else:
            # sync() db if it has been longer than self.syncInterval since the last time
            currentTime = datetime.now()
            doSync = currentTime > self.nextSync
            self.nextSync = currentTime + self.syncInterval

        if doSync:
            self.dmd._p_jar.sync()

        try:
            retry = True
            processed = False
            while not processed:
                try:
                    # extract event from message body
                    zepevent = ZepRawEvent()
                    zepevent.event.CopyFrom(message)
                    if log.isEnabledFor(logging.DEBUG):
                        log.debug("Received event: %s", to_dict(zepevent.event))

                    eventContext = EventContext(log, zepevent)

                    for pipe in self._pipes:
                        eventContext = pipe(eventContext)
                        if log.isEnabledFor(logging.DEBUG):
                            log.debug('After pipe %s, event context is %s',
                                      pipe.name, to_dict(eventContext.zepRawEvent))
                        if eventContext.event.status == STATUS_DROPPED:
                            raise DropEvent('Dropped by %s' % pipe, eventContext.event)

                    processed = True

                except AttributeError:
                    # _manager raises AttributeError if the connection to Zope is lost -
                    # reset and retry ONE time
                    if retry:
                        retry = False
                        log.debug("Resetting connection to catalogs")
                        self._manager.reset()
                    else:
                        raise

        except DropEvent:
            # we want these to propagate out
            raise
        except Exception as e:
            log.info("Failed to process event, forward original raw event: %s", to_dict(zepevent.event))
            # Pipes and plugins may raise ProcessingExceptions for their own reasons; only log
            # unexpected exceptions of other types (this inserts a stack trace into the log)
            if not isinstance(e, ProcessingException):
                log.exception(e)

            # construct wrapper event to report this event processing failure (including content of the
            # original event)
            origzepevent = ZepRawEvent()
            origzepevent.event.CopyFrom(message)
            failReportEvent = dict(
                uuid = guid.generate(),
                created_time = int(time.time()*1000),
                fingerprint='|'.join(['zeneventd', 'processMessage', repr(e)]),
                # Don't send the *same* event class or we trash and crash endlessly
                eventClass='/',
                summary='Internal exception processing event: %r' % e,
                message='Internal exception processing event: %r/%s' % (e, to_dict(origzepevent.event)),
                severity=4,
            )
            zepevent = ZepRawEvent()
            zepevent.event.CopyFrom(from_dict(Event, failReportEvent))
            eventContext = EventContext(log, zepevent)
            eventContext.eventProxy.device = 'zeneventd'
            eventContext.eventProxy.component = 'processMessage'

        if log.isEnabledFor(logging.DEBUG):
            log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))

        return eventContext.zepRawEvent
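
Example #2 throttles sync() with datetime/timedelta, while Examples #1 and #3 use time() floats; the underlying pattern is the same. Below is a standalone sketch of that throttling idea, using only the standard library; the class and method names are illustrative, not Zenoss code.

from time import time

class SyncThrottle(object):
    """Allow an expensive call (such as dmd._p_jar.sync()) at most once per
    `interval` seconds, unless `always` forces it on every call."""

    def __init__(self, interval=0.5, always=False):
        self.interval = interval
        self.always = always
        self.next_allowed = time()

    def should_run(self):
        if self.always:
            return True
        now = time()
        due = now > self.next_allowed
        # The deadline advances on every call, mirroring the examples above
        # (and the ZEN-29884 caveat noted in Examples #1 and #3).
        self.next_allowed = now + self.interval
        return due
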
Example #3
class EventPipelineProcessor(object):

    SYNC_EVERY_EVENT = False
    PROCESS_EVENT_TIMEOUT = 0

    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (
            EventPluginPipe(
                self._manager, IPreEventPlugin, 'PreEventPluginPipe'
            ),
            CheckInputPipe(self._manager),
            IdentifierPipe(self._manager),
            AddDeviceContextAndTagsPipe(self._manager),
            TransformAndReidentPipe(
                self._manager,
                TransformPipe(self._manager),
                [
                    UpdateDeviceContextAndTagsPipe(self._manager),
                    IdentifierPipe(self._manager),
                    AddDeviceContextAndTagsPipe(self._manager),
                ]
            ),
            AssignDefaultEventClassAndTagPipe(self._manager),
            FingerprintPipe(self._manager),
            SerializeContextPipe(self._manager),
            EventPluginPipe(
                self._manager, IPostEventPlugin, 'PostEventPluginPipe'
            ),
            ClearClassRefreshPipe(self._manager),
            CheckHeartBeatPipe(self._manager)
        )
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec;
            # this helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5

    def processMessage(self, message, retry=True):
        """
        Handles a queue message; can call "acknowledge" on the Queue Consumer
        class when it is done with the message.
        """
        self._synchronize_with_database()

        try:
            # extract event from message body
            zepevent = ZepRawEvent()
            zepevent.event.CopyFrom(message)
            log.debug("Received event: %s", to_dict(zepevent.event))
            eventContext = EventContext(log, zepevent)

            with Timeout(
                zepevent, self.PROCESS_EVENT_TIMEOUT,
                error_message='while processing event'
            ):
                for pipe in self._pipes:
                    with self._pipe_timers[pipe.name]:
                        eventContext = pipe(eventContext)
                    log.debug(
                        'After pipe %s, event context is %s',
                        pipe.name, to_dict(eventContext.zepRawEvent)
                    )
                    if eventContext.event.status == STATUS_DROPPED:
                        raise DropEvent(
                            'Dropped by %s' % pipe, eventContext.event
                        )

        except AttributeError:
            # _manager raises AttributeError
            # if the connection to Zope is lost - reset and retry ONE time
            if retry:
                log.debug("Resetting connection to catalogs")
                self._manager.reset()
                self.processMessage(message, retry=False)
            else:
                raise

        except DropEvent:
            # we want these to propagate out
            raise

        except Exception as error:
            log.info(
                "Failed to process event, forward original raw event: %s",
                to_dict(zepevent.event)
            )
            # Pipes and plugins may raise ProcessingExceptions for their own
            # reasons; only log unexpected exceptions of other types
            # (this inserts a stack trace into the log)
            if not isinstance(error, ProcessingException):
                log.exception(error)

            eventContext = self.create_exception_event(message, error)

        log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))
        return eventContext.zepRawEvent

    def _synchronize_with_database(self):
        '''sync() the db, but only if this method has not been
        called within the last self.syncInterval seconds.
        KNOWN ISSUE: ZEN-29884
        '''
        if self.SYNC_EVERY_EVENT:
            doSync = True
        else:
            current_time = time()
            doSync = current_time > self.nextSync
            self.nextSync = current_time + self.syncInterval

        if doSync:
            self.dmd._p_jar.sync()

    def create_exception_event(self, message, exception):
        # construct wrapper event to report this event processing failure
        # including content of the original event
        orig_zep_event = ZepRawEvent()
        orig_zep_event.event.CopyFrom(message)
        failure_event = {
            'uuid': guid.generate(),
            'created_time': int(time() * 1000),
            'fingerprint':
                '|'.join(['zeneventd', 'processMessage', repr(exception)]),
            # Don't send the *same* event class or we loop endlessly
            'eventClass': '/',
            'summary': 'Internal exception processing event: %r' % exception,
            'message':
                'Internal exception processing event: %r/%s' %
                (exception, to_dict(orig_zep_event.event)),
            'severity': 4,
        }
        zep_raw_event = ZepRawEvent()
        zep_raw_event.event.CopyFrom(from_dict(Event, failure_event))
        event_context = EventContext(log, zep_raw_event)
        event_context.eventProxy.device = 'zeneventd'
        event_context.eventProxy.component = 'processMessage'
        return event_context
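
The pipes themselves are not shown in these examples, but the loop in processMessage implies their contract: each pipe is constructed with the Manager, exposes a name (used for the Metrology timers in Examples #1 and #3), and is a callable that takes an EventContext and returns it, possibly mutated. A minimal illustrative pipe under those assumptions might look like the sketch below; the class and its access to the event's summary field are assumptions, not Zenoss internals.

class AnnotateSummaryPipe(object):
    # Illustrative only: marks the event summary so downstream pipes can see
    # it passed through. Field names are assumptions, not Zenoss internals.

    name = 'AnnotateSummaryPipe'

    def __init__(self, manager):
        self._manager = manager

    def __call__(self, eventContext):
        # Mutate the wrapped event in place, then hand the context onward.
        eventContext.event.summary = '[annotated] ' + eventContext.event.summary
        return eventContext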