Example #1
    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
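        # Event-processing pipes; each incoming event is run through these in order
        # (see processMessage in the fuller examples below).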
        self._pipes = (
            EventPluginPipe(self._manager, IPreEventPlugin, 'PreEventPluginPipe'),
            CheckInputPipe(self._manager),
            IdentifierPipe(self._manager),
            AddDeviceContextAndTagsPipe(self._manager),
            TransformAndReidentPipe(self._manager,
                TransformPipe(self._manager),
                [
                UpdateDeviceContextAndTagsPipe(self._manager),
                IdentifierPipe(self._manager),
                AddDeviceContextAndTagsPipe(self._manager),
                ]),
            AssignDefaultEventClassAndTagPipe(self._manager),
            FingerprintPipe(self._manager),
            SerializeContextPipe(self._manager),
            EventPluginPipe(self._manager, IPostEventPlugin, 'PostEventPluginPipe'),
            ClearClassRefreshPipe(self._manager),
            CheckHeartBeatPipe(self._manager)
        )

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec - helps throughput
            # when receiving events in bursts
            self.nextSync = datetime.now()
            self.syncInterval = timedelta(0,0,500000)
Example #2
    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (EventPluginPipe(self._manager, IPreEventPlugin,
                                       'PreEventPluginPipe'),
                       CheckInputPipe(self._manager),
                       IdentifierPipe(self._manager),
                       AddDeviceContextAndTagsPipe(self._manager),
                       TransformAndReidentPipe(
                           self._manager, TransformPipe(self._manager), [
                               UpdateDeviceContextAndTagsPipe(self._manager),
                               IdentifierPipe(self._manager),
                               AddDeviceContextAndTagsPipe(self._manager),
                           ]),
                       AssignDefaultEventClassAndTagPipe(self._manager),
                       FingerprintPipe(self._manager),
                       SerializeContextPipe(self._manager),
                       EventPluginPipe(self._manager, IPostEventPlugin,
                                       'PostEventPluginPipe'),
                       ClearClassRefreshPipe(self._manager),
                       CheckHeartBeatPipe(self._manager))
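        # One Metrology timer per pipe; processMessage wraps each pipe call with its
        # timer (shown in the fuller examples below).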
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec
            # helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5
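
Both snippets above throttle ZODB sync() calls rather than syncing on every event. A minimal standalone sketch of that throttling pattern follows; ThrottledSync and maybe_sync are illustrative names, not part of the examples.

from time import time

class ThrottledSync(object):
    """Invoke a sync callable at most once per `interval` seconds."""

    def __init__(self, sync, interval=0.5):
        self._sync = sync          # e.g. dmd._p_jar.sync in the examples above
        self._interval = interval
        self._next = time()

    def maybe_sync(self):
        now = time()
        if now > self._next:
            self._sync()
        # the deadline advances on every call, mirroring the examples above
        self._next = now + self._interval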
Example #3
 def __init__(self, dmd, twokeydict):
     a, b = twokeydict.items()
     aid, self.abrains = a
     bid, self.bbrains = b
     self.a = dmd.unrestrictedTraverse(aid)
     self.b = dmd.unrestrictedTraverse(bid)
     self.zep = getFacade('zep', dmd)
     self.idmgr = Manager(dmd)
Example #4
class Layer3Link(object):
    """
    Provides an API for navigating paired groups of brains.
    """
    def __init__(self, dmd, twokeydict):
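        # twokeydict is assumed to be a two-entry mapping of {object path: brains},
        # one entry per endpoint of the link.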
        a, b = twokeydict.items()
        aid, self.abrains = a
        bid, self.bbrains = b
        self.a = dmd.unrestrictedTraverse(aid)
        self.b = dmd.unrestrictedTraverse(bid)
        self.zep = getFacade('zep', dmd)
        self.idmgr = Manager(dmd)

    def _getComponentUuid(self, devuuid, compid):
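        # Resolve a component's UUID from its device UUID and component id;
        # returns None if either lookup fails.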
        try:
            dev = self.idmgr.getElementByUuid(devuuid)
            compuuid = self.idmgr.getElementUuidById(dev, Device, compid)
            return compuuid
        except Exception:
            return None

    def getStatus(self):
        brains = self.abrains + self.bbrains

        # lookup all device uuids, make sure at least one exists
        devUuids = [self.idmgr.findDeviceUuid(a.deviceId.split("/")[-1], None) for a in brains if a.deviceId]
        validDevUuids = filter(None, devUuids)
        if not validDevUuids:
            return SEVERITY_CLEAR

        # if there is any open /Status/Ping event on any device, return CRITICAL severity
        statusPingFilter = self.zep.createEventFilter(
            tags = validDevUuids,
            event_class = '/Status/Ping/',
            status = (STATUS_NEW, STATUS_ACKNOWLEDGED),
            severity = (SEVERITY_WARNING, SEVERITY_ERROR, SEVERITY_CRITICAL)
        )
        maxpingrec = self.zep.getEventSummaries(0, filter=statusPingFilter, sort=(('count','desc'),), limit=1)
        if maxpingrec and maxpingrec['total'] > 0:
            return SEVERITY_CRITICAL

        # no /Status/Ping events found, just return worst severity of all events on all interface components
        devCompPairs = zip(devUuids, (a.interfaceId for a in brains))
        compUuids = (self._getComponentUuid(devuuid, compid)
                        for devuuid, compid in devCompPairs
                        if devuuid is not None)
        components = filter(None, compUuids)
        if components:
            sev = self.zep.getWorstSeverity(components)
            return sev

        return SEVERITY_CLEAR

    def getAddresses(self):
        return (self.a.address, self.b.address)

    def getUids(self):
        return ("/".join(self.a.getPhysicalPath()), "/".join(self.b.getPhysicalPath()))
Example #5
    def _migrate_events(self, conn, publisher, status):
        converter = EventConverter(self.dmd, status)
        manager = Manager(self.dmd)
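        # Identification and tagging subset of the zeneventd pipeline, applied to
        # each migrated event below.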
        pipes = (IdentifierPipe(manager), AddDeviceContextAndTagsPipe(manager),
                 AssignDefaultEventClassAndTagPipe(manager))
        routing_key = 'zenoss.events.summary' if status else 'zenoss.events.archive'

        taggers = {
            EventProxy.DEVICE_CLASS_DETAIL_KEY: (self.dmd.Devices, DeviceClass),
            EventProxy.DEVICE_GROUPS_DETAIL_KEY: (self.dmd.Groups, DeviceGroup),
            EventProxy.DEVICE_LOCATION_DETAIL_KEY: (self.dmd.Locations, Location),
            EventProxy.DEVICE_SYSTEMS_DETAIL_KEY: (self.dmd.Systems, System),
        }

        try:
            for event_rows in self._page_rows(conn, status):
                with AmqpTransaction(publisher.channel):
                    for mapping_event_context in imap(converter.convert, event_rows):
                        if self._shutdown:
                            raise ShutdownException()
                        occurrence = mapping_event_context.occurrence
                        zep_raw_event = self._event_to_zep_raw_event(occurrence)
                        event_ctx = EventContext(log, zep_raw_event)
                        for pipe in pipes:
                            pipe(event_ctx)

                        # Clear tags for device class, location, systems, groups from current device
                        event_ctx.eventProxy.tags.clearType(AddDeviceContextAndTagsPipe.DEVICE_TAGGERS.keys())

                        # Resolve tags from original fields in the event
                        for detail in occurrence.details:
                            if detail.name in taggers:
                                organizer_root, organizer_cls = taggers[detail.name]
                                tags = set()
                                for val in detail.value:
                                    try:
                                        obj = organizer_root.unrestrictedTraverse(str(val[1:]))
                                        if isinstance(obj, organizer_cls):
                                            tags.update(manager.getUuidsOfPath(obj))
                                    except Exception:
                                        if log.isEnabledFor(logging.DEBUG):
                                            log.debug("Unable to resolve UUID for %s", val)
                                if tags:
                                    event_tag = occurrence.tags.add()
                                    event_tag.type = detail.name
                                    event_tag.uuid.extend(tags)

                        self._merge_tags(zep_raw_event, occurrence)
                        if log.isEnabledFor(logging.DEBUG):
                            log.debug("Migrated event: %s", mapping_event_context.summary)

                        publisher.publish("$MigratedEvents", routing_key, mapping_event_context.summary,
                                          createQueues=("$ZepMigratedEventSummary","$ZepMigratedEventArchive"))
        except ShutdownException:
            pass
Example #6
    def test0(self):
        # Set up a device with a single interface with two IP addresses
        device = self.dmd.Devices.createInstance('mydevice')
        device.setManageIp('10.10.10.1')
        device.os.addIpInterface('eth0', False)
        iface = device.os.interfaces()[0]
        iface.addIpAddress('10.10.10.2')
        iface.addIpAddress('10.10.10.3')

        device_uuid = IGlobalIdentifier(device).getGUID()
        manager = Manager(self.dmd)
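        # Helper: assert that findDeviceUuid(id, ip) resolves to the expected UUID
        # (device_uuid unless overridden).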
        def test(id, ip, msg, expected = device_uuid):
            self.assertEquals(manager.findDeviceUuid(id, ip), expected, msg)

        test('mydevice', '', "failed to find by device name")
        test('10.10.10.1', '', "failed to find by device name == IP")
        test('dev', '10.10.10.1', "failed to find by device's manageIP")
        test('dev', '10.10.10.2', "failed to find by interface's primary IP")
        test('dev', '10.10.10.3', "failed to find by interface's secondary IP")
        test('dev', '10.10.10.4', "failed missing IP test", None)
Example #7
    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (
            EventPluginPipe(
                self._manager, IPreEventPlugin, 'PreEventPluginPipe'
            ),
            CheckInputPipe(self._manager),
            IdentifierPipe(self._manager),
            AddDeviceContextAndTagsPipe(self._manager),
            TransformAndReidentPipe(
                self._manager,
                TransformPipe(self._manager),
                [
                    UpdateDeviceContextAndTagsPipe(self._manager),
                    IdentifierPipe(self._manager),
                    AddDeviceContextAndTagsPipe(self._manager),
                ]
            ),
            AssignDefaultEventClassAndTagPipe(self._manager),
            FingerprintPipe(self._manager),
            SerializeContextPipe(self._manager),
            EventPluginPipe(
                self._manager, IPostEventPlugin, 'PostEventPluginPipe'
            ),
            ClearClassRefreshPipe(self._manager),
            CheckHeartBeatPipe(self._manager)
        )
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than 1 every 0.5 sec
            # helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5
Example #8
class EventPipelineProcessor(object):

    SYNC_EVERY_EVENT = False

    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (
            EventPluginPipe(self._manager, IPreEventPlugin, 'PreEventPluginPipe'),
            CheckInputPipe(self._manager),
            IdentifierPipe(self._manager),
            AddDeviceContextAndTagsPipe(self._manager),
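            # After transforms run, the sub-pipes below re-identify the device and
            # refresh its context and tags.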
            TransformAndReidentPipe(self._manager,
                TransformPipe(self._manager),
                [
                UpdateDeviceContextAndTagsPipe(self._manager),
                IdentifierPipe(self._manager),
                AddDeviceContextAndTagsPipe(self._manager),
                ]),
            AssignDefaultEventClassAndTagPipe(self._manager),
            FingerprintPipe(self._manager),
            SerializeContextPipe(self._manager),
            EventPluginPipe(self._manager, IPostEventPlugin, 'PostEventPluginPipe'),
            ClearClassRefreshPipe(self._manager),
            CheckHeartBeatPipe(self._manager)
        )

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec - helps throughput
            # when receiving events in bursts
            self.nextSync = datetime.now()
            self.syncInterval = timedelta(0,0,500000)

    def processMessage(self, message):
        """
        Handles a queue message; can call "acknowledge" on the Queue Consumer
        class when it is done with the message
        """

        if self.SYNC_EVERY_EVENT:
            doSync = True
        else:
            # sync() db if it has been longer than self.syncInterval since the last time
            currentTime = datetime.now()
            doSync = currentTime > self.nextSync
            self.nextSync = currentTime + self.syncInterval

        if doSync:
            self.dmd._p_jar.sync()

        try:
            retry = True
            processed = False
            while not processed:
                try:
                    # extract event from message body
                    zepevent = ZepRawEvent()
                    zepevent.event.CopyFrom(message)
                    if log.isEnabledFor(logging.DEBUG):
                        log.debug("Received event: %s", to_dict(zepevent.event))

                    eventContext = EventContext(log, zepevent)

                    for pipe in self._pipes:
                        eventContext = pipe(eventContext)
                        if log.isEnabledFor(logging.DEBUG):
                            log.debug('After pipe %s, event context is %s' % ( pipe.name, to_dict(eventContext.zepRawEvent) ))
                        if eventContext.event.status == STATUS_DROPPED:
                            raise DropEvent('Dropped by %s' % pipe, eventContext.event)

                    processed = True

                except AttributeError:
                    # _manager throws Attribute errors if connection to zope is lost - reset
                    # and retry ONE time
                    if retry:
                        retry=False
                        log.debug("Resetting connection to catalogs")
                        self._manager.reset()
                    else:
                        raise

        except DropEvent:
            # we want these to propagate out
            raise
        except Exception as e:
            log.info("Failed to process event, forward original raw event: %s", to_dict(zepevent.event))
            # Pipes and plugins may raise ProcessingExceptions for their own reasons - only log unexpected
            # exceptions of other types (this inserts a stack trace in the log)
            if not isinstance(e, ProcessingException):
                log.exception(e)

            # construct wrapper event to report this event processing failure (including content of the
            # original event)
            origzepevent = ZepRawEvent()
            origzepevent.event.CopyFrom(message)
            failReportEvent = dict(
                uuid = guid.generate(),
                created_time = int(time.time()*1000),
                fingerprint='|'.join(['zeneventd', 'processMessage', repr(e)]),
                # Don't send the *same* event class or we thrash and crash endlessly
                eventClass='/',
                summary='Internal exception processing event: %r' % e,
                message='Internal exception processing event: %r/%s' % (e, to_dict(origzepevent.event)),
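                # severity 4 corresponds to 'Error' on Zenoss's 0-5 severity scale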
                severity=4,
            )
            zepevent = ZepRawEvent()
            zepevent.event.CopyFrom(from_dict(Event, failReportEvent))
            eventContext = EventContext(log, zepevent)
            eventContext.eventProxy.device = 'zeneventd'
            eventContext.eventProxy.component = 'processMessage'

        if log.isEnabledFor(logging.DEBUG):
            log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))

        return eventContext.zepRawEvent
Example #9
class Layer3Link(object):
    """
    Provides an API for navigating paired groups of brains.
    """
    def __init__(self, dmd, twokeydict):
        a, b = twokeydict.items()
        aid, self.abrains = a
        bid, self.bbrains = b
        self.a = dmd.unrestrictedTraverse(aid)
        self.b = dmd.unrestrictedTraverse(bid)
        self.zep = getFacade('zep', dmd)
        self.idmgr = Manager(dmd)

    def _getComponentUuid(self, devuuid, compid):
        try:
            dev = self.idmgr.getElementByUuid(devuuid)
            compuuid = self.idmgr.getElementUuidById(dev, Device, compid)
            return compuuid
        except Exception:
            return None

    def getStatus(self):
        brains = self.abrains + self.bbrains

        # lookup all device uuids, make sure at least one exists
        devUuids = [
            self.idmgr.findDeviceUuid(a.deviceId.split("/")[-1], None)
            for a in brains if a.deviceId
        ]
        validDevUuids = filter(None, devUuids)
        if not validDevUuids:
            return SEVERITY_CLEAR

        # if there is any open /Status/Ping event on any device, return CRITICAL severity
        statusPingFilter = self.zep.createEventFilter(
            tags=validDevUuids,
            event_class='/Status/Ping/',
            status=(STATUS_NEW, STATUS_ACKNOWLEDGED),
            severity=(SEVERITY_WARNING, SEVERITY_ERROR, SEVERITY_CRITICAL))
        maxpingrec = self.zep.getEventSummaries(0,
                                                filter=statusPingFilter,
                                                sort=(('count', 'desc'), ),
                                                limit=1)
        if maxpingrec and maxpingrec['total'] > 0:
            return SEVERITY_CRITICAL

        # no /Status/Ping events found, just return worst severity of all events on all interface components
        devCompPairs = zip(devUuids, (a.interfaceId for a in brains))
        compUuids = (self._getComponentUuid(devuuid, compid)
                     for devuuid, compid in devCompPairs
                     if devuuid is not None)
        components = filter(None, compUuids)
        if components:
            sev = self.zep.getWorstSeverity(components)
            return sev

        return SEVERITY_CLEAR

    def getAddresses(self):
        return (self.a.address, self.b.address)

    def getUids(self):
        return ("/".join(self.a.getPhysicalPath()),
                "/".join(self.b.getPhysicalPath()))
Example #10
class EventPipelineProcessor(object):

    SYNC_EVERY_EVENT = False
    PROCESS_EVENT_TIMEOUT = 0

    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (EventPluginPipe(self._manager, IPreEventPlugin,
                                       'PreEventPluginPipe'),
                       CheckInputPipe(self._manager),
                       IdentifierPipe(self._manager),
                       AddDeviceContextAndTagsPipe(self._manager),
                       TransformAndReidentPipe(
                           self._manager, TransformPipe(self._manager), [
                               UpdateDeviceContextAndTagsPipe(self._manager),
                               IdentifierPipe(self._manager),
                               AddDeviceContextAndTagsPipe(self._manager),
                           ]),
                       AssignDefaultEventClassAndTagPipe(self._manager),
                       FingerprintPipe(self._manager),
                       SerializeContextPipe(self._manager),
                       EventPluginPipe(self._manager, IPostEventPlugin,
                                       'PostEventPluginPipe'),
                       ClearClassRefreshPipe(self._manager),
                       CheckHeartBeatPipe(self._manager))
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec
            # helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5

    def processMessage(self, message, retry=True):
        """
        Handles a queue message; can call "acknowledge" on the Queue Consumer
        class when it is done with the message
        """
        self._synchronize_with_database()

        try:
            # extract event from message body
            zepevent = ZepRawEvent()
            zepevent.event.CopyFrom(message)
            log.debug("Received event: %s", to_dict(zepevent.event))
            eventContext = EventContext(log, zepevent)

            with Timeout(zepevent,
                         self.PROCESS_EVENT_TIMEOUT,
                         error_message='while processing event'):
                for pipe in self._pipes:
                    with self._pipe_timers[pipe.name]:
                        eventContext = pipe(eventContext)
                    log.debug('After pipe %s, event context is %s', pipe.name,
                              to_dict(eventContext.zepRawEvent))
                    if eventContext.event.status == STATUS_DROPPED:
                        raise DropEvent('Dropped by %s' % pipe,
                                        eventContext.event)

        except AttributeError:
            # _manager throws Attribute errors
            # if connection to zope is lost - reset and retry ONE time
            if retry:
                log.debug("Resetting connection to catalogs")
                self._manager.reset()
                self.processMessage(message, retry=False)
            else:
                raise

        except DropEvent:
            # we want these to propagate out
            raise

        except Exception as error:
            log.info("Failed to process event, forward original raw event: %s",
                     to_dict(zepevent.event))
            # Pipes and plugins may raise ProcessingExceptions for their own
            # reasons; only log unexpected exceptions of other types
            # (this inserts a stack trace in the log).
            if not isinstance(error, ProcessingException):
                log.exception(error)

            eventContext = self.create_exception_event(message, error)

        log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))
        return eventContext.zepRawEvent

    def _synchronize_with_database(self):
        '''Call sync() on the db if more than self.syncInterval seconds
        have passed since the last call to this method.
        KNOWN ISSUE: ZEN-29884
        '''
        if self.SYNC_EVERY_EVENT:
            doSync = True
        else:
            current_time = time()
            doSync = current_time > self.nextSync
            self.nextSync = current_time + self.syncInterval

        if doSync:
            self.dmd._p_jar.sync()

    def create_exception_event(self, message, exception):
        # construct wrapper event to report this event processing failure
        # including content of the original event
        orig_zep_event = ZepRawEvent()
        orig_zep_event.event.CopyFrom(message)
        failure_event = {
            'uuid': guid.generate(),
            'created_time': int(time() * 1000),
            'fingerprint':
                '|'.join(['zeneventd', 'processMessage', repr(exception)]),
            # Don't send the *same* event class or we loop endlessly
            'eventClass': '/',
            'summary': 'Internal exception processing event: %r' % exception,
            'message':
                'Internal exception processing event: %r/%s' %
                (exception, to_dict(orig_zep_event.event)),
            'severity': 4,
        }
        zep_raw_event = ZepRawEvent()
        zep_raw_event.event.CopyFrom(from_dict(Event, failure_event))
        event_context = EventContext(log, zep_raw_event)
        event_context.eventProxy.device = 'zeneventd'
        event_context.eventProxy.component = 'processMessage'
        return event_context
Example #11
class EventPipelineProcessor(object):

    SYNC_EVERY_EVENT = False
    PROCESS_EVENT_TIMEOUT = 0

    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (
            EventPluginPipe(
                self._manager, IPreEventPlugin, 'PreEventPluginPipe'
            ),
            CheckInputPipe(self._manager),
            IdentifierPipe(self._manager),
            AddDeviceContextAndTagsPipe(self._manager),
            TransformAndReidentPipe(
                self._manager,
                TransformPipe(self._manager),
                [
                    UpdateDeviceContextAndTagsPipe(self._manager),
                    IdentifierPipe(self._manager),
                    AddDeviceContextAndTagsPipe(self._manager),
                ]
            ),
            AssignDefaultEventClassAndTagPipe(self._manager),
            FingerprintPipe(self._manager),
            SerializeContextPipe(self._manager),
            EventPluginPipe(
                self._manager, IPostEventPlugin, 'PostEventPluginPipe'
            ),
            ClearClassRefreshPipe(self._manager),
            CheckHeartBeatPipe(self._manager)
        )
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than once every 0.5 sec
            # helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5

    def processMessage(self, message, retry=True):
        """
        Handles a queue message; can call "acknowledge" on the Queue Consumer
        class when it is done with the message
        """
        self._synchronize_with_database()

        try:
            # extract event from message body
            zepevent = ZepRawEvent()
            zepevent.event.CopyFrom(message)
            log.debug("Received event: %s", to_dict(zepevent.event))
            eventContext = EventContext(log, zepevent)

            with Timeout(
                zepevent, self.PROCESS_EVENT_TIMEOUT,
                error_message='while processing event'
            ):
                for pipe in self._pipes:
                    with self._pipe_timers[pipe.name]:
                        eventContext = pipe(eventContext)
                    log.debug(
                        'After pipe %s, event context is %s',
                        pipe.name, to_dict(eventContext.zepRawEvent)
                    )
                    if eventContext.event.status == STATUS_DROPPED:
                        raise DropEvent(
                            'Dropped by %s' % pipe, eventContext.event
                        )

        except AttributeError:
            # _manager throws Attribute errors
            # if connection to zope is lost - reset and retry ONE time
            if retry:
                log.debug("Resetting connection to catalogs")
                self._manager.reset()
                self.processMessage(message, retry=False)
            else:
                raise

        except DropEvent:
            # we want these to propagate out
            raise

        except Exception as error:
            log.info(
                "Failed to process event, forward original raw event: %s",
                to_dict(zepevent.event)
            )
            # Pipes and plugins may raise ProcessingExceptions for their own
            # reasons; only log unexpected exceptions of other types
            # (this inserts a stack trace in the log).
            if not isinstance(error, ProcessingException):
                log.exception(error)

            eventContext = self.create_exception_event(message, error)

        log.debug("Publishing event: %s", to_dict(eventContext.zepRawEvent))
        return eventContext.zepRawEvent

    def _synchronize_with_database(self):
        '''Call sync() on the db if more than self.syncInterval seconds
        have passed since the last call to this method.
        KNOWN ISSUE: ZEN-29884
        '''
        if self.SYNC_EVERY_EVENT:
            doSync = True
        else:
            current_time = time()
            doSync = current_time > self.nextSync
            self.nextSync = current_time + self.syncInterval

        if doSync:
            self.dmd._p_jar.sync()

    def create_exception_event(self, message, exception):
        # construct wrapper event to report this event processing failure
        # including content of the original event
        orig_zep_event = ZepRawEvent()
        orig_zep_event.event.CopyFrom(message)
        failure_event = {
            'uuid': guid.generate(),
            'created_time': int(time() * 1000),
            'fingerprint':
                '|'.join(['zeneventd', 'processMessage', repr(exception)]),
            # Don't send the *same* event class or we loop endlessly
            'eventClass': '/',
            'summary': 'Internal exception processing event: %r' % exception,
            'message':
                'Internal exception processing event: %r/%s' %
                (exception, to_dict(orig_zep_event.event)),
            'severity': 4,
        }
        zep_raw_event = ZepRawEvent()
        zep_raw_event.event.CopyFrom(from_dict(Event, failure_event))
        event_context = EventContext(log, zep_raw_event)
        event_context.eventProxy.device = 'zeneventd'
        event_context.eventProxy.component = 'processMessage'
        return event_context