Example #1
    def setUp(self):
        self.reporter = GraphiteReporter('localhost', 3333)

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(1.5)
        Metrology.utilization_timer('utimer').update(1.5)
        Metrology.histogram('histogram').update(1.5)
Example #2
    def __init__(self, application, reporter=None, **kwargs):
        self.application = application
        self.request = Metrology.meter('request')
        self.request_time = Metrology.timer('request_time')

        # Start reporter
        if reporter:
            reporter.start()
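
A minimal usage sketch, not taken from the example above: one way such a middleware could exercise its instruments in __call__, using only the mark() and update(seconds) calls shown elsewhere in these examples. The MeteredApplication name and the elapsed-time bookkeeping are illustrative assumptions.

import time

from metrology import Metrology


class MeteredApplication(object):
    """Hypothetical WSGI wrapper mirroring the example above."""

    def __init__(self, application, reporter=None):
        self.application = application
        self.request = Metrology.meter('request')            # request rate
        self.request_time = Metrology.timer('request_time')  # request latency
        if reporter:
            reporter.start()

    def __call__(self, environ, start_response):
        self.request.mark()  # count every request
        started = time.time()
        try:
            return self.application(environ, start_response)
        finally:
            # record elapsed wall-clock seconds for this request
            self.request_time.update(time.time() - started)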
Example #3
    def __init__(self, single=False):
        """
        Initializer

        @param single: collect from a single device?
        @type single: boolean
        """
        PBDaemon.__init__(self)
        # FIXME: cleanup --force option #2660
        self.options.force = True
        self.start = None
        self.startat = None
        self.rrdStats = DaemonStats()
        self.single = single
        if self.options.device:
            self.single = True
        self.modelerCycleInterval = self.options.cycletime
        # get the minutes and convert to fraction of a day
        self.collage = float(self.options.collage) / 1440.0
        self.pendingNewClients = False
        self.clients = []
        self.finished = []
        self.devicegen = None
        self.counters = collections.Counter()
        self.configFilter = None
        self.configLoaded = False

        # Make sendEvent() available to plugins
        zope.component.provideUtility(self, IEventService)

        # Delay start for between 10 and 60 seconds when run as a daemon.
        self.started = False
        self.startDelay = 0
        self.immediate = 1
        if self.options.daemon or self.options.cycle:
            if self.options.now:
                self.log.debug('option "now" specified, starting immediately.')
            else:
                # self.startDelay = randint(10, 60) * 60
                self.startDelay = randint(10, 60) * 1
                self.immediate = 0
                self.log.info(
                    'option "now" not specified, waiting %s seconds to start.'
                    % self.startDelay)
        else:
            self.log.debug("Run in foreground, starting immediately.")

        # ZEN-26637
        self.collectorLoopIteration = 0
        self.mainLoopGotDeviceList = False

        self.isMainScheduled = False

        self._modeledDevicesMetric = Metrology.meter(
            "zenmodeler.modeledDevices")
        self._failuresMetric = Metrology.counter("zenmodeler.failures")
Example #4
    def __init__(self, notificationDao):
        self.notificationDao = notificationDao
        self.signal_timer = Metrology.timer('zenactiond.signals')
        self.notification_timer = Metrology.timer('zenactiond.notifications')

        # set by the constructor of queueConsumer
        self.queueConsumer = None

        self.schema = getUtility(IQueueSchema)
        self.queue = self.schema.getQueue("$Signals")
Example #6
def register_metrics_on_worklist(worklist):
    metricNames = {x[0] for x in registry}

    for metricName, priority in _gauge_priority_map.iteritems():
        if metricName not in metricNames:
            gauge = PriorityListLengthGauge(worklist, priority)
            Metrology.gauge(metricName, gauge)

    # Original metric name
    if "zenhub.workList" not in metricNames:
        gauge = WorklistLengthGauge(worklist)
        Metrology.gauge("zenhub.workList", gauge)
Example #7
    def __init__(self, single=False ):
        """
        Initializer

        @param single: collect from a single device?
        @type single: boolean
        """
        PBDaemon.__init__(self)
        # FIXME: cleanup --force option #2660
        self.options.force = True
        self.start = None
        self.startat = None
        self.rrdStats = DaemonStats()
        self.single = single
        if self.options.device:
            self.single = True
        self.modelerCycleInterval = self.options.cycletime
        # get the minutes and convert to fraction of a day
        self.collage = float( self.options.collage ) / 1440.0
        self.pendingNewClients = False
        self.clients = []
        self.finished = []
        self.devicegen = None
        self.counters = collections.Counter()
        self.configFilter = None
        self.configLoaded = False

        # Make sendEvent() available to plugins
        zope.component.provideUtility(self, IEventService)

        # Delay start for between 10 and 60 seconds when run as a daemon.
        self.started = False
        self.startDelay = 0
        self.immediate = 1
        if self.options.daemon or self.options.cycle:
            if self.options.now:
                self.log.debug('option "now" specified, starting immediately.')
            else:
                # self.startDelay = randint(10, 60) * 60
                self.startDelay = randint(10, 60) * 1
                self.immediate = 0
                self.log.info('option "now" not specified, waiting %s seconds to start.' %
                              self.startDelay)
        else:
            self.log.debug("Run in foreground, starting immediately.")


        # ZEN-26637
        self.collectorLoopIteration = 0
        self.mainLoopGotDeviceList = False

        self._modeledDevicesMetric = Metrology.meter("zenmodeler.modeledDevices")
        self._failuresMetric = Metrology.counter("zenmodeler.failures")
Example #8
    def test_send_batch(self, mock):
        self.reporter = GraphiteReporter("localhost", 3334, pickle=True, batch_size=2)

        Metrology.meter("meter").mark()
        Metrology.counter("counter").increment()
        Metrology.timer("timer").update(5)
        Metrology.utilization_timer("utimer").update(5)
        Metrology.histogram("histogram").update(5)
        self.reporter.write()
        self.assertTrue(mock.sendall.called)
        self.assertEqual(25, len(mock.sendall.call_args_list))
        self.reporter.stop()
Example #9
    def test_send_batch(self, mock):
        self.reporter = GraphiteReporter('localhost', 3333, batch_size=2)

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(5)
        Metrology.utilization_timer('utimer').update(5)
        Metrology.histogram('histogram').update(5)
        self.reporter.write()
        self.assertTrue(mock.send.called)
        self.assertEqual(25, len(mock.sendall.call_args_list))
        self.reporter.stop()
Example #10
    def __init__(self, zenhub, service):
        self.zenhub = zenhub
        self.service = service
        self._serviceCalls = Metrology.meter("zenhub.serviceCalls")
        self.log = logging.getLogger('zen.zenhub.WorkerInterceptor')
        self._admTimer = Metrology.timer('zenhub.applyDataMap')
        self._eventsSent = Metrology.meter("zenhub.eventsSent")

        self.meters = {
            'sendEvent': self.mark_send_event_timer,
            'sendEvents': self.mark_send_events_timer,
            'applyDataMaps': self.mark_apply_datamaps_timer,
        }
Example #11
    def test_udp_send_batch(self, mock):
        self.reporter = StatsDReporter('localhost', 3333,
                                       batch_size=2, conn_type='udp')

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(5)
        Metrology.utilization_timer('utimer').update(5)
        Metrology.histogram('histogram').update(5)
        self.reporter.write()
        self.assertTrue(mock.sendto.called)
        self.assertEqual(3, len(mock.sendto.call_args_list))
        self.reporter.stop()
Example #12
    def setUp(self):
        self.reporter = LibratoReporter("<email>", "<token>")

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(1.5)
        Metrology.utilization_timer('utimer').update(1.5)
Example #13
    def setUp(self):
        self.reporter = GangliaReporter("Group Name", "localhost", 8649)

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(5)
        Metrology.utilization_timer('utimer').update(5)
Example #14
    def test_udp_send_batch(self, mock):
        self.reporter = StatsDReporter('localhost',
                                       3333,
                                       batch_size=2,
                                       conn_type='udp')

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(5)
        Metrology.utilization_timer('utimer').update(5)
        Metrology.histogram('histogram').update(5)
        self.reporter.write()
        self.assertTrue(mock.sendto.called)
        self.assertEqual(3, len(mock.sendto.call_args_list))
        self.reporter.stop()
Example #15
    def __init__(self,
                 name,
                 configId=None,
                 scheduleIntervalSeconds=None,
                 taskConfig=None):
        super(ConfigurationLoaderTask, self).__init__()
        self._fetchConfigTimer = Metrology.timer('collectordaemon.configs')

        # Needed for interface
        self.name = name
        self.configId = configId if configId else name
        self.state = TaskStates.STATE_IDLE

        self._dataService = zope.component.queryUtility(IDataService)
        self._eventService = zope.component.queryUtility(IEventService)

        if taskConfig is None:
            raise TypeError("taskConfig cannot be None")
        self._prefs = taskConfig
        self.interval = self._prefs.configCycleInterval * 60
        self.options = self._prefs.options

        self._daemon = zope.component.getUtility(ICollector)
        self._daemon.heartbeatTimeout = self.options.heartbeatTimeout
        log.debug("Heartbeat timeout set to %ds", self._daemon.heartbeatTimeout)

        frameworkFactory = zope.component.queryUtility(IFrameworkFactory, self._frameworkFactoryName)
        self._configProxy = frameworkFactory.getConfigurationProxy()

        self.devices = []
        self.startDelay = 0
Example #16
    def __init__(self,
                 name,
                 configId=None,
                 scheduleIntervalSeconds=None,
                 taskConfig=None):
        super(ConfigurationLoaderTask, self).__init__()
        self._fetchConfigTimer = Metrology.timer('collectordaemon.configs')

        # Needed for interface
        self.name = name
        self.configId = configId if configId else name
        self.state = TaskStates.STATE_IDLE

        self._dataService = zope.component.queryUtility(IDataService)
        self._eventService = zope.component.queryUtility(IEventService)

        if taskConfig is None:
            raise TypeError("taskConfig cannot be None")
        self._prefs = taskConfig
        self.interval = self._prefs.configCycleInterval * 60
        self.options = self._prefs.options

        self._daemon = zope.component.getUtility(ICollector)
        self._daemon.heartbeatTimeout = self.options.heartbeatTimeout
        log.debug("Heartbeat timeout set to %ds",
                  self._daemon.heartbeatTimeout)

        frameworkFactory = zope.component.queryUtility(
            IFrameworkFactory, self._frameworkFactoryName)
        self._configProxy = frameworkFactory.getConfigurationProxy()

        self.devices = []
        self.startDelay = 0
Example #17
    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (EventPluginPipe(self._manager, IPreEventPlugin,
                                       'PreEventPluginPipe'),
                       CheckInputPipe(self._manager),
                       IdentifierPipe(self._manager),
                       AddDeviceContextAndTagsPipe(self._manager),
                       TransformAndReidentPipe(
                           self._manager, TransformPipe(self._manager), [
                               UpdateDeviceContextAndTagsPipe(self._manager),
                               IdentifierPipe(self._manager),
                               AddDeviceContextAndTagsPipe(self._manager),
                           ]),
                       AssignDefaultEventClassAndTagPipe(self._manager),
                       FingerprintPipe(self._manager),
                       SerializeContextPipe(self._manager),
                       EventPluginPipe(self._manager, IPostEventPlugin,
                                       'PostEventPluginPipe'),
                       ClearClassRefreshPipe(self._manager),
                       CheckHeartBeatPipe(self._manager))
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than 1 every 0.5 sec
            # helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5
Example #18
    def setUp(self):
        self.output = StringIO()
        logging.basicConfig(stream=self.output, level=logging.INFO)

        self.reporter = LoggerReporter()

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(1.5)
        Metrology.utilization_timer('utimer').update(1.5)
Example #19
    def __init__(self):
        signal.signal(signal.SIGUSR2, signal.SIG_IGN)
        ZCmdBase.__init__(self)
        if self.options.profiling:
            self.profiler = ContinuousProfiler('zenhubworker', log=self.log)
            self.profiler.start()
        self.current = IDLE
        self.currentStart = 0
        self.numCalls = Metrology.meter("zenhub.workerCalls")
        try:
            self.log.debug("establishing SIGUSR1 signal handler")
            signal.signal(signal.SIGUSR1, self.sighandler_USR1)
            self.log.debug("establishing SIGUSR2 signal handler")
            signal.signal(signal.SIGUSR2, self.sighandler_USR2)
        except ValueError:
            # If we get called multiple times, this will generate an exception:
            # ValueError: signal only works in main thread
            # Ignore it as we've already set up the signal handler.
            pass

        self.zem = self.dmd.ZenEventManager
        loadPlugins(self.dmd)
        self.pid = os.getpid()
        self.services = {}
        factory = ReconnectingPBClientFactory(pingPerspective=False)
        self.log.debug("Connecting to %s:%d", self.options.hubhost,
                       self.options.hubport)
        reactor.connectTCP(self.options.hubhost, self.options.hubport, factory)
        self.log.debug("Logging in as %s", self.options.username)
        c = credentials.UsernamePassword(self.options.username,
                                         self.options.password)
        factory.gotPerspective = self.gotPerspective

        def stop(*args):
            reactor.callLater(0, reactor.stop)

        factory.clientConnectionLost = stop
        factory.setCredentials(c)

        self.log.debug("Creating async MetricReporter")
        daemonTags = {
            'zenoss_daemon': 'zenhub_worker_%s' % self.options.workernum,
            'zenoss_monitor': self.options.monitor,
            'internal': True
        }

        def stopReporter():
            if self.metricreporter:
                return self.metricreporter.stop()

        # The order of the shutdown triggers matters. We want to stop the
        # reporter first; calling metricWriter() below registers shutdown
        # triggers for the actual metric HTTP and redis publishers.
        reactor.addSystemEventTrigger('before', 'shutdown', stopReporter)
        self.metricreporter = TwistedMetricReporter(
            metricWriter=metricWriter(), tags=daemonTags)
        self.metricreporter.start()
Example #20
    def __init__(self, options, log):
        self.options = options
        self.transformers = _load_utilities(ICollectorEventTransformer)
        self.log = log
        self.discarded_events = 0
        # TODO: Do we want to limit the size of the clear event dictionary?
        self.clear_events_count = {}
        self._initQueues()
        self._eventsSent = Metrology.meter("collectordaemon.eventsSent")
        self._discardedEvents = Metrology.meter("collectordaemon.discardedEvent")
        self._eventTimer = Metrology.timer('collectordaemon.eventTimer')
        metricNames = {x[0] for x in registry}
        if 'collectordaemon.eventQueue' not in metricNames:
            queue = self

            class EventQueueGauge(Gauge):
                @property
                def value(self):
                    return queue.event_queue_length

            Metrology.gauge('collectordaemon.eventQueue', EventQueueGauge())
Example #21
    def __init__(self, options, log):
        self.options = options
        self.transformers = _load_utilities(ICollectorEventTransformer)
        self.log = log
        self.discarded_events = 0
        # TODO: Do we want to limit the size of the clear event dictionary?
        self.clear_events_count = {}
        self._initQueues()
        self._eventsSent = Metrology.meter("collectordaemon.eventsSent")
        self._discardedEvents = Metrology.meter(
            "collectordaemon.discardedEvent")
        self._eventTimer = Metrology.timer('collectordaemon.eventTimer')
        metricNames = {x[0] for x in registry}
        if 'collectordaemon.eventQueue' not in metricNames:
            queue = self

            class EventQueueGauge(Gauge):
                @property
                def value(self):
                    return queue.event_queue_length

            Metrology.gauge('collectordaemon.eventQueue', EventQueueGauge())
Example #22
def register_legacy_worklist_metrics():
    """Create the Metrology counters for tracking worklist statistics."""
    config = getUtility(IHubServerConfig)
    global _legacy_worklist_counters

    for metricName, priorityName in config.legacy_metric_priority_map.items():
        gauge = registry.metrics.get(metricName)
        priority = ServiceCallPriority[priorityName]
        if not gauge:
            gauge = WorkListGauge(_legacy_worklist_counters, priority)
            Metrology.gauge(metricName, gauge)
        _legacy_worklist_counters[priority] = 0

    gauge = registry.metrics.get(_legacy_metric_worklist_total.metric)
    if not gauge:
        gauge = WorkListGauge(
            _legacy_worklist_counters,
            _legacy_metric_worklist_total.name,
        )
        Metrology.gauge(_legacy_metric_worklist_total.metric, gauge)
    _legacy_worklist_counters["total"] = 0

    global _legacy_events_meter
    _legacy_events_meter = Metrology.meter("zenhub.eventsSent")
Example #23
    def __init__(self, reactor):
        """Initialize a ZenHubWorker instance."""
        ZCmdBase.__init__(self)

        self.__reactor = reactor

        if self.options.profiling:
            self.profiler = ContinuousProfiler('ZenHubWorker', log=self.log)
            self.profiler.start()
            reactor.addSystemEventTrigger(
                'before', 'shutdown', self.profiler.stop,
            )

        self.instanceId = self.options.workerid
        self.current = IDLE
        self.currentStart = 0
        self.numCalls = Metrology.meter("zenhub.workerCalls")

        self.zem = self.dmd.ZenEventManager
        loadPlugins(self.dmd)

        serviceFactory = ServiceReferenceFactory(self)
        self.__registry = HubServiceRegistry(self.dmd, serviceFactory)

        # Configure/initialize the ZenHub client
        creds = UsernamePassword(
            self.options.hubusername, self.options.hubpassword,
        )
        endpointDescriptor = "tcp:{host}:{port}".format(
            host=self.options.hubhost, port=self.options.hubport,
        )
        endpoint = clientFromString(reactor, endpointDescriptor)
        self.__client = ZenHubClient(reactor, endpoint, creds, self, 10.0)

        # Setup Metric Reporting
        self.log.debug("Creating async MetricReporter")
        self._metric_manager = MetricManager(
            daemon_tags={
                'zenoss_daemon': 'zenhub_worker_%s' % self.options.workerid,
                'zenoss_monitor': self.options.monitor,
                'internal': True,
            },
        )
Example #24
    def __init__(self, dmd):
        self.dmd = dmd
        self._manager = Manager(self.dmd)
        self._pipes = (
            EventPluginPipe(
                self._manager, IPreEventPlugin, 'PreEventPluginPipe'
            ),
            CheckInputPipe(self._manager),
            IdentifierPipe(self._manager),
            AddDeviceContextAndTagsPipe(self._manager),
            TransformAndReidentPipe(
                self._manager,
                TransformPipe(self._manager),
                [
                    UpdateDeviceContextAndTagsPipe(self._manager),
                    IdentifierPipe(self._manager),
                    AddDeviceContextAndTagsPipe(self._manager),
                ]
            ),
            AssignDefaultEventClassAndTagPipe(self._manager),
            FingerprintPipe(self._manager),
            SerializeContextPipe(self._manager),
            EventPluginPipe(
                self._manager, IPostEventPlugin, 'PostEventPluginPipe'
            ),
            ClearClassRefreshPipe(self._manager),
            CheckHeartBeatPipe(self._manager)
        )
        self._pipe_timers = {}
        for pipe in self._pipes:
            timer_name = pipe.name
            self._pipe_timers[timer_name] = Metrology.timer(timer_name)

        self.reporter = MetricReporter(prefix='zenoss.zeneventd.')
        self.reporter.start()

        if not self.SYNC_EVERY_EVENT:
            # don't call sync() more often than 1 every 0.5 sec
            # helps throughput when receiving events in bursts
            self.nextSync = time()
            self.syncInterval = 0.5
Example #25
    def __init__(self, modeling_paused=None):
        """Initializes a ZenHubWorklist object.

        If an argument is provided for the modeling_paused parameter,
        it should be a function that takes no arguments and returns True
        to exclude modeling-related priorities from job selection.
        """
        if modeling_paused is None:
            self.__modeling_paused = bool  # always False
        else:
            self.__modeling_paused = modeling_paused

        # Associate a list with each priority
        self.__worklists = {priority: [] for priority in ZenHubPriority}

        # All jobs priority selection
        self.__alljobs = _PrioritySelection(_all_priorities)

        # No ApplyDataMaps priority selection
        self.__noadmjobs = _PrioritySelection(_no_adm_priorities)

        # Metric for wait time in worklist
        self.__waitTimer = Metrology.timer("zenhub.worklist.wait_time")
Example #26
    def test_request(self):
        self.server.get('/')
        self.assertEqual(1, Metrology.meter('request').count)
        self.assertEqual(1, Metrology.timer('request_time').count)
Example #27
    def __init__(self):
        self.timer = Metrology.timer('mytimer')
Example #28
    def tearDown(self):
        Metrology.stop()
Example #29
    def test_health_check(self):
        health = Metrology.health_check('test', HealthCheck)
        self.assertTrue(health is not None)
Example #30
import collections
import time
import types

from Products.ZenEvents.ZenEventClasses import Status_Ping

from zenoss.protocols.protobufs.zep_pb2 import (
    STATUS_SUPPRESSED,
    SEVERITY_CLEAR,
    SEVERITY_CRITICAL,
)

from . import connections

from metrology import Metrology
s_meter = Metrology.meter("events-suppressed")

# Default exports.
__all__ = [
    "get_suppressor",
]

# Make status checks clearer.
UP, DOWN = True, False

# Make toggle checks clearer.
ENABLED, DISABLED = True, False

# Singleton to keep state for callers who can't keep their own state.
SUPPRESSOR = None
Example #32
    def __init__(self, preferences, taskSplitter,
                 configurationListener=DUMMY_LISTENER,
                 initializationCallback=None,
                 stoppingCallback=None):
        """
        Constructs a new instance of the CollectorDaemon framework. Normally
        only a singleton instance of a CollectorDaemon should exist within a
        process, but this is not enforced.

        @param preferences: the collector configuration
        @type preferences: ICollectorPreferences
        @param taskSplitter: the task splitter to use for this collector
        @type taskSplitter: ITaskSplitter
        @param initializationCallback: a callable that will be executed after
                                       connection to the hub but before
                                       retrieving configuration information
        @type initializationCallback: any callable
        @param stoppingCallback: a callable that will be executed first during
                                 the stopping process. Exceptions will be
                                 logged but otherwise ignored.
        @type stoppingCallback: any callable
        """
        # create the configuration first, so we have the collector name
        # available before activating the rest of the Daemon class hierarchy.
        if not ICollectorPreferences.providedBy(preferences):
            raise TypeError("configuration must provide ICollectorPreferences")
        else:
            self._prefs = ObservableProxy(preferences)
            self._prefs.attachAttributeObserver('configCycleInterval', self._rescheduleConfig)

        if not ITaskSplitter.providedBy(taskSplitter):
            raise TypeError("taskSplitter must provide ITaskSplitter")
        else:
            self._taskSplitter = taskSplitter

        if not IConfigurationListener.providedBy(configurationListener):
            raise TypeError(
                    "configurationListener must provide IConfigurationListener")
        self._configListener = ConfigListenerNotifier()
        self._configListener.addListener(configurationListener)
        self._configListener.addListener(DeviceGuidListener(self))
        self._initializationCallback = initializationCallback
        self._stoppingCallback = stoppingCallback

        # register the various interfaces we provide the rest of the system so
        # that collector implementors can easily retrieve a reference back here
        # if needed
        zope.component.provideUtility(self, ICollector)
        zope.component.provideUtility(self, IEventService)
        zope.component.provideUtility(self, IDataService)

        # register the collector's own preferences object so it may be easily
        # retrieved by factories, tasks, etc.
        zope.component.provideUtility(self.preferences,
                                      ICollectorPreferences,
                                      self.preferences.collectorName)

        super(CollectorDaemon, self).__init__(name=self.preferences.collectorName)
        self._statService = StatisticsService()
        zope.component.provideUtility(self._statService, IStatisticsService)

        if self.options.cycle:
            # setup daemon statistics (deprecated names)
            self._statService.addStatistic("devices", "GAUGE")
            self._statService.addStatistic("dataPoints", "DERIVE")
            self._statService.addStatistic("runningTasks", "GAUGE")
            self._statService.addStatistic("taskCount", "GAUGE")
            self._statService.addStatistic("queuedTasks", "GAUGE")
            self._statService.addStatistic("missedRuns", "GAUGE")

            # namespace these a bit so they can be used in ZP monitoring.
            # prefer these stat names and metrology in future refs
            self._dataPointsMetric = Metrology.meter("collectordaemon.dataPoints")
            daemon = self
            class DeviceGauge(Gauge):
                @property
                def value(self):
                    return len(daemon._devices)
            Metrology.gauge('collectordaemon.devices', DeviceGauge())

            # Scheduler statistics
            class RunningTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.running
            Metrology.gauge('collectordaemon.runningTasks', RunningTasks())

            class TaskCount(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.taskCount
            Metrology.gauge('collectordaemon.taskCount', TaskCount())

            class QueuedTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.queued
            Metrology.gauge('collectordaemon.queuedTasks', QueuedTasks())

            class MissedRuns(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.missedRuns
            Metrology.gauge('collectordaemon.missedRuns', MissedRuns())

        self._deviceGuids = {}
        self._devices = set()
        self._unresponsiveDevices = set()
        self._rrd = None
        self._metric_writer = None
        self._derivative_tracker = None
        self.reconfigureTimeout = None

        # keep track of pending tasks if we're doing a single run, and not a
        # continuous cycle
        if not self.options.cycle:
            self._completedTasks = 0
            self._pendingTasks = []

        frameworkFactory = zope.component.queryUtility(IFrameworkFactory, self._frameworkFactoryName)
        self._configProxy = frameworkFactory.getConfigurationProxy()
        self._scheduler = frameworkFactory.getScheduler()
        self._scheduler.maxTasks = self.options.maxTasks
        self._ConfigurationLoaderTask = frameworkFactory.getConfigurationLoaderTask()

        # OLD - set the initialServices attribute so that the PBDaemon class
        # will load all of the remote services we need.
        self.initialServices = PBDaemon.initialServices +\
            [self.preferences.configurationService]

        # trap SIGUSR2 so that we can display detailed statistics
        signal.signal(signal.SIGUSR2, self._signalHandler)

        # let the configuration do any additional startup it might need
        self.preferences.postStartup()
        self.addedPostStartupTasks = False

        # Variables used by enterprise collector in resmgr
        #
        # flag that indicates we have finished loading the configs for the first time after a restart
        self.firstConfigLoadDone = False
        # flag that indicates the daemon has received the encryption key from zenhub
        self.encryptionKeyInitialized = False
        # flag that indicates the daemon is loading the cached configs
        self.loadingCachedConfigs = False
Example #33
    def test_gauge(self):
        self.assertTrue(Metrology.gauge('test', Gauge) is not None)
Example #34
    def test_timer(self):
        self.assertTrue(Metrology.timer('test') is not None)
Example #35
    def test_counter(self):
        self.assertTrue(Metrology.counter('test') is not None)
Example #36
    def test_meter(self):
        self.assertTrue(Metrology.meter('test') is not None)
Example #37
    def test_get(self):
        Metrology.counter('test')
        self.assertTrue(Metrology.get('test') is not None)
Example #38
    def __init__(self, processor):
        BaseQueueConsumerTask.__init__(self, processor)
        self.processing_timer = Metrology.timer('processMessage')
Example #39
from Products.ZenUtils.AmqpDataManager import AmqpDataManager
from Products.ZenMessaging.ChangeEvents.events import MessagePrePublishingEvent
from Products.ZenMessaging.queuemessaging.interfaces import IModelProtobufSerializer, IQueuePublisher, IProtobufSerializer, IEventPublisher
from contextlib import closing
from zenoss.protocols.protobufutil import ProtobufEnum
from zenoss.protocols.protobufs import modelevents_pb2
from zenoss.protocols.protobufs.zep_pb2 import Event
from zenoss.protocols.interfaces import IQueueSchema, IAMQPConnectionInfo

import logging

from metrology import Metrology

log = logging.getLogger('zen.queuepublisher')

MODEL_TYPE = ProtobufEnum(modelevents_pb2.ModelEvent, 'model_type')

_prepublishing_timer = Metrology.timer("MessagePrePublishingEvents")

class ModelChangePublisher(object):
    """
    Keeps track of all the model changes so far in this
    transaction. Do not instantiate this class directly,
    use "getModelChangePublisher" to receive the singleton
    """

    def __init__(self):
        self._events = []
        self._msgs = []
        self._addedGuids = set()
        self._modifiedGuids = set()
        self._removedGuids = set()
        self._publishable = []
Example #40
def _getPrepublishingTimer():
    global _prepublishing_timer
    if not _prepublishing_timer:
        _prepublishing_timer = Metrology.timer("MessagePrePublishingEvents")
    return _prepublishing_timer
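
A hypothetical usage sketch for the lazy accessor above: timing a block of pre-publishing work and feeding the elapsed seconds to update(). The publish_pending_messages() helper is an illustrative stand-in, not part of the module.

import time

start = time.time()
try:
    publish_pending_messages()  # hypothetical work being timed
finally:
    # record elapsed seconds against the MessagePrePublishingEvents timer
    _getPrepublishingTimer().update(time.time() - start)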
Example #41
    def test_utilization_timer(self):
        self.assertTrue(Metrology.utilization_timer('test') is not None)
Example #42
import logging, configargparse, pyodbc, time, pdb, re, json, sys
from threading import Event
from confluent_kafka import Producer
from metrology import Metrology
from metrology.reporter import LoggerReporter

meter = Metrology.meter("messages")
successful = Metrology.counter("success")
errors = Metrology.meter("errors")
stop_event = Event()


def get_kafka_parameters(options):
    producer_config = {
        "bootstrap.servers": options.brokers,
        "message.timeout.ms": 1000
    }
    match = re.findall("://([^/]+)/", options.brokers)
    if len(match) == 1:
        producer_config["bootstrap.servers"] = match[0] + ":9093"
        producer_config.update({
            'sasl.mechanisms': 'PLAIN',
            'security.protocol': 'SASL_SSL',
            "sasl.username": "******",
            "sasl.password": options.brokers
        })
    logging.debug("Using Kafka config: {}".format(json.dumps(producer_config)))
    return producer_config


def get_badge(options):
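
A hypothetical sketch of wiring the meters defined at the top of this example to the LoggerReporter that the module imports; the level and interval keyword names are assumptions about the reporter's options rather than something shown in the excerpt.

def start_metrics_logging():
    # Periodically write the "messages", "success" and "errors" metrics
    # to the standard logging module.
    reporter = LoggerReporter(level=logging.INFO, interval=10)
    reporter.start()
    return reporter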
Example #43
    def test_histogram(self):
        self.assertTrue(Metrology.histogram('test') is not None)
Example #44
    def tearDown(self):
        self.reporter.stop()
        Metrology.stop()
Example #45
    def __init__(self,
                 preferences,
                 taskSplitter,
                 configurationListener=DUMMY_LISTENER,
                 initializationCallback=None,
                 stoppingCallback=None):
        """
        Constructs a new instance of the CollectorDaemon framework. Normally
        only a singleton instance of a CollectorDaemon should exist within a
        process, but this is not enforced.

        @param preferences: the collector configuration
        @type preferences: ICollectorPreferences
        @param taskSplitter: the task splitter to use for this collector
        @type taskSplitter: ITaskSplitter
        @param initializationCallback: a callable that will be executed after
                                       connection to the hub but before
                                       retrieving configuration information
        @type initializationCallback: any callable
        @param stoppingCallback: a callable that will be executed first during
                                 the stopping process. Exceptions will be
                                 logged but otherwise ignored.
        @type stoppingCallback: any callable
        """
        # create the configuration first, so we have the collector name
        # available before activating the rest of the Daemon class hierarchy.
        if not ICollectorPreferences.providedBy(preferences):
            raise TypeError("configuration must provide ICollectorPreferences")
        else:
            self._prefs = ObservableProxy(preferences)
            self._prefs.attachAttributeObserver('configCycleInterval',
                                                self._rescheduleConfig)

        if not ITaskSplitter.providedBy(taskSplitter):
            raise TypeError("taskSplitter must provide ITaskSplitter")
        else:
            self._taskSplitter = taskSplitter

        if not IConfigurationListener.providedBy(configurationListener):
            raise TypeError(
                "configurationListener must provide IConfigurationListener")
        self._configListener = ConfigListenerNotifier()
        self._configListener.addListener(configurationListener)
        self._configListener.addListener(DeviceGuidListener(self))
        self._initializationCallback = initializationCallback
        self._stoppingCallback = stoppingCallback

        # register the various interfaces we provide the rest of the system so
        # that collector implementors can easily retrieve a reference back here
        # if needed
        zope.component.provideUtility(self, ICollector)
        zope.component.provideUtility(self, IEventService)
        zope.component.provideUtility(self, IDataService)

        # register the collector's own preferences object so it may be easily
        # retrieved by factories, tasks, etc.
        zope.component.provideUtility(self.preferences, ICollectorPreferences,
                                      self.preferences.collectorName)

        super(CollectorDaemon,
              self).__init__(name=self.preferences.collectorName)
        self._statService = StatisticsService()
        zope.component.provideUtility(self._statService, IStatisticsService)

        if self.options.cycle:
            # setup daemon statistics (deprecated names)
            self._statService.addStatistic("devices", "GAUGE")
            self._statService.addStatistic("dataPoints", "DERIVE")
            self._statService.addStatistic("runningTasks", "GAUGE")
            self._statService.addStatistic("taskCount", "GAUGE")
            self._statService.addStatistic("queuedTasks", "GAUGE")
            self._statService.addStatistic("missedRuns", "GAUGE")

            # namespace these a bit so they can be used in ZP monitoring.
            # prefer these stat names and metrology in future refs
            self._dataPointsMetric = Metrology.meter(
                "collectordaemon.dataPoints")
            daemon = self

            class DeviceGauge(Gauge):
                @property
                def value(self):
                    return len(daemon._devices)

            Metrology.gauge('collectordaemon.devices', DeviceGauge())

            # Scheduler statistics
            class RunningTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.running

            Metrology.gauge('collectordaemon.runningTasks', RunningTasks())

            class TaskCount(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.taskCount

            Metrology.gauge('collectordaemon.taskCount', TaskCount())

            class QueuedTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.queued

            Metrology.gauge('collectordaemon.queuedTasks', QueuedTasks())

            class MissedRuns(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.missedRuns

            Metrology.gauge('collectordaemon.missedRuns', MissedRuns())

        self._deviceGuids = {}
        self._devices = set()
        self._unresponsiveDevices = set()
        self._rrd = None
        self._metric_writer = None
        self._derivative_tracker = None
        self.reconfigureTimeout = None

        # keep track of pending tasks if we're doing a single run, and not a
        # continuous cycle
        if not self.options.cycle:
            self._completedTasks = 0
            self._pendingTasks = []

        frameworkFactory = zope.component.queryUtility(
            IFrameworkFactory, self._frameworkFactoryName)
        self._configProxy = frameworkFactory.getConfigurationProxy()
        self._scheduler = frameworkFactory.getScheduler()
        self._scheduler.maxTasks = self.options.maxTasks
        self._ConfigurationLoaderTask = frameworkFactory.getConfigurationLoaderTask()

        # OLD - set the initialServices attribute so that the PBDaemon class
        # will load all of the remote services we need.
        self.initialServices = PBDaemon.initialServices +\
            [self.preferences.configurationService]

        # trap SIGUSR2 so that we can display detailed statistics
        signal.signal(signal.SIGUSR2, self._signalHandler)

        # let the configuration do any additional startup it might need
        self.preferences.postStartup()
        self.addedPostStartupTasks = False

        # Variables used by enterprise collector in resmgr
        #
        # flag that indicates we have finished loading the configs for the first time after a restart
        self.firstConfigLoadDone = False
        # flag that indicates the daemon has received the encryption key from zenhub
        self.encryptionKeyInitialized = False
        # flag that indicates the daemon is loading the cached configs
        self.loadingCachedConfigs = False