Example #1
0
    def setUp(self):
        """Point a Ganglia reporter at localhost and seed sample metrics."""
        self.reporter = GangliaReporter("Group Name", "localhost", 8649)

        # One instrument of each kind so the reporter has data to publish.
        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        for instrument in (Metrology.timer('timer'),
                           Metrology.utilization_timer('utimer')):
            instrument.update(5)
Example #2
0
    def setUp(self):
        """Create the Librato reporter under test and record sample metrics."""
        self.reporter = LibratoReporter("<email>", "<token>")

        # Seed one metric of each kind before the reporter runs.
        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        for name, make in (('timer', Metrology.timer),
                           ('utimer', Metrology.utilization_timer)):
            make(name).update(1.5)
    def setUp(self):
        """Build a Graphite reporter and populate one metric of each type."""
        self.reporter = GraphiteReporter('localhost', 3333)

        # Shared sample value for both timers.
        sample = 1.5
        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(sample)
        Metrology.utilization_timer('utimer').update(sample)
Example #4
0
    def setUp(self):
        """Capture log output in a buffer and seed metrics for the reporter."""
        self.output = StringIO()
        # Route the root logger into an in-memory buffer so the test can
        # inspect what the reporter logged.
        logging.basicConfig(stream=self.output, level=logging.INFO)

        self.reporter = LoggerReporter()

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        for chronograph in (Metrology.timer('timer'),
                            Metrology.utilization_timer('utimer')):
            chronograph.update(1.5)
    def test_send_batch(self, mock):
        """Batched pickle reporter flushes all metrics via sendall.

        Five instruments with batch_size=2 should produce 25 sendall calls
        on the mocked socket.
        """
        self.reporter = GraphiteReporter("localhost", 3334, pickle=True, batch_size=2)

        Metrology.meter("meter").mark()
        Metrology.counter("counter").increment()
        Metrology.timer("timer").update(5)
        Metrology.utilization_timer("utimer").update(5)
        Metrology.histogram("histogram").update(5)
        self.reporter.write()
        # BUG FIX: Mock.assert_called() returns None (it raises on failure),
        # so the old assertTrue(mock.sendall.assert_called()) asserted on
        # None and always failed.  Call it directly instead.
        mock.sendall.assert_called()
        self.assertEqual(25, len(mock.sendall.call_args_list))
        self.reporter.stop()
Example #6
0
    def test_send_batch(self, mock):
        """Batched plain-text reporter flushes all metrics via the socket."""
        self.reporter = GraphiteReporter('localhost', 3333, batch_size=2)

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        Metrology.timer('timer').update(5)
        Metrology.utilization_timer('utimer').update(5)
        Metrology.histogram('histogram').update(5)
        self.reporter.write()
        # BUG FIX: assertTrue(mock.send.assert_called()) asserted on None
        # (assert_called returns None) and always failed.  It also checked
        # `send` while the count assertion below inspects `sendall`; use
        # sendall consistently, matching the pickle-batch test above.
        mock.sendall.assert_called()
        self.assertEqual(25, len(mock.sendall.call_args_list))
        self.reporter.stop()
Example #7
0
    def test_udp_send_batch(self, mock):
        """Five metrics with batch_size=2 must flush as three UDP packets."""
        self.reporter = StatsDReporter('localhost', 3333,
                                       batch_size=2, conn_type='udp')

        # Populate one instrument of each kind.
        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        for instrument in (Metrology.timer('timer'),
                           Metrology.utilization_timer('utimer'),
                           Metrology.histogram('histogram')):
            instrument.update(5)
        self.reporter.write()
        self.assertTrue(mock.sendto.called)
        # ceil(5 metrics / batch of 2) == 3 datagrams.
        self.assertEqual(3, len(mock.sendto.call_args_list))
        self.reporter.stop()
Example #8
0
    def __init__(self, zenhub, service):
        """Wire up metrology instruments for calls routed through zenhub.

        :param zenhub: the hub instance this interceptor belongs to
        :param service: the wrapped hub service receiving the calls
        """
        self.zenhub = zenhub
        self.service = service
        # Meter for every service call passing through the interceptor.
        self._serviceCalls = Metrology.meter("zenhub.serviceCalls")
        self.log = logging.getLogger('zen.zenhub.WorkerInterceptor')
        self._admTimer = Metrology.timer('zenhub.applyDataMap')
        self._eventsSent = Metrology.meter("zenhub.eventsSent")

        # Map remote-call names to the bookkeeping hook that updates the
        # matching metric; presumably consulted at dispatch time -- the
        # dispatch site is outside this block.
        self.meters = {
            'sendEvent': self.mark_send_event_timer,
            'sendEvents': self.mark_send_events_timer,
            'applyDataMaps': self.mark_apply_datamaps_timer,
        }
Example #9
0
    def test_udp_send_batch(self, mock):
        """Five metrics with a batch size of two flush as three datagrams."""
        self.reporter = StatsDReporter('localhost',
                                       3333,
                                       batch_size=2,
                                       conn_type='udp')

        Metrology.meter('meter').mark()
        Metrology.counter('counter').increment()
        for name, factory in (('timer', Metrology.timer),
                              ('utimer', Metrology.utilization_timer),
                              ('histogram', Metrology.histogram)):
            factory(name).update(5)
        self.reporter.write()
        self.assertTrue(mock.sendto.called)
        # 5 metrics in batches of 2 -> 3 sendto() calls.
        self.assertEqual(3, len(mock.sendto.call_args_list))
        self.reporter.stop()
Example #10
0
    def __init__(self, application, reporter=None, **kwargs):
        """Wrap a WSGI *application* with request metering.

        :param application: the downstream WSGI callable
        :param reporter: optional metrology reporter; started when supplied
        """
        self.application = application
        # Meter counts requests; timer tracks their latency.
        self.request = Metrology.meter('request')
        self.request_time = Metrology.timer('request_time')

        # Kick off periodic reporting when a reporter was handed in.
        if reporter:
            reporter.start()
Example #11
0
    def __init__(self):
        """Connect a zenhubworker to zenhub and wire up metric reporting.

        Installs signal handlers, loads plugins, opens a reconnecting PB
        connection to the hub, and starts an async metric reporter.
        """
        # SIGUSR2 is ignored until the real handler is installed below.
        signal.signal(signal.SIGUSR2, signal.SIG_IGN)
        ZCmdBase.__init__(self)
        if self.options.profiling:
            self.profiler = ContinuousProfiler('zenhubworker', log=self.log)
            self.profiler.start()
        self.current = IDLE
        self.currentStart = 0
        self.numCalls = Metrology.meter("zenhub.workerCalls")
        try:
            self.log.debug("establishing SIGUSR1 signal handler")
            signal.signal(signal.SIGUSR1, self.sighandler_USR1)
            self.log.debug("establishing SIGUSR2 signal handler")
            signal.signal(signal.SIGUSR2, self.sighandler_USR2)
        except ValueError:
            # If we get called multiple times, this will generate an exception:
            # ValueError: signal only works in main thread
            # Ignore it as we've already set up the signal handler.
            pass

        self.zem = self.dmd.ZenEventManager
        loadPlugins(self.dmd)
        self.pid = os.getpid()
        self.services = {}
        factory = ReconnectingPBClientFactory(pingPerspective=False)
        self.log.debug("Connecting to %s:%d", self.options.hubhost,
                       self.options.hubport)
        reactor.connectTCP(self.options.hubhost, self.options.hubport, factory)
        self.log.debug("Logging in as %s", self.options.username)
        c = credentials.UsernamePassword(self.options.username,
                                         self.options.password)
        factory.gotPerspective = self.gotPerspective

        # Stop the reactor when the PB connection to the hub is lost.
        def stop(*args):
            reactor.callLater(0, reactor.stop)

        factory.clientConnectionLost = stop
        factory.setCredentials(c)

        self.log.debug("Creating async MetricReporter")
        daemonTags = {
            'zenoss_daemon': 'zenhub_worker_%s' % self.options.workernum,
            'zenoss_monitor': self.options.monitor,
            'internal': True
        }

        def stopReporter():
            if self.metricreporter:
                return self.metricreporter.stop()

        # Order of the shutdown triggers matter. Want to stop reporter first, calling metricWriter() below
        # registers shutdown triggers for the actual metric http and redis publishers.
        reactor.addSystemEventTrigger('before', 'shutdown', stopReporter)
        self.metricreporter = TwistedMetricReporter(
            metricWriter=metricWriter(), tags=daemonTags)
        self.metricreporter.start()
Example #12
0
    def __init__(self, single=False):
        """
        Initializer

        @param single: collect from a single device?
        @type single: boolean
        """
        PBDaemon.__init__(self)
        # FIXME: cleanup --force option #2660
        self.options.force = True
        self.start = None
        self.startat = None
        self.rrdStats = DaemonStats()
        self.single = single
        if self.options.device:
            self.single = True
        self.modelerCycleInterval = self.options.cycletime
        # get the minutes and convert to fraction of a day
        self.collage = float(self.options.collage) / 1440.0
        self.pendingNewClients = False
        self.clients = []
        self.finished = []
        self.devicegen = None
        self.counters = collections.Counter()
        self.configFilter = None
        self.configLoaded = False

        # Make sendEvent() available to plugins
        zope.component.provideUtility(self, IEventService)

        # Delay start for between 10 and 60 seconds when run as a daemon.
        self.started = False
        self.startDelay = 0
        self.immediate = 1
        if self.options.daemon or self.options.cycle:
            if self.options.now:
                self.log.debug('option "now" specified, starting immediately.')
            else:
                # NOTE: the delay was originally 10-60 minutes (factor of 60,
                # see the commented line); it is now 10-60 seconds.
                # self.startDelay = randint(10, 60) * 60
                self.startDelay = randint(10, 60) * 1
                self.immediate = 0
                self.log.info(
                    'option "now" not specified, waiting %s seconds to start.'
                    % self.startDelay)
        else:
            self.log.debug("Run in foreground, starting immediately.")

        # ZEN-26637
        self.collectorLoopIteration = 0
        self.mainLoopGotDeviceList = False

        self.isMainScheduled = False

        # Metrology instruments tracking modeling throughput and failures.
        self._modeledDevicesMetric = Metrology.meter(
            "zenmodeler.modeledDevices")
        self._failuresMetric = Metrology.counter("zenmodeler.failures")
Example #13
0
 def __init__(self, options, log):
     """Set up event queues and metrology instruments for the collector.

     :param options: parsed daemon options
     :param log: logger supplied by the owning daemon
     """
     self.options = options
     self.transformers = _load_utilities(ICollectorEventTransformer)
     self.log = log
     self.discarded_events = 0
     # TODO: Do we want to limit the size of the clear event dictionary?
     self.clear_events_count = {}
     self._initQueues()
     self._eventsSent = Metrology.meter("collectordaemon.eventsSent")
     self._discardedEvents = Metrology.meter("collectordaemon.discardedEvent")
     self._eventTimer = Metrology.timer('collectordaemon.eventTimer')
     # registry yields (name, metric) pairs; collect the existing names so
     # the queue gauge is only registered once.
     metricNames = {x[0] for x in registry}
     if 'collectordaemon.eventQueue' not in metricNames:
         # Close over this manager so the gauge reads the live queue length.
         queue = self
         class EventQueueGauge(Gauge):
             @property
             def value(self):
                 return queue.event_queue_length
         Metrology.gauge('collectordaemon.eventQueue', EventQueueGauge())
Example #14
0
    def __init__(self, single=False ):
        """
        Initializer

        @param single: collect from a single device?
        @type single: boolean
        """
        PBDaemon.__init__(self)
        # FIXME: cleanup --force option #2660
        self.options.force = True
        self.start = None
        self.startat = None
        self.rrdStats = DaemonStats()
        self.single = single
        if self.options.device:
            self.single = True
        self.modelerCycleInterval = self.options.cycletime
        # get the minutes and convert to fraction of a day
        self.collage = float( self.options.collage ) / 1440.0
        self.pendingNewClients = False
        self.clients = []
        self.finished = []
        self.devicegen = None
        self.counters = collections.Counter()
        self.configFilter = None
        self.configLoaded = False

        # Make sendEvent() available to plugins
        zope.component.provideUtility(self, IEventService)

        # Delay start for between 10 and 60 seconds when run as a daemon.
        self.started = False
        self.startDelay = 0
        self.immediate = 1
        if self.options.daemon or self.options.cycle:
            if self.options.now:
                self.log.debug('option "now" specified, starting immediately.')
            else:
                # NOTE: delay was originally 10-60 minutes (factor of 60 in
                # the commented line); it is now 10-60 seconds.
                # self.startDelay = randint(10, 60) * 60
                self.startDelay = randint(10, 60) * 1
                self.immediate = 0
                self.log.info('option "now" not specified, waiting %s seconds to start.' %
                              self.startDelay)
        else:
            self.log.debug("Run in foreground, starting immediately.")


        # ZEN-26637
        self.collectorLoopIteration = 0
        self.mainLoopGotDeviceList = False

        # Metrology instruments tracking modeling throughput and failures.
        self._modeledDevicesMetric = Metrology.meter("zenmodeler.modeledDevices")
        self._failuresMetric = Metrology.counter("zenmodeler.failures")
Example #15
0
    def __init__(self, options, log):
        """Set up event queues and metrology instruments for the collector.

        :param options: parsed daemon options
        :param log: logger supplied by the owning daemon
        """
        self.options = options
        self.transformers = _load_utilities(ICollectorEventTransformer)
        self.log = log
        self.discarded_events = 0
        # TODO: Do we want to limit the size of the clear event dictionary?
        self.clear_events_count = {}
        self._initQueues()
        self._eventsSent = Metrology.meter("collectordaemon.eventsSent")
        self._discardedEvents = Metrology.meter(
            "collectordaemon.discardedEvent")
        self._eventTimer = Metrology.timer('collectordaemon.eventTimer')
        # registry yields (name, metric) pairs; collect existing names so the
        # queue gauge is only registered once.
        metricNames = {x[0] for x in registry}
        if 'collectordaemon.eventQueue' not in metricNames:
            # Close over this manager so the gauge reads the live length.
            queue = self

            class EventQueueGauge(Gauge):
                @property
                def value(self):
                    return queue.event_queue_length

            Metrology.gauge('collectordaemon.eventQueue', EventQueueGauge())
Example #16
0
    def __init__(self, reactor):
        """Initialize a ZenHubWorker instance.

        :param reactor: the Twisted reactor driving this worker
        """
        ZCmdBase.__init__(self)

        self.__reactor = reactor

        if self.options.profiling:
            self.profiler = ContinuousProfiler('ZenHubWorker', log=self.log)
            self.profiler.start()
            reactor.addSystemEventTrigger(
                'before', 'shutdown', self.profiler.stop,
            )

        self.instanceId = self.options.workerid
        self.current = IDLE
        self.currentStart = 0
        self.numCalls = Metrology.meter("zenhub.workerCalls")

        self.zem = self.dmd.ZenEventManager
        loadPlugins(self.dmd)

        serviceFactory = ServiceReferenceFactory(self)
        self.__registry = HubServiceRegistry(self.dmd, serviceFactory)

        # Configure/initialize the ZenHub client
        creds = UsernamePassword(
            self.options.hubusername, self.options.hubpassword,
        )
        endpointDescriptor = "tcp:{host}:{port}".format(
            host=self.options.hubhost, port=self.options.hubport,
        )
        endpoint = clientFromString(reactor, endpointDescriptor)
        # NOTE(review): 10.0 is passed positionally to ZenHubClient -- confirm
        # its meaning (timeout/interval) against the client's signature.
        self.__client = ZenHubClient(reactor, endpoint, creds, self, 10.0)

        # Setup Metric Reporting
        self.log.debug("Creating async MetricReporter")
        self._metric_manager = MetricManager(
            daemon_tags={
                'zenoss_daemon': 'zenhub_worker_%s' % self.options.workerid,
                'zenoss_monitor': self.options.monitor,
                'internal': True,
            },
        )
Example #17
0
def register_legacy_worklist_metrics():
    """Create the Metrology counters for tracking worklist statistics.

    Registers one gauge per configured priority plus a "total" gauge, all
    backed by the module-level _legacy_worklist_counters mapping, and
    initializes the zenhub.eventsSent meter.
    """
    config = getUtility(IHubServerConfig)
    global _legacy_worklist_counters

    for metricName, priorityName in config.legacy_metric_priority_map.items():
        # Skip re-registration when the gauge already exists in the registry.
        gauge = registry.metrics.get(metricName)
        priority = ServiceCallPriority[priorityName]
        if not gauge:
            gauge = WorkListGauge(_legacy_worklist_counters, priority)
            Metrology.gauge(metricName, gauge)
        # Reset the backing counter regardless of registration state.
        _legacy_worklist_counters[priority] = 0

    gauge = registry.metrics.get(_legacy_metric_worklist_total.metric)
    if not gauge:
        # NOTE(review): the per-priority gauges key on the priority object
        # while the total gauge keys on .name -- confirm WorkListGauge
        # accepts both key styles.
        gauge = WorkListGauge(
            _legacy_worklist_counters,
            _legacy_metric_worklist_total.name,
        )
        Metrology.gauge(_legacy_metric_worklist_total.metric, gauge)
    _legacy_worklist_counters["total"] = 0

    global _legacy_events_meter
    _legacy_events_meter = Metrology.meter("zenhub.eventsSent")
Example #18
0
 def test_meter(self):
     """Metrology.meter must return a usable meter object, never None."""
     # assertIsNotNone gives a clearer failure message than
     # assertTrue(x is not None).
     self.assertIsNotNone(Metrology.meter('test'))
try:
    from networkx import shortest_simple_paths
except ImportError:
    # The networkx version shipped with Zenoss 4 (1.3) doesn't have
    # shortest_simple_paths. So we have a compatible copy of the algorithm
    # locally.
    from .nx.simple_paths import shortest_simple_paths

from . import connections

import logging
LOG = logging.getLogger("zen.Layer2")

from metrology import Metrology
# Module-wide meter marking each suppressed event.
s_meter = Metrology.meter("events-suppressed")

# Default exports.
__all__ = [
    "get_suppressor",
    ]


# Make status checks clearer.
UP, DOWN = True, False

# Make toggle checks clearer.
ENABLED, DISABLED = True, False

# Singleton to keep state for callers who can't keep their own state.
SUPPRESSOR = None
Example #20
0
from metrology.reporter import LoggerReporter
from producer_conf import *

def generate_data(fake):
    """Build one synthetic web-access log record.

    :param fake: a Faker-like object providing user_name, ipv4_public, uri,
        user_agent and random_element
    :return: dict with username, ip, uri, user_agent, method, status_code
        and an ISO-8601 timestamp
    """
    record = {
        "username": fake.user_name(),
        "ip": fake.ipv4_public(),
        "uri": fake.uri(),
        "user_agent": fake.user_agent(),
        "method": fake.random_element(elements=["POST", "GET", "HEAD", "PUT"]),
        "status_code": fake.random_element(
            elements=[200, 201, 202, 301, 303, 400, 401, 403, 404, 500]),
        "ts": datetime.now().isoformat(),
    }
    return record

# Shared instruments: throughput meter plus success/error counters
# updated from the delivery callback below.
meter = Metrology.meter("messages")
successful = Metrology.counter("success")
errors = Metrology.counter("errors")
logging.basicConfig(level=logging.DEBUG)
# Periodically (every 10 seconds) log metric snapshots at INFO level.
reporter = LoggerReporter(level=logging.INFO, interval=10)


def delivery_report(err, msg):
    """Kafka producer callback: count the outcome and mark throughput.

    :param err: delivery error, or None on success
    :param msg: the produced message (unused here beyond the signature)
    """
    outcome = errors if err is not None else successful
    outcome.increment()
    meter.mark()

producer_config = {"bootstrap.servers": BROKER_HOST}
if len(KAFKA_USERNAME):
Example #21
0
import logging, configargparse, pyodbc, time, pdb, re, json, sys
from threading import Event
from confluent_kafka import Producer
from metrology import Metrology
from metrology.reporter import LoggerReporter

# Shared instruments updated from the Kafka delivery callback.
meter = Metrology.meter("messages")
successful = Metrology.counter("success")
# NOTE(review): "errors" is a meter here, but the sibling producer script
# declares it as a counter -- confirm which is intended.
errors = Metrology.meter("errors")
# Set to stop the main produce loop on shutdown.
stop_event = Event()


def get_kafka_parameters(options):
    """Derive a confluent-kafka producer configuration from CLI options.

    :param options: parsed options object exposing a ``brokers`` string
    :return: dict of producer settings; when ``brokers`` looks like a URL
        (``scheme://host/``), the host is rewritten to port 9093 and
        SASL_SSL settings are added.
    """
    producer_config = {
        "bootstrap.servers": options.brokers,
        "message.timeout.ms": 1000
    }
    match = re.findall(r"://([^/]+)/", options.brokers)
    if len(match) == 1:
        producer_config["bootstrap.servers"] = match[0] + ":9093"
        producer_config.update({
            'sasl.mechanisms': 'PLAIN',
            'security.protocol': 'SASL_SSL',
            "sasl.username": "******",
            # NOTE(review): the password is set to the raw brokers value;
            # this looks like redacted sample code -- confirm before reuse.
            "sasl.password": options.brokers
        })
    # Lazy %-formatting: the JSON dump is only built when DEBUG is enabled.
    logging.debug("Using Kafka config: %s", json.dumps(producer_config))
    return producer_config


def get_badge(options):
Example #22
0
    def __init__(self, preferences, taskSplitter,
                 configurationListener=DUMMY_LISTENER,
                 initializationCallback=None,
                 stoppingCallback=None):
        """
        Constructs a new instance of the CollectorDaemon framework. Normally
        only a singleton instance of a CollectorDaemon should exist within a
        process, but this is not enforced.

        @param preferences: the collector configuration
        @type preferences: ICollectorPreferences
        @param taskSplitter: the task splitter to use for this collector
        @type taskSplitter: ITaskSplitter
        @param initializationCallback: a callable that will be executed after
                                       connection to the hub but before
                                       retrieving configuration information
        @type initializationCallback: any callable
        @param stoppingCallback: a callable that will be executed first during
                                 the stopping process. Exceptions will be
                                 logged but otherwise ignored.
        @type stoppingCallback: any callable
        """
        # create the configuration first, so we have the collector name
        # available before activating the rest of the Daemon class hierarchy.
        if not ICollectorPreferences.providedBy(preferences):
            raise TypeError("configuration must provide ICollectorPreferences")
        else:
            self._prefs = ObservableProxy(preferences)
            self._prefs.attachAttributeObserver('configCycleInterval', self._rescheduleConfig)

        if not ITaskSplitter.providedBy(taskSplitter):
            raise TypeError("taskSplitter must provide ITaskSplitter")
        else:
            self._taskSplitter = taskSplitter

        if not IConfigurationListener.providedBy(configurationListener):
            raise TypeError(
                    "configurationListener must provide IConfigurationListener")
        self._configListener = ConfigListenerNotifier()
        self._configListener.addListener(configurationListener)
        self._configListener.addListener(DeviceGuidListener(self))
        self._initializationCallback = initializationCallback
        self._stoppingCallback = stoppingCallback

        # register the various interfaces we provide the rest of the system so
        # that collector implementors can easily retrieve a reference back here
        # if needed
        zope.component.provideUtility(self, ICollector)
        zope.component.provideUtility(self, IEventService)
        zope.component.provideUtility(self, IDataService)

        # register the collector's own preferences object so it may be easily
        # retrieved by factories, tasks, etc.
        zope.component.provideUtility(self.preferences,
                                      ICollectorPreferences,
                                      self.preferences.collectorName)

        super(CollectorDaemon, self).__init__(name=self.preferences.collectorName)
        self._statService = StatisticsService()
        zope.component.provideUtility(self._statService, IStatisticsService)

        if self.options.cycle:
            # setup daemon statistics (deprecated names)
            self._statService.addStatistic("devices", "GAUGE")
            self._statService.addStatistic("dataPoints", "DERIVE")
            self._statService.addStatistic("runningTasks", "GAUGE")
            self._statService.addStatistic("taskCount", "GAUGE")
            self._statService.addStatistic("queuedTasks", "GAUGE")
            self._statService.addStatistic("missedRuns", "GAUGE")

            # namespace these a bit so they can be used in ZP monitoring.
            # prefer these stat names and metrology in future refs
            self._dataPointsMetric = Metrology.meter("collectordaemon.dataPoints")
            daemon = self
            # Each gauge below closes over `daemon` to read live state.
            class DeviceGauge(Gauge):
                @property
                def value(self):
                    return len(daemon._devices)
            Metrology.gauge('collectordaemon.devices', DeviceGauge())

            # Scheduler statistics
            class RunningTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.running
            Metrology.gauge('collectordaemon.runningTasks', RunningTasks())

            class TaskCount(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.taskCount
            Metrology.gauge('collectordaemon.taskCount', TaskCount())

            class QueuedTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.queued
            Metrology.gauge('collectordaemon.queuedTasks', QueuedTasks())

            class MissedRuns(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.missedRuns
            Metrology.gauge('collectordaemon.missedRuns', MissedRuns())

        # Runtime bookkeeping for the devices currently being collected.
        self._deviceGuids = {}
        self._devices = set()
        self._unresponsiveDevices = set()
        self._rrd = None
        self._metric_writer = None
        self._derivative_tracker = None
        self.reconfigureTimeout = None

        # keep track of pending tasks if we're doing a single run, and not a
        # continuous cycle
        if not self.options.cycle:
            self._completedTasks = 0
            self._pendingTasks = []

        frameworkFactory = zope.component.queryUtility(IFrameworkFactory, self._frameworkFactoryName)
        self._configProxy = frameworkFactory.getConfigurationProxy()
        self._scheduler = frameworkFactory.getScheduler()
        self._scheduler.maxTasks = self.options.maxTasks
        self._ConfigurationLoaderTask = frameworkFactory.getConfigurationLoaderTask()

        # OLD - set the initialServices attribute so that the PBDaemon class
        # will load all of the remote services we need.
        self.initialServices = PBDaemon.initialServices +\
            [self.preferences.configurationService]

        # trap SIGUSR2 so that we can display detailed statistics
        signal.signal(signal.SIGUSR2, self._signalHandler)

        # let the configuration do any additional startup it might need
        self.preferences.postStartup()
        self.addedPostStartupTasks = False

        # Variables used by enterprise collector in resmgr
        #
        # flag that indicates we have finished loading the configs for the first time after a restart
        self.firstConfigLoadDone = False
        # flag that indicates the daemon has received the encryption key from zenhub
        self.encryptionKeyInitialized = False
        # flag that indicates the daemon is loading the cached configs
        self.loadingCachedConfigs = False
    def store_request(self, request, response):
        """Store events in the cache, update statistics.

        Allow for separate samples/events/URI cache data.
        """

        # Create a unique client_id that has: host/uri
        client_ip = request.getClientIP()
        client_id = (client_ip, request.uri)

        # Add the client's message to the cache
        self.cache[client_id].append({
            'timestamp': datetime.datetime.now(),
            'response_code': request.code,
            'response_code_message': request.code_message,
            'request_body': request.content.getvalue(),
            'response_body': response,
            'zenoss_actions': request._zaction
        })

        # --------------------------------------------------------------------
        # Use Metrology to record the event
        # --------------------------------------------------------------------
        # Components interpolated into per-method/URI/client metric names.
        v = dict(
            dotted_uri=request.uri.replace('/', '.').strip('.'),
            client_ip=client_ip,
            method=request.method.lower()
        )

        Metrology.meter('http.requests').mark()
        Metrology.meter('http.{method}.requests'.format(**v)).mark()
        Metrology.meter("http.{method}.{dotted_uri}.requests".format(**v)).mark()
        Metrology.meter("http.{method}.{dotted_uri}.{client_ip}.requests".format(**v)).mark()

        # 4xx/5xx responses are additionally counted as errors.
        if (request.code >= 400):
            Metrology.meter("http.{method}.{dotted_uri}.{client_ip}.errors".format(**v)).mark()

        # One mark per entry in the request's recorded maps/events/metrics.
        for _ in request._zaction['maps']:
            Metrology.meter("zenopenstack.{dotted_uri}.{client_ip}.datamaps".format(**v)).mark()

        for _ in request._zaction['events']:
            Metrology.meter("zenopenstack.{dotted_uri}.{client_ip}.events".format(**v)).mark()

        for _ in request._zaction['metrics']:
            Metrology.meter("zenopenstack.{dotted_uri}.{client_ip}.metrics".format(**v)).mark()
import collections
import time
import types

from Products.ZenEvents.ZenEventClasses import Status_Ping

from zenoss.protocols.protobufs.zep_pb2 import (
    STATUS_SUPPRESSED,
    SEVERITY_CLEAR,
    SEVERITY_CRITICAL,
)

from . import connections

from metrology import Metrology
# Module-wide meter marking each event this module suppresses.
s_meter = Metrology.meter("events-suppressed")

# Default exports.
__all__ = [
    "get_suppressor",
]

# Make status checks clearer.
UP, DOWN = True, False

# Make toggle checks clearer.
ENABLED, DISABLED = True, False

# Singleton to keep state for callers who can't keep their own state.
SUPPRESSOR = None
Example #25
0
 def test_request(self):
     """One GET must tick both the request meter and the request timer once."""
     self.server.get('/')
     for factory, metric_name in ((Metrology.meter, 'request'),
                                  (Metrology.timer, 'request_time')):
         self.assertEqual(1, factory(metric_name).count)
Example #26
0
    def __init__(self,
                 preferences,
                 taskSplitter,
                 configurationListener=DUMMY_LISTENER,
                 initializationCallback=None,
                 stoppingCallback=None):
        """
        Constructs a new instance of the CollectorDaemon framework. Normally
        only a singleton instance of a CollectorDaemon should exist within a
        process, but this is not enforced.

        @param preferences: the collector configuration
        @type preferences: ICollectorPreferences
        @param taskSplitter: the task splitter to use for this collector
        @type taskSplitter: ITaskSplitter
        @param initializationCallback: a callable that will be executed after
                                       connection to the hub but before
                                       retrieving configuration information
        @type initializationCallback: any callable
        @param stoppingCallback: a callable that will be executed first during
                                 the stopping process. Exceptions will be
                                 logged but otherwise ignored.
        @type stoppingCallback: any callable
        """
        # create the configuration first, so we have the collector name
        # available before activating the rest of the Daemon class hierarchy.
        if not ICollectorPreferences.providedBy(preferences):
            raise TypeError("configuration must provide ICollectorPreferences")
        else:
            self._prefs = ObservableProxy(preferences)
            self._prefs.attachAttributeObserver('configCycleInterval',
                                                self._rescheduleConfig)

        if not ITaskSplitter.providedBy(taskSplitter):
            raise TypeError("taskSplitter must provide ITaskSplitter")
        else:
            self._taskSplitter = taskSplitter

        if not IConfigurationListener.providedBy(configurationListener):
            raise TypeError(
                "configurationListener must provide IConfigurationListener")
        self._configListener = ConfigListenerNotifier()
        self._configListener.addListener(configurationListener)
        self._configListener.addListener(DeviceGuidListener(self))
        self._initializationCallback = initializationCallback
        self._stoppingCallback = stoppingCallback

        # register the various interfaces we provide the rest of the system so
        # that collector implementors can easily retrieve a reference back here
        # if needed
        zope.component.provideUtility(self, ICollector)
        zope.component.provideUtility(self, IEventService)
        zope.component.provideUtility(self, IDataService)

        # register the collector's own preferences object so it may be easily
        # retrieved by factories, tasks, etc.
        zope.component.provideUtility(self.preferences, ICollectorPreferences,
                                      self.preferences.collectorName)

        super(CollectorDaemon,
              self).__init__(name=self.preferences.collectorName)
        self._statService = StatisticsService()
        zope.component.provideUtility(self._statService, IStatisticsService)

        if self.options.cycle:
            # setup daemon statistics (deprecated names)
            self._statService.addStatistic("devices", "GAUGE")
            self._statService.addStatistic("dataPoints", "DERIVE")
            self._statService.addStatistic("runningTasks", "GAUGE")
            self._statService.addStatistic("taskCount", "GAUGE")
            self._statService.addStatistic("queuedTasks", "GAUGE")
            self._statService.addStatistic("missedRuns", "GAUGE")

            # namespace these a bit so they can be used in ZP monitoring.
            # prefer these stat names and metrology in future refs
            self._dataPointsMetric = Metrology.meter(
                "collectordaemon.dataPoints")
            daemon = self

            # Each gauge below closes over `daemon` to read live state.
            class DeviceGauge(Gauge):
                @property
                def value(self):
                    return len(daemon._devices)

            Metrology.gauge('collectordaemon.devices', DeviceGauge())

            # Scheduler statistics
            class RunningTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.running

            Metrology.gauge('collectordaemon.runningTasks', RunningTasks())

            class TaskCount(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.taskCount

            Metrology.gauge('collectordaemon.taskCount', TaskCount())

            class QueuedTasks(Gauge):
                @property
                def value(self):
                    return daemon._scheduler._executor.queued

            Metrology.gauge('collectordaemon.queuedTasks', QueuedTasks())

            class MissedRuns(Gauge):
                @property
                def value(self):
                    return daemon._scheduler.missedRuns

            Metrology.gauge('collectordaemon.missedRuns', MissedRuns())

        # Runtime bookkeeping for the devices currently being collected.
        self._deviceGuids = {}
        self._devices = set()
        self._unresponsiveDevices = set()
        self._rrd = None
        self._metric_writer = None
        self._derivative_tracker = None
        self.reconfigureTimeout = None

        # keep track of pending tasks if we're doing a single run, and not a
        # continuous cycle
        if not self.options.cycle:
            self._completedTasks = 0
            self._pendingTasks = []

        frameworkFactory = zope.component.queryUtility(
            IFrameworkFactory, self._frameworkFactoryName)
        self._configProxy = frameworkFactory.getConfigurationProxy()
        self._scheduler = frameworkFactory.getScheduler()
        self._scheduler.maxTasks = self.options.maxTasks
        self._ConfigurationLoaderTask = frameworkFactory.getConfigurationLoaderTask(
        )

        # OLD - set the initialServices attribute so that the PBDaemon class
        # will load all of the remote services we need.
        self.initialServices = PBDaemon.initialServices +\
            [self.preferences.configurationService]

        # trap SIGUSR2 so that we can display detailed statistics
        signal.signal(signal.SIGUSR2, self._signalHandler)

        # let the configuration do any additional startup it might need
        self.preferences.postStartup()
        self.addedPostStartupTasks = False

        # Variables used by enterprise collector in resmgr
        #
        # flag that indicates we have finished loading the configs for the first time after a restart
        self.firstConfigLoadDone = False
        # flag that indicates the daemon has received the encryption key from zenhub
        self.encryptionKeyInitialized = False
        # flag that indicates the daemon is loading the cached configs
        self.loadingCachedConfigs = False
class Health(Resource):
    """ /health: expose health of zenopenstack daemon (metrics, logs) """

    isLeaf = True

    def render_GET(self, request):
        request.setResponseCode(200)
        request.setHeader(b"content-type", b"text/html")

        if len(request.postpath):
            if len(request.postpath) == 1 and request.postpath[0] == "metrics":
                body = "<html><body>"
                body += "<table border=\"1\">"
                body += "<tr>"
                body += "  <th>Metric</th>"
                body += "  <th>Count</th>"
                body += "  <th>1M Rate</th>"
                body += "  <th>5M Rate</th>"
                body += "  <th>15M Rate</th>"
                body += "  <th>Mean Rate</th>"
                body += "</tr>"

                for name, metric in sorted(metrology_registry):
                    if isinstance(metric, Meter):
                        body += "<tr>"
                        body += "  <td>%s</td>" % name
                        body += "  <td>%d</td>" % metric.count
                        body += "  <td>%f</td>" % metric.one_minute_rate
                        body += "  <td>%f</td>" % metric.five_minute_rate
                        body += "  <td>%f</td>" % metric.fifteen_minute_rate
                        body += "  <td>%f</td>" % metric.mean_rate
                        body += "</tr>"
                    else:
                        log.debug("Ignoring unhandled metric type: %s", metric)
                body += "</body></html>"
                return body

            if len(request.postpath) == 1 and request.postpath[0] == "queue":
                body = "<html><body>"
                body = "The following devices have received model updates:<ul>"
                for device_id in MAP_QUEUE:
                    body += '<li><a href="/health/queue/%s">%s</a></li>' % (device_id, device_id)
                body += "</ul></body></html>"
                return body

            if len(request.postpath) == 2 and request.postpath[0] == "queue":
                device_id = request.postpath[1]

                if device_id not in MAP_QUEUE:
                    return NoResource().render(request)

                body = "<html><body>"

                body += "<b>Currently-Held Object Maps</b><p>"
                for component_id, objmap in MAP_QUEUE[device_id].held_objmaps.iteritems():
                    body += "<hr>"
                    body += component_id + ":<br>"
                    body += "<pre>" + cgi.escape(pformat(objmap[1])) + "</pre>"

                body += "<b>Most Recently Released Object Maps</b>"
                for timestamp, objmap in reversed(MAP_QUEUE[device_id].released_objmaps):
                    body += "<hr>%s (%s ago)" % (
                        timestamp.isoformat(),
                        (datetime.datetime.now() - timestamp)
                    )
                    body += "<pre>" + cgi.escape(pformat(objmap)) + "</pre>"

                body += "</body></html>"
                return body

            if len(request.postpath) < 3 or request.postpath[0] != "logs":
                return NoResource().render(request)

            ip = request.postpath[1]
            uri = "/".join(request.postpath[2:])
            body = "<html><body>"
            records = self.site.request_buffer.get_requests((ip, '/' + uri))
            body += "<b>Most recent %d requests from /%s to %s</b> (of %d max)<p>" % (
                len(records),
                ip,
                uri,
                self.site.request_buffer.cache_size
            )

            for record in reversed(records):
                try:
                    request_body = record['request_body']
                    try:
                        # if the payload is JSON data, reindent it for readability
                        request_body = json.dumps(json.loads(request_body), indent=5)
                    except Exception:
                        pass
                    response_body = cgi.escape(record['response_body'])
                    zenoss_actions = cgi.escape(pformat(record['zenoss_actions']))

                    body += "<hr>%s (%s ago)" % (
                        record['timestamp'].isoformat(),
                        (datetime.datetime.now() - record['timestamp'])
                    )
                    body += '<table border="1" style="font-size: 80%"><tr><th>Request</th><th>Response</th><th>Zenoss Action</th></tr>'
                    body += "<tr valign=\"top\"><td><pre>" + request_body + "</pre></td>"
                    body += "<td><b>%d %s</b><pre>%s</pre></td>" % (
                        record['response_code'],
                        record['response_code_message'],
                        response_body)
                    body += "<td><pre>" + zenoss_actions + "</pre></td>"
                    body += "</tr></table>"
                except Exception, e:
                    body += "<hr>Error displaying log record: %s<hr>" % str(e)

            body += "</body></html>"
            return body

        body = """
<html>
  <body>
    <table border="1">
      <tr>
        <th rowspan="2">URI</th>
        <th rowspan="2">Client IP</th>
        <th colspan="3">POST</th>
        <th colspan="3">GET</th>
        <th colspan="3">POST Errors</th>
        <th colspan="3">GET Errors</th>
        <th colspan="3">Datamaps</th>
        <th colspan="3">Events</th>
        <th colspan="3">Metrics</th>
        <th rowspan="2">Last Request</th>
        <th rowspan="2">Details</th>
     </tr>
     <tr>
        <th>count</th>
        <th>15m</th>
        <th>mean</th>
        <th>count</th>
        <th>15m</th>
        <th>mean</th>
        <th>count</th>
        <th>15m</th>
        <th>mean</th>
        <th>count</th>
        <th>15m</th>
        <th>mean</th>
        <th>count</th>
        <th>15m</th>
        <th>mean</th>
        <th>count</th>
        <th>15m</th>
        <th>mean</th>
        <th>count</th>
        <th>15m</th>
        <th>mean</th>
     </tr>

        """

        for client_ip, uri in sorted(self.site.request_buffer.get_client_keys()):
            v = dict(
                dotted_uri=uri.replace('/', '.').strip('.'),
                client_ip=client_ip
            )

            posts = Metrology.meter("http.post.{dotted_uri}.{client_ip}.requests".format(**v))
            gets = Metrology.meter("http.get.{dotted_uri}.{client_ip}.requests".format(**v))
            post_errors = Metrology.meter("http.post.{dotted_uri}.{client_ip}.errors".format(**v))
            get_errors = Metrology.meter("http.get.{dotted_uri}.{client_ip}.errors".format(**v))
            datamaps = Metrology.meter("zenopenstack.{dotted_uri}.{client_ip}.datamaps".format(**v))
            events = Metrology.meter("zenopenstack.{dotted_uri}.{client_ip}.events".format(**v))
            metrics = Metrology.meter("zenopenstack.{dotted_uri}.{client_ip}.metrics".format(**v))

            most_recent_request = self.site.request_buffer.get_most_recent_request((client_ip, uri))

            body += "<tr>"
            body += "  <td>%s</td>" % uri
            body += "  <td>%s</td>" % client_ip
            body += "  <td>%d</td>" % posts.count
            body += "  <td>%.2f</td>" % posts.fifteen_minute_rate
            body += "  <td>%.2f</td>" % posts.mean_rate
            body += "  <td>%d</td>" % gets.count
            body += "  <td>%.2f</td>" % gets.fifteen_minute_rate
            body += "  <td>%.2f</td>" % gets.mean_rate
            body += "  <td>%d</td>" % post_errors.count
            body += "  <td>%.2f</td>" % post_errors.fifteen_minute_rate
            body += "  <td>%.2f</td>" % post_errors.mean_rate
            body += "  <td>%d</td>" % get_errors.count
            body += "  <td>%.2f</td>" % get_errors.fifteen_minute_rate
            body += "  <td>%.2f</td>" % get_errors.mean_rate
            body += "  <td>%d</td>" % datamaps.count
            body += "  <td>%.2f</td>" % datamaps.fifteen_minute_rate
            body += "  <td>%.2f</td>" % datamaps.mean_rate
            body += "  <td>%d</td>" % events.count
            body += "  <td>%.2f</td>" % events.fifteen_minute_rate
            body += "  <td>%.2f</td>" % events.mean_rate
            body += "  <td>%d</td>" % metrics.count
            body += "  <td>%.2f</td>" % metrics.fifteen_minute_rate
            body += "  <td>%.2f</td>" % metrics.mean_rate

            if most_recent_request:
                body += "  <td>%s ago (%d)</td>" % (
                    (datetime.datetime.now() - most_recent_request['timestamp']),
                    most_recent_request['response_code'])
            else:
                body += "  <td>None</td>"
            body += "  <td><a href=\"/health/logs/%s%s\">request log</a></td>" % (client_ip, uri)
            body += "</tr>"

        return body + """
    def __init__(self, reactor):
        """Initialize a ZenHubWorker instance."""
        # Base class setup (connects to ZODB, parses options, provides self.dmd).
        ZCmdBase.__init__(self)

        self.__reactor = reactor

        # Optionally run a continuous profiler for the worker's lifetime;
        # it is stopped automatically just before reactor shutdown.
        if self.options.profiling:
            self.profiler = ContinuousProfiler('ZenHubWorker', log=self.log)
            self.profiler.start()
            reactor.addSystemEventTrigger(
                'before',
                'shutdown',
                self.profiler.stop,
            )

        # Track the service call currently being executed (IDLE when none)
        # and when it started; meter all handled calls as "zenhub.workerCalls".
        self.current = IDLE
        self.currentStart = 0
        self.numCalls = Metrology.meter("zenhub.workerCalls")

        self.zem = self.dmd.ZenEventManager
        loadPlugins(self.dmd)

        # Service lookup machinery: the manager resolves service names via the
        # loader, wraps them with references from the factory, and caches them
        # in the registry.
        self.__registry = ServiceRegistry()
        loader = ServiceLoader()
        factory = ServiceReferenceFactory(self)
        self.__manager = ServiceManager(self.__registry, loader, factory)

        # Configure/initialize the ZenHub client using the hub host/port and
        # credentials supplied on the command line.
        creds = UsernamePassword(
            self.options.hubusername,
            self.options.hubpassword,
        )
        endpointDescriptor = "tcp:{host}:{port}".format(
            host=self.options.hubhost,
            port=self.options.hubport,
        )
        endpoint = clientFromString(reactor, endpointDescriptor)
        # NOTE(review): 10.0 looks like a timeout/retry interval in seconds --
        # confirm against ZenHubClient's constructor signature.
        self.__client = ZenHubClient(
            reactor,
            endpoint,
            creds,
            self,
            10.0,
            self.worklistId,
        )

        # Setup Metric Reporting
        self.log.debug("Creating async MetricReporter")
        # Tags identify this specific worker instance in reported metrics.
        self._metric_manager = MetricManager(daemon_tags={
            'zenoss_daemon':
            'zenhub_worker_%s' % self.instanceId,
            'zenoss_monitor':
            self.options.monitor,
            'internal':
            True,
        }, )
        # Make the metric manager available via zope.component.getUtility
        getGlobalSiteManager().registerUtility(
            self._metric_manager,
            IMetricManager,
            name='zenhub_worker_metricmanager',
        )