def main():
    preferences = Preferences()
    task_factory = SimpleTaskFactory(PythonCollectionTask)
    task_splitter = PerDataSourceInstanceTaskSplitter(task_factory)
    daemon = CollectorDaemon(preferences, task_splitter)

    pool_size = preferences.options.threadPoolSize

    # The Twisted version shipped with Zenoss 4.1 doesn't have this.
    if hasattr(reactor, 'suggestThreadPoolSize'):
        reactor.suggestThreadPoolSize(pool_size)

    daemon.run()
def testName(self):
    configs = []

    c = DummyObject()
    c.id = 'host1'
    c.configCycleInterval = 30
    configs.append(c)

    c = DummyObject()
    c.id = 'host2'
    c.configCycleInterval = 100
    configs.append(c)

    taskFactory = SimpleTaskFactory(BasicTestTask)
    taskSplitter = SimpleTaskSplitter(taskFactory)
    tasks = taskSplitter.splitConfiguration(configs)
    self.assertEquals(len(tasks), 2)
    return procs


def reverseDict(d):
    """
    Return a dictionary with keys and values swapped: every value is a
    list, to handle different keys mapping to the same value.
    """
    result = {}
    for a, v in d.iteritems():
        result.setdefault(v, []).append(a)
    return result


def chunk(lst, n):
    """
    Break lst into n-sized chunks.
    """
    return [lst[i:i + n] for i in range(0, len(lst), n)]


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = ZenProcessPreferences()
    myTaskFactory = SimpleTaskFactory(ZenProcessTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter,
                             ConfigListener())
    daemon.run()
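# Illustrative usage of the reverseDict and chunk helpers defined above
# (hypothetical values; the ordering inside reverseDict's lists follows
# Python 2 dict iteration order, so it may differ):
#
#   >>> reverseDict({'a': 1, 'b': 1, 'c': 2})
#   {1: ['a', 'b'], 2: ['c']}
#   >>> chunk([1, 2, 3, 4, 5], 2)
#   [[1, 2], [3, 4], [5]]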
        pass

    def doTask(self):
        log.debug("Scanning device %s [%s]", self._devId, self._manageIp)

        # try collecting events after a successful connect, or if we're
        # already connected
        d = self._collectData()

        # Add the _finished callback to be called in both success and error
        # scenarios. While we don't need final error processing in this task,
        # it is good practice to catch any final errors for diagnostic
        # purposes.
        d.addCallback(self._finished)

        # returning a Deferred will keep the framework from assuming the task
        # is done until the Deferred actually completes
        return d


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = ZenPerfWbemPreferences()
    myTaskFactory = SimpleTaskFactory(ZenPerfWbemTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
        super(SyslogConfigTask, self).__init__()

        # Needed for ZCA interface contract
        self.name = taskName
        self.configId = configId
        self.state = TaskStates.STATE_IDLE
        self.interval = scheduleIntervalSeconds
        self._preferences = taskConfig
        self._daemon = zope.component.getUtility(ICollector)

        self._daemon.defaultPriority = self._preferences.defaultPriority

    def doTask(self):
        return defer.succeed("Already updated default syslog priority...")

    def cleanup(self):
        pass


class SyslogDaemon(CollectorDaemon):

    _frameworkFactoryName = "nosip"


if __name__ == '__main__':
    myPreferences = SyslogPreferences()
    myTaskFactory = SimpleTaskFactory(SyslogConfigTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = SyslogDaemon(myPreferences, myTaskSplitter)
    daemon.run()
log.debug("Device %s [%s] scanned failed, %s", self._devId, self._manageIp, err) log.error("Unable to scan device %s: %s", self._devId, err) self._reset() summary = """ Could not read Windows services (%s). Check your username/password settings and verify network connectivity. """ % err self._eventService.sendEvent( dict(summary=summary, component='zenwin', eventClass=Status_Wmi, device=self._devId, severity=Error, traceback=traceback.format_exc())) # # Collector Daemon Main entry point # if __name__ == '__main__': myPreferences = ZenWinPreferences() myTaskFactory = SimpleTaskFactory(ZenWinTask) myTaskSplitter = SimpleTaskSplitter(myTaskFactory) daemon = CollectorDaemon(myPreferences, myTaskSplitter) daemon.run()
def main():
    preferences = Preferences()
    task_factory = SimpleTaskFactory(PythonCollectionTask)
    task_splitter = PerDataSourceInstanceTaskSplitter(task_factory)
    daemon = CollectorDaemon(preferences, task_splitter)
    daemon.run()
    def cleanup(self):
        pass

    def doTask(self):
        log.debug("Scanning device %s [%s]", self._devId, self._manageIp)

        # try collecting events after a successful connect, or if we're
        # already connected
        d = self._collectData()

        # Add the _finished callback to be called in both success and error
        # scenarios. While we don't need final error processing in this task,
        # it is good practice to catch any final errors for diagnostic
        # purposes.
        d.addCallback(self._finished)

        # returning a Deferred will keep the framework from assuming the task
        # is done until the Deferred actually completes
        return d


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = ZenPerfWmiPreferences()
    myTaskFactory = SimpleTaskFactory(ZenPerfWmiTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
                severity=Error,
                agent=COLLECTOR_NAME))
        return result

    def cleanup(self):
        unused(self)
        pass

    def doTask(self):
        self.state = TaskStates.STATE_WAITING
        log.debug("Connecting to %s (%s)", self._devId, self._manageIp)

        spec = txamqp.spec.load(os.path.join(
            os.path.dirname(__file__),
            "lib/txamqp/specs/standard/amqp0-8.xml"))
        delegate = TwistedDelegate()

        d = ClientCreator(
            reactor, AMQClient,
            delegate=delegate,
            spec=spec,
            vhost=self._config.zAMQPVirtualHost).connectTCP(
                self._config.manageIp, self._config.zAMQPPort)
        d.addCallback(self._onConnSucc,
                      self._config.zAMQPQueue,
                      self._config.zAMQPUsername,
                      self._config.zAMQPPassword)
        d.addErrback(self._onConnFail)
        return d


if __name__ == '__main__':
    tf = SimpleTaskFactory(AMQPEventsTask)
    ts = SimpleTaskSplitter(tf)
    CollectorDaemon(AMQPEventPreferences(), ts).run()
        self.state = MailTxCollectionTask.STATE_SEND_STATUS

        msg = "Device %s cycle time %0.2fs (sent %0.2fs, fetch %0.2fs)" % (
            self._cfg.device, self.totalTime, self.sendTime, self.fetchTime)

        dsdev, ds = self._cfg.key()
        self._eventService.sendEvent(dict(
            device=self._cfg.device,
            component='zenmailtx',
            severity=Event.Clear,
            dedupid='%s|%s|%s|%s' % (
                dsdev, ds, self._cfg.smtpHost, self._cfg.popHost),
            summary="Successfully completed transaction",
            message=msg,
            eventKey=self._cfg.eventKey,
            eventGroup="mail",
            dataSource=ds,
            eventClass=self._cfg.eventClass,
        ))
        return msg

    def displayStatistics(self):
        """
        Called by the collector framework scheduler, and allows us to see
        how each task is doing.
        """
        display = self.name
        if self._lastErrorMsg:
            display += "%s\n" % self._lastErrorMsg
        return display


if __name__ == '__main__':
    myPreferences = MailTxCollectionPreferences()
    myTaskFactory = SimpleTaskFactory(MailTxCollectionTask)
    myTaskSplitter = MailTxTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
            new_opts.append(o)

        encodedOpts = urlsafe_b64encode(
            zlib.compress('|'.join(new_opts), 9))
        params = {
            'gopts': encodedOpts,
            'drange': drange,
            'width': width,
        }

        if url.startswith('proxy'):
            url = url.replace('proxy', 'http', 1)
            params['remoteUrl'] = url
            return '/zport/RenderServer/render?%s' % (urlencode(params),)
        else:
            return '%s/render?%s' % (url, urlencode(params),)

    buildGraphUrl = staticmethod(buildGraphUrl)


if __name__ == '__main__':
    myPreferences = ZenPowerConsumptionMonitorPreferences()
    myTaskFactory = SimpleTaskFactory(ZenPowerConsumptionMonitorTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)

    print "Graph URL: "
    print Utils.buildGraphUrl()

    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
""" Log internal exceptions that have occurred from testing TCP services @param reason: error message @type reason: Twisted error instance """ msg = reason.getErrorMessage() evt = dict( device=self._preferences.options.monitor, summary=msg, severity=4, # error component='zenstatus', traceback=reason.getTraceback()) self._eventService.sendEvent(evt) return defer.succeed("Failed due to internal error") def cleanup(self): pass # # Collector Daemon Main entry point # if __name__ == '__main__': myPreferences = ZenStatusPreferences() myTaskFactory = SimpleTaskFactory(ZenStatusTask) myTaskSplitter = ServiceTaskSplitter(myTaskFactory) daemon = CollectorDaemon(myPreferences, myTaskSplitter) daemon.run()
            ICollectorPreferences, 'zenexample')

        # All of these properties are required to implement the
        # IScheduledTask interface.
        self.name = taskName
        self.configId = deviceId
        self.interval = interval
        self.state = TaskStates.STATE_IDLE

    # doTask is where the collector logic should go. It is also required
    # to implement the IScheduledTask interface. It will be called directly
    # by the framework when it's this task's turn to run.
    def doTask(self):
        # This method must return a Deferred because the collector framework
        # is asynchronous.
        d = defer.Deferred()
        return d

    # cleanup is required to implement the IScheduledTask interface.
    def cleanup(self):
        pass


if __name__ == '__main__':
    myPreferences = ZenExamplePreferences()
    myTaskFactory = SimpleTaskFactory(ZenExampleTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
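# A minimal sketch (an assumption, not part of the zenexample code above)
# of how a task would normally complete the Deferred it returns from
# doTask: the scheduler only treats the task as finished once that
# Deferred fires, so a real implementation must eventually call back.
from twisted.internet import defer, reactor


def _sketch_doTask():
    d = defer.Deferred()
    # Fire the callback on the next reactor iteration; a real task would
    # fire it from its collection code once data has been gathered.
    reactor.callLater(0, d.callback, "collection complete")
    return d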
        pass

    def doTask(self):
        log.debug("Scanning device %s [%s]", self._devId, self._manageIp)

        # try collecting events after a successful connect, or if we're
        # already connected
        d = self._collectData()

        # Add the _finished callback to be called in both success and error
        # scenarios. While we don't need final error processing in this task,
        # it is good practice to catch any final errors for diagnostic
        # purposes.
        d.addCallback(self._finished)

        # returning a Deferred will keep the framework from assuming the task
        # is done until the Deferred actually completes
        return d


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = ZenPerfSqlPreferences()
    myTaskFactory = SimpleTaskFactory(ZenPerfSqlTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
        # since we don't need to bother connecting, we'll just create an
        # empty deferred and have it run immediately so the collect callback
        # will be fired off
        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)

        # try collecting events after a successful connect, or if we're
        # already connected
        d.addCallback(self._collectCallback)

        # Add the _finished callback to be called in both success and error
        # scenarios. While we don't need final error processing in this task,
        # it is good practice to catch any final errors for diagnostic
        # purposes.
        d.addBoth(self._finished)

        # returning a Deferred will keep the framework from assuming the task
        # is done until the Deferred actually completes
        return d


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = ZenEventLogPreferences()
    myTaskFactory = SimpleTaskFactory(ZenEventLogTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
        self._snmpProxy = None

    def displayStatistics(self):
        """
        Called by the collector framework scheduler, and allows us to see
        how each task is doing.
        """
        display = "%s using SNMP %s\n" % (
            self.name, self._snmpConnInfo.zSnmpVer)
        display += (
            "%s Cycles Exceeded: %s; V3 Error Count: %s; "
            "Stopped Task Count: %s\n" % (
                self.name, self._cycleExceededCount,
                self._snmpV3ErrorCount, self._stoppedTaskCount))
        display += "%s OIDs configured: %d \n" % (
            self.name, len(self._oids.keys()))
        display += "%s Good OIDs: %d - %s\n" % (
            self.name, len(self._good_oids), self._good_oids)
        display += "%s Bad OIDs: %d - %s\n" % (
            self.name, len(self._bad_oids), self._bad_oids)

        if self._lastErrorMsg:
            display += "%s\n" % self._lastErrorMsg

        return display


if __name__ == '__main__':
    myPreferences = SnmpPerformanceCollectionPreferences()
    myTaskFactory = SimpleTaskFactory(SnmpPerformanceCollectionTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
        jobs = NJobs(self._preferences.options.parallel,
                     self._collectJMX,
                     self._taskConfig.jmxDataSourceConfigs.values())
        deferred = jobs.start()
        return deferred

    def cleanup(self):
        pass


def stopJavaJmxClients():
    # Currently only starting/stopping one.
    clientName = DEFAULT_JMX_JAVA_CLIENT_NAME
    client = zope.component.queryUtility(IZenJMXJavaClient, clientName)
    if client is not None:
        log.debug('Shutting down JMX Java client %s' % clientName)
        client.stop()


if __name__ == '__main__':
    myPreferences = ZenJMXPreferences()
    initialization = ZenJMXJavaClientInitialization()
    myTaskFactory = SimpleTaskFactory(ZenJMXTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter,
                             initializationCallback=initialization.initialize,
                             stoppingCallback=stopJavaJmxClients)
    daemon.run()
        # 1) super sets self._prefs.task with the call to postStartupTasks
        # 2) call remote createAllUsers
        # 3) service in turn walks DeviceClass tree and returns users
        CollectorDaemon.runPostConfigTasks(self, result)
        if not isinstance(result, Failure) and self._prefs.task is not None:
            service = self.getRemoteConfigServiceProxy()
            log.debug(
                'TrapDaemon.runPostConfigTasks callRemote createAllUsers')
            d = service.callRemote("createAllUsers")
            d.addCallback(self._createUsers)

    def remote_createUser(self, user):
        self._createUsers([user])

    def _createUsers(self, users):
        fmt = 'TrapDaemon._createUsers {0} users'
        count = len(users)
        log.debug(fmt.format(count))
        if self._prefs.task.session is None:
            log.debug("No session created, so unable to create users")
        else:
            self._prefs.task.session.create_users(users)


if __name__ == '__main__':
    myPreferences = SnmpTrapPreferences()
    myTaskFactory = SimpleTaskFactory(MibConfigTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = TrapDaemon(myPreferences, myTaskSplitter)
    daemon.run()
                'eventKey': datasource.getEventKey(dp),
                'component': datasource.component,
            }

            self._dataService.writeRRD(
                dp.rrdPath,
                dp_value[0],
                dp.rrdType,
                rrdCommand=dp.rrdCreateCommand,
                cycleTime=datasource.cycletime,
                min=dp.rrdMin,
                max=dp.rrdMax,
                threshEventData=threshData,
                timestamp=dp_value[1],
                allowStaleDatapoint=False)

    def handleError(self, result):
        log.error('unhandled plugin error: %s', result)

    # cleanup is required to implement the IScheduledTask interface.
    def cleanup(self):
        pass


if __name__ == '__main__':
    myPreferences = ActiveMQPreferences()
    myTaskFactory = SimpleTaskFactory(ActiveMQTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()