def runPostConfigTasks(self, result=None):
    """Run post-configuration startup work for the trap daemon.

    The base class populates ``self._prefs.task`` via its
    postStartupTasks machinery; when that succeeded, ask the remote
    config service to create all trap users (the service walks the
    DeviceClass tree and returns the users, which are handed to
    ``self._createUsers``).
    """
    CollectorDaemon.runPostConfigTasks(self, result)
    # Bail out early when startup failed or no task was created.
    if isinstance(result, Failure) or self._prefs.task is None:
        return
    service = self.getRemoteConfigServiceProxy()
    log.debug('TrapDaemon.runPostConfigTasks callRemote createAllUsers')
    deferred = service.callRemote("createAllUsers")
    deferred.addCallback(self._createUsers)
def runPostConfigTasks(self, result=None):
    """Kick off post-config work.

    1) The superclass sets ``self._prefs.task`` via postStartupTasks.
    2) If that did not fail, call the remote ``createAllUsers``; the
       service walks the DeviceClass tree and returns the users.
    3) The returned users are processed by ``self._createUsers``.
    """
    CollectorDaemon.runPostConfigTasks(self, result)
    startup_ok = not isinstance(result, Failure)
    if startup_ok and self._prefs.task is not None:
        proxy = self.getRemoteConfigServiceProxy()
        log.debug(
            'TrapDaemon.runPostConfigTasks callRemote createAllUsers')
        d = proxy.callRemote("createAllUsers")
        d.addCallback(self._createUsers)
def main():
    """Assemble and run the collector daemon for python datasources,
    sizing the reactor thread pool when the installed Twisted
    supports it."""
    prefs = Preferences()
    factory = SimpleTaskFactory(PythonCollectionTask)
    splitter = PerDataSourceInstanceTaskSplitter(factory)
    collector = CollectorDaemon(prefs, splitter)

    # Read the configured size unconditionally (keeps the original
    # fail-fast behavior if the option is missing), but only apply it
    # when the reactor has the hook — the Twisted shipped with
    # Zenoss 4.1 doesn't.
    pool_size = prefs.options.threadPoolSize
    if hasattr(reactor, 'suggestThreadPoolSize'):
        reactor.suggestThreadPoolSize(pool_size)

    collector.run()
def run(self):
    """Entry point.

    With ``--worker`` on the command line, run the registered worker
    executor with this class's worker.  Otherwise build and run the
    collector daemon that splits work out to worker tasks.
    """
    if "--worker" not in sys.argv:
        prefs = self.prefsClass()
        factory = zope.component.getUtility(IWorkerTaskFactory)
        factory.setWorkerClass(self.workerClass)
        splitter = SimpleTaskSplitter(factory)
        daemon = CollectorDaemon(prefs, splitter)
        # The factory needs a second init pass once the daemon exists.
        factory.postInitialization()
        self.log = daemon.log
        daemon.run()
    else:
        executor = zope.component.getUtility(IWorkerExecutor)
        executor.setWorkerClass(self.workerClass)
        executor.run()
def setUp(t):
    """Build a CollectorDaemon with its heavyweight ``__init__``
    patched out, then hand-wire only the attributes the tests use."""
    # __init__ has excessive side-effects, so neutralize it for the
    # duration of the test (the patcher must be active *before* the
    # daemon is constructed below).
    patcher = patch.object(
        CollectorDaemon, '__init__', autospec=True, return_value=None
    )
    t.init_patcher = patcher
    patcher.start()
    t.addCleanup(patcher.stop)

    prefs = create_interface_mock(ICollectorPreferences)()
    splitter = create_interface_mock(ITaskSplitter)()
    listener = create_interface_mock(IConfigurationListener)()
    t.cd = CollectorDaemon(prefs, splitter, listener)

    # Minimal state the tests rely on.
    t.cd.log = Mock(name='log')
    t.cd._prefs = Mock(
        name='options',
        spec_set=['pauseUnreachableDevices'],
        pauseUnreachableDevices=True,
    )
    t.cd.options = Mock(name='options', spec_set=['cycle'], cycle=True)
    t.cd.getDevicePingIssues = create_autospec(t.cd.getDevicePingIssues)
    t.cd._unresponsiveDevices = set()
def cleanup(self):
    # No per-task resources to release.
    pass

def doTask(self):
    """Collect WMI events for this task's device.

    Returns a Deferred; the collector framework keeps the task in a
    running state until it fires.
    """
    log.debug("Scanning device %s [%s]", self._devId, self._manageIp)

    # try collecting events after a successful connect, or if we're
    # already connected
    d = self._collectData()

    # Add the _finished callback to be called in both success and error
    # scenarios. While we don't need final error processing in this task,
    # it is good practice to catch any final errors for diagnostic purposes.
    d.addCallback(self._finished)

    # returning a Deferred will keep the framework from assuming the task
    # is done until the Deferred actually completes
    return d


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = ZenPerfWmiPreferences()
    myTaskFactory = SimpleTaskFactory(ZenPerfWmiTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
# NOTE(review): tail of a method whose definition starts before this
# chunk; left untouched.
self._snmpProxy = None

def displayStatistics(self):
    """
    Called by the collector framework scheduler, and allows us to
    see how each task is doing.

    Returns a multi-line human-readable status string.
    """
    display = "%s using SNMP %s\n" % (self.name, self._snmpConnInfo.zSnmpVer)
    # Error/throughput counters accumulated while the task runs.
    display += "%s Cycles Exceeded: %s; V3 Error Count: %s; Stopped Task Count: %s\n" % (
        self.name, self._cycleExceededCount, self._snmpV3ErrorCount,
        self._stoppedTaskCount)
    display += "%s OIDs configured: %d \n" % (
        self.name, len(self._oids.keys()))
    # Good/bad OID partitions are shown in full for debugging.
    display += "%s Good OIDs: %d - %s\n" % (
        self.name, len(self._good_oids), self._good_oids)
    display += "%s Bad OIDs: %d - %s\n" % (
        self.name, len(self._bad_oids), self._bad_oids)
    if self._lastErrorMsg:
        display += "%s\n" % self._lastErrorMsg
    return display


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = SnmpPerformanceCollectionPreferences()
    myTaskFactory = SimpleTaskFactory(SnmpPerformanceCollectionTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter)
    daemon.run()
# NOTE(review): tail of a function whose definition starts before this
# chunk; left untouched.
return procs

def reverseDict(d):
    """
    Return a dictionary with keys and values swapped: all values
    are lists to handle the different keys mapping to the same value
    """
    result = {}
    # setdefault groups every key of d under its (possibly shared) value.
    for a, v in d.iteritems():  # Python 2 dict API (codebase targets py2)
        result.setdefault(v, []).append(a)
    return result

def chunk(lst, n):
    """
    Break lst into n-sized chunks; the final chunk may be shorter.
    """
    return [lst[i:i + n] for i in range(0, len(lst), n)]


#
# Collector Daemon Main entry point
#
if __name__ == '__main__':
    myPreferences = ZenProcessPreferences()
    myTaskFactory = SimpleTaskFactory(ZenProcessTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    daemon = CollectorDaemon(myPreferences, myTaskSplitter, ConfigListener())
    daemon.run()
def main():
    """Wire up and start the python-datasource collector daemon."""
    task_factory = SimpleTaskFactory(PythonCollectionTask)
    daemon = CollectorDaemon(
        Preferences(),
        PerDataSourceInstanceTaskSplitter(task_factory),
    )
    daemon.run()
# NOTE(review): the next three lines are the tail of a method whose
# definition starts before this chunk; left untouched.
jobs = NJobs(self._preferences.options.parallel,
             self._collectJMX,
             self._taskConfig.jmxDataSourceConfigs.values())
deferred = jobs.start()
return deferred

def cleanup(self):
    # No per-task resources to release.
    pass

def stopJavaJmxClients():
    """Stop the shared JMX Java client, if one was registered."""
    # Currently only starting/stopping one.
    clientName = DEFAULT_JMX_JAVA_CLIENT_NAME
    client = zope.component.queryUtility(IZenJMXJavaClient, clientName)
    if client is not None:
        log.debug('Shutting down JMX Java client %s' % clientName)
        client.stop()

if __name__ == '__main__':
    myPreferences = ZenJMXPreferences()
    myTaskFactory = SimpleTaskFactory(ZenJMXTask)
    myTaskSplitter = SimpleTaskSplitter(myTaskFactory)
    initialization = ZenJMXJavaClientInitialization()
    daemon = CollectorDaemon(myPreferences, myTaskSplitter,
                             initializationCallback=initialization.initialize,
                             stoppingCallback=stopJavaJmxClients)
    daemon.run()
# NOTE(review): continuation/tail of a method whose definition starts
# before this chunk; left untouched.
agent=COLLECTOR_NAME))
return result

def cleanup(self):
    # Nothing to release; unused() silences lint about self.
    unused(self)
    pass

def doTask(self):
    """Connect to the device's AMQP broker and begin consuming events.

    Returns a Deferred that fires once the TCP connection attempt
    completes; _onConnSucc / _onConnFail handle the two outcomes.
    """
    self.state = TaskStates.STATE_WAITING
    log.debug("Connecting to %s (%s)", self._devId, self._manageIp)
    # AMQP 0-8 protocol spec shipped alongside this ZenPack.
    spec = txamqp.spec.load(os.path.join(os.path.dirname(__file__),
                            "lib/txamqp/specs/standard/amqp0-8.xml"))
    delegate = TwistedDelegate()
    d = ClientCreator(reactor, AMQClient,
                      delegate=delegate, spec=spec,
                      vhost=self._config.zAMQPVirtualHost).connectTCP(
                          self._config.manageIp, self._config.zAMQPPort)
    # Credentials and queue name come from zProperties on the device.
    d.addCallback(self._onConnSucc, self._config.zAMQPQueue,
                  self._config.zAMQPUsername, self._config.zAMQPPassword)
    d.addErrback(self._onConnFail)
    return d

if __name__ == '__main__':
    tf = SimpleTaskFactory(AMQPEventsTask)
    ts = SimpleTaskSplitter(tf)
    CollectorDaemon(AMQPEventPreferences(), ts).run()
def runPostConfigTasks(self, result=None):
    """Run the base daemon's post-config tasks, then give the
    preferences object a chance to run its own."""
    CollectorDaemon.runPostConfigTasks(self, result=result)
    prefs = self.preferences
    prefs.runPostConfigTasks()