def zenPackUpdated(self, object, event):
    """
    Invalidation handler: after a ZenPack is installed or updated, push
    the current set of threshold classes to every connected listener.

    :param object: the ZenPack that changed (not used directly)
    :param event: the invalidation event (not used directly)
    """
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        for listener in self.listeners:
            try:
                listener.callRemote("updateThresholdClasses",
                                    self.remote_getThresholdClasses())
            except Exception as ex:
                # Best-effort: keep notifying the remaining listeners, but
                # record the failure reason instead of dropping it.
                self.log.warning(
                    "Error notifying a listener of new classes: %s", ex)
def notifyAffectedDevices(self, object, event):
    """
    Invalidation handler: work out which devices are affected by a change
    to *object* and mark them out-of-date on the collector.
    """
    # FIXME: This is horrible
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        if isinstance(object, self._getNotifiableClasses()):
            self._reconfigureIfNotify(object)
            return
        if isinstance(object, Device):
            return
        # Something else changed -- climb the acquisition chain until we
        # reach a device class or a device, then flag its devices.
        node = object
        while node:
            # Privately managed objects are handled by their ZenPack.
            if is_private(node):
                return
            if isinstance(node, DeviceClass):
                uid = (self.__class__.__name__, self.instance)
                self._notifier.notify_subdevices(node, uid, self._notifyAll)
                return
            if isinstance(node, Device):
                self._notifyAll(node)
                return
            node = aq_parent(node)
def notifyAffectedDevices(self, object, event):
    """Mark the devices affected by a change to *object* as out-of-date."""
    # FIXME: This is horrible
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        if isinstance(object, self._getNotifiableClasses()):
            self._reconfigureIfNotify(object)
        elif not isinstance(object, Device):
            # Walk up the acquisition chain looking for an organizer or a
            # device whose devices should be flagged.
            current = object
            while current:
                if is_private(current):
                    # Privately managed; the owning ZenPack handles it.
                    break
                elif isinstance(current, DeviceClass):
                    self._notifier.notify_subdevices(
                        current,
                        (self.__class__.__name__, self.instance),
                        self._notifyAll)
                    break
                elif isinstance(current, Device):
                    self._notifyAll(current)
                    break
                else:
                    current = aq_parent(current)
def zenPackUpdated(self, object, event):
    """
    Invalidation handler: after a ZenPack is installed or updated, push
    the current set of threshold classes to every connected listener.

    :param object: the ZenPack that changed (not used directly)
    :param event: the invalidation event (not used directly)
    """
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        for listener in self.listeners:
            try:
                listener.callRemote('updateThresholdClasses',
                                    self.remote_getThresholdClasses())
            except Exception as ex:
                # Best-effort: keep notifying the remaining listeners, but
                # record the failure reason instead of dropping it.
                self.log.warning(
                    "Error notifying a listener of new classes: %s", ex)
def deviceDeleted(self, object, event):
    """
    Invalidation handler for device removal: tell the listeners of the
    collector that managed the device to drop it.
    """
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        devid = object.id
        owning_collector = object.getPerformanceServer().getId()
        # Only the collector that actually monitored the device needs
        # the deleteDevice call.
        if owning_collector != self.instance:
            self.log.debug('Invalidation: Skipping remote call to delete device {0} from collector {1}'.format(devid, self.instance))
            return
        self.log.debug('Invalidation: Performing remote call to delete device {0} from collector {1}'.format(devid, self.instance))
        for listener in self.listeners:
            listener.callRemote('deleteDevice', devid)
def deviceDeleted(self, object, event):
    """Notify the owning collector's listeners that a device was removed."""
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        device_id = object.id
        collector_id = object.getPerformanceServer().getId()
        # The invalidation is only sent to the collector where the
        # deleted device lived.
        if collector_id == self.instance:
            self.log.debug('Invalidation: Performing remote call to delete device {0} from collector {1}'.format(device_id, self.instance))
            for remote in self.listeners:
                remote.callRemote('deleteDevice', device_id)
        else:
            self.log.debug('Invalidation: Skipping remote call to delete device {0} from collector {1}'.format(device_id, self.instance))
def run(self):
    """
    Operates on uids/nodes from get_work until exhausted.

    Worker main loop: pull uids from get_work(), process each, and fold
    the local tally into the shared counter in batches to limit lock
    traffic.  Exits when cancel/terminator is set or the sentinel is seen.
    """
    with worker_context(self):
        with gc_cache_every(1000, self._db):
            # Local, lock-free tally; merged into self.counter in batches.
            count = 0
            while True:
                if self.cancel.is_set() or self.terminator.is_set():
                    return
                for uid in self.get_work():
                    if self.terminator.is_set():
                        # If terminator is set, we exit immediately, leaving data behind
                        return
                    checkLogging(self.logtoggle)
                    if uid == TERMINATE_SENTINEL:
                        # Parent signalled end-of-work: cancel ourselves and
                        # let the parent know before leaving this batch.
                        log.debug('Worker {0} found sentinel'.format(
                            self.idx))
                        self.cancel.set()
                        self.notify_parent(True)
                        break
                    try:
                        self.process(uid)
                    except Exception:
                        # handle_exception() owns reporting; skip the uid.
                        self.handle_exception()
                        continue
                    count += 1
                    # update our counter every so often (less lock usage)
                    if count >= 1000:
                        with self.counter.get_lock():
                            self.counter.value += count
                        count = 0
                        log.debug(
                            'Worker {0} notifying parent of count update'.
                            format(self.idx))
                        self.notify_parent()
                # Flush the index batch dregs
                self.index(True)
                if count:
                    # Merge any remainder below the 1000-item threshold.
                    with self.counter.get_lock():
                        self.counter.value += count
                    count = 0
                # Should we die? Or wait for more work from the parent?
                if self.cancel.is_set() or self.terminator.is_set():
                    return
                log.debug(
                    'Worker {0} notifying parent it is out of work'.format(
                        self.idx))
                self.notify_parent(True)
def notifyAffectedDevices(self, object, event):
    """
    Invalidation handler: determine which devices are affected by a
    change to *object* and notify the collector about them.  If a
    monitoring template is passed on the way up the acquisition chain,
    the device-class notification is filtered to devices bound to it.
    """
    # FIXME: This is horrible
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        if isinstance(object, self._getNotifiableClasses()):
            self._reconfigureIfNotify(object)
        else:
            if isinstance(object, Device):
                return
            # something else... mark the devices as out-of-date
            template = None
            while object:
                # Don't bother with privately managed objects; the ZenPack
                # will handle them on its own
                if is_private(object):
                    return
                # walk up until you hit an organizer or a device
                if isinstance(object, RRDTemplate):
                    # Remember the template we passed so the device-class
                    # notification below can be filtered by binding.
                    template = object
                if isinstance(object, DeviceClass):
                    uid = (self.__class__.__name__, self.instance)
                    devfilter = None
                    if template:
                        def hasTemplate(device):
                            # True when *device* (or, for component-targeted
                            # templates, one of its monitored components) is
                            # bound to the remembered template.
                            if issubclass(template.getTargetPythonClass(), Device):
                                result = template in device.getRRDTemplates()
                                if result:
                                    self.log.debug("%s bound to template %s", device.getPrimaryId(), template.getPrimaryId())
                                else:
                                    self.log.debug("%s not bound to template %s", device.getPrimaryId(), template.getPrimaryId())
                                return result
                            else:
                                # check components, Too expensive?
                                for comp in device.getMonitoredComponents(type=template.getTargetPythonClass().meta_type):
                                    result = template in comp.getRRDTemplates()
                                    if result:
                                        self.log.debug("%s bound to template %s", comp.getPrimaryId(), template.getPrimaryId())
                                        return True
                                    else:
                                        self.log.debug("%s not bound to template %s", comp.getPrimaryId(), template.getPrimaryId())
                                return False
                        devfilter = hasTemplate
                    self._notifier.notify_subdevices(object, uid, self._notifyAll, devfilter)
                    break
                if isinstance(object, Device):
                    self._notifyAll(object)
                    break
                object = aq_parent(object)
def notifyAffectedDevices(self, object, event):
    """
    Invalidation handler: determine which devices are affected by a
    change to *object* and notify the collector about them.  If a
    monitoring template is passed on the way up the acquisition chain,
    the device-class notification is filtered to devices bound to it.
    """
    # FIXME: This is horrible
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        if isinstance(object, self._getNotifiableClasses()):
            self._reconfigureIfNotify(object)
        else:
            if isinstance(object, Device):
                return
            # something else... mark the devices as out-of-date
            template = None
            while object:
                # Don't bother with privately managed objects; the ZenPack
                # will handle them on its own
                if is_private(object):
                    return
                # walk up until you hit an organizer or a device
                if isinstance(object, RRDTemplate):
                    # Remember the template we passed so the device-class
                    # notification below can be filtered by binding.
                    template = object
                if isinstance(object, DeviceClass):
                    uid = (self.__class__.__name__, self.instance)
                    devfilter = None
                    if template:
                        def hasTemplate(device):
                            # True when *device* (or, for component-targeted
                            # templates, one of its monitored components) is
                            # bound to the remembered template.
                            if issubclass(template.getTargetPythonClass(), Device):
                                result = template in device.getRRDTemplates()
                                if result:
                                    self.log.debug("%s bound to template %s", device.getPrimaryId(), template.getPrimaryId())
                                else:
                                    self.log.debug("%s not bound to template %s", device.getPrimaryId(), template.getPrimaryId())
                                return result
                            else:
                                # check components, Too expensive?
                                for comp in device.getMonitoredComponents(type=template.getTargetPythonClass().meta_type):
                                    result = template in comp.getRRDTemplates()
                                    if result:
                                        self.log.debug("%s bound to template %s", comp.getPrimaryId(), template.getPrimaryId())
                                        return True
                                    else:
                                        self.log.debug("%s not bound to template %s", comp.getPrimaryId(), template.getPrimaryId())
                                return False
                        devfilter = hasTemplate
                    self._notifier.notify_subdevices(object, uid, self._notifyAll, devfilter)
                    break
                if isinstance(object, Device):
                    self._notifyAll(object)
                    break
                object = aq_parent(object)
def run(self, log, number_of_issues):
    """
    Scan the database for integrity issues, reporting progress to both
    stdout and the supplied logger.

    :param log: logger for progress/summary messages
    :param number_of_issues: passed through to self.verify() --
        presumably a reporting limit; TODO confirm against verify()
    """
    print("[%s] Examining %d items in the '%s' database:" % (strftime("%Y-%m-%d %H:%M:%S", localtime()), self._size, self._dbname))
    log.info("Examining %d items in %s database" % (self._size, self._dbname))
    # OID 0x...01 -- presumably the ZODB root object; verify() walks
    # the object graph starting from here.
    oid = '\x00\x00\x00\x00\x00\x00\x00\x01'
    with gc_cache_every(1000, self._db):
        reported, scanned, total = self.verify(oid, log, number_of_issues)
    # Fewer than 90% of objects reachable suggests the database needs
    # garbage collection (zenossdbpack).
    if (100.0*scanned/total) < 90.0:
        print(" ** %3.2f%% of %s objects not reachable - examine your zenossdbpack settings **" % ((100.0-100.0*scanned/total), self._dbname))
        log.info("%3.2f%% of %s objects not reachable - examine your zenossdbpack settings" % ((100.0-100.0*scanned/total), self._dbname))
    print
def run(self):
    """
    Operates on uids/nodes from get_work until exhausted.

    Worker main loop: pull uids from get_work(), process each, and fold
    the local tally into the shared counter in batches to limit lock
    traffic.  Exits when cancel/terminator is set or the sentinel is seen.
    """
    with worker_context(self):
        with gc_cache_every(1000, self._db):
            # Local, lock-free tally; merged into self.counter in batches.
            count = 0
            while True:
                if self.cancel.is_set() or self.terminator.is_set():
                    return
                for uid in self.get_work():
                    if self.terminator.is_set():
                        # If terminator is set, we exit immediately, leaving data behind
                        return
                    checkLogging(self.logtoggle)
                    if uid == TERMINATE_SENTINEL:
                        # Parent signalled end-of-work: cancel ourselves and
                        # let the parent know before leaving this batch.
                        log.debug('Worker {0} found sentinel'.format(self.idx))
                        self.cancel.set()
                        self.notify_parent(True)
                        break
                    try:
                        self.process(uid)
                    except Exception:
                        # handle_exception() owns reporting; skip the uid.
                        self.handle_exception()
                        continue
                    count += 1
                    # update our counter every so often (less lock usage)
                    if count >= 1000:
                        with self.counter.get_lock():
                            self.counter.value += count
                        count = 0
                        log.debug('Worker {0} notifying parent of count update'.format(self.idx))
                        self.notify_parent()
                # Flush the index batch dregs
                self.index(True)
                if count:
                    # Merge any remainder below the 1000-item threshold.
                    with self.counter.get_lock():
                        self.counter.value += count
                    count = 0
                # Should we die? Or wait for more work from the parent?
                if self.cancel.is_set() or self.terminator.is_set():
                    return
                log.debug('Worker {0} notifying parent it is out of work'.format(self.idx))
                self.notify_parent(True)
def run(self):
    """
    Run the integrity scan and print a human-readable summary to stdout.

    NOTE: uses Python 2 print statements throughout.
    """
    print
    print "=" * 50
    print
    print "  DATABASE INTEGRITY SCAN: ", self._dbname
    print
    print "=" * 50
    # OID 0x...01 -- presumably the ZODB root object; verify() walks
    # the object graph starting from here.
    oid = '\x00\x00\x00\x00\x00\x00\x00\x01'
    with gc_cache_every(1000, self._db):
        reported, scanned, total = self.verify(oid)
    # Blank out any progress output left on the current stderr line.
    sys.stderr.write(' ' * 80)
    sys.stderr.flush()
    print
    print "SUMMARY:"
    print "Found", reported, "dangling references"
    print "Scanned", scanned, "out of", total, "reachable objects"
    if total > scanned:
        print "(Run zenossdbpack to garbage collect unreachable objects)"
    print
def getSubComponents(self, dmd):
    """
    Yield a Record for every monitored device component found in the
    model catalog, skipping stale brains whose object cannot be loaded.
    """
    component_class = 'Products.ZenModel.DeviceComponent.DeviceComponent'
    tool = IModelCatalogTool(dmd.Devices)
    monitored = Eq('monitored', '1')
    with gc_cache_every(100, db=dmd._p_jar._db):
        for brain in tool.search(component_class, query=monitored):
            try:
                component = brain.getObject()
            except KeyError:
                # Stale catalog entry: the object is gone; skip it.
                continue
            status = component.getStatus()
            fields = {
                'getParentDeviceTitle': component.getParentDeviceTitle(),
                'hostname': component.getParentDeviceTitle(),
                'name': component.name(),
                'meta_type': component.meta_type,
                'getInstDescription': component.getInstDescription(),
                'getStatusString': component.convertStatus(status),
                'getDeviceLink': component.getDeviceLink(),
                'getPrimaryUrlPath': component.getPrimaryUrlPath(),
                'cssclass': component.getStatusCssClass(status),
                'status': status,
            }
            yield Utils.Record(**fields)
def run(self):
    """
    Run the integrity scan and print a human-readable summary to stdout.

    NOTE: uses Python 2 print statements throughout.
    """
    print
    print "=" * 50
    print
    print "  DATABASE INTEGRITY SCAN: ", self._dbname
    print
    print "=" * 50
    # OID 0x...01 -- presumably the ZODB root object; verify() walks
    # the object graph starting from here.
    oid = "\x00\x00\x00\x00\x00\x00\x00\x01"
    with gc_cache_every(1000, self._db):
        reported, scanned, total = self.verify(oid)
    # Blank out any progress output left on the current stderr line.
    sys.stderr.write(" " * 80)
    sys.stderr.flush()
    print
    print "SUMMARY:"
    print "Found", reported, "dangling references"
    print "Scanned", scanned, "out of", total, "reachable objects"
    if total > scanned:
        print "(Run zenossdbpack to garbage collect unreachable objects)"
    print
def deviceUpdated(self, object, event):
    """Invalidation handler: notify all listeners that *object* changed."""
    db = self.dmd._p_jar._db
    with gc_cache_every(1000, db=db):
        self._notifyAll(object)
def deviceDeleted(self, object, event):
    """Tell every listener to drop the deleted device."""
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        deleted_id = object.id
        for remote in self.listeners:
            remote.callRemote('deleteDevice', deleted_id)
def perfConfUpdated(self, object, event):
    """
    Invalidation handler for performance-monitor configuration changes:
    push the updated property items to every listener of this instance.

    :param object: the performance monitor whose properties changed
    :param event: the invalidation event (unused)
    """
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        # Only changes to the monitor this daemon serves are relevant.
        if object.id != self.instance:
            return
        # Hoist the loop-invariant property snapshot so it is computed
        # once rather than once per listener.
        items = object.propertyItems()
        for listener in self.listeners:
            listener.callRemote('setPropertyItems', items)
def deviceUpdated(self, object, event):
    """Notify every attached listener that *object* has been updated."""
    jar_db = self.dmd._p_jar._db
    with gc_cache_every(1000, db=jar_db):
        self._notifyAll(object)
def perfConfUpdated(self, object, event):
    """Forward property updates of our own monitor to all listeners."""
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        # Ignore changes to monitors other than the one we serve.
        if object.id != self.instance:
            return
        for remote in self.listeners:
            remote.callRemote('setPropertyItems', object.propertyItems())
def deviceDeleted(self, object, event):
    """Broadcast a deleteDevice call for *object* to all listeners."""
    with gc_cache_every(1000, db=self.dmd._p_jar._db):
        device_id = object.id
        for subscriber in self.listeners:
            subscriber.callRemote("deleteDevice", device_id)