Esempio n. 1
0
    def writeRootSummary(self):
        '''Update the RRD summary files for the root (grid-level) summary data.

        Acquires the data store lock while walking the summary structures and
        always releases it when done.
        '''
        ds = DataStore()
        rootNode = ds.rootElement
        # Nothing to summarize yet.
        if rootNode is None:
            return
        # Lock the data store while we walk its summary structures.
        ds.acquireLock(self)
        try:
            # Create the summary RRD base path and validate it.
            rootPath = '%s/__SummaryInfo__' % self.cfg[RRDPlugin.RRD_ROOTDIR]
            self._checkDir(rootPath)
            # Update metrics for each grid node (there should only be one).
            for gridNode in rootNode:
                # Skip grids that have no summary data yet.
                if not hasattr(gridNode, 'summaryData'):
                    continue

                # Update the metric RRDs for each cluster summary in the grid.
                for metricNode in gridNode.summaryData['summary'].values():
                    # Create the summary RRD final path and validate it.
                    rrdPath = '%s/%s.rrd' % (rootPath,
                                             metricNode.getAttr('name'))
                    # Create the RRD file on first sight of this metric.
                    if not os.path.isfile(rrdPath):
                        self._createRRD(rootNode, metricNode, rrdPath, 15,
                                        True)
                        # TODO: check for _createRRD failure before updating.
                    # Update the RRD file.
                    self._updateRRD(rootNode, metricNode, rrdPath, True)
        except Exception as e:
            logging.error('Error writing to summary RRD %s' % str(e))
        finally:
            # BUG FIX: the lock was acquired but never released, which would
            # deadlock every later writer (DataStore pairs acquireLock with
            # releaseLock — confirm against the DataStore class).
            ds.releaseLock(self)
 def notify(self, clusterNode):
     '''Called by the engine when the internal data structure has changed.

     Writes per-cluster summary RRDs for the cluster that changed.'''
     gmetadConfig = getConfig()
     # Clusters marked down have nothing new to record.
     try:
         if clusterNode.getAttr('status') == 'down':
             return
     except AttributeError:
         pass
     # Find the data source configuration entry that matches the cluster
     # name.  BUG FIX: without for/else, ``ds`` stayed bound to the *last*
     # entry when nothing matched (so the ``ds is None`` guard never fired)
     # and an empty source list raised NameError.
     for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
         if ds.name == clusterNode.getAttr('name'):
             break
     else:
         logging.info('No matching data source for %s'%clusterNode.getAttr('name'))
         return
     # Create the summary RRD base path and validate it.
     clusterPath = '%s/%s'%(self.cfg[RRDPlugin.RRD_ROOTDIR], clusterNode.getAttr('name'))
     self._checkDir(clusterPath)
     clusterPath = '%s/__SummaryInfo__'%clusterPath
     self._checkDir(clusterPath)
     # Update the summary metric RRDs for this cluster.
     for metricNode in clusterNode.summaryData['summary'].values():
         # Create the summary RRD final path and validate it.
         rrdPath = '%s/%s.rrd'%(clusterPath,metricNode.getAttr('name'))
         # Create the RRD metric summary file if it doesn't exist.
         if not os.path.isfile(rrdPath):
             self._createRRD(clusterNode, metricNode, rrdPath, ds.interval, True)
             # TODO: check for _createRRD failure before updating.
         # Update the RRD file.
         self._updateRRD(clusterNode, metricNode, rrdPath, True)
Esempio n. 3
0
    def notify(self, clusterNode):
        '''Called by the engine when the internal data source has changed.

        Ships every numeric host metric of the cluster to carbon/graphite.'''
        # Per-GRID aggregation can easily be done in graphite itself, so
        # skip grid nodes.
        if 'GRID' == clusterNode.id:
            return
        gmetadConfig = getConfig()
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        else:
            logging.info('No matching data source for %s' %
                         clusterNode.getAttr('name'))
            return
        # Down clusters have no fresh data to ship.
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass

        # Emit one (name, timestamp, value) triple per numeric metric of
        # every host; the timestamp is derived from REPORTED and TN.
        self.sendMetrics([
            (
                ".".join(("ganglia",
                          self._carbonEscape(clusterNode.getAttr('name')),
                          self._carbonEscape(hostNode.getAttr('name')),
                          metricNode.getAttr('name'))),  # metric name
                int(hostNode.getAttr('REPORTED')) +
                int(metricNode.getAttr('TN')),
                float(metricNode.getAttr('VAL'))) for hostNode in clusterNode
            for metricNode in hostNode
            if metricNode.getAttr('type') not in ('string', 'timestamp')
        ])
 def writeRootSummary(self):
     '''Update the RRD summary files for the root (grid-level) summary data.

     Acquires the data store lock while walking the summary structures and
     always releases it when done.'''
     ds = DataStore()
     rootNode = ds.rootElement
     # Nothing to summarize yet.
     if rootNode is None:
         return
     # Lock the data store while we walk its summary structures.
     ds.acquireLock(self)
     try:
         # Create the summary RRD base path and validate it.
         rootPath = '%s/__SummaryInfo__'%self.cfg[RRDPlugin.RRD_ROOTDIR]
         self._checkDir(rootPath)
         # Update metrics for each grid node (there should only be one).
         for gridNode in rootNode:
             # Skip grids that have no summary data yet.
             if not hasattr(gridNode, 'summaryData'):
                 continue

             # Update the metric RRDs for each cluster summary in the grid.
             for metricNode in gridNode.summaryData['summary'].values():
                 # Create the summary RRD final path and validate it.
                 rrdPath = '%s/%s.rrd'%(rootPath,metricNode.getAttr('name'))
                 # Create the RRD file on first sight of this metric.
                 if not os.path.isfile(rrdPath):
                     self._createRRD(rootNode, metricNode, rrdPath, 15, True)
                     # TODO: check for _createRRD failure before updating.
                 # Update the RRD file.
                 self._updateRRD(rootNode, metricNode, rrdPath, True)
     except Exception as e:
         logging.error('Error writing to summary RRD %s'%str(e))
     finally:
         # BUG FIX: the lock was acquired but never released, which would
         # deadlock every later writer (DataStore pairs acquireLock with
         # releaseLock — confirm against the DataStore class).
         ds.releaseLock(self)
Esempio n. 5
0
    def notify(self, clusterNode):
        '''Called by the engine when the internal data structure has changed.

        Updates group-summary RRDs for the cluster that changed.'''
        gmetadConfig = getConfig()
        # Down clusters have nothing new to record.
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.  Also normalized the
        # original's tab/space-mixed indentation to spaces throughout.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        else:
            logging.info('No matching data source for %s'%clusterNode.getAttr('name'))
            return

        if 'GRID' == clusterNode.id:
            # bh: We still do not process GRID here
            return

        if 'CLUSTER' == clusterNode.id:
            if len(self.cfg[RRDGroupPlugin.RRD_GROUPS]) == 0:
                # bh: No group is set so done
                return

            # Build the group summary and write it out.
            groupSummary = {}
            self._updateGroupSummary(groupSummary, clusterNode)
            self._updateGroupRRD(groupSummary, clusterNode, ds)
Esempio n. 6
0
    def notify(self, clusterNode):
        '''Called by the engine when the internal data source has changed.

        Ships every numeric host metric of the cluster to carbon/graphite.'''
        # Per-GRID aggregation can easily be done in graphite itself, so
        # skip grid nodes.
        if 'GRID' == clusterNode.id:
            return
        gmetadConfig = getConfig()
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        else:
            logging.info('No matching data source for %s'%clusterNode.getAttr('name'))
            return
        # Down clusters have no fresh data to ship.
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass

        # Emit one (name, timestamp, value) triple per numeric metric of
        # every host; the timestamp is derived from REPORTED and TN.
        self.sendMetrics([
                (".".join(
                    ("ganglia", self._carbonEscape(clusterNode.getAttr('name')),
                        self._carbonEscape(hostNode.getAttr('name')),
                        metricNode.getAttr('name'))
                        ), # metric name
                    int(hostNode.getAttr('REPORTED')) + int(metricNode.getAttr('TN')), float(metricNode.getAttr('VAL')))
                    for hostNode in clusterNode
                    for metricNode in hostNode
                if metricNode.getAttr('type') not in ('string', 'timestamp' )
                ]
        )
    def notify(self, clusterNode):
        """Called by the engine when the internal data structure has changed.

        Updates group-summary RRDs for the cluster that changed."""
        gmetadConfig = getConfig()
        # Down clusters have nothing new to record.
        try:
            if clusterNode.getAttr("status") == "down":
                return
        except AttributeError:
            pass
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr("name"):
                break
        else:
            logging.info("No matching data source for %s" % clusterNode.getAttr("name"))
            return

        if "GRID" == clusterNode.id:
            # bh: We still do not process GRID here
            return

        if "CLUSTER" == clusterNode.id:
            if len(self.cfg[RRDGroupPlugin.RRD_GROUPS]) == 0:
                # bh: No group is set so done
                return

            # Build the group summary and write it out.
            groupSummary = {}
            self._updateGroupSummary(groupSummary, clusterNode)
            self._updateGroupRRD(groupSummary, clusterNode, ds)
Esempio n. 8
0
 def _connIsAllowedFrom(self, remoteHost):
     '''Return True if a connection from ``remoteHost`` should be accepted.

     Loopback is always trusted; otherwise the host (or its resolved
     name/aliases/addresses) must appear in the trusted-hosts list, unless
     ALL_TRUSTED is configured.'''
     cfg = getConfig()
     # Loopback connections are always trusted.
     if remoteHost in ('127.0.0.1', 'localhost'):
         return True
     if cfg[GmetadConfig.ALL_TRUSTED]:
         return True
     trustedHosts = cfg[GmetadConfig.TRUSTED_HOSTS]
     if not trustedHosts:
         return False
     if remoteHost in trustedHosts:
         return True
     # BUG FIX: gethostbyaddr raises socket.herror/gaierror for peers with
     # no reverse DNS entry; treat an unresolvable peer as untrusted
     # instead of letting the exception kill the connection handler.
     try:
         hostname, aliases, ips = socket.gethostbyaddr(remoteHost)
     except socket.error:
         return False
     if hostname in trustedHosts:
         return True
     for alias in aliases:
         if alias in trustedHosts:
             return True
     for ip in ips:
         if ip in trustedHosts:
             return True
     return False
Esempio n. 9
0
 def _connIsAllowedFrom(self, remoteHost):
     '''Return True if a connection from ``remoteHost`` should be accepted.

     Loopback is always trusted; otherwise the host (or its resolved
     name/aliases/addresses) must appear in the trusted-hosts list, unless
     ALL_TRUSTED is configured.'''
     cfg = getConfig()
     # Loopback connections are always trusted.
     if remoteHost in ('127.0.0.1', 'localhost'):
         return True
     if cfg[GmetadConfig.ALL_TRUSTED]:
         return True
     trustedHosts = cfg[GmetadConfig.TRUSTED_HOSTS]
     if not trustedHosts:
         return False
     if remoteHost in trustedHosts:
         return True
     # BUG FIX: gethostbyaddr raises socket.herror/gaierror for peers with
     # no reverse DNS entry; treat an unresolvable peer as untrusted
     # instead of letting the exception kill the connection handler.
     try:
         hostname, aliases, ips = socket.gethostbyaddr(remoteHost)
     except socket.error:
         return False
     if hostname in trustedHosts:
         return True
     for alias in aliases:
         if alias in trustedHosts:
             return True
     for ip in ips:
         if ip in trustedHosts:
             return True
     return False
Esempio n. 10
0
    def notify(self, clusterNode):
        '''Called by the engine when the internal data source has changed.

        Writes one RRD file per numeric metric of every host in the
        cluster.'''
        # Get the current configuration.
        gmetadConfig = getConfig()
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        else:
            logging.info('No matching data source for %s'%clusterNode.getAttr('name'))
            return
        # Down clusters have nothing new to record.
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass
        # Create the cluster RRD base path and validate it.
        clusterPath = '%s/%s'%(self.cfg[RRDPlugin.RRD_ROOTDIR], clusterNode.getAttr('name'))
        if 'GRID' == clusterNode.id:
            clusterPath = '%s/__SummaryInfo__'%clusterPath
        self._checkDir(clusterPath)

        # We do not want to process grid data (only its summary directory
        # was ensured above).
        if 'GRID' == clusterNode.id:
            return

        # Update metrics for each host in the cluster.
        for hostNode in clusterNode:
            # Create the host RRD base path and validate it.
            hostPath = '%s/%s'%(clusterPath,hostNode.getAttr('name'))
            self._checkDir(hostPath)
            # Update metrics for each host.
            for metricNode in hostNode:
                # Skip metrics that are NOT numeric (the original comment
                # said the opposite of what the code does).
                if metricNode.getAttr('type') in ('string', 'timestamp'):
                    continue
                # Create the RRD final path and validate it.
                rrdPath = '%s/%s.rrd'%(hostPath, metricNode.getAttr('name'))
                # Create the RRD metric file if it doesn't exist.
                if not os.path.isfile(rrdPath):
                    self._createRRD(clusterNode, metricNode, rrdPath, ds.interval, False)
                    # TODO: check for _createRRD failure before updating.
                # Update the RRD file.
                self._updateRRD(clusterNode, metricNode, rrdPath, False)
Esempio n. 11
0
    def notify(self, clusterNode):
        '''Called by the engine when the internal data source has changed.

        Pushes every numeric host metric of the cluster into MonetDB.'''
        # Get the current configuration.
        gmetadConfig = getConfig()
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        else:
            logging.info('No matching data source for %s' %
                         clusterNode.getAttr('name'))
            return
        # Down clusters have nothing new to record.
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass
        # Base path prefix used for the per-host metric records.
        clusterPath = clusterNode.getAttr('name')
        if 'GRID' == clusterNode.id:
            clusterPath = '%s/__SummaryInfo__' % clusterPath

        # We do not want to process grid data.
        if 'GRID' == clusterNode.id:
            return

        # Update metrics for each host in the cluster.
        for hostNode in clusterNode:
            hostPath = '%s/%s' % (clusterPath, hostNode.getAttr('name'))
            for metricNode in hostNode:
                # Skip metrics that are NOT numeric (the original comment
                # said the opposite of what the code does).
                if metricNode.getAttr('type') in ('string', 'timestamp'):
                    continue
                # Update the MonetDB records.
                self._updateMonetDB(hostPath, clusterNode, metricNode)
    def notify(self, clusterNode):
        '''Called by the engine when the internal data source has changed.

        Pushes every numeric host metric of the cluster into MonetDB.'''
        # Get the current configuration.
        gmetadConfig = getConfig()
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        else:
            logging.info('No matching data source for %s'%clusterNode.getAttr('name'))
            return
        # Down clusters have nothing new to record.
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass
        # Base path prefix used for the per-host metric records.
        clusterPath = clusterNode.getAttr('name')
        if 'GRID' == clusterNode.id:
            clusterPath = '%s/__SummaryInfo__'%clusterPath

        # We do not want to process grid data.
        if 'GRID' == clusterNode.id:
            return

        # Update metrics for each host in the cluster.
        for hostNode in clusterNode:
            hostPath = '%s/%s'%(clusterPath,hostNode.getAttr('name'))
            for metricNode in hostNode:
                # Skip metrics that are NOT numeric (the original comment
                # said the opposite of what the code does).
                if metricNode.getAttr('type') in ('string', 'timestamp'):
                    continue
                # Update the MonetDB records.
                self._updateMonetDB(hostPath, clusterNode, metricNode)
Esempio n. 13
0
    def notify(self, clusterNode):
        '''Called by the engine when the internal data structure has changed.

        Writes summary RRDs: per-cluster summaries for CLUSTER nodes and
        grid-level summaries for GRID nodes.'''
        gmetadConfig = getConfig()
        # Down clusters have nothing new to record.
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass
        # Find the data source configuration entry that matches the cluster
        # name.  BUG FIX: without for/else, ``ds`` stayed bound to the last
        # entry when nothing matched (the ``is None`` guard never fired) and
        # an empty source list raised NameError.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        else:
            logging.info('No matching data source for %s' %
                         clusterNode.getAttr('name'))
            return

        if 'CLUSTER' == clusterNode.id:
            # Create the summary RRD base path and validate it.
            clusterPath = '%s/%s' % (self.cfg[RRDPlugin.RRD_ROOTDIR],
                                     clusterNode.getAttr('name'))
            self._checkDir(clusterPath)
            clusterPath = '%s/__SummaryInfo__' % clusterPath
            self._checkDir(clusterPath)
            # Update metrics for each cluster.
            for metricNode in clusterNode.summaryData['summary'].values():
                # Create the summary RRD final path and validate it.
                rrdPath = '%s/%s.rrd' % (clusterPath,
                                         metricNode.getAttr('name'))
                # Create the RRD metric summary file if it doesn't exist.
                if not os.path.isfile(rrdPath):
                    self._createRRD(clusterNode, metricNode, rrdPath,
                                    ds.interval, True)
                    # TODO: check for _createRRD failure before updating.
                # Best-effort update: one bad metric must not stop the rest,
                # but the failure is no longer silently swallowed.
                try:
                    self._updateRRD(clusterNode, metricNode, rrdPath, True)
                except Exception as e:
                    logging.debug('Error updating summary RRD %s: %s' %
                                  (rrdPath, str(e)))

        if 'GRID' == clusterNode.id:
            try:
                # Create the summary RRD base path and validate it.
                gridPath = '%s/%s' % (self.cfg[RRDPlugin.RRD_ROOTDIR],
                                      clusterNode.getAttr('name'))
                self._checkDir(gridPath)
                gridPath = '%s/__SummaryInfo__' % gridPath
                # If there isn't any summary data, then no need to continue.
                if not hasattr(clusterNode, 'summaryData'):
                    return

                # Update metric RRDs for the grid summary.
                for metricNode in clusterNode.summaryData[
                        'summary'].values():
                    # Create the summary RRD final path and validate it.
                    rrdPath = '%s/%s.rrd' % (gridPath,
                                             metricNode.getAttr('name'))
                    # Create the RRD file on first sight of this metric.
                    if not os.path.isfile(rrdPath):
                        self._createRRD(clusterNode, metricNode, rrdPath, 15,
                                        True)
                        # TODO: check for _createRRD failure before updating.
                    # Update the RRD file.
                    self._updateRRD(clusterNode, metricNode, rrdPath, True)
            except Exception as e:
                logging.error('Error writing to summary RRD %s' % str(e))
Esempio n. 14
0
def getLoggingLevel(lspec):
    '''Map a numeric verbosity level (0-5) onto a ``logging`` module level.

    Values below the scale clamp to FATAL, values above it to DEBUG.'''
    levelMap = {
        0: logging.FATAL,
        1: logging.CRITICAL,
        2: logging.ERROR,
        3: logging.WARNING,
        4: logging.INFO,
        5: logging.DEBUG,
    }
    try:
        return levelMap[lspec]
    except KeyError:
        # Out-of-range values clamp to the nearest end of the scale.
        return logging.FATAL if lspec < 0 else logging.DEBUG

if __name__ == '__main__':
    # Read and store the configuration
    gmetadConfig = getConfig()

    # Initialize the application
    ignore_fds = [] # Remembers log file descriptors we create, so they aren't closed when we daemonize.

    # HACK ALERT - A very crude "syslog facility selector"
    # Maps numeric syslog facility codes 16-23 onto the LOCAL0-LOCAL7
    # SysLogHandler constants.  Keys are strings, presumably because the
    # facility value arrives as text from the config/command line — TODO
    # confirm against the code that reads _syslog_selector.
    _syslog_selector = {}
    _syslog_selector["16"] = SysLogHandler.LOG_LOCAL0
    _syslog_selector["17"] = SysLogHandler.LOG_LOCAL1
    _syslog_selector["18"] = SysLogHandler.LOG_LOCAL2
    _syslog_selector["19"] = SysLogHandler.LOG_LOCAL3
    _syslog_selector["20"] = SysLogHandler.LOG_LOCAL4
    _syslog_selector["21"] = SysLogHandler.LOG_LOCAL5
    _syslog_selector["22"] = SysLogHandler.LOG_LOCAL6
    _syslog_selector["23"] = SysLogHandler.LOG_LOCAL7

Esempio n. 15
0
def getLoggingLevel(lspec):
    '''Map a numeric verbosity level (0-5) onto a ``logging`` module level.

    Values below the scale clamp to FATAL, values above it to DEBUG.'''
    levelMap = {
        0: logging.FATAL,
        1: logging.CRITICAL,
        2: logging.ERROR,
        3: logging.WARNING,
        4: logging.INFO,
        5: logging.DEBUG,
    }
    try:
        return levelMap[lspec]
    except KeyError:
        # Out-of-range values clamp to the nearest end of the scale.
        return logging.FATAL if lspec < 0 else logging.DEBUG

if __name__ == '__main__':
    # Read and store the configuration
    gmetadConfig = getConfig()

    # Initialize the application
    ignore_fds = [] # Remembers log file descriptors we create, so they aren't closed when we daemonize.

    # HACK ALERT - A very crude "syslog facility selector"
    # Maps numeric syslog facility codes 16-23 onto the LOCAL0-LOCAL7
    # SysLogHandler constants.  Keys are strings, presumably because the
    # facility value arrives as text from the config/command line — TODO
    # confirm against the code that reads _syslog_selector.
    _syslog_selector = {}
    _syslog_selector["16"] = SysLogHandler.LOG_LOCAL0
    _syslog_selector["17"] = SysLogHandler.LOG_LOCAL1
    _syslog_selector["18"] = SysLogHandler.LOG_LOCAL2
    _syslog_selector["19"] = SysLogHandler.LOG_LOCAL3
    _syslog_selector["20"] = SysLogHandler.LOG_LOCAL4
    _syslog_selector["21"] = SysLogHandler.LOG_LOCAL5
    _syslog_selector["22"] = SysLogHandler.LOG_LOCAL6
    _syslog_selector["23"] = SysLogHandler.LOG_LOCAL7