def fetch(self, startTime, endTime):
    """Fetch AVERAGE datapoints for this datasource between two epoch times.

    Args:
        startTime: start of the range, seconds since the epoch (local time).
        endTime: end of the range, seconds since the epoch (local time).

    Returns:
        A (timeInfo, values) tuple where timeInfo is rrdtool's
        (start, end, step) triple and values is a generator yielding the
        datapoints of this datasource's column.
    """
    # rrdtool AT-STYLE time spec: "HH:MM_YYYYmmdd" plus a "+SSs" offset to
    # carry the seconds component the base format cannot express.
    startString = time.strftime("%H:%M_%Y%m%d+%Ss", time.localtime(startTime))
    endString = time.strftime("%H:%M_%Y%m%d+%Ss", time.localtime(endTime))

    if settings.FLUSHRRDCACHED:
        # Ask rrdcached to write out pending updates so the fetch is fresh.
        rrdtool.flushcached(self.fs_path, '--daemon', settings.FLUSHRRDCACHED)

    (timeInfo, columns, rows) = rrdtool.fetch(self.fs_path, 'AVERAGE',
                                              '-s' + startString,
                                              '-e' + endString)
    colIndex = list(columns).index(self.name)
    # Chop off the latest value because RRD returns crazy last values
    # sometimes. BUG FIX: guard the pop — an empty result set would
    # otherwise raise IndexError.
    if rows:
        rows.pop()
    values = (row[colIndex] for row in rows)
    return (timeInfo, values)
def check_rrd_files(self):
    """Verify the on-disk RRD directory layout and last-written values.

    For every known server: non-running servers must have no instance
    directory; running servers must have the FARM/role/instance directory
    tree, and `rrdtool lastupdate` must report the expected datapoints for
    each configured metric after flushing rrdcached.
    """
    # NOTE(review): the original if/else assigned the identical value on
    # both branches; collapsed to one assignment. Confirm no distinct
    # fallback socket path was intended for the else branch.
    rrdcached_sock_file = self.app.config['rrd']['rrdcached_sock_path']
    metrics = self.app.config['metrics']
    # Relative path of each metric's RRD database under the instance dir.
    metrics_map = {
        'cpu': 'CPUSNMP/db.rrd',
        'la': 'LASNMP/db.rrd',
        'mem': 'MEMSNMP/db.rrd',
        'net': 'NETSNMP/db.rrd',
        'io': 'IO/sda1.rrd'
    }
    # Expected value portion of the last `rrdtool lastupdate` output line.
    out_map = {
        'cpu': '10 10 10 10',
        'la': '1.0 1.0 1.0',
        'mem': '1024.0 1024.0 1024.0 U 1024.0 1024.0 1024.0 1024.0',
        'net': '1024 1024',
        'io': '10 10 10 10',
    }
    for server_id, server in lib.world.servers.iteritems():
        x1x2 = helper.x1x2(server['farm_id'])
        path = os.path.join(self.app.config['rrd']['dir'], x1x2,
                            str(server['farm_id']))
        farm_path = os.path.join(path, 'FARM')
        role_path = os.path.join(path, 'FR_%s' % server['farm_roleid'])
        server_path = os.path.join(
            path,
            'INSTANCE_%s_%s' % (server['farm_roleid'], server['index']))
        if server['status'] != 'Running':
            # Non-running servers must not have an instance directory.
            assert not os.path.isdir(server_path)
            continue
        assert os.path.isdir(farm_path), farm_path
        assert os.path.isdir(os.path.join(farm_path, 'SERVERS'))
        assert os.path.isdir(role_path), role_path
        assert os.path.isdir(os.path.join(role_path, 'SERVERS'))
        assert os.path.isdir(server_path), server_path
        for metric in metrics:
            if metric == 'snum':
                continue
            rrd_db_file = os.path.join(server_path, metrics_map[metric])
            # Flush pending writes so `lastupdate` sees current data.
            rrdtool.flushcached('--daemon',
                                'unix:%s' % rrdcached_sock_file,
                                rrd_db_file)
            stdout, stderr, return_code = helper.call(
                'rrdtool lastupdate %s' % rrd_db_file)
            assert not return_code
            # BUG FIX: split on a real newline '\n'; the original split on
            # the literal two-character string '/n', which never matches,
            # so the comparison ran against the entire output.
            assert stdout.split('\n')[-1].split(
                ':')[-1].strip() == out_map[metric]
def check_rrd_files(self):
    """Verify the on-disk RRD directory layout and last-written values.

    For every known server: non-running servers must have no instance
    directory; running servers must have the FARM/role/instance directory
    tree, and `rrdtool lastupdate` must report the expected datapoints for
    each configured metric after flushing rrdcached.
    """
    # NOTE(review): the original if/else assigned the identical value on
    # both branches; collapsed to one assignment. Confirm no distinct
    # fallback socket path was intended for the else branch.
    rrdcached_sock_file = self.app.config['rrd']['rrdcached_sock_path']
    metrics = self.app.config['metrics']
    # Relative path of each metric's RRD database under the instance dir.
    metrics_map = {
        'cpu': 'CPUSNMP/db.rrd',
        'la': 'LASNMP/db.rrd',
        'mem': 'MEMSNMP/db.rrd',
        'net': 'NETSNMP/db.rrd',
        'io': 'IO/sda1.rrd'
    }
    # Expected value portion of the last `rrdtool lastupdate` output line.
    out_map = {
        'cpu': '10 10 10 10',
        'la': '1.0 1.0 1.0',
        'mem': '1024.0 1024.0 1024.0 U 1024.0 1024.0 1024.0 1024.0',
        'net': '1024 1024',
        'io': '10 10 10 10',
    }
    for server_id, server in lib.world.servers.iteritems():
        x1x2 = helper.x1x2(server['farm_id'])
        path = os.path.join(self.app.config['rrd']['dir'], x1x2,
                            str(server['farm_id']))
        farm_path = os.path.join(path, 'FARM')
        role_path = os.path.join(path, 'FR_%s' % server['farm_roleid'])
        server_path = os.path.join(
            path, 'INSTANCE_%s_%s' % (server['farm_roleid'],
                                      server['index']))
        if server['status'] != 'Running':
            # Non-running servers must not have an instance directory.
            assert not os.path.isdir(server_path)
            continue
        assert os.path.isdir(farm_path), farm_path
        assert os.path.isdir(os.path.join(farm_path, 'SERVERS'))
        assert os.path.isdir(role_path), role_path
        assert os.path.isdir(os.path.join(role_path, 'SERVERS'))
        assert os.path.isdir(server_path), server_path
        for metric in metrics:
            if metric == 'snum':
                continue
            rrd_db_file = os.path.join(server_path, metrics_map[metric])
            # Flush pending writes so `lastupdate` sees current data.
            rrdtool.flushcached('--daemon',
                                'unix:%s' % rrdcached_sock_file,
                                rrd_db_file)
            stdout, stderr, return_code = helper.call(
                'rrdtool lastupdate %s' % rrd_db_file)
            assert not return_code
            # BUG FIX: split on a real newline '\n'; the original split on
            # the literal two-character string '/n', which never matches,
            # so the comparison ran against the entire output.
            assert stdout.split('\n')[-1].split(':')[-1].strip() == out_map[metric]
def _read_data(self):
    """Fetch AVERAGE data from each datarow's RRD file.

    Returns:
        OrderedDict mapping millisecond timestamps (epoch * 1000) to a
        dict of {datarow name: value}. As a side effect sets
        self._start / self._end / self._resolution from rrdtool's reply.
    """
    d = OrderedDict()
    for dr in self.datarows:
        datafile = str(os.path.join(settings.MUNIN_DATA_DIR, dr.rrdfile))
        if self._flush_rrdcached_before_fetch and self._rrdcached:
            # Best-effort flush: stale data is acceptable on failure.
            # BUG FIX: was a bare `except:`, which also swallows
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            try:
                rrdtool.flushcached(['--daemon', self._rrdcached, datafile])
            except Exception:
                logger.exception("Could not flushrrdcached at %s",
                                 self._rrdcached)
        date_range_start = ""
        date_range_end = ""
        # Map the requested scope onto an rrdtool AT-style start time.
        if self.data_scope == SCOPE_DAY:
            date_range_start = "-s -34h"
        elif self.data_scope == SCOPE_WEEK:
            date_range_start = "-s -200h"
        elif self.data_scope == SCOPE_MONTH:
            date_range_start = "-s -756h"
        elif self.data_scope == SCOPE_YEAR:
            date_range_start = "-s -365d"
        elif self.data_scope == SCOPE_RANGE:
            date_range_start = "-s %d" % self._range_start
            if self._range_end is not None:
                date_range_end = '-e %d' % self._range_end
        # NOTE(review): the (no,) unpacking assumes the RRD file has
        # exactly one data source; a multi-DS file would raise ValueError
        # here — confirm that is intended.
        (self._start, self._end, self._resolution), (no,), data = rrdtool.fetch(
            [datafile, 'AVERAGE', date_range_start, date_range_end])
        for dt, value in zip(
                (x * 1000 for x in range(self._start,
                                         self._end - self._resolution,
                                         self._resolution)),
                (x[0] for x in data)):
            if dt not in d:
                d[dt] = {}
            d[dt][dr.name] = value
    return d
def get_data(host, service, start=None, end=None, resolution='150'):
    """Read RRD data for (host, service) over the range [start, end].

    Args:
        host: hostname whose directory under RRA_PATH holds the RRD file.
        service: service name; maps to <host>/<service>.rrd and .coil.
        start: range start, epoch seconds; defaults to one DAY before end.
        end: range end, epoch seconds; defaults to now.
        resolution: target number of datapoints (string or int); the
            rrdtool step is derived as (end - start) / resolution.
    """
    if not end:
        end = int(time.time())
    if not start:
        start = end - DAY
    rra_path = settings.RRA_PATH
    # NOTE(review): assumes RRA_PATH ends with a path separator — verify.
    rrd = '%s%s/%s.rrd' % (rra_path, host, service)
    coilfile = '%s%s/%s.coil' % (rra_path, host, service)
    railroad_conf = 'railroad_conf'
    statistics = 'statistics'
    trend_attributes = ['color', 'stack', 'scale', 'display']
    # calculate custom resolution
    resolution = (int(end) - int(start)) / int(resolution)
    # Flush rrdcached as a separate command rather as part of fetch().
    # A failure to flush isn't fatal, just results in stale data.
    daemon = getattr(settings, 'RRDCACHED_ADDRESS', None)
    if daemon:
        try:
            rrdtool.flushcached('--daemon', str(daemon), str(rrd))
        except rrdtool.error, ex:
            logger.warning('rrdcached flush failed: %s', ex)
def flush_rrd_cache(self):
    """Flush the rrd_cache for necessary files.

    Asks the rrdcached daemon listening on self.__rrd_cached_socket to
    write out pending updates for every file in self.rrd_files under
    self.rrd_path_host.
    """
    paths = ['/'.join([self.rrd_path_host, f.encode('ascii', 'replace')])
             for f in self.rrd_files]
    # BUG FIX: the original passed the whole map()-produced list as ONE
    # positional argument; rrdtool.flushcached expects each file path as
    # its own string argument, so unpack the list with *.
    rrdtool.flushcached('--daemon', self.__rrd_cached_socket, *paths)