def __init__(self, path, name, date_start=None, date_end=None):
    """Load an RRD's sample window and the owning user's metadata.

    :param path: directory containing the RRD file (its last component is
        the user hash)
    :param name: RRD file name
    :param date_start: optional epoch start; clamped to the RRD's own range
    :param date_end: optional epoch end; defaults to the RRD's last update
    :raises Exception: when the resolved start is after the resolved end,
        or when rrdtool.fetch fails
    """
    self.rrd_name = name
    # Fall back to the RRD's first sample when no start was given, or when
    # the requested start lies beyond the last recorded sample.
    if date_start is None or (date_start > rrdtool.last(str(os.path.join(path, name)))):
        self.date_start = str(rrdtool.first(str(os.path.join(path, name))))
    else:
        self.date_start = str(date_start)
    if date_end is None:
        self.date_end = str(rrdtool.last(str(os.path.join(path, name))))
    else:
        self.date_end = str(date_end)
    if float(self.date_start) > float(self.date_end):
        raise Exception(
            "Invalid date_start={0} and date_end={1}".format(
                str(datetime.fromtimestamp(float(self.date_start))),
                str(datetime.fromtimestamp(float(self.date_end)))))
    # The containing directory's name is the user hash; user directories are
    # bucketed by the first two characters of that hash.
    self.user_hash = os.path.split(path)[1]
    self.user_path = os.path.join(
        self.get_first_part_path(path, 3), "users", "user",
        self.user_hash[:2], self.user_hash
    )
    self.uuid = self.get_uuid_from_file(self.user_path)
    self.age = 0
    self.sn = self.get_machine_sn_from_file(self.user_path)
    self.school = "unkown"  # NOTE(review): sic — other code may match this exact spelling
    log.debug('*******************************************')
    log.debug(' creating a RRD instance ')
    log.debug('start: %s', str(datetime.fromtimestamp(float(self.date_start))))
    log.debug('end: %s', str(datetime.fromtimestamp(float(self.date_end))))
    log.debug('PATH: %s', path)
    log.debug('RRD NAME: %s', name)
    log.debug('\n')
    try:
        # Fetch AVERAGE samples at 60 s resolution over the resolved window.
        self.rrd = rrdtool.fetch(str(os.path.join(path, name)), 'AVERAGE',
                                 '-r 60',
                                 '-s ' + self.date_start,
                                 '-e ' + self.date_end)
    except Exception as e:
        raise Exception("rrdtool.fetch: {0}".format(e))
    log.debug(' DS ')
    # Resolve each expected data-source name to its column index in the
    # fetched header; missing DS names are logged but not fatal.
    for item in self.DS.keys():
        idx = self.get_ds_index(item)
        if idx != -1:
            self.DS[item] = idx
        else:
            log.warning('DS %s not found in header of %s rrd file', item, name)
    log.debug('***********************************************')
def LoadConfig(rrd_dir):
    """Build a {node_id: state-dict} map from <rrd_dir>/config plus each
    node's RRD databases.

    The config file holds one "<id> <type> <description>" line per node.
    """
    nodes = {}
    with open(os.path.join(rrd_dir, 'config'), 'r') as fp:
        while True:
            line = fp.readline()
            if not line:
                break
            node_id, node_type, description = line.split(' ')
            d = {'type': node_type, 'desc': description}
            # Extract battery and other state
            rrd_file = os.path.join(rrd_dir, 'node%s_bat.rrd' % node_id)
            v = DailyValue(rrd_file, 'node%s_bat' % node_id, 'LAST')
            if v:
                # Raw reading -> volts; assumes the node's ADC scaling —
                # TODO confirm the (+50)*20/1000 conversion.
                d['bat'] = (float(v) + 50) * 20 / 1000.0
            else:
                d['bat'] = 0.0
            last_report = rrdtool.last(rrd_file)
            d['last_report'] = last_report
            # Seconds since the node last reported.
            d['report_delta'] = time.time() - last_report
            if node_type == 'TempSensor':
                # Temperature sensors also carry 24 h aggregate stats.
                rrd_file = os.path.join(rrd_dir, 'node%s_temp.rrd' % node_id)
                val_name = 'node%s_temp' % node_id
                d['temp'] = DailyValue(rrd_file, val_name, 'LAST')
                d['temp_24h_max'] = DailyValue(rrd_file, val_name, 'MAXIMUM')
                d['temp_24h_avg'] = DailyValue(rrd_file, val_name, 'AVERAGE')
                d['temp_24h_min'] = DailyValue(rrd_file, val_name, 'MINIMUM')
            nodes[int(node_id)] = d
    return nodes
def getStoreData():
    # Replay samples from testData.txt into the RRD (one per second) and
    # stream the accumulated RRD contents to the server after each update.
    # NOTE(review): relies on module-level `dataName1`, `name`, `startTime`
    # and `sendData` — Python 2 code (print statements).
    fi = open('testData.txt', 'r')
    for line in fi:
        # generate data
        VAR1 = float(line)
        # see what we generated to determine if rrd data is correct
        print dataName1 + ': %f\n' % VAR1
        # update rrd
        ret = rrdtool.update(name, "%d:%f" % (time.time(), VAR1))
        # Error message. Error if time stamp is wrong
        if ret:
            print 'ERROR: ' + rrdtool.error()
        # set endtime for fetching data from rrd
        lastTimeStamp = rrdtool.last(name)
        # send data from RRDTool to server
        # CURRENTLY SENDS ALL CONTENTS OF RRD - to change replace startTime with 1 second ago...
        sendData(rrdtool.fetch(name, 'LAST', '--start', "%d" % (startTime),
                               '--end', '%d' % (lastTimeStamp)))
        # let me know if everything worked
        print 'client sleeping...\n'
        # sleep for 1 second
        time.sleep(1)
    fi.close()
def info(self, context, target=None):
    """Describe the metrics stored for *target*.

    :param target: host whose RRD tree to inspect; defaults to this
        machine's FQDN
    :returns: dict mapping metric name ->
        {from_time, to_time, cfs, resolutions}
    """
    # Figure out the target.
    if target is None:
        target = socket.getfqdn()
    rrdpath = os.path.join(FLAGS.canary_rrdpath, target)

    # Grab available metrics.
    available = glob.glob(os.path.join(rrdpath, "*/*.rrd"))

    metrics = {}
    for filename in available:
        # NOTE: Not sure quite why, but it seems like the rrdtool commands
        # below barf unless this happens -- maybe barfing on unicode?
        filename = str(filename)
        m = re.match("^%s/([^\/-]+)(-([^\/]+))?/([^\.]+)\.rrd$" % rrdpath, filename)
        if m:
            plugin = m.group(1)
            unit = m.group(3)
            key = m.group(4)
            # NOTE: At this point we construct a metric name that is
            # equivalent to how we deconstruct the name above. It's
            # important that these two operations are symmetric for
            # the sake of a user's sanity.
            if unit:
                metric = "%s[%s].%s" % (plugin, unit, key)
            else:
                metric = "%s.%s" % (plugin, key)
            if metric not in metrics:
                metrics[metric] = {}
            metrics[metric]["from_time"] = rrdtool.first(filename)
            metrics[metric]["to_time"] = rrdtool.last(filename)

            # Collect the base step, the PDP-per-row counts and the
            # consolidation functions from the RRD header.
            step = 1
            pdps = []
            cfs = []
            for (k, v) in rrdtool.info(filename).items():
                if re.match("^step$", k):
                    step = int(v)
                elif re.match("^rra\[\d+\]\.pdp_per_row$", k):
                    pdps.append(int(v))
                elif re.match("^rra\[\d+\]\.cf", k):
                    cfs.append(v)

            cfs = sorted(set(cfs))
            # Fix: map() returns an iterator on Python 3 and has no .sort();
            # build the sorted, de-duplicated list explicitly (identical
            # result on Python 2).
            resolutions = sorted(set(step * x for x in pdps))

            metrics[metric]["cfs"] = cfs
            metrics[metric]["resolutions"] = resolutions
    return metrics
def getStartTime(self):
    """Gets the timestamp of the first non-null entry in the RRD"""
    first = rrdtool.first(self.filename)
    end = rrdtool.last(self.filename)
    cf = self.getPeriodCF(first)
    try:
        info, _ds_rrd, data = rrdtool.fetch(self.filename, cf,
                                            "--start", str(first),
                                            "--end", str(end))
    except rrdtool.error:
        # Adjust for daylight saving times
        first = first - 3600
        end = end + 3600
        info, _ds_rrd, data = rrdtool.fetch(self.filename, cf,
                                            "--start", str(first),
                                            "--end", str(end))
    #start_rrd = info[0]
    #end_rrd = info[1]
    step = info[2]
    # Walk the fetched rows; each all-None row advances the candidate start
    # by one RRD step, stopping at the first row with any real value.
    for line in data:
        all_none = True
        for value in line:
            if value is not None:
                all_none = False
        if not all_none:
            break
        first = first + step
    if first >= end:
        raise RRDError("The RRD file looks empty !")
    return first
def lastupd_rrd(self, path):
    """Return the last-update time of the RRD at *path* as a datetime.

    Falls back to the local-time Unix epoch when the RRD cannot be read.
    """
    import rrdtool
    import time
    from datetime import datetime
    try:
        stamp = rrdtool.last(str(path))
    except Exception:
        # Unreadable/missing RRD: report the epoch instead of raising.
        stamp = time.mktime([1970, 1, 1, 0, 0, 0, 0, 0, 0])
    return datetime.fromtimestamp(stamp)
def get_bandwidth():
    # Sample the last 5 minutes of eth0 throughput from the pnp4nagios RRD
    # and store one datapoint in the module-level stats dict `j`.
    fn = "/var/lib/pnp4nagios/mesonet/eth0.rrd"
    ts = rrdtool.last(fn)
    data = rrdtool.fetch(fn, "AVERAGE", "-s", str(ts - 300), "-e", str(ts))
    samples = data[2]
    # samples[-2]: second-most-recent row (the newest row is often still
    # incomplete); column 2 presumably is the bandwidth DS — TODO confirm
    # against the RRD definition.
    j['stats']['bandwidth'] = samples[-2][2]
def rrd_boundary(self, boundary):
    """Return the RRD's first or last observation time, truncated to midnight.

    :param boundary: 'first' selects the oldest sample; anything else the newest
    """
    lookup = rrdtool.first if boundary == 'first' else rrdtool.last
    moment = datetime.datetime.fromtimestamp(lookup(self.filename))
    return moment.replace(hour=0, minute=0, second=0, microsecond=0)
def get_last_update(rrd_name):
    """Retrieve the timestamp of the last update to a specific RRD.

    :param rrd_name: The name of the target RRD
    :type rrd_name: str
    :return: The timestamp of the last update
    :rtype: int
    """
    return rrdtool.last(rrd_name)
def update_rrd(self, dom, t):
    """update_rrd

    Update RRD with records at t time.

    True : if data are up-to-date for current minute
    False : syslog may have probably been already recorded
            or something wrong
    """
    fname = "%s/%s.rrd" % (self.workdir, dom)
    m = t - (t % rrdstep)  # align t on the RRD step boundary
    if not os.path.exists(fname):
        self.lupdates[fname] = self.init_rrd(fname, m)
        self._dprint("[rrd] create new RRD file %s" % fname)
    else:
        if not fname in self.lupdates:
            # Cache the RRD's own last-update time on first touch.
            self.lupdates[fname] = rrdtool.last(str(fname))
    if m <= self.lupdates[fname]:
        if self.verbose:
            print "[rrd] VERBOSE events at %s already recorded in RRD" % m
        return False
    # Build the colon-separated template of DS names.
    tpl = ""
    for v in variables:
        if tpl != "":
            tpl += ":"
        tpl += v
    # Missing some RRD steps
    # Is this really necessary...?
    if m > self.lupdates[fname] + rrdstep:
        # Backfill every skipped step with zero values so the RRD timeline
        # stays contiguous.
        values = ""
        for v in variables:
            if values != "":
                values += ":"
            values += "0"
        for p in range(self.lupdates[fname] + rrdstep, m, rrdstep):
            if self.verbose:
                print "[rrd] VERBOSE update -t %s %s:%s (SKIP)" \
                    % (tpl, p, values)
            rrdtool.update(str(fname), "-t", tpl, "%s:%s" % (p, values))
    # Now write the real record for slot m.
    values = "%s" % m
    tpl = ""
    for v in variables:
        values += ":"
        values += str(self.data[dom][m][v])
        if tpl != "":
            tpl += ":"
        tpl += v
    if self.verbose:
        print "[rrd] VERBOSE update -t %s %s" % (tpl, values)
    rrdtool.update(str(fname), "-t", tpl, values)
    self.lupdates[fname] = m
    return True
def _addElement(self, name, timeStamp, WR_MB, RD_MB, REQS, fileRRD):
    """Append one sample to fileRRD, creating the RRD (and its folder)
    on first use; samples older than the RRD's last update are skipped.
    """
    if os.path.exists(fileRRD):
        lastTimeStamp = rrdtool.last(fileRRD)
        if lastTimeStamp >= timeStamp:
            # Fix: corrected the garbled diagnostic ("is to smal skip").
            print('timeStamp is too small, skip: ' + str(name) + ' ' + str(timeStamp))
        else:
            self._addEntry(name, timeStamp, WR_MB, RD_MB, REQS, fileRRD)
    else:
        if not os.path.exists(self.folder):
            os.mkdir(self.folder)
        self._generateRRD(name, timeStamp, WR_MB, RD_MB, REQS, fileRRD)
def get_reqs():
    # Sum Apache requests/sec across the iemvs100..108 pool (pnp4nagios RRDs)
    # and store the total in the module-level stats dict `j`.
    count = 0
    for i in range(100, 109):
        fn = "/var/lib/pnp4nagios/iemvs%03i/Apache_Stats_II.rrd" % (i,)
        ts = rrdtool.last(fn)
        data = rrdtool.fetch(fn, "AVERAGE", "-s", str(ts - 300), "-e", str(ts))
        samples = data[2]
        if len(samples) < 2:
            continue
        # Second-most-recent row (the newest is often incomplete); column 13
        # presumably holds the req/sec DS — TODO confirm DS ordering.
        count += samples[-2][13]
    j['stats']['apache_req_per_sec'] = count
def rrdfetch_wrapper(rrd_db, cf, resolution, start=-3600, end=-1):
    """Fetch rows from an RRD, clamping the window to the stored data.

    Negative start/end are offsets relative to the RRD's last update.
    Returns ((start, end, step), [[timestamp, value], ...]); rrdtool pads the
    requested window, so the first and last fetched rows are trimmed.
    """
    # type check
    assert type(rrd_db) is str
    assert type(cf) is str
    assert type(resolution) is int
    assert type(start) is int
    assert type(end) is int
    assert start < end
    #print 'start is %s, end is %s' % (start, end)
    last = rrdtool.last(rrd_db)
    first = rrdtool.first(rrd_db)
    # Resolve relative (negative) bounds against the newest sample.
    if start < 0:
        start = last + start
    if end < 0:
        end = last + end
    # Requested window lies entirely outside the stored range: nothing to do.
    if start >= last or end <= first:
        return (start, end, resolution), []
    if start < first:
        start = first
    if end > last:
        end = last
    #print start, end
    # adjust the start/end time to match the result with the args
    #start = int(start) - int(resolution)
    #end = int(end) - int(resolution)
    # comment this line if we are using the native
    # python binding(rrdtoolmodule.so)
    data = rrdtool.fetch(rrd_db, cf, "-r", str(resolution), "-e", str(end), \
                         "-s", str(start))
    #logger.debug(r'rrdtool.fetch("%s", "%s", "-r", "%s", "-e", "%s", "-s", "%s")' %\
    #(rrd_db, cf, str(resolution), str(end), str(start)))
    #print data[0]
    #return (data[0][0]+data[0][2], data[0][1], data[0][2]),\
    #data[2][0:-1]
    rstep = data[0][2]
    # Trim the padding row at each end of the fetched window.
    rstart = data[0][0] + rstep
    rend = data[0][1] - rstep  # minus rstep if we are using the native python binding
    rdata = []
    timestamp = rstart
    # Pair each value with its timestamp, dropping the trailing padding row.
    for d in data[2][0:-1]:
        rdata.append([timestamp, d[0]])
        timestamp += rstep
    return (rstart, rend, rstep), rdata
def rrdfetchlatest(rrd_db, cf, resolution):
    """Return the newest non-null sample from rrd_db at the given resolution.

    Looks back over a five-step window ending at the RRD's last update; when
    every value in the window is None, the most recent row is returned as-is.
    """
    window = 5 * resolution
    end = rrdtool.last(rrd_db)
    meta, samples = rrdfetch_wrapper(rrd_db, cf, resolution, end - window, end)
    # Scan backwards for the newest row carrying a real value.
    chosen = -1
    for idx in range(len(samples) - 1, -1, -1):
        if samples[idx][1] is not None:
            chosen = idx
            break
    return meta, [samples[chosen]]
def _get_time_range(rrd, opts): """Get a time range for generate report from rrd.""" start_time = None end_time = None if opts.start_time is not None and opts.end_time is not None: start_time = opts.start_time.strftime('%s') end_time = opts.end_time.strftime('%s') elif opts.last_day: end_time = rrdtool.last(rrd) start_time = end_time - ONE_DAY_SECS elif opts.last_hour: end_time = rrdtool.last(rrd) start_time = end_time - ONE_HOUR_SECS elif opts.last_week: end_time = rrdtool.last(rrd) start_time = end_time - ONE_DAY_SECS * 7 elif opts.last_month: end_time = rrdtool.last(rrd) start_time = end_time - ONE_DAY_SECS * 28 elif opts.last_year: end_time = rrdtool.last(rrd) start_time = end_time - ONE_DAY_SECS * 365 return str(start_time), str(end_time)
def get_duration_points(rrd_file, graph_period):
    """Return (first timestamp with actual data, last timestamp) for rrd_file."""
    start = rrdtool.first(rrd_file, '--rraindex', graph_period)
    end = rrdtool.last(rrd_file)
    # Start gives us the start timestamp in the data but it might be null/empty
    # (i.e no data entered for that time.)
    # Find the timestamp in which data was first entered.
    command = [
        'rrdtool', 'fetch', rrd_file, 'AVERAGE',
        '--start', str(start), '--end', str(end)]
    output = subprocess.check_output(command)
    actual_start = find_actual_start(output)
    return actual_start, end
def get_list(self): """ list vm stats with start and end times """ f = os.listdir(settings.PATH_TO_RRD) rrds = {} for rrd in f: try: t = [] t.append(rrdtool.first(settings.PATH_TO_RRD + rrd)) t.append(rrdtool.last(settings.PATH_TO_RRD + rrd)) rrds.update({os.path.splitext(rrd)[0]: t}) except Exception, e: log.error(0, 'stat_error %s %s' % (rrd, e))
def fetch_current_rrd_data(service, interval=60, aggregate='AVERAGE'):
    """
    Fetch the current data for SERVICE over INTERVAL aggregated by AGGREGATE.
    """
    rrd = '%s%s/%s.rrd' % (settings.RRA_PATH, service['host_name'],
                           service['service_description'])
    if os.path.exists(rrd):
        # Window ends at the RRD's most recent update.
        end = rrdtool.last(rrd)
        return rrdtool.fetch(rrd, '--start', str(end - interval),
                             '--end', str(end), aggregate)
    # State-only checks have no RRD file; hand back a dummy result with the
    # same shape as rrdtool.fetch output.
    now = int(time.time())
    return [(now - interval, now, interval), ('dummy1', 'dummy2'), (0, 0)]
def create_graphs(imagedir, rrddir, last_hours, maxitems):
    """create graph images for list of rrd files """
    # Render everything in UTC so timestamps line up across files.
    os.environ['TZ'] = 'UTC'
    time.tzset()
    files = listfiles(rrddir, '*.rrd')
    lasts = [rrd.last(f) for f in files]
    last = max(lasts)
    first = last - (last_hours * 60 * 60)
    # sort file file according to the last value in rrd file
    last_min = min(lasts)
    step = max([rrd.info(f)['step'] for f in files])
    # retrieve last 3 datapoints
    # seems that fetch delivers one datapoint before and after required
    # start/end, so using "--start e-step*2 --end last-step" instead of
    # "--start e-step*3 --end last"
    data = [rrd.fetch(f, 'MAX', '--resolution', str(step),
                      '--start', 'e-' + str(step * 2),
                      '--end', str(last_min - step))[2][0]
            for f in files]
    # Pair each file with its latest value and order the files by that value.
    data = zip([a for (a, _) in data], files)
    files = [f for (_, f) in sorted(data)]
    # Wider images for longer time spans (1000 px per day, minimum one day).
    width = (last_hours // 24 + 1) * 1000
    cfs = [('rrdgraph_epi2product', 'epi2product:MAX', 'EPI file -> product'),
           ('rrdgraph_timeslot2product', 'timeslot2product:MAX', 'HRIT timeslot -> product')]
    for imagename, arch, title in cfs:
        # One combined image over all files ...
        path = os.path.join(imagedir, "%s_all.png" % (imagename,))
        create_single_graph(path, imagename, first, last, arch, title, files, width)
        # ... plus paginated images of at most `maxitems` files each.
        for n, file_chunk in enumerate(chunks(files, maxitems)):
            path = os.path.join(imagedir, "%s_%02d.png" % (imagename, n))
            create_single_graph(path, imagename, first, last, arch, title, file_chunk, width)
def graphRAM():
    # Render the last 10 minutes of RAM load from trendRAM.rrd, shading the
    # regions that exceed the 2.5 / 3 / 3.5 GB thresholds (values appear to
    # be in KB — TODO confirm the unit against the collector).
    ultima_lectura = int(rrdtool.last("trendRAM.rrd"))
    tiempo_final = ultima_lectura
    tiempo_inicial = tiempo_final - 600
    ret = rrdtool.graphv(
        "deteccionRAM.png",
        "--start", str(tiempo_inicial),
        "--end", str(tiempo_final),
        "--vertical-label=RAM load",
        '--lower-limit', '0',
        '--upper-limit', '100',
        "DEF:cargaRAM=trendRAM.rrd:RAMload:AVERAGE",
        # Zero out samples below each threshold so only the excess is shaded.
        "CDEF:umbral25=cargaRAM,250000,LT,0,cargaRAM,IF",
        "CDEF:umbral30=cargaRAM,300000,LT,0,cargaRAM,IF",
        "CDEF:umbral35=cargaRAM,350000,LT,0,cargaRAM,IF",
        "VDEF:cargaMAX=cargaRAM,MAXIMUM",
        "VDEF:cargaMIN=cargaRAM,MINIMUM",
        "VDEF:cargaSTDEV=cargaRAM,STDEV",
        "VDEF:cargaLAST=cargaRAM,LAST",
        "AREA:cargaRAM#00FF00:Carga de la RAM",
        "AREA:umbral25#FF9F00:Carga RAM mayor que 2.5GB",
        "AREA:umbral30#FF9F70:Carga RAM mayor que 3GB",
        "AREA:umbral35#FF0000:Carga RAM mayor que 3.5GB",
        "HRULE:200000#00FF00:Umbral 2GB",
        "HRULE:250000#FF9F00:Umbral 2.5GB",
        "HRULE:300000#FF9F70:Umbral 3GB",
        "HRULE:350000#FF0000:Umbral 3.5GB",
        "PRINT:cargaLAST:%6.2lf %SLAst",
        "GPRINT:cargaMIN:%6.2lf %SMIN",
        "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
        "GPRINT:cargaLAST:%6.2lf %SLAST")
def update_zenProcessMetricTable(PP, system_name, process_name):
    # Populate the zenProcessMetric SNMP table for each metric of the given
    # process: metric name, latest non-NaN value, and staleness measured in
    # collection cycles.
    for metric_name in metric_names(system_name, process_name):
        indices = [system_name, process_name, metric_name]
        PP.add_str(oid(MIB.zenProcessMetricName, indices), metric_name)
        try:
            rrd_filename = daemons_path(
                system_name, '{0}_{1}.rrd'.format(process_name, metric_name))
            info, _, values = rrdtool.fetch(rrd_filename, 'AVERAGE')
            # Often the last sample is missing. Allow for it by using
            # the second-most-recent sample instead.
            if not none_or_nan(values[-1][0]):
                metric_value = values[-1][0]
            else:
                metric_value = values[-2][0]
            if not none_or_nan(metric_value):
                PP.add_str(
                    oid(MIB.zenProcessMetricValue, indices), metric_value)
            # Figure out how many cycles (with decimal precision) it has
            # been since the metric was last updated. This is a better
            # measure than seconds since update because some metrics are
            # stored more frequently than others.
            step = info[2]
            seconds_since_update = time.time() - rrdtool.last(rrd_filename)
            cycles_since_update = seconds_since_update / step
            PP.add_str(
                oid(MIB.zenProcessMetricCyclesSinceUpdate, indices),
                '{0:.2f}'.format(cycles_since_update))
        except Exception:
            # Best-effort: a missing or unreadable RRD simply leaves this
            # metric's value/staleness rows unpopulated.
            pass
def graficarNoLineal1(archivo):
    # Render a Holt-Winters anomaly-detection graph for rrd/<archivo>.rrd:
    # observed traffic, yesterday's curve shifted forward for comparison,
    # the HW prediction with its 2-sigma confidence band, and failure ticks.
    title = "Deteccion de comportamiento anomalo"
    fname = "rrd/" + archivo + ".rrd"
    endDate = rrdtool.last(fname)  # last value in the RRD (from the XML)
    begDate = endDate - 40000
    # Same window, one day earlier (yesterday's data for the SHIFT overlay).
    DatosAyer = begDate - 86400
    FinAyer = endDate - 86400
    scale = 8  # presumably bytes -> bits (labels read "bits in") — TODO confirm
    # Re-tune the Holt-Winters smoothing parameters before graphing.
    rrdtool.tune(fname, '--alpha', '0.9')
    rrdtool.tune(fname, '--beta', '0.0035')
    rrdtool.tune(fname, '--gamma', '0.1')
    ret = rrdtool.graph("png/predAC.png",
                        '--start', str(begDate),
                        '--end', str(endDate),
                        '--title=' + title,
                        "--vertical-label=Bytes/s",
                        '--slope-mode',
                        "DEF:obs=" + fname + ":inoctets:AVERAGE",
                        "DEF:obsAyer=" + fname + ":inoctets:AVERAGE:start=" + str(DatosAyer) + ":end=" + str(FinAyer),
                        "DEF:pred=" + fname + ":inoctets:HWPREDICT",
                        "DEF:dev=" + fname + ":inoctets:DEVPREDICT",
                        "DEF:fail=" + fname + ":inoctets:FAILURES",
                        'SHIFT:obsAyer:86400',
                        #"RRA:DEVSEASONAL:1d:0.1:2",
                        #"RRA:DEVPREDICT:5d:5",
                        #"RRA:FAILURES:1d:7:9:5""
                        "CDEF:scaledobs=obs," + str(scale) + ",*",
                        "CDEF:scaledobsAyer=obsAyer," + str(scale) + ",*",
                        "CDEF:upper=pred,dev,2,*,+",
                        "CDEF:lower=pred,dev,2,*,-",
                        "CDEF:scaledupper=upper," + str(scale) + ",*",
                        "CDEF:scaledlower=lower," + str(scale) + ",*",
                        "CDEF:scaledpred=pred," + str(scale) + ",*",
                        "TICK:fail#FDD017:1.0: Fallas",
                        "AREA:scaledobsAyer#9C9C9C:Ayer",
                        "LINE3:scaledobs#00FF00:In traffic",
                        "LINE1:scaledpred#FF00FF:Prediccion",
                        #"LINE1:outoctets#0000FF:Out traffic",
                        "LINE1:scaledupper#ff0000:Upper Bound Average bits in",
                        "LINE1:scaledlower#0000FF:Lower Bound Average bits in")
def servicio(agenteSerializer):
    """Poll every configured SNMP service/performance/monitoring metric for
    the agent, append the readings to the matching RRDs, then regenerate the
    agent's report via main().
    """
    enddate = int(time.mktime(time.localtime()))
    begdate = enddate - (86400)
    direcciones = {}
    agente = convertSelializerAgenteToAgente(agenteSerializer)
    rrd = '{}/static/files/{}/{}/{}/'.format(
        path, agente.Comunidad, agente.Ip.replace(".", ""), agente.NombreHost)
    os.chdir(rrd + "rrd/")
    for lista in (serviciosSNMP, rendimientoSNMP, monitoreosSNMP):
        for elemento in lista:
            nombre = elemento[0] + ".rrd"
            direcciones.update({rrd + "rrd/" + elemento[0]: 0})
            total_input, total_output = get_SNMP_task(
                agente, elemento[1], elemento[2])
            # Fix: compare by value — `is not "error"` tests identity and
            # only worked by accident of CPython string interning.
            if total_input != "error" and total_output != "error":
                # Next slot is 300 s after the RRD's last update.
                valor = str(rrdtool.last(nombre) + 300) + ":" + str(
                    total_input) + ':' + str(total_output)
                ret = rrdtool.update(nombre, valor)
                if not ret:
                    rrdtool.dump(nombre, elemento[0] + '.xml')
                else:
                    print(rrdtool.error())
    main(rrd, begdate, enddate, direcciones)
def get_last_timestamp_rrd(rrdfile_name):
    """
    get_last_timestamp_rrd: get the last timestamp of the rrd

    Args:
        rrdfile_name (string): it is the name of your file/rrd

    Returns:
        a dict with the value and a status_message
        last_timestamp (dict): last timestamp of rrd (is necessary for creating a png)
    """
    # Fix: initialise the result before the try block — previously
    # `db_last_timestamp` was only bound on success, so the error path
    # raised NameError when building the result dict (and the unused
    # `last_timestamp = []` placeholder did not help).
    db_last_timestamp = None
    get_last_status_msg = ""
    try:
        db_last_timestamp = rrdtool.last(rrdfile_name)
        get_last_status_msg = f"success: last timestamp of {rrdfile_name} was found"
    except Exception as e:
        get_last_status_msg = f"error: get_last_timestamp_rrd({rrdfile_name}) was not possible: {sys.exc_info()[1]} \n{e}"
    get_last_msg = dict()
    get_last_msg['last_timestamp'] = db_last_timestamp
    get_last_msg['status'] = get_last_status_msg
    return get_last_msg
def update(self, updates):
    """Record one sample into the network RRD and regenerate the graph.

    :param updates: dict of DS name -> raw value (mutated in place: values
        are stringified, None becomes the UNKNOWN marker)
    :returns: True when the regenerated graph reports a positive failure
        count, False otherwise (including when the graph yields NaN)
    """
    updateString = rrdConstants.NOW
    for key, value in updates.items():
        # Fix: use `is not None` — `!= None` is an identity-vs-equality
        # idiom error and can misbehave with overloaded __eq__.
        updates[key] = str(value) if value is not None else rrdConstants.UNKNOWN
    updateString += (':' + updates[rrdConstants.DS_NETWORK])
    rrdtool.update(self.fileName, updateString)
    # Graph the most recent NETWORK_TIME_FRAME window.
    end = rrdtool.last(self.fileName)
    begin = str(end - rrdConstants.NETWORK_TIME_FRAME)
    end = str(end)
    fail = float(rrdGraphs.makeNetworkGraph(self.path, begin, end))
    if math.isnan(fail):
        return False
    return int(fail) > 0
def discos(agenteSerializer):
    """Walk the agent's disk table over SNMP and append a used/total sample
    to each per-disk RRD (dumping an XML copy after every successful update).
    """
    agente = convertSelializerAgenteToAgente(agenteSerializer)
    rrd = '{}/static/files/{}/{}/{}/'.format(
        path, agente.Comunidad, agente.Ip.replace(".", ""), agente.NombreHost)
    os.chdir(rrd + "rrd/")
    walk = walk_SNMP_task(agente, '1.3.6.1.4.1.2021.9.1.2')
    # Fix: compare by value — `is`/`is not` on str literals tests identity
    # and only worked by accident of CPython string interning.
    if walk != 'error':
        for elemento in walk:
            oid = elemento.oid.split(".")
            # Per-disk OIDs for used (…9.1.6) and total (…9.1.7) size.
            oid_1 = '1.3.6.1.4.1.2021.9.1.6.' + oid[-1]
            oid_2 = '1.3.6.1.4.1.2021.9.1.7.' + oid[-1]
            nombre = elemento.value.replace("/", "_") + ".rrd"
            nombre = "Disk" + nombre
            total_input, total_output = get_SNMP_task(
                agente, oid_1, oid_2)
            if total_input != "error" and total_output != "error":
                # Next slot is 300 s after the RRD's last update.
                valor = str(rrdtool.last(nombre) + 300) + ":" + str(
                    total_input) + ':' + str(total_output)
                ret = rrdtool.update(nombre, valor)
                if not ret:
                    rrdtool.dump(nombre, nombre + '.xml')
                else:
                    print(rrdtool.error())
def prediction():
    # Daemon loop: poll in/out octet counters over SNMP, append the input
    # reading to netPred.rrd, dump the RRD to XML and regenerate the graph.
    total_input_traffic = 0
    total_output_traffic = 0
    fname = "netPred.rrd"
    while 1:
        total_input_traffic = int(
            consultaSNMP('comunidadEquipo2_grupo4CM3', 'localhost', 161,
                         '1.3.6.1.2.1.2.2.1.10.1'))
        total_output_traffic = int(
            consultaSNMP('comunidadEquipo2_grupo4CM3', 'localhost', 161,
                         '1.3.6.1.2.1.2.2.1.16.1'))
        # Next slot is 100 s after the RRD's last update.
        valor = str(rrdtool.last(fname) + 100) + ":" + str(total_input_traffic)
        ret = rrdtool.update(fname, valor)
        rrdtool.dump(fname, 'netP.xml')
        time.sleep(1)
        graph()
        # NOTE(review): `ret` is checked only after sleeping and graphing,
        # so update errors surface late.
        if ret:
            print(rrdtool.error())
        time.sleep(300)
def graph():
    # Render the Holt-Winters prediction view for the last 10 hours of
    # netPred.rrd: observed traffic, prediction, 2-sigma confidence band,
    # and failure ticks.
    fname = "netPred.rrd"
    title = "Deteccion de comportamiento anomalo, valor de Alpha 0.1"
    endDate = rrdtool.last(fname)  # last value in the RRD (from the XML)
    begDate = endDate - 36000
    # Re-tune the HWPREDICT adaption rate before graphing.
    rrdtool.tune(fname, '--alpha', '0.1')
    ret = rrdtool.graph(
        "netPalphaBajoFallas.png",
        '--start', str(begDate),
        '--end', str(endDate),
        '--title=' + title,
        "--vertical-label=Bytes/s",
        '--slope-mode',
        "DEF:obs=" + fname + ":inoctets:AVERAGE",
        "DEF:outoctets=" + fname + ":outoctets:AVERAGE",
        "DEF:pred=" + fname + ":inoctets:HWPREDICT",
        "DEF:dev=" + fname + ":inoctets:DEVPREDICT",
        "DEF:fail=" + fname + ":inoctets:FAILURES",
        #"RRA:DEVSEASONAL:1d:0.1:2",
        #"RRA:DEVPREDICT:5d:5",
        #"RRA:FAILURES:1d:7:9:5""
        "CDEF:scaledobs=obs,8,*",
        "CDEF:upper=pred,dev,2,*,+",
        "CDEF:lower=pred,dev,2,*,-",
        "CDEF:scaledupper=upper,8,*",
        "CDEF:scaledlower=lower,8,*",
        "CDEF:scaledpred=pred,8,*",
        "TICK:fail#FDD017:1.0:Fallas",
        "LINE3:scaledobs#00FF00:In traffic",
        "LINE1:scaledpred#FF00FF:Prediccion\\n",
        "LINE1:outoctets#0000FF:Out traffic",
        "LINE1:scaledupper#ff0000:Upper Bound Average bits in\\n",
        "LINE1:scaledlower#0000FF:Lower Bound Average bits in")
def check_CPU2(name, ds, upper, inicio):
    # Extrapolate the least-squares trend of DS `ds` forward one RRD step at
    # a time from `inicio`, returning the estimated timestamp at which the
    # trend crosses the `upper` threshold.
    print('checking.....')
    info = rrdtool.info(name)
    rrdstep = int(info['step'])
    estimado = int(inicio) + rrdstep
    # Scratch file: the graph is only produced for its PRINT output.
    graphtmpfile = tempfile.NamedTemporaryFile()
    while 1:
        #print('Hora estimada = ' + str(ctime(estimado)) + ' - ' + str(estimado))
        values = rrdtool.graph(
            graphtmpfile.name + 'F',
            #name.split('.')[0] + '.png',
            "--start", str(inicio),
            "--end", str(estimado),
            "DEF:carga=" + name + ':' + ds + ':LAST',  # trend.rrd:CPUload:AVERAGE
            "VDEF:a=carga,LSLSLOPE",
            "VDEF:b=carga,LSLINT",
            'CDEF:avg2=carga,POP,a,COUNT,*,b,+',
            'PRINT:avg2:LAST:%1.0lf')
        #print(values)
        try:
            # values[2][0] holds the PRINT result: the trend value at the
            # candidate end time.
            fail = int(values[2][0])
            if int(fail) >= int(upper):
                print('Encontramos la falla en:' + str(estimado))
                return estimado
            else:
                # Runaway extrapolation guard: bail out with the RRD's last
                # update time.
                if int(fail) > int(100):
                    print('ERROR :(' + str(estimado - 150759900))
                    return int(rrdtool.last(name))
        except:
            # PRINT may yield no parsable value (e.g. NaN) — keep stepping.
            print('Sin valores')
        estimado = estimado + int(rrdstep)
def grafica(base):
    # Endless loop: regenerate the HDD-usage graph for <base>.rrd every 15 s
    # and mail an alert whenever the latest sample exceeds 10,000,000.
    nombreBase = base + ".rrd"
    nombreResultado = base + ".png"
    ultima_lectura = int(rrdtool.last(nombreBase))
    tiempo_final = ultima_lectura
    tiempo_inicial = tiempo_final - 1800
    while 1:
        ret = rrdtool.graphv(
            nombreResultado,
            "--start", str(tiempo_inicial),
            "--vertical-label=Uso HDD",
            "--title=Uso de HDD",
            "--color", "ARROW#009900",
            '--vertical-label', "Uso de HDD (M)",
            '--lower-limit', '0',
            '--upper-limit', '100',
            "DEF:carga=" + nombreBase + ":CPUload:AVERAGE",
            "DEF:carga2=" + nombreBase + ":CPUload:AVERAGE",
            "LINE1:50",
            "AREA:5#ff000022:stack",
            "VDEF:CPUlast=carga,LAST",
            "VDEF:CPUmin=carga,MINIMUM",
            "VDEF:CPUavg=carga,AVERAGE",
            "VDEF:CPUmax=carga,MAXIMUM",
            "COMMENT: Now Min Avg Max//n",
            "GPRINT:CPUlast:%12.0lf%s",
            "GPRINT:CPUmin:%10.0lf%s",
            "GPRINT:CPUavg:%13.0lf%s",
            "GPRINT:CPUmax:%13.0lf%s",
            # Least-squares trend line over the window.
            "VDEF:a=carga,LSLSLOPE",
            "VDEF:b=carga,LSLINT",
            'CDEF:avg2=carga,POP,a,COUNT,*,b,+',
            'CDEF:avg3=carga2,POP,a,COUNT,*,b,+',
            "LINE3:3000000#000000",
            "LINE3:6000000#00BB00",
            "LINE3:10000000#BB0000",
            "LINE2:avg2#FFBB00",
            "AREA:carga#00FF00:CPU load",
            "PRINT:carga:LAST:%6.1lf")
        print(ret['print[0]'])
        # PRINT output: the latest sample value.
        a = float(ret['print[0]'])
        if (a > float(10000000)):
            send_alert_attached("Alerta de Sobre Carga de HDD")
        # else:
        #    send_alert_attached("Vas bien papu")
        time.sleep(15)
def last(self):
    "Same as rrdtool last"
    # Validate internal state, then delegate to the module-level last()
    # (presumably the rrdtool binding's function — confirm against imports).
    self._sanity()
    return last(self._rrd_name)
import rrdtool
import time
from path import *

# Script: render a one-hour CPU-load graph ending at the RRD's last update,
# including a least-squares trend line (LSLSLOPE/LSLINT). Relies on rrdpath,
# rrdname and pngpath from the `path` module.
ultima_lectura = int(rrdtool.last(rrdpath + rrdname))
tiempo_final = ultima_lectura
tiempo_inicial = tiempo_final - 3600
ret = rrdtool.graph(
    pngpath + "trend.png",
    "--start", str(tiempo_inicial),
    "--end", str(tiempo_final),
    "--vertical-label=Carga CPU",
    "--title=Uso de CPU",
    "--color", "ARROW#009900",
    '--vertical-label', "Uso de CPU (%)",
    '--lower-limit', '0',
    '--upper-limit', '100',
    "DEF:carga=" + rrdpath + rrdname + ":CPUload:AVERAGE",
    "AREA:carga#00FF00:Carga CPU",
    "LINE1:30",
    "AREA:5#ff000022:stack",
    "VDEF:CPUlast=carga,LAST",
    "VDEF:CPUmin=carga,MINIMUM",
    "VDEF:CPUavg=carga,AVERAGE",
    "VDEF:CPUmax=carga,MAXIMUM",
    "COMMENT:Now Min Avg Max",
    "GPRINT:CPUlast:%12.0lf%s",
    "GPRINT:CPUmin:%10.0lf%s",
    "GPRINT:CPUavg:%13.0lf%s",
    "GPRINT:CPUmax:%13.0lf%s",
    # Least-squares trend projection.
    "VDEF:m=carga,LSLSLOPE",
    "VDEF:b=carga,LSLINT",
    'CDEF:tendencia=carga,POP,m,COUNT,*,b,+',
    "LINE2:tendencia#FFBB00")
def getLast(self):
    """Return the timestamp of the most recent update in this RRD file."""
    rrd_path = str(self.filename)
    return rrdtool.last(rrd_path)
import SnmpGet
import time
import rrdtool
import os

# Scratch/test script: create a small gauge RRD and poll a storage OID into
# it once per second. NOTE(review): the file extension is ".rdd" (not .rrd)
# and the polled OID is hrStorageUsed while the variable says CPU — both
# look like leftovers from experimentation.
v = '751.86 k'
a = float(v.split(".")[0])
print(a)
ret = rrdtool.create("TESTRAM.rdd",
                     "--start", 'N',
                     "--step", '10',
                     "DS:RAMload:GAUGE:600:U:U",
                     "RRA:AVERAGE:0.5:1:24")
while 1:
    tiempo_final = int(rrdtool.last("TESTRAM.rdd"))
    tiempo_inicial = tiempo_final - 600  # NOTE(review): computed but unused
    carga_CPU = int(SnmpGet.consultaSNMP('gr_4cm3', 'localhost', 161, 2,
                                         '1.3.6.1.2.1.25.2.3.1.6.6'))
    valor = "N:" + str(carga_CPU)
    print(valor)
    ret = rrdtool.update("TESTRAM.rdd", valor)
    time.sleep(1)
def getLastDate(datafile_name):
    """Return the last-update timestamp recorded in the given RRD file."""
    return rrdtool.last(datafile_name)
total_input_traffic = 0 total_output_traffic = 0 while 1: total_input_traffic = int( consultaSNMP('comunidadSNMP', 'localhost', '1.3.6.1.2.1.2.2.1.10.3')) total_output_traffic = int( consultaSNMP('comunidadSNMP', 'localhost', '1.3.6.1.2.1.2.2.1.16.3')) valor = "N:" + str(total_input_traffic) + ':' + str(total_output_traffic) print(valor) ret = rrdtool.update('netP.rrd', valor) rrdtool.dump('netP.rrd', 'netP.xml') ret = rrdtool.graph( "netP.png", "--start", str(rrdtool.last('netP.rrd')), "--end", str(rrdtool.last('netP.rrd') + 60), "--vertical-label=Bytes/s", "DEF:obs=netP.rrd:inoctets:AVERAGE", "DEF:outoctets=netP.rrd:outoctets:AVERAGE", "DEF:pred=netP.rrd:inoctets:HWPREDICT", "DEF:dev=netP.rrd:inoctets:DEVPREDICT", "CDEF:scaledobs=obs,8,*", "CDEF:upper=pred,dev,2,*,+", "CDEF:lower=pred,dev,2,*,-", "CDEF:scaledupper=upper,8,*", "CDEF:scaledlower=lower,8,*", "LINE1:scaledobs#00FF00:In traffic", "LINE1:outoctets#0000FF:Out traffic", "LINE1:scaledupper#ff0000:Upper Bound Average bits out", "LINE1:scaledlower#ff0000:Lower Bound Average bits out") time.sleep(1) if ret:
def DetectarComportamiento(ip, comunidad, idAgente):
    # Continuous monitor for one agent: sample CPU load over SNMP, feed the
    # per-agent prediction RRD, render a least-squares forecast graph and a
    # Holt-Winters anomaly graph, and send one-shot mail alerts when either
    # the forecast nears the threshold or the failure detector fires.
    global banderaCorreos
    banderaMC = True   # one-shot flag for the least-squares forecast alert
    banderaPre = True  # one-shot flag for the Holt-Winters failure alert
    thread_read = threading.Thread(target=EnviarCorreo, args=[ip, idAgente])
    thread_read.start()
    while (ip in agentes):
        CPULoad = consultaSNMP(comunidad, ip, '1.3.6.1.2.1.25.3.3.1.2.196608')
        #CPULoad = consultaSNMP(comunidad, ip, '1.3.6.1.2.1.2.2.1.10.1' )
        valor = "N:" + CPULoad
        rrdtool.update("RRDsAgentes/prediccion" + idAgente + '.rrd', valor)
        rrdtool.dump('RRDsAgentes/prediccion' + idAgente + '.rrd',
                     'RRDsAgentes/prediccion' + idAgente + '.xml')
        time.sleep(0.5)
        umbral = 90
        ultimo = rrdtool.last("RRDsAgentes/prediccion" + idAgente + ".rrd")
        tiempo_inicial = ultimo - 200
        # Graph 1: least-squares projection two minutes into the future.
        ret2 = rrdtool.graphv(
            "Graficas/minimoscuadrados" + idAgente + ".png",
            "--start", str(tiempo_inicial),
            "--end", str(ultimo + 60 * 2),
            "--title", "Carga de CPU",
            "--vertical-label=Carga de CPU",
            '--lower-limit', '0',
            '--upper-limit', '100',
            "DEF:carga=" + "RRDsAgentes/prediccion" + idAgente + '.rrd' + ":carga:AVERAGE",
            "CDEF:umbral25=carga," + str(umbral) + ",LT,0,carga,IF",
            "VDEF:cargaMAX=carga,MAXIMUM",
            "VDEF:cargaMIN=carga,MINIMUM",
            "VDEF:cargaSTDEV=carga,STDEV",
            "VDEF:cargaLAST=carga,LAST",
            "VDEF:m=carga,LSLSLOPE",
            "VDEF:b=carga,LSLINT",
            'CDEF:y=carga,POP,m,COUNT,*,b,+',
            "VDEF:yUltimo=y,LAST",
            "AREA:umbral25#FF9F00:Prediccion en 2 min ",
            "GPRINT:yUltimo:%6.2lf %S.",
            "HRULE:" + str(umbral) + "#FF0000:Umbral al " + str(umbral) + "%\\n",
            "AREA:carga#00FF00:Carga del CPU",
            "GPRINT:cargaMIN:%6.2lf %SMIN",
            "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
            "GPRINT:cargaLAST:%6.2lf %SLAST",
            "LINE2:y#FFBB00",
            "PRINT:yUltimo:%6.2lf %S ")
        #print(str(ret2['print[0]']))
        try:
            # PRINT output: the projected load at the end of the window.
            prediccion = float(ret2['print[0]'])
        except:
            prediccion = 0.0
        if (prediccion >= 90.0 and banderaCorreos and banderaMC):
            print("Enviando correo prediccion")
            banderaMC = False  # alert only once per monitor run
            send_alert_attached(
                "El agente cerca de alcanzar el umbral establecido",
                "Graficas/minimoscuadrados" + idAgente,
                "El porcentaje de uso de cpu del agente " + ip + " se encuentra cerca del 90%. Por favor, tomar las medidas correspondientes."
            )
        # Graph 2: Holt-Winters prediction with confidence band and failures.
        ret = rrdtool.graphv(
            "Graficas/prediccion" + idAgente + ".png",
            '--start', str(ultimo - 60 * 5),
            '--end', str(ultimo + 120),
            '--title=' + "Comportamiento anómalo",
            "--vertical-label=Carga de cpu",
            '--slope-mode',
            "DEF:valor=" + "RRDsAgentes/prediccion" + idAgente + ".rrd" + ":carga:AVERAGE",
            "DEF:prediccion=" + "RRDsAgentes/prediccion" + idAgente + ".rrd" + ":carga:HWPREDICT",
            "DEF:desv=" + "RRDsAgentes/prediccion" + idAgente + ".rrd" + ":carga:DEVPREDICT",
            "DEF:falla=" + "RRDsAgentes/prediccion" + idAgente + ".rrd" + ":carga:FAILURES",
            "CDEF:carga=valor",
            "CDEF:limiteSuperior=prediccion,desv,2,*,+",
            "CDEF:limiteInferior=prediccion,desv,2,*,-",
            "CDEF:superior=limiteSuperior",
            "CDEF:inferior=limiteInferior",
            "CDEF:pred=prediccion,",
            "TICK:falla#FDD017:1.0:_Fallas\\n",
            "LINE3:carga#00FF00:Carga cpu",
            "VDEF:cargaMAX=carga,MAXIMUM",
            "VDEF:cargaMIN=carga,MINIMUM",
            "VDEF:cargaLAST=carga,LAST",
            "GPRINT:cargaMIN:%6.2lf %SCarga",
            "GPRINT:cargaMIN:%6.2lf %SMin",
            "GPRINT:cargaMAX:%6.2lf %sMax",
            "LINE1:pred#FF00FF:Predicción",
            "VDEF:predLast=pred,LAST",
            "GPRINT:predLast:%6.2lf %spred",
            "LINE1:superior#ff0000:Limite superior",
            "LINE1:inferior#0000FF:Limite inferior",
            "VDEF:lastfail=falla,LAST",
            "PRINT:lastfail: %c :strftime",
            "PRINT:lastfail:%6.2lf %S ",
            'PRINT:falla:MIN:%1.0lf',
            'PRINT:falla:MAX:%1.0lf',
        )
        # Second PRINT output: numeric value of the last FAILURES sample.
        ultima_falla = ret['print[1]']
        try:
            val = float(ultima_falla)
        except:
            val = 0.0
        if (banderaCorreos and banderaPre):
            if (val > 0):
                banderaPre = False  # alert only once per monitor run
                print("Enviando correo fallas")
                send_alert_attached(
                    "Un agente ha presentado una falla",
                    "Graficas/prediccion" + idAgente,
                    "El porcentaje de uso de cpu del agente " + ip + " ha presentado una falla . Por favor, tomar las medidas correspondientes."
                )
def EjecutarLb(comunidad, ip, port, name, times):
    """Sample RAM, CPU and disk usage of one agent and render baseline graphs.

    For each of the three metrics: query the values over SNMP, feed them into
    the per-agent RRD under `lbPath`, and draw a baseline graph with three
    HRULE thresholds (Ready/Set/Go) derived from the running average.

    NOTE(review): `times` is accepted but never used; `lbPath`, `consultaSNMP`
    and `rrdtool` must be provided by the enclosing module — confirm.
    """
    agentPath = lbPath + "/"
    # 1: interface RAM
    ramUsada = int(
        consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.2.3.1.6.1'))
    ramTotal = int(
        consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.2.3.1.5.1'))
    valor = "N:" + str(ramUsada) + ':' + str(ramTotal)
    rrdtool.update(str(agentPath) + 'RAM.rrd', valor)
    # 1: interface RAM graph (last hour)
    finalTime = int(rrdtool.last(str(agentPath) + "RAM.rrd"))
    initialTime = finalTime - 3600
    ret = rrdtool.graphv(
        str(agentPath) + "RAM.png",
        "--start", str(initialTime),
        "--vertical-label=Carga RAM",
        "--title=USO DE RAM - LINEA DE BASE",
        "--color", "ARROW#009900",
        '--vertical-label', "Uso de RAM (%)",
        '--lower-limit', '0',
        '--upper-limit', '100',
        "DEF:usada=" + str(agentPath) + "RAM.rrd:RAMusado:AVERAGE",
        "DEF:total=" + str(agentPath) + "RAM.rrd:RAMtotal:AVERAGE",
        # used/total expressed as a percentage
        "CDEF:porciento=usada,100,*,total,/",
        "VDEF:cargaSTDEV=usada,STDEV",
        "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
        "AREA:porciento#00FF00:RAM Storage",
        'VDEF:ultimo=porciento,LAST',
        "VDEF:RAMlast=porciento,LAST",
        "VDEF:RAMmin=porciento,MINIMUM",
        "VDEF:RAMavg=porciento,AVERAGE",
        "VDEF:RAMmax=porciento,MAXIMUM",
        "VDEF:RAMmax2=porciento,STDEV",
        # thresholds: average of (value+5) and (value+2)
        "CDEF:aux=porciento,5,+",
        "CDEF:aux2=porciento,2,+",
        "VDEF:RAMavg2=aux,AVERAGE",
        "VDEF:RAMavg3=aux2,AVERAGE",
        "HRULE:RAMavg2#BB0000:Umbral Go",
        "HRULE:RAMavg3#00BB00:Umbral Set",
        "HRULE:RAMavg#000000:Umbral Ready",
        "CDEF:umbral25=porciento,RAMavg2,LT,0,porciento,IF",
        "AREA:umbral25#FF9F00:Tráfico de carga mayor que umbral 3",
        #'CDEF:abc=porciento,RAMavg,100,LIMIT',
        "PRINT:ultimo:%12.0lf%s",
        "PRINT:RAMavg2:%12.0lf%s ",
        "COMMENT: Last Now Min Avg Max//n",
        "GPRINT:RAMlast:%12.0lf%s",
        "GPRINT:RAMmin:%10.0lf%s",
        "GPRINT:RAMavg:%13.0lf%s",
        "GPRINT:RAMmax:%13.0lf%s",
    )
    #ultimo_valor= float(ret['print[0]'])
    #limite= float(ret['print[1]'])
    #print (ultimo_valor )
    #valores= ultimo_valor.split(" ")
    #lim=limite.split(" ")
    #print (name+"RAM--valor" +str(ultimo_valor) + "limite "+ str(limite))
    #if float(ultimo_valor)>float(limite):
    #    sendAlertEmail("Agente "+name+"Sobrepasó umbral RAM Go con :"+str(ultimo_valor), str(agentPath ) +"RAM.png",str(agentPath ) + "RAM.rrd")
    #-----------------------------------------------------------------------------------------
    # 2: interface CPU
    #if name=='linux'or name=="linuxmario":
    carga_CPU = int(
        consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.3.3.1.2.196608'))
    #elif name=="examen":
    #    carga_CPU = int(consultaSNMP(comunidad , ip , port ,'1.3.6.1.2.1.25.3.3.1.2.769'))
    #else:
    #    carga_CPU = int(consultaSNMP(comunidad , ip , port ,'1.3.6.1.2.1.25.3.3.1.2.3'))
    #carga_CPU = int(consultaSNMP(comunidad , ip , port ,'1.3.6.1.2.1.25.3.3.1.2.196608'))
    valor = "N:" + str(carga_CPU)
    time.sleep(1)
    rrdtool.update(str(agentPath) + 'CPU.rrd', valor)
    tiempo_final = int(rrdtool.last(str(agentPath) + "CPU.rrd"))
    tiempo_inicial = tiempo_final - 3600
    ret2 = rrdtool.graphv(
        str(agentPath) + "CPU.png",
        "--start", str(tiempo_inicial),
        "--end", str(tiempo_final + 1000),
        "--title", "Carga de CPU",
        "--vertical-label=Uso de CPU (%)",
        '--lower-limit', '0',
        '--upper-limit', '100',
        "DEF:carga=" + str(agentPath) + "CPU.rrd:CPUload:AVERAGE",
        # "CDEF:umbral25=carga,"+str(umbral)+",LT,0,carga,IF",
        "VDEF:cargaMAX=carga,MAXIMUM",
        "VDEF:cargaMIN=carga,MINIMUM",
        "VDEF:cargaSTDEV=carga,STDEV",
        "VDEF:cargaLAST=carga,LAST",
        "AREA:carga#00FF00:Carga del CPU",
        # "HRULE:"+str(umbral)+"#FF0000:"+str(umbral)+"%",
        "GPRINT:cargaMIN:%6.2lf %SMIN",
        "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
        "GPRINT:cargaLAST:%6.2lf %SLAST",
        # --- least-squares method (disabled)
        #"VDEF:m=carga,LSLSLOPE",
        #"VDEF:b=carga,LSLINT",
        #'CDEF:avg2=carga,POP,m,COUNT,*,b,+',
        #"LINE2:avg2#FFBB00",
        #'VDEF:um=m,0,LT,90,0,IF'
        #'VDEF:um2=um,100,LT,0,90,0,IF'
        "VDEF:CPUavg=carga,AVERAGE",
        "CDEF:aux=carga,5,+",
        "CDEF:aux2=carga,2,+",
        "VDEF:CPUavg2=aux,AVERAGE",
        "VDEF:CPUavg3=aux2,AVERAGE",
        "HRULE:CPUavg2#BB0000:Umbral Go",
        "HRULE:CPUavg3#00BB00:Umbral Set",
        "HRULE:CPUavg#000000:Umbral Ready",
        "CDEF:umbral25=carga,CPUavg2,LT,0,carga,IF",
        # NOTE(review): the same AREA is drawn twice with different legends —
        # probably a leftover; kept as-is.
        "AREA:umbral25#FF9F00:Tráfico de carga mayor que CPUavg2",
        "AREA:umbral25#FF9F00:Tráfico de carga mayor que umbral 3",
        # 'CDEF:abc=avg2,'+str(umbral)+','+str(umbral+ 100000000)+',LIMIT',
        # 'CDEF:abc2=avg2,-10,0,LIMIT',
        #'VDEF:primero=abc,FIRST',
        #'VDEF:primero2=abc2,FIRST',
        #"GPRINT:primero: Alcanzara el umbral "+ str(umbral)+"% @ %c :strftime",
        #"GPRINT:primero2: Alcanzara el umbral 0% @ %c :strftime",
        #"PRINT:primero: Alcanzara umbral el @ %c :strftime",
        "PRINT:cargaLAST:%6.2lf %S ",
        "PRINT:CPUavg2:%6.2lf %S ")
    #alcanza_umbral=ret2['print[0]']
    #ultimo_valor= ret2['print[0]']
    #limite= ret2['print[1]']
    #print (ultimo_valor + limite)
    #if float(ultimo_valor)>float(limite):
    #    sendAlertEmail("Evidencia 3 Equipo2 Grupo 4cm1 :Agente "+name+"Sobrepasó umbral CPU Go con :"+str(ultimo_valor), str(agentPath ) +"CPU.png",str(agentPath ) + "CPU.rrd")
    #    peirnt ("Envie correo ")
    #    time.sleep(3600)
    #-----------------------------------------------------------------------------------------
    # 3: interface HDD (queried with storage OIDs index .36)
    # if name=="linux" or name=="linuxmario":
    ramUsada = int(
        consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.2.3.1.6.36'))
    ramTotal = int(
        consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.2.3.1.5.36'))
    #else :
    #    ramUsada = int(consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.2.3.1.5.1'))
    #    ramTotal = int(consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.2.3.1.4.1'))
    valor = "N:" + str(ramUsada) + ':' + str(ramTotal)
    rrdtool.update(str(agentPath) + 'HDD.rrd', valor)
    rrdtool.dump(str(agentPath) + 'HDD.rrd', str(agentPath) + 'HDD.xml')
    # HDD graph (last hour) — mirrors the RAM graph above
    finalTime = int(rrdtool.last(str(agentPath) + "HDD.rrd"))
    initialTime = finalTime - 3600
    ret = rrdtool.graphv(
        str(agentPath) + "HDD.png",
        "--start", str(initialTime),
        "--vertical-label=Carga HDD",
        "--title=USO DE HDD - LINEA DE BASE",
        "--color", "ARROW#009900",
        '--vertical-label', "Uso de RAM (%)",
        '--lower-limit', '0',
        '--upper-limit', '100',
        "DEF:usada=" + str(agentPath) + "HDD.rrd:HDDusado:AVERAGE",
        "DEF:total=" + str(agentPath) + "HDD.rrd:HDDtotal:AVERAGE",
        "CDEF:porciento=usada,100,*,total,/",
        "VDEF:cargaSTDEV=usada,STDEV",
        "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
        "AREA:porciento#00FF00:RAM Storage",
        'VDEF:ultimo=porciento,LAST',
        #'VDEF:ultimo2=carga,LAST',
        "VDEF:RAMlast=porciento,LAST",
        "VDEF:RAMmin=porciento,MINIMUM",
        "VDEF:RAMavg=porciento,AVERAGE",
        "VDEF:RAMmax=porciento,MAXIMUM",
        "VDEF:RAMmax2=porciento,STDEV",
        "CDEF:aux=porciento,5,+",
        "CDEF:aux2=porciento,2,+",
        "VDEF:RAMavg2=aux,AVERAGE",
        "VDEF:RAMavg3=aux2,AVERAGE",
        "HRULE:RAMavg2#BB0000:Umbral Go",
        "HRULE:RAMavg3#00BB00:Umbral Set",
        "HRULE:RAMavg#000000:Umbral Ready",
        "CDEF:umbral25=porciento,RAMavg2,LT,0,porciento,IF",
        "AREA:umbral25#FF9F00:Tráfico de carga mayor que umbral 3",
        #'CDEF:abc=porciento,RAMavg,100,LIMIT',
        "PRINT:RAMlast:%12.0lf%s",
        "PRINT:RAMavg2:%12.0lf%s ",
        "COMMENT: Last Now Min Avg Max//n",
        "GPRINT:RAMlast:%12.0lf%s",
        "GPRINT:RAMmin:%10.0lf%s",
        "GPRINT:RAMavg:%13.0lf%s",
        "GPRINT:RAMmax:%13.0lf%s",
    )
def getstatus():
    """Return a JSON status report for the devices named in the request.

    Expects a comma-separated ``hwids`` query parameter.  For every device
    owned by the logged-in user it reports the age of the last RRD update,
    whether the device is online (last update less than 5 s ago) and, when
    online, per-sensor min/avg/max values (min/max averaged over the last
    hour, avg from the most recent sample).

    Fixes: ``device == None`` -> ``is None``; redundant ``map(lambda ...)``
    replaced by a list comprehension; "recieved" typo in the user-facing
    message corrected.
    """
    hwids = [str(x) for x in request.args.get('hwids').split(',')]
    now = utcnow()
    statii = {}
    for hwid in hwids:
        if hwid == '':
            continue
        # Defaults reported for unknown or offline devices.
        statii[hwid] = {'time': -1, 'online': False, 'msg': "", 'values': {}}
        device = ceresdb.devices.find_one({'hwid': hwid})
        if device is None:
            continue
        if device['username'] != session['username']:
            flash('Wrong username [' + session['username'] +
                  '] for device [' + hwid + ']')
            return redirect(url_for('myceres'))
        timestamp = rrdtool.last(str(device['file']))
        statii[hwid]['time'] = now - timestamp
        if (now - timestamp) < 5:
            statii[hwid]['online'] = True
            # One-hour windows for MIN/MAX; only the latest sample for AVERAGE.
            minvals = rrdtool.fetch(str(device['file']), 'MIN',
                                    '--start=' + str(timestamp - 3600),
                                    '--end=' + str(timestamp))
            avgvals = rrdtool.fetch(str(device['file']), 'AVERAGE',
                                    '--start=' + str(timestamp - 1),
                                    '--end=' + str(timestamp))
            maxvals = rrdtool.fetch(str(device['file']), 'MAX',
                                    '--start=' + str(timestamp - 3600),
                                    '--end=' + str(timestamp))
            for idx, sensorname in enumerate(avgvals[1]):
                # Average the non-None datapoints of the window; 0 when empty
                # (same as the previous accumulator-based loops).
                mins = [row[idx] for row in minvals[2] if row[idx] is not None]
                avgmin = sum(mins) / float(len(mins)) if mins else 0
                maxs = [row[idx] for row in maxvals[2] if row[idx] is not None]
                avgmax = sum(maxs) / float(len(maxs)) if maxs else 0
                statii[hwid]['values'][sensorname] = {
                    'min': str(avgmin)[0:5],
                    'avg': str(avgvals[2][0][idx])[0:5],
                    'max': str(avgmax)[0:5]}
        else:
            statii[hwid]['msg'] = "No report received for {0} seconds".format(now - timestamp)
    return jsonify(data=statii)
class NotifyHW:
    """Holt-Winters aberration notifier for a single RRD file.

    Checks the FAILURES array of the configured RRD, renders a chart of the
    observed value with its Holt-Winters forecast bounds, and mails the
    resulting PNGs.

    NOTE(review): the class body calls ``rrdtool.last`` at import time, so
    merely importing this module requires ``bdrrdtool/netPred.rrd`` to exist.
    """
    COMMASPACE = ', '
    # Define params
    rrdpath = "/home/francisco/Documentos/Redes 3/Redes3-master/bdrrdtool/"
    pngpath = "/home/francisco/Documentos/Redes 3/Redes3-master/graficas/"
    width = '500'
    height = '200'
    # NOTE(review): mailreceip is a single string but is passed to
    # COMMASPACE.join() and smtplib.sendmail() as if it were a list of
    # addresses — join() will interleave ", " between characters; confirm.
    mailsender = "*****@*****.**"
    mailreceip = "*****@*****.**"
    mailserver = 'smtp.gmail.com: 587'
    password = '******'
    fname = "netPred.rrd"
    fname1 = "netPred.rrd"
    # Generate charts for last 48 hours
    enddate = int(rrdtool.last("bdrrdtool/netPred.rrd"))  # last RRD timestamp
    begdate = enddate - 1300

    def send_alert_attached(self, subject, flist):
        """ Will send e-mail, attaching png files in the flist. """
        msg = MIMEMultipart()
        msg['Subject'] = subject
        msg['From'] = self.mailsender
        msg['To'] = self.COMMASPACE.join(self.mailreceip)
        for file in flist:
            # attach <pngpath>/<basename>.png for every name in flist
            png_file = self.pngpath + file.split('.')[0] + '.png'
            #print png_file
            fp = open(png_file, 'rb')
            img = MIMEImage(fp.read())
            fp.close()
            msg.attach(img)
        mserver = smtplib.SMTP(self.mailserver)
        mserver.sendmail(self.mailsender, self.mailreceip, msg.as_string())
        mserver.quit()

    def check_aberration(self, rrdpath, fname):
        """ This will check for begin and end of aberration in file.
            Will return:
            0 if aberration not found.
            1 if aberration begins
            2 if aberration ends

            NOTE(review): the ``fname`` parameter is ignored — the method
            reads ``self.fname`` instead; confirm this is intentional.
        """
        ab_status = 0
        rrdfilename = rrdpath + self.fname
        info = rrdtool.info(rrdfilename)
        rrdstep = int(info['step'])
        lastupdate = info['last_update']
        # window covering roughly the last 100 steps
        previosupdate = str((lastupdate - rrdstep * 100) - 1)
        graphtmpfile = tempfile.NamedTemporaryFile()
        # Ready to get FAILURES from rrdfile
        # will process failures array values for time of 2 last updates
        values = rrdtool.graph(
            graphtmpfile.name + 'F',
            'DEF:f0=' + rrdfilename + ':inoctets:FAILURES:start=' + previosupdate + ':end=' + str(lastupdate),
            'PRINT:f0:MIN:%1.0lf',
            'PRINT:f0:MAX:%1.0lf',
            'PRINT:f0:LAST:%1.0lf')
        if (values[2][0] != '-nan'):
            fmin = int(values[2][0])
            fmax = int(values[2][1])
            flast = int(values[2][2])
            print("fmin=" + str(fmin) + ", fmax=" + str(fmax) + ",flast=" + str(flast))
            # check if failure value had changed.
            if (fmin != fmax):
                if (flast == 1):
                    ab_status = 1
                else:
                    ab_status = 2
        return ab_status

    def gen_image(self, rrdpath, pngpath, fname, width, height, begdate,
                  enddate):
        """ Generates png file from rrd database:
            rrdpath - the path where rrd is located
            pngpath - the path png file should be created in
            fname - rrd file name, png file will have the same name .png extention
            width - chart area width
            height - chart area height
            begdate - unixtime
            enddate - unixtime

            NOTE(review): ``rrdpath`` and ``fname`` parameters are shadowed by
            ``self.rrdpath``/``self.fname`` below; confirm before relying on
            per-call values.
        """
        # 24 hours before current time, will show on chart using SHIFT option
        ldaybeg = str(begdate - 86400)
        ldayend = str(enddate - 86400)
        # Will show some additional info on chart
        endd_str = time.strftime("%d/%m/%Y %H:%M:%S",
                                 (time.localtime(int(enddate)))).replace(
                                     ':', '\:')
        begd_str = time.strftime("%d/%m/%Y %H:%M:%S",
                                 (time.localtime(int(begdate)))).replace(
                                     ':', '\:')
        title = 'Chart for: ' + self.fname.split('.')[0]
        # Files names
        pngfname = pngpath + self.fname.split('.')[0] + '.png'
        rrdfname = self.rrdpath + self.fname
        # Get iformation from rrd file
        info = rrdtool.info(rrdfname)
        # print(info)
        rrdtype = info["ds[inoctets].type"]
        # Will use multip variable for calculation of totals, should be
        # usefull for internet traffic accounting, or call/minutes count
        # from CDR's.  Do not need logic for DERIVE and ABSOLUTE
        if rrdtype == 'COUNTER':
            multip = str(int(enddate) - int(begdate))
        else:
            # if value type is GAUGE should divide time to step value
            rrdstep = info['step']
            multip = str(round((int(enddate) - int(begdate)) / int(rrdstep)))
        # Make png image
        rrdtool.graph(
            pngfname,
            '--width', width,
            '--height', height,
            '--start', str(begdate),
            '--end', str(enddate),
            '--title=' + title,
            '--lower-limit', '0',
            '--slope-mode',
            'COMMENT:From\:' + begd_str + ' To\:' + endd_str + '\\c',
            'DEF:value=' + rrdfname + ':inoctets:AVERAGE',
            'DEF:pred=' + rrdfname + ':inoctets:HWPREDICT',
            'DEF:dev=' + rrdfname + ':inoctets:DEVPREDICT',
            'DEF:fail=' + rrdfname + ':inoctets:FAILURES',
            'DEF:yvalue=' + rrdfname + ':inoctets:AVERAGE:start=' + ldaybeg + ':end=' + ldayend,
            'SHIFT:yvalue:86400',
            'CDEF:upper=pred,dev,2,*,+',
            'CDEF:lower=pred,dev,2,*,-',
            'CDEF:ndev=dev,-1,*',
            'CDEF:tot=value,' + multip + ',*',
            'CDEF:ytot=yvalue,' + multip + ',*',
            'TICK:fail#FDD017:1.0:"Failures"\\n',
            'AREA:yvalue#C0C0C0:"Yesterday\:"',
            'GPRINT:ytot:AVERAGE:"Total\:%8.0lf"',
            'GPRINT:yvalue:MAX:"Max\:%8.0lf"',
            'GPRINT:yvalue:AVERAGE:"Average\:%8.0lf" \\n',
            'LINE3:value#0000ff:"Value \:"',
            'GPRINT:tot:AVERAGE:"Total\:%8.0lf"',
            'GPRINT:value:MAX:"Max\:%8.0lf"',
            'GPRINT:value:AVERAGE:"Average\:%8.0lf" \\n',
            'LINE1:upper#ff0000:"Upper Bound "',
            'LINE1:pred#ff00FF:"Forecast "',
            'LINE1:ndev#000000:"Deviation "',
            'LINE1:lower#00FF00:"Lower Bound"')
import time import rrdtool from getSNMP import consultaSNMP from Notify import check_aberration total_input_traffic = 0 total_output_traffic = 0 rrdpath = "/home/alina/Documentos/Redes3/Parcial2/hw/" pngpath = "/home/alina/Documentos/Redes3/Parcial2/hw/IMG/" fname = "netP.rrd" pngfname = "predict.png" title = "Deteccion de comportamiento anomalo" # Generate charts for last 24 hours endDate = rrdtool.last(fname) #ultimo valor del XML begDate = endDate - 86000 while 1: total_input_traffic = int( consultaSNMP('ComunidadASR', 'localhost', '1.3.6.1.2.1.2.2.1.10.1')) total_output_traffic = int( consultaSNMP('ComunidadASR', 'localhost', '1.3.6.1.2.1.2.2.1.16.1')) valor = str(rrdtool.last(fname) + 100) + ":" + str( total_input_traffic) + ':' + str(total_output_traffic) print valor ret = rrdtool.update(fname, valor) rrdtool.dump(fname, 'netP.xml') time.sleep(1) print check_aberration(rrdpath, fname) if ret:
def LastUpdateFor(self, rrd):
    """Return the timestamp of the last update of *rrd*, memoised per file.

    In dry-run mode a file that was never created reports 0; otherwise the
    answer comes from ``rrdtool.last`` and is cached in ``self.latest_update``.
    """
    # Dry runs may reference files that were never written to disk.
    if self.dry_run and not os.path.exists(rrd):
        return 0
    try:
        return self.latest_update[rrd]
    except KeyError:
        stamp = rrdtool.last(rrd)
        self.latest_update[rrd] = stamp
        return stamp
def check_rrds(rrddir, max_age_minutes, max_age_intervals=2.0,
               sat_availability_config=None, test_ref_datetime=None):
    """ analyses the rrdtool files in the specified directory

    Classifies every ``*.rrd`` file by the age of its last update:
    - old_prods: last update older than the allowed age
    - old_prods_ok: old, but excused by a known EUMETSAT UNS outage
    - new_prods: products whose very first datapoint just appeared
    - all_prods: every product seen

    ``max_age_minutes=None`` derives the limit from the file's step times
    ``max_age_intervals``; ``test_ref_datetime`` replaces "now" for tests.

    Returns (old_prods, new_prods, all_prods, old_prods_ok).

    NOTE(review): Python 2 syntax (print statement); ``rrd``, ``listfiles``,
    ``extract_prod_name``, ``to_unix_seconds`` and
    ``SatDataAvailabilityService`` come from the enclosing module.
    """
    old_prods = []
    new_prods = []
    all_prods = []
    old_prods_ok = []
    if sat_availability_config:
        sat_avail_service = SatDataAvailabilityService(
            config_yml_filename=sat_availability_config)
    else:
        sat_avail_service = None
    files = listfiles(rrddir, '*.rrd')
    for rrdfile in files:
        step = rrd.info(rrdfile)['step']
        if max_age_minutes is None:
            # derive the limit from the file's own update interval
            current_max_age_minutes = max_age_intervals * (step / 60.0)
        else:
            current_max_age_minutes = max_age_minutes
        if test_ref_datetime is None:
            # should be default
            reftime_dt = datetime.utcnow() - timedelta(
                seconds=current_max_age_minutes * 60)
        else:
            # only for debugging purposes
            reftime_dt = test_ref_datetime - timedelta(
                seconds=current_max_age_minutes * 60)
        reftime = to_unix_seconds(reftime_dt)
        name = extract_prod_name(rrdfile)
        last = rrd.last(rrdfile)
        # print "name: " + name + " reftime: " + str(reftime)
        # + " last: " + str(last) + " max_age_minutes: "
        # + str(current_max_age_minutes)
        # + " steps: " + str(step) + " max_age_intervals: "
        # + str(max_age_intervals)
        if last < reftime:
            if sat_avail_service:
                # an announced satellite outage makes the staleness "ok"
                res = sat_avail_service.get_data_availability_error(
                    reftime_dt, name)
                if res:
                    print "EUMETSAT UNS: Nr {}, Impact {}, " \
                        "Reference Time {}, Product {}".format(res.number,
                                                               res.impact,
                                                               reftime_dt,
                                                               name)
                    old_prods_ok.append(name)
                else:
                    old_prods.append(name)
            else:
                old_prods.append(name)
        else:
            # retrieve last 3 datapoints
            # seems that fetch delivers one datapoint before and after required
            # start/end, so using "--start e-step*2 --end last-step" instead of
            # "--start e-step*3 --end last"
            data = rrd.fetch(rrdfile, 'MAX', '--resolution', str(step),
                             '--start', 'e-' + str(step * 2),
                             '--end', str(last - step))
            lastvalues = [a for (a, _) in data[2]]
            # "new" means: only the most recent datapoint is populated
            if (lastvalues[-1] is not None and
                    (lastvalues[:-1] == [None] * (len(lastvalues) - 1))):
                new_prods.append(name)
        all_prods.append(name)
    return old_prods, new_prods, all_prods, old_prods_ok
def sum_data(options):
    """ summarize collected data

    Returns a dict with the measurement window (``ts_start``/``ts_end``),
    average/maximum of U and P, the integrated total ``P_tot`` and — when
    available — average/maximum of I.  The result is cached in a
    ``.summary`` JSON file next to the database and reused as long as it
    is newer than the database itself.

    Exits the process with status 3 when the database holds no data.

    Fixes: file handles are now closed via ``with`` even on error paths,
    the unused ``as e`` binding was dropped, and the bare ``except:`` around
    the optional I-values narrowed to ``except Exception``.
    """
    # check if summary-file exists and is newer than database
    sumfile = os.path.splitext(options.dbfile)[0] + ".summary"
    if os.path.exists(sumfile) and (os.path.getmtime(options.dbfile) <=
                                    os.path.getmtime(sumfile)):
        # summary is current: reuse the cached result
        with open(sumfile, "r") as f:
            return json.load(f)

    options.logger.msg("INFO", "creating summary-file: %s" % sumfile)
    # create summary
    try:
        if options.ts_start > 0:
            first = options.ts_start
        else:
            # either no data was collected or the summary file was deleted
            options.logger.msg("WARN", "trying to recreate start timepoint")
            first = rrdtool.first(options.dbfile)
            options.logger.msg("INFO", "estimated start is %r" % first)
        last = rrdtool.last(options.dbfile)
    except Exception:
        options.logger.msg("TRACE", traceback.format_exc())
        options.logger.msg("ERROR", "no data in database: %s" % options.dbfile)
        sys.exit(3)

    # extract avg and max values
    I_def = "DEF:I=%s:I:AVERAGE" % options.dbfile
    I_avg = "VDEF:I_avg=I,AVERAGE"
    I_max = "VDEF:I_max=I,MAXIMUM"
    U_def = "DEF:U=%s:U:AVERAGE" % options.dbfile
    U_avg = "VDEF:U_avg=U,AVERAGE"
    U_max = "VDEF:U_max=U,MAXIMUM"
    P_def = "DEF:P=%s:P:AVERAGE" % options.dbfile
    P_avg = "VDEF:P_avg=P,AVERAGE"
    P_max = "VDEF:P_max=P,MAXIMUM"
    args = [
        "rrdtool", "graphv", options.dbfile,
        "--start", str(first),
        "--end", str(last),
        I_def, I_avg, I_max,
        U_def, U_avg, U_max,
        P_def, P_avg, P_max,
        "PRINT:I_avg:%8.4lf", "PRINT:I_max:%8.4lf",
        "PRINT:U_avg:%8.4lf", "PRINT:U_max:%8.4lf",
        "PRINT:P_avg:%6.2lf", "PRINT:P_max:%6.2lf"
    ]
    # NOTE(review): args[3:] is passed as a single list argument; the
    # python-rrdtool binding flattens list arguments — confirm with the
    # installed version.  The dbfile doubles as graphv's output argument.
    info = rrdtool.graphv(options.dbfile, args[3:])
    summary = {
        "ts_start": first,
        "ts_end": last,
        "U_avg": float(info['print[2]']),
        "U_max": float(info['print[3]']),
        "P_avg": float(info['print[4]']),
        "P_max": float(info['print[5]']),
        # integrate average power over the window; /3600 converts s -> h
        "P_tot": round((last - first + 1) * float(info['print[4]']) / 3600, 2)
    }
    try:
        if options.voltage:
            summary["I_avg"] = float(info['print[0]'])
            summary["I_max"] = float(info['print[1]'])
        else:
            summary["I_avg"] = int(float(info['print[0]']))
            summary["I_max"] = int(float(info['print[1]']))
    except Exception:
        # I-values are optional: keep the summary without them
        pass

    # write results to file so subsequent calls reuse them
    with open(sumfile, "w") as f:
        json.dump(summary, f, indent=2, sort_keys=True)
    return summary
'--step', '{0}'.format(shortPeriodSeconds), # Lossless for a year of instantanious; longer for effective estimate. No unknowns allowed. # (60 * 60 * 24 * 365 = 31536000 seconds per year) 'RRA:AVERAGE:0:1:{0}'.format(int(31536000/shortPeriodSeconds)), # Daily average for five years; longer for effective estimate. # (3600 * 24 = 86400 seconds in a day;365 * 5 = 1825 days) 'RRA:AVERAGE:0:{0}:1825'.format(int(86400/shortPeriodSeconds)), *datasources ) if args.runRRD: # # Start computation where the stored values left off, if any. # If the database is new rrdtool last returns the database start time. # last = rrdtool.last(args.rrd) fromTime = datetime.datetime.utcfromtimestamp(int(last)) toTime = fromTime + shortPeriod log("Resuming network size computation for {0}.".format(toTime)) def formula(samples, networkSize): return networkSize * (1 - math.e**(-samples/networkSize)) def binarySearch(distinctSamples, samples): if math.fabs(samples - distinctSamples) < 3: """Not enough information to make an estimate.""" return float('NaN') # Upper and lower are network size guesses. lower = distinctSamples upper = distinctSamples * 2
import time import rrdtool from getSNMP import consultaSNMP total_input_traffic = 0 total_output_traffic = 0 #Se adquiere la informacon y se grafica while 1: total_input_traffic = int(consultaSNMP('ComunidadASR','localhost','1.3.6.1.2.1.2.2.1.10.1')) total_output_traffic = int(consultaSNMP('ComunidadASR','localhost','1.3.6.1.2.1.2.2.1.16.1')) #Acelearar el tiepo del grafico valor = str(rrdtool.last("netP.rrd")+60 )+":" + str(total_input_traffic) + ':' + str(total_output_traffic) print ("Valor en P2",valor) ret = rrdtool.update('netP.rrd', valor) rrdtool.dump('netP.rrd','netP.xml') ret = rrdtool.graph("netP.png", "--start", str(rrdtool.last('netP.rrd')-3600), "--end",str(rrdtool.last('netP.rrd')), "--vertical-label=Bytes/s", "DEF:obs=netP.rrd:inoctets:AVERAGE", #Predicion de in octets "DEF:outoctets=netP.rrd:outoctets:AVERAGE", "DEF:pred=netP.rrd:inoctets:HWPREDICT", "DEF:dev=netP.rrd:inoctets:DEVPREDICT", "DEF:fail=netP.rrd:inoctets:FAILURES", #Archivo de fallas "CDEF:scaledobs=obs,8,*", #Escalar octetos "CDEF:upper=pred,dev,2,*,+", #Dev *2 + pred limite superior "CDEF:lower=pred,dev,2,*,-", #Dev *2 - pred limite inferior "CDEF:scaledupper=upper,8,*", #Escalar valores anteriores "CDEF:scaledlower=lower,8,*",
def createAberrationImage(path):
    """Render the Holt-Winters aberration chart for ``<path>/trafico.rrd``
    and mail an alert when an aberration begins or ends.

    The first graph draws the observed in-octets (scaled to bits), the
    Holt-Winters forecast and its +/- 2-deviation bounds, plus TICK marks
    for FAILURES.  A second, throw-away graph is used only to extract the
    LAST value of the FAILURES array; edge transitions of that value drive
    the (rate-limited, via module-level counters) alert mails.

    NOTE(review): relies on module globals ``begin_aberration``,
    ``number_of_beginning_aberrations``, ``number_of_end_aberrations`` and
    on a module-level ``mail`` helper — confirm they are initialised.
    """
    begin_date = rrdtool.last(path + '/trafico.rrd') - 1000
    end_date = rrdtool.last(path + '/trafico.rrd')
    ret = rrdtool.graph(
        path + "/trafico.png",
        '--start', str(begin_date),
        '--end', str(end_date),
        '--title= Equipo 10 Fallas',
        "--vertical-label=Bytes/s",
        '--slope-mode',
        "DEF:obs=" + path + "/trafico.rrd:inoctets:AVERAGE",
        "DEF:pred=" + path + "/trafico.rrd:inoctets:HWPREDICT",
        "DEF:dev=" + path + "/trafico.rrd:inoctets:DEVPREDICT",
        "DEF:fail=" + path + "/trafico.rrd:inoctets:FAILURES",
        "CDEF:scaledobs=obs,8,*",
        #"CDEF:predict=120,-1,900,scaledobs,PREDICT",
        "CDEF:upper=pred,dev,2,*,+",
        "CDEF:lower=pred,dev,2,*,-",
        "CDEF:scaledupper=upper,8,*",
        "CDEF:scaledlower=lower,8,*",
        "CDEF:scaledpred=pred,8,*",
        "TICK:fail#FDD017:1.0:Fallas\\n",
        "LINE3:scaledobs#00FF00:In traffic\\n",
        "LINE1:scaledpred#FF00FF:Current Prediccion\\n",
        #"LINE1:predict#FF00FF:Future Prediction\\n:dashes=3",
        "LINE1:scaledupper#ff0000:Upper Bound Average bits in\\n",
        "LINE1:scaledlower#0000FF:Lower Bound Average bits in")
    global begin_aberration
    global number_of_beginning_aberrations
    global number_of_end_aberrations
    isAberration = 0
    # Throw-away graph whose only purpose is the PRINT of FAILURES LAST.
    graphtmpfile = tempfile.NamedTemporaryFile()
    values = rrdtool.graph(
        graphtmpfile.name + 'F',
        "DEF:f0=" + path + '/trafico.rrd' + ":inoctets:FAILURES",
        'PRINT:f0:LAST:%1.0lf')
    try:
        isAberration = int(values[2][0])
    except ValueError:
        # non-numeric PRINT output (e.g. NaN) — treat as "no aberration"
        pass
    if isAberration:
        begin_aberration = 1
        if not number_of_beginning_aberrations:
            #print(f"Aberration finded at {datetime.datetime.now().time()}")
            number_of_beginning_aberrations += 1
            mail.asyncsendAb(path + "/trafico.png",
                             datetime.datetime.now().time())
    # falling edge: an aberration was in progress and has now cleared
    if begin_aberration and not isAberration:
        begin_aberration = 0
        if not number_of_end_aberrations:
            #print(f"Enden aberration at {datetime.datetime.now().time()}")
            number_of_end_aberrations += 1
            mail.asyncsendAb(path + "/trafico.png",
                             datetime.datetime.now().time())
def deteccion(self, umbrales, type="CPUload"):
    """Render a baseline-detection graph for one metric and alert on breach.

    Draws the last hour of the metric (``CPUload`` or ``RAM``) with the three
    thresholds from ``umbrales`` (``breakpoint``/``set``/``go``) and a
    least-squares trend line, then extracts (via PRINT) the window maximum,
    the trend slope ``m`` and intercept ``b``.  If the extracted value
    exceeds the ``breakpoint`` threshold, a mail alert with the graph is
    sent.

    The previous revision duplicated the entire rrdtool argument list in the
    CPU and RAM branches even though both lists were byte-identical; they
    are now one parameterised call.  The ``type`` parameter keeps its
    builtin-shadowing name for backward compatibility with keyword callers.
    """
    print(type)
    ultima_lectura = int(rrdtool.last(self.path_rrd + type + self.name_rrd))
    tiempo_final = ultima_lectura
    tiempo_inicial = tiempo_final - 3600
    if type in ("CPUload", "RAM"):
        ret = rrdtool.graphv(
            self.path_rrd + type + "deteccion.png",
            "--start", str(tiempo_inicial),
            "--end", str(tiempo_final),
            "--title", type,
            "--vertical-label=Uso de " + type + "(%)",
            '--lower-limit', '0',
            '--upper-limit', '100',
            "DEF:carga=" + self.path_rrd + type + self.name_rrd + ":" + type + ":AVERAGE",
            "CDEF:umbral25=carga,25,LT,0,carga,IF",
            "VDEF:cargaMAX=carga,MAXIMUM",
            "VDEF:cargaMIN=carga,MINIMUM",
            "VDEF:cargaSTDEV=carga,STDEV",
            "VDEF:cargaLAST=carga,LAST",
            "AREA:carga#00FF00:" + type,
            "AREA:umbral25#FF9F00:Tráfico de carga mayor que 25",
            "HRULE:25#FF0000:Umbral 1 - 25%",
            "LINE2:" + umbrales['breakpoint'] + "#FF0000",
            "LINE2:" + umbrales['set'] + "#0D76FF",
            "LINE2:" + umbrales['go'] + "#00FF00",
            "PRINT:cargaMAX:%6.2lf %S",
            "GPRINT:cargaMIN:%6.2lf %SMIN",
            "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
            "GPRINT:cargaLAST:%6.2lf %SLAST",
            "VDEF:m=carga,LSLSLOPE",
            "VDEF:b=carga,LSLINT",
            'CDEF:tendencia=carga,POP,m,COUNT,*,b,+',
            "LINE2:tendencia#FFBB00",
            "PRINT:m:%6.2lf %S",
            "PRINT:b:%6.2lf %S")
    # NOTE(review): for any other ``type`` the next line raises
    # UnboundLocalError on ``ret`` — exactly as the duplicated original did.
    print(ret)
    value = ret['print[0]']  # first PRINT: cargaMAX
    res = formatNumber(value)
    ultimo_valor = float(res)
    # Slope may come back formatted like "1.23 k"; strip spaces and suffix.
    slope = float(
        formatNumber(ret['print[1]'].replace(" ", "").replace("k", "")))
    B = float(formatNumber(ret['print[2]']))
    print(slope)
    print(B)
    pre = Prediction(slope, B)
    # print(pre.predict(80))
    if ultimo_valor > float(umbrales['breakpoint']):
        nombre_asunto = "Equipo Champions - "
        send_alert_attached(nombre_asunto + "Sobrepasa Umbral línea base",
                            self.path_rrd, type + "deteccion.png",
                            type + self.name_rrd)
def put(self, path, value, rrdType, rrdCommand=None, cycleTime=None,
        min='U', max='U', useRRDDaemon=True, timestamp='N', start=None,
        allowStaleDatapoint=True):
    """
    Save the value provided in the command to the RRD file specified in path.

    If the RRD file does not exist, use the rrdType, rrdCommand, min and
    max parameters to create the file.

    NOTE(review): Python 2 only (``except rrdtool.error, err``, ``long``,
    octal literal ``0750``).

    @param path: name for a datapoint in a path
        (eg device/component/datasource_datapoint)
    @type path: string
    @param value: value to store into the RRD file
    @type value: number
    @param rrdType: RRD data type (eg ABSOLUTE, DERIVE, COUNTER)
    @type rrdType: string
    @param rrdCommand: RRD file creation command
    @type rrdCommand: string
    @param cycleTime: length of a cycle
    @type cycleTime: number
    @param min: minimum value acceptable for this metric
    @type min: number
    @param max: maximum value acceptable for this metric
    @type max: number
    @param allowStaleDatapoint: attempt to write datapoint even if a newer
        datapoint has already been written
    @type allowStaleDatapoint: boolean
    @return: None.  (The previous docstring claimed the converted value was
        returned, but every code path either writes and falls through to the
        implicit ``return None`` or returns None explicitly.)
    """
    if value is None:
        return None

    self.dataPoints += 1
    self.cycleDataPoints += 1

    if cycleTime is None:
        cycleTime = self.defaultCycleTime

    filename = self.performancePath(path) + '.rrd'
    if not rrdCommand:
        rrdCommand = self.defaultRrdCreateCommand
    if not os.path.exists(filename):
        log.debug("Creating new RRD file %s", filename)
        dirname = os.path.dirname(filename)
        if not os.path.exists(dirname):
            os.makedirs(dirname, 0750)

        min, max = map(_checkUndefined, (min, max))
        dataSource = 'DS:%s:%s:%d:%s:%s' % (
            'ds0', rrdType, self.getHeartbeat(cycleTime), min, max)
        args = [str(filename), "--step", str(self.getStep(cycleTime)),]
        if start is not None:
            args.extend(["--start", "%d" % start])
        elif timestamp != 'N':
            # backdate the file slightly so this first sample is accepted
            args.extend(["--start", str(int(timestamp) - 10)])
        args.append(str(dataSource))
        args.extend(rrdCommand.split())
        # (trailing comma below builds a throw-away 1-tuple; harmless no-op)
        rrdtool.create(*args),

    daemon_args = rrd_daemon_args() if useRRDDaemon else tuple()

    # remove unwanted chars (this is actually pretty quick)
    value = str(value).translate(None, _UNWANTED_CHARS)

    if rrdType in ('COUNTER', 'DERIVE'):
        try:
            # cast to float first because long('100.0') will fail with a
            # ValueError
            value = long(float(value))
        except (TypeError, ValueError):
            return None
    else:
        try:
            value = float(value)
        except (TypeError, ValueError):
            return None
    try:
        @rrd_daemon_retry
        def rrdtool_fn():
            return rrdtool.update(str(filename), *(daemon_args + ('%s:%s' % (timestamp, value),)))
        if timestamp == 'N' or allowStaleDatapoint:
            rrdtool_fn()
        else:
            # try to detect when the last datasample was collected
            lastTs = _LAST_RRDFILE_WRITE.get(filename, None)
            if lastTs is None:
                try:
                    lastTs = _LAST_RRDFILE_WRITE[filename] = rrdtool.last(
                        *(daemon_args + (str(filename),)))
                except Exception as ex:
                    lastTs = 0
                    log.exception("Could not determine last update to %r",
                                  filename)
            # if the current datapoint is newer than the last datapoint,
            # then write
            if lastTs < timestamp:
                _LAST_RRDFILE_WRITE[filename] = timestamp
                if log.getEffectiveLevel() < logging.DEBUG:
                    log.debug('%s: %r, currentTs = %s, lastTs = %s',
                              filename, value, timestamp, lastTs)
                rrdtool_fn()
            else:
                if log.getEffectiveLevel() < logging.DEBUG:
                    log.debug("ignoring write %s:%s", filename, timestamp)
                return None
        log.debug('%s: %r, @ %s', str(filename), value, timestamp)
    except rrdtool.error, err:
        # may get update errors when updating too quickly
        log.error('rrdtool reported error %s %s', err, path)
# Polls interface octet counters over SNMP once per second and feeds them
# into netPred.rrd, printing the aberration status after every update.
# NOTE(review): Python 2 script (print statements).  The update timestamp is
# synthesised as last+100 to fast-forward the RRD clock — confirm intent.
import time
import rrdtool
from getSNMP import consultaSNMP
from Notify import check_aberration

total_input_traffic = 0
total_output_traffic = 0

rrdpath="/home/tani/Escritorio/Trend-Non-Linear/"
pngpath="/home/tani/PycharmProjects/NoLineal/IMG/"
fname="netPred.rrd"
pngfname="predict.png"
title="Deteccion de comportamiento anomalo"

# Generate charts for last 24 hours
endDate = rrdtool.last(fname)  # last timestamp stored in the RRD
begDate = endDate - 86000

while 1:
    total_input_traffic = int(consultaSNMP('comunidadASR','localhost','1.3.6.1.2.1.2.2.1.10.1'))
    total_output_traffic = int(consultaSNMP('comunidadASR','localhost','1.3.6.1.2.1.2.2.1.16.1'))

    # "<last+100>:<in>:<out>" — explicit timestamp instead of "N"
    valor = str(rrdtool.last(fname)+100)+":" + str(total_input_traffic) + ':' + str(total_output_traffic)

    print valor
    ret = rrdtool.update(fname, valor)
    rrdtool.dump(fname,'netP.xml')
    time.sleep(1)
    print check_aberration(rrdpath,fname)
    if ret:
        # rrdtool.update returned an error indication
        print rrdtool.error()
        time.sleep(300)
# -*- coding: utf-8 -*- import time import rrdtool from getSNMP import consultaSNMP from notify import enviaAlerta total_input_traffic = 0 NOMBRE_PNG = "netX.png" BASE_RRD = "netPred.rrd" VENTANA_CORREO = 1 # En caso que haya muchas aberraciones seguidas, enviara correos a lo mas, cada n minutos init_time = rrdtool.last(BASE_RRD) #para ventana: --start init_time_100 tiempo_actual = int(time.time()) #sin ventana --start tiempo_actual timeOfLastSentMail = int(time.time() - (60 * VENTANA_CORREO)) #MUST BE this value for my condicional OID = '1.3.6.1.2.1.2.2.1.10.2' COMUNIDAD = 'variation/virtualtable' HOST = '10.100.71.200' falla_actual = 0 falla_anterior = 0 bandera_falla_iniciada = 0 # 3.6.1.2.1.2.2.1.10.10 while 1: total_input_traffic = int(consultaSNMP(COMUNIDAD, HOST, OID))
def main(argv):
    """Dump or incrementally update an RRD file into an InfluxDB database.

    Command-line entry point (see ``help()`` for the option summary).
    With -u/--update it writes the last two RRD samples; with -m/--dump it
    writes the whole archive.  Each sample becomes one InfluxDB point in
    measurement ``device`` with a single field named ``key``.

    Exits with status 2 on bad/missing arguments (after printing usage).
    """
    RRD_MIN_RES = 300  # RRD step in seconds; fetch windows and timestamps assume it
    update = False
    dump = False
    fname = ""
    host = "localhost"
    port = "8086"
    db = ""
    key = ""
    user = ""
    password = ""
    device = ""

    def help():
        # Usage text printed for -h/--help and on argument errors.
        print('Usage: rddflux.py [-u|-m] -f <RRD FILE> [-H <INFLUXDB HOST>] [-p <INFLUXDB PORT>] -d DATABASE [-U user] [-P password] [-k KEY] -D device [-h] ')
        print('Updates or dumps passed RRD File to selected InfluxDB database')
        print(' -h, --help Display help and exit')
        print(' -u, --update Only update database with last value')
        print(' -m, --dump Dump full RRD to database')
        print(' -f, --file RRD file to dump')
        print(' -H, --host Optional. Name or IP of InfluxDB server. Default localhost.')
        print(' -p, --port Optional. InfluxDB server port. Default 8086.')
        print(' -d, --database Database name where to store data.')
        print(' -U, --user Optional. Database user.')
        print(' -P, --password Optional. Database password.')
        print(' -k, --key Optional. Key used to store data values. Taken from RRD file\'s name if not specified.')
        print(' -D, --device Device the RRD metrics are related with.')

    try:
        # BUGFIX: the long-options list was corrupted (scrubbed string
        # fragments glued with '**'), which was a syntax error.  Rebuilt it:
        # options taking a value end with '='; bare flags must not.
        opts, args = getopt.getopt(
            argv, "humf:H:p:d:U:P:k:D:",
            ["help", "update", "dump", "file=", "host=", "port=",
             "database=", "user=", "password=", "key=", "device="])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ("-h", "--help"):  # BUGFIX: '--help' was never matched
            help()
            sys.exit()
        elif opt in ("-u", "--update"):
            update = True
        elif opt in ("-m", "--dump"):
            dump = True
        elif opt in ("-f", "--file"):
            fname = arg
        elif opt in ("-H", "--host"):
            host = arg
        elif opt in ("-p", "--port"):
            port = arg
        elif opt in ("-d", "--database"):
            db = arg
        elif opt in ("-U", "--user"):
            user = arg
        elif opt in ("-P", "--password"):
            password = arg
        elif opt in ("-k", "--key"):
            key = arg
        elif opt in ("-D", "--device"):
            device = arg

    # Mandatory parameters, and exactly one of -u / -m must be given.
    if device == "" or fname == "" or db == "" or (update == False and dump == False) or (update == True and dump == True):
        print("ERROR: Missing or duplicated parameters.")
        help()
        sys.exit(2)

    client = InfluxDBClient(host, port, user, password, db)
    client.query("create database "+db+";")  # Create database if it not exists

    if key == "":
        # Field key defaults to the RRD file's basename without extension.
        key = re.sub(r'\.rrd$', '', os.path.split(fname)[1])

    def _write_point(unixts, val):
        # Write one sample as an InfluxDB point (measurement=device, field=key).
        client.write_points([
            {
                "measurement": device,
                "time": unixts,
                "fields": {
                    key: val,
                }
            }
        ])

    if update == True:
        # We save the last two records of the rrd tool to avoid missing data
        lastvalue = rrdtool.fetch(fname, "AVERAGE",
                                  '-s', str(rrdtool.last(fname)-2*RRD_MIN_RES),
                                  '-e', str(rrdtool.last(fname)-RRD_MIN_RES),
                                  '-r', str(RRD_MIN_RES))
        # NOTE(review): both points reuse lastvalue[2][0][0]; the second
        # record probably should come from row [2][1] -- confirm against the
        # rrdtool.fetch row layout before changing (behaviour preserved here).
        _write_point(lastvalue[0][1], lastvalue[2][0][0])
        _write_point(lastvalue[0][1]-RRD_MIN_RES, lastvalue[2][0][0])

    if dump == True:
        allvalues = rrdtool.fetch(fname, "AVERAGE",
                                  '-e', str(rrdtool.last(fname)-RRD_MIN_RES),
                                  '-r', str(RRD_MIN_RES))
        # allvalues[0][0] is the fetch start; row i holds the sample ending
        # at start + (i+1)*step.
        for i, row in enumerate(allvalues[2]):
            _write_point(allvalues[0][0] + (i + 1) * RRD_MIN_RES, row[0])
# Python 3 variant of the SNMP polling script: samples interface octet
# counters each second, updates the RRD and runs the aberration check; loops
# forever.
import time
import rrdtool
from getSNMP import consultaSNMP
from Notify import check_aberration

total_input_traffic = 0
total_output_traffic = 0

from path import *  # supplies rrdpath / rrdname used below

title = "Deteccion de comportamiento anomalo"

# Generate charts for last 24 hours
endDate = rrdtool.last(rrdname)  # last value of the XML (timestamp of newest sample)
begDate = endDate - 86000

while 1:
    # IF-MIB ifInOctets.1 / ifOutOctets.1 counters of the local agent.
    total_input_traffic = int(
        consultaSNMP('grupo_4cm3', 'localhost', '1.3.6.1.2.1.2.2.1.10.1'))
    total_output_traffic = int(
        consultaSNMP('grupo_4cm3', 'localhost', '1.3.6.1.2.1.2.2.1.16.1'))
    # "N" lets rrdtool timestamp the sample with the current time.
    valor = "N:" + str(total_input_traffic) + ':' + str(total_output_traffic)
    print(valor)
    ret = rrdtool.update(rrdname, valor)
    rrdtool.dump(rrdname, 'pred.xml')  # export the whole RRD to XML each cycle
    time.sleep(1)
    print(check_aberration(rrdpath, rrdname))
    # rrdtool.update returns an error indication on failure; back off 5 min.
    if ret:
        print(rrdtool.error())
        time.sleep(300)
def EjecutarMc(comunidad, ip, port, name, times, umbral):
    """Least-squares (minimos cuadrados) trend check for one agent.

    Samples CPU load over SNMP, appends it to the agent's RRD, renders a
    graph with a least-squares trend line, and mails an alert when the last
    sample exceeds ``umbral`` or equals 0.

    comunidad/ip/port -- SNMP connection parameters.
    name   -- agent name; selects the host-specific hrProcessorLoad OID
              instance and the per-agent path under ``lbPathmc``.
    times  -- unused in this function; presumably consumed by the caller's
              scheduler -- TODO confirm.
    umbral -- CPU-load threshold in percent.
    """
    # Each host type exposes hrProcessorLoad under a different instance index.
    if name == 'linux' or name == "linuxmario":
        carga_CPU = int(
            consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.3.3.1.2.196608'))
    elif name == "examen":
        carga_CPU = int(
            consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.3.3.1.2.769'))
    else:
        carga_CPU = int(
            consultaSNMP(comunidad, ip, port, '1.3.6.1.2.1.25.3.3.1.2.3'))
    agentPath = lbPathmc + name + "/"  # per-agent working directory
    #carga_CPU = int(consultaSNMP(comunidad , ip , port ,'1.3.6.1.2.1.25.3.3.1.2.196608'))
    valor = "N:" + str(carga_CPU)  # "N:" = timestamp the sample with "now"
    time.sleep(1)
    rrdtool.update(str(agentPath + str(name)) + 'trend.rrd', valor)
    # Graph the last hour, extending the x-axis ~4.3 h past "now" so the
    # trend line's predicted threshold crossing is visible.
    tiempo_final = int(rrdtool.last(agentPath + str(name) + "trend.rrd"))
    tiempo_inicial = tiempo_final - 3600
    ret2 = rrdtool.graphv(
        str(agentPath + name) + "deteccion.png", "--start",
        str(tiempo_inicial), "--end", str(tiempo_final + 15500), "--title",
        "Carga de CPU", "--vertical-label=Uso de CPU (%)", '--lower-limit',
        '0', '--upper-limit', '100',
        "DEF:carga=" + agentPath + name + "trend.rrd:CPUload:AVERAGE",
        # Curve restricted to samples above the threshold (0 elsewhere).
        "CDEF:umbral25=carga," + str(umbral) + ",LT,0,carga,IF",
        "VDEF:cargaMAX=carga,MAXIMUM",
        "VDEF:cargaMIN=carga,MINIMUM",
        "VDEF:cargaSTDEV=carga,STDEV",
        "VDEF:cargaLAST=carga,LAST",
        "AREA:carga#00FF00:Carga del CPU",
        "AREA:umbral25#FF9F00:Tráfico de carga mayor que" + str(umbral),
        "HRULE:" + str(umbral) + "#FF0000:" + str(umbral) + "%",
        "GPRINT:cargaMIN:%6.2lf %SMIN",
        "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
        "GPRINT:cargaLAST:%6.2lf %SLAST",
        # ---LEAST SQUARES METHOD (metodo de minimos cuadrados)
        "VDEF:m=carga,LSLSLOPE",
        "VDEF:b=carga,LSLINT",
        # Trend line y = m*x + b evaluated along the graph's time axis.
        'CDEF:avg2=carga,POP,m,COUNT,*,b,+',
        "LINE2:avg2#FFBB00",
        #'VDEF:um=m,0,LT,90,0,IF'
        #'VDEF:um2=um,100,LT,0,90,0,IF'
        # Clip the trend line to the bands around each threshold so that
        # FIRST yields the predicted crossing time.
        'CDEF:abc=avg2,' + str(umbral) + ',' + str(umbral + 100000000) + ',LIMIT',
        'CDEF:abc2=avg2,-10,0,LIMIT',
        'VDEF:primero=abc,FIRST',
        'VDEF:primero2=abc2,FIRST',
        "GPRINT:primero: Alcanzara el umbral " + str(umbral) + "% @ %c :strftime",
        "GPRINT:primero2: Alcanzara el umbral 0% @ %c :strftime",
        "PRINT:primero: Alcanzara umbral el @ %c :strftime",
        "PRINT:cargaLAST:%6.2lf %S ")
    # graphv exposes the PRINT directives as ret2['print[0]'], ['print[1]'], ...
    alcanza_umbral = ret2['print[0]']  # predicted threshold-crossing text
    ultimo_valor = ret2['print[1]']  # last CPU-load sample (formatted number)
    #print ("alcanza umbral"+ str(alcanza_umbral)+str(ultimo_valor))
    #valores= ultimo_valor.split(" ")
    #print (ultimo_valor + alcanza_umbral)
    #print("Archivo generado en " + str(agentPath + name)+"deteccion.png")
    if float(ultimo_valor) > int(umbral):
        sendAlertEmail(
            "Agente " + name + " sobrepasó umbral de minimos cuadrados" +
            str(ultimo_valor),
            str(agentPath + name) + "deteccion.png",
            str(agentPath + str(name)) + 'trend.rrd')
    elif (float(ultimo_valor) == 0):
        # A reading of exactly 0 is treated as its own alert condition.
        sendAlertEmail(
            "Agente " + name + " sobrepasó umbral 0 de minimos cuadrados" +
            str(ultimo_valor),
            str(agentPath + name) + "deteccion.png",
            str(agentPath + str(name)) + 'trend.rrd')
import time import rrdtool from getSNMP import consultaSNMP total_input_traffic = 0 total_output_traffic = 0 while 1: total_input_traffic = int( consultaSNMP('comunidadSNMP', 'localhost', '1.3.6.1.2.1.2.2.1.10.3')) total_output_traffic = int( consultaSNMP('comunidadSNMP', 'localhost', '1.3.6.1.2.1.2.2.1.16.3')) valor = str(rrdtool.last("netP.rrd") + 100) + ":" + str( total_input_traffic) + ':' + str(total_output_traffic) print valor ret = rrdtool.update('netP.rrd', valor) rrdtool.dump('netP.rrd', 'netP.xml') ret = rrdtool.graph( "netP.png", "--start", '1524174069', "--end", str(rrdtool.last('netP.rrd')), "--vertical-label=Bytes/s", "DEF:obs=netP.rrd:inoctets:AVERAGE", "DEF:outoctets=netP.rrd:outoctets:AVERAGE", "DEF:pred=netP.rrd:inoctets:HWPREDICT", "DEF:dev=netP.rrd:inoctets:DEVPREDICT", #"RRA:DEVSEASONAL:1d:0.1:2",
import smtplib import rrdtool COMMASPACE = ', ' tiempo_final = int(rrdtool.last("trend.rrd")) tiempo_inicial = tiempo_final - 360 class Notification1: #def run(self): ret = rrdtool.graphv("deteccion.png", "--start", str(tiempo_inicial), "--end", str(tiempo_final), "--title", "Carga de CPU", "--vertical-label=Uso de CPU (%)", '--lower-limit', '0', '--upper-limit', '100', "DEF:carga=trend.rrd:CPUload:AVERAGE", "CDEF:umbral80=carga,80,LT,0,carga,IF", "VDEF:cargaMAX=carga,MAXIMUM", "VDEF:cargaMIN=carga,MINIMUM", "VDEF:cargaSTDEV=carga,STDEV", "VDEF:cargaLAST=carga,LAST", "AREA:carga#00FF00:Carga del CPU", "AREA:umbral80#FF9F00:Tráfico de carga mayor que 80", "AREA:umbral80#FF9F00:Tráfico de carga mayor que 25", "HRULE:80#FF0000:Umbral 1 - 80%", "PRINT:cargaMAX:%6.2lf %S", "GPRINT:cargaMIN:%6.2lf %SMIN", "GPRINT:cargaSTDEV:%6.2lf %SSTDEV",
return 0 stats = [] res = [] step = ds_info['step'] for key in ds_info.keys(): if 'index' in key: if key[0:2] == "ds": ds_name = key[3:] ds_name = ds_name[0: ds_name.find(']')] stats.append(ds_name) if 'pdp_per_row' in key: res.append(ds_info[key] * step) first = rrdtool.first(filepath) last = rrdtool.last(filepath) return {'stats': stats, 'resolutions': res, 'first': first, 'last': last} def get_vm_stats(self, vm, names, start="-5min", end="now", resolution="10"): if not check_stat_exists(vm): raise CMException('stat_not_exists') res = [] filename = get_path(vm) info, ds_rrd, data = rrdtool.fetch(filename, "AVERAGE", "--start", str(start), "--end", str(end), "--resolution", str(resolution)) start_rrd = info[0] end_rrd = info[1] step = info[2] ts = start_rrd total = self.get_vm_total(vm, names)
def check_rrds(rrddir, max_age_minutes, max_age_intervals=2.0,
               sat_availability_config=None, test_ref_datetime=None):
    """ analyses the rrdtool files in the specified directory

    Classifies every ``*.rrd`` file by the age of its last update:
    - older than the allowed age -> ``old_prods`` (or ``old_prods_ok`` when a
      configured satellite-availability service explains the gap),
    - recently updated after a gap (only the newest of the last datapoints is
      non-None) -> ``new_prods``.

    max_age_minutes  -- fixed age limit; when None it is derived per file as
                        ``max_age_intervals`` * RRD step.
    test_ref_datetime -- debugging override for "now" (default: utcnow).

    Returns (old_prods, new_prods, all_prods, old_prods_ok), each a list of
    product names extracted from the file names.
    """
    old_prods = []
    new_prods = []
    all_prods = []
    old_prods_ok = []
    if sat_availability_config:
        sat_avail_service = SatDataAvailabilityService(
            config_yml_filename=sat_availability_config)
    else:
        sat_avail_service = None
    files = listfiles(rrddir, '*.rrd')
    for rrdfile in files:
        step = rrd.info(rrdfile)['step']
        # Age limit for this file: fixed, or a multiple of the RRD step.
        if max_age_minutes is None:
            current_max_age_minutes = max_age_intervals * (step / 60.0)
        else:
            current_max_age_minutes = max_age_minutes
        if test_ref_datetime is None:
            # should be default
            reftime_dt = datetime.utcnow() - timedelta(
                seconds=current_max_age_minutes * 60)
        else:
            # only for debugging purposes
            reftime_dt = test_ref_datetime - timedelta(
                seconds=current_max_age_minutes * 60)
        reftime = to_unix_seconds(reftime_dt)
        name = extract_prod_name(rrdfile)
        last = rrd.last(rrdfile)  # unix time of the file's newest update
        # print "name: " + name + " reftime: " + str(reftime)
        # + " last: " + str(last) + " max_age_minutes: "
        # + str(current_max_age_minutes)
        # + " steps: " + str(step) + " max_age_intervals: "
        # + str(max_age_intervals)
        if last < reftime:
            # Stale file: check whether a known availability issue excuses it.
            if sat_avail_service:
                res = sat_avail_service.get_data_availability_error(reftime_dt,
                                                                    name)
                if res:
                    print "EUMETSAT UNS: Nr {}, Impact {}, " \
                          "Reference Time {}, Product {}".format(res.number,
                                                                 res.impact,
                                                                 reftime_dt,
                                                                 name)
                    old_prods_ok.append(name)
                else:
                    old_prods.append(name)
            else:
                old_prods.append(name)
        else:
            # retrieve last 3 datapoints
            # seems that fetch delivers one datapoint before and after required
            # start/end, so using "--start e-step*2 --end last-step" instead of
            # "--start e-step*3 --end last"
            data = rrd.fetch(rrdfile, 'MAX', '--resolution', str(step),
                             '--start', 'e-' + str(step * 2),
                             '--end', str(last - step))
            # First datasource value of each fetched row.
            lastvalues = [a for (a, _) in data[2]]
            # "New" = only the most recent datapoint exists, all earlier ones
            # in the window are None (product just started reporting).
            if (lastvalues[-1] is not None and
                    (lastvalues[:-1] == [None] * (len(lastvalues) - 1))):
                new_prods.append(name)
        all_prods.append(name)
    return old_prods, new_prods, all_prods, old_prods_ok