def find(self, match=""): o, e = self.query( match, "_" + MetricStore.group_suffix( self.group ) ) results = [] if e: return results try: results = json.loads( o ); return results["results"] except: return results
def __init__(self, hostname="localhost", port=None, db_name="metric", username="******", password="******", group="/universe", md_col="md", val_col="", val_ttl=3600*24*180, **kwds): InfluxDBStore.__init__( self, hostname=hostname, port=port, db_name=db_name, username=username, password=password ) self.group = group self.create_db( "_" + MetricStore.group_suffix( self.group ) ) self._val_ttl = val_ttl self.batch_count = 0 self.time_precision = TIME_PRECISION self.addtags = False if TAGFILE: self.tagfile = TAGFILE self.addtags = True self.batch = {} self.batch_timestamp = time.time() self.time_multiplier = 1
def send_batch(self):
    """
    Send queued data to InfluxDB. Data that cannot be sent is kept
    queued for the next attempt.
    """
    metrics_buffer = []

    def append_metric(ts, tags, mname, value):
        # Coerce to float where possible; inf/NaN are not storable, so map
        # them to 0. Anything non-numeric is stored as a string.
        try:
            value = float(value)
            if math.isinf(value) or math.isnan(value):
                value = 0
        except (TypeError, ValueError):
            value = str(value)
        #mname = mname.replace(",", "\,")
        # this should work with InfluxDB newer than 0.9 (https://github.com/influxdata/influxdb/issues/3183)
        new_name = mname.replace(",", "#")
        if new_name != mname:
            # warning disabled since it floods the log
            #log.warn("escaped measurement name: '%s' to '%s'" % (mname, new_name))
            mname = new_name
        # Copy tags so later mutations (e.g. of the "quant" tag) do not
        # retroactively change points already in the buffer.
        mjson = {"time": ts, "tags": dict(tags), "measurement": mname,
                 "fields": {"value": value}}
        metrics_buffer.append(mjson)
        log.debug("store metric: %s" % mjson)

    try:
        # build metrics data
        for path in self.batch.keys():
            tags = {}
            # ex. path: servers.node6.likwid.cpu1.dpmflops
            pathlist = path.split(".")
            if len(pathlist) >= 4:
                pathlist.pop(0)          # drop the "servers" prefix
                mname = pathlist.pop()   # measurement name is the last component
                host = pathlist.pop(0)
                collector = pathlist.pop(0)
                tags["host"] = host
                tags["collector"] = collector
                for p in pathlist:
                    if p.startswith("cpu"):
                        tags["cpu"] = p.replace("cpu", "")
                        pathlist[pathlist.index(p)] = ""
                    elif p.startswith("total"):
                        mname = "sum." + mname
                        pathlist[pathlist.index(p)] = ""
                if collector == "likwid":
                    for p in pathlist:
                        if p in ["avg", "min", "max", "sum"]:
                            mname = p + "." + mname
                            pathlist[pathlist.index(p)] = ""
                elif collector == "iostat":
                    tags["disk"] = pathlist[0]
                    pathlist[0] = ""
            else:
                # Short paths are stored verbatim, without tag extraction.
                mname = path
            for item in self.batch[path]:
                ts = item[0]
                value = item[1]
                if len(item) > 2:
                    tags["job"] = item[2]
                if isinstance(value, list):
                    quants = value[0]
                    if isinstance(quants, list):
                        # Quantile list plus average, e.g.:
                        #   tags  = {'host': u'tb033', 'collector': u'likwid', 'cpu': u'6'}
                        #   mname = dpmuops
                        #   value = [[28.0601539612, 28.0601539612, ..., 28.0601539612], 28.0601539612]
                        # Quantiles are aggregates, so drop the host tag.
                        if "host" in tags:
                            del tags["host"]
                        nquants = len(quants)
                        for n in xrange(0, nquants):
                            tags["quant"] = str(100 / (nquants - 1) * n)
                            append_metric(ts, tags, mname, quants[n])
                        if len(value) >= 2:
                            tags["quant"] = "avg"
                            append_metric(ts, tags, mname, value[1])
                elif isinstance(value, (basestring, float, int, long)):
                    append_metric(ts, tags, mname, value)
                else:
                    log.warn("Don't know how to handle metric with value type %s. Metric ignored!" % type(value))
        _o, e = self.write(metrics_buffer, "_" + MetricStore.group_suffix(self.group))
        if not e or self.batch_count >= MAX_CACHE_SIZE:
            if self.batch_count >= MAX_CACHE_SIZE:
                log.warn("Discarding %d metrics (check for previous failures)!" % self.batch_count)
            self.batch = {}
            self.batch_count = 0
            self.time_multiplier = 1
    except Exception:
        # Re-raise unchanged; a bare raise preserves the original traceback
        # (the previous "raise e" discarded it under Python 2).
        raise
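# Path-decomposition sketch: send_batch() turns a dotted metric path into a
# measurement name plus tags. Worked examples (values hypothetical, traced
# from the branches above):
#
#   "servers.node6.likwid.cpu1.dpmflops"
#     -> measurement "dpmflops",
#        tags {"host": "node6", "collector": "likwid", "cpu": "1"}
#
#   "servers.node6.iostat.sda.read_kbs"
#     -> measurement "read_kbs",
#        tags {"host": "node6", "collector": "iostat", "disk": "sda"}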