def generate_latest(registry=core.REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.

    Renders each collected metric as ``# HELP``/``# TYPE`` header lines
    followed by one line per sample, then UTF-8 encodes the result.
    '''
    output = []
    for metric in registry.collect():
        # Escape backslashes and newlines in the help text, per the
        # Prometheus text exposition format.
        output.append('# HELP {0} {1}'.format(
            metric.name,
            metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for sample in metric.samples:
            # Samples may be 3-tuples (no timestamp) or 4-tuples.
            if len(sample) == 3:
                name, labels, value = sample
                timestamp = None
            else:
                name, labels, value, timestamp = sample
            if labels:
                labelstr = '{{{0}}}'.format(','.join([
                    '{0}="{1}"'.format(
                        k,
                        v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                    for k, v in sorted(labels.items())]))
            else:
                labelstr = ''
            # BUG FIX: removed stray debug print(sample) that polluted stdout
            # on every scrape.
            # BUG FIX: compare the timestamp against None so a legitimate
            # timestamp of 0 (the epoch) is not silently dropped.
            output.append('{0}{1} {2}{3}\n'.format(
                name, labelstr, core._floatToGoString(value),
                ' %s' % timestamp if timestamp is not None else ''))
    return ''.join(output).encode('utf-8')
def generate_latest_target(self, request):
    """Collect metrics for the requested target/section and write the
    Prometheus text exposition to *request*; 500 when nothing was collected."""
    target = request.args.get('target', [None])[0]
    section = request.args.get('section', ['default'])[0]
    lines = []
    for metric in self.collect(target, section):
        escaped_doc = metric.documentation.replace('\\', r'\\').replace('\n', r'\n')
        lines.append('# HELP {0} {1}'.format(metric.name, escaped_doc))
        lines.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for name, labels, value in metric.samples:
            labelstr = ''
            if labels:
                pairs = [
                    '{0}="{1}"'.format(
                        key,
                        val.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                    for key, val in sorted(labels.items())
                ]
                labelstr = '{{{0}}}'.format(','.join(pairs))
            lines.append('{0}{1} {2}\n'.format(name, labelstr,
                                               _floatToGoString(value)))
    if lines:
        request.write(''.join(lines).encode('utf-8'))
    else:
        # Empty output means the collector could not reach the backend.
        request.setResponseCode(500, message=('cannot connect to vmware'))
    request.finish()
    return
def generate_latest():
    """Render all metrics from gen_metrics() in Prometheus text format.

    Returns the exposition as a plain (unencoded) string.
    """
    # Removed commented-out debug print statements that were left in the body.
    output = []
    for metric in gen_metrics():
        # Escape backslashes and newlines in the help text, per the
        # Prometheus text exposition format.
        output.append('# HELP {0} {1}'.format(
            metric.name,
            metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for name, labels, value in metric.samples:
            if labels:
                labelstr = '{{{0}}}'.format(','.join([
                    '{0}="{1}"'.format(
                        k,
                        v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                    for k, v in sorted(labels.items())]))
            else:
                labelstr = ''
            output.append('{0}{1} {2}\n'.format(name, labelstr,
                                                _floatToGoString(value)))
    return ''.join(output)
def generate_latest_metrics(self, request):
    """Write the latest metrics for the resolved vSphere host to *request*.

    The host is taken from the config section if set, otherwise from the
    ``target`` or ``vsphere_host`` query arguments. Responds with 500 when
    no host is given or when collection produces no output.
    """
    section = request.args.get('section', ['default'])[0]
    if self.config[section].get('vsphere_host') and self.config[
            section].get('vsphere_host') != "None":
        vsphere_host = self.config[section].get('vsphere_host')
    elif request.args.get('target', [None])[0]:
        vsphere_host = request.args.get('target', [None])[0]
    elif request.args.get('vsphere_host', [None])[0]:
        vsphere_host = request.args.get('vsphere_host')[0]
    else:
        request.setResponseCode(500)
        log("No vsphere_host or target defined")
        request.write('No vsphere_host or target defined!\n')
        request.finish()
        # BUG FIX: without this return the function fell through and raised
        # NameError on the undefined vsphere_host below.
        return

    # BUG FIX: a bare isinstance(value, long) raises NameError on Python 3.
    # Probe once, outside the loop; int already covers bool.
    try:
        numeric_types = (int, long, float)  # noqa: F821 - Python 2 only
    except NameError:  # Python 3: long was merged into int
        numeric_types = (int, float)

    output = []
    for metric in self.collect(vsphere_host, section):
        # Escape backslashes and newlines in the help text, per the
        # Prometheus text exposition format.
        output.append('# HELP {0} {1}'.format(
            metric.name,
            metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for name, labels, value in metric.samples:
            if labels:
                labelstr = '{{{0}}}'.format(','.join([
                    '{0}="{1}"'.format(
                        k,
                        v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                    for k, v in sorted(labels.items())]))
            else:
                labelstr = ''
            # Only numeric samples are emitted; anything else is skipped.
            if isinstance(value, numeric_types):
                output.append('{0}{1} {2}\n'.format(
                    name, labelstr, _floatToGoString(float(value))))
    if output:
        request.write(''.join(output).encode('utf-8'))
        request.finish()
    else:
        request.setResponseCode(500, message=('cannot connect to vmware'))
        request.finish()
    return
def sample_line(s):
    """Format one sample *s* as a single Prometheus text-exposition line."""
    labelstr = ''
    if s.labels:
        escaped_pairs = [
            '{0}="{1}"'.format(
                key,
                val.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
            for key, val in sorted(s.labels.items())
        ]
        labelstr = '{{{0}}}'.format(','.join(escaped_pairs))
    if s.timestamp is not None:
        # Prometheus expects timestamps in milliseconds.
        timestamp = ' {0:d}'.format(int(float(s.timestamp) * 1000))
    else:
        timestamp = ''
    return '{0}{1} {2}{3}\n'.format(
        s.name, labelstr, core._floatToGoString(s.value), timestamp)
def generate_latest(registry=core.REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.'''
    output = []
    for metric in registry.collect():
        mname = metric.name
        mtype = metric.type
        # Munging from OpenMetrics into Prometheus format.
        if mtype == 'counter':
            mname += '_total'
        elif mtype == 'info':
            mname += '_info'
            mtype = 'gauge'
        elif mtype == 'stateset':
            mtype = 'gauge'
        elif mtype == 'gaugehistogram':
            # A gauge histogram is really a gauge, but this captures the
            # structure better.
            mtype = 'histogram'
        elif mtype == 'unknown':
            mtype = 'untyped'
        escaped_doc = metric.documentation.replace('\\', r'\\').replace('\n', r'\n')
        output.append('# HELP {0} {1}'.format(mname, escaped_doc))
        output.append('\n# TYPE {0} {1}\n'.format(mname, mtype))
        for sample in metric.samples:
            if sample.name == metric.name + '_created':
                # Ignore OpenMetrics specific sample.
                # TODO: Make these into a gauge.
                continue
            labelstr = ''
            if sample.labels:
                pairs = [
                    '{0}="{1}"'.format(
                        key,
                        val.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                    for key, val in sorted(sample.labels.items())
                ]
                labelstr = '{{{0}}}'.format(','.join(pairs))
            timestamp = ''
            if sample.timestamp is not None:
                # Convert to milliseconds.
                timestamp = ' {0:d}'.format(int(float(sample.timestamp) * 1000))
            output.append('{0}{1} {2}{3}\n'.format(
                sample.name, labelstr,
                core._floatToGoString(sample.value), timestamp))
    return ''.join(output).encode('utf-8')
def _generate_output_text(self, metric):
    """Render *metric* as a list of Prometheus text-format line fragments."""
    escaped_doc = metric.documentation.replace('\\', r'\\').replace('\n', r'\n')
    lines = [
        '# HELP {0} {1}'.format(metric.name, escaped_doc),
        '\n# TYPE {0} {1}\n'.format(metric.name, metric.type),
    ]
    for name, labels, value in metric.samples:
        labelstr = ''
        if labels:
            pairs = [
                '{0}="{1}"'.format(
                    key,
                    val.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                for key, val in sorted(labels.items())
            ]
            labelstr = '{{{0}}}'.format(','.join(pairs))
        lines.append('{0}{1} {2}\n'.format(name, labelstr,
                                           _floatToGoString(value)))
    return lines
def generate_latest(registry=REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.'''
    chunks = []
    for metric in registry.collect():
        escaped_doc = metric.documentation.replace('\\', r'\\').replace('\n', r'\n')
        chunks.append('# HELP {0} {1}'.format(metric.name, escaped_doc))
        chunks.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for name, labels, value in metric.samples:
            label_text = ''
            if labels:
                pairs = [
                    '{0}="{1}"'.format(
                        key,
                        # Falsy label values (None, '') are emitted empty.
                        val.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"') if val else '')
                    for key, val in sorted(labels.items())
                ]
                label_text = '{{{0}}}'.format(','.join(pairs))
            chunks.append('{0}{1} {2}\n'.format(name, label_text,
                                                core._floatToGoString(value)))
    return ''.join(chunks).encode('utf-8')
def generate_latest(registry=core.REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.

    Renders each collected metric as ``# HELP``/``# TYPE`` header lines
    followed by one line per sample, then UTF-8 encodes the result.
    '''
    output = []
    for metric in registry.collect():
        # Escape backslashes and newlines in the help text, per the
        # Prometheus text exposition format.
        output.append('# HELP {0} {1}'.format(
            metric.name,
            metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for sample in metric.samples:
            # Samples may be 3-tuples (no timestamp) or 4-tuples.
            if len(sample) == 3:
                name, labels, value = sample
                timestamp = None
            else:
                name, labels, value, timestamp = sample
            if labels:
                labelstr = '{{{0}}}'.format(','.join(
                    ['{0}="{1}"'.format(
                        k,
                        v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                     for k, v in sorted(labels.items())]))
            else:
                labelstr = ''
            # BUG FIX: compare the timestamp against None so a legitimate
            # timestamp of 0 (the epoch) is not silently dropped.
            output.append('{0}{1} {2}{3}\n'.format(
                name, labelstr, core._floatToGoString(value),
                ' %s' % timestamp if timestamp is not None else ''))
    return ''.join(output).encode('utf-8')
def collect(files):
    """This almost verbatim from MultiProcessCollector.collect().

    The original collects all results in a format designed to be scraped. We
    instead need to collect limited results, in a format that can be written
    back to disk. To facilitate this, this version of collect() preserves
    label ordering, and does not aggregate the histograms.

    Specifically, it differs from the original:

    1. it takes its files as an argument, rather than hardcoding '*.db'
    2. it does not accumulate histograms
    3. it preserves label order, to facilitate being inserted back into an
       mmap file.

    It needs to be kept up to date with changes to prometheus_client as much
    as possible, or until changes are landed upstream to allow reuse of
    collect().
    """
    from prometheus_client import core
    metrics = {}
    # Pass 1: read every raw (name, labels, value) sample out of the files.
    for f in files:
        if not os.path.exists(f):
            continue
        # verbatim from here...
        # Filenames look like '<type>_<mode>_<pid>.db'; the metric type is
        # the first underscore-separated part.
        parts = os.path.basename(f).split('_')
        typ = parts[0]
        d = core._MmapedDict(f, read_mode=True)
        for key, value in d.read_all_values():
            # Each key is a JSON-encoded 4-tuple.
            metric_name, name, labelnames, labelvalues = json.loads(key)
            metric = metrics.get(metric_name)
            if metric is None:
                metric = core.Metric(metric_name, 'Multiprocess metric', typ)
                metrics[metric_name] = metric
            if typ == 'gauge':
                # parts[2] is '<pid>.db'; strip the 3-char '.db' suffix.
                pid = parts[2][:-3]
                metric._multiprocess_mode = parts[1]
                # Tag gauge samples with the owning pid so per-mode
                # aggregation below can strip or keep it.
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)) + (('pid', pid), ),
                    value,
                )
            else:
                # The duplicates and labels are fixed in the next for.
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)),
                    value,
                )
        d.close()
    # Pass 2: merge duplicate samples according to each metric's semantics.
    for metric in metrics.values():
        samples = defaultdict(float)
        buckets = {}
        for name, labels, value in metric.samples:
            if metric.type == 'gauge':
                without_pid = tuple(l for l in labels if l[0] != 'pid')
                if metric._multiprocess_mode == 'min':
                    current = samples.setdefault((name, without_pid), value)
                    if value < current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'max':
                    current = samples.setdefault((name, without_pid), value)
                    if value > current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'livesum':
                    samples[(name, without_pid)] += value
                else:  # all/liveall
                    samples[(name, labels)] = value
            elif metric.type == 'histogram':
                bucket = tuple(float(l[1]) for l in labels if l[0] == 'le')
                if bucket:  # _bucket
                    without_le = tuple(l for l in labels if l[0] != 'le')
                    buckets.setdefault(without_le, {})
                    buckets[without_le].setdefault(bucket[0], 0.0)
                    buckets[without_le][bucket[0]] += value
                else:  # _sum/_count
                    samples[(name, labels)] += value
            else:  # Counter and Summary.
                samples[(name, labels)] += value
        # end of verbatim copy
        # modified to remove accumulation
        if metric.type == 'histogram':
            # Emit per-bucket values without cumulative accumulation, so the
            # result can be written straight back into an mmap file.
            for labels, values in buckets.items():
                for bucket, value in sorted(values.items()):
                    key = (
                        metric.name + '_bucket',
                        labels + (('le', core._floatToGoString(bucket)), ),
                    )
                    samples[key] = value
        # Convert to correct sample format.
        metric.samples = [
            # OrderedDict used instead of dict
            (name, OrderedDict(labels), value)
            for (name, labels), value in samples.items()
        ]
    return metrics.values()
def legacy_collect(files):
    """This almost verbatim from MultiProcessCollector.collect(), pre 0.4.0

    The original collects all results in a format designed to be scraped. We
    instead need to collect limited results, in a format that can be written
    back to disk. To facilitate this, this version of collect() preserves
    label ordering, and does not aggregate the histograms.

    Specifically, it differs from the original:

    1. it takes its files as an argument, rather than hardcoding '*.db'
    2. it does not accumulate histograms
    3. it preserves label order, to facilitate being inserted back into an
       mmap file.

    It needs to be kept up to date with changes to prometheus_client as much
    as possible, or until changes are landed upstream to allow reuse of
    collect().
    """
    from prometheus_client import core
    metrics = {}
    # Pass 1: read every raw (name, labels, value) sample out of the files.
    for f in files:
        if not os.path.exists(f):
            continue
        # verbatim from here...
        # Filenames look like '<type>_<mode>_<pid>.db'; the metric type is
        # the first underscore-separated part.
        parts = os.path.basename(f).split('_')
        typ = parts[0]
        d = core._MmapedDict(f, read_mode=True)
        for key, value in d.read_all_values():
            # Note: key format changed in 0.4+
            metric_name, name, labelnames, labelvalues = json.loads(key)
            metric = metrics.get(metric_name)
            if metric is None:
                metric = core.Metric(metric_name, 'Multiprocess metric', typ)
                metrics[metric_name] = metric
            if typ == 'gauge':
                # parts[2] is '<pid>.db'; strip the 3-char '.db' suffix.
                pid = parts[2][:-3]
                metric._multiprocess_mode = parts[1]
                # Tag gauge samples with the owning pid so per-mode
                # aggregation below can strip or keep it.
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)) + (('pid', pid), ),
                    value,
                )
            else:
                # The duplicates and labels are fixed in the next for.
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)),
                    value,
                )
        d.close()
    # Pass 2: merge duplicate samples according to each metric's semantics.
    for metric in metrics.values():
        samples = defaultdict(float)
        buckets = {}
        for name, labels, value in metric.samples:
            if metric.type == 'gauge':
                without_pid = tuple(l for l in labels if l[0] != 'pid')
                if metric._multiprocess_mode == 'min':
                    current = samples.setdefault((name, without_pid), value)
                    if value < current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'max':
                    current = samples.setdefault((name, without_pid), value)
                    if value > current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'livesum':
                    samples[(name, without_pid)] += value
                else:  # all/liveall
                    samples[(name, labels)] = value
            elif metric.type == 'histogram':
                bucket = tuple(float(l[1]) for l in labels if l[0] == 'le')
                if bucket:  # _bucket
                    without_le = tuple(l for l in labels if l[0] != 'le')
                    buckets.setdefault(without_le, {})
                    buckets[without_le].setdefault(bucket[0], 0.0)
                    buckets[without_le][bucket[0]] += value
                else:  # _sum/_count
                    samples[(name, labels)] += value
            else:  # Counter and Summary.
                samples[(name, labels)] += value
        # end of verbatim copy
        # modified to remove accumulation
        if metric.type == 'histogram':
            # Emit per-bucket values without cumulative accumulation, so the
            # result can be written straight back into an mmap file.
            for labels, values in buckets.items():
                for bucket, value in sorted(values.items()):
                    key = (
                        metric.name + '_bucket',
                        labels + (('le', core._floatToGoString(bucket)),),
                    )
                    samples[key] = value
        # Convert to correct sample format.
        metric.samples = [
            # OrderedDict used instead of dict
            (name, OrderedDict(labels), value)
            for (name, labels), value in samples.items()
        ]
    return metrics.values()