Example #1
import json

def write_metrics(metrics, histogram_file, counter_file):
    from prometheus_client import core
    try:
        # prometheus_client 0.4+ provides the key format directly
        key_func = core._mmap_key
    except AttributeError:
        # pre 0.4 key format
        def key_func(metric_name, name, labelnames, labelvalues):
            return json.dumps(
                (metric_name, name, tuple(labelnames), tuple(labelvalues)))

    histograms = core._MmapedDict(histogram_file)
    counters = core._MmapedDict(counter_file)

    try:
        for metric in metrics:
            if metric.type == 'histogram':
                sink = histograms
            elif metric.type == 'counter':
                sink = counters
            else:
                continue

            for sample in metric.samples:
                # prometheus_client 0.4+ adds extra fields
                name, labels, value = sample[:3]
                key = key_func(
                    metric.name,
                    name,
                    tuple(labels),
                    tuple(labels.values()),
                )
                sink.write_value(key, value)
    finally:
        histograms.close()
        counters.close()
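
A quick usage sketch: feed write_metrics() the output of the collect() helper shown in Example #3 below. The file names here are made up for illustration.

# Hypothetical driver code; 'histogram.db' / 'counter.db' are arbitrary
# writable paths, and the input files follow the multiprocess naming scheme.
metrics = collect(['histogram_1234.db', 'counter_1234.db'])
write_metrics(metrics, 'histogram.db', 'counter.db')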
Example #2
import json

def write_metrics(metrics, histogram_file, counter_file):
    from prometheus_client.core import _MmapedDict
    histograms = _MmapedDict(histogram_file)
    counters = _MmapedDict(counter_file)

    try:
        for metric in metrics:
            if metric.type == 'histogram':
                sink = histograms
            elif metric.type == 'counter':
                sink = counters
            else:
                # skip other types; otherwise sink would be unbound (or
                # stale from the previous metric)
                continue

            # samples are plain (name, labels, value) 3-tuples pre 0.4;
            # prometheus_client 0.4+ adds extra fields (see Example #1)
            for name, labels, value in metric.samples:
                key = json.dumps(
                    (metric.name, name, tuple(labels), tuple(labels.values())))
                sink.write_value(key, value)
    finally:
        histograms.close()
        counters.close()
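
The json key written above is exactly what legacy_collect() (Example #6 below) parses back out. A small round-trip illustration with made-up values; note that json turns the tuples into lists, which the zip() calls in the collectors handle fine:

import json

key = json.dumps(('requests', 'requests_total', ('method',), ('GET',)))
metric_name, name, labelnames, labelvalues = json.loads(key)
assert labelnames == ['method'] and labelvalues == ['GET']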
Example #3
import json
import os
from collections import OrderedDict, defaultdict

def collect(files):
    """This is copied almost verbatim from MultiProcessCollector.collect().

    The original collects all results in a format designed to be scraped. We
    instead need to collect limited results, in a format that can be written
    back to disk. To facilitate this, this version of collect() preserves label
    ordering, and does not aggregate the histograms.

    Specifically, it differs from the original:

    1. it takes its files as an argument, rather than hardcoding '*.db'
    2. it does not accumulate histograms
    3. it preserves label order, to facilitate being inserted back into an mmap
       file.

    It needs to be kept in sync with prometheus_client as much as possible,
    until changes land upstream that allow collect() to be reused directly.
    """
    from prometheus_client import core
    metrics = {}
    for f in files:
        if not os.path.exists(f):
            continue
        # verbatim from here...
        parts = os.path.basename(f).split('_')
        typ = parts[0]
        d = core._MmapedDict(f, read_mode=True)
        for key, value in d.read_all_values():
            metric_name, name, labelnames, labelvalues = json.loads(key)

            metric = metrics.get(metric_name)
            if metric is None:
                metric = core.Metric(metric_name, 'Multiprocess metric', typ)
                metrics[metric_name] = metric

            if typ == 'gauge':
                # filename is gauge_<mode>_<pid>.db; strip the '.db' suffix
                pid = parts[2][:-3]
                metric._multiprocess_mode = parts[1]
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)) + (('pid', pid), ),
                    value,
                )
            else:
                # The duplicates and labels are fixed in the next for.
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)),
                    value,
                )
        d.close()

    for metric in metrics.values():
        samples = defaultdict(float)
        buckets = {}
        for name, labels, value in metric.samples:
            if metric.type == 'gauge':
                without_pid = tuple(l for l in labels if l[0] != 'pid')
                if metric._multiprocess_mode == 'min':
                    current = samples.setdefault((name, without_pid), value)
                    if value < current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'max':
                    current = samples.setdefault((name, without_pid), value)
                    if value > current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'livesum':
                    samples[(name, without_pid)] += value
                else:  # all/liveall
                    samples[(name, labels)] = value

            elif metric.type == 'histogram':
                bucket = tuple(float(l[1]) for l in labels if l[0] == 'le')
                if bucket:
                    # _bucket
                    without_le = tuple(l for l in labels if l[0] != 'le')
                    buckets.setdefault(without_le, {})
                    buckets[without_le].setdefault(bucket[0], 0.0)
                    buckets[without_le][bucket[0]] += value
                else:
                    # _sum/_count
                    samples[(name, labels)] += value

            else:
                # Counter and Summary.
                samples[(name, labels)] += value

        # end of verbatim copy
        # modified to remove accumulation
        if metric.type == 'histogram':
            for labels, values in buckets.items():
                for bucket, value in sorted(values.items()):
                    key = (
                        metric.name + '_bucket',
                        labels + (('le', core._floatToGoString(bucket)), ),
                    )
                    samples[key] = value

        # Convert to correct sample format.
        metric.samples = [
            # OrderedDict used instead of dict
            (name, OrderedDict(labels), value)
            for (name, labels), value in samples.items()
        ]
    return metrics.values()
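
A sketch of how collect() might be driven. The glob pattern and the prometheus_multiproc_dir environment variable are assumptions borrowed from the standard prometheus_client multiprocess setup, not part of the example above.

import glob
import os

# Hypothetical driver: gather every multiprocess db file and collect it.
path = os.environ.get('prometheus_multiproc_dir', '.')
files = glob.glob(os.path.join(path, '*.db'))
for metric in collect(files):
    print(metric.name, metric.type, len(metric.samples))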
Example #4
def test_process_restart(self):
    # assumes a TestCase whose setUp() creates self.d and self.tempfile,
    # with module-level: from prometheus_client import core
    self.d.write_value('abc', 123.0)
    self.d.close()
    # re-opening the same file simulates a process restart
    self.d = core._MmapedDict(self.tempfile)
    self.assertEqual(123, self.d.read_value('abc'))
    self.assertEqual([('abc', 123.0)], list(self.d.read_all_values()))
Example #5
def setUp(self):
    # assumes module-level: import os, tempfile;
    # from prometheus_client import core
    fd, self.tempfile = tempfile.mkstemp()
    os.close(fd)
    self.d = core._MmapedDict(self.tempfile)
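
The excerpt does not show the matching tearDown(); a minimal sketch, assuming nothing beyond what setUp() above creates:

def tearDown(self):
    # close the mmap-backed dict and remove the temp file from setUp()
    self.d.close()
    os.unlink(self.tempfile)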
Example #6
import json
import os
from collections import OrderedDict, defaultdict

def legacy_collect(files):
    """This is copied almost verbatim from MultiProcessCollector.collect()
    pre 0.4.0.

    The original collects all results in a format designed to be scraped. We
    instead need to collect limited results, in a format that can be written
    back to disk. To facilitate this, this version of collect() preserves label
    ordering, and does not aggregate the histograms.

    Specifically, it differs from the original:

    1. it takes its files as an argument, rather than hardcoding '*.db'
    2. it does not accumulate histograms
    3. it preserves label order, to facilitate being inserted back into an mmap
       file.

    It needs to be kept in sync with prometheus_client as much as possible,
    until changes land upstream that allow collect() to be reused directly.
    """
    from prometheus_client import core
    metrics = {}
    for f in files:
        if not os.path.exists(f):
            continue
        # verbatim from here...
        parts = os.path.basename(f).split('_')
        typ = parts[0]
        d = core._MmapedDict(f, read_mode=True)
        for key, value in d.read_all_values():
            # Note: key format changed in 0.4+
            metric_name, name, labelnames, labelvalues = json.loads(key)

            metric = metrics.get(metric_name)
            if metric is None:
                metric = core.Metric(metric_name, 'Multiprocess metric', typ)
                metrics[metric_name] = metric

            if typ == 'gauge':
                # filename is gauge_<mode>_<pid>.db; strip the '.db' suffix
                pid = parts[2][:-3]
                metric._multiprocess_mode = parts[1]
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)) + (('pid', pid), ),
                    value,
                )
            else:
                # The duplicates and labels are fixed in the next for.
                metric.add_sample(
                    name,
                    tuple(zip(labelnames, labelvalues)),
                    value,
                )
        d.close()

    for metric in metrics.values():
        samples = defaultdict(float)
        buckets = {}
        for name, labels, value in metric.samples:
            if metric.type == 'gauge':
                without_pid = tuple(l for l in labels if l[0] != 'pid')
                if metric._multiprocess_mode == 'min':
                    current = samples.setdefault((name, without_pid), value)
                    if value < current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'max':
                    current = samples.setdefault((name, without_pid), value)
                    if value > current:
                        samples[(name, without_pid)] = value
                elif metric._multiprocess_mode == 'livesum':
                    samples[(name, without_pid)] += value
                else:  # all/liveall
                    samples[(name, labels)] = value

            elif metric.type == 'histogram':
                bucket = tuple(float(l[1]) for l in labels if l[0] == 'le')
                if bucket:
                    # _bucket
                    without_le = tuple(l for l in labels if l[0] != 'le')
                    buckets.setdefault(without_le, {})
                    buckets[without_le].setdefault(bucket[0], 0.0)
                    buckets[without_le][bucket[0]] += value
                else:
                    # _sum/_count
                    samples[(name, labels)] += value

            else:
                # Counter and Summary.
                samples[(name, labels)] += value

        # end of verbatim copy
        # modified to remove accumulation
        if metric.type == 'histogram':
            for labels, values in buckets.items():
                for bucket, value in sorted(values.items()):
                    key = (
                        metric.name + '_bucket',
                        labels + (('le', core._floatToGoString(bucket)),),
                    )
                    samples[key] = value

        # Convert to correct sample format.
        metric.samples = [
            # OrderedDict used instead of dict
            (name, OrderedDict(labels), value)
            for (name, labels), value in samples.items()
        ]
    return metrics.values()
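
Dispatching between the two collectors can reuse the feature test from write_metrics() in Example #1; a minimal sketch, with a hypothetical helper name:

def collect_metrics(files):
    # Pick the collector matching the installed prometheus_client:
    # core._mmap_key only exists in 0.4+.
    from prometheus_client import core
    if hasattr(core, '_mmap_key'):
        return collect(files)
    return legacy_collect(files)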