class TestAdvisoryLock(unittest.TestCase):
    """
    These tests use lock acquisition as a proxy for cleanup/collect
    operations, the former using exclusive locks, the latter shared locks.
    """

    def setUp(self):
        # Fresh metrics directory per test; the multiprocess machinery
        # discovers it through this environment variable.
        self.tempdir = tempfile.mkdtemp()
        os.environ['prometheus_multiproc_dir'] = self.tempdir
        # Fixed fake pid keeps the generated .db file names deterministic.
        values.ValueClass = MultiProcessValue(lambda: 123)
        self.registry = CollectorRegistry()
        self.collector = MultiProcessCollector(self.registry, self.tempdir)

    def test_cleanup_waits_for_collectors(self):
        # IOError in python2, OSError in python3
        with self.assertRaises(EnvironmentError):
            # Holding the shared (collector) lock must make the exclusive
            # lock needed by archive_metrics() unavailable.
            with advisory_lock(LOCK_SH):
                archive_metrics(blocking=False)

    def test_collect_doesnt_block_other_collects(self):
        values.ValueClass = MultiProcessValue(lambda: 0)
        labels = dict((i, i) for i in 'abcd')
        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        c.labels(**labels).inc(1)
        # Shared locks are compatible with each other: collecting while a
        # shared lock is already held must still succeed.
        with advisory_lock(LOCK_SH):
            metrics = dict(
                (m.name, m) for m in self.collector.collect(blocking=False))
        self.assertEqual(
            metrics['c'].samples, [Sample('c_total', labels, 1.0)])

    def test_collect_waits_for_cleanup(self):
        values.ValueClass = MultiProcessValue(lambda: 0)
        labels = dict((i, i) for i in 'abcd')
        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        c.labels(**labels).inc(1)
        # An exclusive (cleanup) lock must block a non-blocking collect.
        with self.assertRaises(EnvironmentError):
            with advisory_lock(LOCK_EX):
                self.collector.collect(blocking=False)

    def test_exceptions_release_lock(self):
        with self.assertRaises(ValueError):
            with advisory_lock(LOCK_EX):
                raise ValueError
        # Do an operation which requires acquiring the lock
        archive_metrics(blocking=False)

    def tearDown(self):
        del os.environ['prometheus_multiproc_dir']
        shutil.rmtree(self.tempdir)
        # Restore the default single-process value implementation.
        values.ValueClass = MutexValue
def __init__(self, app=None, export_defaults=True,
             defaults_prefix='flask', group_by='path',
             buckets=None, static_labels=None, registry=None):
    """
    Set up a multiprocess-aware Prometheus metrics export configuration.

    :param app: the Flask application (can be `None`)
    :param export_defaults: whether to expose HTTP request latencies
        and request counts for every request
    :param defaults_prefix: prefix for the names of the default exported
        metrics (used when `export_defaults=True` or `export_defaults(..)`
        is called)
    :param group_by: request property the default HTTP metrics are grouped
        by — `path`, `endpoint`, `url_rule`, etc. (defaults to `path`)
    :param buckets: time buckets for the request latency histogram
        (library default when `None`)
    :param static_labels: static labels attached to every metric exposed
        by this instance
    :param registry: the Prometheus registry to use; a fresh one is
        created when `None`. Either way it is hooked up to a
        `prometheus_client.multiprocess.MultiProcessCollector`.
    """
    _check_multiproc_env_var()

    if not registry:
        registry = CollectorRegistry()
    # Registers itself with the registry as a side effect.
    MultiProcessCollector(registry)

    super(MultiprocessPrometheusMetrics, self).__init__(
        app=app,
        path=None,
        export_defaults=export_defaults,
        defaults_prefix=defaults_prefix,
        group_by=group_by,
        buckets=buckets,
        static_labels=static_labels,
        registry=registry,
    )
def get_metrics_reporting_registry(
    process_registry: CollectorRegistry,
) -> CollectorRegistry:
    """
    Return the registry that metric reports should be generated from.

    Under gunicorn each worker is a separate process with its own
    process collector, so reporting needs a fresh registry whose
    multiprocess collector reads the shared metrics folder (one database
    file per process and metric type).  That collector combines metrics
    across processes without double-counting the reporting process.

    Outside gunicorn (no multiproc dir configured), the passed
    per-process registry is the only registry and is returned unchanged;
    using a replaceable registry instead of the global
    prometheus_client.REGISTRY keeps tests easy to write.
    """
    try:
        multiproc_dir = config.Settings().prometheus_multiproc_dir
    except ValidationError:
        multiproc_dir = None

    if not multiproc_dir:
        return process_registry

    combined = CollectorRegistry()
    MultiProcessCollector(combined, path=multiproc_dir)
    return combined
def get_metrics() -> str:
    """Render the currently collected metrics in Prometheus text format."""
    if 'PROMETHEUS_MULTIPROC_DIR' in os.environ:
        # Multiprocess mode: aggregate from the shared metrics directory
        # via a fresh registry instead of the process-local default.
        registry = CollectorRegistry()
        MultiProcessCollector(registry)
    else:
        registry = REGISTRY
    return generate_latest(registry)
def metrics(request: Request) -> Response:
    """Serve Prometheus metrics, honouring multiprocess mode when set up."""
    registry = REGISTRY
    if "prometheus_multiproc_dir" in os.environ:
        # Aggregate across worker processes through a fresh registry.
        registry = CollectorRegistry()
        MultiProcessCollector(registry)
    body = generate_latest(registry)
    return Response(body, headers={"Content-Type": CONTENT_TYPE_LATEST})
def get_registry(name):
    """Return the named collector registry, creating it on first use.

    Registries are cached in the module-level ``collector_registries``
    mapping.  When a multiprocess directory is configured, a newly
    created registry is attached to a ``MultiProcessCollector`` so it
    aggregates metrics across worker processes.

    :param name: key identifying the registry
    :return: the cached or newly created ``CollectorRegistry``
    """
    # Membership test directly on the dict — `.keys()` was redundant.
    if name not in collector_registries:
        collector_registries[name] = CollectorRegistry()
        if prometheus_multiproc_dir is not None:
            MultiProcessCollector(collector_registries[name])
    return collector_registries[name]
def get_metrics():
    """Return (payload, status, headers) exposing the multiprocess metrics."""
    registry = CollectorRegistry()
    MultiProcessCollector(registry)
    payload = generate_latest(registry)
    headers = [
        ('Content-type', CONTENT_TYPE_LATEST),
        ('Content-Length', str(len(payload))),
    ]
    return payload, 200, headers
def collect_metrics():
    """Serve multiprocess, platform and external metrics in one response."""
    registry = CollectorRegistry()
    # Each collector registers itself with the registry on construction.
    for attach in (MultiProcessCollector, PlatformCollector, ExternalMetrics):
        attach(registry)
    return Response(generate_latest(registry), mimetype=CONTENT_TYPE_LATEST)
def __init__(self, app, endpoint="/metrics"):
    """Create the metrics extension, binding to *app* at once if given.

    :param app: application instance, or ``None`` to bind later
    :param endpoint: URL path the metrics are exposed under
    """
    self.endpoint = endpoint
    self._metrics = {}
    self.registry = CollectorRegistry()
    MultiProcessCollector(self.registry)
    # NOTE(review): self.app deliberately starts as None; init_app() is
    # presumably what binds the application — confirm it sets self.app.
    self.app = None
    if app is not None:
        self.init_app(app)
def test_deprecation_warning(self):
    # Configure only the legacy lower-case variable; the library should
    # still work but warn about it.
    os.environ['prometheus_multiproc_dir'] = self.tempdir
    with warnings.catch_warnings(record=True) as w:
        values.ValueClass = get_value_class()
        registry = CollectorRegistry()
        collector = MultiProcessCollector(registry)
        Counter('c', 'help', registry=None)

    # The library mirrored the legacy value into the new variable name.
    assert os.environ['PROMETHEUS_MULTIPROC_DIR'] == self.tempdir
    # Exactly one DeprecationWarning pointing at the new variable name.
    assert len(w) == 1
    assert issubclass(w[-1].category, DeprecationWarning)
    assert "PROMETHEUS_MULTIPROC_DIR" in str(w[-1].message)
def metrics(request: Request) -> Response:
    """Serve Prometheus metrics, aggregating across processes when the
    multiprocess directory is configured.

    :param request: incoming request (unused beyond the framework contract)
    :return: text-format metrics response
    """
    if "prometheus_multiproc_dir" in os.environ:
        registry = CollectorRegistry()
        MultiProcessCollector(registry)
        # Lazy %-style args: the message is only rendered if INFO is
        # enabled (the old f-string interpolated eagerly).
        logger.info(
            "Metrics multiprocess :[%s]",
            os.environ["prometheus_multiproc_dir"],
        )
    else:
        registry = REGISTRY
        # Plain string — the original f-string had no placeholders (F541).
        logger.info("Metrics no multiprocess")
    return Response(generate_latest(registry), media_type=CONTENT_TYPE_LATEST)
def get_metrics(request):
    """Pyramid view returning the metrics payload."""
    if prom.IS_MULTIPROC:
        registry = CollectorRegistry()
        MultiProcessCollector(registry)
    else:
        registry = REGISTRY
    # NOTE(review): mutating request.response looks redundant given a new
    # Response is returned — confirm nothing reads request.response later.
    request.response.content_type = CONTENT_TYPE_LATEST
    response = Response(content_type=CONTENT_TYPE_LATEST)
    response.body = generate_latest(registry)
    return response
def prometheus_registry():
    """
    Configure prometheus_client; runs the first time /metrics is used.

    The multiprocess collector makes the numbers correct under gunicorn's
    worker model:
    https://github.com/prometheus/client_python/#multiprocess-mode-eg-gunicorn
    """
    from prometheus_client import CollectorRegistry
    from prometheus_client.multiprocess import MultiProcessCollector

    registry = CollectorRegistry()
    # Constructing the collector registers it with `registry` (side effect).
    MultiProcessCollector(registry)
    return registry
def prometheus_cleanup_worker(pid):
    """Aggregate dead worker's metrics into a single archive file."""
    mark_process_dead(pid)  # this takes care of gauges
    prom_dir = os.environ['prometheus_multiproc_dir']
    worker_files = [
        'histogram_{}.db'.format(pid),
        'counter_{}.db'.format(pid),
    ]
    paths = _filter_exists(os.path.join(prom_dir, f) for f in worker_files)

    # check at least one worker file exists
    if not paths:
        return

    histogram_path = os.path.join(prom_dir, histogram_archive)
    counter_path = os.path.join(prom_dir, counter_archive)
    archive_paths = _filter_exists([histogram_path, counter_path])

    # merge the worker's files with any existing archives
    collect_paths = paths + archive_paths
    collector = MultiProcessCollector(None)

    try:
        metrics = collector.merge(collect_paths, accumulate=False)
    except AttributeError:
        # client version without merge(); fall back to our own reader
        metrics = legacy_collect(collect_paths)

    # write the merged state to temp files, then swap them in atomically
    tmp_histogram = tempfile.NamedTemporaryFile(delete=False)
    tmp_counter = tempfile.NamedTemporaryFile(delete=False)
    write_metrics(metrics, tmp_histogram.name, tmp_counter.name)

    # ensure a reader does NOT see partial state: rename the archives and
    # remove the worker files while holding the lock
    with prometheus_lock:
        os.rename(tmp_histogram.name, histogram_path)
        os.rename(tmp_counter.name, counter_path)

        for path in paths:
            os.unlink(path)
def __init__(self, app=None, **kwargs):
    """
    Create a new multiprocess-aware Prometheus metrics export configuration.

    :param registry: the Prometheus Registry to use (can be `None` and it
        will be registered with
        `prometheus_client.multiprocess.MultiProcessCollector`)
    """
    _check_multiproc_env_var()

    # pop() (not get()) so 'registry' is not forwarded a second time via
    # **kwargs below — with get() a caller passing registry=... would hit
    # "TypeError: got multiple values for keyword argument 'registry'".
    registry = kwargs.pop('registry', None) or CollectorRegistry()
    MultiProcessCollector(registry)

    super(MultiprocessPrometheusMetrics, self).__init__(
        app=app, path=None, registry=registry, **kwargs)
def set_config(self, config: ValueExtractor) -> None:
    """Configure the metrics registry, optional HTTP server and optional
    Graphite bridge from *config*.

    :param config: value extractor with keys `registry`, `port`, `graphite`
    """
    super().set_config(config)
    registry = get_registry(self.config.get('registry', REGISTRY))
    if MULTIPROC_DIR:
        # The collector registers itself with the registry; the original
        # code rebound `registry` to the collector object, so the stored
        # "_registry" (and everything below) received a MultiProcessCollector
        # instead of a CollectorRegistry.
        MultiProcessCollector(registry)
    self._registry = registry
    port: int = self.config.get_int('port', default=0)
    if port:
        start_http_server(port=port, registry=registry)
    graphite: Optional[ValueExtractor] = self.config.get('graphite')
    if graphite:
        address = graphite.get('address')
        if isinstance(address, str):
            # accept "host:port" strings as well as (host, port) pairs
            addr = address.split(':')
            address = (addr[0], int(addr[-1]))
        gb = GraphiteBridge(address, registry=registry)
        interval: float = graphite.get_duration('interval', 60)
        prefix: str = graphite.get('prefix', '')
        gb.start(interval, prefix=prefix)
def __init__(self, collectors: Iterable[Any]):
    """Builds a metric encoder using the given list of collectors.

    A collector is broadly defined here as any type that implements the
    `collect` method.

    :param collectors: The collectors to fetch metrics from
    :type collectors: Iterable[Any]
    """
    self._registry = CollectorRegistry(auto_describe=True)
    # Always use a new registry for collecting mmapped files under
    # multiprocess mode.
    if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
        MultiProcessCollector(self._registry)
    for candidate in collectors:
        # Do not double register metrics that implement MultiProcessValue
        # (see: prometheus_client/values.py#L31), and skip anything that
        # has no collect() method.
        if isinstance(candidate, (LiveMetricRegistry, MetricWrapperBase)):
            continue
        if not hasattr(candidate, "collect"):
            continue
        self._registry.register(candidate)
def metrics():
    """Serve the multiprocess metrics in Prometheus text format."""
    # generate_latest() only needs an object with a collect() method, and
    # the collector registers itself with the throwaway registry.
    collector = MultiProcessCollector(CollectorRegistry())
    payload = generate_latest(collector)
    return Response(payload, mimetype=CONTENT_TYPE_LATEST)
def display_metrics():
    """Return the aggregated multiprocess metrics as a text string."""
    registry = prometheus_client.CollectorRegistry()
    MultiProcessCollector(registry)
    return prometheus_client.generate_latest(registry=registry).decode()
@contact: [email protected] @time: 2018/5/8 上午10:18 """ import os import prometheus_client from flask import Flask, Response, request from flask import jsonify from prometheus_client import CollectorRegistry, multiprocess from prometheus_client import Counter from prometheus_client.multiprocess import MultiProcessCollector # REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request') REGISTRY = CollectorRegistry(auto_describe=False) MultiProcessCollector(REGISTRY) requests_total = Counter("app:requests_total", "Total count of requests", ["method", "url_rule"], registry=REGISTRY) app = Flask(__name__) def record(func): """ func 即为调用该函数的方法 """ def req(*args, **kwargs): requests_total.labels(method=request.method, url_rule=request.path.lower()).inc()
def test_prometheus_cleanup(registry):
    """End-to-end check of prometheus_cleanup_worker(): per-worker db
    files are folded into *_archive.db files without changing the
    aggregated counter/histogram values."""
    pid = 1

    def getpid():
        return pid

    # override use of os.getpid. _ValueClass is recreated after every test,
    # so we don't need to clean up
    from prometheus_client import core
    core._ValueClass = core._MultiProcessValue(getpid)

    histogram = metrics.Histogram(
        name='histogram',
        documentation='test histogram',
        labelnames=['foo', 'bar', 'baz'],
        statsd='{name}.{label}',
        registry=registry,
    )
    counter = metrics.Counter(
        name='counter',
        documentation='test counter',
        labelnames=['foo', 'bar', 'baz'],
        statsd='{name}.{label}',
        registry=registry,
    )

    from prometheus_client.multiprocess import MultiProcessCollector
    collector = MultiProcessCollector(registry)
    labels = {'foo': 'foo', 'bar': 'bar', 'baz': 'baz'}

    def collect():
        # snapshot of all metrics, keyed by metric name
        return {m.name: m for m in collector.collect()}

    def files():
        # sorted db file names currently in the multiproc directory
        return list(sorted(os.listdir(os.environ['prometheus_multiproc_dir'])))

    counter.inc(1, **labels)
    histogram.observe(0.5, **labels)
    histogram.observe(2.5, **labels)

    assert files() == [
        'counter_1.db',
        'histogram_1.db',
    ]

    before = collect()
    metrics.prometheus_cleanup_worker(pid)
    after = collect()
    # worker files have been replaced by the archives...
    assert files() == [
        'counter_archive.db',
        'histogram_archive.db',
    ]
    # ...but the aggregated values are unchanged
    assert before == after  # magic!

    pid += 1
    # new worker, create some new metrics, check they are all combined
    counter.inc(2, **labels)
    histogram.observe(0.5, **labels)
    histogram.observe(2.5, **labels)

    later = collect()
    assert files() == [
        'counter_2.db',
        'counter_archive.db',
        'histogram_2.db',
        'histogram_archive.db',
    ]

    # check counter is correct
    assert later['counter'].samples == [
        Sample(counter_name('counter_total'), labels, 3.0),
    ]

    expected_histogram = [
        Sample('histogram_bucket', dict(le='0.005', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.01', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.025', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.05', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.075', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.1', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.25', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.5', **labels), 2.0),
        Sample('histogram_bucket', dict(le='0.75', **labels), 2.0),
        Sample('histogram_bucket', dict(le='1.0', **labels), 2.0),
        Sample('histogram_bucket', dict(le='2.5', **labels), 4.0),
        Sample('histogram_bucket', dict(le='5.0', **labels), 4.0),
        Sample('histogram_bucket', dict(le='7.5', **labels), 4.0),
        Sample('histogram_bucket', dict(le='10.0', **labels), 4.0),
        Sample('histogram_bucket', dict(le='+Inf', **labels), 4.0),
        Sample('histogram_count', labels, 4.0),
        Sample('histogram_sum', labels, 6.0),
    ]

    # check histogram is correct
    later['histogram'].samples.sort(key=metrics.histogram_sorter)
    assert later['histogram'].samples == expected_histogram

    # check the final files produce the correct numbers
    metrics.prometheus_cleanup_worker(pid)
    final = collect()
    assert files() == [
        'counter_archive.db',
        'histogram_archive.db',
    ]
    final['histogram'].samples.sort(key=metrics.histogram_sorter)
    assert later == final
def metrics(request):
    """Serve aggregated multiprocess metrics as plain text."""
    log.info('Serving metrics')
    registry = CollectorRegistry()
    MultiProcessCollector(registry)
    return Response(generate_latest(registry), content_type='text/plain')
def setUp(self):
    # Fresh per-test metrics directory, advertised through the modern
    # upper-case environment variable the client library reads.
    self.tempdir = tempfile.mkdtemp()
    os.environ['PROMETHEUS_MULTIPROC_DIR'] = self.tempdir
    # Fixed fake pid keeps the generated .db file names deterministic.
    values.ValueClass = MultiProcessValue(lambda: 123)
    self.registry = CollectorRegistry()
    self.collector = MultiProcessCollector(self.registry)
class TestMultiProcess(unittest.TestCase):
    """Multiprocess aggregation tests: several fake "processes" (told apart
    by the pid returned from MultiProcessValue's callable) write into one
    shared directory and a MultiProcessCollector merges them."""

    def setUp(self):
        # Fresh shared directory per test; discovered via the env var.
        self.tempdir = tempfile.mkdtemp()
        os.environ['PROMETHEUS_MULTIPROC_DIR'] = self.tempdir
        # Pretend to be pid 123 so the .db file names are deterministic.
        values.ValueClass = MultiProcessValue(lambda: 123)
        self.registry = CollectorRegistry()
        self.collector = MultiProcessCollector(self.registry)

    @property
    def _value_class(self):
        # Intentionally returns None.  NOTE(review): looks vestigial —
        # confirm nothing still reads this property.
        return

    def tearDown(self):
        del os.environ['PROMETHEUS_MULTIPROC_DIR']
        shutil.rmtree(self.tempdir)
        # Restore the default single-process value implementation.
        values.ValueClass = MutexValue

    def test_counter_adds(self):
        # Same counter written from two "processes" (pids 123 and 456).
        c1 = Counter('c', 'help', registry=None)
        values.ValueClass = MultiProcessValue(lambda: 456)
        c2 = Counter('c', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('c_total'))
        c1.inc(1)
        c2.inc(2)
        # Counters sum across processes.
        self.assertEqual(3, self.registry.get_sample_value('c_total'))

    def test_summary_adds(self):
        s1 = Summary('s', 'help', registry=None)
        values.ValueClass = MultiProcessValue(lambda: 456)
        s2 = Summary('s', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('s_count'))
        self.assertEqual(0, self.registry.get_sample_value('s_sum'))
        s1.observe(1)
        s2.observe(2)
        # Both count and sum aggregate across processes.
        self.assertEqual(2, self.registry.get_sample_value('s_count'))
        self.assertEqual(3, self.registry.get_sample_value('s_sum'))

    def test_histogram_adds(self):
        h1 = Histogram('h', 'help', registry=None)
        values.ValueClass = MultiProcessValue(lambda: 456)
        h2 = Histogram('h', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('h_count'))
        self.assertEqual(0, self.registry.get_sample_value('h_sum'))
        self.assertEqual(
            0, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
        h1.observe(1)
        h2.observe(2)
        self.assertEqual(2, self.registry.get_sample_value('h_count'))
        self.assertEqual(3, self.registry.get_sample_value('h_sum'))
        self.assertEqual(
            2, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))

    def test_gauge_all(self):
        # Default 'all' mode: one sample per pid, dead pids kept.
        g1 = Gauge('g', 'help', registry=None)
        values.ValueClass = MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None)
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '456'}))
        g1.set(1)
        g2.set(2)
        mark_process_dead(123)
        # 'all' mode keeps samples from dead processes.
        self.assertEqual(
            1, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            2, self.registry.get_sample_value('g', {'pid': '456'}))

    def test_gauge_liveall(self):
        # 'liveall': per-pid samples, but dead pids are dropped.
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall')
        values.ValueClass = MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall')
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '456'}))
        g1.set(1)
        g2.set(2)
        self.assertEqual(
            1, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            2, self.registry.get_sample_value('g', {'pid': '456'}))
        mark_process_dead(123, os.environ['PROMETHEUS_MULTIPROC_DIR'])
        # pid 123's sample disappears once the process is marked dead.
        self.assertEqual(
            None, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            2, self.registry.get_sample_value('g', {'pid': '456'}))

    def test_gauge_min(self):
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='min')
        values.ValueClass = MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None, multiprocess_mode='min')
        self.assertEqual(0, self.registry.get_sample_value('g'))
        g1.set(1)
        g2.set(2)
        # Minimum over all processes.
        self.assertEqual(1, self.registry.get_sample_value('g'))

    def test_gauge_max(self):
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
        values.ValueClass = MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
        self.assertEqual(0, self.registry.get_sample_value('g'))
        g1.set(1)
        g2.set(2)
        # Maximum over all processes.
        self.assertEqual(2, self.registry.get_sample_value('g'))

    def test_gauge_livesum(self):
        # 'livesum': a single summed sample over live processes only.
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
        values.ValueClass = MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
        self.assertEqual(0, self.registry.get_sample_value('g'))
        g1.set(1)
        g2.set(2)
        self.assertEqual(3, self.registry.get_sample_value('g'))
        mark_process_dead(123, os.environ['PROMETHEUS_MULTIPROC_DIR'])
        # Dead pid 123's contribution (1) drops out of the sum.
        self.assertEqual(2, self.registry.get_sample_value('g'))

    def test_namespace_subsystem(self):
        c1 = Counter(
            'c', 'help', registry=None, namespace='ns', subsystem='ss')
        c1.inc(1)
        # Full metric name is namespace_subsystem_name.
        self.assertEqual(1, self.registry.get_sample_value('ns_ss_c_total'))

    def test_counter_across_forks(self):
        pid = 0
        values.ValueClass = MultiProcessValue(lambda: pid)
        c1 = Counter('c', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('c_total'))
        c1.inc(1)
        c1.inc(1)
        pid = 1  # simulate a fork: same metric object, new pid
        c1.inc(1)
        # Aggregate spans both pids; the live value only sees the new one.
        self.assertEqual(3, self.registry.get_sample_value('c_total'))
        self.assertEqual(1, c1._value.get())

    def test_initialization_detects_pid_change(self):
        pid = 0
        values.ValueClass = MultiProcessValue(lambda: pid)

        # can not inspect the files cache directly, as it's a closure, so we
        # check for the actual files themselves
        def files():
            fs = os.listdir(os.environ['PROMETHEUS_MULTIPROC_DIR'])
            fs.sort()
            return fs

        c1 = Counter('c1', 'c1', registry=None)
        self.assertEqual(files(), ['counter_0.db'])
        c2 = Counter('c2', 'c2', registry=None)
        self.assertEqual(files(), ['counter_0.db'])
        pid = 1
        # A new pid must produce a new per-process db file.
        c3 = Counter('c3', 'c3', registry=None)
        self.assertEqual(files(), ['counter_0.db', 'counter_1.db'])

    def test_collect(self):
        pid = 0
        values.ValueClass = MultiProcessValue(lambda: pid)
        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        g = Gauge('g', 'help', labelnames=labels.keys(), registry=None)
        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(1)
        pid = 1
        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(5)
        metrics = dict((m.name, m) for m in self.collector.collect())
        self.assertEqual(
            metrics['c'].samples, [Sample('c_total', labels, 2.0)])
        metrics['g'].samples.sort(key=lambda x: x[1]['pid'])
        self.assertEqual(metrics['g'].samples, [
            Sample('g', add_label('pid', '0'), 1.0),
            Sample('g', add_label('pid', '1'), 1.0),
        ])
        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0))))
        # Cumulative buckets across both observations (1 and 5).
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 1.0),
            Sample('h_bucket', add_label('le', '5.0'), 2.0),
            Sample('h_bucket', add_label('le', '7.5'), 2.0),
            Sample('h_bucket', add_label('le', '10.0'), 2.0),
            Sample('h_bucket', add_label('le', '+Inf'), 2.0),
            Sample('h_count', labels, 2.0),
            Sample('h_sum', labels, 6.0),
        ]
        self.assertEqual(metrics['h'].samples, expected_histogram)

    def test_merge_no_accumulate(self):
        pid = 0
        values.ValueClass = MultiProcessValue(lambda: pid)
        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
        h.labels(**labels).observe(1)
        pid = 1
        h.labels(**labels).observe(5)

        path = os.path.join(os.environ['PROMETHEUS_MULTIPROC_DIR'], '*.db')
        files = glob.glob(path)
        metrics = dict(
            (m.name, m) for m in self.collector.merge(
                files, accumulate=False))
        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0))))
        # accumulate=False: buckets are per-observation counts (not
        # cumulative) and no h_count sample is synthesized.
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 0.0),
            Sample('h_bucket', add_label('le', '5.0'), 1.0),
            Sample('h_bucket', add_label('le', '7.5'), 0.0),
            Sample('h_bucket', add_label('le', '10.0'), 0.0),
            Sample('h_bucket', add_label('le', '+Inf'), 0.0),
            Sample('h_sum', labels, 6.0),
        ]
        self.assertEqual(metrics['h'].samples, expected_histogram)

    def test_missing_gauge_file_during_merge(self):
        # These files don't exist, just like if mark_process_dead(9999999) had been
        # called during self.collector.collect(), after the glob found it
        # but before the merge actually happened.
        # This should not raise and return no metrics
        self.assertFalse(
            self.collector.merge([
                os.path.join(self.tempdir, 'gauge_liveall_9999999.db'),
                os.path.join(self.tempdir, 'gauge_livesum_9999999.db'),
            ]))
def create_registry():
    """Build a fresh registry wired to the multiprocess collector."""
    multiproc_registry = CollectorRegistry()
    MultiProcessCollector(multiproc_registry)
    return multiproc_registry
def setUp(self):
    # Per-test metrics directory, exposed via the legacy lower-case
    # environment variable this client version reads.
    self.tempdir = tempfile.mkdtemp()
    os.environ['prometheus_multiproc_dir'] = self.tempdir
    # Fixed fake pid -> deterministic per-process .db file names.
    core._ValueClass = core._MultiProcessValue(lambda: 123)
    self.registry = CollectorRegistry()
    MultiProcessCollector(self.registry, self.tempdir)
class TestMultiProcess(unittest.TestCase):
    """Multiprocess aggregation tests against the older client API that
    exposes the value hooks as core._ValueClass / core._MultiProcessValue
    and reads the lower-case environment variable."""

    def setUp(self):
        # Fresh shared directory per test; discovered via the env var.
        self.tempdir = tempfile.mkdtemp()
        os.environ['prometheus_multiproc_dir'] = self.tempdir
        # Pretend to be pid 123 so the .db file names are deterministic.
        core._ValueClass = core._MultiProcessValue(lambda: 123)
        self.registry = CollectorRegistry()
        self.collector = MultiProcessCollector(self.registry, self.tempdir)

    def tearDown(self):
        del os.environ['prometheus_multiproc_dir']
        shutil.rmtree(self.tempdir)
        # Restore the default single-process value implementation.
        core._ValueClass = core._MutexValue

    def test_counter_adds(self):
        # Same counter written from two "processes" (pids 123 and 456).
        c1 = Counter('c', 'help', registry=None)
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        c2 = Counter('c', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('c_total'))
        c1.inc(1)
        c2.inc(2)
        # Counters sum across processes.
        self.assertEqual(3, self.registry.get_sample_value('c_total'))

    def test_summary_adds(self):
        s1 = Summary('s', 'help', registry=None)
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        s2 = Summary('s', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('s_count'))
        self.assertEqual(0, self.registry.get_sample_value('s_sum'))
        s1.observe(1)
        s2.observe(2)
        # Both count and sum aggregate across processes.
        self.assertEqual(2, self.registry.get_sample_value('s_count'))
        self.assertEqual(3, self.registry.get_sample_value('s_sum'))

    def test_histogram_adds(self):
        h1 = Histogram('h', 'help', registry=None)
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        h2 = Histogram('h', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('h_count'))
        self.assertEqual(0, self.registry.get_sample_value('h_sum'))
        self.assertEqual(
            0, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
        h1.observe(1)
        h2.observe(2)
        self.assertEqual(2, self.registry.get_sample_value('h_count'))
        self.assertEqual(3, self.registry.get_sample_value('h_sum'))
        self.assertEqual(
            2, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))

    def test_gauge_all(self):
        # Default 'all' mode: one sample per pid, dead pids kept.
        g1 = Gauge('g', 'help', registry=None)
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None)
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '456'}))
        g1.set(1)
        g2.set(2)
        mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
        # 'all' mode keeps samples from dead processes.
        self.assertEqual(
            1, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            2, self.registry.get_sample_value('g', {'pid': '456'}))

    def test_gauge_liveall(self):
        # 'liveall': per-pid samples, but dead pids are dropped.
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall')
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall')
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': '456'}))
        g1.set(1)
        g2.set(2)
        self.assertEqual(
            1, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            2, self.registry.get_sample_value('g', {'pid': '456'}))
        mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
        # pid 123's sample disappears once the process is marked dead.
        self.assertEqual(
            None, self.registry.get_sample_value('g', {'pid': '123'}))
        self.assertEqual(
            2, self.registry.get_sample_value('g', {'pid': '456'}))

    def test_gauge_min(self):
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='min')
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None, multiprocess_mode='min')
        self.assertEqual(0, self.registry.get_sample_value('g'))
        g1.set(1)
        g2.set(2)
        # Minimum over all processes.
        self.assertEqual(1, self.registry.get_sample_value('g'))

    def test_gauge_max(self):
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
        self.assertEqual(0, self.registry.get_sample_value('g'))
        g1.set(1)
        g2.set(2)
        # Maximum over all processes.
        self.assertEqual(2, self.registry.get_sample_value('g'))

    def test_gauge_livesum(self):
        # 'livesum': a single summed sample over live processes only.
        g1 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
        core._ValueClass = core._MultiProcessValue(lambda: 456)
        g2 = Gauge('g', 'help', registry=None,
                   multiprocess_mode='livesum')
        self.assertEqual(0, self.registry.get_sample_value('g'))
        g1.set(1)
        g2.set(2)
        self.assertEqual(3, self.registry.get_sample_value('g'))
        mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
        # Dead pid 123's contribution (1) drops out of the sum.
        self.assertEqual(2, self.registry.get_sample_value('g'))

    def test_namespace_subsystem(self):
        c1 = Counter(
            'c', 'help', registry=None, namespace='ns', subsystem='ss')
        c1.inc(1)
        # Full metric name is namespace_subsystem_name.
        self.assertEqual(1, self.registry.get_sample_value('ns_ss_c_total'))

    def test_counter_across_forks(self):
        pid = 0
        core._ValueClass = core._MultiProcessValue(lambda: pid)
        c1 = Counter('c', 'help', registry=None)
        self.assertEqual(0, self.registry.get_sample_value('c_total'))
        c1.inc(1)
        c1.inc(1)
        pid = 1  # simulate a fork: same metric object, new pid
        c1.inc(1)
        # Aggregate spans both pids; the live value only sees the new one.
        self.assertEqual(3, self.registry.get_sample_value('c_total'))
        self.assertEqual(1, c1._value.get())

    def test_initialization_detects_pid_change(self):
        pid = 0
        core._ValueClass = core._MultiProcessValue(lambda: pid)

        # can not inspect the files cache directly, as it's a closure, so we
        # check for the actual files themselves
        def files():
            fs = os.listdir(os.environ['prometheus_multiproc_dir'])
            fs.sort()
            return fs

        c1 = Counter('c1', 'c1', registry=None)
        self.assertEqual(files(), ['counter_0.db'])
        c2 = Counter('c2', 'c2', registry=None)
        self.assertEqual(files(), ['counter_0.db'])
        pid = 1
        # A new pid must produce a new per-process db file.
        c3 = Counter('c3', 'c3', registry=None)
        self.assertEqual(files(), ['counter_0.db', 'counter_1.db'])

    @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.")
    def test_collect(self):
        pid = 0
        core._ValueClass = core._MultiProcessValue(lambda: pid)
        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        g = Gauge('g', 'help', labelnames=labels.keys(), registry=None)
        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(1)
        pid = 1
        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(5)

        metrics = dict((m.name, m) for m in self.collector.collect())

        self.assertEqual(
            metrics['c'].samples, [Sample('c_total', labels, 2.0)]
        )
        metrics['g'].samples.sort(key=lambda x: x[1]['pid'])
        self.assertEqual(metrics['g'].samples, [
            Sample('g', add_label('pid', '0'), 1.0),
            Sample('g', add_label('pid', '1'), 1.0),
        ])

        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0)))
        )
        # Cumulative buckets across both observations (1 and 5).
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 1.0),
            Sample('h_bucket', add_label('le', '5.0'), 2.0),
            Sample('h_bucket', add_label('le', '7.5'), 2.0),
            Sample('h_bucket', add_label('le', '10.0'), 2.0),
            Sample('h_bucket', add_label('le', '+Inf'), 2.0),
            Sample('h_count', labels, 2.0),
            Sample('h_sum', labels, 6.0),
        ]
        self.assertEqual(metrics['h'].samples, expected_histogram)

    @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.")
    def test_merge_no_accumulate(self):
        pid = 0
        core._ValueClass = core._MultiProcessValue(lambda: pid)
        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
        h.labels(**labels).observe(1)
        pid = 1
        h.labels(**labels).observe(5)

        path = os.path.join(os.environ['prometheus_multiproc_dir'], '*.db')
        files = glob.glob(path)
        metrics = dict(
            (m.name, m) for m in self.collector.merge(files, accumulate=False)
        )

        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0)))
        )
        # accumulate=False: buckets are per-observation counts (not
        # cumulative) and no h_count sample is synthesized.
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 0.0),
            Sample('h_bucket', add_label('le', '5.0'), 1.0),
            Sample('h_bucket', add_label('le', '7.5'), 0.0),
            Sample('h_bucket', add_label('le', '10.0'), 0.0),
            Sample('h_bucket', add_label('le', '+Inf'), 0.0),
            Sample('h_sum', labels, 6.0),
        ]
        self.assertEqual(metrics['h'].samples, expected_histogram)