def clean_up(tmpdir, monkeypatch):
    """Clean up all globals.

    Sadly, talisker uses some global state.  Namely, stdlib logging module
    globals and thread/greenlet locals. This fixture ensures they are all
    cleaned up each time.
    """
    multiproc = tmpdir.mkdir('multiproc')
    monkeypatch.setenv('prometheus_multiproc_dir', str(multiproc))

    yield

    # module/context globals
    talisker.util.clear_globals()
    # reset stdlib logging
    talisker.logs.reset_logging()
    # reset context storage
    talisker.context.clear()
    raven.context._active_contexts.__dict__.clear()
    talisker.logs.configure_test_logging()
    # clear prometheus file cache
    import prometheus_client.core as core
    # recreate class to clear cache, because cache is a closure...
    core._ValueClass = core._MultiProcessValue()

def clean_up(request, tmpdir, monkeypatch, config):
    """Clean up all globals.

    Sadly, talisker uses some global state.  Namely, stdlib logging module
    globals and thread/greenlet locals. This fixture ensures they are all
    cleaned up each time.
    """
    multiproc = tmpdir.mkdir('multiproc')
    monkeypatch.setenv('prometheus_multiproc_dir', str(multiproc))
    orig_client = talisker.sentry._client

    yield

    talisker.testing.clear_all()
    # some tests mess with the sentry client
    talisker.sentry.set_client(orig_client)
    # reset stdlib logging
    talisker.logs.reset_logging()
    talisker.logs.configure_test_logging(logging.FileHandler('/dev/null'))
    try:
        # clear prometheus file cache
        import prometheus_client.core as core
        # recreate class to clear cache, because cache is a closure...
        core._ValueClass = core._MultiProcessValue()
    except ImportError:
        pass  # prometheus is optional

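# Illustrative sketch (an assumption, not from the original source): cleanup
# generators like the two above are typically registered as autouse pytest
# fixtures, so every test starts with a fresh prometheus_multiproc_dir. The
# fixture and test names here are hypothetical.
import os

import pytest


@pytest.fixture(autouse=True)
def fresh_multiproc_dir(tmpdir, monkeypatch):
    multiproc = tmpdir.mkdir('multiproc')
    monkeypatch.setenv('prometheus_multiproc_dir', str(multiproc))
    yield  # teardown of any global state would go here


def test_env_is_set():
    # each test sees its own empty metrics directory
    assert os.listdir(os.environ['prometheus_multiproc_dir']) == []
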
def test_collect(self):
    pid = 0
    core._ValueClass = core._MultiProcessValue(lambda: pid)
    labels = dict((i, i) for i in 'abcd')

    def add_label(key, value):
        updated = labels.copy()
        updated[key] = value
        return updated

    c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
    g = Gauge('g', 'help', labelnames=labels.keys(), registry=None)
    h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
    c.labels(**labels).inc(1)
    g.labels(**labels).set(1)
    h.labels(**labels).observe(1)

    # simulate a second process by changing the pid the closure returns
    pid = 1
    c.labels(**labels).inc(1)
    g.labels(**labels).set(1)
    h.labels(**labels).observe(5)

    metrics = dict((m.name, m) for m in self.collector.collect())

    self.assertEqual(
        metrics['c'].samples, [Sample('c_total', labels, 2.0)]
    )
    metrics['g'].samples.sort(key=lambda x: x[1]['pid'])
    self.assertEqual(metrics['g'].samples, [
        Sample('g', add_label('pid', '0'), 1.0),
        Sample('g', add_label('pid', '1'), 1.0),
    ])

    metrics['h'].samples.sort(
        key=lambda x: (x[0], float(x[1].get('le', 0)))
    )
    expected_histogram = [
        Sample('h_bucket', add_label('le', '0.005'), 0.0),
        Sample('h_bucket', add_label('le', '0.01'), 0.0),
        Sample('h_bucket', add_label('le', '0.025'), 0.0),
        Sample('h_bucket', add_label('le', '0.05'), 0.0),
        Sample('h_bucket', add_label('le', '0.075'), 0.0),
        Sample('h_bucket', add_label('le', '0.1'), 0.0),
        Sample('h_bucket', add_label('le', '0.25'), 0.0),
        Sample('h_bucket', add_label('le', '0.5'), 0.0),
        Sample('h_bucket', add_label('le', '0.75'), 0.0),
        Sample('h_bucket', add_label('le', '1.0'), 1.0),
        Sample('h_bucket', add_label('le', '2.5'), 1.0),
        Sample('h_bucket', add_label('le', '5.0'), 2.0),
        Sample('h_bucket', add_label('le', '7.5'), 2.0),
        Sample('h_bucket', add_label('le', '10.0'), 2.0),
        Sample('h_bucket', add_label('le', '+Inf'), 2.0),
        Sample('h_count', labels, 2.0),
        Sample('h_sum', labels, 6.0),
    ]
    self.assertEqual(metrics['h'].samples, expected_histogram)

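# Standalone sketch of the pid-override trick used throughout these tests
# (illustrative; assumes the same prometheus_client version as the tests,
# where core._MultiProcessValue accepts a pid callable). Because the pid is
# read through the closure on every write, mutating the variable makes the
# next write land in a different per-process file, simulating a fork without
# actually forking.
import os
import tempfile

os.environ.setdefault('prometheus_multiproc_dir', tempfile.mkdtemp())

import prometheus_client.core as core
from prometheus_client import Counter

pid = 0
core._ValueClass = core._MultiProcessValue(lambda: pid)

c = Counter('demo', 'demo counter', registry=None)
c.inc()   # recorded in counter_0.db
pid = 1
c.inc()   # recorded in counter_1.db
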
def test_counter_adds(self):
    c1 = Counter('c', 'help', registry=None)
    core._ValueClass = core._MultiProcessValue(lambda: 456)
    c2 = Counter('c', 'help', registry=None)
    self.assertEqual(0, self.registry.get_sample_value('c'))
    c1.inc(1)
    c2.inc(2)
    self.assertEqual(3, self.registry.get_sample_value('c'))

def test_gauge_max(self):
    g1 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
    core._ValueClass = core._MultiProcessValue(lambda: 456)
    g2 = Gauge('g', 'help', registry=None, multiprocess_mode='max')
    self.assertEqual(0, self.registry.get_sample_value('g'))
    g1.set(1)
    g2.set(2)
    self.assertEqual(2, self.registry.get_sample_value('g'))

def test_summary_adds(self):
    s1 = Summary('s', 'help', registry=None)
    core._ValueClass = core._MultiProcessValue(lambda: 456)
    s2 = Summary('s', 'help', registry=None)
    self.assertEqual(0, self.registry.get_sample_value('s_count'))
    self.assertEqual(0, self.registry.get_sample_value('s_sum'))
    s1.observe(1)
    s2.observe(2)
    self.assertEqual(2, self.registry.get_sample_value('s_count'))
    self.assertEqual(3, self.registry.get_sample_value('s_sum'))

def test_gauge_livesum(self):
    g1 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
    core._ValueClass = core._MultiProcessValue(lambda: 456)
    g2 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
    self.assertEqual(0, self.registry.get_sample_value('g'))
    g1.set(1)
    g2.set(2)
    self.assertEqual(3, self.registry.get_sample_value('g'))
    # in 'livesum' mode a dead process's gauge no longer counts
    mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
    self.assertEqual(2, self.registry.get_sample_value('g'))

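# For reference (a sketch of the documented prometheus_client pattern, not
# part of these tests): mark_process_dead is normally wired to worker exit,
# e.g. in a gunicorn config file, so that 'live' gauge modes stop counting
# dead workers.
from prometheus_client import multiprocess


def child_exit(server, worker):
    multiprocess.mark_process_dead(worker.pid)
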
def test_counter_across_forks(self):
    pid = 0
    core._ValueClass = core._MultiProcessValue(lambda: pid)
    c1 = Counter('c', 'help', registry=None)
    self.assertEqual(0, self.registry.get_sample_value('c'))
    c1.inc(1)
    c1.inc(1)
    pid = 1
    c1.inc(1)
    # the collector sums the per-pid files...
    self.assertEqual(3, self.registry.get_sample_value('c'))
    # ...but the live value object only sees the current pid's file
    self.assertEqual(1, c1._value.get())

def test_gauge_all(self):
    g1 = Gauge('g', 'help', registry=None)
    core._ValueClass = core._MultiProcessValue(lambda: 456)
    g2 = Gauge('g', 'help', registry=None)
    self.assertEqual(
        0, self.registry.get_sample_value('g', {'pid': '123'}))
    self.assertEqual(
        0, self.registry.get_sample_value('g', {'pid': '456'}))
    g1.set(1)
    g2.set(2)
    mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
    self.assertEqual(
        1, self.registry.get_sample_value('g', {'pid': '123'}))
    self.assertEqual(
        2, self.registry.get_sample_value('g', {'pid': '456'}))

def test_histogram_adds(self):
    h1 = Histogram('h', 'help', registry=None)
    core._ValueClass = core._MultiProcessValue(lambda: 456)
    h2 = Histogram('h', 'help', registry=None)
    self.assertEqual(0, self.registry.get_sample_value('h_count'))
    self.assertEqual(0, self.registry.get_sample_value('h_sum'))
    self.assertEqual(
        0, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
    h1.observe(1)
    h2.observe(2)
    self.assertEqual(2, self.registry.get_sample_value('h_count'))
    self.assertEqual(3, self.registry.get_sample_value('h_sum'))
    self.assertEqual(
        2, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))

def test_initialization_detects_pid_change(self):
    pid = 0
    core._ValueClass = core._MultiProcessValue(lambda: pid)

    # can not inspect the files cache directly, as it's a closure, so we
    # check for the actual files themselves
    def files():
        fs = os.listdir(os.environ['prometheus_multiproc_dir'])
        fs.sort()
        return fs

    c1 = Counter('c1', 'c1', registry=None)
    self.assertEqual(files(), ['counter_0.db'])
    c2 = Counter('c2', 'c2', registry=None)
    self.assertEqual(files(), ['counter_0.db'])
    pid = 1
    c3 = Counter('c3', 'c3', registry=None)
    self.assertEqual(files(), ['counter_0.db', 'counter_1.db'])

def test_merge_no_accumulate(self):
    pid = 0
    core._ValueClass = core._MultiProcessValue(lambda: pid)
    labels = dict((i, i) for i in 'abcd')

    def add_label(key, value):
        updated = labels.copy()
        updated[key] = value
        return updated

    h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
    h.labels(**labels).observe(1)
    pid = 1
    h.labels(**labels).observe(5)

    path = os.path.join(os.environ['prometheus_multiproc_dir'], '*.db')
    files = glob.glob(path)
    metrics = dict(
        (m.name, m) for m in self.collector.merge(files, accumulate=False)
    )

    metrics['h'].samples.sort(
        key=lambda x: (x[0], float(x[1].get('le', 0)))
    )
    # with accumulate=False, bucket values are per-bucket counts rather than
    # cumulative totals, and no h_count sample is synthesized
    expected_histogram = [
        Sample('h_bucket', add_label('le', '0.005'), 0.0),
        Sample('h_bucket', add_label('le', '0.01'), 0.0),
        Sample('h_bucket', add_label('le', '0.025'), 0.0),
        Sample('h_bucket', add_label('le', '0.05'), 0.0),
        Sample('h_bucket', add_label('le', '0.075'), 0.0),
        Sample('h_bucket', add_label('le', '0.1'), 0.0),
        Sample('h_bucket', add_label('le', '0.25'), 0.0),
        Sample('h_bucket', add_label('le', '0.5'), 0.0),
        Sample('h_bucket', add_label('le', '0.75'), 0.0),
        Sample('h_bucket', add_label('le', '1.0'), 1.0),
        Sample('h_bucket', add_label('le', '2.5'), 0.0),
        Sample('h_bucket', add_label('le', '5.0'), 1.0),
        Sample('h_bucket', add_label('le', '7.5'), 0.0),
        Sample('h_bucket', add_label('le', '10.0'), 0.0),
        Sample('h_bucket', add_label('le', '+Inf'), 0.0),
        Sample('h_sum', labels, 6.0),
    ]
    self.assertEqual(metrics['h'].samples, expected_histogram)

def start_process(repository, kafka_config):
    # recreate the multiprocess value class inside the child so that metric
    # files are keyed by this process's pid
    core._ValueClass = core._MultiProcessValue()
    LOG.info("start process: {}".format(repository))
    m_persister = persister.Persister(kafka_config, cfg.CONF.zookeeper,
                                      repository)
    m_persister.run()

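# Illustrative sketch (hypothetical, not from the original source) of how a
# parent might launch start_process in a child: the value class must be
# recreated after the fork because the parent's cached pid would otherwise
# leak into the child's metric files.
import multiprocessing


def launch(repository, kafka_config):
    proc = multiprocessing.Process(
        target=start_process, args=(repository, kafka_config))
    proc.start()
    return proc
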
def test_prometheus_cleanup(registry):
    pid = 1

    def getpid():
        return pid

    # override use of os.getpid. _ValueClass is recreated after every test,
    # so we don't need to clean up
    from prometheus_client import core
    core._ValueClass = core._MultiProcessValue(getpid)

    histogram = metrics.Histogram(
        name='histogram',
        documentation='test histogram',
        labelnames=['foo', 'bar', 'baz'],
        statsd='{name}.{label}',
        registry=registry,
    )
    counter = metrics.Counter(
        name='counter',
        documentation='test counter',
        labelnames=['foo', 'bar', 'baz'],
        statsd='{name}.{label}',
        registry=registry,
    )

    from prometheus_client.multiprocess import MultiProcessCollector
    collector = MultiProcessCollector(registry)
    labels = {'foo': 'foo', 'bar': 'bar', 'baz': 'baz'}

    def collect():
        return {m.name: m for m in collector.collect()}

    def files():
        return sorted(os.listdir(os.environ['prometheus_multiproc_dir']))

    counter.inc(1, **labels)
    histogram.observe(0.5, **labels)
    histogram.observe(2.5, **labels)

    assert files() == [
        'counter_1.db',
        'histogram_1.db',
    ]

    before = collect()
    metrics.prometheus_cleanup_worker(pid)
    after = collect()
    assert files() == [
        'counter_archive.db',
        'histogram_archive.db',
    ]
    assert before == after  # magic!

    pid += 1

    # new worker, create some new metrics, check they are all combined
    counter.inc(2, **labels)
    histogram.observe(0.5, **labels)
    histogram.observe(2.5, **labels)

    later = collect()
    assert files() == [
        'counter_2.db',
        'counter_archive.db',
        'histogram_2.db',
        'histogram_archive.db',
    ]

    # check counter is correct
    assert later['counter'].samples == [
        Sample(counter_name('counter_total'), labels, 3.0),
    ]

    expected_histogram = [
        Sample('histogram_bucket', dict(le='0.005', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.01', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.025', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.05', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.075', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.1', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.25', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.5', **labels), 2.0),
        Sample('histogram_bucket', dict(le='0.75', **labels), 2.0),
        Sample('histogram_bucket', dict(le='1.0', **labels), 2.0),
        Sample('histogram_bucket', dict(le='2.5', **labels), 4.0),
        Sample('histogram_bucket', dict(le='5.0', **labels), 4.0),
        Sample('histogram_bucket', dict(le='7.5', **labels), 4.0),
        Sample('histogram_bucket', dict(le='10.0', **labels), 4.0),
        Sample('histogram_bucket', dict(le='+Inf', **labels), 4.0),
        Sample('histogram_count', labels, 4.0),
        Sample('histogram_sum', labels, 6.0),
    ]

    # check histogram is correct
    later['histogram'].samples.sort(key=metrics.histogram_sorter)
    assert later['histogram'].samples == expected_histogram

    # check the final files produce the correct numbers
    metrics.prometheus_cleanup_worker(pid)
    final = collect()
    assert files() == [
        'counter_archive.db',
        'histogram_archive.db',
    ]
    final['histogram'].samples.sort(key=metrics.histogram_sorter)
    assert later == final

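# A plausible sketch of the sort key used above (an assumption -- the real
# metrics.histogram_sorter is defined in talisker): it mirrors the inline
# key in the other tests, ordering samples by name and then by numeric 'le'
# label so buckets compare in bucket order rather than lexically.
def histogram_sorter(sample):
    name, labels = sample[0], sample[1]
    return name, float(labels.get('le', 0))
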
def setUp(self):
    self.tempdir = tempfile.mkdtemp()
    os.environ['prometheus_multiproc_dir'] = self.tempdir
    core._ValueClass = core._MultiProcessValue(lambda: 123)
    self.registry = CollectorRegistry()
    MultiProcessCollector(self.registry, self.tempdir)

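# A matching tearDown sketch (an assumption; not in the original snippet) to
# avoid leaking the temp directory and environment variable across tests.
def tearDown(self):
    import shutil
    shutil.rmtree(self.tempdir, ignore_errors=True)
    os.environ.pop('prometheus_multiproc_dir', None)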