def test_serves_metrics(self):
    """A labelled counter written before archiving is served afterwards."""
    labels = {name: name for name in 'abcd'}
    counter = Counter('c', 'help', labelnames=labels.keys(), registry=None)
    counter.labels(**labels).inc(1)
    # Nothing is visible through the registry until the data is archived.
    self.assertEqual(None, self.registry.get_sample_value('c_total', labels))
    archive_metrics()
    expected = [Sample('c_total', labels, 1.0)]
    self.assertEqual(self.collector.collect()[0].samples, expected)
def test_gauge_all(self):
    """Gauges in 'all' mode keep a per-pid sample even after a pid dies."""
    # Simulate two distinct writer processes by swapping the ValueClass.
    values.ValueClass = MultiProcessValue(lambda: 123)
    g1 = Gauge('g', 'help', registry=None, multiprocess_mode='all')
    values.ValueClass = MultiProcessValue(lambda: 456)
    g2 = Gauge('g', 'help', registry=None, multiprocess_mode='all')
    # Both per-pid samples start out at zero.
    for pid_label in ('123', '456'):
        self.assertEqual(
            0, self.registry.get_sample_value('g', {'pid': pid_label}))
    g1.set(1)
    g2.set(2)
    archive_metrics()
    mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
    # 'all' mode retains the dead process's sample alongside the live one.
    self.assertEqual(1, self.registry.get_sample_value('g', {'pid': '123'}))
    self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'}))
def test_gauge_latest(self):
    """In LATEST mode the most recently timestamped write wins,
    regardless of which process performed it."""
    self.assertEqual(None, self.registry.get_sample_value('g'))
    g1 = Gauge('g', 'G', registry=None, multiprocess_mode=Gauge.LATEST)
    g1.set(0)
    self.assertEqual(0, self.registry.get_sample_value('g'))
    g1.set(123)
    self.assertEqual(123, self.registry.get_sample_value('g'))
    t0 = time.time()
    g1.set(1, timestamp=t0)
    self.assertEqual(1, self.registry.get_sample_value('g'))
    # Archiving must not disturb the latest value.
    archive_metrics()
    self.assertEqual(1, self.registry.get_sample_value('g'))
    # A second "process" writes with an older timestamp ...
    values.ValueClass = MultiProcessValue(lambda: '456789')
    g2 = Gauge('g', 'G', registry=None, multiprocess_mode=Gauge.LATEST)
    # NOTE(review): t0 - time.time() is a small negative number, i.e. a
    # timestamp well before t0 — presumably intentional so that g2's
    # write is strictly older than g1's; confirm this wasn't meant to be
    # something like t0 - 1.
    t1 = t0 - time.time()
    g2.set(2, timestamp=t1)
    # ... so g1's value (1) stays the latest, before and after archiving.
    self.assertEqual(1, self.registry.get_sample_value('g'))
    archive_metrics()
    self.assertEqual(1, self.registry.get_sample_value('g'))
def test_aggregates_live_and_archived_metrics(self):
    """Live and archived data are merged; process death finalizes live data."""
    pid = 456
    values.ValueClass = MultiProcessValue(lambda: pid)

    def mmap_files():
        # Sorted listing of the multiprocess directory's metric files.
        return sorted(os.listdir(os.environ['prometheus_multiproc_dir']))

    c1 = Counter('c1', 'c1', registry=None)
    c1.inc(1)
    self.assertIn('counter_456.db', mmap_files())
    # A full archive folds the live file into the archive and removes it.
    archive_metrics()
    self.assertNotIn('counter_456.db', mmap_files())
    self.assertEqual(1, self.registry.get_sample_value('c1_total'))

    pid = 789
    values.ValueClass = MultiProcessValue(lambda: pid)
    c1 = Counter('c1', 'c1', registry=None)
    c1.inc(2)
    g1 = Gauge('g1', 'g1', registry=None, multiprocess_mode="liveall")
    g1.set(5)
    self.assertIn('counter_789.db', mmap_files())

    # Pretend that pid 789 is live.
    archive_metrics(aggregate_only=True)

    # The live counter should be merged with the archived counter, and
    # the liveall gauge should be included.
    self.assertIn('counter_789.db', mmap_files())
    self.assertIn('gauge_liveall_789.db', mmap_files())
    self.assertEqual(3, self.registry.get_sample_value('c1_total'))
    self.assertEqual(
        5, self.registry.get_sample_value('g1', labels={u'pid': u'789'}))

    # Now pid 789 is dead.
    archive_metrics()

    # The formerly live counter's value should be archived, and the
    # liveall gauge should be removed completely.
    self.assertNotIn('counter_789.db', mmap_files())
    self.assertNotIn('gauge_liveall_789.db', mmap_files())
    self.assertEqual(3, self.registry.get_sample_value('c1_total'))
    self.assertEqual(
        None, self.registry.get_sample_value('g1', labels={u'pid': u'789'}))
def test_exceptions_release_lock(self):
    """An exception raised under advisory_lock must not leave it held."""
    with self.assertRaises(ValueError), advisory_lock(LOCK_EX):
        raise ValueError
    # If the lock had leaked, this non-blocking acquisition would fail.
    archive_metrics(blocking=False)
def test_cleanup_waits_for_collectors(self):
    """Non-blocking archiving fails while a collector holds the shared lock."""
    # IOError in python2, OSError in python3
    with self.assertRaises(EnvironmentError), advisory_lock(LOCK_SH):
        archive_metrics(blocking=False)