class TestInMemoryCollector(unittest.TestCase):
    """Exercise InMemoryCollector against on-disk multiprocess metric files."""

    def setUp(self):
        # Point the multiprocess machinery at a scratch directory and make
        # every metric write pretend it comes from pid 123.
        self.tempdir = tempfile.mkdtemp()
        os.environ['prometheus_multiproc_dir'] = self.tempdir
        values.ValueClass = MultiProcessValue(lambda: 123)
        self.registry = CollectorRegistry()
        self.collector = InMemoryCollector(self.registry)

    def tearDown(self):
        # Undo the environment and value-class patching and drop any state
        # cached by the multiprocess module between tests.
        del os.environ['prometheus_multiproc_dir']
        shutil.rmtree(self.tempdir)
        values.ValueClass = MutexValue
        prometheus_client.multiprocess._metrics_cache = prometheus_client.multiprocess.MetricsCache()

    def test_serves_empty_metrics_if_no_metrics_written(self):
        self.assertEqual(self.collector.collect(), [])

    def test_serves_empty_metrics_if_not_processed(self):
        unprocessed = Counter('c', 'help', registry=None)
        # The cleanup/archiver task hasn't run yet, no metrics
        self.assertEqual(None, self.registry.get_sample_value('c_total'))
        unprocessed.inc(1)
        # Still no metrics
        self.assertEqual(self.collector.collect(), [])

    def test_serves_metrics(self):
        label_map = {name: name for name in 'abcd'}
        counter = Counter('c', 'help', labelnames=label_map.keys(), registry=None)
        counter.labels(**label_map).inc(1)
        # Nothing is visible until the archiver has processed the .db files.
        self.assertEqual(None, self.registry.get_sample_value('c_total', label_map))
        archive_metrics()
        self.assertEqual(self.collector.collect()[0].samples,
                         [Sample('c_total', label_map, 1.0)])

    def test_displays_archive_stats(self):
        rendered = generate_latest(self.registry)
        self.assertIn("archive_duration_seconds", rendered)

    def test_aggregates_live_and_archived_metrics(self):
        current_pid = 456
        values.ValueClass = MultiProcessValue(lambda: current_pid)

        def db_files():
            # Sorted listing of the per-process metric files on disk.
            return sorted(os.listdir(os.environ['prometheus_multiproc_dir']))

        live_counter = Counter('c1', 'c1', registry=None)
        live_counter.inc(1)
        self.assertIn('counter_456.db', db_files())
        archive_metrics()
        self.assertNotIn('counter_456.db', db_files())
        self.assertEqual(1, self.registry.get_sample_value('c1_total'))

        current_pid = 789
        values.ValueClass = MultiProcessValue(lambda: current_pid)
        live_counter = Counter('c1', 'c1', registry=None)
        live_counter.inc(2)
        live_gauge = Gauge('g1', 'g1', registry=None, multiprocess_mode="liveall")
        live_gauge.set(5)
        self.assertIn('counter_789.db', db_files())

        # Pretend that pid 789 is live
        archive_metrics(aggregate_only=True)
        # The live counter should be merged with the archived counter, and the
        # liveall gauge should be included
        self.assertIn('counter_789.db', db_files())
        self.assertIn('gauge_liveall_789.db', db_files())
        self.assertEqual(3, self.registry.get_sample_value('c1_total'))
        self.assertEqual(
            5, self.registry.get_sample_value('g1', labels={u'pid': u'789'}))

        # Now pid 789 is dead
        archive_metrics()
        # The formerly live counter's value should be archived, and the
        # liveall gauge should be removed completely
        self.assertNotIn('counter_789.db', db_files())
        self.assertNotIn('gauge_liveall_789.db', db_files())
        self.assertEqual(3, self.registry.get_sample_value('c1_total'))
        self.assertEqual(
            None, self.registry.get_sample_value('g1', labels={u'pid': u'789'}))

    def _assert_bucket_counts(self, reg, expected):
        # Assert one 'h_bucket' sample per (le, count) pair, in order.
        for le, count in expected:
            self.assertEqual(count, reg.get_sample_value('h_bucket', {'le': le}))

    def _assert_all_zero(self, reg):
        # After a reset every tracked sample should read exactly zero.
        for sample_name in ('g', 'c_total', 's_count', 's_sum', 'h_count', 'h_sum'):
            self.assertEqual(0, reg.get_sample_value(sample_name))
        self._assert_bucket_counts(
            reg, [('1.0', 0), ('2.5', 0), ('5.0', 0), ('+Inf', 0)])

    def test_reset_registry(self):
        reg = CollectorRegistry()

        gauge = Gauge('g', 'help', registry=reg)
        gauge.inc()
        self.assertEqual(1, reg.get_sample_value('g'))

        counter = Counter('c_total', 'help', registry=reg)
        counter.inc()
        self.assertEqual(1, reg.get_sample_value('c_total'))

        summary = Summary('s', 'help', registry=reg)
        summary.observe(10)
        self.assertEqual(1, reg.get_sample_value('s_count'))
        self.assertEqual(10, reg.get_sample_value('s_sum'))

        histogram = Histogram('h', 'help', registry=reg)
        histogram.observe(2)
        self._assert_bucket_counts(
            reg, [('1.0', 0), ('2.5', 1), ('5.0', 1), ('+Inf', 1)])
        self.assertEqual(1, reg.get_sample_value('h_count'))
        self.assertEqual(2, reg.get_sample_value('h_sum'))

        # A reset must zero out every metric type.
        reg.reset()
        self._assert_all_zero(reg)

        # --------------------------
        # Metrics must keep working normally after the reset.
        gauge.inc()
        gauge.inc()
        gauge.inc()
        gauge.dec()
        self.assertEqual(2, reg.get_sample_value('g'))

        counter.inc()
        counter.inc()
        counter.inc()
        self.assertEqual(3, reg.get_sample_value('c_total'))

        summary.observe(10)
        summary.observe(5)
        self.assertEqual(2, reg.get_sample_value('s_count'))
        self.assertEqual(15, reg.get_sample_value('s_sum'))

        histogram.observe(2)
        histogram.observe(6)
        histogram.observe(1)
        self._assert_bucket_counts(
            reg, [('1.0', 1), ('2.5', 2), ('5.0', 2), ('+Inf', 3)])
        self.assertEqual(3, reg.get_sample_value('h_count'))
        self.assertEqual(9, reg.get_sample_value('h_sum'))

        # And a second reset zeroes everything again.
        reg.reset()
        self._assert_all_zero(reg)
class RQCollectorTestCase(unittest.TestCase):
    """Tests for the `RQCollector` class."""

    summary_metric = 'rq_request_processing_seconds'
    workers_metric = 'rq_workers'
    jobs_metric = 'rq_jobs'

    def setUp(self):
        """Prepare for the tests.

        The summary metric used to track the count and time in the
        `RQCollector.collect` method is automatically registered on the
        global REGISTRY.
        """
        # Create a registry for testing to replace the global REGISTRY
        self.registry = CollectorRegistry(auto_describe=True)

        # Default Summary class arguments values
        default_args = Summary.__init__.__defaults__
        # Build a matching defaults tuple with the default `registry`
        # argument swapped for the test registry.  Mocking
        # `prometheus_client.metrics.REGISTRY` doesn't work as expected
        # because default arguments are evaluated at definition time.
        patched_defaults = tuple(
            self.registry if isinstance(value, CollectorRegistry) else value
            for value in default_args
        )

        # Patch the default Summary class arguments
        patch('prometheus_client.metrics.Summary.__init__.__defaults__', patched_defaults).start()

        # On cleanup call patch.stopall
        self.addCleanup(patch.stopall)

    def test_multiple_instances_raise_ValueError(self, get_workers_stats, get_jobs_by_queue):
        """Creating multiple instances of `RQCollector` registers duplicate summary metric in the registry."""
        RQCollector()

        # The second instance tries to register the same summary metric again.
        with self.assertRaises(ValueError) as error:
            RQCollector()

        self.assertIn('Duplicated timeseries in CollectorRegistry', str(error.exception))

    def test_summary_metric(self, get_workers_stats, get_jobs_by_queue):
        """Test the summary metric that tracks the requests count and time."""
        collector = RQCollector()

        # Initial values before calling the `collect` method
        self.assertEqual(0, self.registry.get_sample_value(f'{self.summary_metric}_count'))
        self.assertEqual(0, self.registry.get_sample_value(f'{self.summary_metric}_sum'))

        # The `collect` method is a generator: exhaust it so the request
        # count and duration get recorded.
        list(collector.collect())

        self.assertEqual(1, self.registry.get_sample_value(f'{self.summary_metric}_count'))
        self.assertGreater(self.registry.get_sample_value(f'{self.summary_metric}_sum'), 0)

    def test_passed_connection_is_used(self, get_workers_stats, get_jobs_by_queue):
        """Test that the connection passed to `RQCollector` is used to get the workers and jobs."""
        get_workers_stats.return_value = []
        get_jobs_by_queue.return_value = {}

        connection = Mock()
        collector = RQCollector(connection)

        with patch('rq_exporter.collector.Connection') as Connection:
            list(collector.collect())

        Connection.assert_called_once_with(connection)
        get_workers_stats.assert_called_once_with()
        get_jobs_by_queue.assert_called_once_with()

    def test_metrics_with_empty_data(self, get_workers_stats, get_jobs_by_queue):
        """Test the workers and jobs metrics when there's no data."""
        get_workers_stats.return_value = []
        get_jobs_by_queue.return_value = {}

        self.registry.register(RQCollector())

        self.assertEqual(None, self.registry.get_sample_value(self.workers_metric))
        self.assertEqual(None, self.registry.get_sample_value(self.jobs_metric))

    def test_metrics_with_data(self, get_workers_stats, get_jobs_by_queue):
        """Test the workers and jobs metrics when there is data available."""
        workers = [
            {
                'name': 'worker_one',
                'queues': ['default'],
                'state': 'idle'
            },
            {
                'name': 'worker_two',
                'queues': ['high', 'default', 'low'],
                'state': 'busy'
            }
        ]

        jobs_by_queue = {
            'default': {
                JobStatus.QUEUED: 2,
                JobStatus.STARTED: 3,
                JobStatus.FINISHED: 15,
                JobStatus.FAILED: 5,
                JobStatus.DEFERRED: 1,
                JobStatus.SCHEDULED: 4
            },
            'high': {
                JobStatus.QUEUED: 10,
                JobStatus.STARTED: 4,
                JobStatus.FINISHED: 25,
                JobStatus.FAILED: 22,
                JobStatus.DEFERRED: 5,
                JobStatus.SCHEDULED: 1
            }
        }

        get_workers_stats.return_value = workers
        get_jobs_by_queue.return_value = jobs_by_queue

        # On registration the `collect` method is called
        self.registry.register(RQCollector())

        get_workers_stats.assert_called_once_with()
        get_jobs_by_queue.assert_called_once_with()

        # Each worker becomes one sample with value 1 and its labels set.
        for worker in workers:
            expected_labels = {
                'name': worker['name'],
                'state': worker['state'],
                'queues': ','.join(worker['queues'])
            }
            self.assertEqual(
                1,
                self.registry.get_sample_value(self.workers_metric, expected_labels)
            )

        # Each (queue, status) pair becomes one sample carrying its job count.
        for queue_name, status_counts in jobs_by_queue.items():
            for status, count in status_counts.items():
                self.assertEqual(
                    count,
                    self.registry.get_sample_value(
                        self.jobs_metric,
                        {'queue': queue_name, 'status': status}
                    )
                )