def test_one_summary_format_with_const_labels(self):
    """A summary carrying const labels marshalls to the expected protobuf."""
    metric_info = {
        'name': "logged_users_total",
        'doc': "Logged users in the application",
        'const_labels': {"app": "my_app"},
    }
    sample_labels = {'handler': '/static'}
    observations = [3, 5.2, 13, 4]

    collector = Summary(**metric_info)
    for observation in observations:
        collector.add(sample_labels, observation)

    # Quantiles, sum and count expected after the observations above.
    expected_data = [
        (sample_labels,
         {0.5: 4.0, 0.9: 5.2, 0.99: 5.2, "sum": 25.2, "count": 4}),
    ]
    expected = self._create_protobuf_object(
        metric_info, expected_data, pmp.SUMMARY, metric_info['const_labels'])

    formatter = BinaryFormatter()
    marshalled = formatter.marshall_collector(collector)

    self.assertTrue(self._protobuf_metric_equal(expected, marshalled))
def test_registry_marshall_summary(self):
    """A registry containing one summary marshalls to the expected bytes.

    Fix: removed the dead, commented-out ``metric_data`` block that was
    left behind from an earlier version of this test.
    """
    metric_name = "summary_test"
    metric_help = "A summary."
    summary_data = (({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),)

    summary = Summary(metric_name, metric_help, const_labels={"type": "summary"})
    for labels, values in summary_data:
        for v in values:
            summary.add(labels, v)

    registry = Registry()
    registry.register(summary)

    # Pre-computed protobuf wire format for the summary configured above.
    valid_result = (
        b"\x99\x01\n\x0csummary_test\x12\nA summary."
        b'\x18\x02"{\n\r\n\x08s_sample\x12\x011\n\x10\n'
        b"\x0bs_subsample\x12\x01b\n\x0f\n\x04type\x12\x07"
        b'summary"G\x08\x16\x11\x00\x00\x00\x00\x90"\xf8@'
        b"\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00"
        b"\x00\x00\x00\x00\x8b\xb0@\x1a\x12\t\xcd\xcc\xcc"
        b"\xcc\xcc\xcc\xec?\x11\x00\x00\x00\x00\x00v\xb1@"
        b"\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00"
        b"\x00\x00\xa5\xb1@"
    )
    f = binary.BinaryFormatter()
    self.assertEqual(valid_result, f.marshall(registry))
def test_registry_marshall_summary(self):
    """Marshalling a registry repeatedly yields identical, correct bytes.

    Fix: the data-loading comprehension was used purely for its side
    effects, building and discarding a throwaway list; replaced with
    explicit loops.
    """
    format_times = 10

    summary_data = (({'s_sample': '1', 's_subsample': 'b'}, range(4000, 5000, 47)),)

    registry = Registry()
    summary = Summary("summary_test", "A summary.", {'type': "summary"})

    # Add data
    for labels, values in summary_data:
        for v in values:
            summary.add(labels, v)

    registry.register(summary)

    # Pre-computed protobuf wire format for the summary configured above.
    valid_result = (b'\x99\x01\n\x0csummary_test\x12\nA summary.'
                    b'\x18\x02"{\n\r\n\x08s_sample\x12\x011\n\x10\n'
                    b'\x0bs_subsample\x12\x01b\n\x0f\n\x04type\x12\x07'
                    b'summary"G\x08\x16\x11\x00\x00\x00\x00\x90"\xf8@'
                    b'\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00'
                    b'\x00\x00\x00\x00\x8b\xb0@\x1a\x12\t\xcd\xcc\xcc'
                    b'\xcc\xcc\xcc\xec?\x11\x00\x00\x00\x00\x00v\xb1@'
                    b'\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00'
                    b'\x00\x00\xa5\xb1@')
    f = BinaryFormatter()

    # Check multiple times to ensure multiple marshalling requests
    # produce the same output.
    for _ in range(format_times):
        self.assertEqual(valid_result, f.marshall(registry))
def test_summary_format_text(self):
    """Marshalling a summary collector yields the exact text exposition."""
    config = {
        "name": "prometheus_target_interval_length_seconds",
        "doc": "Actual intervals between scrapes.",
        "const_labels": {},
    }
    sample_labels = {"interval": "5s"}
    observations = [3, 5.2, 13, 4]

    expected = """# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="5s",quantile="0.5"} 4.0
prometheus_target_interval_length_seconds{interval="5s",quantile="0.9"} 5.2
prometheus_target_interval_length_seconds{interval="5s",quantile="0.99"} 5.2
prometheus_target_interval_length_seconds_count{interval="5s"} 4
prometheus_target_interval_length_seconds_sum{interval="5s"} 25.2"""

    collector = Summary(**config)
    for obs in observations:
        collector.add(sample_labels, obs)

    formatter = text.TextFormatter()
    self.assertEqual(expected, formatter.marshall_collector(collector))
def test_single_summary_format(self):
    """A summary with no labels renders lines without a label block."""
    config = {
        "name": "logged_users_total",
        "doc": "Logged users in the application",
        "const_labels": {},
    }
    no_labels = {}  # deliberately empty: this metric carries no labels
    observations = [3, 5.2, 13, 4]

    expected_lines = (
        "# HELP logged_users_total Logged users in the application",
        "# TYPE logged_users_total summary",
        'logged_users_total{quantile="0.5"} 4.0',
        'logged_users_total{quantile="0.9"} 5.2',
        'logged_users_total{quantile="0.99"} 5.2',
        "logged_users_total_count 4",
        "logged_users_total_sum 25.2",
    )

    collector = Summary(**config)
    for obs in observations:
        collector.add(no_labels, obs)

    formatter = text.TextFormatter()
    rendered = formatter.marshall_lines(collector)

    # Order of emitted lines is not guaranteed; compare sorted.
    self.assertEqual(sorted(expected_lines), sorted(rendered))
def test_summary_format_timestamp(self):
    """Timestamped text output matches the expected pattern.

    The trailing timestamp on each sample line is matched with a regex
    because its value depends on the wall clock.

    Fix: the decimal point inside the timestamp groups is now escaped
    (``\\.``); the previous unescaped ``.`` matched any character.
    """
    data = {
        "name": "prometheus_target_interval_length_seconds",
        "doc": "Actual intervals between scrapes.",
        "const_labels": {},
    }
    labels = {"interval": "5s"}
    values = [3, 5.2, 13, 4]

    result_regex = r"""# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="5s",quantile="0.5"} 4.0 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds{interval="5s",quantile="0.9"} 5.2 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds{interval="5s",quantile="0.99"} 5.2 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds_count{interval="5s"} 4 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds_sum{interval="5s"} 25.2 \d*(?:\.\d*)?$"""

    s = Summary(**data)
    for i in values:
        s.add(labels, i)

    # True enables timestamp emission on each sample line.
    f = TextFormatter(True)
    result = f.marshall_collector(s)

    self.assertTrue(re.match(result_regex, result))
async def test_summary(self):
    """Summary metrics are exposed correctly over the text endpoint.

    Fix: ``aiohttp.ClientSession`` is an asynchronous context manager;
    using it in a plain ``with`` block is unsupported in aiohttp 3.x.
    Also removed the duplicate status assertion.
    """
    # Add some metrics
    data = [3, 5.2, 13, 4]
    label = {'data': 1}
    s = Summary("test_summary", "Test Summary.", {'test': "test_summary"})
    self.registry.register(s)
    for i in data:
        s.add(label, i)

    expected_data = """# HELP test_summary Test Summary.
# TYPE test_summary summary
test_summary_count{data="1",test="test_summary"} 4
test_summary_sum{data="1",test="test_summary"} 25.2
test_summary{data="1",quantile="0.5",test="test_summary"} 4.0
test_summary{data="1",quantile="0.9",test="test_summary"} 5.2
test_summary{data="1",quantile="0.99",test="test_summary"} 5.2
"""

    async with aiohttp.ClientSession(loop=self.loop) as session:
        headers = {ACCEPT: 'text/plain; version=0.0.4'}
        async with session.get(self.metrics_url, headers=headers) as resp:
            self.assertEqual(200, resp.status)
            content = await resp.read()
            self.assertEqual("text/plain; version=0.0.4; charset=utf-8",
                             resp.headers.get(CONTENT_TYPE))
            self.assertEqual(expected_data, content.decode())
def test_single_summary_format(self):
    """Summary without labels: sample lines carry only the quantile label."""
    metric_kwargs = {
        "name": "logged_users_total",
        "doc": "Logged users in the application",
        "const_labels": {},
    }
    labels = {}  # This one hasn't got labels
    samples = [3, 5.2, 13, 4]

    summary = Summary(**metric_kwargs)
    for sample in samples:
        summary.add(labels, sample)

    # Sorted so the comparison is independent of emission order.
    expected = sorted((
        "# HELP logged_users_total Logged users in the application",
        "# TYPE logged_users_total summary",
        'logged_users_total{quantile="0.5"} 4.0',
        'logged_users_total{quantile="0.9"} 5.2',
        'logged_users_total{quantile="0.99"} 5.2',
        "logged_users_total_count 4",
        "logged_users_total_sum 25.2",
    ))

    actual = sorted(TextFormatter().marshall_lines(summary))
    self.assertEqual(expected, actual)
def test_one_summary_format(self):
    """A single labelled labelset renders its quantile/count/sum lines."""
    metric_kwargs = {
        "name": "logged_users_total",
        "doc": "Logged users in the application",
        "const_labels": {},
    }
    sample_labels = {"handler": "/static"}
    observations = [3, 5.2, 13, 4]

    expected_lines = (
        "# HELP logged_users_total Logged users in the application",
        "# TYPE logged_users_total summary",
        'logged_users_total{handler="/static",quantile="0.5"} 4.0',
        'logged_users_total{handler="/static",quantile="0.9"} 5.2',
        'logged_users_total{handler="/static",quantile="0.99"} 5.2',
        'logged_users_total_count{handler="/static"} 4',
        'logged_users_total_sum{handler="/static"} 25.2',
    )

    collector = Summary(**metric_kwargs)
    for obs in observations:
        collector.add(sample_labels, obs)

    formatter = text.TextFormatter()
    rendered = formatter.marshall_lines(collector)

    # Line order is unspecified; compare after sorting.
    self.assertEqual(sorted(expected_lines), sorted(rendered))
def test_summary_format_text(self):
    """marshall_collector renders the summary as full exposition text."""
    kwargs = {
        "name": "prometheus_target_interval_length_seconds",
        "doc": "Actual intervals between scrapes.",
        "const_labels": {},
    }
    labelset = {"interval": "5s"}
    observations = [3, 5.2, 13, 4]

    summary = Summary(**kwargs)
    for value in observations:
        summary.add(labelset, value)

    expected = """# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="5s",quantile="0.5"} 4.0
prometheus_target_interval_length_seconds{interval="5s",quantile="0.9"} 5.2
prometheus_target_interval_length_seconds{interval="5s",quantile="0.99"} 5.2
prometheus_target_interval_length_seconds_count{interval="5s"} 4
prometheus_target_interval_length_seconds_sum{interval="5s"} 25.2"""

    self.assertEqual(expected, TextFormatter().marshall_collector(summary))
def test_one_summary_format(self):
    """One labelled labelset is rendered into quantile, count and sum lines."""
    summary = Summary(
        **{
            "name": "logged_users_total",
            "doc": "Logged users in the application",
            "const_labels": {},
        }
    )
    labelset = {"handler": "/static"}
    for sample in [3, 5.2, 13, 4]:
        summary.add(labelset, sample)

    wanted = (
        "# HELP logged_users_total Logged users in the application",
        "# TYPE logged_users_total summary",
        'logged_users_total{handler="/static",quantile="0.5"} 4.0',
        'logged_users_total{handler="/static",quantile="0.9"} 5.2',
        'logged_users_total{handler="/static",quantile="0.99"} 5.2',
        'logged_users_total_count{handler="/static"} 4',
        'logged_users_total_sum{handler="/static"} 25.2',
    )

    produced = TextFormatter().marshall_lines(summary)

    # Emission order is not part of the contract; sort both sides.
    self.assertEqual(sorted(wanted), sorted(produced))
def test_summary_format_timestamp(self):
    """Timestamped text output matches the expected pattern.

    The trailing timestamp on each sample line is matched with a regex
    because its value depends on the wall clock.

    Fix: the decimal point inside the timestamp groups is now escaped
    (``\\.``); the previous unescaped ``.`` matched any character.
    """
    data = {
        "name": "prometheus_target_interval_length_seconds",
        "doc": "Actual intervals between scrapes.",
        "const_labels": {},
    }
    labels = {"interval": "5s"}
    values = [3, 5.2, 13, 4]

    result_regex = r"""# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="5s",quantile="0.5"} 4.0 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds{interval="5s",quantile="0.9"} 5.2 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds{interval="5s",quantile="0.99"} 5.2 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds_count{interval="5s"} 4 \d*(?:\.\d*)?
prometheus_target_interval_length_seconds_sum{interval="5s"} 25.2 \d*(?:\.\d*)?$"""

    s = Summary(**data)
    for i in values:
        s.add(labels, i)

    # True enables timestamp emission on each sample line.
    f = text.TextFormatter(True)
    result = f.marshall_collector(s)

    self.assertTrue(re.match(result_regex, result))
def setUp(self):
    """Build the Summary fixture shared by every test in this case."""
    # One constant label ("app") is attached to every exported sample.
    self.data = {
        'name': "http_request_duration_microseconds",
        'doc': "Request duration per application",
        'const_labels': {"app": "my_app"},
    }
    self.s = Summary(**self.data)
class TestSummary(unittest.TestCase):
    """Unit tests for the Summary metric collector."""

    def setUp(self):
        # Fresh fixture per test: a Summary with one constant label.
        self.data = {
            "name": "http_request_duration_microseconds",
            "doc": "Request duration per application",
            "const_labels": {"app": "my_app"},
        }
        self.s = Summary(**self.data)

    def test_add(self):
        # Each labelset must accumulate its own observation count.
        data = (
            {"labels": {"handler": "/static"}, "values": range(0, 500, 50)},
            {"labels": {"handler": "/p"}, "values": range(0, 1000, 100)},
            {"labels": {"handler": "/p/login"}, "values": range(0, 10000, 1000)},
        )
        for i in data:
            for j in i["values"]:
                self.s.add(i["labels"], j)
        for i in data:
            # NOTE(review): reaches into the private _observations counter
            # of the per-labelset estimator — confirm against the
            # quantile-estimator implementation.
            self.assertEqual(
                len(i["values"]), self.s.values[i["labels"]]._observations
            )

    def test_get(self):
        # get() returns the configured quantiles plus running sum/count.
        labels = {"handler": "/static"}
        values = [3, 5.2, 13, 4]
        for i in values:
            self.s.add(labels, i)
        data = self.s.get(labels)
        correct_data = {"sum": 25.2, "count": 4, 0.50: 4.0, 0.90: 5.2, 0.99: 5.2}
        self.assertEqual(correct_data, data)

    def test_add_get_without_labels(self):
        # labels=None observations accumulate under a single implicit key.
        labels = None
        values = [3, 5.2, 13, 4]
        for i in values:
            self.s.add(labels, i)
        self.assertEqual(1, len(self.s.values))
        correct_data = {"sum": 25.2, "count": 4, 0.50: 4.0, 0.90: 5.2, 0.99: 5.2}
        self.assertEqual(correct_data, self.s.get(labels))

    def test_add_wrong_types(self):
        # Non-numeric observations must be rejected with TypeError.
        labels = None
        values = ["3", (1, 2), {"1": 2}, True]
        for i in values:
            with self.assertRaises(TypeError) as context:
                self.s.add(labels, i)
            self.assertEqual(
                "Summary only works with digits (int, float)", str(context.exception)
            )
def setUp(self):
    """Create the Summary fixture shared by the tests below."""
    const_labels = {"app": "my_app"}
    self.data = {
        "name": "http_request_duration_microseconds",
        "doc": "Request duration per application",
        "const_labels": const_labels,
    }
    self.s = Summary(**self.data)
async def test_summary(self): """ check summary metric export """ # Add some metrics data = [3, 5.2, 13, 4] label = {"data": 1} s = Summary("test_summary", "Test Summary.", {"test": "test_summary"}) self.server.register(s) for i in data: s.add(label, i) expected_data = """# HELP test_summary Test Summary. # TYPE test_summary summary test_summary{data="1",quantile="0.5",test="test_summary"} 4.0 test_summary{data="1",quantile="0.9",test="test_summary"} 5.2 test_summary{data="1",quantile="0.99",test="test_summary"} 5.2 test_summary_count{data="1",test="test_summary"} 4 test_summary_sum{data="1",test="test_summary"} 25.2 """ async with aiohttp.ClientSession() as session: # Fetch as text async with session.get(self.metrics_url, headers={ACCEPT: text.TEXT_CONTENT_TYPE}) as resp: self.assertEqual(resp.status, 200) content = await resp.read() self.assertEqual(text.TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE)) self.assertEqual(expected_data, content.decode()) # Fetch as binary async with session.get(self.metrics_url, headers={ ACCEPT: binary.BINARY_CONTENT_TYPE }) as resp: self.assertEqual(resp.status, 200) content = await resp.read() self.assertEqual(binary.BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE)) metrics = pmp.decode(content) self.assertEqual(len(metrics), 1) mf = metrics[0] self.assertIsInstance(mf, pmp.MetricFamily) self.assertEqual(mf.type, pmp.SUMMARY) self.assertEqual(len(mf.metric), 1) self.assertEqual(len(mf.metric[0].summary.quantile), 3)
def test_multiple_summary_format(self):
    """Several labelsets on one summary are all rendered to text lines."""
    data = {
        "name": "prometheus_target_interval_length_seconds",
        "doc": "Actual intervals between scrapes.",
        "const_labels": {},
    }
    summary_data = (
        ({"interval": "5s"}, [3, 5.2, 13, 4]),
        ({"interval": "10s"}, [1.3, 1.2, 32.1, 59.2, 109.46, 70.9]),
        ({"interval": "10s", "method": "fast"}, [5, 9.8, 31, 9.7, 101.4]),
    )
    # Expected lines: HELP/TYPE header plus quantile, count and sum
    # samples for each of the three labelsets.
    valid_result = (
        "# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.",
        "# TYPE prometheus_target_interval_length_seconds summary",
        'prometheus_target_interval_length_seconds{interval="5s",quantile="0.5"} 4.0',
        'prometheus_target_interval_length_seconds{interval="5s",quantile="0.9"} 5.2',
        'prometheus_target_interval_length_seconds{interval="5s",quantile="0.99"} 5.2',
        'prometheus_target_interval_length_seconds_count{interval="5s"} 4',
        'prometheus_target_interval_length_seconds_sum{interval="5s"} 25.2',
        'prometheus_target_interval_length_seconds{interval="10s",quantile="0.5"} 32.1',
        'prometheus_target_interval_length_seconds{interval="10s",quantile="0.9"} 59.2',
        'prometheus_target_interval_length_seconds{interval="10s",quantile="0.99"} 59.2',
        'prometheus_target_interval_length_seconds_count{interval="10s"} 6',
        'prometheus_target_interval_length_seconds_sum{interval="10s"} 274.15999999999997',
        'prometheus_target_interval_length_seconds{interval="10s",method="fast",quantile="0.5"} 9.7',
        'prometheus_target_interval_length_seconds{interval="10s",method="fast",quantile="0.9"} 9.8',
        'prometheus_target_interval_length_seconds{interval="10s",method="fast",quantile="0.99"} 9.8',
        'prometheus_target_interval_length_seconds_count{interval="10s",method="fast"} 5',
        'prometheus_target_interval_length_seconds_sum{interval="10s",method="fast"} 156.9',
    )
    s = Summary(**data)
    for i in summary_data:
        for j in i[1]:
            s.add(i[0], j)
    f = text.TextFormatter()
    result = f.marshall_lines(s)
    # Line ordering is unspecified, so compare sorted sequences.
    self.assertEqual(sorted(valid_result), sorted(result))
async def test_summary(self): """ check summary metric export """ # Add some metrics data = [3, 5.2, 13, 4] label = {"data": 1} s = Summary("test_summary", "Test Summary.", {"test": "test_summary"}) self.server.register(s) for i in data: s.add(label, i) expected_data = """# HELP test_summary Test Summary. # TYPE test_summary summary test_summary{data="1",quantile="0.5",test="test_summary"} 4.0 test_summary{data="1",quantile="0.9",test="test_summary"} 5.2 test_summary{data="1",quantile="0.99",test="test_summary"} 5.2 test_summary_count{data="1",test="test_summary"} 4 test_summary_sum{data="1",test="test_summary"} 25.2 """ async with aiohttp.ClientSession() as session: # Fetch as text async with session.get( self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE} ) as resp: self.assertEqual(resp.status, 200) content = await resp.read() self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE)) self.assertEqual(expected_data, content.decode()) # Fetch as binary async with session.get( self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE} ) as resp: self.assertEqual(resp.status, 200) content = await resp.read() self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE)) metrics = pmp.decode(content) self.assertEqual(len(metrics), 1) mf = metrics[0] self.assertIsInstance(mf, pmp.MetricFamily) self.assertEqual(mf.type, pmp.SUMMARY) self.assertEqual(len(mf.metric), 1) self.assertEqual(len(mf.metric[0].summary.quantile), 3)
def setUp(self):
    # Each test works against this pre-configured Summary instance.
    self.data = dict(
        name="http_request_duration_microseconds",
        doc="Request duration per application",
        const_labels={"app": "my_app"},
    )
    self.s = Summary(**self.data)
def test_multiple_summary_format(self):
    """All labelsets of a summary appear in the marshalled text lines."""
    data = {
        "name": "prometheus_target_interval_length_seconds",
        "doc": "Actual intervals between scrapes.",
        "const_labels": {},
    }
    summary_data = (
        ({"interval": "5s"}, [3, 5.2, 13, 4]),
        ({"interval": "10s"}, [1.3, 1.2, 32.1, 59.2, 109.46, 70.9]),
        ({"interval": "10s", "method": "fast"}, [5, 9.8, 31, 9.7, 101.4]),
    )
    # Expected lines: HELP/TYPE header plus quantile, count and sum
    # samples for each of the three labelsets.
    valid_result = (
        "# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.",
        "# TYPE prometheus_target_interval_length_seconds summary",
        'prometheus_target_interval_length_seconds{interval="5s",quantile="0.5"} 4.0',
        'prometheus_target_interval_length_seconds{interval="5s",quantile="0.9"} 5.2',
        'prometheus_target_interval_length_seconds{interval="5s",quantile="0.99"} 5.2',
        'prometheus_target_interval_length_seconds_count{interval="5s"} 4',
        'prometheus_target_interval_length_seconds_sum{interval="5s"} 25.2',
        'prometheus_target_interval_length_seconds{interval="10s",quantile="0.5"} 32.1',
        'prometheus_target_interval_length_seconds{interval="10s",quantile="0.9"} 59.2',
        'prometheus_target_interval_length_seconds{interval="10s",quantile="0.99"} 59.2',
        'prometheus_target_interval_length_seconds_count{interval="10s"} 6',
        'prometheus_target_interval_length_seconds_sum{interval="10s"} 274.15999999999997',
        'prometheus_target_interval_length_seconds{interval="10s",method="fast",quantile="0.5"} 9.7',
        'prometheus_target_interval_length_seconds{interval="10s",method="fast",quantile="0.9"} 9.8',
        'prometheus_target_interval_length_seconds{interval="10s",method="fast",quantile="0.99"} 9.8',
        'prometheus_target_interval_length_seconds_count{interval="10s",method="fast"} 5',
        'prometheus_target_interval_length_seconds_sum{interval="10s",method="fast"} 156.9',
    )
    s = Summary(**data)
    for i in summary_data:
        for j in i[1]:
            s.add(i[0], j)
    f = TextFormatter()
    result = f.marshall_lines(s)
    # Line ordering is unspecified, so compare sorted sequences.
    self.assertEqual(sorted(valid_result), sorted(result))
async def test_timer(self):
    """@timer works on coroutines and methods and rejects non-Summary metrics."""
    metric = Summary("metric_label", "metric help")

    # decorator should work methods as well as functions
    @timer(metric, {"kind": "function"})
    async def timed_function():
        return

    await timed_function()
    function_stats = metric.get({"kind": "function"})
    self.assertEqual(function_stats["count"], 1)

    # decorator should work methods as well as functions
    class Holder(object):
        @timer(metric, {"kind": "method"})
        async def timed_method(self, arg1, arg2=None):
            return arg1 == "b_arg", arg2 == "arg_2"

    holder = Holder()
    outcome = await holder.timed_method("b_arg", arg2="arg_2")
    self.assertTrue(all(outcome))
    method_stats = metric.get({"kind": "method"})
    self.assertEqual(method_stats["count"], 1)

    # Only Summary metric type can be used with @timer, others should
    # raise an exception.
    with self.assertRaises(Exception) as cm:
        wrong_metric = Counter("metric_label", "metric help")

        @timer(wrong_metric)
        async def never_runs():
            return

    self.assertIn(
        "timer decorator expects a Summary metric but got:", str(cm.exception)
    )
async def test_timer(self):
    """The @timer decorator records call durations into a Summary."""
    summary_metric = Summary('metric_label', 'metric help')

    # decorator should work methods as well as functions
    @timer(summary_metric, {'kind': 'function'})
    async def sample_coroutine():
        return

    await sample_coroutine()
    self.assertEqual(summary_metric.get({'kind': 'function'})['count'], 1)

    # decorator should work methods as well as functions
    class Carrier(object):
        @timer(summary_metric, {'kind': 'method'})
        async def sample_method(self, arg1, arg2=None):
            return arg1 == 'b_arg', arg2 == 'arg_2'

    carrier = Carrier()
    flags = await carrier.sample_method('b_arg', arg2='arg_2')
    self.assertTrue(all(flags))
    self.assertEqual(summary_metric.get({'kind': 'method'})['count'], 1)

    # Only Summary metric type can be used with @timer, others should
    # raise an exception.
    with self.assertRaises(Exception) as cm:
        not_a_summary = Counter('metric_label', 'metric help')

        @timer(not_a_summary)
        async def rejected():
            return

    self.assertIn(
        "timer decorator expects a Summary metric but got:", str(cm.exception))
async def test_timer(self):
    """@timer accepts Summary metrics only and counts each invocation."""
    tracked = Summary('metric_label', 'metric help')

    # decorator should work methods as well as functions
    @timer(tracked, {'kind': 'function'})
    async def instrumented():
        return

    await instrumented()
    stats = tracked.get({'kind': 'function'})
    self.assertEqual(stats['count'], 1)

    # decorator should work methods as well as functions
    class Owner(object):
        @timer(tracked, {'kind': 'method'})
        async def action(self, arg1, arg2=None):
            return arg1 == 'b_arg', arg2 == 'arg_2'

    results = await Owner().action('b_arg', arg2='arg_2')
    self.assertTrue(all(results))
    stats = tracked.get({'kind': 'method'})
    self.assertEqual(stats['count'], 1)

    # Only Summary metric type can be used with @timer, others should
    # raise an exception.
    with self.assertRaises(Exception) as cm:
        bad_metric = Counter('metric_label', 'metric help')

        @timer(bad_metric)
        async def unused():
            return

    self.assertIn("timer decorator expects a Summary metric but got:",
                  str(cm.exception))
def test_summary_format_binary(self):
    """Check binary marshalling of a Summary in four configurations:
    plain, with const labels, with timestamps, and with multiple
    labelsets (metric instances)."""
    s = Summary(name=self.summary_metric_name, doc=self.summary_metric_help)

    # Add data to the collector
    for labels, values in self.summary_metric_data_values:
        for value in values:
            s.add(labels, value)

    f = binary.BinaryFormatter()
    result = f.marshall_collector(s)
    self.assertIsInstance(result, pmp.MetricFamily)
    self.assertEqual(len(result.metric), 1)

    # Construct the result expected to be received when the summary
    # collector is marshalled.
    expected_result = pmp.create_summary(
        self.summary_metric_name, self.summary_metric_help, self.summary_metric_data
    )

    self.assertEqual(result, expected_result)

    ######################################################################

    # Check metric with constant labels
    s = Summary(
        name=self.summary_metric_name,
        doc=self.summary_metric_help,
        const_labels=self.const_labels,
    )

    # Add data to the collector
    for labels, values in self.summary_metric_data_values:
        for value in values:
            s.add(labels, value)

    f = binary.BinaryFormatter()
    result = f.marshall_collector(s)
    self.assertIsInstance(result, pmp.MetricFamily)
    self.assertEqual(len(result.metric), 1)

    # Construct the result expected to be received when the summary
    # collector is marshalled.
    expected_result = pmp.create_summary(
        self.summary_metric_name,
        self.summary_metric_help,
        self.summary_metric_data,
        const_labels=self.const_labels,
    )

    self.assertEqual(result, expected_result)

    ######################################################################

    # Check metric with timestamps; the timestamp source is patched so
    # the marshalled output is deterministic.
    with unittest.mock.patch.object(
        pmp.utils, "_timestamp_ms", return_value=TEST_TIMESTAMP
    ):
        s = Summary(name=self.summary_metric_name, doc=self.summary_metric_help)

        # Add data to the collector
        for labels, values in self.summary_metric_data_values:
            for value in values:
                s.add(labels, value)

        f = binary.BinaryFormatter(timestamp=True)
        result = f.marshall_collector(s)
        self.assertIsInstance(result, pmp.MetricFamily)
        self.assertEqual(len(result.metric), 1)

        # Construct the result expected to be received when the summary
        # collector is marshalled.
        expected_result = pmp.create_summary(
            self.summary_metric_name,
            self.summary_metric_help,
            self.summary_metric_data,
            timestamp=True,
        )

        self.assertEqual(result, expected_result)

    ######################################################################

    # Check metric with multiple metric instances
    input_summary_data = (
        ({"interval": "5s"}, [3, 5.2, 13, 4]),
        ({"interval": "10s"}, [1.3, 1.2, 32.1, 59.2, 109.46, 70.9]),
        ({"interval": "10s", "method": "fast"}, [5, 9.8, 31, 9.7, 101.4]),
    )

    # Quantile/sum/count values expected for each labelset above.
    managed_summary_data = (
        (
            {"interval": "5s"},
            {0.5: 4.0, 0.9: 5.2, 0.99: 5.2, "sum": 25.2, "count": 4},
        ),
        (
            {"interval": "10s"},
            {
                0.5: 32.1,
                0.9: 59.2,
                0.99: 59.2,
                "sum": 274.15999999999997,
                "count": 6,
            },
        ),
        (
            {"interval": "10s", "method": "fast"},
            {0.5: 9.7, 0.9: 9.8, 0.99: 9.8, "sum": 156.9, "count": 5},
        ),
    )

    s = Summary(name=self.summary_metric_name, doc=self.summary_metric_help)

    # Add data to the collector
    for labels, values in input_summary_data:
        for value in values:
            s.add(labels, value)

    f = binary.BinaryFormatter()
    result = f.marshall_collector(s)
    self.assertIsInstance(result, pmp.MetricFamily)
    self.assertEqual(len(result.metric), 3)

    # Construct the result expected to be received when the summary
    # collector is marshalled.
    expected_result = pmp.create_summary(
        self.summary_metric_name, self.summary_metric_help, managed_summary_data
    )

    self.assertEqual(result, expected_result)
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # ------------------------------------------------------------------- from aioprometheus import Summary, Counter __all__ = ['PROM_METRICS'] PROM_METRICS = dict( requests_total = Counter('requests_total', 'requests total'), queries_total = Counter('queries_total', 'queries total'), queries_per_request = Summary('queries_per_request', 'number of queries per request'), server_latency = Summary('server_latency_milliseconds', 'server request handler latency'), server_qps = Summary('server_throughput_qps', 'server queries per second'), server_rps = Summary('server_throughput_rps', 'server requests per second'), process_request_latency = Summary('process_request_latency_milliseconds', 'process_request handler latency'), run_inference_latency = Summary('run_inference_latency_milliseconds', 'run_inference handler latency'), process_response_latency = Summary('process_response_latency_milliseconds', 'process_response handler latency (ms)'), inference_batch_size = Summary('inference_batch_size', 'inference batch size'), request_queue_size = Summary('request_queue_size',
def test_registry_marshall(self):
    """A registry with counter, gauge and summary renders stable text output.

    Summary sample values depend on the quantile estimator, so they are
    matched with ``\\d`` regex groups rather than literal numbers.
    """
    format_times = 3

    counter_data = (
        ({"c_sample": "1"}, 100),
        ({"c_sample": "2"}, 200),
        ({"c_sample": "3"}, 300),
        ({"c_sample": "1", "c_subsample": "b"}, 400),
    )
    gauge_data = (
        ({"g_sample": "1"}, 500),
        ({"g_sample": "2"}, 600),
        ({"g_sample": "3"}, 700),
        ({"g_sample": "1", "g_subsample": "b"}, 800),
    )
    summary_data = (
        ({"s_sample": "1"}, range(1000, 2000, 4)),
        ({"s_sample": "2"}, range(2000, 3000, 20)),
        ({"s_sample": "3"}, range(3000, 4000, 13)),
        ({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),
    )

    registry = Registry()
    counter = Counter("counter_test", "A counter.", {"type": "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {"type": "gauge"})
    summary = Summary("summary_test", "A summary.", {"type": "summary"})

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]

    registry.register(counter)
    registry.register(gauge)
    registry.register(summary)

    valid_regex = r"""# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test{quantile="0.5",s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.5",s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="2",type="summary"} 2\d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.5",s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
"""
    f = TextFormatter()
    self.maxDiff = None
    # Check multiple times to ensure multiple calls to marshalling
    # produce the same results
    for i in range(format_times):
        self.assertTrue(re.match(valid_regex, f.marshall(registry).decode()))
def test_registry_marshall(self):
    """A registry with counter, gauge and summary renders stable text output.

    Summary sample values depend on the quantile estimator, so they are
    matched with ``\\d`` regex groups rather than literal numbers.
    """
    format_times = 3

    counter_data = (
        ({"c_sample": "1"}, 100),
        ({"c_sample": "2"}, 200),
        ({"c_sample": "3"}, 300),
        ({"c_sample": "1", "c_subsample": "b"}, 400),
    )
    gauge_data = (
        ({"g_sample": "1"}, 500),
        ({"g_sample": "2"}, 600),
        ({"g_sample": "3"}, 700),
        ({"g_sample": "1", "g_subsample": "b"}, 800),
    )
    summary_data = (
        ({"s_sample": "1"}, range(1000, 2000, 4)),
        ({"s_sample": "2"}, range(2000, 3000, 20)),
        ({"s_sample": "3"}, range(3000, 4000, 13)),
        ({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),
    )

    registry = Registry()
    counter = Counter("counter_test", "A counter.", {"type": "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {"type": "gauge"})
    summary = Summary("summary_test", "A summary.", {"type": "summary"})

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]

    registry.register(counter)
    registry.register(gauge)
    registry.register(summary)

    valid_regex = r"""# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test{quantile="0.5",s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="1",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.5",s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="2",type="summary"} 2\d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="2",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.5",s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="3",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test_count{s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} \d*(?:.\d*)?
"""
    f = text.TextFormatter()
    self.maxDiff = None
    # Check multiple times to ensure multiple calls to marshalling
    # produce the same results
    for i in range(format_times):
        self.assertTrue(
            re.match(valid_regex, f.marshall(registry).decode()))
class TestSummary(unittest.TestCase):
    """Unit tests for the Summary metric collector."""

    def setUp(self):
        self.data = {
            'name': "http_request_duration_microseconds",
            'doc': "Request duration per application",
            'const_labels': {'app': "my_app"},
        }
        self.s = Summary(**self.data)

    def test_add(self):
        """Each label set accumulates its own observation count."""
        cases = (
            ({'handler': '/static'}, range(0, 500, 50)),
            ({'handler': '/p'}, range(0, 1000, 100)),
            ({'handler': '/p/login'}, range(0, 10000, 1000)),
        )
        for labels, values in cases:
            for value in values:
                self.s.add(labels, value)
        for labels, values in cases:
            self.assertEqual(
                len(values), self.s.values[labels]._observations)

    def test_get(self):
        """Quantiles, sum and count are reported for a label set."""
        labels = {'handler': '/static'}
        for value in [3, 5.2, 13, 4]:
            self.s.add(labels, value)
        expected = {
            'sum': 25.2,
            'count': 4,
            0.50: 4.0,
            0.90: 5.2,
            0.99: 5.2,
        }
        self.assertEqual(expected, self.s.get(labels))

    def test_add_get_without_labels(self):
        """Observations without labels collapse into a single series."""
        for value in [3, 5.2, 13, 4]:
            self.s.add(None, value)
        self.assertEqual(1, len(self.s.values))
        expected = {
            'sum': 25.2,
            'count': 4,
            0.50: 4.0,
            0.90: 5.2,
            0.99: 5.2,
        }
        self.assertEqual(expected, self.s.get(None))

    def test_add_wrong_types(self):
        """Non-numeric observations raise TypeError."""
        bad_values = ["3", (1, 2), {'1': 2}, True]
        for value in bad_values:
            with self.assertRaises(TypeError) as context:
                self.s.add(None, value)
            self.assertEqual(
                "Summary only works with digits (int, float)",
                str(context.exception))
def test_summary_format(self):
    """Binary marshalling of a summary matches a hand-built protobuf."""
    data = {
        'name': "logged_users_total",
        'doc': "Logged users in the application",
        'const_labels': {},
    }
    summary_data = (
        ({'interval': "5s"}, [3, 5.2, 13, 4]),
        ({'interval': "10s"}, [1.3, 1.2, 32.1, 59.2, 109.46, 70.9]),
        ({'interval': "10s", 'method': "fast"}, [5, 9.8, 31, 9.7, 101.4]),
    )

    s = Summary(**data)
    for labels, observations in summary_data:
        for value in observations:
            s.add(labels, value)

    # Expected quantile/sum/count values for each label set.
    tmp_valid_data = [
        ({'interval': "5s"},
         {0.5: 4.0, 0.9: 5.2, 0.99: 5.2, "sum": 25.2, "count": 4}),
        ({'interval': "10s"},
         {0.5: 32.1, 0.9: 59.2, 0.99: 59.2,
          "sum": 274.15999999999997, "count": 6}),
        ({'interval': "10s", 'method': "fast"},
         {0.5: 9.7, 0.9: 9.8, 0.99: 9.8, "sum": 156.9, "count": 5}),
    ]
    valid_result = self._create_protobuf_object(
        data, tmp_valid_data, pmp.SUMMARY)

    formatter = BinaryFormatter()
    result = formatter.marshall_collector(s)
    self.assertTrue(self._protobuf_metric_equal(valid_result, result))
# TYPE request_processing_seconds summary request_processing_seconds_count 77 request_processing_seconds_sum 38.19072341918945 request_processing_seconds{quantile="0.5"} 0.27150511741638184 request_processing_seconds{quantile="0.9"} 0.5016570091247559 request_processing_seconds{quantile="0.99"} 0.6077709197998047 """ import asyncio import random from aioprometheus import Service, Summary, timer # Create a metric to track time spent and requests made. REQUEST_TIME = Summary("request_processing_seconds", "Time spent processing request") # Decorate function with metric. @timer(REQUEST_TIME) async def handle_request(duration): """ A dummy function that takes some time """ await asyncio.sleep(duration) async def handle_requests(): # Start up the server to expose the metrics. await svr.start(port=8000) # Generate some requests. while True: await handle_request(random.random())
async def test_all(self):
    """End-to-end check of every collector type via the metrics endpoint.

    Registers one counter, gauge, summary and histogram with the test
    server, loads deterministic sample data, then fetches the metrics
    endpoint twice: once as text (compared literally) and once as
    protobuf binary (decoded and checked structurally).
    """
    counter_data = (
        ({"c_sample": "1"}, 100),
        ({"c_sample": "2"}, 200),
        ({"c_sample": "3"}, 300),
        ({"c_sample": "1", "c_subsample": "b"}, 400),
    )

    gauge_data = (
        ({"g_sample": "1"}, 500),
        ({"g_sample": "2"}, 600),
        ({"g_sample": "3"}, 700),
        ({"g_sample": "1", "g_subsample": "b"}, 800),
    )

    summary_data = (
        ({"s_sample": "1"}, range(1000, 2000, 4)),
        ({"s_sample": "2"}, range(2000, 3000, 20)),
        ({"s_sample": "3"}, range(3000, 4000, 13)),
        ({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),
    )

    histogram_data = (
        ({"h_sample": "1"}, [3, 14]),
        ({"h_sample": "2"}, range(1, 20, 2)),
        ({"h_sample": "3"}, range(1, 20, 2)),
        ({"h_sample": "1", "h_subsample": "b"}, range(1, 20, 2)),
    )

    counter = Counter("counter_test", "A counter.", {"type": "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {"type": "gauge"})
    summary = Summary("summary_test", "A summary.", {"type": "summary"})
    histogram = Histogram(
        "histogram_test",
        "A histogram.",
        {"type": "histogram"},
        buckets=[5.0, 10.0, 15.0],
    )

    self.server.register(counter)
    self.server.register(gauge)
    self.server.register(summary)
    self.server.register(histogram)

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]
    [histogram.observe(i[0], h) for i in histogram_data for h in i[1]]

    expected_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
# HELP histogram_test A histogram.
# TYPE histogram_test histogram
histogram_test_bucket{h_sample="1",le="5.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="10.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="15.0",type="histogram"} 2.0
histogram_test_bucket{h_sample="1",le="+Inf",type="histogram"} 2.0
histogram_test_count{h_sample="1",type="histogram"} 2.0
histogram_test_sum{h_sample="1",type="histogram"} 17.0
histogram_test_bucket{h_sample="2",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="2",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="2",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="2",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="2",type="histogram"} 10.0
histogram_test_sum{h_sample="2",type="histogram"} 100.0
histogram_test_bucket{h_sample="3",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="3",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="3",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="3",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="3",type="histogram"} 10.0
histogram_test_sum{h_sample="3",type="histogram"} 100.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="1",h_subsample="b",type="histogram"} 10.0
histogram_test_sum{h_sample="1",h_subsample="b",type="histogram"} 100.0
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test_count{s_sample="1",type="summary"} 250
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test_count{s_sample="2",type="summary"} 50
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
"""

    async with aiohttp.ClientSession() as session:

        # Fetch as text
        async with session.get(self.metrics_url,
                               headers={ACCEPT: text.TEXT_CONTENT_TYPE}) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(text.TEXT_CONTENT_TYPE,
                             resp.headers.get(CONTENT_TYPE))
            self.assertEqual(expected_data, content.decode())

        # Fetch as binary
        async with session.get(self.metrics_url,
                               headers={
                                   ACCEPT: binary.BINARY_CONTENT_TYPE
                               }) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(binary.BINARY_CONTENT_TYPE,
                             resp.headers.get(CONTENT_TYPE))
            metrics = pmp.decode(content)
            # One MetricFamily per registered collector.
            self.assertEqual(len(metrics), 4)
            for mf in metrics:
                self.assertIsInstance(mf, pmp.MetricFamily)
                if mf.type == pmp.COUNTER:
                    self.assertEqual(len(mf.metric), 4)
                elif mf.type == pmp.GAUGE:
                    self.assertEqual(len(mf.metric), 4)
                elif mf.type == pmp.SUMMARY:
                    self.assertEqual(len(mf.metric), 4)
                    self.assertEqual(len(mf.metric[0].summary.quantile), 3)
                elif mf.type == pmp.HISTOGRAM:
                    # 4 buckets = 3 explicit bounds + the implicit +Inf.
                    self.assertEqual(len(mf.metric), 4)
                    self.assertEqual(len(mf.metric[0].histogram.bucket), 4)
}

# Application-level metric collectors shared by the request handlers below.
ping_counter = Counter("health_check_counter", "total ping requests.")
latency_metric = Histogram(
    "request_latency_seconds",
    "request latency in seconds.",
    const_labels=prometheus_labels,
    buckets=[0.1, 0.5, 1.0, 5.0],
)
ram_metric = Gauge("memory_usage_bytes", "memory usage in bytes.",
                   const_labels=prometheus_labels)
cpu_metric = Gauge("cpu_usage_percent", "cpu usage percent.",
                   const_labels=prometheus_labels)
metrics_request_time = Summary(
    "metrics_processing_seconds",
    "time spent processing request for metrics in seconds.",
    const_labels=prometheus_labels)

# Collectors must be registered before they are exposed.
prometheus_service.registry.register(ping_counter)
prometheus_service.registry.register(latency_metric)
prometheus_service.registry.register(ram_metric)
prometheus_service.registry.register(cpu_metric)
prometheus_service.registry.register(metrics_request_time)


def verify_url_token(f):
    # Decorator: rejects requests whose query string lacks a ``token``
    # parameter. Assumes args[0] is the request object — TODO confirm
    # against callers.
    @wraps(f)
    def handler(*args, **kwargs):
        if not "token" in args[0].query:
            return xml_response(
                {
async def test_all(self):
    """End-to-end check of every collector type via the metrics endpoint.

    Registers one counter, gauge, summary and histogram, loads
    deterministic sample data, then fetches the metrics endpoint as text
    (compared literally) and as protobuf binary (decoded and checked
    structurally).
    """
    counter_data = (
        ({"c_sample": "1"}, 100),
        ({"c_sample": "2"}, 200),
        ({"c_sample": "3"}, 300),
        ({"c_sample": "1", "c_subsample": "b"}, 400),
    )

    gauge_data = (
        ({"g_sample": "1"}, 500),
        ({"g_sample": "2"}, 600),
        ({"g_sample": "3"}, 700),
        ({"g_sample": "1", "g_subsample": "b"}, 800),
    )

    summary_data = (
        ({"s_sample": "1"}, range(1000, 2000, 4)),
        ({"s_sample": "2"}, range(2000, 3000, 20)),
        ({"s_sample": "3"}, range(3000, 4000, 13)),
        ({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),
    )

    histogram_data = (
        ({"h_sample": "1"}, [3, 14]),
        ({"h_sample": "2"}, range(1, 20, 2)),
        ({"h_sample": "3"}, range(1, 20, 2)),
        ({"h_sample": "1", "h_subsample": "b"}, range(1, 20, 2)),
    )

    counter = Counter("counter_test", "A counter.", {"type": "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {"type": "gauge"})
    summary = Summary("summary_test", "A summary.", {"type": "summary"})
    histogram = Histogram(
        "histogram_test",
        "A histogram.",
        {"type": "histogram"},
        buckets=[5.0, 10.0, 15.0],
    )

    self.server.register(counter)
    self.server.register(gauge)
    self.server.register(summary)
    self.server.register(histogram)

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]
    [histogram.observe(i[0], h) for i in histogram_data for h in i[1]]

    expected_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
# HELP histogram_test A histogram.
# TYPE histogram_test histogram
histogram_test_bucket{h_sample="1",le="5.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="10.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="15.0",type="histogram"} 2.0
histogram_test_bucket{h_sample="1",le="+Inf",type="histogram"} 2.0
histogram_test_count{h_sample="1",type="histogram"} 2.0
histogram_test_sum{h_sample="1",type="histogram"} 17.0
histogram_test_bucket{h_sample="2",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="2",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="2",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="2",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="2",type="histogram"} 10.0
histogram_test_sum{h_sample="2",type="histogram"} 100.0
histogram_test_bucket{h_sample="3",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="3",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="3",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="3",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="3",type="histogram"} 10.0
histogram_test_sum{h_sample="3",type="histogram"} 100.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="1",h_subsample="b",type="histogram"} 10.0
histogram_test_sum{h_sample="1",h_subsample="b",type="histogram"} 100.0
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test_count{s_sample="1",type="summary"} 250
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test_count{s_sample="2",type="summary"} 50
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
"""

    async with aiohttp.ClientSession() as session:

        # Fetch as text
        async with session.get(
            self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
        ) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
            self.assertEqual(expected_data, content.decode())

        # Fetch as binary
        async with session.get(
            self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
        ) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
            metrics = pmp.decode(content)
            # One MetricFamily per registered collector.
            self.assertEqual(len(metrics), 4)
            for mf in metrics:
                self.assertIsInstance(mf, pmp.MetricFamily)
                if mf.type == pmp.COUNTER:
                    self.assertEqual(len(mf.metric), 4)
                elif mf.type == pmp.GAUGE:
                    self.assertEqual(len(mf.metric), 4)
                elif mf.type == pmp.SUMMARY:
                    self.assertEqual(len(mf.metric), 4)
                    self.assertEqual(len(mf.metric[0].summary.quantile), 3)
                elif mf.type == pmp.HISTOGRAM:
                    # 4 buckets = 3 explicit bounds + the implicit +Inf.
                    self.assertEqual(len(mf.metric), 4)
                    self.assertEqual(len(mf.metric[0].histogram.bucket), 4)
async def test_all(self):
    """Fetch text-format metrics for all collector types (legacy API).

    NOTE(review): this variant uses ``histogram.add`` (not ``observe``)
    and a plain ``with aiohttp.ClientSession(...)`` (not ``async with``)
    — presumably written against an older aioprometheus/aiohttp API;
    confirm before reuse.
    """
    counter_data = (
        ({'c_sample': '1'}, 100),
        ({'c_sample': '2'}, 200),
        ({'c_sample': '3'}, 300),
        ({'c_sample': '1', 'c_subsample': 'b'}, 400),
    )

    gauge_data = (
        ({'g_sample': '1'}, 500),
        ({'g_sample': '2'}, 600),
        ({'g_sample': '3'}, 700),
        ({'g_sample': '1', 'g_subsample': 'b'}, 800),
    )

    summary_data = (
        ({'s_sample': '1'}, range(1000, 2000, 4)),
        ({'s_sample': '2'}, range(2000, 3000, 20)),
        ({'s_sample': '3'}, range(3000, 4000, 13)),
        ({'s_sample': '1', 's_subsample': 'b'}, range(4000, 5000, 47)),
    )

    histogram_data = (
        ({'h_sample': '1'}, range(1, 20, 2)),
        ({'h_sample': '2'}, range(1, 20, 2)),
        ({'h_sample': '3'}, range(1, 20, 2)),
        ({'h_sample': '1', 'h_subsample': 'b'}, range(1, 20, 2)),
    )

    counter = Counter("counter_test", "A counter.", {'type': "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {'type': "gauge"})
    summary = Summary("summary_test", "A summary.", {'type': "summary"})
    histogram = Histogram("histogram_test", "A histogram.",
                          {'type': "histogram"},
                          buckets=[5.0, 10.0, 15.0])

    self.registry.register(counter)
    self.registry.register(gauge)
    self.registry.register(summary)
    self.registry.register(histogram)

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]
    [histogram.add(i[0], h) for i in histogram_data for h in i[1]]

    # Output below is sorted alphabetically by sample line.
    # NOTE(review): the histogram bucket counts (+Inf < 5.0) do not look
    # cumulative — presumably matching this library version's output;
    # verify against the formatter.
    expected_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
# HELP histogram_test A histogram.
# TYPE histogram_test histogram
histogram_test_bucket{h_sample="1",h_subsample="b",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="1",h_subsample="b",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="1",h_subsample="b",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="1",h_subsample="b",le="5.0",type="histogram"} 3
histogram_test_bucket{h_sample="1",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="1",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="1",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="1",le="5.0",type="histogram"} 3
histogram_test_bucket{h_sample="2",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="2",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="2",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="2",le="5.0",type="histogram"} 3
histogram_test_bucket{h_sample="3",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="3",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="3",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="3",le="5.0",type="histogram"} 3
histogram_test_count{h_sample="1",h_subsample="b",type="histogram"} 10
histogram_test_count{h_sample="1",type="histogram"} 10
histogram_test_count{h_sample="2",type="histogram"} 10
histogram_test_count{h_sample="3",type="histogram"} 10
histogram_test_sum{h_sample="1",h_subsample="b",type="histogram"} 100.0
histogram_test_sum{h_sample="1",type="histogram"} 100.0
histogram_test_sum{h_sample="2",type="histogram"} 100.0
histogram_test_sum{h_sample="3",type="histogram"} 100.0
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_count{s_sample="1",type="summary"} 250
summary_test_count{s_sample="2",type="summary"} 50
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
"""

    with aiohttp.ClientSession(loop=self.loop) as session:
        headers = {ACCEPT: 'text/plain; version=0.0.4'}
        async with session.get(self.metrics_url, headers=headers) as resp:
            assert resp.status == 200
            content = await resp.read()
            self.assertEqual("text/plain; version=0.0.4; charset=utf-8",
                             resp.headers.get(CONTENT_TYPE))
            self.assertEqual(200, resp.status)
            self.assertEqual(expected_data, content.decode())
def __init__(
    self,
    metrics_host="127.0.0.1",
    metrics_port: int = 5000,
    loop: BaseEventLoop = None,
) -> None:
    """Create the app's metrics service and register all collectors.

    :param metrics_host: interface the metrics HTTP endpoint binds to.
    :param metrics_port: port the metrics HTTP endpoint listens on.
    :param loop: event loop to use; defaults to the running default loop.
    """
    self.metrics_host = metrics_host
    self.metrics_port = metrics_port
    self.loop = loop or asyncio.get_event_loop()
    self.timer = None  # type: asyncio.Handle

    ######################################################################
    # Create application metrics and metrics service

    # Create a metrics server. The server will create a metrics collector
    # registry if one is not specifically created and passed in.
    self.msvr = Service()

    # Define some constant labels that need to be added to all metrics
    const_labels = {
        "host": socket.gethostname(),
        "app": f"{self.__class__.__name__}-{uuid.uuid4().hex}",
    }

    # Create metrics collectors

    # Create a counter metric to track requests
    self.requests_metric = Counter(
        "requests", "Number of requests.", const_labels=const_labels
    )

    # Collectors must be registered with the registry before they
    # get exposed.
    self.msvr.register(self.requests_metric)

    # Create a gauge metrics to track memory usage.
    self.ram_metric = Gauge(
        "memory_usage_bytes", "Memory usage in bytes.", const_labels=const_labels
    )
    self.msvr.register(self.ram_metric)

    # Create a gauge metrics to track CPU.
    self.cpu_metric = Gauge(
        "cpu_usage_percent", "CPU usage percent.", const_labels=const_labels
    )
    self.msvr.register(self.cpu_metric)

    # Summary with explicit quantile invariants (target quantile, error).
    self.payload_metric = Summary(
        "request_payload_size_bytes",
        "Request payload size in bytes.",
        const_labels=const_labels,
        invariants=[(0.50, 0.05), (0.99, 0.001)],
    )
    self.msvr.register(self.payload_metric)

    # Histogram with explicit bucket upper bounds (seconds).
    self.latency_metric = Histogram(
        "request_latency_seconds",
        "Request latency in seconds",
        const_labels=const_labels,
        buckets=[0.1, 0.5, 1.0, 5.0],
    )
    self.msvr.register(self.latency_metric)
# TYPE request_processing_seconds summary request_processing_seconds_count 77 request_processing_seconds_sum 38.19072341918945 request_processing_seconds{quantile="0.5"} 0.27150511741638184 request_processing_seconds{quantile="0.9"} 0.5016570091247559 request_processing_seconds{quantile="0.99"} 0.6077709197998047 ''' import asyncio import random from aioprometheus import Service, Summary, timer # Create a metric to track time spent and requests made. REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request') # Decorate function with metric. @timer(REQUEST_TIME) async def handle_request(duration): ''' A dummy function that takes some time ''' await asyncio.sleep(duration) async def handle_requests(): # Start up the server to expose the metrics. await svr.start(port=8000) # Generate some requests. while True: await handle_request(random.random())
# url_health_result = namedtuple('UrlHealthResult', ['url', 'status']) class UrlHealthResult(NamedTuple): """Holds infos about a check""" url: str status: int const_labels = { "host": socket.gethostname(), "app": "url_health_checker" # "app": f"{__file__}-{uuid.uuid4().hex}", } url_request_times = Summary("url_health_request_processing_seconds", "Time spent processing request", const_labels=const_labels) url_health_metric = Gauge("url_health", "Health status of a url.", const_labels=const_labels) url_requests_in_progress = Gauge("request_in_progress", "Number of requests in progress", const_labels=const_labels) def urls_from_env(env: str = 'URLS') -> str: """Returns the urls string define by a environment variable""" try: return os.environ[env] except KeyError as err: raise ValueError(
class TestSummary(unittest.TestCase):
    """Unit tests for the Summary metric collector."""

    def setUp(self):
        self.data = {
            "name": "http_request_duration_microseconds",
            "doc": "Request duration per application",
            "const_labels": {"app": "my_app"},
        }
        self.s = Summary(**self.data)

    def test_add(self):
        """Each label set accumulates its own observation count."""
        cases = (
            ({"handler": "/static"}, range(0, 500, 50)),
            ({"handler": "/p"}, range(0, 1000, 100)),
            ({"handler": "/p/login"}, range(0, 10000, 1000)),
        )
        for labels, values in cases:
            for value in values:
                self.s.add(labels, value)
        for labels, values in cases:
            self.assertEqual(
                len(values), self.s.values[labels]._observations)

    def test_get(self):
        """Quantiles, sum and count are reported for a label set."""
        labels = {"handler": "/static"}
        for value in [3, 5.2, 13, 4]:
            self.s.add(labels, value)
        expected = {"sum": 25.2, "count": 4, 0.50: 4.0, 0.90: 5.2, 0.99: 5.2}
        self.assertEqual(expected, self.s.get(labels))

    def test_add_get_without_labels(self):
        """Observations without labels collapse into a single series."""
        for value in [3, 5.2, 13, 4]:
            self.s.add(None, value)
        self.assertEqual(1, len(self.s.values))
        expected = {"sum": 25.2, "count": 4, 0.50: 4.0, 0.90: 5.2, 0.99: 5.2}
        self.assertEqual(expected, self.s.get(None))

    def test_add_wrong_types(self):
        """Non-numeric observations raise TypeError."""
        bad_values = ["3", (1, 2), {"1": 2}, True]
        for value in bad_values:
            with self.assertRaises(TypeError) as context:
                self.s.add(None, value)
            self.assertEqual(
                "Summary only works with digits (int, float)",
                str(context.exception))
class ExampleApp(object):
    """
    An example application that demonstrates how ``aioprometheus`` can
    be integrated and used within a Python application built upon
    asyncio.

    This application attempts to simulate a long running distributed
    system process, say a socket relay or some kind of message adapter.
    It is intentionally not hosting an existing web service in the
    application.

    In this case the aioprometheus.Service object is used to provide a
    new HTTP endpoint that can be used to expose Prometheus metrics on.

    If this application was a web service (i.e. already had an existing
    web interface) then the aioprometheus.Service object could be used
    as before to add another web interface or a different approach could
    be used that provides a metrics handler function for use with the
    existing web service.
    """

    def __init__(
        self,
        metrics_host="127.0.0.1",
        metrics_port: int = 5000,
        loop: BaseEventLoop = None,
    ) -> None:
        """Create the metrics service and register all collectors.

        :param metrics_host: interface the metrics HTTP endpoint binds to.
        :param metrics_port: port the metrics HTTP endpoint listens on.
        :param loop: event loop to use; defaults to the running default loop.
        """
        self.metrics_host = metrics_host
        self.metrics_port = metrics_port
        self.loop = loop or asyncio.get_event_loop()
        self.timer = None  # type: asyncio.Handle

        ######################################################################
        # Create application metrics and metrics service

        # Create a metrics server. The server will create a metrics collector
        # registry if one is not specifically created and passed in.
        self.msvr = Service()

        # Define some constant labels that need to be added to all metrics
        const_labels = {
            "host": socket.gethostname(),
            "app": f"{self.__class__.__name__}-{uuid.uuid4().hex}",
        }

        # Create metrics collectors

        # Create a counter metric to track requests
        self.requests_metric = Counter(
            "requests", "Number of requests.", const_labels=const_labels
        )

        # Collectors must be registered with the registry before they
        # get exposed.
        self.msvr.register(self.requests_metric)

        # Create a gauge metrics to track memory usage.
        self.ram_metric = Gauge(
            "memory_usage_bytes", "Memory usage in bytes.", const_labels=const_labels
        )
        self.msvr.register(self.ram_metric)

        # Create a gauge metrics to track CPU.
        self.cpu_metric = Gauge(
            "cpu_usage_percent", "CPU usage percent.", const_labels=const_labels
        )
        self.msvr.register(self.cpu_metric)

        # Summary with explicit quantile invariants (target quantile, error).
        self.payload_metric = Summary(
            "request_payload_size_bytes",
            "Request payload size in bytes.",
            const_labels=const_labels,
            invariants=[(0.50, 0.05), (0.99, 0.001)],
        )
        self.msvr.register(self.payload_metric)

        # Histogram with explicit bucket upper bounds (seconds).
        self.latency_metric = Histogram(
            "request_latency_seconds",
            "Request latency in seconds",
            const_labels=const_labels,
            buckets=[0.1, 0.5, 1.0, 5.0],
        )
        self.msvr.register(self.latency_metric)

    async def start(self):
        """ Start the application """
        await self.msvr.start(addr=self.metrics_host, port=self.metrics_port)
        logger.debug("Serving prometheus metrics on: %s", self.msvr.metrics_url)

        # Schedule a timer to update internal metrics. In a realistic
        # application metrics would be updated as needed. In this example
        # application a simple timer is used to emulate things happening,
        # which conveniently allows all metrics to be updated at once.
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)

    async def stop(self):
        """ Stop the application """
        await self.msvr.stop()
        if self.timer:
            self.timer.cancel()
        self.timer = None

    def on_timer_expiry(self):
        """ Update application to simulate work """

        # Update memory metrics
        self.ram_metric.set({"type": "virtual"}, psutil.virtual_memory().used)
        self.ram_metric.set({"type": "swap"}, psutil.swap_memory().used)

        # Update cpu metrics
        for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
            self.cpu_metric.set({"core": c}, p)

        # Incrementing a requests counter to emulate webserver app
        self.requests_metric.inc({"path": "/"})

        # Monitor request payload data to emulate webserver app
        self.payload_metric.add({"path": "/data"}, random.random() * 2 ** 10)

        # Monitor request latency to emulate webserver app
        self.latency_metric.add({"path": "/data"}, random.random() * 5)

        # re-schedule another metrics update
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)
def test_register_summary(self):
    """Registering a summary collector adds exactly one entry."""
    registry = CollectorRegistry()
    summary = Summary(**self.data)
    registry.register(summary)
    self.assertEqual(1, len(registry.collectors))
# Autoscaler tuning parameters, all overridable via environment variables.
DEFAULT_HIGH_THRESHOLD_CPU_PERCENTAGE = int(
    os.getenv('DEFAULT_HIGH_THRESHOLD_CPU_PERCENTAGE', 50))
DEFAULT_LOW_THRESHOLD_CPU_PERCENTAGE = int(
    os.getenv('DEFAULT_LOW_THRESHOLD_CPU_PERCENTAGE', 10))
DEFAULT_THRESHOLD_PERIOD_MINUTES = int(
    os.getenv('DEFAULT_THRESHOLD_PERIOD_MINUTES', 1))
DEFAULT_MINIMUM_INSTANCES = int(os.getenv('DEFAULT_MINIMUM_INSTANCES', 2))
DEFAULT_MAXIMUM_INSTANCES = int(os.getenv('DEFAULT_MAXIMUM_INSTANCES', 10))
DEFAULT_SCALE_UP_DELAY_MINUTES = int(
    os.getenv('DEFAULT_SCALE_UP_DELAY_MINUTES', 1))
DEFAULT_SCALE_DOWN_DELAY_MINUTES = int(
    os.getenv('DEFAULT_SCALE_DOWN_DELAY_MINUTES', 2))

# Optional Sentry error-reporting DSN; None when unset.
SENTRY_DSN = os.getenv('SENTRY_DSN')

# prometheus metrics:
PROM_GET_METRICS_TIME = Summary(
    'get_metrics', 'the time it takes to get application metrics')
PROM_AUTOSCALER_CHECK_TIME = Summary(
    'autoscaler_check_time',
    'the time it takes for the autoscaler check to complete')
PROM_SCALING_ACTIONS = Counter(
    'scaling_actions',
    'a count of the number of scaling actions that have taken place')
PROM_AUTOSCALING_ENABLED = Gauge(
    'autoscaling_enabled', 'number of apps that have autoscaling enabled')
PROM_INSUFFICIENT_DATA = Gauge('insufficient_data',
                               'number of apps that have insufficient data')

# Logging configuration (dict continues beyond this chunk).
dictConfig({
    'version': 1,
    'handlers': {
        'console': {