async def test_histogram(self):
    """ check histogram metric export """

    # Add some metrics
    data = [3, 5.2, 13, 4]
    label = {"data": 1}

    h = Histogram(
        "histogram_test",
        "Test Histogram.",
        {"type": "test_histogram"},
        buckets=[5.0, 10.0, 15.0],
    )
    self.server.register(h)

    for i in data:
        h.add(label, i)

    expected_data = """# HELP histogram_test Test Histogram.
# TYPE histogram_test histogram
histogram_test_bucket{data="1",le="5.0",type="test_histogram"} 2.0
histogram_test_bucket{data="1",le="10.0",type="test_histogram"} 3.0
histogram_test_bucket{data="1",le="15.0",type="test_histogram"} 4.0
histogram_test_bucket{data="1",le="+Inf",type="test_histogram"} 4.0
histogram_test_count{data="1",type="test_histogram"} 4.0
histogram_test_sum{data="1",type="test_histogram"} 25.2
"""

    async with aiohttp.ClientSession() as session:

        # Fetch as text
        async with session.get(
            self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
        ) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
            self.assertEqual(expected_data, content.decode())

        # Fetch as binary
        async with session.get(
            self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
        ) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
            metrics = pmp.decode(content)
            self.assertEqual(len(metrics), 1)
            mf = metrics[0]
            self.assertIsInstance(mf, pmp.MetricFamily)
            self.assertEqual(mf.type, pmp.HISTOGRAM)
            self.assertEqual(len(mf.metric), 1)
            self.assertEqual(len(mf.metric[0].histogram.bucket), 4)
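# The bucket values asserted above follow Prometheus' cumulative histogram
# semantics: each le bucket counts every observation less than or equal to its
# upper bound, +Inf counts all observations, and sum/count aggregate them.
# A minimal stand-alone sketch (plain Python, no aioprometheus API assumed)
# that reproduces the numbers used in the test above:

POS_INF = float("inf")


def cumulative_buckets(values, bounds):
    """ Return cumulative bucket counts plus sum and count for `values`. """
    result = {b: sum(1 for v in values if v <= b) for b in list(bounds) + [POS_INF]}
    result["sum"] = sum(values)
    result["count"] = len(values)
    return result


# For [3, 5.2, 13, 4] against buckets [5.0, 10.0, 15.0] this yields
# 5.0 -> 2, 10.0 -> 3, 15.0 -> 4, +Inf -> 4, count 4, sum 25.2,
# matching the exposition text asserted in the test.
print(cumulative_buckets([3, 5.2, 13, 4], [5.0, 10.0, 15.0]))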
def test_histogram_format_binary(self):
    h = Histogram(
        name=self.histogram_metric_name,
        doc=self.histogram_metric_help,
        buckets=self.histogram_metric_buckets,
    )

    # Add data to the collector
    for labels, values in self.histogram_metric_data_values:
        for value in values:
            h.add(labels, value)

    f = BinaryFormatter()

    result = f.marshall_collector(h)
    self.assertIsInstance(result, pmp.MetricFamily)
    self.assertEqual(len(result.metric), 1)

    # Construct the result expected when the histogram collector is
    # marshalled.
    expected_result = pmp.create_histogram(
        self.histogram_metric_name,
        self.histogram_metric_help,
        self.histogram_metric_data,
    )

    self.assertEqual(result, expected_result)

    ######################################################################

    # Check metric with constant labels
    h = Histogram(
        name=self.histogram_metric_name,
        doc=self.histogram_metric_help,
        const_labels=self.const_labels,
        buckets=self.histogram_metric_buckets,
    )

    # Add data to the collector
    for labels, values in self.histogram_metric_data_values:
        for value in values:
            h.add(labels, value)

    f = BinaryFormatter()

    result = f.marshall_collector(h)
    self.assertIsInstance(result, pmp.MetricFamily)
    self.assertEqual(len(result.metric), 1)

    # Construct the result expected when the histogram collector is
    # marshalled.
    expected_result = pmp.create_histogram(
        self.histogram_metric_name,
        self.histogram_metric_help,
        self.histogram_metric_data,
        const_labels=self.const_labels,
    )

    self.assertEqual(result, expected_result)

    ######################################################################

    # Check metric with timestamps
    with unittest.mock.patch.object(
        pmp.utils, "_timestamp_ms", return_value=TEST_TIMESTAMP
    ):
        h = Histogram(
            name=self.histogram_metric_name,
            doc=self.histogram_metric_help,
            buckets=self.histogram_metric_buckets,
        )

        # Add data to the collector
        for labels, values in self.histogram_metric_data_values:
            for value in values:
                h.add(labels, value)

        f = BinaryFormatter(timestamp=True)

        result = f.marshall_collector(h)
        self.assertIsInstance(result, pmp.MetricFamily)
        self.assertEqual(len(result.metric), 1)

        # Construct the result expected when the histogram collector is
        # marshalled.
        expected_result = pmp.create_histogram(
            self.histogram_metric_name,
            self.histogram_metric_help,
            self.histogram_metric_data,
            timestamp=True,
        )

        self.assertEqual(result, expected_result)
async def test_all(self):

    counter_data = (
        ({"c_sample": "1"}, 100),
        ({"c_sample": "2"}, 200),
        ({"c_sample": "3"}, 300),
        ({"c_sample": "1", "c_subsample": "b"}, 400),
    )

    gauge_data = (
        ({"g_sample": "1"}, 500),
        ({"g_sample": "2"}, 600),
        ({"g_sample": "3"}, 700),
        ({"g_sample": "1", "g_subsample": "b"}, 800),
    )

    summary_data = (
        ({"s_sample": "1"}, range(1000, 2000, 4)),
        ({"s_sample": "2"}, range(2000, 3000, 20)),
        ({"s_sample": "3"}, range(3000, 4000, 13)),
        ({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),
    )

    histogram_data = (
        ({"h_sample": "1"}, [3, 14]),
        ({"h_sample": "2"}, range(1, 20, 2)),
        ({"h_sample": "3"}, range(1, 20, 2)),
        ({"h_sample": "1", "h_subsample": "b"}, range(1, 20, 2)),
    )

    counter = Counter("counter_test", "A counter.", {"type": "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {"type": "gauge"})
    summary = Summary("summary_test", "A summary.", {"type": "summary"})
    histogram = Histogram(
        "histogram_test",
        "A histogram.",
        {"type": "histogram"},
        buckets=[5.0, 10.0, 15.0],
    )

    self.server.register(counter)
    self.server.register(gauge)
    self.server.register(summary)
    self.server.register(histogram)

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]
    [histogram.observe(i[0], h) for i in histogram_data for h in i[1]]

    expected_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
# HELP histogram_test A histogram.
# TYPE histogram_test histogram
histogram_test_bucket{h_sample="1",le="5.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="10.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="15.0",type="histogram"} 2.0
histogram_test_bucket{h_sample="1",le="+Inf",type="histogram"} 2.0
histogram_test_count{h_sample="1",type="histogram"} 2.0
histogram_test_sum{h_sample="1",type="histogram"} 17.0
histogram_test_bucket{h_sample="2",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="2",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="2",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="2",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="2",type="histogram"} 10.0
histogram_test_sum{h_sample="2",type="histogram"} 100.0
histogram_test_bucket{h_sample="3",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="3",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="3",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="3",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="3",type="histogram"} 10.0
histogram_test_sum{h_sample="3",type="histogram"} 100.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="1",h_subsample="b",type="histogram"} 10.0
histogram_test_sum{h_sample="1",h_subsample="b",type="histogram"} 100.0
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test_count{s_sample="1",type="summary"} 250
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test_count{s_sample="2",type="summary"} 50
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
"""

    async with aiohttp.ClientSession() as session:

        # Fetch as text
        async with session.get(
            self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
        ) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
            self.assertEqual(expected_data, content.decode())

        # Fetch as binary
        async with session.get(
            self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
        ) as resp:
            self.assertEqual(resp.status, 200)
            content = await resp.read()
            self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
            metrics = pmp.decode(content)
            self.assertEqual(len(metrics), 4)
            for mf in metrics:
                self.assertIsInstance(mf, pmp.MetricFamily)
                if mf.type == pmp.COUNTER:
                    self.assertEqual(len(mf.metric), 4)
                elif mf.type == pmp.GAUGE:
                    self.assertEqual(len(mf.metric), 4)
                elif mf.type == pmp.SUMMARY:
                    self.assertEqual(len(mf.metric), 4)
                    self.assertEqual(len(mf.metric[0].summary.quantile), 3)
                elif mf.type == pmp.HISTOGRAM:
                    self.assertEqual(len(mf.metric), 4)
                    self.assertEqual(len(mf.metric[0].histogram.bucket), 4)
import clamd

app = Quart(__name__)
app.config.from_pyfile('config.py')

# Configure metrics
app.registry = Registry()
app.request_counter = Counter('requests', 'Number of overall requests.')
app.registry.register(app.request_counter)
app.scan_counter = Counter('scans', 'Number of overall virus scans.')
app.registry.register(app.scan_counter)
app.infection_counter = Counter('infections', 'Number of infected files found.')
app.registry.register(app.infection_counter)
app.scan_duration_histogram = Histogram('scan_duration', 'Histogram over virus scan duration.')
app.registry.register(app.scan_duration_histogram)

# Configure logging
if app.config['LOGJSON']:
    do_not_log = ['/health', '/metrics']
    json_logging.init_quart(enable_json=True)
    json_logging.init_request_instrument(app, exclude_url_patterns=do_not_log)

logger = logging.getLogger('clamav-rest')
logger.setLevel(app.config['LOGLEVEL'])
logger.addHandler(logging.StreamHandler(sys.stdout))

# Configure clamd
try:
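# A hedged sketch (not part of the original snippet) of how the collectors
# configured above might be used from a Quart route. The `/scan` endpoint,
# the `scan_stream()` helper and its return value are hypothetical; only the
# Counter.inc() and Histogram.observe() calls mirror the aioprometheus usage
# shown elsewhere in this document.
import time

from quart import jsonify, request


@app.route("/scan", methods=["POST"])
async def scan():
    app.request_counter.inc({"path": "/scan"})
    data = await request.get_data()

    started = time.monotonic()
    infected = scan_stream(data)  # hypothetical helper wrapping clamd
    app.scan_duration_histogram.observe({"path": "/scan"}, time.monotonic() - started)

    app.scan_counter.inc({"path": "/scan"})
    if infected:
        app.infection_counter.inc({"path": "/scan"})
    return jsonify({"infected": infected})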
async def test_all(self):

    counter_data = (
        ({'c_sample': '1'}, 100),
        ({'c_sample': '2'}, 200),
        ({'c_sample': '3'}, 300),
        ({'c_sample': '1', 'c_subsample': 'b'}, 400),
    )

    gauge_data = (
        ({'g_sample': '1'}, 500),
        ({'g_sample': '2'}, 600),
        ({'g_sample': '3'}, 700),
        ({'g_sample': '1', 'g_subsample': 'b'}, 800),
    )

    summary_data = (
        ({'s_sample': '1'}, range(1000, 2000, 4)),
        ({'s_sample': '2'}, range(2000, 3000, 20)),
        ({'s_sample': '3'}, range(3000, 4000, 13)),
        ({'s_sample': '1', 's_subsample': 'b'}, range(4000, 5000, 47)),
    )

    histogram_data = (
        ({'h_sample': '1'}, range(1, 20, 2)),
        ({'h_sample': '2'}, range(1, 20, 2)),
        ({'h_sample': '3'}, range(1, 20, 2)),
        ({'h_sample': '1', 'h_subsample': 'b'}, range(1, 20, 2)),
    )

    counter = Counter("counter_test", "A counter.", {'type': "counter"})
    gauge = Gauge("gauge_test", "A gauge.", {'type': "gauge"})
    summary = Summary("summary_test", "A summary.", {'type': "summary"})
    histogram = Histogram(
        "histogram_test", "A histogram.", {'type': "histogram"},
        buckets=[5.0, 10.0, 15.0])

    self.registry.register(counter)
    self.registry.register(gauge)
    self.registry.register(summary)
    self.registry.register(histogram)

    # Add data
    [counter.set(c[0], c[1]) for c in counter_data]
    [gauge.set(g[0], g[1]) for g in gauge_data]
    [summary.add(i[0], s) for i in summary_data for s in i[1]]
    [histogram.add(i[0], h) for i in histogram_data for h in i[1]]

    expected_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
# HELP histogram_test A histogram.
# TYPE histogram_test histogram
histogram_test_bucket{h_sample="1",h_subsample="b",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="1",h_subsample="b",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="1",h_subsample="b",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="1",h_subsample="b",le="5.0",type="histogram"} 3
histogram_test_bucket{h_sample="1",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="1",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="1",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="1",le="5.0",type="histogram"} 3
histogram_test_bucket{h_sample="2",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="2",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="2",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="2",le="5.0",type="histogram"} 3
histogram_test_bucket{h_sample="3",le="+Inf",type="histogram"} 2
histogram_test_bucket{h_sample="3",le="10.0",type="histogram"} 2
histogram_test_bucket{h_sample="3",le="15.0",type="histogram"} 3
histogram_test_bucket{h_sample="3",le="5.0",type="histogram"} 3
histogram_test_count{h_sample="1",h_subsample="b",type="histogram"} 10
histogram_test_count{h_sample="1",type="histogram"} 10
histogram_test_count{h_sample="2",type="histogram"} 10
histogram_test_count{h_sample="3",type="histogram"} 10
histogram_test_sum{h_sample="1",h_subsample="b",type="histogram"} 100.0
histogram_test_sum{h_sample="1",type="histogram"} 100.0
histogram_test_sum{h_sample="2",type="histogram"} 100.0
histogram_test_sum{h_sample="3",type="histogram"} 100.0
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_count{s_sample="1",type="summary"} 250
summary_test_count{s_sample="2",type="summary"} 50
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
"""

    with aiohttp.ClientSession(loop=self.loop) as session:
        headers = {ACCEPT: 'text/plain; version=0.0.4'}
        async with session.get(self.metrics_url, headers=headers) as resp:
            assert resp.status == 200
            content = await resp.read()
            self.assertEqual("text/plain; version=0.0.4; charset=utf-8",
                             resp.headers.get(CONTENT_TYPE))
            self.assertEqual(200, resp.status)
            self.assertEqual(expected_data, content.decode())
svr.registry.register(requests_metric)

ram_metric = Gauge("memory_usage_bytes", "Memory usage in bytes.", {'host': host})
svr.registry.register(ram_metric)

cpu_metric = Gauge("cpu_usage_percent", "CPU usage percent.", {'host': host})
svr.registry.register(cpu_metric)

payload_metric = Summary(
    "request_payload_size_bytes", "Request payload size in bytes.",
    {'host': host}, invariants=[(0.50, 0.05), (0.99, 0.001)])
svr.registry.register(payload_metric)

latency_metric = Histogram(
    "request_latency_seconds", "Request latency in seconds",
    {'host': host}, buckets=[0.1, 0.5, 1.0, 5.0])
svr.registry.register(latency_metric)

loop.run_until_complete(svr.start())
logger.debug('serving prometheus metrics on: %s', svr.url)

# schedule the first update, which will continue to re-schedule itself.
loop.call_later(UPDATE_INTERVAL, on_timer_expiry, loop, ram_metric, cpu_metric,
                requests_metric, payload_metric, latency_metric)

# initiate the client task
loop.call_later(1.5, fetch_task, svr.url, loop)

try:
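# The snippet above schedules an `on_timer_expiry` callback whose body is not
# shown. A hedged sketch of what such a callback could look like, modelled on
# the class-based ExampleApp.on_timer_expiry further down in this document;
# the signature and UPDATE_INTERVAL come from the snippet above, the body is
# an assumption.
import random

import psutil


def on_timer_expiry(loop, ram_metric, cpu_metric, requests_metric,
                    payload_metric, latency_metric):
    """ Update the example metrics and re-schedule the timer """
    ram_metric.set({"type": "virtual"}, psutil.virtual_memory().used)
    ram_metric.set({"type": "swap"}, psutil.swap_memory().used)

    for core, percent in enumerate(psutil.cpu_percent(percpu=True)):
        cpu_metric.set({"core": core}, percent)

    requests_metric.inc({"path": "/"})
    payload_metric.add({"path": "/data"}, random.random() * 2 ** 10)
    latency_metric.add({"path": "/data"}, random.random() * 5)

    # keep the update loop going
    loop.call_later(UPDATE_INTERVAL, on_timer_expiry, loop, ram_metric,
                    cpu_metric, requests_metric, payload_metric, latency_metric)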
logger = logging.getLogger()
logger.setLevel(getenv("LOG_LEVEL") or 10)
logHandler = logging.StreamHandler()
formatter = CustomJsonFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)

prometheus_service = Service()
prometheus_service.registry = Registry()
prometheus_labels = {
    "host": gethostname(),
}
ping_counter = Counter("health_check_counter", "total ping requests.")
latency_metric = Histogram(
    "request_latency_seconds",
    "request latency in seconds.",
    const_labels=prometheus_labels,
    buckets=[0.1, 0.5, 1.0, 5.0],
)
ram_metric = Gauge(
    "memory_usage_bytes", "memory usage in bytes.", const_labels=prometheus_labels
)
cpu_metric = Gauge(
    "cpu_usage_percent", "cpu usage percent.", const_labels=prometheus_labels
)
metrics_request_time = Summary(
    "metrics_processing_seconds",
    "time spent processing request for metrics in seconds.",
    const_labels=prometheus_labels,
)

prometheus_service.registry.register(ping_counter)
prometheus_service.registry.register(latency_metric)
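# The snippet above registers only the first two collectors. A hedged sketch
# of the remaining wiring: register the other collectors the same way and
# start the metrics service once the event loop is running. The host and port
# values below are placeholders, not taken from the original snippet.
prometheus_service.registry.register(ram_metric)
prometheus_service.registry.register(cpu_metric)
prometheus_service.registry.register(metrics_request_time)


async def start_metrics_service():
    # Expose all registered collectors on an HTTP endpoint for Prometheus.
    await prometheus_service.start(addr="0.0.0.0", port=9100)
    logger.info("serving prometheus metrics on: %s", prometheus_service.metrics_url)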
class TestHistogram(unittest.TestCase):
    def setUp(self):
        self.h = Histogram(
            "h", "doc", const_labels={"app": "my_app"}, buckets=[5.0, 10.0, 15.0]
        )
        self.correct_data = {
            "sum": 25.2,
            "count": 4,
            5.0: 2.0,
            10.0: 3.0,
            15.0: 4.0,
            POS_INF: 4.0,
        }
        self.input_values = [3, 5.2, 13, 4]

    def test_wrong_labels(self):
        with self.assertRaises(ValueError) as context:
            self.h.set_value({"le": 2}, 1)
        self.assertEqual("Invalid label name: le", str(context.exception))

    def test_expected_values(self):
        labels = None
        self.h.observe(labels, 7)
        results = self.h.get(labels)
        self.assertEqual(0, results[5.0])
        self.assertEqual(1, results[10.0])
        self.assertEqual(1, results[15.0])
        self.assertEqual(1, results[POS_INF])
        self.assertEqual(1, results["count"])
        self.assertEqual(7.0, results["sum"])

        self.h.observe(labels, 7.5)
        results = self.h.get(labels)
        self.assertEqual(0, results[5.0])
        self.assertEqual(2, results[10.0])
        self.assertEqual(2, results[15.0])
        self.assertEqual(2, results[POS_INF])
        self.assertEqual(2, results["count"])
        self.assertEqual(14.5, results["sum"])

        self.h.observe(labels, POS_INF)
        results = self.h.get(labels)
        self.assertEqual(0, results[5.0])
        self.assertEqual(2, results[10.0])
        self.assertEqual(2, results[15.0])
        self.assertEqual(3, results[POS_INF])
        self.assertEqual(3, results["count"])
        self.assertEqual(POS_INF, results["sum"])

    def test_get(self):
        labels = {"path": "/"}
        for i in self.input_values:
            self.h.observe(labels, i)
        data = self.h.get(labels)
        self.assertEqual(self.correct_data, data)

    def test_add_get_without_labels(self):
        labels = None
        for i in self.input_values:
            self.h.observe(labels, i)
        self.assertEqual(1, len(self.h.values))
        self.assertEqual(self.correct_data, self.h.get(labels))
class ExampleApp(object):
    """
    An example application that demonstrates how ``aioprometheus`` can be
    integrated and used within a Python application built upon asyncio.

    This application attempts to simulate a long running distributed system
    process, say a socket relay or some kind of message adapter. It is
    intentionally not hosting an existing web service in the application.

    In this case the aioprometheus.Service object is used to provide a new
    HTTP endpoint that can be used to expose Prometheus metrics on.

    If this application was a web service (i.e. already had an existing web
    interface) then the aioprometheus.Service object could be used as before
    to add another web interface or a different approach could be used that
    provides a metrics handler function for use with the existing web
    service.
    """

    def __init__(
        self,
        metrics_host="127.0.0.1",
        metrics_port: int = 5000,
        loop: BaseEventLoop = None,
    ):

        self.metrics_host = metrics_host
        self.metrics_port = metrics_port
        self.loop = loop or asyncio.get_event_loop()
        self.timer = None  # type: asyncio.Handle

        ######################################################################
        # Create application metrics and metrics service

        # Create a metrics server. The server will create a metrics collector
        # registry if one is not specifically created and passed in.
        self.msvr = Service()

        # Define some constant labels that need to be added to all metrics
        const_labels = {
            "host": socket.gethostname(),
            "app": f"{self.__class__.__name__}-{uuid.uuid4().hex}",
        }

        # Create metrics collectors

        # Create a counter metric to track requests
        self.requests_metric = Counter(
            "requests", "Number of requests.", const_labels=const_labels
        )
        # Collectors must be registered with the registry before they
        # get exposed.
        self.msvr.register(self.requests_metric)

        # Create a gauge metric to track memory usage.
        self.ram_metric = Gauge(
            "memory_usage_bytes", "Memory usage in bytes.", const_labels=const_labels
        )
        self.msvr.register(self.ram_metric)

        # Create a gauge metric to track CPU usage.
        self.cpu_metric = Gauge(
            "cpu_usage_percent", "CPU usage percent.", const_labels=const_labels
        )
        self.msvr.register(self.cpu_metric)

        # Create a summary metric to track request payload sizes.
        self.payload_metric = Summary(
            "request_payload_size_bytes",
            "Request payload size in bytes.",
            const_labels=const_labels,
            invariants=[(0.50, 0.05), (0.99, 0.001)],
        )
        self.msvr.register(self.payload_metric)

        # Create a histogram metric to track request latencies.
        self.latency_metric = Histogram(
            "request_latency_seconds",
            "Request latency in seconds",
            const_labels=const_labels,
            buckets=[0.1, 0.5, 1.0, 5.0],
        )
        self.msvr.register(self.latency_metric)

    async def start(self):
        """ Start the application """
        await self.msvr.start(addr=self.metrics_host, port=self.metrics_port)
        logger.debug("Serving prometheus metrics on: %s", self.msvr.metrics_url)

        # Schedule a timer to update internal metrics. In a realistic
        # application metrics would be updated as needed. In this example
        # application a simple timer is used to emulate things happening,
        # which conveniently allows all metrics to be updated at once.
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)

    async def stop(self):
        """ Stop the application """
        await self.msvr.stop()
        if self.timer:
            self.timer.cancel()
        self.timer = None

    def on_timer_expiry(self):
        """ Update application to simulate work """

        # Update memory metrics
        self.ram_metric.set({"type": "virtual"}, psutil.virtual_memory().used)
        self.ram_metric.set({"type": "swap"}, psutil.swap_memory().used)

        # Update cpu metrics
        for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
            self.cpu_metric.set({"core": c}, p)

        # Increment a requests counter to emulate a webserver app
        self.requests_metric.inc({"path": "/"})

        # Monitor request payload data to emulate a webserver app
        self.payload_metric.add({"path": "/data"}, random.random() * 2 ** 10)

        # Monitor request latency to emulate a webserver app
        self.latency_metric.add({"path": "/data"}, random.random() * 5)

        # re-schedule another metrics update
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)
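# A hedged sketch (not part of the original example) of one way the class
# above might be driven from a main entry point: start the app, run the event
# loop until interrupted, then shut down cleanly.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    app = ExampleApp()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(app.start())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(app.stop())
        loop.close()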
        self.tracer = None
        self.tracer_current_span = None
        self.profile_grpc = None
        self.profile_http = None


ctx = Ctx()

const_labels = {
    'pod': socket.gethostname()  # corresponds to pod name
}

ctx.profile_grpc = (
    Gauge('grpc_requests_in_flight',
          'Number of gRPC requests being served.',
          const_labels=const_labels),
    Histogram(
        'grpc_requests',
        'Histogram of gRPC request latencies in milliseconds',
        const_labels=const_labels,
        buckets=[30, 50, 100, 200],
    ),
    Counter('grpc_requests_failed',
            'Number of failed gRPC requests',
            const_labels=const_labels),
)

ctx.profile_http = (
    Gauge('http_requests_in_flight',
          'Number of HTTP requests being served.',
          const_labels=const_labels),
    Histogram(
        'http_requests',
        'Histogram of HTTP request latencies in milliseconds',
        const_labels=const_labels,
        buckets=[50, 100, 200, 1000],
    ),
    Counter('http_requests_failed',
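# A hedged sketch (not from the original source) of how one of the
# (Gauge, Histogram, Counter) profile tuples above could be used to wrap a
# request handler: track in-flight requests, record latency in milliseconds
# into the histogram buckets, and count failures. `handler` and the label
# values are hypothetical; Gauge.inc/dec are assumed to behave like the other
# aioprometheus collector methods used in this document.
import time


async def profiled_call(ctx, handler, labels):
    in_flight, latency_ms, failed = ctx.profile_grpc
    in_flight.inc(labels)
    started = time.monotonic()
    try:
        return await handler()
    except Exception:
        failed.inc(labels)
        raise
    finally:
        latency_ms.observe(labels, (time.monotonic() - started) * 1000.0)
        in_flight.dec(labels)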
class Metrics(commands.Cog):
    def __init__(self, bot: commands.Bot):
        self.bot = bot

        self.registry = Registry()
        self.service = Service(self.registry)

        self.events = Counter("events", "Discord API event counts.")
        self.registry.register(self.events)

        self.latency = Histogram("latency", "Discord API latency.")
        self.registry.register(self.latency)

        self.gc_started: typing.Optional[float] = None
        self.gc_latency = Histogram(
            "gc_latency", "CPython garbage collector execution times."
        )
        self.registry.register(self.gc_latency)
        self.gc_stats = Counter("gc_stats", "CPython garbage collector stats.")
        self.registry.register(self.gc_stats)

        self.process = psutil.Process(os.getpid())
        self.resources = Gauge("resources", "Process resource usage gauges.")
        self.registry.register(self.resources)

        self.hook_gc()
        self.update_gc_and_resource_stats.start()  # pylint: disable=no-member
        self.serve.start()  # pylint: disable=no-member
        self.update_latency.start()  # pylint: disable=no-member

    def gc_callback(self, phase: str, info: typing.Mapping[str, int]):
        if phase == "start":
            self.gc_started = time.time()
        else:
            self.gc_latency.observe(
                {"generation": info["generation"]}, time.time() - self.gc_started
            )

    def hook_gc(self):
        gc.callbacks.append(self.gc_callback)

    def unhook_gc(self):
        gc.callbacks.remove(self.gc_callback)

    @tasks.loop(minutes=1)
    async def update_gc_and_resource_stats(self):
        # gc stats
        for gen, stats in zip(itertools.count(), gc.get_stats()):
            for stat, value in stats.items():
                self.gc_stats.set({"generation": gen, "type": stat}, value)

        # process resource usage
        for key, value in self.process.cpu_times()._asdict().items():
            self.resources.set({"type": f"cpu_{key}"}, value)
        for key, value in self.process.memory_info()._asdict().items():
            self.resources.set({"type": f"mem_{key}"}, value)
        for key, value in self.process.io_counters()._asdict().items():
            self.resources.set({"type": f"io_{key}"}, value)
        self.resources.set({"type": "num_threads"}, self.process.num_threads())
        self.resources.set({"type": "num_fds"}, self.process.num_fds())

    @tasks.loop(count=1, reconnect=False)
    async def serve(self):
        await self.service.start(port=9100)
        logging.info("Serving Prometheus metrics on: %s", self.service.metrics_url)

    @tasks.loop(minutes=1)
    async def update_latency(self):
        self.latency.observe({"type": "seconds"}, self.bot.latency)

    @update_latency.before_loop
    async def before_update_latency(self):
        await self.bot.wait_until_ready()

    def cog_unload(self):
        self.unhook_gc()
        self.update_gc_and_resource_stats.cancel()  # pylint: disable=no-member
        self.serve.cancel()  # pylint: disable=no-member
        self.update_latency.cancel()  # pylint: disable=no-member

    @commands.Cog.listener()
    async def on_connect(self):
        self.events.inc({"type": "connect"})

    @commands.Cog.listener()
    async def on_shard_connect(self, shard_id):
        self.events.inc({"type": f"shard_connect_{shard_id}"})

    @commands.Cog.listener()
    async def on_disconnect(self):
        self.events.inc({"type": "disconnect"})

    @commands.Cog.listener()
    async def on_shard_disconnect(self, shard_id):
        self.events.inc({"type": f"shard_disconnect_{shard_id}"})

    @commands.Cog.listener()
    async def on_ready(self):
        self.events.inc({"type": "ready"})

    @commands.Cog.listener()
    async def on_shard_ready(self, shard_id):
        self.events.inc({"type": f"shard_ready_{shard_id}"})

    @commands.Cog.listener()
    async def on_resumed(self):
        self.events.inc({"type": "resumed"})

    @commands.Cog.listener()
    async def on_shard_resumed(self, shard_id):
        self.events.inc({"type": f"shard_resumed_{shard_id}"})

    @commands.Cog.listener()
    async def on_error(self, event, *_):
        self.events.inc({"type": f"error_{event}"})

    @commands.Cog.listener()
    async def on_socket_raw_receive(self, *_):
        self.events.inc({"type": "socket_raw_receive"})

    @commands.Cog.listener()
    async def on_socket_raw_send(self, *_):
        self.events.inc({"type": "socket_raw_send"})

    @commands.Cog.listener()
    async def on_typing(self, *_):
        self.events.inc({"type": "typing"})

    @commands.Cog.listener()
    async def on_message(self, *_):
        self.events.inc({"type": "message"})

    @commands.Cog.listener()
    async def on_message_delete(self, *_):
        self.events.inc({"type": "message_delete"})

    @commands.Cog.listener()
    async def on_bulk_message_delete(self, *_):
        self.events.inc({"type": "bulk_message_delete"})

    @commands.Cog.listener()
    async def on_raw_message_delete(self, *_):
        self.events.inc({"type": "raw_message_delete"})

    @commands.Cog.listener()
    async def on_raw_bulk_message_delete(self, *_):
        self.events.inc({"type": "raw_bulk_message_delete"})

    @commands.Cog.listener()
    async def on_message_edit(self, *_):
        self.events.inc({"type": "message_edit"})

    @commands.Cog.listener()
    async def on_raw_message_edit(self, *_):
        self.events.inc({"type": "raw_message_edit"})

    @commands.Cog.listener()
    async def on_reaction_add(self, *_):
        self.events.inc({"type": "reaction_add"})

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, *_):
        self.events.inc({"type": "raw_reaction_add"})

    @commands.Cog.listener()
    async def on_reaction_remove(self, *_):
        self.events.inc({"type": "reaction_remove"})

    @commands.Cog.listener()
    async def on_raw_reaction_remove(self, *_):
        self.events.inc({"type": "raw_reaction_remove"})

    @commands.Cog.listener()
    async def on_reaction_clear(self, *_):
        self.events.inc({"type": "reaction_clear"})

    @commands.Cog.listener()
    async def on_raw_reaction_clear(self, *_):
        self.events.inc({"type": "raw_reaction_clear"})

    @commands.Cog.listener()
    async def on_reaction_clear_emoji(self, *_):
        self.events.inc({"type": "reaction_clear_emoji"})

    @commands.Cog.listener()
    async def on_raw_reaction_clear_emoji(self, *_):
        self.events.inc({"type": "raw_reaction_clear_emoji"})

    @commands.Cog.listener()
    async def on_private_channel_delete(self, *_):
        self.events.inc({"type": "private_channel_delete"})

    @commands.Cog.listener()
    async def on_private_channel_create(self, *_):
        self.events.inc({"type": "private_channel_create"})

    @commands.Cog.listener()
    async def on_private_channel_update(self, *_):
        self.events.inc({"type": "private_channel_update"})

    @commands.Cog.listener()
    async def on_private_channel_pins_update(self, *_):
        self.events.inc({"type": "private_channel_pins_update"})

    @commands.Cog.listener()
    async def on_guild_channel_delete(self, *_):
        self.events.inc({"type": "guild_channel_delete"})

    @commands.Cog.listener()
    async def on_guild_channel_create(self, *_):
        self.events.inc({"type": "guild_channel_create"})

    @commands.Cog.listener()
    async def on_guild_channel_update(self, *_):
        self.events.inc({"type": "guild_channel_update"})

    @commands.Cog.listener()
    async def on_guild_channel_pins_update(self, *_):
        self.events.inc({"type": "guild_channel_pins_update"})

    @commands.Cog.listener()
    async def on_guild_channel_integrations_update(self, *_):
        self.events.inc({"type": "guild_channel_integrations_update"})

    @commands.Cog.listener()
    async def on_webhooks_update(self, *_):
        self.events.inc({"type": "webhooks_update"})

    @commands.Cog.listener()
    async def on_member_join(self, *_):
        self.events.inc({"type": "member_join"})

    @commands.Cog.listener()
    async def on_member_remove(self, *_):
        self.events.inc({"type": "member_remove"})

    @commands.Cog.listener()
    async def on_member_update(self, *_):
        self.events.inc({"type": "member_update"})

    @commands.Cog.listener()
    async def on_user_update(self, *_):
        self.events.inc({"type": "user_update"})

    @commands.Cog.listener()
    async def on_guild_join(self, *_):
        self.events.inc({"type": "guild_join"})

    @commands.Cog.listener()
    async def on_guild_remove(self, *_):
        self.events.inc({"type": "guild_remove"})

    @commands.Cog.listener()
    async def on_guild_update(self, *_):
        self.events.inc({"type": "guild_update"})

    @commands.Cog.listener()
    async def on_guild_role_create(self, *_):
        self.events.inc({"type": "guild_role_create"})

    @commands.Cog.listener()
    async def on_guild_role_delete(self, *_):
        self.events.inc({"type": "guild_role_delete"})

    @commands.Cog.listener()
    async def on_guild_role_update(self, *_):
        self.events.inc({"type": "guild_role_update"})

    @commands.Cog.listener()
    async def on_guild_emojis_update(self, *_):
        self.events.inc({"type": "guild_emojis_update"})

    @commands.Cog.listener()
    async def on_guild_available(self, *_):
        self.events.inc({"type": "guild_available"})

    @commands.Cog.listener()
    async def on_guild_unavailable(self, *_):
        self.events.inc({"type": "guild_unavailable"})

    @commands.Cog.listener()
    async def on_voice_state_update(self, *_):
        self.events.inc({"type": "voice_state_update"})

    @commands.Cog.listener()
    async def on_member_ban(self, *_):
        self.events.inc({"type": "member_ban"})

    @commands.Cog.listener()
    async def on_member_unban(self, *_):
        self.events.inc({"type": "member_unban"})

    @commands.Cog.listener()
    async def on_invite_create(self, *_):
        self.events.inc({"type": "invite_create"})

    @commands.Cog.listener()
    async def on_invite_delete(self, *_):
        self.events.inc({"type": "invite_delete"})

    @commands.Cog.listener()
    async def on_group_join(self, *_):
        self.events.inc({"type": "group_join"})

    @commands.Cog.listener()
    async def on_group_remove(self, *_):
        self.events.inc({"type": "group_remove"})

    @commands.Cog.listener()
    async def on_relationship_add(self, *_):
        self.events.inc({"type": "relationship_add"})

    @commands.Cog.listener()
    async def on_relationship_remove(self, *_):
        self.events.inc({"type": "relationship_remove"})

    @commands.Cog.listener()
    async def on_relationship_update(self, *_):
        self.events.inc({"type": "relationship_update"})
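# A hedged sketch (not part of the original cog) of how this Metrics cog could
# be loaded as a discord.py extension, following the pre-2.0 synchronous
# `setup()` convention that matches the `tasks.loop` / `commands.Cog` usage
# above. The module path used with `load_extension` is hypothetical.
def setup(bot: commands.Bot):
    bot.add_cog(Metrics(bot))


# elsewhere, when constructing the bot:
#   bot.load_extension("cogs.metrics")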