Example #1
    def __init__(self):
        self.logger = logging.getLogger(
            "cobald.runtime.tardis.plugins.prometheusmonitoring")
        self.logger.setLevel(logging.DEBUG)
        config = Configuration().Plugins.PrometheusMonitoring

        self._port = config.port
        self._addr = config.addr

        self._svr_started = False
        self._drones = {}

        self._svr = Service()

        self._gauges = {
            ResourceStatus.Booting: Gauge("booting", "Booting drones"),
            ResourceStatus.Running: Gauge("running", "Running drones"),
            ResourceStatus.Stopped: Gauge("stopped", "Stopped drones"),
            ResourceStatus.Deleted: Gauge("deleted", "Deleted drones"),
            ResourceStatus.Error: Gauge("error", "Drones in error state"),
        }

        for gauge in self._gauges.values():
            self._svr.register(gauge)
            gauge.set({}, 0)
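The `_svr_started` flag suggests the plugin starts the service lazily elsewhere; a minimal sketch of such a helper, reusing the `_addr` and `_port` values read from the configuration:

    async def _ensure_svr_started(self):
        # Hypothetical helper: start the metrics endpoint on first use.
        if not self._svr_started:
            await self._svr.start(addr=self._addr, port=self._port)
            self._svr_started = True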
Example #2
async def run_server(host: str = "127.0.0.1",
                     port: str = "9999",
                     sleeptime: int = 5,
                     urls: Optional[str] = None) -> None:
    """Starts the metrics server"""
    logger = logging.getLogger(__name__)

    prom_service = Service()
    for metric in (url_request_times, url_health_metric,
                   url_requests_in_progress):
        prom_service.register(metric)

    urls = parsed_urls(urls)
    logger.info(f"Urls to check: {urls}")

    try:
        logger.info('Starting metrics server')
        await prom_service.start(addr=host, port=port)

        url_tasks = [
            url_checker(url_health_metric, url, sleeptime=sleeptime)
            for url in urls
        ]
        tasks = [
            *url_tasks
            # add other tasks if needed
        ]
        await asyncio.gather(*tasks)
    finally:
        await prom_service.stop()
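`parsed_urls` and `url_checker` are not part of this listing; a sketch of plausible definitions, assuming aiohttp and the usual typing imports are available, with the gauge recording 1 for a reachable URL and 0 otherwise:

def parsed_urls(urls: Optional[str]) -> List[str]:
    # Assumed helper: split a comma-separated string into a list of URLs.
    return [u.strip() for u in urls.split(",")] if urls else []


async def url_checker(health_metric, url: str, sleeptime: int = 5) -> None:
    # Assumed helper: poll the URL forever, recording 1/0 in the gauge.
    while True:
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    health_metric.set({"url": url}, int(resp.status == 200))
        except aiohttp.ClientError:
            health_metric.set({"url": url}, 0)
        await asyncio.sleep(sleeptime)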
Example #3
    async def test_invalid_registry(self):
        """ check only valid registry can be provided """
        for invalid_registry in ["nope", dict(), list()]:
            with self.assertRaises(Exception) as cm:
                Service(registry=invalid_registry)
            self.assertIn("registry must be a Registry, got:",
                          str(cm.exception))

        Service(registry=Registry())
Example #4
    async def test_invalid_registry(self):
        ''' check only valid registry can be provided '''
        for invalid_registry in ['nope', dict(), list()]:
            with self.assertRaises(Exception) as cm:
                Service(registry=invalid_registry, loop=self.loop)
            self.assertIn('registry must be a Registry, got:',
                          str(cm.exception))

        Service(registry=Registry(), loop=self.loop)
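The only difference from Example #3 is the explicit `loop` keyword argument, a convention from older aioprometheus releases; modern asyncio code (and newer aioprometheus releases) obtains the running event loop itself rather than passing it around.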
Example #5
 def __init__(
     self,
     url: str,
     host: str = None,
     port: int = 9105,
     aircraft_interval: int = 10,
     stats_interval: int = 60,
     time_periods: Sequence[str] = ("last1min",),
     origin: PositionType = None,
     fetch_timeout: float = 2.0,
     loop: AbstractEventLoop = None,
 ) -> None:
     """
     :param url: The base dump1090 web address.
     :param host: The host to expose Prometheus metrics on. Defaults
       to listen on all interfaces.
     :param port: The port to expose Prometheus metrics on. Defaults to
       port 9105.
     :param aircraft_interval: number of seconds between processing the
       dump1090 aircraft data. Defaults to 10 seconds.
     :param stats_interval: number of seconds between processing the
       dump1090 stats data. Defaults to 60 seconds as the data only
       seems to be updated at 60 second intervals.
     :param time_periods: A list of time period keys to extract from the
       statistics data. By default this is just the 'last1min' time
       period as Prometheus can provide the historical access.
     :param origin: a tuple of (lat, lon) representing the receiver
       location. The origin is used for distance calculations with
       aircraft data. If it is not provided then range calculations
       can not be performed and the maximum range metric will always
       be zero.
     :param fetch_timeout: The number of seconds to wait for a response
       from dump1090.
     :param loop: the event loop.
     """
     self.dump1090urls = build_resources(url)
     self.loop = loop or asyncio.get_event_loop()
     self.host = host
     self.port = port
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         self.session = aiohttp.ClientSession(loop=self.loop)
     self.aircraft_interval = datetime.timedelta(seconds=aircraft_interval)
     self.stats_interval = datetime.timedelta(seconds=stats_interval)
     self.stats_time_periods = time_periods
     self.origin = Position(*origin) if origin else None
     self.fetch_timeout = fetch_timeout
     self.svr = Service()
     self.stats_task = None  # type: Union[asyncio.Task, None]
     self.aircraft_task = None  # type: Union[asyncio.Task, None]
     self.initialise_metrics()
     logger.info(f"Monitoring dump1090 at url: {self.dump1090urls.base}")
     logger.info(
         f"Refresh rates: aircraft={self.aircraft_interval}, statstics={self.stats_interval}"
     )
     logger.info(f"Origin: {self.origin}")
Example #6
    def __init__(self, bot):
        self.bot = bot

        self.msvr = Service()

        if platform.system() == "Linux":
            self.platform = platform
            self.pid = os.path.join("/proc", "self")
            self.pagesize = resource.getpagesize()
            self.ticks = os.sysconf("SC_CLK_TCK")
            self.btime = 0

            with open(os.path.join("/proc", "stat"), "rb") as stat:
                for line in stat:
                    if line.startswith(b"btime "):
                        self.btime = float(line.split()[1])
                        break

        self.vmem = Gauge("process_virtual_memory_bytes",
                          "Virtual memory size in bytes.")
        self.rss = Gauge("process_resident_memory_bytes",
                         "Resident memory size in bytes.")
        self.start_time = Gauge(
            "process_start_time_seconds",
            "Start time of the process since unix epoch in seconds.")
        self.cpu = Counter("process_cpu_seconds",
                           "Total user and system CPU time spent in seconds.")
        self.fds = Gauge("process_open_fds",
                         "Number of open file descriptors.")

        self.info = Gauge("python_info", "Python platform information.")
        self.collected = Counter("python_gc_objects_collected",
                                 "Objects collected during GC.")
        self.uncollectable = Counter("python_gc_objects_uncollectable",
                                     "Uncollectable objects found during GC.")
        self.collections = Counter(
            "python_gc_collections",
            "Number of times this generation was collected.")

        self.http = Counter("modmail_http_requests",
                            "The number of http requests sent to Discord.")
        self.commands = Counter(
            "modmail_commands",
            "The total number of commands used on the bot.")
        self.tickets = Counter(
            "modmail_tickets",
            "The total number of tickets created by the bot.")
        self.tickets_message = Counter(
            "modmail_tickets_message",
            "The total number of messages sent in tickets.")
Example #7
async def start_webapp(port):
    prometheus_service = Service()
    prometheus_service.register(PROM_CLOUDFOUNDRY_EVENT)

    app = web.Application()
    app.add_routes([
        web.get('/check', lambda _: web.Response(text='OK')),
        web.get('/metrics', prometheus_service.handle_metrics)
    ])

    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, '0.0.0.0', port)
    await site.start()
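Nothing here ever tears the runner down; one way to drive it with a clean shutdown (a sketch, assuming `start_webapp` is adjusted to return its `AppRunner`):

async def main(port: int = 8080) -> None:
    runner = await start_webapp(port)  # assumes start_webapp returns the runner
    try:
        await asyncio.Event().wait()  # serve until this task is cancelled
    finally:
        await runner.cleanup()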
Example #8
class MetricsServer(object):
    def __init__(self):
        self.msvr = Service()

    async def start(self, host: str, port: int):
        metrics = []
        metrics += list(ctx.profile_http)
        metrics += list(ctx.profile_grpc)
        # attach metrics
        for m in metrics:
            self.msvr.register(m)
        await self.msvr.start(addr=host, port=port)

    async def stop(self):
        await self.msvr.stop()
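A hedged usage sketch of the wrapper above; `do_work` stands in for the application's real workload:

async def run() -> None:
    msvr = MetricsServer()
    await msvr.start("0.0.0.0", 9100)  # /metrics now served on port 9100
    try:
        await do_work()  # hypothetical application workload
    finally:
        await msvr.stop()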
Example #9
    def __init__(self, bot: commands.Bot):
        self.bot = bot

        self.registry = Registry()
        self.service = Service(self.registry)

        self.events = Counter("events", "Discord API event counts.")
        self.registry.register(self.events)

        self.latency = Histogram("latency", "Discord API latency.")
        self.registry.register(self.latency)

        self.gc_started: typing.Optional[float] = None
        self.gc_latency = Histogram(
            "gc_latency", "CPython garbage collector execution times."
        )
        self.registry.register(self.gc_latency)
        self.gc_stats = Counter("gc_stats", "CPython garbage collector stats.")
        self.registry.register(self.gc_stats)

        self.process = psutil.Process(os.getpid())
        self.resources = Gauge("resources", "Process resource usage gauges.")
        self.registry.register(self.resources)

        self.hook_gc()
        self.update_gc_and_resource_stats.start()  # pylint: disable=no-member
        self.serve.start()  # pylint: disable=no-member
        self.update_latency.start()  # pylint: disable=no-member
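`serve` and the two updater attributes are started like `discord.ext.tasks` loops; under that assumption, `serve` might be defined along these lines (the address and port are assumptions):

    @tasks.loop(count=1)
    async def serve(self):
        # Assumed: start the metrics endpoint once, when the loop first runs.
        await self.service.start(addr="127.0.0.1", port=9100)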
Example #10
    async def main(svr: Service) -> None:

        events_counter = Counter(
            "events", "Number of events.", const_labels={"host": socket.gethostname()}
        )
        svr.register(events_counter)
        await svr.start(addr="127.0.0.1", port=5000)
        print(f"Serving prometheus metrics on: {svr.metrics_url}")

        # Now start another coroutine to periodically update a metric to
        # simulate the application making some progress.
        async def updater(c: Counter):
            while True:
                c.inc({"kind": "timer_expiry"})
                await asyncio.sleep(1.0)

        await updater(events_counter)
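Because `updater` loops forever, `main` only returns if its task is cancelled; Example #22 shows the full driver, which stops the service in a `finally` block once the loop is interrupted.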
Example #11
    async def main(svr: Service) -> None:

        events_counter = Counter("events",
                                 "Number of events.",
                                 const_labels={"host": socket.gethostname()})
        svr.register(events_counter)
        await svr.start(addr="", port=5000)
        print(f"Serving prometheus metrics on: {svr.metrics_url}")

        # Now start another coroutine to periodically update a metric to
        # simulate the application making some progress.
        async def updater(c: Counter):
            while True:
                c.inc({"kind": "timer_expiry"})
                await asyncio.sleep(1.0)

        await updater(events_counter)
Example #12
def run_async(metrics_list: list, addr: str, port: int,
              collector: Callable) -> None:
    """
    This function starts the async loop with the aio-prometheus server.
    :param metrics_list:
    :param addr:
    :param port:
    :param collector:
    :return: None
    """
    loop = asyncio.get_event_loop()
    svr = Service()
    try:
        loop.run_until_complete(
            server(svr, metrics_list, addr, port, collector))
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(svr.stop())
    loop.close()
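A hedged usage sketch: `server` (Example #14) reads each entry's `.metric` attribute, so the list passed to `run_async` presumably holds small wrapper objects. `my_collector` below is a hypothetical updater coroutine.

class WrappedMetric:
    # Hypothetical wrapper shaped the way server()/run_async() consume it.
    def __init__(self, metric):
        self.metric = metric


requests = WrappedMetric(Counter("requests", "Handled requests."))
run_async([requests], addr="127.0.0.1", port=9100, collector=my_collector)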
Example #13
    async def test_stop_stopped_server(self):
        """ check stopping a stopped server """

        s = Service(registry=self.registry)
        await s.start(addr="127.0.0.1")
        await s.stop()

        with unittest.mock.patch.object(aioprometheus.service.logger,
                                        "warning") as mock_warn:
            await s.stop()
            self.assertEqual(mock_warn.call_count, 1)
            mock_warn.assert_called_once_with(
                "Prometheus metrics server is already stopped")
Example #14
async def server(svc: Service, metrics_list: list, addr: str, port: int,
                 collector: Callable) -> None:
    """
    This function runs the metrics updater task, that runs forever.
    :param svc:
    :param metrics_list:
    :param addr:
    :param port:
    :param collector:
    :return:
    """
    for my_metric in metrics_list:
        svc.register(my_metric.metric)
    await svc.start(addr=addr, port=port)
    print(f"Serving prometheus metrics on: {svc.metrics_url}")
    await collector(metrics_list)
Example #15
    def test_fetch_url_before_starting_server(self):
        """ check accessing a URL property raises expection if not available """
        s = Service()

        with self.assertRaises(Exception) as cm:
            _ = s.root_url
        self.assertIn(
            "No URL available, Prometheus metrics server is not running",
            str(cm.exception),
        )

        with self.assertRaises(Exception) as cm:
            _ = s.metrics_url
        self.assertIn(
            "No URL available, Prometheus metrics server is not running",
            str(cm.exception),
        )
Example #16
    (env) $ python simple-example.py
    Serving prometheus metrics on: http://127.0.0.1:50624/metrics

In another terminal fetch the metrics using the ``curl`` command line tool
to verify they can be retrieved by Prometheus server.
"""
import asyncio
import socket
from aioprometheus import Counter, Service


if __name__ == "__main__":

    loop = asyncio.get_event_loop()

    svr = Service()

    events_counter = Counter(
        "events", "Number of events.", const_labels={"host": socket.gethostname()}
    )

    svr.register(events_counter)

    loop.run_until_complete(svr.start(addr="127.0.0.1"))
    print(f"Serving prometheus metrics on: {svr.metrics_url}")

    async def updater(m: Counter):
        # Periodically update the metric to simulate some progress
        # happening in a real application.
        while True:
            m.inc({"kind": "timer_expiry"})
Example #17
 async def setUp(self):
     self.registry = Registry()
     self.server = Service(registry=self.registry)
     await self.server.start(addr="127.0.0.1")
     self.metrics_url = self.server.metrics_url
     self.root_url = self.server.root_url
Example #18
        raise Exception('Ooops')
    await asyncio.sleep(duration)


async def handle_requests():
    # Start up the server to expose the metrics.
    await svr.start(port=8000)
    # Generate some requests.
    while True:
        try:
            await handle_request(random.random())
        except Exception:
            pass  # keep handling


if __name__ == '__main__':

    loop = asyncio.get_event_loop()

    svr = Service(loop=loop)
    svr.registry.register(REQUESTS)

    try:
        loop.run_until_complete(handle_requests())
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(svr.stop())
    loop.stop()
    loop.close()
Example #19
class ExampleApp(object):
    """
    An example application that demonstrates how ``aioprometheus`` can be
    integrated and used within a Python application built upon asyncio.

    This application attempts to simulate a long running distributed system
    process, say a socket relay or some kind of message adapter. It is
    intentionally not hosting an existing web service in the application.

    In this case the aioprometheus.Service object is used to provide a
    new HTTP endpoint that can be used to expose Prometheus metrics on.

    If this application was a web service (i.e. already had an existing web
    interface) then the aioprometheus.Service object could be used as before
    to add another web interface or a different approach could be used that
    provides a metrics handler function for use with the existing web service.
    """

    def __init__(
        self,
        metrics_host="127.0.0.1",
        metrics_port: int = 5000,
        loop: BaseEventLoop = None,
    ):

        self.metrics_host = metrics_host
        self.metrics_port = metrics_port
        self.loop = loop or asyncio.get_event_loop()
        self.timer = None  # type: asyncio.Handle

        ######################################################################
        # Create application metrics and metrics service

        # Create a metrics server. The server will create a metrics collector
        # registry if one is not specifically created and passed in.
        self.msvr = Service()

        # Define some constant labels that need to be added to all metrics
        const_labels = {
            "host": socket.gethostname(),
            "app": f"{self.__class__.__name__}-{uuid.uuid4().hex}",
        }

        # Create metrics collectors

        # Create a counter metric to track requests
        self.requests_metric = Counter(
            "requests", "Number of requests.", const_labels=const_labels
        )

        # Collectors must be registered with the registry before they
        # get exposed.
        self.msvr.register(self.requests_metric)

        # Create a gauge metrics to track memory usage.
        self.ram_metric = Gauge(
            "memory_usage_bytes", "Memory usage in bytes.", const_labels=const_labels
        )
        self.msvr.register(self.ram_metric)

        # Create a gauge metrics to track CPU.
        self.cpu_metric = Gauge(
            "cpu_usage_percent", "CPU usage percent.", const_labels=const_labels
        )
        self.msvr.register(self.cpu_metric)

        self.payload_metric = Summary(
            "request_payload_size_bytes",
            "Request payload size in bytes.",
            const_labels=const_labels,
            invariants=[(0.50, 0.05), (0.99, 0.001)],
        )
        self.msvr.register(self.payload_metric)

        self.latency_metric = Histogram(
            "request_latency_seconds",
            "Request latency in seconds",
            const_labels=const_labels,
            buckets=[0.1, 0.5, 1.0, 5.0],
        )
        self.msvr.register(self.latency_metric)

    async def start(self):
        """ Start the application """
        await self.msvr.start(addr=self.metrics_host, port=self.metrics_port)
        logger.debug("Serving prometheus metrics on: %s", self.msvr.metrics_url)

        # Schedule a timer to update internal metrics. In a realistic
        # application metrics would be updated as needed. In this example
        # application a simple timer is used to emulate things happening,
        # which conveniently allows all metrics to be updated at once.
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)

    async def stop(self):
        """ Stop the application """
        await self.msvr.stop()
        if self.timer:
            self.timer.cancel()
        self.timer = None

    def on_timer_expiry(self):
        """ Update application to simulate work """

        # Update memory metrics
        self.ram_metric.set({"type": "virtual"}, psutil.virtual_memory().used)
        self.ram_metric.set({"type": "swap"}, psutil.swap_memory().used)

        # Update cpu metrics
        for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
            self.cpu_metric.set({"core": c}, p)

        # Incrementing a requests counter to emulate webserver app
        self.requests_metric.inc({"path": "/"})

        # Monitor request payload data to emulate webserver app
        self.payload_metric.add({"path": "/data"}, random.random() * 2 ** 10)

        # Monitor request latency to emulate webserver app
        self.latency_metric.add({"path": "/data"}, random.random() * 5)

        # re-schedule another metrics update
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)
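One caveat in `on_timer_expiry`: `psutil.cpu_percent(interval=1, percpu=True)` sleeps for a second inside the callback, which blocks the event loop. A non-blocking variant compares against the previous call instead:

        # interval=None returns immediately, comparing CPU times against
        # the values sampled on the previous call.
        for c, p in enumerate(psutil.cpu_percent(interval=None, percpu=True)):
            self.cpu_metric.set({"core": c}, p)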
Example #20
if __name__ == "__main__":
    LISTEN_PORT = int(getenv("PENDLESSH_PORT", default=2222))
    LISTEN_ADDRESS = getenv("PENDLESSH_ADDRESS", default="0.0.0.0")
    MESSAGE_MAX_DELAY = int(getenv("PENDLESSH_MAX_DELAY", default=30))
    PROMETHEUS_HOSTNAME = getenv("PENDLESSH_PROMETHEUS_HOSTNAME",
                                 default=gethostname())

    log.info(f"Starting server on {LISTEN_ADDRESS}:{LISTEN_PORT} "
             f"with {MESSAGE_MAX_DELAY}s max delay")

    const_labels = {
        "host": PROMETHEUS_HOSTNAME,
        "app": f"{sys.argv[0].split('/')[-1].replace('.py', '')}",
    }
    PromConnectionCounter = Counter(
        "pendlessh_connections",
        "Number of connections received",
        const_labels=const_labels,
    )
    PromActiveConnectionGauge = Gauge(
        "pendlessh_active_connections",
        "Number of currently active connections",
        const_labels=const_labels,
    )

    PromServer = Service()
    PromServer.register(PromConnectionCounter)
    PromServer.register(PromActiveConnectionGauge)

    asyncio.run(main(PromServer))
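`main` is not part of this listing; assuming it starts the metrics endpoint and then runs the tarpit listener, it could look roughly like this (`handle_connection` and the metrics port 9100 are hypothetical):

async def main(svr: Service) -> None:
    # Assumed entry point: expose metrics, then accept connections forever.
    await svr.start(addr=LISTEN_ADDRESS, port=9100)
    server = await asyncio.start_server(
        handle_connection,  # hypothetical per-connection coroutine
        LISTEN_ADDRESS, LISTEN_PORT,
    )
    async with server:
        await server.serve_forever()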
Example #21
 async def setUp(self):
     self.registry = Registry()
     self.server = Service(registry=self.registry)
     await self.server.start(addr="127.0.0.1")
     self.metrics_url = self.server.metrics_url
     self.root_url = self.server.root_url
Example #22
if __name__ == "__main__":

    async def main(svr: Service) -> None:

        events_counter = Counter(
            "events", "Number of events.", const_labels={"host": socket.gethostname()}
        )
        svr.register(events_counter)
        await svr.start(addr="127.0.0.1", port=5000)
        print(f"Serving prometheus metrics on: {svr.metrics_url}")

        # Now start another coroutine to periodically update a metric to
        # simulate the application making some progress.
        async def updater(c: Counter):
            while True:
                c.inc({"kind": "timer_expiry"})
                await asyncio.sleep(1.0)

        await updater(events_counter)

    loop = asyncio.get_event_loop()
    svr = Service()
    try:
        loop.run_until_complete(main(svr))
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(svr.stop())
    loop.close()
Example #23
async def start_webapp(port):
    prometheus_service = Service()
    prometheus_service.register(PROM_GET_METRICS_TIME)
    prometheus_service.register(PROM_AUTOSCALER_CHECK_TIME)
    prometheus_service.register(PROM_AUTOSCALING_ENABLED)
    prometheus_service.register(PROM_INSUFFICIENT_DATA)
    prometheus_service.register(PROM_SCALING_ACTIONS)

    app = web.Application()
    app.add_routes([
        web.get('/check', lambda _: web.Response(text='OK')),
        web.get('/metrics', prometheus_service.handle_metrics)
    ])

    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, '0.0.0.0', port)
    await site.start()
Example #24
 async def setUp(self):
     self.registry = Registry()
     self.server = Service(registry=self.registry, loop=self.loop)
     await self.server.start(addr=TEST_HOST, port=TEST_PORT)
     self.metrics_url = self.server.url
Example #25
    def __init__(
        self,
        metrics_host="127.0.0.1",
        metrics_port: int = 5000,
        loop: BaseEventLoop = None,
    ):

        self.metrics_host = metrics_host
        self.metrics_port = metrics_port
        self.loop = loop or asyncio.get_event_loop()
        self.timer = None  # type: asyncio.Handle

        ######################################################################
        # Create application metrics and metrics service

        # Create a metrics server. The server will create a metrics collector
        # registry if one is not specifically created and passed in.
        self.msvr = Service()

        # Define some constant labels that need to be added to all metrics
        const_labels = {
            "host": socket.gethostname(),
            "app": f"{self.__class__.__name__}-{uuid.uuid4().hex}",
        }

        # Create metrics collectors

        # Create a counter metric to track requests
        self.requests_metric = Counter(
            "requests", "Number of requests.", const_labels=const_labels
        )

        # Collectors must be registered with the registry before they
        # get exposed.
        self.msvr.register(self.requests_metric)

        # Create a gauge metrics to track memory usage.
        self.ram_metric = Gauge(
            "memory_usage_bytes", "Memory usage in bytes.", const_labels=const_labels
        )
        self.msvr.register(self.ram_metric)

        # Create a gauge metrics to track CPU.
        self.cpu_metric = Gauge(
            "cpu_usage_percent", "CPU usage percent.", const_labels=const_labels
        )
        self.msvr.register(self.cpu_metric)

        self.payload_metric = Summary(
            "request_payload_size_bytes",
            "Request payload size in bytes.",
            const_labels=const_labels,
            invariants=[(0.50, 0.05), (0.99, 0.001)],
        )
        self.msvr.register(self.payload_metric)

        self.latency_metric = Histogram(
            "request_latency_seconds",
            "Request latency in seconds",
            const_labels=const_labels,
            buckets=[0.1, 0.5, 1.0, 5.0],
        )
        self.msvr.register(self.latency_metric)
Example #26
 def __init__(self):
     self.msvr = Service()
Example #27
class TestTextExporter(asynctest.TestCase):
    async def setUp(self):
        self.registry = Registry()
        self.server = Service(registry=self.registry)
        await self.server.start(addr="127.0.0.1")
        self.metrics_url = self.server.metrics_url
        self.root_url = self.server.root_url

    async def tearDown(self):
        await self.server.stop()

    async def test_invalid_registry(self):
        """ check only valid registry can be provided """
        for invalid_registry in ["nope", dict(), list()]:
            with self.assertRaises(Exception) as cm:
                Service(registry=invalid_registry)
            self.assertIn("registry must be a Registry, got:",
                          str(cm.exception))

        Service(registry=Registry())

    def test_fetch_url_before_starting_server(self):
        """ check accessing a URL property raises expection if not available """
        s = Service()

        with self.assertRaises(Exception) as cm:
            _ = s.root_url
        self.assertIn(
            "No URL available, Prometheus metrics server is not running",
            str(cm.exception),
        )

        with self.assertRaises(Exception) as cm:
            _ = s.metrics_url
        self.assertIn(
            "No URL available, Prometheus metrics server is not running",
            str(cm.exception),
        )

    def test_register_deregister(self):
        """ check registering and deregistering metrics """
        c = Counter("test_counter", "Test Counter.", {"test": "test_counter"})
        self.server.register(c)

        # Check registering a collector with same name raises an exception
        c2 = Counter("test_counter", "Another Test Counter.")
        with self.assertRaises(ValueError) as cm:
            self.server.register(c2)
        self.assertIn("is already registered", str(cm.exception))

        self.server.deregister("test_counter")

        # Check deregistering a non-existent collector raises an exception
        with self.assertRaises(KeyError) as cm:
            self.server.deregister("test_counter")

    async def test_start_started_server(self):
        """ check starting a started server """

        with unittest.mock.patch.object(aioprometheus.service.logger,
                                        "warning") as mock_warn:
            await self.server.start(addr="127.0.0.1")
            self.assertEqual(mock_warn.call_count, 1)
            mock_warn.assert_called_once_with(
                "Prometheus metrics server is already running")

    async def test_stop_stopped_server(self):
        """ check stopping a stopped server """

        s = Service(registry=self.registry)
        await s.start(addr="127.0.0.1")
        await s.stop()

        with unittest.mock.patch.object(aioprometheus.service.logger,
                                        "warning") as mock_warn:
            await s.stop()
            self.assertEqual(mock_warn.call_count, 1)
            mock_warn.assert_called_once_with(
                "Prometheus metrics server is already stopped")

    async def test_counter(self):
        """ check counter metric export """

        # Add some metrics
        data = (
            ({"data": 1}, 100),
            ({"data": "2"}, 200),
            ({"data": 3}, 300),
            ({"data": 1}, 400),
        )
        c = Counter("test_counter", "Test Counter.", {"test": "test_counter"})
        self.server.register(c)

        for i in data:
            c.set(i[0], i[1])

        expected_data = """# HELP test_counter Test Counter.
# TYPE test_counter counter
test_counter{data="1",test="test_counter"} 400
test_counter{data="2",test="test_counter"} 200
test_counter{data="3",test="test_counter"} 300
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(self.metrics_url,
                                   headers={ACCEPT:
                                            text.TEXT_CONTENT_TYPE}) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(text.TEXT_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(self.metrics_url,
                                   headers={
                                       ACCEPT: binary.BINARY_CONTENT_TYPE
                                   }) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(binary.BINARY_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.COUNTER)
                self.assertEqual(len(mf.metric), 3)

    async def test_gauge(self):
        """ check gauge metric export """

        # Add some metrics
        data = (
            ({"data": 1}, 100),
            ({"data": "2"}, 200),
            ({"data": 3}, 300),
            ({"data": 1}, 400),
        )
        g = Gauge("test_gauge", "Test Gauge.", {"test": "test_gauge"})
        self.server.register(g)

        for i in data:
            g.set(i[0], i[1])

        expected_data = """# HELP test_gauge Test Gauge.
# TYPE test_gauge gauge
test_gauge{data="1",test="test_gauge"} 400
test_gauge{data="2",test="test_gauge"} 200
test_gauge{data="3",test="test_gauge"} 300
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(self.metrics_url,
                                   headers={ACCEPT:
                                            text.TEXT_CONTENT_TYPE}) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(text.TEXT_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(self.metrics_url,
                                   headers={
                                       ACCEPT: binary.BINARY_CONTENT_TYPE
                                   }) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(binary.BINARY_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.GAUGE)
                self.assertEqual(len(mf.metric), 3)

    async def test_summary(self):
        """ check summary metric export """

        # Add some metrics
        data = [3, 5.2, 13, 4]
        label = {"data": 1}

        s = Summary("test_summary", "Test Summary.", {"test": "test_summary"})
        self.server.register(s)

        for i in data:
            s.add(label, i)

        expected_data = """# HELP test_summary Test Summary.
# TYPE test_summary summary
test_summary{data="1",quantile="0.5",test="test_summary"} 4.0
test_summary{data="1",quantile="0.9",test="test_summary"} 5.2
test_summary{data="1",quantile="0.99",test="test_summary"} 5.2
test_summary_count{data="1",test="test_summary"} 4
test_summary_sum{data="1",test="test_summary"} 25.2
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(self.metrics_url,
                                   headers={ACCEPT:
                                            text.TEXT_CONTENT_TYPE}) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(text.TEXT_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(self.metrics_url,
                                   headers={
                                       ACCEPT: binary.BINARY_CONTENT_TYPE
                                   }) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(binary.BINARY_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.SUMMARY)
                self.assertEqual(len(mf.metric), 1)
                self.assertEqual(len(mf.metric[0].summary.quantile), 3)

    async def test_histogram(self):
        """ check histogram metric export """

        # Add some metrics
        data = [3, 5.2, 13, 4]
        label = {"data": 1}

        h = Histogram(
            "histogram_test",
            "Test Histogram.",
            {"type": "test_histogram"},
            buckets=[5.0, 10.0, 15.0],
        )
        self.server.register(h)

        for i in data:
            h.add(label, i)

        expected_data = """# HELP histogram_test Test Histogram.
# TYPE histogram_test histogram
histogram_test_bucket{data="1",le="5.0",type="test_histogram"} 2.0
histogram_test_bucket{data="1",le="10.0",type="test_histogram"} 3.0
histogram_test_bucket{data="1",le="15.0",type="test_histogram"} 4.0
histogram_test_bucket{data="1",le="+Inf",type="test_histogram"} 4.0
histogram_test_count{data="1",type="test_histogram"} 4.0
histogram_test_sum{data="1",type="test_histogram"} 25.2
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(self.metrics_url,
                                   headers={ACCEPT:
                                            text.TEXT_CONTENT_TYPE}) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(text.TEXT_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(self.metrics_url,
                                   headers={
                                       ACCEPT: binary.BINARY_CONTENT_TYPE
                                   }) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(binary.BINARY_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.HISTOGRAM)
                self.assertEqual(len(mf.metric), 1)
                self.assertEqual(len(mf.metric[0].histogram.bucket), 4)

    async def test_all(self):

        counter_data = (
            ({"c_sample": "1"}, 100),
            ({"c_sample": "2"}, 200),
            ({"c_sample": "3"}, 300),
            ({"c_sample": "1", "c_subsample": "b"}, 400),
        )

        gauge_data = (
            ({"g_sample": "1"}, 500),
            ({"g_sample": "2"}, 600),
            ({"g_sample": "3"}, 700),
            ({"g_sample": "1", "g_subsample": "b"}, 800),
        )

        summary_data = (
            ({"s_sample": "1"}, range(1000, 2000, 4)),
            ({"s_sample": "2"}, range(2000, 3000, 20)),
            ({"s_sample": "3"}, range(3000, 4000, 13)),
            ({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),
        )

        histogram_data = (
            ({"h_sample": "1"}, [3, 14]),
            ({"h_sample": "2"}, range(1, 20, 2)),
            ({"h_sample": "3"}, range(1, 20, 2)),
            ({"h_sample": "1", "h_subsample": "b"}, range(1, 20, 2)),
        )

        counter = Counter("counter_test", "A counter.", {"type": "counter"})
        gauge = Gauge("gauge_test", "A gauge.", {"type": "gauge"})
        summary = Summary("summary_test", "A summary.", {"type": "summary"})
        histogram = Histogram(
            "histogram_test",
            "A histogram.",
            {"type": "histogram"},
            buckets=[5.0, 10.0, 15.0],
        )

        self.server.register(counter)
        self.server.register(gauge)
        self.server.register(summary)
        self.server.register(histogram)

        # Add data
        for labels, value in counter_data:
            counter.set(labels, value)
        for labels, value in gauge_data:
            gauge.set(labels, value)
        for labels, values in summary_data:
            for v in values:
                summary.add(labels, v)
        for labels, values in histogram_data:
            for v in values:
                histogram.observe(labels, v)

        expected_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
# HELP histogram_test A histogram.
# TYPE histogram_test histogram
histogram_test_bucket{h_sample="1",le="5.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="10.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="15.0",type="histogram"} 2.0
histogram_test_bucket{h_sample="1",le="+Inf",type="histogram"} 2.0
histogram_test_count{h_sample="1",type="histogram"} 2.0
histogram_test_sum{h_sample="1",type="histogram"} 17.0
histogram_test_bucket{h_sample="2",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="2",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="2",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="2",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="2",type="histogram"} 10.0
histogram_test_sum{h_sample="2",type="histogram"} 100.0
histogram_test_bucket{h_sample="3",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="3",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="3",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="3",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="3",type="histogram"} 10.0
histogram_test_sum{h_sample="3",type="histogram"} 100.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="1",h_subsample="b",type="histogram"} 10.0
histogram_test_sum{h_sample="1",h_subsample="b",type="histogram"} 100.0
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test_count{s_sample="1",type="summary"} 250
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test_count{s_sample="2",type="summary"} 50
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(self.metrics_url,
                                   headers={ACCEPT:
                                            text.TEXT_CONTENT_TYPE}) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(text.TEXT_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(self.metrics_url,
                                   headers={
                                       ACCEPT: binary.BINARY_CONTENT_TYPE
                                   }) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(binary.BINARY_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 4)
                for mf in metrics:
                    self.assertIsInstance(mf, pmp.MetricFamily)
                    if mf.type == pmp.COUNTER:
                        self.assertEqual(len(mf.metric), 4)
                    elif mf.type == pmp.GAUGE:
                        self.assertEqual(len(mf.metric), 4)
                    elif mf.type == pmp.SUMMARY:
                        self.assertEqual(len(mf.metric), 4)
                        self.assertEqual(len(mf.metric[0].summary.quantile), 3)
                    elif mf.type == pmp.HISTOGRAM:
                        self.assertEqual(len(mf.metric), 4)
                        self.assertEqual(len(mf.metric[0].histogram.bucket), 4)

    async def test_no_accept_header(self):
        """ check default format is used when no accept header is defined """

        # Add some metrics
        data = (({"data": 1}, 100), )
        c = Counter("test_counter", "Test Counter.", {"test": "test_counter"})
        self.server.register(c)

        for i in data:
            c.set(i[0], i[1])

        expected_data = """# HELP test_counter Test Counter.
# TYPE test_counter counter
test_counter{data="1",test="test_counter"} 100
"""

        async with aiohttp.ClientSession() as session:

            # Fetch without explicit accept type
            async with session.get(self.metrics_url) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(text.TEXT_CONTENT_TYPE,
                                 resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # TODO: Add another test here that includes the ACCEPT header
            # but with no value set. I have not worked out how to do this
            # yet as aiohttp expects headers to be a dict and a value of None
            # is not permitted.

    async def test_root_route(self):
        """ check root route returns content """
        async with aiohttp.ClientSession() as session:
            async with session.get(self.root_url) as resp:
                self.assertEqual(resp.status, 200)
                self.assertIn("text/html", resp.headers.get(CONTENT_TYPE))

    async def test_robots_route(self):
        """ check robots route returns content """
        async with aiohttp.ClientSession() as session:
            async with session.get(f"{self.root_url}robots.txt") as resp:
                self.assertEqual(resp.status, 200)
                self.assertIn("text/plain", resp.headers.get(CONTENT_TYPE))
Example #28
class Dump1090Exporter(object):
    """
    This class is responsible for fetching, parsing and exporting dump1090
    metrics to Prometheus.
    """

    def __init__(
        self,
        resource_path: str,
        host: str = None,
        port: int = 9105,
        aircraft_interval: int = 10,
        stats_interval: int = 60,
        time_periods: Sequence[str] = ("last1min",),
        origin: PositionType = None,
        fetch_timeout: float = 2.0,
        loop: AbstractEventLoop = None,
    ) -> None:
        """
        :param resource_path: The base dump1090 resource address. This can be
          a web address or a directory path.
        :param host: The host to expose Prometheus metrics on. Defaults
          to listen on all interfaces.
        :param port: The port to expose Prometheus metrics on. Defaults to
          port 9105.
        :param aircraft_interval: number of seconds between processing the
          dump1090 aircraft data. Defaults to 10 seconds.
        :param stats_interval: number of seconds between processing the
          dump1090 stats data. Defaults to 60 seconds as the data only
          seems to be updated at 60 second intervals.
        :param time_periods: A list of time period keys to extract from the
          statistics data. By default this is just the 'last1min' time
          period as Prometheus can provide the historical access.
        :param origin: a tuple of (lat, lon) representing the receiver
          location. The origin is used for distance calculations with
          aircraft data. If it is not provided then range calculations
          can not be performed and the maximum range metric will always
          be zero.
        :param fetch_timeout: The number of seconds to wait for a response
          from dump1090.
        :param loop: the event loop.
        """
        self.resources = build_resources(resource_path)
        self.loop = loop or asyncio.get_event_loop()
        self.host = host
        self.port = port
        self.aircraft_interval = datetime.timedelta(seconds=aircraft_interval)
        self.stats_interval = datetime.timedelta(seconds=stats_interval)
        self.stats_time_periods = time_periods
        self.origin = Position(*origin) if origin else None
        self.fetch_timeout = fetch_timeout
        self.svr = Service()
        self.stats_task = None  # type: Union[asyncio.Task, None]
        self.aircraft_task = None  # type: Union[asyncio.Task, None]
        self.initialise_metrics()
        logger.info(f"Monitoring dump1090 resources at: {self.resources.base}")
        logger.info(
            f"Refresh rates: aircraft={self.aircraft_interval}, statstics={self.stats_interval}"
        )
        logger.info(f"Origin: {self.origin}")

    async def start(self) -> None:
        """ Start the monitor """
        await self.svr.start(addr=self.host, port=self.port)
        logger.info(f"serving dump1090 prometheus metrics on: {self.svr.metrics_url}")

        # Attempt to retrieve the optional lat and lon position from
        # the dump1090 receiver data. If present this data will override
        # command line configuration.
        try:
            receiver = await self._fetch(self.resources.receiver)
            if receiver:
                if "lat" in receiver and "lon" in receiver:
                    self.origin = Position(receiver["lat"], receiver["lon"])
                    logger.info(
                        f"Origin successfully extracted from receiver data: {self.origin}"
                    )
        except Exception as exc:
            logger.error(f"Error fetching dump1090 receiver data: {exc}")

        # fmt: off
        self.stats_task = asyncio.ensure_future(self.updater_stats())  # type: ignore
        self.aircraft_task = asyncio.ensure_future(self.updater_aircraft())  # type: ignore
        # fmt: on

    async def stop(self) -> None:
        """ Stop the monitor """

        if self.stats_task:
            self.stats_task.cancel()
            try:
                await self.stats_task
            except asyncio.CancelledError:
                pass
            self.stats_task = None

        if self.aircraft_task:
            self.aircraft_task.cancel()
            try:
                await self.aircraft_task
            except asyncio.CancelledError:
                pass
            self.aircraft_task = None

        await self.svr.stop()

    def initialise_metrics(self) -> None:
        """Create metrics

        This method initialises a dict as the metrics attribute.

        The metrics dict has two str keys; one is `aircraft` and the other
        is `stats`.
        The `aircraft` key stores aircraft summary metrics using a value
        of Dict[str, Gauge].

        The `stats` key stores metrics under group keys. It has a value
        of Dict[str, Dict[str, Gauge]]
        """
        self.metrics = {"aircraft": {}, "stats": {}}  # type: ignore

        # aircraft
        d = self.metrics["aircraft"]
        for (name, label, doc) in Specs["aircraft"]:  # type: ignore
            d[name] = self._create_gauge_metric(label, doc)

        # statistics
        for group, metrics_specs in Specs["stats"].items():  # type: ignore
            d = self.metrics["stats"].setdefault(group, {})
            for name, label, doc in metrics_specs:
                d[name] = self._create_gauge_metric(label, doc)

    def _create_gauge_metric(self, label, doc):
        gauge = Gauge("dump1090_{}".format(label), doc)
        self.svr.register(gauge)
        return gauge
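    # For orientation, a hypothetical sliver of the Specs table consumed by
    # initialise_metrics() above; illustrative names only, not the exporter's
    # real table:
    #
    #   Specs = {
    #       "aircraft": [("observed", "aircraft_observed", "Aircraft recently observed.")],
    #       "stats": {
    #           "local": [("signal", "stats_local_signal", "Mean signal level.")],
    #       },
    #   }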

    async def _fetch(
        self,
        resource: str,
    ) -> Dict[Any, Any]:
        """ Fetch JSON data from a web or file resource and return a dict """
        logger.debug(f"fetching {resource}")
        if resource.startswith("http"):
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(
                        resource, timeout=self.fetch_timeout
                    ) as resp:
                        if resp.status != 200:
                            raise Exception(f"Fetch failed {resp.status}: {resource}")
                        data = await resp.json()
            except asyncio.TimeoutError:
                raise Exception(f"Request timed out to {resource}") from None
            except aiohttp.ClientError as exc:
                raise Exception(f"Client error {exc}, {resource}") from None
        else:
            with open(resource, "rt") as f:
                data = json.loads(f.read())

        return data

    async def updater_stats(self) -> None:
        """
        This long running coroutine task is responsible for fetching current
        statistics from dump1090 and then updating internal metrics.
        """
        while True:
            start = datetime.datetime.now()
            try:
                stats = await self._fetch(self.resources.stats)
                self.process_stats(stats, time_periods=self.stats_time_periods)
            except Exception as exc:
                logger.error(f"Error fetching dump1090 stats data: {exc}")

            # wait until next collection time
            end = datetime.datetime.now()
            wait_seconds = (start + self.stats_interval - end).total_seconds()
            await asyncio.sleep(wait_seconds)

    async def updater_aircraft(self) -> None:
        """
        This long running coroutine task is responsible for fetching current
        aircraft data from dump1090 and then updating internal metrics.
        """
        while True:
            start = datetime.datetime.now()
            try:
                aircraft = await self._fetch(self.resources.aircraft)
                self.process_aircraft(aircraft)
            except Exception as exc:
                logger.error(f"Error fetching dump1090 aircraft data: {exc}")

            # wait until next collection time
            end = datetime.datetime.now()
            wait_seconds = (start + self.aircraft_interval - end).total_seconds()
            await asyncio.sleep(wait_seconds)

    def process_stats(
        self, stats: dict, time_periods: Sequence[str] = ("last1min",)
    ) -> None:
        """Process dump1090 statistics into exported metrics.

        :param stats: a dict containing dump1090 statistics data.
        """
        metrics = self.metrics["stats"]  # type: Dict[str, Dict[str, Gauge]]

        for time_period in time_periods:
            try:
                tp_stats = stats[time_period]
            except KeyError:
                logger.exception(f"Problem extracting time period: {time_period}")
                continue

            labels = dict(time_period=time_period)

            for key in metrics:
                d = tp_stats[key] if key else tp_stats
                for name, metric in metrics[key].items():
                    try:
                        value = d[name]
                        # 'accepted' values are in a list
                        if isinstance(value, list):
                            value = value[0]
                    except KeyError:
                        # 'signal' and 'peak_signal' are not present if
                        # there are no aircraft.
                        if name not in ["peak_signal", "signal"]:
                            key_str = " {} ".format(key) if key else " "
                            logger.warning(
                                f"Problem extracting{key_str}item '{name}' from: {d}"
                            )
                        value = math.nan
                    metric.set(labels, value)

    def process_aircraft(self, aircraft: dict, threshold: int = 15) -> None:
        """Process aircraft statistics into exported metrics.

        :param aircraft: a dict containing aircraft data.
        :param threshold: only let aircraft seen within this threshold to
          contribute to the metrics.
        """
        # Ensure aircraft dict always contains all keys, as optional
        # items are not always present.
        for entry in aircraft["aircraft"]:
            for key in AircraftKeys:
                entry.setdefault(key, None)

        messages = aircraft["messages"]

        # 'seen' shows how long ago (in seconds before "now") a message
        # was last received from an aircraft.
        # 'seen_pos' shows how long ago (in seconds before "now") the
        # position was last updated
        aircraft_observed = 0
        aircraft_with_pos = 0
        aircraft_with_mlat = 0
        aircraft_max_range = 0.0
        # Filter aircraft to only those that have been seen within the
        # last n seconds to minimise contributions from aged observations.
        for a in aircraft["aircraft"]:
            if a["seen"] < threshold:
                aircraft_observed += 1
            if a["seen_pos"] and a["seen_pos"] < threshold:
                aircraft_with_pos += 1
                if self.origin:
                    distance = haversine_distance(
                        self.origin, Position(a["lat"], a["lon"])
                    )
                    if distance > aircraft_max_range:
                        aircraft_max_range = distance
                if a["mlat"] and "lat" in a["mlat"]:
                    aircraft_with_mlat += 1

        # Add any current data into the 'latest' time_period bucket
        labels = dict(time_period="latest")
        d = self.metrics["aircraft"]
        d["observed"].set(labels, aircraft_observed)
        d["observed_with_pos"].set(labels, aircraft_with_pos)
        d["observed_with_mlat"].set(labels, aircraft_with_mlat)
        d["max_range"].set(labels, aircraft_max_range)
        d["messages_total"].set(labels, messages)

        logger.debug(
            f"aircraft: observed={aircraft_observed}, "
            f"with_pos={aircraft_with_pos}, with_mlat={aircraft_with_mlat}, "
            f"max_range={aircraft_max_range}, messages={messages}"
        )


# NOTE: the head of this function was missing; reconstructed minimally.
# The duration < 0.3 failure trigger and the REQUESTS.inc call are
# assumptions, chosen so the exception handling below is exercised.
async def handle_request(duration):
    """Handle a simulated request, failing some of the time."""
    REQUESTS.inc({"path": "/"})  # assumes REQUESTS is a Counter
    if duration < 0.3:
        raise Exception("Ooops")
    await asyncio.sleep(duration)


async def handle_requests():
    # Start up the server to expose the metrics.
    await svr.start(port=8000)
    # Generate some requests.
    while True:
        try:
            await handle_request(random.random())
        except Exception:
            pass  # keep handling


if __name__ == "__main__":

    loop = asyncio.get_event_loop()

    svr = Service(loop=loop)
    svr.register(REQUESTS)

    try:
        loop.run_until_complete(handle_requests())
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(svr.stop())
    loop.stop()
    loop.close()
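
For comparison, here is a minimal sketch of the same request-generating
example written for a newer Python where asyncio.run() manages the event
loop, assuming a release where Service is importable from the top-level
package as in the examples above; the metric name and port are illustrative:

import asyncio
import random

from aioprometheus import Counter, Service

REQUESTS = Counter("request_total", "Total number of requests")


async def main():
    svr = Service()
    svr.register(REQUESTS)
    await svr.start(port=8000)
    try:
        while True:
            REQUESTS.inc({"path": "/"})
            await asyncio.sleep(random.random())
    finally:
        await svr.stop()


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass
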
Ejemplo n.º 30
0
class ExampleApp(object):
    """
    An example application that demonstrates how ``aioprometheus`` can be
    integrated and used within a Python application built upon asyncio.

    This application attempts to simulate a long running distributed system
    process, say a socket relay or some kind of message adapter. It is
    intentionally not hosting an existing web service in the application.

    In this case the aioprometheus.Service object is used to provide a
    new HTTP endpoint that can be used to expose Prometheus metrics on.

    If this application was a web service (i.e. already had an existing web
    interface) then the aioprometheus.Service object could be used as before
    to add another web interface or a different approach could be used that
    provides a metrics handler function for use with the existing web service.
    """

    def __init__(
        self,
        metrics_host="127.0.0.1",
        metrics_port: int = 5000,
        loop: BaseEventLoop = None,
    ):

        self.metrics_host = metrics_host
        self.metrics_port = metrics_port
        self.loop = loop or asyncio.get_event_loop()
        self.timer = None  # type: asyncio.Handle

        ######################################################################
        # Create application metrics and metrics service

        # Create a metrics server. The server will create a metrics collector
        # registry if one is not specifically created and passed in.
        self.msvr = Service()

        # Define some constant labels that need to be added to all metrics
        const_labels = {
            "host": socket.gethostname(),
            "app": f"{self.__class__.__name__}-{uuid.uuid4().hex}",
        }

        # Create metrics collectors

        # Create a counter metric to track requests
        self.requests_metric = Counter(
            "requests", "Number of requests.", const_labels=const_labels
        )

        # Collectors must be registered with the registry before they
        # get exposed.
        self.msvr.register(self.requests_metric)

        # Create a gauge metrics to track memory usage.
        self.ram_metric = Gauge(
            "memory_usage_bytes", "Memory usage in bytes.", const_labels=const_labels
        )
        self.msvr.register(self.ram_metric)

        # Create a gauge metrics to track CPU.
        self.cpu_metric = Gauge(
            "cpu_usage_percent", "CPU usage percent.", const_labels=const_labels
        )
        self.msvr.register(self.cpu_metric)

        self.payload_metric = Summary(
            "request_payload_size_bytes",
            "Request payload size in bytes.",
            const_labels=const_labels,
            invariants=[(0.50, 0.05), (0.99, 0.001)],
        )
        self.msvr.register(self.payload_metric)

        self.latency_metric = Histogram(
            "request_latency_seconds",
            "Request latency in seconds",
            const_labels=const_labels,
            buckets=[0.1, 0.5, 1.0, 5.0],
        )
        self.msvr.register(self.latency_metric)

    async def start(self):
        """ Start the application """
        await self.msvr.start(addr=self.metrics_host, port=self.metrics_port)
        logger.debug("Serving prometheus metrics on: %s", self.msvr.metrics_url)

        # Schedule a timer to update internal metrics. In a realistic
        # application metrics would be updated as needed. In this example
        # application a simple timer is used to emulate things happening,
        # which conveniently allows all metrics to be updated at once.
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)

    async def stop(self):
        """ Stop the application """
        await self.msvr.stop()
        if self.timer:
            self.timer.cancel()
        self.timer = None

    def on_timer_expiry(self):
        """ Update application to simulate work """

        # Update memory metrics
        self.ram_metric.set({"type": "virtual"}, psutil.virtual_memory().used)
        self.ram_metric.set({"type": "swap"}, psutil.swap_memory().used)

        # Update cpu metrics
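        # (interval=1 makes psutil block this callback, and hence the event
        # loop, for a second; psutil.cpu_percent(interval=None) samples
        # without blocking)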
        for c, p in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
            self.cpu_metric.set({"core": c}, p)

        # Incrementing a requests counter to emulate webserver app
        self.requests_metric.inc({"path": "/"})

        # Monitor request payload data to emulate webserver app
        self.payload_metric.add({"path": "/data"}, random.random() * 2 ** 10)

        # Monitor request latency to emulate webserver app
        self.latency_metric.add({"path": "/data"}, random.random() * 5)

        # re-schedule another metrics update
        self.timer = self.loop.call_later(1.0, self.on_timer_expiry)
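
A usage sketch for ExampleApp above, assuming the class and its imports
(asyncio, socket, uuid, psutil and the aioprometheus collectors) live in
one module; it mirrors the run pattern used elsewhere in these examples:

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    app = ExampleApp()
    loop.run_until_complete(app.start())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(app.stop())
        loop.close()
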
Ejemplo n.º 31
0
            now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
            log_record["timestamp"] = now
        if log_record.get("level"):
            log_record["level"] = log_record["level"].upper()
        else:
            log_record["level"] = record.levelname


logger = logging.getLogger()
logger.setLevel(getenv("LOG_LEVEL") or 10)
logHandler = logging.StreamHandler()
formatter = CustomJsonFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)

prometheus_service = Service(registry=Registry())
prometheus_labels = {
    "host": gethostname(),
}
ping_counter = Counter("health_check_counter", "total ping requests.")
latency_metric = Histogram(
    "request_latency_seconds",
    "request latency in seconds.",
    const_labels=prometheus_labels,
    buckets=[0.1, 0.5, 1.0, 5.0],
)
ram_metric = Gauge("memory_usage_bytes",
                   "memory usage in bytes.",
                   const_labels=prometheus_labels)
cpu_metric = Gauge("cpu_usage_percent",
                   "cpu usage percent.",
                   const_labels=prometheus_labels)
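
The snippet above is cut off before the collectors are wired up. A sketch of
the presumed next step, registering each collector with the service (the
coroutine name, address and port here are illustrative assumptions):

for collector in (ping_counter, latency_metric, ram_metric, cpu_metric):
    prometheus_service.register(collector)


async def start_metrics_server():
    await prometheus_service.start(addr="0.0.0.0", port=8000)
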
Ejemplo n.º 33
0
class PrometheusMonitoring(Plugin):
    """
    The :py:class:`~.PrometheusMonitoring`
    implements an interface to monitor the state of the Drones using Prometheus.
    """
    def __init__(self):
        config = Configuration().Plugins.PrometheusMonitoring

        self._port = config.port
        self._addr = config.addr

        self._svr_started = False
        self._drones = {}

        self._svr = Service()

        self._gauges = {
            ResourceStatus.Booting: Gauge("booting", "Booting drones"),
            ResourceStatus.Running: Gauge("running", "Running drones"),
            ResourceStatus.Stopped: Gauge("stopped", "Stopped drones"),
            ResourceStatus.Deleted: Gauge("deleted", "Deleted drones"),
            ResourceStatus.Error: Gauge("error", "Drones in error state"),
        }

        for gauge in self._gauges.values():
            self._svr.register(gauge)
            gauge.set({}, 0)

    async def start(self):
        await self._svr.start(addr=self._addr, port=self._port)
        logger.debug(f"Serving Prometheus metrics on {self._svr.metrics_url}")
        self._svr_started = True

    async def notify(self, state: State,
                     resource_attributes: AttributeDict) -> None:
        """
        Update Prometheus metrics at every state change

        :param state: New state of the Drone
        :type state: State
        :param resource_attributes: Contains all meta-data of the Drone (created and
            updated timestamps, dns name, unique id, site_name, machine_type, etc.)
        :type resource_attributes: AttributeDict
        :return: None
        """
        if not self._svr_started:
            await self.start()

        logger.debug(
            f"Drone: {str(resource_attributes)} has changed state to {state}")

        if resource_attributes.drone_uuid in self._drones:
            old_status = self._drones[resource_attributes.drone_uuid]
            self._gauges[old_status].dec({})

        new_status = resource_attributes.resource_status
        self._drones[resource_attributes.drone_uuid] = new_status

        self._gauges[new_status].inc({})

        if new_status == ResourceStatus.Deleted:
            self._drones.pop(resource_attributes.drone_uuid, None)
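
The notify() bookkeeping above preserves a simple invariant: the sum of all
status gauges always equals the number of tracked drones, because the gauge
for the old status is decremented before the new one is incremented. A
condensed, framework-free sketch of the same idea (the names here are
illustrative, not part of TARDIS):

from aioprometheus import Gauge

status_gauges = {
    "running": Gauge("running", "Running drones"),
    "error": Gauge("error", "Drones in error state"),
}
drone_status = {}


def on_state_change(drone_uuid, new_status):
    # Move the drone's count from its previous status gauge to the new one.
    old_status = drone_status.get(drone_uuid)
    if old_status is not None:
        status_gauges[old_status].dec({})
    drone_status[drone_uuid] = new_status
    status_gauges[new_status].inc({})
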
Ejemplo n.º 34
0
class Prometheus:
    def __init__(self, bot):
        self.bot = bot

        self.msvr = Service()

        if platform.system() == "Linux":
            self.platform = platform
            self.pid = os.path.join("/proc", "self")
            self.pagesize = resource.getpagesize()
            self.ticks = os.sysconf("SC_CLK_TCK")
            self.btime = 0

            with open(os.path.join("/proc", "stat"), "rb") as stat:
                for line in stat:
                    if line.startswith(b"btime "):
                        self.btime = float(line.split()[1])
                        break

        self.vmem = Gauge("process_virtual_memory_bytes", "Virtual memory size in bytes.")
        self.rss = Gauge("process_resident_memory_bytes", "Resident memory size in bytes.")
        self.start_time = Gauge("process_start_time_seconds", "Start time of the process since unix epoch in seconds.")
        self.cpu = Counter("process_cpu_seconds", "Total user and system CPU time spent in seconds.")
        self.fds = Gauge("process_open_fds", "Number of open file descriptors.")

        self.info = Gauge("python_info", "Python platform information.")
        self.collected = Counter("python_gc_objects_collected", "Objects collected during GC.")
        self.uncollectable = Counter("python_gc_objects_uncollectable", "Uncollectable objects found during GC.")
        self.collections = Counter("python_gc_collections", "Number of times this generation was collected.")

        self.latency = Gauge("modmail_latency", "The average latency for shards on this cluster")
        self.events = Counter("modmail_discord_events", "The total number of processed events.")
        self.dispatch = Counter("modmail_dispatch_events", "The total number of dispatched events.")
        self.http = Counter("modmail_http_requests", "The number of http requests sent to Discord.")

        self.guilds_join = Counter("modmail_guilds_join", "The number of guilds ModMail is added to.")
        self.guilds_leave = Counter("modmail_guilds_leave", "The number of guilds ModMail is removed from.")

        self.shards = Gauge("modmail_shards", "The total number of shards on this cluster.")
        self.guilds = Gauge("modmail_guilds", "The total number of guilds on this cluster.")
        self.users = Gauge("modmail_users", "The total number of users on this cluster.")

        self.commands = Counter("modmail_commands", "The total number of commands used on the bot.")
        self.tickets = Counter("modmail_tickets", "The total number of tickets created by the bot.")
        self.tickets_message = Counter("modmail_tickets_message", "The total number of messages sent in tickets.")

    async def start(self):
        for value in vars(self).values():
            if isinstance(value, Collector):
                self.msvr.register(value)
        await self.msvr.start(addr="127.0.0.1", port=6000 + self.bot.cluster)
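        # Note: this pokes at private aiohttp internals to silence
        # per-request access logging; it may break across aiohttp versions.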
        self.msvr._runner._server._kwargs["access_log"] = None
        self.bot.loop.create_task(self.update_bot_stats())

        if platform.system() == "Linux":
            self.bot.loop.create_task(self.update_process_stats())
            self.bot.loop.create_task(self.update_platform_stats())

    async def update_bot_stats(self):
        while True:
            await self.bot.wait_until_ready()
            await asyncio.sleep(60)
            self.shards.set({}, len(self.bot.shards))
            self.guilds.set({}, len(self.bot.guilds))
            self.users.set({}, len(self.bot.users))
            self.latency.set({}, self.bot.latency)
            await asyncio.sleep(10)

    async def update_process_stats(self):
        while True:
            with open(os.path.join(self.pid, "stat"), "rb") as stat:
                parts = stat.read().split(b")")[-1].split()
            self.vmem.set({}, float(parts[20]))
            self.rss.set({}, float(parts[21]) * self.pagesize)
            self.start_time.set({}, float(parts[19]) / self.ticks + self.btime)
            self.cpu.set({}, float(parts[11]) / self.ticks + float(parts[12]) / self.ticks)
            self.fds.set({}, len(os.listdir(os.path.join(self.pid, "fd"))))
            await asyncio.sleep(5)

    async def update_platform_stats(self):
        while True:
            self.info.set(
                {
                    "version": self.platform.python_version(),
                    "implementation": self.platform.python_implementation(),
                    "major": platform.python_version_tuple()[0],
                    "minor": platform.python_version_tuple()[1],
                    "patchlevel": platform.python_version_tuple()[2],
                },
                1,
            )
            for gen, stat in enumerate(gc.get_stats()):
                self.collected.set({"generation": str(gen)}, stat["collected"])
                self.uncollectable.set({"generation": str(gen)}, stat["uncollectable"])
                self.collections.set({"generation": str(gen)}, stat["collections"])
            await asyncio.sleep(5)
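
start() above registers metrics by introspecting the instance for Collector
attributes, so new metrics never need to be registered by hand. A standalone
sketch of that pattern (assuming Collector is importable from aioprometheus,
as the example implies; the class and metric names are illustrative):

from aioprometheus import Collector, Counter, Gauge, Service


class Metrics:
    def __init__(self):
        self.svc = Service()
        self.hits = Counter("hits_total", "Number of hits.")
        self.temperature = Gauge("temperature", "Current temperature.")

    def register_all(self):
        # Register every attribute that is a Collector instance.
        for value in vars(self).values():
            if isinstance(value, Collector):
                self.svc.register(value)
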
Ejemplo n.º 35
0
class TestTextExporter(asynctest.TestCase):
    async def setUp(self):
        self.registry = Registry()
        self.server = Service(registry=self.registry)
        await self.server.start(addr="127.0.0.1")
        self.metrics_url = self.server.metrics_url
        self.root_url = self.server.root_url

    async def tearDown(self):
        await self.server.stop()

    async def test_invalid_registry(self):
        """ check only valid registry can be provided """
        for invalid_registry in ["nope", dict(), list()]:
            with self.assertRaises(Exception) as cm:
                Service(registry=invalid_registry)
            self.assertIn("registry must be a Registry, got:", str(cm.exception))

        Service(registry=Registry())

    def test_fetch_url_before_starting_server(self):
        """ check accessing a URL property raises expection if not available """
        s = Service()

        with self.assertRaises(Exception) as cm:
            _ = s.root_url
        self.assertIn(
            "No URL available, Prometheus metrics server is not running",
            str(cm.exception),
        )

        with self.assertRaises(Exception) as cm:
            _ = s.metrics_url
        self.assertIn(
            "No URL available, Prometheus metrics server is not running",
            str(cm.exception),
        )

    def test_register_deregister(self):
        """ check registering and deregistering metrics """
        c = Counter("test_counter", "Test Counter.", {"test": "test_counter"})
        self.server.register(c)

        # Check registering a collector with same name raises an exception
        c2 = Counter("test_counter", "Another Test Counter.")
        with self.assertRaises(ValueError) as cm:
            self.server.register(c2)
        self.assertIn("is already registered", str(cm.exception))

        self.server.deregister("test_counter")

        # Check deregistering a non-existent collector raises an exception
        with self.assertRaises(KeyError) as cm:
            self.server.deregister("test_counter")

    async def test_start_started_server(self):
        """ check starting a started server """

        with unittest.mock.patch.object(
            aioprometheus.service.logger, "warning"
        ) as mock_warn:
            await self.server.start(addr="127.0.0.1")
            self.assertEqual(mock_warn.call_count, 1)
            mock_warn.assert_called_once_with(
                "Prometheus metrics server is already running"
            )

    async def test_stop_stopped_server(self):
        """ check stopping a stopped server """

        s = Service(registry=self.registry)
        await s.start(addr="127.0.0.1")
        await s.stop()

        with unittest.mock.patch.object(
            aioprometheus.service.logger, "warning"
        ) as mock_warn:
            await s.stop()
            self.assertEqual(mock_warn.call_count, 1)
            mock_warn.assert_called_once_with(
                "Prometheus metrics server is already stopped"
            )

    async def test_counter(self):
        """ check counter metric export """

        # Add some metrics
        data = (
            ({"data": 1}, 100),
            ({"data": "2"}, 200),
            ({"data": 3}, 300),
            ({"data": 1}, 400),
        )
        c = Counter("test_counter", "Test Counter.", {"test": "test_counter"})
        self.server.register(c)

        for i in data:
            c.set(i[0], i[1])

        expected_data = """# HELP test_counter Test Counter.
# TYPE test_counter counter
test_counter{data="1",test="test_counter"} 400
test_counter{data="2",test="test_counter"} 200
test_counter{data="3",test="test_counter"} 300
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(
                self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(
                self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.COUNTER)
                self.assertEqual(len(mf.metric), 3)

    async def test_gauge(self):
        """ check gauge metric export """

        # Add some metrics
        data = (
            ({"data": 1}, 100),
            ({"data": "2"}, 200),
            ({"data": 3}, 300),
            ({"data": 1}, 400),
        )
        g = Gauge("test_gauge", "Test Gauge.", {"test": "test_gauge"})
        self.server.register(g)

        for i in data:
            g.set(i[0], i[1])

        expected_data = """# HELP test_gauge Test Gauge.
# TYPE test_gauge gauge
test_gauge{data="1",test="test_gauge"} 400
test_gauge{data="2",test="test_gauge"} 200
test_gauge{data="3",test="test_gauge"} 300
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(
                self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(
                self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.GAUGE)
                self.assertEqual(len(mf.metric), 3)

    async def test_summary(self):
        """ check summary metric export """

        # Add some metrics
        data = [3, 5.2, 13, 4]
        label = {"data": 1}

        s = Summary("test_summary", "Test Summary.", {"test": "test_summary"})
        self.server.register(s)

        for i in data:
            s.add(label, i)

        expected_data = """# HELP test_summary Test Summary.
# TYPE test_summary summary
test_summary{data="1",quantile="0.5",test="test_summary"} 4.0
test_summary{data="1",quantile="0.9",test="test_summary"} 5.2
test_summary{data="1",quantile="0.99",test="test_summary"} 5.2
test_summary_count{data="1",test="test_summary"} 4
test_summary_sum{data="1",test="test_summary"} 25.2
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(
                self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(
                self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.SUMMARY)
                self.assertEqual(len(mf.metric), 1)
                self.assertEqual(len(mf.metric[0].summary.quantile), 3)

    async def test_histogram(self):
        """ check histogram metric export """

        # Add some metrics
        data = [3, 5.2, 13, 4]
        label = {"data": 1}

        h = Histogram(
            "histogram_test",
            "Test Histogram.",
            {"type": "test_histogram"},
            buckets=[5.0, 10.0, 15.0],
        )
        self.server.register(h)

        for i in data:
            h.add(label, i)

        expected_data = """# HELP histogram_test Test Histogram.
# TYPE histogram_test histogram
histogram_test_bucket{data="1",le="5.0",type="test_histogram"} 2.0
histogram_test_bucket{data="1",le="10.0",type="test_histogram"} 3.0
histogram_test_bucket{data="1",le="15.0",type="test_histogram"} 4.0
histogram_test_bucket{data="1",le="+Inf",type="test_histogram"} 4.0
histogram_test_count{data="1",type="test_histogram"} 4.0
histogram_test_sum{data="1",type="test_histogram"} 25.2
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(
                self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(
                self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 1)
                mf = metrics[0]
                self.assertIsInstance(mf, pmp.MetricFamily)
                self.assertEqual(mf.type, pmp.HISTOGRAM)
                self.assertEqual(len(mf.metric), 1)
                self.assertEqual(len(mf.metric[0].histogram.bucket), 4)

    async def test_all(self):
        """ check multiple metric types can be exported together """

        counter_data = (
            ({"c_sample": "1"}, 100),
            ({"c_sample": "2"}, 200),
            ({"c_sample": "3"}, 300),
            ({"c_sample": "1", "c_subsample": "b"}, 400),
        )

        gauge_data = (
            ({"g_sample": "1"}, 500),
            ({"g_sample": "2"}, 600),
            ({"g_sample": "3"}, 700),
            ({"g_sample": "1", "g_subsample": "b"}, 800),
        )

        summary_data = (
            ({"s_sample": "1"}, range(1000, 2000, 4)),
            ({"s_sample": "2"}, range(2000, 3000, 20)),
            ({"s_sample": "3"}, range(3000, 4000, 13)),
            ({"s_sample": "1", "s_subsample": "b"}, range(4000, 5000, 47)),
        )

        histogram_data = (
            ({"h_sample": "1"}, [3, 14]),
            ({"h_sample": "2"}, range(1, 20, 2)),
            ({"h_sample": "3"}, range(1, 20, 2)),
            ({"h_sample": "1", "h_subsample": "b"}, range(1, 20, 2)),
        )

        counter = Counter("counter_test", "A counter.", {"type": "counter"})
        gauge = Gauge("gauge_test", "A gauge.", {"type": "gauge"})
        summary = Summary("summary_test", "A summary.", {"type": "summary"})
        histogram = Histogram(
            "histogram_test",
            "A histogram.",
            {"type": "histogram"},
            buckets=[5.0, 10.0, 15.0],
        )

        self.server.register(counter)
        self.server.register(gauge)
        self.server.register(summary)
        self.server.register(histogram)

        # Add data
        [counter.set(c[0], c[1]) for c in counter_data]
        [gauge.set(g[0], g[1]) for g in gauge_data]
        [summary.add(i[0], s) for i in summary_data for s in i[1]]
        [histogram.observe(i[0], h) for i in histogram_data for h in i[1]]

        expected_data = """# HELP counter_test A counter.
# TYPE counter_test counter
counter_test{c_sample="1",type="counter"} 100
counter_test{c_sample="2",type="counter"} 200
counter_test{c_sample="3",type="counter"} 300
counter_test{c_sample="1",c_subsample="b",type="counter"} 400
# HELP gauge_test A gauge.
# TYPE gauge_test gauge
gauge_test{g_sample="1",type="gauge"} 500
gauge_test{g_sample="2",type="gauge"} 600
gauge_test{g_sample="3",type="gauge"} 700
gauge_test{g_sample="1",g_subsample="b",type="gauge"} 800
# HELP histogram_test A histogram.
# TYPE histogram_test histogram
histogram_test_bucket{h_sample="1",le="5.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="10.0",type="histogram"} 1.0
histogram_test_bucket{h_sample="1",le="15.0",type="histogram"} 2.0
histogram_test_bucket{h_sample="1",le="+Inf",type="histogram"} 2.0
histogram_test_count{h_sample="1",type="histogram"} 2.0
histogram_test_sum{h_sample="1",type="histogram"} 17.0
histogram_test_bucket{h_sample="2",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="2",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="2",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="2",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="2",type="histogram"} 10.0
histogram_test_sum{h_sample="2",type="histogram"} 100.0
histogram_test_bucket{h_sample="3",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="3",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="3",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="3",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="3",type="histogram"} 10.0
histogram_test_sum{h_sample="3",type="histogram"} 100.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="5.0",type="histogram"} 3.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="10.0",type="histogram"} 5.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="15.0",type="histogram"} 8.0
histogram_test_bucket{h_sample="1",h_subsample="b",le="+Inf",type="histogram"} 10.0
histogram_test_count{h_sample="1",h_subsample="b",type="histogram"} 10.0
histogram_test_sum{h_sample="1",h_subsample="b",type="histogram"} 100.0
# HELP summary_test A summary.
# TYPE summary_test summary
summary_test{quantile="0.5",s_sample="1",type="summary"} 1272.0
summary_test{quantile="0.9",s_sample="1",type="summary"} 1452.0
summary_test{quantile="0.99",s_sample="1",type="summary"} 1496.0
summary_test_count{s_sample="1",type="summary"} 250
summary_test_sum{s_sample="1",type="summary"} 374500.0
summary_test{quantile="0.5",s_sample="2",type="summary"} 2260.0
summary_test{quantile="0.9",s_sample="2",type="summary"} 2440.0
summary_test{quantile="0.99",s_sample="2",type="summary"} 2500.0
summary_test_count{s_sample="2",type="summary"} 50
summary_test_sum{s_sample="2",type="summary"} 124500.0
summary_test{quantile="0.5",s_sample="3",type="summary"} 3260.0
summary_test{quantile="0.9",s_sample="3",type="summary"} 3442.0
summary_test{quantile="0.99",s_sample="3",type="summary"} 3494.0
summary_test_count{s_sample="3",type="summary"} 77
summary_test_sum{s_sample="3",type="summary"} 269038.0
summary_test{quantile="0.5",s_sample="1",s_subsample="b",type="summary"} 4235.0
summary_test{quantile="0.9",s_sample="1",s_subsample="b",type="summary"} 4470.0
summary_test{quantile="0.99",s_sample="1",s_subsample="b",type="summary"} 4517.0
summary_test_count{s_sample="1",s_subsample="b",type="summary"} 22
summary_test_sum{s_sample="1",s_subsample="b",type="summary"} 98857.0
"""

        async with aiohttp.ClientSession() as session:

            # Fetch as text
            async with session.get(
                self.metrics_url, headers={ACCEPT: TEXT_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # Fetch as binary
            async with session.get(
                self.metrics_url, headers={ACCEPT: BINARY_CONTENT_TYPE}
            ) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(BINARY_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                metrics = pmp.decode(content)
                self.assertEqual(len(metrics), 4)
                for mf in metrics:
                    self.assertIsInstance(mf, pmp.MetricFamily)
                    if mf.type == pmp.COUNTER:
                        self.assertEqual(len(mf.metric), 4)
                    elif mf.type == pmp.GAUGE:
                        self.assertEqual(len(mf.metric), 4)
                    elif mf.type == pmp.SUMMARY:
                        self.assertEqual(len(mf.metric), 4)
                        self.assertEqual(len(mf.metric[0].summary.quantile), 3)
                    elif mf.type == pmp.HISTOGRAM:
                        self.assertEqual(len(mf.metric), 4)
                        self.assertEqual(len(mf.metric[0].histogram.bucket), 4)

    async def test_no_accept_header(self):
        """ check default format is used when no accept header is defined """

        # Add some metrics
        data = (({"data": 1}, 100),)
        c = Counter("test_counter", "Test Counter.", {"test": "test_counter"})
        self.server.register(c)

        for i in data:
            c.set(i[0], i[1])

        expected_data = """# HELP test_counter Test Counter.
# TYPE test_counter counter
test_counter{data="1",test="test_counter"} 100
"""

        async with aiohttp.ClientSession() as session:

            # Fetch without explicit accept type
            async with session.get(self.metrics_url) as resp:
                self.assertEqual(resp.status, 200)
                content = await resp.read()
                self.assertEqual(TEXT_CONTENT_TYPE, resp.headers.get(CONTENT_TYPE))
                self.assertEqual(expected_data, content.decode())

            # TODO: Add another test here that includes the ACCEPT header
            # but with no value set. I have not worked out how to do this
            # yet as aiohttp expects headers to be a dict and a value of None
            # is not permitted.
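            # One possible sketch: aiohttp does allow an empty string value,
            # e.g. headers={ACCEPT: ""}, which may approximate the
            # missing-value case.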

    async def test_root_route(self):
        """ check root route returns content """
        async with aiohttp.ClientSession() as session:
            async with session.get(self.root_url) as resp:
                self.assertEqual(resp.status, 200)
                self.assertIn("text/html", resp.headers.get(CONTENT_TYPE))

    async def test_robots_route(self):
        """ check robots route returns content """
        async with aiohttp.ClientSession() as session:
            async with session.get(f"{self.root_url}robots.txt") as resp:
                self.assertEqual(resp.status, 200)
                self.assertIn("text/plain", resp.headers.get(CONTENT_TYPE))