Example #1
    def __init__(self):
        self.registry = prometheus_client.CollectorRegistry()

        self._video_processed = prometheus_client.Counter(
            "video_precessed",
            "Video processed count",
            labelnames=(),
            namespace="youtube",
            subsystem="video",
            unit="",
            registry=self.registry,
            labelvalues=None,
        )
        self._emails_found = prometheus_client.Counter(
            "emails_found",
            "Emails found",
            labelnames=(),
            namespace="youtube",
            subsystem="video",
            unit="",
            registry=self.registry,
            labelvalues=None,
        )
        self._unique_emails_found = prometheus_client.Counter(
            "unique_emails_found",
            "Unique emails found",
            labelnames=(),
            namespace="youtube",
            subsystem="video",
            unit="",
            registry=self.registry,
            labelvalues=None,
        )
Example #2
def ExportToDjangoView(request):
    """Exports /metrics as a Django view.

    You can use django_prometheus.urls to map /metrics to this view.
    """
    if "prometheus_multiproc_dir" in os.environ:
        registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = prometheus_client.REGISTRY
    metrics_page = prometheus_client.generate_latest(registry)
    expected_username = getattr(settings, "DJANGO_PROMETHEUS_AUTHORIZATION_USERNAME", None)
    expected_password = getattr(settings, "DJANGO_PROMETHEUS_AUTHORIZATION_PASSWORD", None)
    if expected_password is not None and expected_username is not None:
        auth_header = request.META.get("HTTP_AUTHORIZATION", "")
        token_type, _, credentials = auth_header.partition(" ")
        if credentials == '':
            return HttpResponse("", status=400)

        received_auth_string = base64.b64decode(credentials).decode()
        if ':' not in received_auth_string:
            return HttpResponse("", status=400)

        received_username, _, received_password = received_auth_string.partition(':')

        valid_username = received_username == expected_username
        valid_password = received_password == expected_password

        if token_type != 'Basic' or not valid_username or not valid_password:
            return HttpResponse("", status=401)

    return HttpResponse(
        metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST
    )
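As the docstring notes, django_prometheus ships a URLconf that maps /metrics to this view. A minimal wiring sketch, assuming the project mounts it at the root (the empty prefix is an assumption):

# urls.py (sketch): expose /metrics through django_prometheus's bundled URLconf.
from django.urls import include, path

urlpatterns = [
    path("", include("django_prometheus.urls")),
]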
Example #3
    def run(self):
        with ExitStack() as es:
            self.stopped.clear()
            es.callback(self.stopped.set)

            self.logger.info("connecting consumer to rabbitmq server at %s:%d as %s.",
                self.connection_parameters.host,
                self.connection_parameters.port,
                self.connection_parameters.credentials.username,
            )
            self.connection = es.enter_context(pika.BlockingConnection(self.connection_parameters))
            self.channel = es.enter_context(self.connection.channel())
            self.connected.set()

            self.logger.info("setting up queues and exchanges.")
            declare_exchange_with_queue(self.channel, self.src_queue)
            declare_exchange_with_queue(self.channel, self.dst_exchange_beehive)
            self.channel.exchange_declare(self.dst_exchange_node, exchange_type="topic", durable=True)

            # register and run fresh set of metrics and metrics server
            self.logger.info("starting metric server on %s:%d.", self.metrics_host, self.metrics_port)
            registry = prometheus_client.CollectorRegistry()
            self.messages_total = Counter("wes_data_service_messages_total", "Total number of messages handled.", registry=registry)
            self.messages_rejected_total = Counter("wes_data_service_messages_rejected_total", "Total number of invalid messages.", registry=registry)
            self.messages_published_node_total = Counter("wes_data_service_messages_published_node_total", "Total number of messages published to node.", registry=registry)
            self.messages_published_beehive_total = Counter("wes_data_service_messages_published_beehive_total", "Total number of messages published to beehive.", registry=registry)
            metrics_server = MetricServer(self.metrics_host, self.metrics_port, registry)
            threading.Thread(target=metrics_server.run, daemon=True).start()
            es.callback(metrics_server.shutdown)

            self.logger.info("starting consumer on %s.", self.src_queue)
            self.channel.basic_consume(self.src_queue, self.on_message_callback, auto_ack=False)
            self.channel.start_consuming()
Example #4
def app_context(request_settings, init_db=True, clean_db=True, drop_db=False):
    try:
        os.environ.pop("FLASK_APP_CONFIG_FILE", None)
        conn_param = lm_db.db_connection_params(request_settings)
        if init_db:
            lm_db.create_db(conn_params=conn_param)
            logger.debug("DB created (conn params=%r)", conn_param)
        flask_app = create_app(env="testing",
                               settings=request_settings,
                               init_app=False)
        flask_app.before_request(process_auto_login)
        with flask_app.app_context() as ctx:
            logger.info("Starting application")
            initialize_app(flask_app,
                           ctx,
                           prom_registry=prometheus_client.CollectorRegistry())
            if init_db:
                logger.debug("Initializing DB...")
                lm_db.db.create_all()
                logger.debug("DB initialized!")
            yield ctx
        # clean the database and
        # close all sessions and connections
        with flask_app.app_context() as ctx:
            if clean_db:
                # _clean_db()
                logger.debug("DB cleanup")
            if drop_db:
                lm_db.db.close_all_sessions()
                lm_db.db.engine.pool.dispose()
                lm_db.drop_db(conn_param)
                logger.debug("DB deleted (connection params=%r)", conn_param)
    except Exception as e:
        logger.exception(e)
        raise RuntimeError(e)
Example #5
def app():
    """ create a test app with various endpoints for the test scenarios """
    app = web.Application()
    routes = web.RouteTableDef()

    registry = prometheus_client.CollectorRegistry()

    app.middlewares.append(prometheus_middleware_factory(registry=registry))
    app.router.add_get("/metrics", metrics(registry=registry))

    @routes.get("/200")
    async def response_200(_):
        return json_response({"message": "Hello World"})

    @routes.get("/exception")
    async def response_exception(_):
        raise ValueError("Error")

    @routes.get("/path/{value}")
    async def response_detail(request):
        return json_response({"message": f"Hello {request.match_info['value']}"})

    app.router.add_routes(routes)

    #
    # @app.route("/500")
    # async def error(request):
    #     raise HTTPException(status_code=500, detail="this is a test error")
    #
    # @app.route("/unhandled")
    # async def unhandled(request):
    #     test_dict = {"yup": 123}
    #     return JSONResponse({"message": test_dict["value_error"]})

    yield app
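A hedged usage sketch for the fixture above, assuming it is registered as a pytest fixture named app and that pytest-aiohttp's aiohttp_client fixture is available (the test name is hypothetical):

async def test_counts_requests(aiohttp_client, app):
    # Hit one instrumented endpoint, then check that /metrics is being served.
    client = await aiohttp_client(app)
    resp = await client.get("/200")
    assert resp.status == 200
    metrics_resp = await client.get("/metrics")
    assert metrics_resp.status == 200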
Example #6
def worker_init(**kwargs):
    servicer.init(WorkerParams.service_name, ThreadContextHolder())
    if WorkerParams.prometheus_port is not None:
        registry = prometheus_client.CollectorRegistry()
        prometheus_client.multiprocess.MultiProcessCollector(registry)
        prometheus_client.start_http_server(WorkerParams.prometheus_port,
                                            registry=registry)
Example #7
    def test_record_call_latency_async(self):
        definitions = [
            MetricDefinition('Histogram', 'histo', 'A histogram',
                             ['foo', 'bar'])
        ]
        prometheus_metrics = PrometheusMetrics(
            definitions=definitions,
            registry=prometheus_client.CollectorRegistry())
        label_call_args = []

        def get_labels(*args, **kwargs):
            label_call_args.append((args, kwargs))
            return {'foo': 'FOO', 'bar': 'BAR'}

        @prometheus_metrics.record_call_latency('histo', get_labels=get_labels)
        @inlineCallbacks
        def func(param1, param2=None):
            yield
            returnValue(param1)

        obj = object()
        result = yield func(obj, param2='baz')
        self.assertIs(result, obj)
        # the get_labels function is called with the same args as the function
        self.assertEqual(label_call_args, [((obj, ), {'param2': 'baz'})])
        self.assertIn('histo_count{bar="BAR",foo="FOO"} 1.0',
                      prometheus_metrics.generate_latest().decode('ascii'))
Example #8
 def test_metrics(self):
     prometheus_metrics = create_metrics(
         self.metrics_definitions,
         registry=prometheus_client.CollectorRegistry())
     self.assertIsInstance(prometheus_metrics, PrometheusMetrics)
     self.assertCountEqual(prometheus_metrics.available_metrics,
                           ['sample_counter', 'sample_histogram'])
Example #9
    def _write_response(self, writer):
        registry = prometheus_client.CollectorRegistry()

        status_gauge = prometheus_client.Gauge(
            "ldapsync_up",
            "Connectivity to LDAP server",
            registry=registry,
        )

        try:
            ctr = self.read_counter()
        except Exception:
            status_gauge.set(0)
        else:
            status_gauge.set(1)

            delay_gauge = prometheus_client.Gauge(
                "ldapsync_delay_seconds",
                "Delay of LDAP replication in seconds",
                registry=registry,
            )

            # Use an aware UTC datetime; utcnow().timestamp() would be skewed by
            # the local timezone offset.
            now = datetime.now(timezone.utc).timestamp()
            delay_gauge.set(now - ctr)

        writer.write(b"HTTP/1.0 200 OK\r\n")
        writer.write("Content-Type: {}\r\n".format(
            prometheus_client.exposition.CONTENT_TYPE_LATEST).encode("utf-8"))
        writer.write(b"\r\n")
        writer.write(prometheus_client.exposition.generate_latest(registry))
Example #10
    def test_record_call_latency_sync(self):
        definitions = [
            MetricDefinition("Histogram", "histo", "An histogram",
                             ["foo", "bar"])
        ]
        prometheus_metrics = PrometheusMetrics(
            definitions=definitions,
            registry=prometheus_client.CollectorRegistry(),
        )
        label_call_args = []

        def get_labels(*args, **kwargs):
            label_call_args.append((args, kwargs))
            return {"foo": "FOO", "bar": "BAR"}

        @prometheus_metrics.record_call_latency("histo", get_labels=get_labels)
        def func(param1, param2=None):
            return param1

        obj = object()
        result = func(obj, param2="baz")
        self.assertIs(result, obj)
        # the get_labels function is called with the same args as the function
        self.assertEqual(label_call_args, [((obj, ), {"param2": "baz"})])
        self.assertIn(
            'histo_count{bar="BAR",foo="FOO"} 1.0',
            prometheus_metrics.generate_latest().decode("ascii"),
        )
Example #11
    def test_track_tftp_latency(self):
        class Thing:
            did_something = False

            def do_something(self):
                self.did_something = True
                return True

        thing = Thing()
        start_time = time.time()
        prometheus_metrics = create_metrics(
            METRICS_DEFINITIONS,
            registry=prometheus_client.CollectorRegistry())
        thing.do_something = track_tftp_latency(
            thing.do_something,
            start_time=start_time,
            filename='myfile.txt',
            prometheus_metrics=prometheus_metrics)
        time_mock = self.patch(tftp_module, 'time')
        time_mock.return_value = start_time + 0.5
        result = thing.do_something()
        self.assertTrue(result)
        self.assertTrue(thing.did_something)

        metrics = prometheus_metrics.generate_latest().decode('ascii')
        self.assertIn(
            'maas_tftp_file_transfer_latency_count{filename="myfile.txt"} 1.0',
            metrics)
        self.assertIn(
            'maas_tftp_file_transfer_latency_bucket'
            '{filename="myfile.txt",le="0.5"} 1.0', metrics)
        self.assertIn(
            'maas_tftp_file_transfer_latency_bucket'
            '{filename="myfile.txt",le="0.25"} 0.0', metrics)
Example #12
def setup_status(app) -> prometheus_client.CollectorRegistry:
    """Add /status to serve Prometheus-driven runtime metrics."""
    registry = prometheus_client.CollectorRegistry(auto_describe=True)
    app["request_count"] = prometheus_client.Counter(
        "requests_total",
        "Total Request Count",
        ["app_name", "method", "endpoint", "http_status"],
        registry=registry,
    )
    app["request_latency"] = prometheus_client.Histogram(
        "request_latency_seconds",
        "Request latency",
        ["app_name", "endpoint"],
        registry=registry,
    )
    app["request_in_progress"] = prometheus_client.Gauge(
        "requests_in_progress_total",
        "Requests in progress",
        ["app_name", "endpoint", "method"],
        registry=registry,
    )
    prometheus_client.Info("server", "API server version",
                           registry=registry).info({
                               "version":
                               metadata.__version__,
                               "commit":
                               getattr(metadata, "__commit__", "null"),
                               "build_date":
                               getattr(metadata, "__date__", "null"),
                           })
    app.middlewares.insert(0, instrument)
    # passing StatusRenderer(registry) without __call__ triggers a spurious DeprecationWarning
    # FIXME(vmarkovtsev): https://github.com/aio-libs/aiohttp/issues/4519
    app.router.add_get("/status", StatusRenderer(registry).__call__)
    return registry
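A minimal wiring sketch for setup_status, assuming a plain aiohttp entry point (the port is an arbitrary choice):

from aiohttp import web

app = web.Application()
registry = setup_status(app)  # installs the middleware and the /status route
web.run_app(app, port=8080)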
Example #13
 def __init__(self, push_gateaway, nodename, jobname="Duty"):
     '''
     Arguments
     ---------
     push_gateaway : str
         Address of the Prometheus push gateway.
     nodename : str
         Name of the node.
     jobname : str, optional
         Job name reported to the gateway; defaults to "Duty" and is not
         significant.
     '''
     self.push_gateaway = push_gateaway
     self.registry = pc.CollectorRegistry()
     self.nodename = nodename
     self.jobname = jobname
     ### Defining metrics
     # Metrics are attached to the per-instance registry so they can later be
     # pushed to the gateway instead of polluting the global default registry.
     # Are there heimdall|fredda candidates?
     self.heimdall_up = pc.Enum('cands_heimdall_up',
                                'Heimdall candidates present',
                                states=['yes', 'no'],
                                labelnames=['node', 'antenna'],
                                registry=self.registry)
     self.fredda_up = pc.Enum('cands_fredda_up',
                              'Fredda candidates present',
                              states=['yes', 'no'],
                              labelnames=['node', 'antenna'],
                              registry=self.registry)
     # How many candidates
     self.heimdall_n = pc.Gauge('cands_heimdall_num',
                                'Heimdall candidates number',
                                labelnames=['node', 'antenna'],
                                registry=self.registry)
     self.fredda_n = pc.Gauge('cands_fredda_num',
                              'Fredda candidates number',
                              labelnames=['node', 'antenna'],
                              registry=self.registry)
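A sketch of how an instance of this class might publish the registry built above; prometheus_client's push_to_gateway is the standard helper, while the method name push() and the grouping key are assumptions:

 def push(self):
     # Push everything registered on self.registry to the configured gateway
     # (method name and grouping key are illustrative, not from the original).
     pc.push_to_gateway(self.push_gateaway,
                        job=self.jobname,
                        registry=self.registry,
                        grouping_key={'node': self.nodename})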
Example #14
    def after_process_boot(self, broker):
        os.environ["prometheus_multiproc_dir"] = DB_PATH

        # This import MUST happen at runtime, after process boot and
        # after the env variable has been set up.
        import prometheus_client as prom

        self.logger.debug("Setting up metrics...")
        registry = prom.CollectorRegistry()
        self.total_messages = prom.Counter(
            "dramatiq_messages_total",
            "The total number of messages processed.",
            ["queue_name", "actor_name"],
            registry=registry,
        )
        self.total_errored_messages = prom.Counter(
            "dramatiq_message_errors_total",
            "The total number of errored messages.",
            ["queue_name", "actor_name"],
            registry=registry,
        )
        self.total_retried_messages = prom.Counter(
            "dramatiq_message_retries_total",
            "The total number of retried messages.",
            ["queue_name", "actor_name"],
            registry=registry,
        )
        self.total_rejected_messages = prom.Counter(
            "dramatiq_message_rejects_total",
            "The total number of dead-lettered messages.",
            ["queue_name", "actor_name"],
            registry=registry,
        )
        self.total_revived_messages = prom.Counter(
            "dramatiq_message_revives_total",
            "The total number of messages revived from dead workers.",
            ["queue_name", "actor_name"],
            registry=registry,
        )
        self.inprogress_messages = prom.Gauge(
            "dramatiq_messages_inprogress",
            "The number of messages in progress.",
            ["queue_name", "actor_name"],
            registry=registry,
            multiprocess_mode="livesum",
        )
        self.inprogress_delayed_messages = prom.Gauge(
            "dramatiq_delayed_messages_inprogress",
            "The number of delayed messages in memory.",
            ["queue_name", "actor_name"],
            registry=registry,
        )
        self.message_durations = prom.Histogram(
            "dramatiq_message_duration_milliseconds",
            "The time spent processing messages.",
            ["queue_name", "actor_name"],
            buckets=(5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000,
                     7500, 10000, 30000, 60000, 600000, 900000, float("inf")),
            registry=registry,
        )
Example #15
 def test_update_metrics(self):
     prometheus_metrics = create_metrics(
         METRICS_DEFINITIONS,
         registry=prometheus_client.CollectorRegistry())
     middleware = PrometheusRequestMetricsMiddleware(
         self.get_response, prometheus_metrics=prometheus_metrics)
     middleware(factory.make_fake_request("/MAAS/accounts/login/"))
     middleware(factory.make_fake_request("/MAAS/accounts/login/"))
     middleware(factory.make_fake_request("/MAAS/other/path"))
     middleware(
         factory.make_fake_request("/MAAS/other/path",
                                   data={'op': 'do-foo'}))
     middleware(
         factory.make_fake_request("/MAAS/other/path",
                                   method='POST',
                                   data={'op': 'do-bar'}))
     metrics_text = prometheus_metrics.generate_latest().decode('ascii')
     self.assertIn(
         'maas_http_request_latency_count{method="GET",op="",'
         'path="/MAAS/accounts/login/",status="200"} 2.0', metrics_text)
     self.assertIn(
         'maas_http_request_latency_count{method="GET",op="do-foo",'
         'path="/MAAS/other/path",status="404"} 1.0', metrics_text)
     self.assertIn(
         'maas_http_request_latency_count{method="POST",op="do-bar",'
         'path="/MAAS/other/path",status="404"} 1.0', metrics_text)
Example #16
  def testPrometheusIntegration(self):
    registry = prometheus_client.CollectorRegistry(auto_describe=True)

    def MakeCollector(metadatas):
      return prometheus_stats_collector.PrometheusStatsCollector(
          metadatas, registry)

    with self.SetUpStatsCollector(MakeCollector):
      counter = metrics.Counter("foobars")
    counter.Increment(42)

    port = portpicker.pick_unused_port()

    with mock.patch.object(stats_server.StatsServerHandler, "registry",
                           registry):
      server = stats_server.StatsServer(port)
      server.Start()
      self.addCleanup(server.Stop)
      res = requests.get("http://localhost:{}/metrics".format(port))

    text_fd = io.StringIO(res.text)
    families = prometheus_parser.text_fd_to_metric_families(text_fd)
    families = {family.name: family for family in families}

    self.assertIn("foobars", families)
    self.assertEqual(families["foobars"].samples[0].value, 42)
Example #17
 def test_metrics_prometheus_not_available(self):
     self.patch(utils, "PROMETHEUS_SUPPORTED", False)
     prometheus_metrics = create_metrics(
         self.metrics_definitions,
         registry=prometheus_client.CollectorRegistry(),
     )
     self.assertEqual(prometheus_metrics.available_metrics, [])
Example #18
 def test_update_metrics(self):
     self.patch(metrics, "GLOBAL_LABELS", {"service_type": "rack"})
     tempdir = self.useFixture(TempDirectory())
     meminfo = Path(tempdir.path) / "meminfo"
     meminfo.write_text(
         dedent(
             """\
         MemTotal:         123 Kb
         SwapCached:       456 Kb
         VmallocUsed:      789 Kb
         HugePages_Total:  321
         """
         )
     )
     prometheus_metrics = create_metrics(
         node_metrics_definitions(),
         registry=prometheus_client.CollectorRegistry(),
     )
     update_memory_metrics(prometheus_metrics, path=meminfo)
     output = prometheus_metrics.generate_latest().decode("ascii")
     self.assertIn(
         'maas_node_mem_MemTotal{service_type="rack"} 123.0', output
     )
     self.assertIn(
         'maas_node_mem_SwapCached{service_type="rack"} 456.0', output
     )
     self.assertIn(
         'maas_node_mem_VmallocUsed{service_type="rack"} 789.0', output
     )
     self.assertIn(
         'maas_node_mem_HugePages_Total{service_type="rack"} 321.0', output
     )
Example #19
 def test_metrics_disabled(self):
     prometheus_metrics = create_metrics(
         None, registry=prometheus_client.CollectorRegistry())
     resource = http.PrometheusMetricsResource(prometheus_metrics)
     request = Request(DummyChannel(), False)
     content = resource.render_GET(request).decode('utf-8')
     self.assertEqual(request.code, 404)
     self.assertEqual(content, '')
Example #20
 def test_no_register_atexit_custom_registry(self):
     mock_register = self.patch(atexit, 'register')
     definitions = [
         MetricDefinition('Gauge', 'a_gauge', 'A Gauge', ['foo', 'bar'])
     ]
     PrometheusMetrics(definitions=definitions,
                       registry=prometheus_client.CollectorRegistry())
     mock_register.assert_not_called()
Example #21
 def __init__(self, crawler):
     """
     :param scrapy.crawler.Crawler crawler:
     """
     self.crawler = crawler
     self.registries = defaultdict(lambda: prometheus_client.CollectorRegistry())
     self.crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)
     super(PrometheusStatsCollector, self).__init__(crawler)
Example #22
    def setUp(self):
        self.app = Flask(__name__)
        self.app.testing = True
        self.client = self.app.test_client()

        # reset the underlying Prometheus registry
        prometheus_client.REGISTRY = prometheus_client.CollectorRegistry(
            auto_describe=True)
Example #23
def start_metrics_server():
    if "prometheus_multiproc_dir" in os.environ:
        registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = REGISTRY
    start_http_server(9090, addr="0.0.0.0", registry=registry)
    print("Metrics server started")
Example #24
 def test_update_call_value_class(self):
     definitions = [MetricDefinition('Counter', 'a_counter', 'A Counter')]
     prometheus_metrics = PrometheusMetrics(
         definitions=definitions,
         registry=prometheus_client.CollectorRegistry())
     prometheus_metrics.update('a_counter', 'set', value=22)
     self.assertIn('a_counter 22.0',
                   prometheus_metrics.generate_latest().decode('ascii'))
Example #25
 def test_update_metrics(self):
     self.patch(metrics, "GLOBAL_LABELS", {"service_type": "rack"})
     tempdir = self.useFixture(TempDirectory())
     stat = Path(tempdir.path) / "stat"
     stat.write_text(
         dedent("""\
         cpu  111 222 333 444 555 666 7 888 9 11
         cpu0 222 333 444 555 666 777 8 999 1 22
         cpu1 222 333 444 555 666 777 8 999 1 22
         other line
         other line
         """))
     prometheus_metrics = create_metrics(
         node_metrics_definitions(),
         registry=prometheus_client.CollectorRegistry(),
     )
     update_cpu_metrics(prometheus_metrics, path=stat)
     output = prometheus_metrics.generate_latest().decode("ascii")
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="user"} 1.11',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="nice"} 2.22',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="system"} 3.33',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="idle"} 4.44',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="iowait"} 5.55',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="irq"} 6.66',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="softirq"} 0.07',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="steal"} 8.88',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="guest"} 0.09',
         output,
     )
     self.assertIn(
         'maas_node_cpu_time_total{service_type="rack",state="guest_nice"} 0.11',
         output,
     )
Example #26
def setup_prom_server():
    multiproc_dir = os.environ['prometheus_multiproc_dir']
    logging.info("Exposing prometheus metrics in multi-process mode")
    logging.info("Cleaning up prometheus multiprocessing dir: {}".format(multiproc_dir))
    for f in glob.glob(os.path.join(multiproc_dir, '*.db')):
        os.remove(f)
    registry = prometheus_client.CollectorRegistry()
    prometheus_client.multiprocess.MultiProcessCollector(registry)
    prometheus_client.start_http_server(PROM_METRICS_ENDPOINT_PORT, registry=registry)
Example #27
 def __init__(self, project_id: str,
              compute_client: googleapiclient.discovery.Resource, http_port,
              metrics_mode):
     self.project_id = project_id
     self.compute_client = compute_client
     self.http_port = int(http_port)
     self.metrics: typing.Dict[str, prometheus_client.core.Gauge] = {}
     self.registry = prometheus_client.CollectorRegistry(auto_describe=True)
     self.metrics_mode = metrics_mode
Example #28
def registry(monkeypatch, tmpdir):
    registry = prometheus_client.CollectorRegistry()

    def get_metric(name, **labels):
        value = registry.get_sample_value(name, labels)
        return 0 if value is None else value

    registry.get_metric = get_metric
    return registry
Example #29
def metrics(request):
    if "prometheus_multiproc_dir" in os.environ:
        registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = prometheus_client.REGISTRY
    metrics_page = prometheus_client.generate_latest(registry)
    return HttpResponse(metrics_page,
                        content_type=prometheus_client.CONTENT_TYPE_LATEST)
Example #30
 def do_GET(self):
     #self.registry = MetricsHandler.registry
     self.registry = prometheus_client.CollectorRegistry()
     # Split the request path into the endpoint and its query string.
     function, _, param = self.path.partition('?')
     input_name = dict(pair.split('=') for pair in param.split('&')) if param else {}
     if function == '/probe':
         if input_name.get('module') == 'ping':
             # http://localhost:8888/probe?module=ping&target=8.8.8.8
             result = ping(input_name.get('target'))
             if result[0]:
                 self.rtt_min = Gauge('ping_rtt_min',
                                      'RTT Min',
                                      registry=self.registry)
                 self.rtt_min.set(result[0])
             if result[1]:
                 self.rtt_avg = Gauge('ping_rtt_avg',
                                      'RTT Avg',
                                      registry=self.registry)
                 self.rtt_avg.set(result[1])
             if result[2]:
                 self.rtt_max = Gauge('ping_rtt_max',
                                      'RTT Max',
                                      registry=self.registry)
                 self.rtt_max.set(result[2])
             self.packet_loss = Gauge('ping_packet_loss',
                                      'Packet Loss',
                                      registry=self.registry)
             self.packet_loss.set(result[3])
             return MetricsHandler.do_GET(self)
         elif input_name.get('module') == 'speedtest':
             # http://localhost:8888/probe?module=speedtest
             # optional: http://localhost:8888/probe?module=speedtest&target=17846
             if input_name.get('target'):
                 result = testspeed(input_name.get('target'))
             else:
                 result = testspeed(0)
             if result[0]:
                 self.download = Gauge('speedtest_download',
                                       'Speedtest download', ['server'],
                                       registry=self.registry)
                 self.download.labels(server=result[3]).set(result[0])
             if result[1]:
                 self.upload = Gauge('speedtest_upload',
                                     'Speedtest Upload', ['server'],
                                     registry=self.registry)
                 self.upload.labels(server=result[3]).set(result[1])
             if result[2]:
                 self.latency = Gauge('speedtest_latency',
                                      'Speedtest Latency', ['server'],
                                      registry=self.registry)
                 self.latency.labels(server=result[3]).set(result[2])
             return MetricsHandler.do_GET(self)
         else:
             print("module not defined")
     else:
         print("function not defined")