def _setup_prometheus(app):
    """Wire multiprocess Prometheus metrics into a Flask/uWSGI app.

    Sets the multiprocess directory env var, builds an aggregate
    registry, mounts a /metrics WSGI sub-app, and registers the app's
    counters.
    """
    # This environment variable MUST be declared before importing the
    # prometheus modules (or unit tests fail)
    # More details on this awkwardness: https://github.com/prometheus/client_python/issues/250
    os.environ["prometheus_multiproc_dir"] = PROMETHEUS_TMP_COUNTER_DIR.name
    # Imports are deliberately local so they happen AFTER the env var above.
    from prometheus_client import (
        CollectorRegistry,
        multiprocess,
        make_wsgi_app,
    )
    from prometheus_flask_exporter import Counter
    from prometheus_flask_exporter.multiprocess import (
        UWsgiPrometheusMetrics,
    )
    # Aggregate registry that merges the per-process metric files.
    app.prometheus_registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(app.prometheus_registry)
    UWsgiPrometheusMetrics(app)
    # Add prometheus wsgi middleware to route /metrics requests
    app.wsgi_app = DispatcherMiddleware(
        app.wsgi_app, {"/metrics": make_wsgi_app(registry=app.prometheus_registry)})
    # set up counters
    # NOTE(review): assumes app.prometheus_counters already exists (dict-like)
    # — it is not created in this function; confirm at the call site.
    app.prometheus_counters["pre_signed_url_req"] = Counter(
        "pre_signed_url_req",
        "tracking presigned url requests",
        ["requested_protocol"],
    )
def __init__(self, kafka_conf, zookeeper_conf, repository):
    """Set up the Kafka consumer, storage repository and Prometheus metrics.

    :param kafka_conf: config object with uri/topic/batch_size/group_id/
        zookeeper_path/max_wait_time_seconds attributes.
    :param zookeeper_conf: config object with a uri attribute.
    :param repository: repository class (called with no args to instantiate).
    """
    # Buffer of messages accumulated between flushes.
    self._data_points = []
    self._kafka_topic = kafka_conf.topic
    self._batch_size = kafka_conf.batch_size
    # The buffered batch is flushed both on repartition and on commit.
    self._consumer = consumer.KafkaConsumer(
        kafka_conf.uri,
        zookeeper_conf.uri,
        kafka_conf.zookeeper_path,
        kafka_conf.group_id,
        kafka_conf.topic,
        repartition_callback=self._flush,
        commit_callback=self._flush,
        commit_timeout=kafka_conf.max_wait_time_seconds)
    self.repository = repository()
    self._start_time = time.time()
    self._end_time = 0
    # Aggregate registry merging metrics across worker processes.
    self.registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(self.registry)
    self.message_counter = Counter('monasca_persister_message_count_total',
                                   'total count of messages',
                                   ['version'])
    # Pre-create the labeled child so the series is exported even at zero.
    self.message_counter.labels(version='v1.0')
    self.message_counter_per_topic = Counter(
        'monasca_persister_message_count_per_topic',
        'total number of messages processed from one topic',
        ['topic'])
    # multiprocess_mode='all' keeps per-process samples in multiprocess
    # aggregation (see prometheus_client multiprocess docs).
    self.message_process_rate_gauge = Gauge(
        'monasca_persister_messages_processed_per_sec',
        'messages processed per second from one topic',
        ['topic'],
        multiprocess_mode='all')
def ExportToDjangoView(request):
    """Exports /metrics as a Django view.

    You can use django_prometheus.urls to map /metrics to this view.

    When both DJANGO_PROMETHEUS_AUTHORIZATION_USERNAME and
    DJANGO_PROMETHEUS_AUTHORIZATION_PASSWORD settings are present, the
    request must carry valid HTTP Basic credentials (400 for malformed
    headers, 401 for wrong credentials).
    """
    if "prometheus_multiproc_dir" in os.environ:
        # Multiprocess mode: aggregate the per-process metric files into
        # a fresh registry instead of the process-local default.
        registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = prometheus_client.REGISTRY
    metrics_page = prometheus_client.generate_latest(registry)
    expected_username = getattr(settings, "DJANGO_PROMETHEUS_AUTHORIZATION_USERNAME", None)
    expected_password = getattr(settings, "DJANGO_PROMETHEUS_AUTHORIZATION_PASSWORD", None)
    if expected_password is not None and expected_username is not None:
        auth_header = request.META.get("HTTP_AUTHORIZATION", "")
        token_type, _, credentials = auth_header.partition(" ")
        if credentials == '':
            return HttpResponse("", status=400)
        try:
            received_auth_string = base64.b64decode(credentials).decode()
        except (ValueError, UnicodeDecodeError):
            # Malformed base64 / non-UTF-8 payload is a client error,
            # not an unhandled 500.
            return HttpResponse("", status=400)
        if ':' not in received_auth_string:
            return HttpResponse("", status=400)
        # RFC 7617: only the FIRST ':' separates user-id from password, so
        # a password containing ':' must not be truncated (the previous
        # split(':')[1] dropped everything after a second colon).
        received_username, _, received_password = received_auth_string.partition(':')
        valid_username = received_username == expected_username
        valid_password = received_password == expected_password
        if token_type != 'Basic' or not valid_username or not valid_password:
            return HttpResponse("", status=401)
    return HttpResponse(
        metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST
    )
def init_registry(self) -> None:
    """Select the metrics registry for this instance.

    Prefers a multiprocess aggregate registry; falls back to the
    process-global default when multiprocess mode is unavailable.
    """
    try:
        aggregate = CollectorRegistry()
        multiprocess.MultiProcessCollector(aggregate)
    except ValueError:
        # Multiprocess collection not configured — use the default.
        self.registry = REGISTRY
    else:
        self.registry = aggregate
def metrics(request):
    """Django view serving the Prometheus exposition payload.

    Returns an empty response when multiprocess mode is not configured.
    """
    if 'prometheus_multiproc_dir' not in os.environ:
        return HttpResponse('')
    aggregate = CollectorRegistry()
    multiprocess.MultiProcessCollector(aggregate)
    return HttpResponse(generate_latest(aggregate),
                        content_type=CONTENT_TYPE_LATEST)
def metrics(self, request):
    """Endpoint exposing Prometheus metrics.

    Returns 501 when prometheus-client is not installed, or when the
    server runs multi-process without `prometheus_multiproc_dir` set.
    """
    if not pkg_is_installed('prometheus-client'):
        return Response('Not Supported', status=501)
    # Importing this too early would break multiprocess metrics
    from prometheus_client import (
        CONTENT_TYPE_LATEST,
        CollectorRegistry,
        REGISTRY,
        generate_latest,
        multiprocess,
    )
    if 'prometheus_multiproc_dir' in os.environ:
        # prometheus_client is running in multiprocess mode.
        # Use a custom registry, as the global one includes custom
        # collectors which are not supported in this mode
        registry = CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        if request.environ.get('wsgi.multiprocess', False):
            return Response(
                'Not Supported: running in multiprocess mode but '
                '`prometheus_multiproc_dir` envvar not set',
                status=501)
        # prometheus_client is running in single process mode.
        # Use the global registry (includes CPU and RAM collectors)
        registry = REGISTRY
    # Serialization happens under a lock — presumably to serialize
    # concurrent scrapes; confirm intent where prometheus_lock is defined.
    with prometheus_lock:
        data = generate_latest(registry)
    return Response(data, status=200, mimetype=CONTENT_TYPE_LATEST)
def get_registry():
    """Return the registry to scrape.

    A multiprocess aggregate when `prometheus_multiproc_dir` is set,
    otherwise the process-global default registry.
    """
    if 'prometheus_multiproc_dir' not in os.environ:
        return REGISTRY
    aggregate = CollectorRegistry()
    multiprocess.MultiProcessCollector(aggregate)
    return aggregate
def metrics():  # pylint: disable=unused-variable
    """Serialize the multiprocess metrics payload.

    /metrics API shouldn't be visible in the API documentation, hence
    it's added here in the create_app step. Requires the environment
    variable prometheus_multiproc_dir.
    """
    aggregate = CollectorRegistry()
    multiprocess.MultiProcessCollector(aggregate)
    return generate_latest(aggregate)
def _setup_metrics(self):
    """ Start metric exposition """
    # Resolve the multiprocess metrics directory. Whatever the env var
    # initially says, self.metrics_dir always wins below.
    path = os.environ.get("prometheus_multiproc_dir")
    if not os.path.exists(self.metrics_dir):
        try:
            log.info("Creating metrics directory")
            os.makedirs(self.metrics_dir)
        except OSError:
            log.error("Failed to create metrics directory!")
            raise ConfigurationException(
                "Failed to create metrics directory!")
        path = self.metrics_dir
    elif path != self.metrics_dir:
        # Directory exists but the env var is unset or points elsewhere.
        path = self.metrics_dir
    # Publish the directory so prometheus_client picks it up.
    os.environ['prometheus_multiproc_dir'] = path
    log.info("Cleaning metrics collection directory")
    log.debug("Metrics directory set to: {}".format(path))
    # Remove stale per-process .db metric files from previous runs.
    files = os.listdir(path)
    for f in files:
        if f.endswith(".db"):
            os.remove(os.path.join(path, f))
    log.debug("Starting metrics exposition")
    if self.metrics_enabled:
        # Expose an aggregate of all worker processes over HTTP.
        registry = CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
        start_http_server(port=self.metrics_port,
                          addr=self.metrics_address,
                          registry=registry)
def metrics():
    """Prometheus metrics export."""
    # Count this scrape before serializing.
    request_count.labels("metrics").inc()
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    return generate_latest(snapshot)
def on_get(self, req, resp):
    """Handle GET requests.

    Prometheus scrapes metrics from this endpoint.
    """
    request_count.labels('get', '/metrics').inc()
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    resp.body = generate_latest(snapshot)
def _get_metrics_data(self):
    """Serialize current metrics, aggregating across processes when
    multiprocess mode is enabled."""
    if self._multiprocess_on:
        source = CollectorRegistry()
        multiprocess.MultiProcessCollector(source)
    else:
        source = core.REGISTRY
    return generate_latest(source)
async def get_prom_metrics():
    """Async endpoint returning the Prometheus exposition text."""
    if prom_multit_mode:
        # Aggregate the per-process metric files.
        snapshot = CollectorRegistry()
        multiprocess.MultiProcessCollector(snapshot)
        source = snapshot
    else:
        source = prom.REGISTRY
    payload = prom.generate_latest(source).decode("utf-8")
    return Response(payload, media_type="text/plain")
def metrics():
    """Return the serialized multiprocess metrics payload.

    Aggregates the per-process metric files into a fresh registry and
    returns the exposition bytes.
    """
    registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(registry)
    # The previous version also built a '200 OK' status string and a
    # response-headers list but never used either — only the payload was
    # (and is) returned; the dead locals are removed.
    return generate_latest(registry)
def start_metrics_server():
    """Start the Prometheus HTTP exporter on 0.0.0.0:9090."""
    print("Metrics server started")
    if "prometheus_multiproc_dir" not in os.environ:
        serving_registry = REGISTRY
    else:
        serving_registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(serving_registry)
    start_http_server(9090, addr="0.0.0.0", registry=serving_registry)
def get_metrics(request: Request) -> Response:
    """Metrics endpoint; multiprocess-aware registry selection."""
    if "prometheus_multiproc_dir" not in os.environ:
        target = REGISTRY
    else:
        target = CollectorRegistry()
        multiprocess.MultiProcessCollector(target)
    return Response(
        generate_latest(target),
        status_code=200,
        headers={"Content-Type": CONTENT_TYPE_LATEST},
    )
def __init__(self):
    """Read exporter settings from the environment and build the
    multiprocess metrics registry."""
    env = os.environ
    self.metrics_path = env.get('PROMETHEUS_METRICS_PATH', '/metrics')
    self.auth_token = env.get("METRICS_BASIC_AUTH_TOKEN")
    # Cloud Foundry exposes app metadata via VCAP_APPLICATION (JSON).
    vcap = json.loads(env.get("VCAP_APPLICATION", "{}"))
    self.application_id = vcap.get("application_id")
    self.authenticate_requests = env.get("METRICS_BASIC_AUTH", "true") == "true"
    self.registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(self.registry)
def retrieve(self, request, *args, **kwargs):
    """Serve the Prometheus exposition page (multiprocess-aware)."""
    if "prometheus_multiproc_dir" not in os.environ:
        source = prometheus_client.REGISTRY
    else:
        source = prometheus_client.CollectorRegistry(auto_describe=True)
        multiprocess.MultiProcessCollector(source)
    page = prometheus_client.generate_latest(source)
    return HttpResponse(page,
                        content_type=prometheus_client.CONTENT_TYPE_LATEST)
def metrics_app(environ, start_response):
    """Minimal WSGI application exposing multiprocess metrics."""
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    payload = generate_latest(snapshot)
    headers = [
        ('Content-type', CONTENT_TYPE_LATEST),
        ('Content-Length', str(len(payload))),
    ]
    start_response('200 OK', headers)
    return iter([payload])
def on_get(self, req, resp):
    '''GET /metrics/'''
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    resp.body = generate_latest(snapshot)
    resp.content_type = CONTENT_TYPE_LATEST
    resp.status = falcon.HTTP_200
def get_registry():
    """Obtain a metrics registry.

    If one doesn't already exist, initialize and throw it into the
    global app context.
    """
    if "metrics_registry" not in g:
        fresh = CollectorRegistry()
        multiprocess.MultiProcessCollector(fresh)
        g.metrics_registry = fresh
    return g.metrics_registry
def metrics(request):
    """Django view serving Prometheus metrics (multiprocess-aware)."""
    if "prometheus_multiproc_dir" not in os.environ:
        source = prometheus_client.REGISTRY
    else:
        source = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(source)
    body = prometheus_client.generate_latest(source)
    return HttpResponse(body,
                        content_type=prometheus_client.CONTENT_TYPE_LATEST)
def __init__(self, *args, **kwargs):
    """
    :param prometheus_registry: Prometheus registry that metrics are registered to.
    :param int prometheus_port: If not None, start prometheus server with given registry on given port.
    :param prometheus_buckets: Gets passed to prometheus_client.Histogram.
    """
    super(Application, self).__init__(*args, **kwargs)
    # Registry used by every metric below; auto_describe disabled.
    self.multiprocess_registry = CollectorRegistry(auto_describe=False)
    with open(os.path.join(CONF_DIR, "config.yml"), "r") as f:
        self.CONFIG = yaml.safe_load(f)
    Configuration().validate(self.CONFIG)
    # NOTE(review): per-process metric files are read from the hard-coded
    # ".tmp" directory rather than prometheus_multiproc_dir — confirm
    # this path is intentional and created elsewhere.
    multiprocess.MultiProcessCollector(
        registry=self.multiprocess_registry, path=".tmp"
    )
    self.max_workers = kwargs.pop("max_workers", 10)
    self.debug = kwargs.pop("debug", False)
    buckets = kwargs.pop("prometheus_buckets", None)
    histogram_kwargs = {
        "labelnames": ["method", "path", "status"],
        "registry": self.multiprocess_registry,
    }
    self.exception_counter = Counter(
        "network_exporter_raised_exceptions",
        "Count of raised Exceptions in the Exporter",
        ["exception", "collector", "hostname"],
        registry=self.multiprocess_registry,
    )
    # Counter initialization
    self.used_workers = Gauge(
        "network_exporter_used_workers",
        "The amount of workers being busy scraping Devices.",
        registry=self.multiprocess_registry,
    )
    self.total_workers = Gauge(
        "network_exporter_workers",
        "The total amount of workers",
        registry=self.multiprocess_registry,
    )
    self.total_workers.set(self.max_workers)
    self.CONNECTIONS = Gauge(
        "network_exporter_tcp_states",
        "The count per tcp state and protocol",
        ["state", "protocol"],
        registry=self.multiprocess_registry,
    )
    # Optional custom buckets are only added when explicitly provided.
    if buckets is not None:
        histogram_kwargs["buckets"] = buckets
    self.request_time_histogram = Histogram(
        "tornado_http_request_duration_seconds",
        "Tornado HTTP request duration in seconds",
        **histogram_kwargs
    )
def _push_to_gateway(self):
    """Aggregate multiprocess metrics and push them to the Pushgateway."""
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    target_host = getattr(settings, 'PUSHGATEWAY_HOST')
    try:
        push_to_gateway(target_host, job='batch_mode', registry=snapshot)
    except Exception:
        # Could get a URLOpenerror if Pushgateway is not running
        prometheus_soft_assert(
            False, 'Prometheus metric error while pushing to gateway')
def prometheus_metrics_exporter():
    """Flask endpoint serving the multiprocess metrics payload."""
    PrometheusMetric.populate_collectors()
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    payload = generate_latest(snapshot)
    response_headers = {
        "Content-type": CONTENT_TYPE_LATEST,
        "Content-Length": str(len(payload)),
    }
    return Response(payload,
                    headers=response_headers,
                    mimetype=CONTENT_TYPE_LATEST)
def expose_metrics_multiprocess() -> Response:  # pragma: no cover
    """Expose prometheus metrics from the current set of processes.

    Use this instead of expose_metrics if you're using a multi-process
    server.
    """
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    body = BytesIO(generate_latest(snapshot))
    return Response(HTTP_200, headers=_HEADERS, stream=body)
def metrics(context, event):
    """Handler returning aggregated multiprocess metrics as text."""
    context.logger.info('called metrics')
    # use multiprocess metrics otherwise data collected from different
    # processors is not included
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    body = generate_latest(snapshot).decode('UTF-8')
    return context.Response(
        body=body,
        headers={},
        content_type=CONTENT_TYPE_LATEST,
        status_code=200,
    )
def ExportToDjangoView(request):
    """Exports /metrics as a Django view.

    You can use django_prometheus.urls to map /metrics to this view.
    """
    if 'prometheus_multiproc_dir' in os.environ:
        # Multiprocess mode: aggregate the per-process metric files into
        # a fresh registry.
        registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        # Single-process mode: use the global default registry. The
        # previous code serialized the fresh (empty) registry created
        # above, which produced an empty /metrics page in this mode —
        # inconsistent with the other exporters in this codebase.
        registry = prometheus_client.REGISTRY
    metrics_page = prometheus_client.generate_latest(registry)
    return HttpResponse(metrics_page,
                        content_type=prometheus_client.CONTENT_TYPE_LATEST)
def metrics_view_func(self):
    """View exposing multiprocess metrics.

    prometheus_client is imported inside the view rather than at module
    scope.
    """
    from prometheus_client import (
        CONTENT_TYPE_LATEST,
        CollectorRegistry,
        generate_latest,
        multiprocess,
    )
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    payload = generate_latest(snapshot)
    return Response(payload, mimetype=CONTENT_TYPE_LATEST)
def metrics():
    """Flask endpoint serving the multiprocess metrics payload with
    explicit Content-Type and Content-Length headers."""
    snapshot = CollectorRegistry()
    multiprocess.MultiProcessCollector(snapshot)
    payload = generate_latest(snapshot)
    resp = make_response(payload)
    resp.headers['Content-Type'] = CONTENT_TYPE_LATEST
    resp.headers['Content-Length'] = str(len(payload))
    return resp