def create_app() -> FastAPI: app = FastAPI() # Unregister all collectors. collectors = list(REGISTRY._collector_to_names.keys()) print(f"before unregister collectors={collectors}") for collector in collectors: REGISTRY.unregister(collector) print(f"after unregister collectors={list(REGISTRY._collector_to_names.keys())}") # Import default collectors. from prometheus_client import gc_collector, platform_collector, process_collector # Re-register default collectors. process_collector.ProcessCollector() platform_collector.PlatformCollector() gc_collector.GCCollector() print(f"after re-register collectors={list(REGISTRY._collector_to_names.keys())}") @app.get("/") def read_root(): return "Hello World!" return app
def collect_application_query_results(self):
    """Run every configured application query, accumulate the rollup results
    into a fresh collector, then atomically swap that collector into REGISTRY.

    A failure in one query is logged and skipped so it does not block the
    remaining queries.
    """
    # temp - to ensure we run/collect results and only swap the collector
    # right at the end
    temp_collector = ApplicationResultCollector()
    for q_id, q in self.app_queries.items():
        # Roll up usage per (Application_name, Application_version), keyed by
        # Client_device_id.
        r = ApplicationUsageRollup(
            q_id, ["Application_name", "Application_version"],
            "Client_device_id")
        try:
            label_name = f"app_query_{q['name']}"
            # Time the query execution under a per-query label.
            with http_request_time_taken.labels(label_name).time():
                r.exec(self.fw_query)
            for result in r.results():
                # NOTE(review): assumes each result row carries the version at
                # index 1 and the total at index 2 — confirm against
                # ApplicationUsageRollup.results().
                name = q['name']
                version = result[1]
                total = int(result[2])
                logger.info(
                    f"app query result for {name}, {version}, query_id: {r.query_id} = {total}"
                )
                temp_collector.add_result(r.query_id, name, version, total)
        except Exception as e:
            # Best-effort: log the failure and continue with the other queries.
            logger.error(
                f"failed to do app query rollup on query id {q_id}, {e}")
            traceback.print_exc(file=sys.stdout)
    # Because running the inventory queries can take a non-trivial amount of time, we must
    # ensure that swapping the collector in the REGISTRY happens as fast as possible.
    # Remember: start_http_server kicks off a thread that will fire independently of this code.
    if self.results_collector is not None:
        REGISTRY.unregister(self.results_collector)
    REGISTRY.register(temp_collector)
    self.results_collector = temp_collector
def testapp(): """ create a test app with various endpoints for the test scenarios """ # unregister all the collectors before we start collectors = list(REGISTRY._collector_to_names.keys()) for collector in collectors: REGISTRY.unregister(collector) PrometheusMiddleware._metrics = {} def _testapp(**middleware_options): app = Starlette() app.add_middleware(starlette_exporter.PrometheusMiddleware, **middleware_options) app.add_route("/metrics", handle_metrics) @app.route("/200") @app.route("/200/{test_param}") def normal_response(request): return JSONResponse({"message": "Hello World"}) @app.route("/500") @app.route("/500/{test_param}") async def error(request): raise HTTPException(status_code=500, detail="this is a test error") @app.route("/unhandled") @app.route("/unhandled/{test_param}") async def unhandled(request): test_dict = {"yup": 123} return JSONResponse({"message": test_dict["value_error"]}) return app return _testapp
def test_metric(self): print("test_metric") # Setup build_info runner = CliRunner() runner.invoke(builder.make_build_info, ["test_app", "master", "abcdef", "1.0.0"]) python_version_info = sys.version_info python_version = "{}.{}.{}".format(python_version_info.major, python_version_info.minor, python_version_info.micro) # Unregister all collectors. collectors = list(REGISTRY._collector_to_names.keys()) print(f"before unregister collectors={collectors}") for collector in collectors: REGISTRY.unregister(collector) print(f"after unregister collectors={list(REGISTRY._collector_to_names.keys())}") labels = {"branch": "master", "revision": "abcdef", "pythonversion": python_version, "version": "1.0.0"} # Test before = REGISTRY.get_sample_value("test_app_build_info", labels) from prometheus_build_info import metrics after = REGISTRY.get_sample_value("test_app_build_info", labels) self.assertEqual(before, None) self.assertEqual(after, 1.0) print("before: {}".format(before)) print("after: {}".format(after)) # Cleanup os.remove(PROM_BUILD_FILE)
def test_no_buildinfo(self):
    """When no build-info file exists, importing the metrics module must not
    register a test_app_build_info sample (before and after are both None)."""
    print("test_no_buildinfo")
    # Remove any stale build-info file; its absence is the scenario under
    # test. (Dropped the unused "as err" binding from the except clause.)
    try:
        os.remove(PROM_BUILD_FILE)
    except OSError:
        print("Nothing to cleanup")
    # Unregister all collectors so the registry is empty for this test.
    collectors = list(REGISTRY._collector_to_names.keys())
    print(f"before unregister collectors={collectors}")
    for collector in collectors:
        REGISTRY.unregister(collector)
    print(f"after unregister collectors={list(REGISTRY._collector_to_names.keys())}")
    python_version_info = sys.version_info
    python_version = "{}.{}.{}".format(python_version_info.major,
                                       python_version_info.minor,
                                       python_version_info.micro)
    labels = {"branch": "master", "revision": "abcdef",
              "pythonversion": python_version, "version": "1.0.0"}
    before = REGISTRY.get_sample_value("test_app_build_info", labels)
    print("before: {}".format(before))
    # Importing the module is what would register the metric — with no build
    # file present it must register nothing.
    from prometheus_build_info import metrics
    after = REGISTRY.get_sample_value("test_app_build_info", labels)
    print("after: {}".format(after))
    self.assertEqual(before, None)
    self.assertEqual(after, None)
def start(self) -> None:
    """
    Configure internal python and scrape metrics exposure and start the http
    server exposing prometheus metrics. Blocks until interrupted.
    """
    # Persist the exposed info metrics to redis before serving.
    write_to_redis_on_startup(self._redis_host, self._redis_port,
                              self._exposed_info_metrics)
    REGISTRY.register(self._collector)
    if self._global_options.get('disable_builtin_collectors', False):
        # Drop prometheus_client's default process/platform/GC collectors.
        # The GC collector has no module-level handle, so it is looked up by
        # one of its metric names.
        LOGGER.info("Disabling builtin collectors")
        REGISTRY.unregister(PROCESS_COLLECTOR)
        REGISTRY.unregister(PLATFORM_COLLECTOR)
        REGISTRY.unregister(
            REGISTRY.
            _names_to_collectors['python_gc_objects_collected_total'])
    if self._collector_options.get('disable_scrape_metrics', False):
        # Drop this exporter's own scrape-bookkeeping counters.
        LOGGER.info("Disabling scrape metrics")
        REGISTRY.unregister(
            REGISTRY.
            _names_to_collectors['redis_exporter_failed_scrape_total'])
        REGISTRY.unregister(
            REGISTRY.
            _names_to_collectors['redis_exporter_successful_scrape_total'])
    # Serve /metrics on the configured port (default 9999) in a daemon thread.
    start_http_server(self._global_options.get('exporter_port', 9999))
    try:
        # Keep the main thread alive; the HTTP server runs in the background.
        while True:
            time.sleep(60)
    except (KeyboardInterrupt, SystemExit):
        LOGGER.info("Exit redis exporter")
        sys.exit()
def setUpClass(cls):
    """Reset global metric state so CustomMetrics can be used in these tests."""
    super().setUpClass()
    # Snapshot the wrapped metrics first, then unregister each one and clear
    # the singleton so the next access rebuilds it from scratch.
    wrappers = [value for value in Metrics._instance.__dict__.values()
                if isinstance(value, MetricWrapperBase)]
    for wrapper in wrappers:
        REGISTRY.unregister(wrapper)
    Metrics._instance = None
def reset_prometheus_registry():
    """Unregister every collector that exposes a my_service-prefixed metric."""
    # Build the hit list first: unregister() mutates the mapping we iterate.
    doomed = [
        collector
        for collector, names in REGISTRY._collector_to_names.items()
        if any(name.startswith("my_service") for name in names)
    ]
    for collector in doomed:
        REGISTRY.unregister(collector)
def unregister_metrics():
    """Remove flask_* and webhook_proxy_* collectors from the global registry."""
    prefixes = ('flask_', 'webhook_proxy_')
    # Snapshot the mapping: unregister() mutates it while we iterate.
    for collector, names in tuple(REGISTRY._collector_to_names.items()):
        # str.startswith accepts a tuple of prefixes — same test as the
        # original `or` chain.
        if any(name.startswith(prefixes) for name in names):
            REGISTRY.unregister(collector)
def clearAll():
    """Unregister every collector currently known to the default REGISTRY.

    A single collector can expose several metric names, so once it is
    unregistered its remaining names disappear from the mapping; the KeyError
    guard covers names whose collector was already removed. The previous bare
    ``except:`` swallowed every exception — narrowed to the one that is
    actually expected here.
    """
    for name in list(REGISTRY._names_to_collectors.keys()):
        try:
            REGISTRY.unregister(REGISTRY._names_to_collectors[name])
        except KeyError:
            print("{0} has already been deleted".format(name))
def reset_registry(monkeypatch):
    """
    Ensures prometheus_client's CollectorRegistry is empty before each test.
    """
    from prometheus_client import REGISTRY

    # Snapshot first — unregister() mutates the mapping being iterated.
    stale = list(REGISTRY._collector_to_names)
    for collector in stale:
        REGISTRY.unregister(collector)
def ttl_watchdog_unregister_old_metrics(now):
    """Drop gauges (and their per-label bookkeeping) whose last update is
    older than GAUGES_TTL relative to *now*."""
    for name, last_update in list(GAUGES_LAST_UPDATE.items()):
        if now - last_update <= GAUGES_TTL:
            continue
        # Expired: remove from the registry and from all bookkeeping maps.
        gauge = GAUGES[name]
        REGISTRY.unregister(gauge)
        del GAUGES[name]
        del GAUGES_LAST_UPDATE[name]
        # Keys of GAUGES_LABELS_LAST_UPDATE are (name, label_values) tuples;
        # purge every entry belonging to this gauge.
        stale_keys = [key for key in list(GAUGES_LABELS_LAST_UPDATE.keys())
                      if key[0] == name]
        for key in stale_keys:
            del GAUGES_LABELS_LAST_UPDATE[key]
def reregister_flow_vars(self, table_name, table_tags):
    """Recreate the per-table flow gauges so their label names match
    *table_tags*, unregistering any previously created gauge first."""
    for base_name in PROM_FLOW_VARS:
        gauge_name = '_'.join((base_name, table_name))
        try:
            # Drop the old gauge if one was created for this table before.
            REGISTRY.unregister(self.metrics[gauge_name])
        except KeyError:
            # First time for this table — nothing to unregister.
            pass
        self.metrics[gauge_name] = PromGauge(gauge_name, '',
                                             list(table_tags))
def tearDown(self):
    """Tear down the pyramid test setup and drop the module-level request
    metrics so the next test can re-register them."""
    testing.tearDown()
    # Both metrics follow the same pattern: unregister if set, then clear.
    for attr in ("pyramid_request", "pyramid_request_ingress"):
        metric = getattr(prom, attr)
        if metric:
            REGISTRY.unregister(metric)
            setattr(prom, attr, None)
def create_app() -> FastAPI: app = FastAPI() # Unregister all collectors. collectors = list(REGISTRY._collector_to_names.keys()) print(f"before unregister collectors={collectors}") for collector in collectors: REGISTRY.unregister(collector) print( f"after unregister collectors={list(REGISTRY._collector_to_names.keys())}" ) # Import default collectors. from prometheus_client import gc_collector, platform_collector, process_collector # Re-register default collectors. process_collector.ProcessCollector() platform_collector.PlatformCollector() gc_collector.GCCollector() print( f"after re-register collectors={list(REGISTRY._collector_to_names.keys())}" ) @app.get("/") def read_root(): return "Hello World!" @app.get("/sleep") async def sleep(seconds: float): await asyncio.sleep(seconds) return f"I have slept for {seconds}s" @app.get("/always_error") def read_always_error(): raise HTTPException(status_code=404, detail="Not really error") @app.get("/ignore") def read_ignore(): return "Should be ignored" @app.get("/items/{item_id}") def read_item(item_id: int, q: Optional[str] = None): return {"item_id": item_id, "q": q} @app.get("/just_another_endpoint") def read_just_another_endpoint(): return "Green is my pepper" @app.post("/items") def create_item(item: Dict[Any, Any]): return None return app
def reset_prometheus() -> None:
    """Empty the default prometheus REGISTRY, then re-register the stock
    process/platform/GC collectors so only the defaults remain."""
    from prometheus_client import REGISTRY

    # Snapshot the current collectors — unregister() mutates the mapping.
    stale = list(REGISTRY._collector_to_names.keys())
    print(f"before unregister collectors={stale}")
    for collector in stale:
        REGISTRY.unregister(collector)
    print(f"after unregister collectors={list(REGISTRY._collector_to_names.keys())}")

    from prometheus_client import gc_collector, platform_collector, process_collector

    # Instantiating each default collector registers it with REGISTRY as a
    # side effect; the instances themselves are not needed.
    process_collector.ProcessCollector()
    platform_collector.PlatformCollector()
    gc_collector.GCCollector()
def create_app() -> "app": app = Flask(__name__) # Unregister all collectors. collectors = list(REGISTRY._collector_to_names.keys()) print(f"before unregister collectors={collectors}") for collector in collectors: REGISTRY.unregister(collector) print( f"after unregister collectors={list(REGISTRY._collector_to_names.keys())}" ) # Import default collectors. from prometheus_client import (gc_collector, platform_collector, process_collector) # Re-register default collectors. process_collector.ProcessCollector() platform_collector.PlatformCollector() gc_collector.GCCollector() @app.route("/") def home(): return "Hello World!" @app.route("/path/<page_name>") def other_page(page_name): return page_name @app.route("/to/exclude") def exclude(): return "Exclude me!" @app.route("/server_error") def server_error(): raise Exception("Test") return "will ever get here" @app.route("/ignored") @Instrumentator.do_not_track() def ignored(): return "HALLO" return app
def __init__(self, rdc_ip_port, field_ids, update_freq, max_keep_age,
             max_keep_samples, gpu_indexes, rdc_unauth,
             enable_plugin_monitoring):
    """Configure the RDC reader and create one prometheus Gauge per field.

    The two previous RdcReader.__init__ calls were identical except that the
    unauthenticated branch also passed root_ca=None; the kwargs are now built
    once and the branch only adds that key.
    """
    group_name = "rdc_prometheus_plugin_group"
    field_group_name = "rdc_prometheus_plugin_fieldgroup"
    reader_kwargs = dict(
        ip_port=rdc_ip_port,
        field_ids=field_ids,
        update_freq=update_freq,
        max_keep_age=max_keep_age,
        max_keep_samples=max_keep_samples,
        gpu_indexes=gpu_indexes,
        field_group_name=field_group_name,
        gpu_group_name=group_name,
    )
    if rdc_unauth:
        # Unauthenticated mode: explicitly disable the TLS root CA.
        reader_kwargs["root_ca"] = None
    RdcReader.__init__(self, **reader_kwargs)

    # Suppress internal metrics from prometheus_client unless the plugin is
    # asked to monitor itself. ("== False" replaced with the idiomatic
    # "not ..." — the flag is expected to be a bool.)
    if not enable_plugin_monitoring:
        REGISTRY.unregister(PROCESS_COLLECTOR)
        REGISTRY.unregister(PLATFORM_COLLECTOR)

    # Create the gauges, one per field id, labeled by gpu_index.
    # NOTE(review): attribute name "guages" (sic) kept — other code may
    # reference it.
    self.guages = {}
    for fid in self.field_ids:
        field_name = self.rdc_util.field_id_string(fid).lower()
        self.guages[fid] = Gauge(field_name, field_name,
                                 labelnames=['gpu_index'])
def start_metrics_server():
    """
    Starts a http server on a port specified in the configuration file and
    exposes Prometheus metrics on it. Also removes GC_COLLECTOR metrics
    because they are not really needed.
    """
    # Garbage-collection metrics are noise here — drop them up front.
    REGISTRY.unregister(GC_COLLECTOR)

    # Pull the service address and metrics port from configuration.
    config = Configuration().values
    service_ip = config.service.ip
    port = config.service.metrics_port

    # Expose the metrics over WSGI.
    start_wsgi_server(port)

    # Record the startup event.
    Logger().event(category="runnable", action="run metrics") \
        .server(ip=service_ip, port=port) \
        .out(severity=Severity.INFO)
def start_prometheus_endpoint(port: int) -> None:
    """
    This starts the prometheus http server thread and returns.
    """
    # From https://github.com/prometheus/client_python/issues/414
    from prometheus_client import REGISTRY, PROCESS_COLLECTOR, \
        PLATFORM_COLLECTOR

    # The default process/platform metrics are not wanted on this endpoint.
    REGISTRY.unregister(PROCESS_COLLECTOR)
    REGISTRY.unregister(PLATFORM_COLLECTOR)
    # The GC collector has no module-level handle, so look it up by one of
    # its metric names.
    gc_metric_name = "python_gc_objects_collected_total"
    REGISTRY.unregister(REGISTRY._names_to_collectors[gc_metric_name])

    start_http_server(port)
def unregisterMetrics(self):
    """Unset all the metrics to avoid duplicated timeseries error.

    Iterates the collectors directly instead of ``.items()`` — the metric
    names were never used. The tuple() snapshot is kept because unregister()
    mutates the mapping while we iterate.
    """
    for collector in tuple(REGISTRY._collector_to_names):
        REGISTRY.unregister(collector)
@app.route('/test/')
def test():
    # Simple probe endpoint.
    return 'rest'


@app.route('/')
def test1():
    # Landing page.
    return 'landing page'


@app.errorhandler(500)
def handle_500(error):
    # Render unhandled server errors as plain text with a 500 status.
    return str(error), 500


@app.route('/metrics')
def metrics():
    # Expose prometheus metrics in the text exposition format.
    return Response(prometheus_client.generate_latest(),
                    mimetype=CONTENT_TYPE_LATEST)


if __name__ == '__main__':
    # Register the custom collector, then drop the default process/platform/GC
    # collectors (GC has no module-level handle, so it is looked up by one of
    # its metric names).
    REGISTRY.register(CustomCollector())
    REGISTRY.unregister(PROCESS_COLLECTOR)
    REGISTRY.unregister(PLATFORM_COLLECTOR)
    REGISTRY.unregister(
        REGISTRY._names_to_collectors['python_gc_objects_collected_total'])
    app.run(host='0.0.0.0')
    GC_COLLECTOR, Gauge, Info, generate_latest
)
from awx.conf.license import get_license
from awx.main.utils import (get_awx_version, get_ansible_version)
from awx.main.analytics.collectors import (
    counts,
    instance_info,
    job_instance_counts,
)

# Drop prometheus_client's default collectors — this module exports only
# AWX-specific metrics.
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.unregister(PLATFORM_COLLECTOR)
REGISTRY.unregister(GC_COLLECTOR)

# Module-level metric objects; each registers itself with the default
# REGISTRY on creation.
SYSTEM_INFO = Info('awx_system', 'AWX System Information')
ORG_COUNT = Gauge('awx_organizations_total', 'Number of organizations')
USER_COUNT = Gauge('awx_users_total', 'Number of users')
TEAM_COUNT = Gauge('awx_teams_total', 'Number of teams')
INV_COUNT = Gauge('awx_inventories_total', 'Number of inventories')
PROJ_COUNT = Gauge('awx_projects_total', 'Number of projects')
JT_COUNT = Gauge('awx_job_templates_total', 'Number of job templates')
WFJT_COUNT = Gauge('awx_workflow_job_templates_total',
                   'Number of workflow job templates')
HOST_COUNT = Gauge('awx_hosts_total', 'Number of hosts', ['type',])
SCHEDULE_COUNT = Gauge('awx_schedules_total', 'Number of schedules')
INV_SCRIPT_COUNT = Gauge('awx_inventory_scripts_total',
                         'Number of invetory scripts')
USER_SESSIONS = Gauge('awx_sessions_total', 'Number of sessions', ['type',])
def unregister_metrics():
    """Remove every collector from the global prometheus REGISTRY.

    Iterates the mapping's keys directly instead of ``.items()`` with a
    discarded value; the tuple() snapshot is kept because unregister()
    mutates the mapping while iterating.
    """
    for collector in tuple(REGISTRY._collector_to_names):
        REGISTRY.unregister(collector)
def unregister_metrics() -> None:
    """Detach every collector from the global REGISTRY.

    The previous version iterated ``.items()`` but never used the names;
    iterate the collectors directly. The tuple() snapshot is kept because
    unregister() mutates the mapping while iterating.
    """
    # noinspection PyProtectedMember
    for collector in tuple(REGISTRY._collector_to_names):
        REGISTRY.unregister(collector)
def testapp(): """ create a test app with various endpoints for the test scenarios """ # unregister all the collectors before we start collectors = list(REGISTRY._collector_to_names.keys()) for collector in collectors: REGISTRY.unregister(collector) PrometheusMiddleware._metrics = {} def _testapp(**middleware_options): app = Starlette() app.add_middleware(starlette_exporter.PrometheusMiddleware, **middleware_options) app.add_route("/metrics", handle_metrics) @app.route("/200") @app.route("/200/{test_param}") def normal_response(request): return JSONResponse({"message": "Hello World"}) @app.route("/500") @app.route("/500/{test_param}") async def error(request): raise HTTPException(status_code=500, detail="this is a test error") @app.route("/unhandled") @app.route("/unhandled/{test_param}") async def unhandled(request): test_dict = {"yup": 123} return JSONResponse({"message": test_dict["value_error"]}) @app.route("/background") async def background(request): def backgroundtask(): time.sleep(0.1) task = BackgroundTask(backgroundtask) return JSONResponse({"message": "task started"}, background=task) # testing routes added using Mount async def test_mounted_function(request): return JSONResponse({"message": "Hello World"}) async def test_mounted_function_param(request): return JSONResponse({"message": request.path_params.get("item")}) mounted_routes = Mount("/", routes=[ Route("/test/{item}", test_mounted_function_param), Route("/test", test_mounted_function) ]) app.mount("/mounted", mounted_routes) app.mount('/static', app=StaticFiles(directory='tests/static'), name="static") return app return _testapp
async def close():
    # NOTE(review): `runner` and `collector` are closed over from the
    # enclosing scope — confirm their lifetimes against the caller.
    try:
        await runner.cleanup()
    finally:
        # Always unregister, even if cleanup raised, so a later registration
        # cannot hit a duplicated-timeseries error.
        REGISTRY.unregister(collector)
def _python_gc_metrics(self, remove: bool = True):
    """Remove every collector from the default REGISTRY when *remove* is true.

    Fixes from review: the ``remove`` flag was previously ignored (the method
    always unregistered), and the loop variable was misleadingly called
    ``name`` although it iterates collector objects.
    """
    if not remove:
        # Honor the flag instead of unregistering unconditionally.
        return
    for collector in list(REGISTRY._names_to_collectors.values()):
        # A collector can expose several metric names, so it may already have
        # been unregistered via another name — ignore that case.
        with suppress(KeyError):
            REGISTRY.unregister(collector)
from prometheus_client import Gauge, Counter, generate_latest, start_http_server, REGISTRY
import logging
import threading, time
from sensors import lidar, temperature
from util import AtomicBool
import yaml

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Shared flag the sensor threads can poll to know when to stop.
    running = AtomicBool(True)
    # unregister the default collectors. source: https://github.com/prometheus/client_python/issues/414
    for coll in list(REGISTRY._collector_to_names.keys()):
        REGISTRY.unregister(coll)
    # Gauges exported by this process; creation registers them with REGISTRY.
    lidar_distance = Gauge(
        name='lidar_distance',
        documentation=
        'The distance (m) between the LiDAR (sensor) and the floor.')
    lidar_strength = Gauge(
        name='lidar_strength',
        documentation='The strength of the measurement taken by the LiDAR')
    pipe_temperature = Gauge(
        name='pipe_temperature',
        documentation='Temperature measured directly on the milk pipe')
    config = {}
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated — prefer yaml.safe_load here.
        with open(r"config.yaml") as file:
            config = yaml.load(file)
'django_http_responses_total_by_status', 'django_db') if ENABLE_PROMETHEUS.get(): django_collectors = set() django_metrics_names = [ name for name in REGISTRY._names_to_collectors.keys() if name.startswith('django_') and not name.startswith(ALLOWED_DJANGO_PROMETHEUS_METRICS) ] for metric_name in django_metrics_names: collector_obj = REGISTRY._names_to_collectors[metric_name] django_collectors.add(collector_obj) for django_collector in django_collectors: REGISTRY.unregister(django_collector) global_registry().gauge_callback( name='threads.total', callback=lambda: len(threading.enumerate()), label='Threads', description='The total number of threads', numerator='threads', ) global_registry().gauge_callback( name='threads.daemon', callback=lambda: sum(1 for thread in threading.enumerate() if thread.isDaemon()), label='Daemon Threads', description='The number of daemon threads',