# NOTE(review): this test setUp is collapsed onto one line and contains a
# redacted URL ("http://*****:*****") that leaves a string literal
# unterminated, so the block is not runnable as-is — the original source must
# be recovered before any refactor. Kept byte-identical below; only this
# comment was added. It appears to build a Flask app with test routes, mount
# a Prometheus registry via DispatcherMiddleware, start a dependency watcher,
# and pre-seed request metrics with a few test-client calls — TODO confirm
# once the redacted portion is restored.
def setUp(self): """ setUp test """ # self.registry = CollectorRegistry() self.app = Flask(__name__) self.prometheus = make_wsgi_app() @self.app.route('/') @self.app.route('/teste') @self.app.route('/database') def index(): """ Home """ return jsonify({'home': True}) def check_db(): """ HealthCheck """ response = self.client.get("http://*****:*****@self.app.route('/metrics') def metrics(): """ Default metrics """ return self.dispatcher.mounts['/metrics']({}, self.capture)[0] self.app, self.registry = register_metrics(self.app, error_fn=is_error200) self.dispatcher = DispatcherMiddleware( self.app.wsgi_app, {"/metrics": make_wsgi_app(registry=self.registry)}) # self.app.wsgi_app = DispatcherMiddleware(self.app.wsgi_app, self.registry) self.scheduler = watch_dependencies("database", check_db, time_execution=500, registry=self.registry, app=self.app) self.client = self.app.test_client() self.client.get('/batata') self.client.get('/teste') self.client.get('/teste') self.client.get('/')
def create_app():
    """Build the Flask app with APScheduler job config and a /metrics mount."""
    app = Flask(__name__)
    # Optional deployment configuration comes from the environment.
    if 'AXE_CONFIG' in os.environ:
        app.config.from_envvar('AXE_CONFIG')
    else:
        logger.warning('Missing config: AXE_CONFIG.')
    job_settings = {
        'SCHEDULER_API_ENABLED': True,
        'JOBS': [
            {
                'func': count,
                'trigger': 'interval',
                'seconds': 3,
                'id': 'count',
            }
        ],
    }
    app.config.update(job_settings)
    job_scheduler = APScheduler()
    job_scheduler.init_app(app)
    # Serve Prometheus metrics on /metrics alongside the main application.
    app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
        '/metrics': make_wsgi_app()
    })
    return app
def create_app():
    """Create the Flask app and wrap it so /metrics serves Prometheus data."""
    flask_app = Flask(__name__, instance_relative_config=True)
    # Everything except /metrics is handled by the Flask app itself.
    return DispatcherMiddleware(flask_app, {'/metrics': make_wsgi_app()})
def create_app(graphql_host="", graphql_port=0, graphql_user="", graphql_pass=""):
    """Build the dispatched WSGI application for the GraphQL-backed exporter.

    Wires the simplejson app under '/', the Prometheus app under '/metrics',
    and starts the node-exporter poller in a background daemon thread.

    BUG FIX: the thread previously targeted ``nodeexporter.metrics.main``,
    but only ``metrics`` is in scope (``from .nodeexporter import metrics``),
    so starting the thread raised ``NameError``. It now targets
    ``metrics.main`` directly.
    """
    from werkzeug.wsgi import DispatcherMiddleware
    from prometheus_client import make_wsgi_app
    import base64
    from .simplejson.routes import create_app

    # Pre-compute the HTTP Basic auth header for the GraphQL endpoint.
    graphql_auth = 'Basic ' + base64.b64encode(
        (graphql_user + ':' + graphql_pass).encode()).decode()
    app = create_app(graphql_host, graphql_port, graphql_auth)  # Load the app
    application = DispatcherMiddleware(app, {'/metrics': make_wsgi_app()})
    # Load the node exporter app and poll it from a daemon thread.
    from .nodeexporter import metrics
    import threading
    t = threading.Thread(target=metrics.main,
                         args=(graphql_host, graphql_port, graphql_auth))
    t.daemon = True
    t.start()
    return application
def _start_web(port, sentry_dsn=None, blueprints=None):
    """Serve the management Flask app over HTTP.

    Args:
        port: str/int — port for the management application's HTTP listener.
        sentry_dsn: str — optional Sentry DSN; enables error logging.
        blueprints: list(flask.Blueprint) — optional extra blueprints.
    """
    app = Flask(__name__)
    app.register_blueprint(lifecycle_blueprint)
    for bp in (blueprints or []):
        app.register_blueprint(bp)
    if sentry_dsn:
        app = setup_sentry_wsgi(app, sentry_dsn)
    # Prometheus metrics live under /metrics; everything else hits the app.
    dispatched = DispatcherMiddleware(app, {'/metrics': make_wsgi_app()})
    server = WSGIServer(('0.0.0.0', int(port)), dispatched)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        logger.info('Keyboard interrupt, executing shutdown hooks...')
        execute_shutdown_handlers()
def _setup_prometheus(app):
    """Attach a multiprocess Prometheus registry, a /metrics WSGI mount, and
    request counters to *app*."""
    # This environment variable MUST be declared before importing the
    # prometheus modules (or unit tests fail)
    # More details on this awkwardness: https://github.com/prometheus/client_python/issues/250
    os.environ["prometheus_multiproc_dir"] = PROMETHEUS_TMP_COUNTER_DIR.name
    from prometheus_client import (
        CollectorRegistry,
        multiprocess,
        make_wsgi_app,
    )
    from prometheus_flask_exporter import Counter
    from prometheus_flask_exporter.multiprocess import (
        UWsgiPrometheusMetrics,
    )
    # Aggregate samples from all worker processes into one registry.
    app.prometheus_registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(app.prometheus_registry)
    UWsgiPrometheusMetrics(app)
    # Add prometheus wsgi middleware to route /metrics requests
    app.wsgi_app = DispatcherMiddleware(
        app.wsgi_app,
        {"/metrics": make_wsgi_app(registry=app.prometheus_registry)})
    # set up counters
    # NOTE(review): assumes app.prometheus_counters (a dict) already exists on
    # the app object — confirm the caller initializes it before this runs.
    app.prometheus_counters["pre_signed_url_req"] = Counter(
        "pre_signed_url_req",
        "tracking presigned url requests",
        ["requested_protocol"],
    )
def main(args):
    """Watchdog main loop: serve cluster-health metrics over WSGI and refresh
    pod / node / docker-daemon / k8s-component gauges every
    ``args.interval`` seconds."""
    logDir = args.log
    # A stale .prom file from a previous run would be scraped as current data.
    try_remove_old_prom_file(logDir + "/watchdog.prom")
    address = args.k8s_api
    parse_result = urlparse.urlparse(address)
    api_server_ip = parse_result.hostname
    api_server_port = parse_result.port or 80
    hosts = load_machine_list(args.hosts)
    list_pods_url = "{}/api/v1/namespaces/default/pods/".format(address)
    list_nodes_url = "{}/api/v1/nodes/".format(address)
    # The registered collector serves whatever gauge list was most recently
    # published through atomic_ref (see the bottom of the loop).
    atomic_ref = AtomicRef()
    REGISTRY.register(CustomCollector(atomic_ref))
    app = make_wsgi_app(REGISTRY)
    httpd = make_server("", int(args.port), app)
    # Serve scrapes from a daemon thread so the loop below can run forever.
    t = threading.Thread(target=httpd.serve_forever)
    t.daemon = True
    t.start()
    while True:
        # These gauges are regenerated on each iteration.
        pai_pod_gauge = gen_pai_pod_gauge()
        pai_container_gauge = gen_pai_container_gauge()
        pai_node_gauge = gen_pai_node_gauge()
        docker_daemon_gauge = gen_docker_daemon_gauge()
        k8s_gauge = gen_k8s_component_gauge()
        try:
            # 1. check service level status
            podsStatus = request_with_histogram(list_pods_url,
                                                list_pods_histogram)
            process_pods_status(pai_pod_gauge, pai_container_gauge,
                                podsStatus)
            # 2. check nodes level status
            nodesStatus = request_with_histogram(list_nodes_url,
                                                 list_nodes_histogram)
            process_nodes_status(pai_node_gauge, nodesStatus)
            # 3. check docker deamon status
            collect_docker_daemon_status(docker_daemon_gauge, hosts)
            # 4. check k8s level status
            collect_k8s_componentStaus(k8s_gauge, api_server_ip,
                                       api_server_port, nodesStatus)
        except Exception as e:
            # Keep the loop alive on any failure; count it and log the trace.
            error_counter.labels(type="unknown").inc()
            logger.exception("watchdog failed in one iteration")
        # Publish even partially-filled gauges so scrapes always see data.
        atomic_ref.get_and_set([
            pai_pod_gauge, pai_container_gauge, pai_node_gauge,
            docker_daemon_gauge, k8s_gauge
        ])
        time.sleep(float(args.interval))
def main():
    """Entry point: parse CLI options, register the Kannel collector, and
    serve Prometheus metrics forever."""
    parser = cli()
    args = parser.parse_args()
    # Show version and exit early when asked.
    if args.version is True:
        print("Version is {0}".format(__version__))
        sys.exit()
    # One of the two password options is mandatory.
    if args.password is None and args.password_file is None:
        parser.error('Option --password or --password-file must be set.')
    logging.basicConfig(
        format='%(asctime)s %(name)s %(levelname)s: %(message)s',
        datefmt="%Y-%m-%d %H:%M:%S")
    logger.setLevel(args.log_level)
    status_password = get_password(args.password, args.password_file)
    collector_opts = CollectorOpts(args.filter_smsc, args.collect_wdp,
                                   args.collect_box_uptime,
                                   args.collect_smsc_uptime,
                                   args.box_connections)
    REGISTRY.register(
        KannelCollector(args.target, status_password, collector_opts))
    make_server('', args.port, make_wsgi_app()).serve_forever()
def create_app():
    """Build the Flask app: HTTP routes, /metrics dispatch, logging, and
    background scheduler startup."""
    flask_app = flask.Flask(__name__)
    flask_app.add_url_rule("/", "ping", app_ping, methods=["GET"])
    flask_app.add_url_rule("/", "refresh_tasks", app_refresh_tasks,
                           methods=["POST"])
    flask_app.add_url_rule(
        "/feed_update_callback",
        "feed_update_callback",
        app_feed_update_callback,
        methods=["POST"],
    )
    # Add prometheus wsgi middleware to route /metrics requests
    flask_app.wsgi_app = DispatcherMiddleware(
        flask_app.wsgi_app, {"/metrics": prometheus.make_wsgi_app()})
    logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    logger.addHandler(stream_handler)
    stream_handler.setFormatter(logging.Formatter(
        "%(asctime)s TS %(levelname)-5s [%(module)s] %(message)s"))
    logger.info("Launching scheduler")
    scheduler.start()
    feed_auto_update_registry.initialize()
    transiter_registry.initialize()
    metrics_populator.refresh()
    logger.info("Launching HTTP server")
    return flask_app
def main():
    """CLI entry point for the Druid Prometheus exporter.

    Parses the listen address / POST URI / encoding options, registers the
    Druid collector, and serves the combined WSGI app forever.

    FIX: removed the unused local ``collect_metrics_from = []`` (assigned and
    never read).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--listen', metavar='ADDRESS',
                        help='Listen on this address', default=':8000')
    parser.add_argument('-u', '--uri', default='/',
                        help='The URI to check for POSTs coming from Druid')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Enable debug logging')
    parser.add_argument('-e', '--encoding', default='utf-8',
                        help='Encoding of the Druid POST JSON data.')
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.DEBUG if args.debug else logging.WARNING)
    # Listen spec is "host:port"; split only on the first colon.
    address, port = args.listen.split(':', 1)
    log.info('Starting druid_exporter on %s:%s', address, port)
    druid_collector = collector.DruidCollector()
    REGISTRY.register(druid_collector)
    prometheus_app = make_wsgi_app()
    druid_wsgi_app = DruidWSGIApp(args.uri, druid_collector, prometheus_app,
                                  args.encoding)
    httpd = make_server(address, int(port), druid_wsgi_app,
                        ThreadingWSGIServer)
    httpd.serve_forever()
def create_wsgi_middleware(other_wsgi, tracer=None):
    """
    Create a wrapper middleware for another WSGI response handler.
    If tracer is not passed in, 'opentracing.tracer' is used.
    """
    prometheus_app = make_wsgi_app()

    def wsgi_tracing_middleware(environ, start_response):
        # Metrics scrapes bypass tracing entirely.
        if environ['PATH_INFO'] == '/metrics':
            return prometheus_app(environ, start_response)
        # TODO find out if the route can be retrieved from somewhere
        request = WSGIRequestWrapper.from_wsgi_environ(environ)
        span = before_request(request=request, tracer=tracer)
        # Operation name is "<SCHEME> <OPERATION> <PATH>"; _NORM_RE then
        # substitutes '-' for whatever it matches (pattern defined elsewhere).
        nm = '%s %s %s' % (environ['wsgi.url_scheme'].upper(),
                           request.operation.upper(),
                           environ['PATH_INFO'])
        nm = _NORM_RE.sub('-', nm)
        span.set_operation_name(nm)

        # Wrapper around the real start_response object to log
        # additional information to opentracing Span
        def start_response_wrapper(status, response_headers, exc_info=None):
            span.set_tag('error', exc_info is not None)
            # A WSGI status is e.g. "200 OK" — the first 3 chars are the code.
            span.set_tag('http.status_code', status[:3])
            # The span ends when headers are sent, before the body streams.
            span.finish()
            return start_response(status, response_headers)

        with opentracing_instrumentation.span_in_context(span):
            return other_wsgi(environ, start_response_wrapper)

    return wsgi_tracing_middleware
def monitor_app(app, path="/metrics"):
    """Install the request hooks on *app* and mount Prometheus at *path*."""
    app.before_request(before_request)
    app.after_request(after_request)
    return DispatcherMiddleware(app.wsgi_app, {path: make_wsgi_app()})
def main():
    """Serve Prometheus metrics, refreshing them lazily on each scrape
    (throttled to at most once per second)."""
    # Match bitcoind's log format, with UTC timestamps.
    logging.basicConfig(
        format="%(asctime)s %(levelname)s %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%SZ"
    )
    logging.Formatter.converter = time.gmtime
    logger.setLevel(LOG_LEVEL)
    # Handle SIGTERM gracefully.
    signal.signal(signal.SIGTERM, sigterm_handler)
    metrics_wsgi = make_wsgi_app()
    last_refresh = None

    def throttled_app(*args, **kwargs):
        nonlocal last_refresh
        started = datetime.now()
        # Refresh at most once per second; failures are counted, not fatal.
        if not last_refresh or (started - last_refresh).total_seconds() > 1:
            try:
                refresh_metrics()
            except Exception as exc:
                logger.debug("Refresh failed", exc_info=True)
                exception_count(exc)
            elapsed = datetime.now() - started
            PROCESS_TIME.inc(elapsed.total_seconds())
            logger.info("Refresh took %s seconds", elapsed)
            last_refresh = started
        return metrics_wsgi(*args, **kwargs)

    make_server(METRICS_ADDR, METRICS_PORT, throttled_app).serve_forever()
def my_app(environ, start_fn):
    """WSGI app: on '/', probe the sample URLs and update the gauges; on
    '/metrics', serve the Prometheus exposition; otherwise show a help page.

    BUG FIX: the generic exception handler previously called
    ``start_fn(res.status_code, headers)`` — ``res`` may be unbound if
    ``requests.get`` itself raised, and a WSGI status must be a *string*
    ("500 Internal Server Error"), never an int.
    """
    service = ['https://httpstat.us/200', 'https://httpstat.us/503']
    if environ['PATH_INFO'] == '/':
        headers = [('Content-type', 'html')]
        for s in service:
            try:
                start = time.time()
                res = requests.get(s)
                end = time.time()
                # Observed wall-clock latency plus the server-reported time.
                response_latency_metric.labels(s).observe(end - start)
                response_time_metric.labels(s).set(
                    res.elapsed.total_seconds() / 1000)
            except ConnectionError:
                start_fn('406 Bad Connection', headers)
                return [b'Connection Error while accessing Sample URLS']
            except Exception:
                # Valid WSGI status string; res may not exist here.
                start_fn('500 Internal Server Error', headers)
                return [b'Issue while accessing Sample urls']
            # 1 = reachable and healthy, 0 = anything else.
            if res.status_code == 200:
                up_metric.labels(s).set(int(1))
            else:
                up_metric.labels(s).set(int(0))
    if environ['PATH_INFO'] == '/metrics':
        metrics_app = make_wsgi_app(registry)
        return metrics_app(environ, start_fn)
    headers = [('Content-type', 'html')]
    start_fn('200 OK', headers)
    return [
        b'<h1><span style="color: #0000ff; background-color: #ffffff;">This page when accessed checks the following sample urls Availability and Response times and exposes the prometheus metrics in /metrics endpoint</span></h1> <h1><a href="https://httpstat.us/503" target="_blank">https://httpstat.us/503</a> </h1> <h1><a href="https://httpstat.us/200" target="_blank">https://httpstat.us/200</a></h1>'
    ]
def main():
    """Start the oslo.metrics socket listener plus the Prometheus HTTP
    endpoint, and clean both up on shutdown."""
    cfg.CONF(sys.argv[1:])
    socket_path = cfg.CONF.oslo_metrics.metrics_socket_file
    listener = MetricsListener(socket_path)
    try:
        os.chmod(socket_path, stat.S_IRWXU | stat.S_IRWXO)
    except OSError:
        LOG.error("Changing the mode of the file failed.... continuing")
    listener_thread = threading.Thread(target=listener.serve)
    LOG.info("Start oslo.metrics")
    listener_thread.start()
    wsgi_app = make_wsgi_app()
    try:
        global httpd
        httpd = make_server('', 3000, wsgi_app)
        signal.signal(signal.SIGTERM, handle_sigterm)
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        LOG.info("Try to stop...")
        # Remove the unix socket and shut both servers down.
        os.remove(cfg.CONF.oslo_metrics.metrics_socket_file)
        listener.stop()
        httpd.server_close()
def main():
    """Parse CLI options, register the Borg collector, and serve metrics on
    localhost."""
    parser = argparse.ArgumentParser(
        description="Borg exporter for Prometheus")
    parser.add_argument(
        "-p",
        "--port",
        help=f"exporter exposed port (default {PORT})",
        type=int,
        default=PORT,
    )
    parser.add_argument(
        "-c",
        "--conf",
        help=f"configuration file (default {CONF})",
        type=argparse.FileType("r"),
        default=CONF,
    )
    options = parser.parse_args()
    REGISTRY.register(BorgCollector(safe_load(options.conf)))
    # Bound to 127.0.0.1 only.
    make_server("127.0.0.1", options.port, make_wsgi_app()).serve_forever()
def monitor(app, path="/metrics", http_server=False, port=9090, addr=""):
    """Attach per-request count/latency metrics to *app*.

    With ``http_server=True`` the metrics are exposed on a standalone server;
    otherwise a DispatcherMiddleware mounting Prometheus at *path* is returned.
    """
    request_counter = Counter(
        "{}_flask_request_count".format(app.name),
        "{} - Flask Request Count".format(app.name),
        ["method", "endpoint", "http_status"],
    )
    latency_histogram = Histogram(
        "{}_flask_request_latency_seconds".format(app.name),
        "{} - Flask Request Latency".format(app.name),
        ["method", "endpoint"],
    )

    def _start_timer():
        # Stash the start time on the request context.
        request.start_time = time.time()

    def _record_request(response):
        elapsed = time.time() - request.start_time
        latency_histogram.labels(request.method, request.path).observe(elapsed)
        request_counter.labels(request.method, request.path,
                               response.status_code).inc()
        return response

    app.before_request(_start_timer)
    app.after_request(_record_request)
    if http_server:
        start_http_server(port, addr)
    else:
        return DispatcherMiddleware(app.wsgi_app, {path: make_wsgi_app()})
def main():
    """Configure stdlib + structlog logging and serve the app with /metrics
    mounted, using Werkzeug's reloading dev server."""
    logging.basicConfig(
        format='%(message)s',
        stream=sys.stdout,
        level=logging.DEBUG
    )
    structlog.configure(
        processors=[
            structlog.stdlib.add_log_level,
            structlog.stdlib.add_logger_name,
            structlog.processors.KeyValueRenderer(
                key_order=["event", "request_id"]
            )
        ],
        context_class=structlog.threadlocal.wrap_dict(dict),
        logger_factory=structlog.stdlib.LoggerFactory()
    )
    # allows us to view metrics on /metrics
    wsgi = DispatcherMiddleware(create_app().wsgi_app,
                                {'/metrics': make_wsgi_app()})
    run_simple('localhost', 5000, wsgi, use_reloader=True)
def create_dispatcher() -> DispatcherMiddleware:
    """
    App factory for dispatcher middleware managing multiple WSGI apps
    """
    flask_app = create_app(config=CONFIG)
    # Main app at '/', Prometheus exposition at '/metrics'.
    return DispatcherMiddleware(flask_app.wsgi_app,
                                {"/metrics": make_wsgi_app()})
def start_wsgi_server(port, addr='', registry=REGISTRY):
    """Starts a WSGI server for prometheus metrics as a daemon thread."""
    server = make_server(addr, port, make_wsgi_app(registry),
                         ThreadingWSGIServer,
                         handler_class=HTTPRequestHandler)
    worker = threading.Thread(target=server.serve_forever)
    worker.daemon = True
    worker.start()
def validate_metrics(self, metric_name, help_text, increments):
    """Increment a counter *increments* times and assert the WSGI app serves
    the expected status, headers, and exposition body."""
    counter = Counter(metric_name, help_text, registry=self.registry)
    for _ in range(increments):
        counter.inc()
    # Run the WSGI app once against the canned environ and capture output.
    wsgi_app = make_wsgi_app(self.registry)
    outputs = wsgi_app(self.environ, self.capture)
    self.assertEqual(len(outputs), 1)
    body = outputs[0].decode('utf8')
    # Status code
    self.assertEqual(self.captured_status, "200 OK")
    # Headers
    self.assertEqual(len(self.captured_headers), 1)
    self.assertEqual(self.captured_headers[0],
                     ("Content-Type", CONTENT_TYPE_LATEST))
    # Body: HELP line, TYPE line, and the sample itself.
    self.assertIn("# HELP " + metric_name + "_total " + help_text + "\n",
                  body)
    self.assertIn("# TYPE " + metric_name + "_total counter\n", body)
    self.assertIn(metric_name + "_total " + str(increments) + ".0\n", body)
def setup_metrics(app: Flask):
    """Wire metrics logging callbacks into *app* and mount the Prometheus
    exposition endpoint at /metrics."""
    logger.info('Setting up metrics')
    ErrorLogger.add_logging_callback(
        MetricsHandler.log_exception_for_metrics)
    # provide app's version and deploy environment/config name to set a gauge metric
    MetricsHandler.setup_metrics_callbacks(
        app, app_version=app.config['VERSION'])
    if 'prometheus_multiproc_dir' in os.environ:
        # We're in a multiprocessing environment (i.e. gunicorn)
        multiproc_registry = CollectorRegistry()
        multiprocess.MultiProcessCollector(multiproc_registry)
        wsgi_app = make_wsgi_app(multiproc_registry)
    else:
        wsgi_app = make_wsgi_app()
    # Plug metrics WSGI app to your main app with dispatcher
    app.wsgi_app = DispatcherMiddleware(app.wsgi_app,
                                        {"/metrics": wsgi_app})
def create_app():
    """Build the demo Flask app and dispatch /metrics to Prometheus."""
    demo_app = Flask('demo')
    demo_app.add_url_rule("/", view_func=index)
    return DispatcherMiddleware(demo_app, {"/metrics": make_wsgi_app()})
def app(environ, start_fn):
    """WSGI app: multiprocess metrics on /metrics, greeting everywhere else."""
    REQUESTS.inc()
    if environ['PATH_INFO'] != '/metrics':
        start_fn('200 OK', [])
        return [b'Hello World']
    # Build a fresh registry per scrape so all worker processes' data is read.
    registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(registry)
    return make_wsgi_app(registry)(environ, start_fn)
def sentry_exporter():
    """Register the Sentry collector and return the dispatched WSGI app."""
    sentry = SentryAPI(BASE_URL, AUTH_TOKEN)
    log.info("exporter: cleaning registry collectors...")
    clean_registry()
    REGISTRY.register(
        SentryCollector(sentry, ORG_SLUG, get_metric_config(),
                        PROJECTS_SLUG))
    return DispatcherMiddleware(app.wsgi_app,
                                {"/metrics": make_wsgi_app()})
def __init__(self, host, port, registry):
    """Create a WSGI server for *registry* metrics with access logging muted."""
    from wsgiref.simple_server import make_server, WSGIRequestHandler

    class _QuietHandler(WSGIRequestHandler):
        def log_message(self, format, *args):
            # Suppress per-request access log lines.
            pass

    wsgi_app = prometheus_client.make_wsgi_app(registry)
    self.httpd = make_server(host, port, wsgi_app,
                             handler_class=_QuietHandler)
def main():
    """Register the BMP180 collector and serve Prometheus metrics forever."""
    parser = argparse.ArgumentParser(
        description='BMP180 exporter for Prometheus')
    parser.add_argument('-p', '--port',
                        help=f'exporter exposed port (default {PORT})',
                        type=int, default=PORT)
    options = parser.parse_args()
    REGISTRY.register(BMP180Collector())
    make_server('', options.port, make_wsgi_app()).serve_forever()
def __init__(self, settings):
    """Route GitHub webhooks and the Prometheus /metrics endpoint through
    this Tornado application.

    FIX: removed ``settings.update(dict())`` — updating a dict with an empty
    dict is a no-op.
    """
    # Serve the Prometheus WSGI app from inside Tornado.
    metrics_handler = wsgi.WSGIContainer(make_wsgi_app())
    handlers = [
        (r'/webhooks/github', GithubWebhookHandler),
        (r'/metrics', metrics_handler)
    ]
    super().__init__(handlers, **settings)
def create_app(config: CollectorConfig):
    """Build the Flask UI for browsing Aliyun CMS project/metric metadata,
    wrapped so /metrics serves Prometheus data.

    Refactor: the three routes previously repeated the same
    set_PageSize / do_action / json.loads / error-template sequence; that is
    now the shared nested helper ``_query``.
    """
    app = Flask(__name__, instance_relative_config=True)
    client = AcsClient(ak=config.credential['access_key_id'],
                       secret=config.credential['access_key_secret'],
                       region_id=config.credential['region_id'])

    def _query(req):
        """Run a CMS meta request (page size 100); return (data, error)."""
        req.set_PageSize(100)
        try:
            resp = client.do_action_with_exception(req)
        except Exception as e:
            return None, e
        return json.loads(resp), None

    @app.route("/")
    def projectIndex():
        data, err = _query(QueryProjectMetaRequest())
        if err is not None:
            return render_template("error.html", errorMsg=err)
        return render_template("index.html",
                               projects=data["Resources"]["Resource"])

    @app.route("/projects/<string:name>")
    def projectDetail(name):
        req = QueryMetricMetaRequest()
        req.set_Project(name)
        data, err = _query(req)
        if err is not None:
            return render_template("error.html", errorMsg=err)
        return render_template("detail.html",
                               metrics=data["Resources"]["Resource"],
                               project=name)

    @app.route("/yaml/<string:name>")
    def projectYaml(name):
        req = QueryMetricMetaRequest()
        req.set_Project(name)
        data, err = _query(req)
        if err is not None:
            return render_template("error.html", errorMsg=err)
        return render_template("yaml.html",
                               metrics=data["Resources"]["Resource"],
                               project=name)

    app.jinja_env.filters['formatmetric'] = format_metric
    app.jinja_env.filters['formatperiod'] = format_period
    return dispatcher.DispatcherMiddleware(
        app, {'/metrics': make_wsgi_app()})
def app():
    """Pytest fixture: yields the app with /metrics dispatched; clears the
    user DB on teardown."""
    test_app = create_app({'TESTING': True})
    # See https://stackoverflow.com/a/36222848/4241180, allows
    # test_client() to work as expected
    test_app.wsgi_app = DispatcherMiddleware(
        test_app.wsgi_app, {'/metrics': make_wsgi_app()})
    yield test_app
    # teardown
    user_db.clear()
def metrics_page():
    """Load the latest speedtest results into the gauges and return the
    Prometheus WSGI app to serve them."""
    #-- Get data
    speedtest_data = load_data_from_file(data_source_file)
    logger.info("speedtest data= {}".format(speedtest_data))
    #-- set data in prometheus gauges
    for gauge, key in ((speedtest_ping, 'ping'),
                       (speedtest_download, 'download'),
                       (speedtest_upload, 'upload')):
        gauge.set(float(speedtest_data[key]))
    return make_wsgi_app()
def exporter_app(environ, start_response):
    """Minimal WSGI router: /metrics -> Prometheus, / -> home, else 404."""
    path = environ['PATH_INFO']
    if path == '/metrics':
        return make_wsgi_app()(environ, start_response)
    status, header, output = home() if path == '/' else not_found()
    start_response(status, [header])
    return [output]
from prometheus_client import make_wsgi_app
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from twisted.web.resource import Resource
from twisted.internet import reactor


class HelloWorld(Resource):
    """Root resource: replies with a plain greeting on GET."""
    isLeaf = False

    def render_GET(self, request):
        return b"Hello World"


# Run the Prometheus WSGI app on Twisted's thread pool, mounted at /metrics.
prometheus_resource = WSGIResource(
    reactor, reactor.getThreadPool(), make_wsgi_app())

root_resource = HelloWorld()
root_resource.putChild(b'metrics', prometheus_resource)

reactor.listenTCP(8000, Site(root_resource))
reactor.run()
from prometheus_client import make_wsgi_app
from wsgiref.simple_server import make_server

# Built once at import time; reused for every /metrics request.
metrics_app = make_wsgi_app()


def my_app(environ, start_fn):
    """Dispatch /metrics to Prometheus; greet on every other path."""
    if environ['PATH_INFO'] == '/metrics':
        return metrics_app(environ, start_fn)
    start_fn('200 OK', [])
    return [b'Hello World']


if __name__ == '__main__':
    make_server('', 8000, my_app).serve_forever()
def metrics():
    # Return the Prometheus WSGI app (a fresh app object is built per call).
    # NOTE(review): presumably used as a Flask/route view — Flask accepts a
    # WSGI callable as a response — but the enclosing routing is not visible
    # here; confirm against the caller.
    return make_wsgi_app()