def when_ready(server):
    """Gunicorn hook: announce the multiproc dir and start the metrics server."""
    print("Using", os.environ['PROMETHEUS_MULTIPROC_DIR'],
          " as PROMETHEUS_MULTIPROC_DIR", file=sys.stderr)

    # Fix the initialization of the backward compatible var
    # `prometheus_multiproc_dir`. This fix can go away in future releases of
    # the prometheus-client library when backward compatibility is removed.
    # setdefault only writes when the lowercase name is absent, which matches
    # the original two-condition check.
    if 'PROMETHEUS_MULTIPROC_DIR' in os.environ:
        os.environ.setdefault("prometheus_multiproc_dir",
                              os.environ['PROMETHEUS_MULTIPROC_DIR'])

    port = int(os.environ.get('PROMETHEUS_METRICS_PORT', 9090))
    print("Starting metrics server on port", port, file=sys.stderr)
    GunicornPrometheusMetrics.start_http_server_when_ready(port=port)
def when_ready(server):  # noqa
    """Log when worker is ready to serve."""
    log = logging.getLogger(__name__)
    log.info(
        "Starting gunicorn with %s workers %s worker class and preload %s",
        workers,
        worker_class,
        preload_app,
    )
    # Dump the effective settings, then expose the metrics endpoint.
    log_all_settings()
    port = GUNICORN_SETTINGS.metrics_port
    GunicornPrometheusMetrics.start_http_server_when_ready(port)
def create_app_with_metrics(config_name='ProductionConfig'):  # pragma: no cover # NOQA
    """Build the app and attach the multiprocess Prometheus exporter."""
    # Imported lazily so the exporter (and its env requirements) are only
    # needed when this factory is actually used.
    from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics

    application = create_app(config_name)
    # Registration happens via side effect; the exporter instance itself is
    # not needed afterwards.
    GunicornPrometheusMetrics(app=application, group_by='url_rule')
    return application
def when_ready(server):
    """Start metrics server when Timesketch app is ready."""
    # Exit early if we don't have the necessary environment set up.
    if not METRICS_ENABLED:
        return

    db_dir = pathlib.Path(METRICS_DB_DIR)
    if db_dir.is_dir():
        # Clean up any old prometheus database files.
        for db_file in db_dir.glob('*.db'):
            os.remove(db_file)
    else:
        db_dir.mkdir(parents=True, exist_ok=True)

    GunicornPrometheusMetrics.start_http_server_when_ready(
        int(METRICS_HTTP_PORT), METRICS_HTTP_HOST)
def create_app(test_config=None):
    """Create and configure the Flask application.

    Args:
        test_config: Optional mapping of test settings (currently unused;
            kept to preserve the app-factory signature).

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__, static_folder="build/static", template_folder="build")

    # makedirs(exist_ok=True) replaces the racy mkdir + FileExistsError
    # dance: it is idempotent and needs no try/except.
    dir_name = "logs"
    os.makedirs(dir_name, exist_ok=True)

    # Rotating file log; the path is derived from dir_name instead of a
    # second hard-coded "logs/..." literal so the two cannot drift apart.
    handler = RotatingFileHandler(
        os.path.join(dir_name, "orders.log"), maxBytes=10000, backupCount=1
    )
    handler.setLevel(logging.DEBUG)
    app.logger.addHandler(handler)

    # If an Elasticsearch URL is set, also ship logs there (single getenv
    # call; the original read the variable twice).
    es_host_url = os.getenv("ES_HOST_URL")
    if es_host_url:
        es_handler = ElasticsearchLogHandler(es_host_url)
        app.logger.addHandler(es_handler)

    app.logger.setLevel(logging.DEBUG)
    app.config.from_object("config")
    app.register_blueprint(mock)
    app.register_blueprint(index_blueprint)

    # Set up Prometheus metrics exporting (multiprocess-aware for gunicorn).
    metrics = GunicornPrometheusMetrics(app)
    # Static information exposed as a metric.
    metrics.info("AnomalyDetectorDemo", "Demo application for PAD/LAD", version="0.1")
    return app
def child_exit(server, worker):
    """Mark a child worker as exited."""
    # Guard clause: metrics bookkeeping only matters when metrics are on.
    if not METRICS_ENABLED:
        return
    GunicornPrometheusMetrics.mark_process_dead_on_child_exit(worker.pid)
def when_ready(server):
    """Start the Prometheus metrics HTTP server once gunicorn is ready.

    Raises:
        RuntimeError: if METRICS_PORT is not set. The original code crashed
            with an opaque ``int(None)`` TypeError in that case.
    """
    port = os.getenv('METRICS_PORT')
    if port is None:
        raise RuntimeError("METRICS_PORT environment variable is not set")
    GunicornPrometheusMetrics.start_http_server_when_ready(int(port))
def when_ready(server):
    """Expose the Prometheus metrics endpoint once the master is ready."""
    port = PROMETHEUS_PORT
    GunicornPrometheusMetrics.start_http_server_when_ready(port)
def when_ready(server):
    """Start the standalone metrics server when gunicorn is ready."""
    # Default port 9153 unless overridden via the environment.
    port = int(os.getenv("METRICS_SERVER_PORT", "9153"))
    GunicornPrometheusMetrics.start_http_server_when_ready(port)
    print("Metrics Server started on port: {0}".format(port))
def when_ready(server):
    """Start the Prometheus metrics server when gunicorn is ready.

    The port can now be overridden via the METRICS_PORT environment
    variable; it defaults to the previously hard-coded 8080, so existing
    deployments are unaffected.
    """
    import os  # local import: this gunicorn config may not import os at top level

    GunicornPrometheusMetrics.start_http_server_when_ready(
        int(os.environ.get("METRICS_PORT", 8080)))
import os
import sys
from flask import Flask
from flask.logging import default_handler
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics
from masu.api.blueprint import api_v1
from masu.api.status import ApplicationStatus
from masu.celery import celery as celery_app, update_celery_config
from masu.util import setup_cloudwatch_logging

# NOTE(review): `logging` is used on the next line but no `import logging`
# appears in this chunk -- confirm it is imported earlier in the file.
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
logger.addHandler(default_handler)

# Exporter created without an app: the Flask app is attached later inside
# the factory (lazy init for the multiprocess exporter).
metrics = GunicornPrometheusMetrics(app=None)  # pylint: disable=invalid-name


def create_app(test_config=None):
    """
    App factory for Flask application.

    Args:
        test_config (dict): A mapping of configurations used for testing

    Returns:
        flask.app.Flask: The configured Flask application
    """
    app = Flask(__name__, instance_relative_config=True)
def child_exit(server, worker):  # pylint: disable=unused-argument
    """Mark process dead when gunicorn worker dies."""
    dead_pid = worker.pid
    GunicornPrometheusMetrics.mark_process_dead_on_child_exit(dead_pid)
def when_ready(server):  # pylint: disable=unused-argument
    """Start metrics server when gunicorn is ready."""
    # Falls back to port 9100 when METRICS_PORT is unset.
    port = int(os.getenv('METRICS_PORT', default='9100'))
    GunicornPrometheusMetrics.start_http_server_when_ready(port)
"http://datacollector.infra.ooni.io/ooni-public/centrifugation/", ) PROMETHEUS_PORT = int(os.environ.get("PROMETHEUS_PORT", "8080")) # S3 related configuration S3_ACCESS_KEY_ID = os.environ.get("S3_ACCESS_KEY_ID", None) S3_SECRET_ACCESS_KEY = os.environ.get("S3_SECRET_ACCESS_KEY", None) S3_SESSION_TOKEN = os.environ.get("S3_SESSION_TOKEN", None) S3_ENDPOINT_URL = os.environ.get("S3_ENDPOINT_URL", None) # As of 2017-07-18 635830 is the latest index in the database REPORT_INDEX_OFFSET = int(os.environ.get("REPORT_INDEX_OFFSET", "635830")) REQID_HDR = "X-Request-ID" # We do a lazy setup, and populate the app inside of create_app try: metrics = GunicornPrometheusMetrics(app=None, group_by="endpoint") except ValueError: from prometheus_flask_exporter import PrometheusMetrics # In testing we should use the standard PrometheusMetrics due to: # env prometheus_multiproc_dir is not set or not a directory metrics = PrometheusMetrics(app=None, group_by="endpoint") def request_id(): if request: return request.headers.get(REQID_HDR) return None
# If you installed Timesketch in a virtualenv you need to activate it.
# This needs to be before any imports in order to import from the virtualenv.
# activate_virtualenv = '/path/to/your/virtualenv/bin/activate_this.py'
# execfile(activate_virtualenv, dict(__file__=activate_virtualenv))

import os
import logging

from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics

from timesketch.app import configure_logger
from timesketch.app import create_app
from timesketch.models import db_session

logger = logging.getLogger("timesketch.wsgi_server")

configure_logger()
application = create_app()
application_v2 = create_app(v2=True)

# Setup metrics endpoint. Recent prometheus-client releases use the uppercase
# PROMETHEUS_MULTIPROC_DIR variable; older releases used the lowercase
# spelling. Accept either so metrics are not silently disabled after a
# library upgrade.
if os.environ.get("prometheus_multiproc_dir") or os.environ.get(
        "PROMETHEUS_MULTIPROC_DIR"):
    logger.info("Metrics server enabled")
    GunicornPrometheusMetrics(application, group_by="endpoint")


# pylint: disable=unused-argument
@application.teardown_appcontext
def shutdown_session(exception=None):
    """Remove the database session after every request or app shutdown."""
    db_session.remove()
def when_ready(server):
    """Start the metrics HTTP server on the port given by METRICS_PORT."""
    # Intentionally raises KeyError when METRICS_PORT is missing, matching
    # the original fail-fast behavior.
    port = int(os.environ["METRICS_PORT"])
    GunicornPrometheusMetrics.start_http_server_when_ready(port)
import os
import datetime

from flask import Flask, jsonify
import prometheus_flask_exporter
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics
from requests import get

app = Flask(__name__)
# Multiprocess-aware exporter; NO_PREFIX drops the default metric prefix.
metrics = GunicornPrometheusMetrics(
    app, defaults_prefix=prometheus_flask_exporter.NO_PREFIX)


@app.route('/')
def hello_world():
    """Demo endpoint."""
    return jsonify(message="Hello world order")


@app.route('/healthz')
@metrics.do_not_track()
def healthz():
    """Liveness probe; excluded from request metrics."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


@app.route('/metrics')
@metrics.do_not_track()
def metrics_proxy():
    """Proxy the exporter's standalone metrics server.

    BUG FIX: this view was previously named ``metrics``, which rebound the
    module-level ``metrics`` exporter object to the view function after
    import. Renamed so the exporter stays reachable; the /metrics URL is
    unchanged. Also dropped a pointless f-prefix on a placeholder-free
    string literal.
    """
    return get('http://localhost:8080/metrics').content
import json

from flask import Flask, request
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics

from config.config import Config

# Application plus the multiprocess-aware Prometheus exporter.
app = Flask(__name__)
metrics = GunicornPrometheusMetrics(app)
metrics.info("app_name", "application info", version=Config.APP_VERSION)


@app.route("/health-check")
def health_check():
    """Report service status and version as a JSON string."""
    payload = {"status": "OK", "version": Config.APP_VERSION}
    return json.dumps(payload)
from flask import Flask
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics

app = Flask(__name__)
metrics = GunicornPrometheusMetrics(app)


@app.route('/test')
def index():
    """Trivial endpoint used to generate request metrics."""
    return 'Hello world'


if __name__ == '__main__':
    # Metrics are served on 9100, the app itself on 5000.
    metrics.start_http_server(9100)
    app.run(debug=False, port=5000)
# Jaeger tracer configuration; the service name doubles as the Prometheus
# metrics namespace. The f-prefixes on these placeholder-free literals were
# no-ops and have been removed (same string values).
config = jConfig(
    config=tracer_config,
    service_name="RDSWebConnexionPlus",
    metrics_factory=PrometheusMetricsFactory(namespace="RDSWebConnexionPlus"),
)
tracer_obj = config.initialize_tracer()
tracing = FlaskTracing(tracer_obj, True, app)
install_all_patches()

# add a TracingHandler for Logging: reuse gunicorn's handlers and level so
# app logs and gunicorn logs stay consistent.
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers.extend(gunicorn_logger.handlers)
app.logger.addHandler(TracingHandler(tracer_obj))
app.logger.setLevel(gunicorn_logger.level)
### Tracing end ###

app.config.update(flask_config)

try:
    from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics

    metrics = GunicornPrometheusMetrics(app)
except Exception as e:
    # Best-effort: metrics are optional; keep serving without them.
    print(f"error in prometheus setup: {e}")

Session(app)
socketio = SocketIO(
    app,
    cors_allowed_origins=json.loads(os.getenv("FLASK_ORIGINS")),
    manage_session=False,
)
import os

from flask import Flask
from healthcheck import HealthCheck
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics
import redis

# Redis client. NOTE(review): redis.Redis() is lazy and does not connect
# here, so this except branch only guards construction-time errors — the
# real connectivity check is the health probe below.
try:
    redis_client = redis.Redis(host="redis", port=6379, password="******")
except redis.RedisError as e:
    print(e)

app = Flask(__name__)
metrics = GunicornPrometheusMetrics(app)
metrics.info('app_info', 'Application info', version='1.0')

health = HealthCheck()


def redis_available():
    """Health probe: raises if Redis is unreachable, else reports OK."""
    redis_client.info()
    return True, "Redis OK"


health.add_check(redis_available)
app.add_url_rule("/healthcheck", "healthcheck", view_func=lambda: health.run())
def child_exit(server, worker):
    """Tell the multiprocess exporter that a gunicorn worker died."""
    pid = worker.pid
    GunicornPrometheusMetrics.mark_process_dead_on_child_exit(pid)
    RDBServerException)
from werkzeug.exceptions import BadRequest
from f8a_utils.ingestion_utils import unknown_package_flow
from f8a_utils import ingestion_utils
from bayesian.default_config import (THREESCALE_USER_KEY,
                                     THREESCALE_API_URL,
                                     STACK_REPORT_UI_HOSTNAME)
from prometheus_flask_exporter.multiprocess import GunicornPrometheusMetrics
from prometheus_flask_exporter import NO_PREFIX

logger = logging.getLogger(__name__)

# Versioned API blueprint; all routes below are under /api/v2.
api_v2 = Blueprint('api_v2', __name__, url_prefix='/api/v2')

# metrics obj to be used to track endpoints
# NOTE(review): a Blueprint (not a Flask app) is passed as the app argument
# here — confirm this is supported by the installed exporter version.
metrics = GunicornPrometheusMetrics(api_v2, group_by="endpoint",
                                    defaults_prefix=NO_PREFIX)


@api_v2.route('/component-analyses/<ecosystem>/<package>/<version>',
              methods=['GET'])
@validate_user
@login_required
def component_analyses_get(ecosystem, package, version):
    """Handle the GET REST API call.

    Component Analyses:
        - If package is Known (exists in GraphDB (Snyk Edge) returns Json formatted response.
        - If package is not Known: Call Util's function to trigger ingestion flow.

    :return: