def init_flask_metrics(flask_app, export_defaults=True, **kwargs):
    """Initialise Prometheus metrics for the given Flask app.

    Reads the local service configuration to decide whether metrics are
    enabled; when disabled, installs a no-op metrics object instead.

    :param flask_app: the Flask application to instrument
    :param export_defaults: also export the default flask request metrics
    :param kwargs: extra static labels attached to the service-info metric
    :return: True (kept for backward compatibility with callers)
    """
    global flask_metrics, enabled

    try:
        localconfig = anchore_engine.configuration.localconfig.get_config()
        metrics_config = localconfig.get('metrics', {})
        # Accept both spellings: 'enable' (historic typo) and 'enabled'.
        enabled = bool(metrics_config.get('enable', False))
        if not enabled:
            enabled = bool(metrics_config.get('enabled', False))
    except Exception as err:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            "unable to determine if metrics are enabled - exception: " + str(err))
        enabled = False

    if not enabled:
        flask_metrics = disabled_flask_metrics()
        # Fix: redundant parentheses around return value removed.
        return True

    if not flask_metrics:
        flask_metrics = PrometheusMetrics(flask_app, export_defaults=export_defaults)
        flask_metrics.info('anchore_service_info',
                           "Anchore Service Static Information",
                           version=version,
                           **kwargs)

    return True
class BlueprintTest(unittest.TestCase):
    """Verifies that metric decorators work on routes registered via a Blueprint."""

    def setUp(self):
        self.app = Flask(__name__)
        self.app.testing = True
        self.client = self.app.test_client()

        # Python 2 compatibility: alias the renamed regex assertion helpers.
        if sys.version_info.major < 3:
            self.assertRegex = self.assertRegexpMatches
            self.assertNotRegex = self.assertNotRegexpMatches

        # Dedicated registry so metrics from other tests do not leak in.
        self.metrics = PrometheusMetrics(
            app=None, registry=CollectorRegistry(auto_describe=True))

    def test_blueprint(self):
        bp = Blueprint('test-blueprint', __name__)

        @bp.route('/test')
        @self.metrics.summary('requests_by_status', 'Request latencies by status',
                              labels={'status': lambda r: r.status_code})
        def test():
            return 'OK'

        self.app.register_blueprint(bp)
        self.metrics.init_app(self.app)

        # One request to the instrumented endpoint, then scrape /metrics.
        self.client.get('/test')
        resp = self.client.get('/metrics')

        self.assertEqual(resp.status_code, 200)
        payload = str(resp.data)
        self.assertIn('requests_by_status_count{status="200"} 1.0', payload)
        self.assertRegex(payload, 'requests_by_status_sum{status="200"} [0-9.]+')
def create_app(test_config=None):
    """Create and configure the Flask application.

    :param test_config: unused placeholder kept for the app-factory convention
    :return: the configured Flask app
    """
    # create and configure the app
    app = Flask(__name__, static_folder="build/static", template_folder="build")

    dir_name = "logs"
    try:
        # Create log Directory; tolerate it already existing.
        os.mkdir(dir_name)
        print("Directory ", dir_name, " Created ")
    except FileExistsError:
        print("Directory ", dir_name, " already exists")

    # app.logger.removeHandler(default_handler)
    handler = RotatingFileHandler("logs/orders.log", maxBytes=10000, backupCount=1)
    handler.setLevel(logging.DEBUG)
    app.logger.addHandler(handler)

    # If the Elasticsearch URL is set, also log to Elasticsearch.
    es_host_url = os.getenv("ES_HOST_URL")
    if es_host_url:
        # Fix: reuse the value read above instead of calling os.getenv twice.
        es_handler = ElasticsearchLogHandler(es_host_url)
        app.logger.addHandler(es_handler)
    app.logger.setLevel(logging.DEBUG)

    app.config.from_object("config")
    app.register_blueprint(mock)
    app.register_blueprint(index_blueprint)

    # Set up Prometheus metrics exporting with a static app-info metric.
    metrics = PrometheusMetrics(app)
    metrics.info("AnomalyDetectorDemo", "Demo application for PAD/LAD", version="0.1")
    return app
class Server:
    """Flask server exposing the computer-vision API, Swagger docs and metrics."""

    def __init__(self, log_level=logging.DEBUG):
        self.app = Flask(__name__)
        self.app.logger.setLevel(log_level)

        # Prometheus instrumentation plus a static version-info metric.
        self.metrics = PrometheusMetrics(self.app)
        self.metrics.info('app_info', 'Version info', version=__version__)

        # Mount the computer-vision blueprint under /v1/.
        self.cv = ComputerVision()
        self.app.register_blueprint(self.cv.blueprint, url_prefix='/v1/')

        swagger_settings = {
            'title': 'Corona Medical Monitors Camera Monitoring API',
            'uiversion': 3,
            'openapi': '3.0.2',
            'version': __version__,
        }
        self.app.config['SWAGGER'] = swagger_settings
        self.swagger = Swagger(self.app)

        @self.app.route('/ping/')
        def ping() -> str:
            """
            ping
            ---
            description: get a pong
            """
            return 'pong'
def create_app(config_name):
    """Application factory: build the Flask app for the given config name.

    :param config_name: key into the module-level ``config`` mapping
    :return: the configured Flask app
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    if os.environ.get('SQLALCHEMY_POOL_SIZE'):
        # Upload worker processes load SQLAlchemy themselves, so each
        # process's DB connection-pool size must be capped via this env var.
        print('根据环境变量设置数据库连接池大小为:', os.environ.get('SQLALCHEMY_POOL_SIZE'))
        app.config['SQLALCHEMY_POOL_SIZE'] = int(
            os.environ.get('SQLALCHEMY_POOL_SIZE'))

    db.init_app(app)
    print('connect to {}'.format(app.config['SQLALCHEMY_DATABASE_URI']))

    try:
        aios_redis.init_app(app)
        print('connect to {}'.format(app.config['REDIS_URL']))
        # The cache is cleared on every restart; if a Redis-backed message
        # queue is in use, queued messages must be persisted beforehand so
        # none are lost.
        init_pubsub(aios_redis)
        # aios_redis.flushdb()
    except Exception as err:
        print('warning: Redis服务出现异常!!!', err)

    try:
        from prometheus_flask_exporter import PrometheusMetrics
        metrics = PrometheusMetrics(app)
        # static information as metric
        metrics.info('ifaios_metrics', 'ifaios_cs', version="v2")
    except Exception as err:
        # Fix: report the failure instead of silently swallowing it; metrics
        # stay best-effort and the app still starts without them.
        print('warning: failed to initialise Prometheus metrics:', err)

    # Register blueprints.
    from app.api import api_blueprint
    app.register_blueprint(api_blueprint)
    from app.static import static_blueprint
    app.register_blueprint(static_blueprint)
    from app.common import common_blueprint
    app.register_blueprint(common_blueprint)
    return app
def init_flask_metrics(flask_app, export_defaults=True, **kwargs):
    """Initialise Prometheus metrics on an auth-wrapped metrics blueprint.

    :param flask_app: the Flask application to register the blueprint on
    :param export_defaults: also export the default flask request metrics
    :param kwargs: extra static labels attached to the service-info metric
    :return: True (kept for backward compatibility with callers)
    """
    global flask_metrics, enabled

    try:
        localconfig = anchore_engine.configuration.localconfig.get_config()
        metrics_config = localconfig.get('metrics', {})
        # Accept both spellings: 'enable' (historic typo) and 'enabled'.
        enabled = bool(metrics_config.get('enable', False))
        if not enabled:
            enabled = bool(metrics_config.get('enabled', False))
    except Exception as err:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            "unable to determine if metrics are enabled - exception: " + str(err))
        enabled = False

    if not enabled:
        flask_metrics = disabled_flask_metrics()
        # Fix: redundant parentheses around return value removed.
        return True

    if not flask_metrics:
        # Build a blueprint for metrics, wrapped in auth
        flask_metrics = PrometheusMetrics(metrics_blueprint,
                                          export_defaults=export_defaults)

        # Note: this must be after the addition of PrometheusMetrics to the
        # blueprint in order to ensure proper ordering of before_request and
        # after_request handling by prometheus counters
        metrics_blueprint.before_request(auth_function_factory())

        flask_app.register_blueprint(metrics_blueprint)

        flask_metrics.info('anchore_service_info',
                           "Anchore Service Static Information",
                           version=version,
                           **kwargs)

    return True
def __init__(self, app: flask.Flask):
    """Wire the socket, Discord OAuth session, metrics and encryption onto *app*."""
    self.app = app
    self.sio = flask_socketio.SocketIO(app)
    self.discord = DiscordOAuth2Session(app)
    self.metrics = PrometheusMetrics(app)

    # Always-on encryption key; the guest key is optional.
    self.fernet_encrypt = Fernet(app.config["FERNET_KEY"])
    guest_key = app.config["GUEST_KEY"]
    if guest_key is not None:
        self.guest_encrypt = Fernet(guest_key)
def setUp(self):
    """Create a fresh test app/client and an uninitialised metrics exporter."""
    self.app = Flask(__name__)
    self.app.testing = True
    self.client = self.app.test_client()

    # Python 2 compatibility: alias the renamed regex assertion helpers.
    if sys.version_info.major < 3:
        self.assertRegex = self.assertRegexpMatches
        self.assertNotRegex = self.assertNotRegexpMatches

    # Dedicated registry so metrics from other tests do not leak in.
    self.metrics = PrometheusMetrics(
        app=None, registry=CollectorRegistry(auto_describe=True))
def __init__(self, app: flask.Flask):
    """Wire socket, Discord OAuth, metrics, encryption and role enforcement onto *app*."""
    self.app = app
    self.sio = flask_socketio.SocketIO(app)
    self.discord = CustomDiscordOAuth2Session(app)
    self.metrics = PrometheusMetrics(app)

    # Always-on encryption key; guest key and role enforcement are optional.
    self.fernet_encrypt = Fernet(app.config["FERNET_KEY"])
    guest_key = app.config["GUEST_KEY"]
    if guest_key is not None:
        self.guest_encrypt = Fernet(guest_key)
    enforce_role = app.config["ENFORCE_ROLE"]
    if enforce_role is not None:
        self.enforce_role = EnforceDiscordRole(enforce_role)

    # Version header is not compared when validating client connections.
    self.expected_headers = connection_headers()
    self.expected_headers.pop("X-Randovania-Version")
def init_prometheus_flask_exporter(self, app):
    """Attach a PrometheusMetrics exporter to *app* when enabled via its config."""
    if not app.config.get("PROMETHEUS_ENABLE_EXPORTER_FLASK", False):
        LOGGER.debug(
            f"Prometheus Flask exporter is not enabled for {app.name}.")
        return

    # Metric names are prefixed with the app name to keep apps distinguishable.
    prefix = app.name
    exporter = PrometheusMetrics(app=None, defaults_prefix=prefix)
    exporter.init_app(app)
    LOGGER.debug(
        f"Prometheus Flask exporter is initialized with prefix {prefix}.")
def setup_prometheus(self, registry=None):
    """Set up Prometheus metrics, optionally against a caller-supplied registry."""
    extra = {"registry": registry} if registry else {}
    self.metrics = PrometheusMetrics(self.app, **extra)

    # Best effort: read the installed package version; fall back to "unknown".
    try:
        version = pkg_resources.require(self.app.name)[0].version
    except pkg_resources.DistributionNotFound:
        version = "unknown"

    self.metrics.info(
        "app_info", "Application info", version=version, appname=self.app.name
    )
    self.app.logger.info("Prometheus is enabled.")
def create_app(config):
    """Application factory: wire Mongo, tracing, instrumentation and blueprints.

    NOTE(review): the *config* parameter is not read here — presumably settings
    are hard-coded deliberately; confirm with the caller.
    """
    app = Flask(__name__)

    # http://docs.mongoengine.org/projects/flask-mongoengine/en/latest/
    # mongodb config
    app.config['MONGODB_SETTINGS'] = {
        'db': 'recall',
        'host': '127.0.0.1',
        'port': 27017,
    }

    # tracing config
    app.config[tracing.SETTINGS_NAME] = {
        'service_name': 'my-helloworld-service',
        'agent_host_name': '192.168.110.252',
        'agent_port': 6831,
    }

    # Prometheus request metrics (registers its own /metrics endpoint).
    PrometheusMetrics(app)

    # Distributed tracing.
    tracing.init_app(app)

    # OpenTelemetry pymongo Instrumentation
    PymongoInstrumentor().instrument()

    # OpenTelemetry Flask Instrumentation. WSGI middleware alternative:
    # https://opentelemetry-python.readthedocs.io/en/stable/instrumentation/wsgi/wsgi.html#usage-flask
    # app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)
    FlaskInstrumentor().instrument_app(app)

    # Database initialisation.
    db.init_app(app)

    app.json_encoder = RecallJSONEncoder
    # app.response_class = RecallResponse

    app.register_blueprint(index.controller, url_prefix="/")
    app.register_blueprint(admin.controller, url_prefix="/admin")
    return app
def create_app():
    """Application factory: configure converters, extensions, metrics and routes."""
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(Config)
    app.url_map.converters['oid'] = ObjectIdConverter

    # Initialise all extensions against this app instance.
    for extension in (db, auth, views):
        extension.init_app(app)

    metrics = PrometheusMetrics(app)
    metrics.info('backend_info', 'Backend Information', version='1.0.0')

    @app.route('/ping')
    @metrics.do_not_track()
    def ping():
        # Liveness probe; excluded from request metrics.
        return 'pong'

    return app
def create_app(): """Create Flask App.""" app = Flask(__name__, static_folder="static") # Register blueprints app.register_blueprint(index_blueprint) app.register_blueprint(api) sql_db = os.getenv("SQL_CONNECT", "sqlite://") app.config['SQLALCHEMY_DATABASE_URI'] = sql_db app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # Setup Prometheus Metrics metrics = PrometheusMetrics(app) metrics.info('app_info', 'Log Anomaly Detector', version='v0.1.0.beta1') # Initialize db and tables db.init_app(app) with app.app_context(): db.create_all() return app
class AppFactoryTest(unittest.TestCase):
    """Exercises PrometheusMetrics created via the app-factory (init_app) pattern."""

    def setUp(self):
        self.app = Flask(__name__)
        self.app.testing = True
        self.client = self.app.test_client()

        # Python 2 compatibility: alias the renamed regex assertion helpers.
        if sys.version_info.major < 3:
            self.assertRegex = self.assertRegexpMatches
            self.assertNotRegex = self.assertNotRegexpMatches

        # Dedicated registry so metrics from other tests do not leak in.
        self.metrics = PrometheusMetrics(
            app=None, registry=CollectorRegistry(auto_describe=True))
        self.metrics.init_app(self.app)

    def test_restricted(self):
        @self.app.route('/test')
        def test():
            return 'OK'

        self.client.get('/test')

        # Unfiltered scrape and a single-name filter both succeed.
        for query in ('', '?name[]=flask_exporter_info'):
            resp = self.client.get('/metrics' + query)
            self.assertEqual(resp.status_code, 200)

        # Restricting to the duration metrics still returns the samples.
        resp = self.client.get(
            '/metrics'
            '?name[]=flask_http_request_duration_seconds_bucket'
            '&name[]=flask_http_request_duration_seconds_count'
            '&name[]=flask_http_request_duration_seconds_sum')
        self.assertEqual(resp.status_code, 200)
        payload = str(resp.data)
        self.assertIn(
            'flask_http_request_duration_seconds_bucket{le="0.1",method="GET",path="/test",status="200"} 1.0',
            payload)
        self.assertIn(
            'flask_http_request_duration_seconds_count{method="GET",path="/test",status="200"} 1.0',
            payload)
def init_flask_metrics(flask_app, export_defaults=True, **kwargs):
    """Initialise Prometheus metrics, optionally wrapped in auth middleware.

    :param flask_app: the Flask application to instrument
    :param export_defaults: also export the default flask request metrics
    :param kwargs: extra static labels attached to the service-info metric
    :return: True (kept for backward compatibility with callers)
    """
    global flask_metrics, enabled
    auth_enabled = True

    try:
        localconfig = anchore_engine.configuration.localconfig.get_config()
        metrics_config = localconfig.get("metrics", {})

        # Handle typo in config. enabled == enable
        enabled = bool(metrics_config.get("enable", False))
        if not enabled:
            enabled = bool(metrics_config.get("enabled", False))

        auth_enabled = not bool(metrics_config.get("auth_disabled", False))
    except Exception as err:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            "unable to determine if metrics are enabled - exception: " + str(err))
        enabled = False

    if not enabled:
        flask_metrics = disabled_flask_metrics()
        return True

    if not flask_metrics:
        flask_metrics = PrometheusMetrics(
            flask_app,
            export_defaults=export_defaults,
            group_by_endpoint=True)

        if auth_enabled:
            # Protect the metrics endpoint with the auth check.
            flask_app.before_request(metrics_auth(flask_metrics.path))

        flask_metrics.info(
            "anchore_service_info",
            "Anchore Service Static Information",
            version=version,
            **kwargs)

    return True
def config_app(application):
    """Load application configuration from defaults, instance folder, the
    APP_CONFIG_FILE environment variable and command-line arguments — each
    layer overriding the previous one.

    Exits the process when no Microsoft Teams connector is configured.
    """
    try:
        # Load the default configuration
        application.config.from_object('prom2teams.config.settings')

        # Load the configuration from the instance folder
        instance = os.path.join(os.path.join(root, os.pardir), 'instance')
        config = os.path.join(instance, 'config.py')
        if os.path.isdir(instance) and os.path.exists(config):
            application.config.from_pyfile(config)

        # Load the file specified by the APP_CONFIG_FILE environment variable
        # Variables defined here will override those in the default configuration
        if 'APP_CONFIG_FILE' in os.environ:
            application.config['APP_CONFIG_FILE'] = os.environ.get(
                'APP_CONFIG_FILE')
            config_provided = _config_provided(os.getenv('APP_CONFIG_FILE'))
            _update_application_configuration(application, config_provided)

        # Parse and load command line properties
        # Variables defined here will override previous configuration
        command_line_args = _config_command_line()
        if command_line_args.configpath:
            application.config[
                'APP_CONFIG_FILE'] = command_line_args.configpath
            config_provided = _config_provided(command_line_args.configpath)
            _update_application_configuration(application, config_provided)
        if command_line_args.loglevel:
            application.config['LOG_LEVEL'] = command_line_args.loglevel
        if command_line_args.logfilepath:
            application.config['LOG_FILE_PATH'] = command_line_args.logfilepath
        if command_line_args.templatepath:
            application.config[
                'TEMPLATE_PATH'] = command_line_args.templatepath
        if command_line_args.groupalertsby:
            application.config[
                'GROUP_ALERTS_BY'] = command_line_args.groupalertsby
        if command_line_args.enablemetrics or os.environ.get(
                'PROM2TEAMS_PROMETHEUS_METRICS', False):
            # DEBUG_METRICS must be set before the exporter import takes effect.
            os.environ["DEBUG_METRICS"] = "True"
            from prometheus_flask_exporter import PrometheusMetrics
            metrics = PrometheusMetrics(application)

        if 'MICROSOFT_TEAMS' not in application.config:
            raise MissingConnectorConfigKeyException(
                'missing connector key in config')
    except MissingConnectorConfigKeyException:
        # Fix: this string literal was split mid-token in the source
        # ("connector \n available"); restored as a single message.
        sys.exit('No Microsoft Teams connector available')
def integrate_prometheus_metrics(app: Flask, restful_api: Api):
    """Attach Prometheus metrics to *app* and register a default request summary.

    :param app: the Flask application to instrument
    :param restful_api: unused here; kept for the commented-out RESTful
        exporter variant below — confirm before removing
    :return: the configured PrometheusMetrics instance
    """
    # metrics = RESTfulPrometheusMetrics(app, restful_api)
    metrics = PrometheusMetrics(app)
    metrics.info('app_info', 'Application info', version='1.0.3')
    # NOTE(review): the 'time_stamp' label is assigned a fresh time.time()
    # value per request, which gives this summary unbounded label cardinality —
    # Prometheus best practice discourages per-request label values; confirm
    # this is intentional before relying on it at scale.
    metrics.register_default(
        metrics.summary('by_path_method_time_stamp_summary',
                        'Request summary by request paths, method, timestamp',
                        labels={
                            'path': lambda: request.path,
                            'method': lambda: request.method,
                            'status': lambda r: r.status_code,
                            'time_stamp': lambda: time.time()
                        }))
    return metrics
def create_app(config_class=Config):
    """Application factory.

    :param config_class: configuration object to load; defaults to ``Config``
    :return: the configured Flask app
    """
    app = Flask(__name__)
    # Fix: the config_class parameter was accepted but ignored (Config was
    # hard-coded); honour it. Default behaviour is unchanged.
    app.config.from_object(config_class)

    db.init_app(app)
    bcrypt.init_app(app)
    compress.init_app(app)

    # Prometheus request metrics (registers /metrics).
    PrometheusMetrics(app)

    from autochannel.api.routes import mod_api
    from autochannel.site.routes import mod_site
    from autochannel.errors.routes import mod_errors
    app.register_blueprint(mod_api, url_prefix='/api')
    app.register_blueprint(mod_site)
    app.register_blueprint(mod_errors)
    return app
def _setup_metrics(self):
    """Register static app-info metrics and return the action-invocation Summary."""
    metrics = PrometheusMetrics(self.app)

    # Static build metadata, sourced from the environment at startup.
    metrics.info('flask_app_info', 'Application info',
                 version=os.environ.get('GIT_COMMIT') or 'unknown')
    build_ts = float(os.environ.get('BUILD_TIMESTAMP') or '0')
    metrics.info('flask_app_built_at', 'Application build timestamp').set(build_ts)

    return Summary('webhook_proxy_actions', 'Action invocation metrics',
                   labelnames=('http_route', 'http_method',
                               'action_type', 'action_index'))
def create_app(test_config=None):
    """Application factory; *test_config* entries override the default config."""
    app = Flask(__name__)
    app.config.from_object("project.config.Config")
    if test_config is not None:
        app.config.update(test_config)

    # Database and migrations.
    db.init_app(app)
    Migrate(app, db)

    app.register_blueprint(bp_v1)
    configure_swagger(app)
    configure_dependencies(app)

    # Expose Prometheus metrics on /metrics.
    metrics = PrometheusMetrics(app=app, path='/metrics')

    config_logging(app)
    return app
def create_app():
    """Construct the core Flask app with cache, metrics, routes and Dash."""
    app = Flask(__name__, instance_relative_config=False)

    # Simple in-process cache.
    app.cache = Cache(app, config={'CACHE_TYPE': 'simple'})

    # Prometheus request metrics.
    app.metrics = PrometheusMetrics(app)

    # Load the configuration object.
    app.config.from_object('config.Config')

    with app.app_context():
        # Import Flask routes (registration happens on import).
        from . import routes
        # Mount the Dash application onto the Flask app.
        from .dash_application import dash_routes
        app = dash_routes.add_dash(app)

    return app
def _setup_metrics(self):
    """Set up Prometheus app-info metrics; return the per-action Summary."""
    exporter = PrometheusMetrics(self.app)

    # Static build metadata from the environment.
    exporter.info(
        "flask_app_info",
        "Application info",
        version=os.environ.get("GIT_COMMIT") or "unknown",
    )
    built_at = exporter.info("flask_app_built_at", "Application build timestamp")
    built_at.set(float(os.environ.get("BUILD_TIMESTAMP") or "0"))

    return Summary(
        "webhook_proxy_actions",
        "Action invocation metrics",
        labelnames=("http_route", "http_method", "action_type", "action_index"),
    )
arguments={"title": "User API"}, resolver=RestyResolver(default_module_name="thoth.user_api.api_v1"), strict_validation=True, validate_responses=False, ) application = app.app # create tracer and put it in the application configuration Configuration.tracer = init_jaeger_tracer("user_api") # create metrics and manager metrics = PrometheusMetrics(application, group_by="endpoint", excluded_paths=[ "/liveness", "/readiness", "/api/v1/ui", "/api/v1/openapi", ]) manager = Manager(application) # Needed for session. application.secret_key = Configuration.APP_SECRET_KEY # static information as metric metrics.info("user_api_info", "User API info", version=__service_version__) _API_GAUGE_METRIC = metrics.info("user_api_schema_up2date", "User API schema up2date") class _GraphDatabaseWrapper:
from flask import Flask
from flask_restplus import Api
from prometheus_flask_exporter import PrometheusMetrics

# Created unbound so the app factory below can attach it (init_app pattern).
metrics = PrometheusMetrics(app=None)


def create_app():
    """Application factory for the users backend API."""
    from users_backend.api_namespace import api_namespace
    from users_backend.admin_namespace import admin_namespace

    application = Flask(__name__)

    # Bind the module-level metrics exporter to this app instance.
    metrics.init_app(application)

    api = Api(application, version='0.1', title='Users Backend API',
              description='A Simple CRUD API')

    from users_backend.db import db, db_config
    application.config['RESTPLUS_MASK_SWAGGER'] = False
    application.config.update(db_config)
    db.init_app(application)
    application.db = db

    api.add_namespace(api_namespace)
    api.add_namespace(admin_namespace)
    return application
from flask import Flask, request, jsonify
from prometheus_flask_exporter import PrometheusMetrics
import mysql.connector
import json

app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True

# Request metrics exporter plus a static app-info metric.
metrics = PrometheusMetrics(app)
metrics.info('app_info', 'Application info', version='1.0.3')

# Module-level DB handles, populated by connect_database().
connection = None
cursor = None


def connect_database():
    """Open the MySQL connection and cursor into the module-level globals."""
    global connection
    global cursor
    # NOTE(review): database credentials are hard-coded in source; move them
    # to configuration or a secrets store.
    connection = mysql.connector.connect(host='database',
                                         port='3306',
                                         user='******',
                                         passwd='1234',
                                         database='mydb')
    cursor = connection.cursor()


@app.route('/')
def hello_world():
    """Root endpoint; returns a fixed greeting."""
    return "hello world"
def metrics(self, **kwargs):
    """Build a PrometheusMetrics for self.app, defaulting to a fresh registry.

    Any *kwargs* (minus 'registry') are forwarded to the constructor.
    """
    if 'registry' in kwargs:
        registry = kwargs.pop('registry')
    else:
        # Isolated registry so repeated calls do not collide.
        registry = CollectorRegistry(auto_describe=True)
    return PrometheusMetrics(self.app, registry=registry, **kwargs)
if os.path.isfile(full_path): playbook = JsonPlaybookLoader.load_playbook(full_path) if playbook.name not in playbook_name: app.running_context.execution_db.session.add(playbook) app.running_context.execution_db.session.commit() if __name__ == "__main__": args = parse_args() exit_code = 0 walkoff.config.initialize(args.config) compose_api(walkoff.config.Config) app = create_app() if not walkoff.config.Config.SEPARATE_PROMETHEUS: metrics = PrometheusMetrics(app, path='/prometheus_metrics') import_workflows(app) try: run(args, app, *convert_host_port(args)) except KeyboardInterrupt: logger.info( 'Caught KeyboardInterrupt! Please wait a few seconds for WALKOFF to shutdown.' ) except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exc() exit_code = 1 finally: app.running_context.executor.shutdown_pool() logger.info('Shutting down server')
from flask import Flask, Response, request from prometheus_client import generate_latest, Gauge, Histogram, Counter, Summary, CONTENT_TYPE_LATEST from prometheus_flask_exporter import PrometheusMetrics import os, time, sys, json, requests VERSION = "0.0.1b" BOTID = 'Robotics' BOTPWD = 'na' OUTPUT_URL = os.environ['REPORT_URL'] app = Flask(__name__) m = PrometheusMetrics(app=app) @app.route('/alert/<string:room>', methods=['POST']) def alert(room): alertinfo = request.json app.logger.debug(alertinfo) c = len(alertinfo['alerts']) app.logger.debug('There are ' + str(c) + ' alerts.') for alert in alertinfo['alerts']: json_data = {} json_data['to'] = room json_data['displayfromname'] = 'AlertManager Bot' json_data['from'] = BOTID json_data['password'] = BOTPWD json_data['type'] = 'meeting' if alert['status'] == "firing": app.logger.info('Firing WARNING Alert to ' + room) app.logger.info('WARNING: ' + alert['annotations']['description'])
# DATABASE
def make_db(app):
    """Initialise SQL, Mongo and CORS extensions on *app*; return (db, mg, app)."""
    # SQL
    sql_db = SQLAlchemy()
    sql_db.init_app(app)
    # Mongo
    mongo = PyMongo()
    mongo.init_app(app)
    # CORS
    CORS(app)
    return sql_db, mongo, app


# CELERY AND RABBITMQ
def make_celery(app):
    """Build a Celery instance whose tasks run inside the Flask app context."""
    celery = Celery(config_source=app.config)

    class ContextTask(celery.Task):
        # Ensure every task body executes within the Flask application context.
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return self.run(*args, **kwargs)

    celery.Task = ContextTask
    return celery


# PROMETHEUS — unbound exporter for the app-factory pattern.
metrics = PrometheusMetrics.for_app_factory()