def setup_sqlalchemy_events(app):
    """Register SQLAlchemy connection-pool event listeners that export Prometheus metrics.

    Tracks three metrics:
      * db_connection_total_connected  - gauge of connections currently held by the pool
      * db_connection_total_checked_out - gauge of connections currently checked out
      * db_connection_open_duration_seconds - histogram of how long each checkout lasted,
        labelled by method/host/path so durations can be attributed to a request or task

    :param app: the Flask application whose ``db.engine`` the listeners attach to
    """
    TOTAL_DB_CONNECTIONS = Gauge(
        'db_connection_total_connected',
        'How many db connections are currently held (potentially idle) by the server',
    )
    TOTAL_CHECKED_OUT_DB_CONNECTIONS = Gauge(
        'db_connection_total_checked_out',
        'How many db connections are currently checked out by web requests',
    )
    DB_CONNECTION_OPEN_DURATION_SECONDS = Histogram(
        'db_connection_open_duration_seconds',
        'How long db connections are held open for in seconds',
        ['method', 'host', 'path'])

    # need this or db.engine isn't accessible
    with app.app_context():
        @event.listens_for(db.engine, 'connect')
        def connect(dbapi_connection, connection_record):
            # connection first opened with db
            TOTAL_DB_CONNECTIONS.inc()

        @event.listens_for(db.engine, 'close')
        def close(dbapi_connection, connection_record):
            # connection closed (probably only happens with overflow connections)
            TOTAL_DB_CONNECTIONS.dec()

        @event.listens_for(db.engine, 'checkout')
        def checkout(dbapi_connection, connection_record, connection_proxy):
            # Broad try/except: a metrics failure must never break the
            # request that triggered the checkout, so errors are only logged.
            try:
                # connection given to a web worker
                TOTAL_CHECKED_OUT_DB_CONNECTIONS.inc()

                # this will overwrite any previous checkout_at timestamp
                connection_record.info['checkout_at'] = time.monotonic()

                # checkin runs after the request is already torn down, therefore we add the request_data onto the
                # connection_record as otherwise it won't have that information when checkin actually runs.
                # Note: this is not a problem for checkouts as the checkout always happens within a web request or task

                # web requests
                if has_request_context():
                    connection_record.info['request_data'] = {
                        'method': request.method,
                        'host': request.host,
                        'url_rule': request.url_rule.rule if request.url_rule else 'No endpoint'
                    }
                # celery apps
                elif current_task:
                    connection_record.info['request_data'] = {
                        'method': 'celery',
                        'host': current_app.config['NOTIFY_APP_NAME'],  # worker name
                        'url_rule': current_task.name,  # task name
                    }
                # anything else. migrations possibly.
                else:
                    current_app.logger.warning(
                        'Checked out sqlalchemy connection from outside of request/task'
                    )
                    connection_record.info['request_data'] = {
                        'method': 'unknown',
                        'host': 'unknown',
                        'url_rule': 'unknown',
                    }
            except Exception:
                current_app.logger.exception(
                    "Exception caught for checkout event.")

        @event.listens_for(db.engine, 'checkin')
        def checkin(dbapi_connection, connection_record):
            # NOTE(review): if the connection was checked out before these
            # listeners were registered, 'checkout_at'/'request_data' will be
            # missing and the KeyError lands in the except below — logged,
            # not raised, so the checkin itself still succeeds.
            try:
                # connection returned by a web worker
                TOTAL_CHECKED_OUT_DB_CONNECTIONS.dec()

                # duration that connection was held by a single web request
                duration = time.monotonic(
                ) - connection_record.info['checkout_at']

                DB_CONNECTION_OPEN_DURATION_SECONDS.labels(
                    connection_record.info['request_data']['method'],
                    connection_record.info['request_data']['host'],
                    connection_record.info['request_data']
                    ['url_rule']).observe(duration)
            except Exception:
                current_app.logger.exception(
                    "Exception caught for checkin event.")
# Module-level client singletons. Each is created unconfigured here and
# (presumably) initialised with the app later via an ``init_app``-style call —
# TODO confirm against init_app / create_app in the rest of this file.
zendesk_client = ZendeskClient()
statsd_client = StatsdClient()
redis_store = RedisClient()
performance_platform_client = PerformancePlatformClient()
cbc_proxy_client = CBCProxyClient()
document_download_client = DocumentDownloadClient()
metrics = GDSMetrics()
notification_provider_clients = NotificationProviderClients()

# Request-scoped accessors: proxy to attributes stashed on the current Flask
# request context (set elsewhere, presumably by the auth layer — verify).
api_user = LocalProxy(lambda: _request_ctx_stack.top.api_user)
authenticated_service = LocalProxy(
    lambda: _request_ctx_stack.top.authenticated_service)

# Gauge of in-flight web requests; incremented/decremented outside this view.
CONCURRENT_REQUESTS = Gauge(
    'concurrent_web_request_count',
    'How many concurrent requests are currently being served',
)


def create_app(application):
    """Configure the given Flask application for the current environment.

    Selects the config object via the NOTIFY_ENVIRONMENT environment variable
    (raises KeyError if unset or not a known config), then wires up the app.

    :param application: the Flask application instance to configure in place
    """
    # Imported here rather than at module level — presumably to avoid a
    # circular import with app.config; confirm before moving it.
    from app.config import configs

    notify_environment = os.environ['NOTIFY_ENVIRONMENT']

    application.config.from_object(configs[notify_environment])
    # Used by the sqlalchemy checkout listener as the 'host' label for celery tasks.
    application.config['NOTIFY_APP_NAME'] = application.name

    init_app(application)

    # Metrics intentionally high up to give the most accurate timing and reliability that the metric is recorded
    metrics.init_app(application)