def receive_checkout(dbapi_connection, connection_record, connection_proxy):
    '''Log checkedout and overflow when a connection is checked out'''
    hostname = gethostname().replace(".", "-")
    process_name = str(config.get("PROCESS_NAME", "unknown"))

    statsd_client.gauge(
        ".".join(["dbconn", database_name, hostname, process_name,
                  "checkedout"]),
        connection_proxy._pool.checkedout())

    statsd_client.gauge(
        ".".join(["dbconn", database_name, hostname, process_name,
                  "overflow"]),
        connection_proxy._pool.overflow())

    # Keep track of where and why this connection was checked out.
    log = get_logger()
    context = log._context._dict.copy()
    f, name = find_first_app_frame_and_name(
        ignores=['sqlalchemy', 'inbox.ignition', 'nylas.logging'])
    source = '{}:{}'.format(name, f.f_lineno)

    pool_tracker[dbapi_connection] = {
        'source': source,
        'context': context,
        'checkedout_at': time.time()
    }
def receive_checkout(dbapi_connection, connection_record, connection_proxy):
    """Log checkedout and overflow when a connection is checked out"""
    hostname = gethostname().replace(".", "-")
    process_name = str(config.get("PROCESS_NAME", "main_process"))

    if config.get("ENABLE_DB_TXN_METRICS", False):
        statsd_client.gauge(
            ".".join(["dbconn", database_name, hostname, process_name,
                      "checkedout"]),
            connection_proxy._pool.checkedout(),
        )
        statsd_client.gauge(
            ".".join(["dbconn", database_name, hostname, process_name,
                      "overflow"]),
            connection_proxy._pool.overflow(),
        )

    # Keep track of where and why this connection was checked out.
    log = get_logger()
    context = log._context._dict.copy()
    f, name = find_first_app_frame_and_name(
        ignores=["sqlalchemy", "inbox.ignition", "nylas.logging"])
    source = "{}:{}".format(name, f.f_lineno)

    pool_tracker[dbapi_connection] = {
        "source": source,
        "context": context,
        "checkedout_at": time.time(),
    }
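# A minimal sketch (not part of the original source) of how a checkout
# listener like receive_checkout is typically registered, using SQLAlchemy's
# event API on an engine's connection pool. The connection URL below is a
# hypothetical placeholder.
from sqlalchemy import create_engine, event


def _register_checkout_listener():
    engine = create_engine("mysql+pymysql://user:pass@localhost/inbox")  # hypothetical URL
    # Fire receive_checkout every time a DBAPI connection is checked out of
    # this engine's pool.
    event.listen(engine.pool, "checkout", receive_checkout)
    return engine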
def new_session(engine, versioned=True):
    """Returns a session bound to the given engine."""
    session = Session(bind=engine, autoflush=True, autocommit=False)

    if versioned:
        from inbox.models.transaction import (create_revisions,
                                              propagate_changes,
                                              increment_versions)

        @event.listens_for(session, 'before_flush')
        def before_flush(session, flush_context, instances):
            propagate_changes(session)
            increment_versions(session)

        @event.listens_for(session, 'after_flush')
        def after_flush(session, flush_context):
            """
            Hook to log revision snapshots. Must be post-flush in order to
            grab object IDs on new objects.
            """
            create_revisions(session)

        # Make statsd calls for transaction times
        transaction_start_map = {}
        frame, modname = find_first_app_frame_and_name(
            ignores=['sqlalchemy', 'inbox.models.session', 'nylas.logging',
                     'contextlib'])
        funcname = frame.f_code.co_name
        modname = modname.replace(".", "-")
        metric_name = 'db.{}.{}.{}'.format(engine.url.database, modname,
                                           funcname)

        @event.listens_for(session, 'after_begin')
        def after_begin(session, transaction, connection):
            # It's okay to key on the session object here, because each
            # session binds to only one engine/connection. If this changes
            # in the future such that a session may encompass multiple
            # engines, then we'll have to get more sophisticated.
            transaction_start_map[session] = time.time()

        @event.listens_for(session, 'after_commit')
        @event.listens_for(session, 'after_rollback')
        def end(session):
            start_time = transaction_start_map.get(session)
            if not start_time:
                return

            del transaction_start_map[session]

            t = time.time()
            latency = int((t - start_time) * 1000)
            statsd_client.timing(metric_name, latency)
            statsd_client.incr(metric_name)
            if latency > MAX_SANE_TRX_TIME_MS:
                log.warning('Long transaction',
                            latency=latency,
                            modname=modname,
                            funcname=funcname)

    return session
def new_session(engine, versioned=True):
    """Returns a session bound to the given engine."""
    session = Session(bind=engine, autoflush=True, autocommit=False)

    if versioned:
        configure_versioning(session)

        # Make statsd calls for transaction times
        transaction_start_map = {}
        frame, modname = find_first_app_frame_and_name(ignores=[
            "sqlalchemy",
            "inbox.models.session",
            "nylas.logging",
            "contextlib",
        ])
        funcname = frame.f_code.co_name
        modname = modname.replace(".", "-")
        metric_name = "db.{}.{}.{}".format(engine.url.database, modname,
                                           funcname)

        @event.listens_for(session, "after_begin")
        def after_begin(session, transaction, connection):
            # It's okay to key on the session object here, because each
            # session binds to only one engine/connection. If this changes
            # in the future such that a session may encompass multiple
            # engines, then we'll have to get more sophisticated.
            transaction_start_map[session] = time.time()

        @event.listens_for(session, "after_commit")
        @event.listens_for(session, "after_rollback")
        def end(session):
            start_time = transaction_start_map.get(session)
            if not start_time:
                return

            del transaction_start_map[session]

            t = time.time()
            latency = int((t - start_time) * 1000)

            if config.get("ENABLE_DB_TXN_METRICS", False):
                statsd_client.timing(metric_name, latency)
                statsd_client.incr(metric_name)

            if latency > MAX_SANE_TRX_TIME_MS:
                log.warning(
                    "Long transaction",
                    latency=latency,
                    modname=modname,
                    funcname=funcname,
                )

    return session
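# Hypothetical sketch of the configure_versioning(session) helper referenced
# above; it is assumed to register the same before_flush/after_flush revision
# hooks that the inline variant of new_session attaches directly.
def configure_versioning(session):
    from inbox.models.transaction import (create_revisions,
                                          propagate_changes,
                                          increment_versions)

    @event.listens_for(session, 'before_flush')
    def before_flush(session, flush_context, instances):
        propagate_changes(session)
        increment_versions(session)

    @event.listens_for(session, 'after_flush')
    def after_flush(session, flush_context):
        # Post-flush so newly inserted objects already have IDs when
        # revision snapshots are created.
        create_revisions(session)

    return session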
def new_session(engine, versioned=True):
    """Returns a session bound to the given engine."""
    session = Session(bind=engine, autoflush=True, autocommit=False)

    if versioned:
        from inbox.models.transaction import (create_revisions,
                                              propagate_changes,
                                              increment_versions)

        @event.listens_for(session, 'before_flush')
        def before_flush(session, flush_context, instances):
            propagate_changes(session)
            increment_versions(session)

        @event.listens_for(session, 'after_flush')
        def after_flush(session, flush_context):
            """
            Hook to log revision snapshots. Must be post-flush in order to
            grab object IDs on new objects.
            """
            create_revisions(session)

        # Make statsd calls for transaction times
        transaction_start_map = {}
        frame, modname = find_first_app_frame_and_name(
            ignores=['sqlalchemy', 'inbox.models.session', 'nylas.logging',
                     'contextlib'])
        funcname = frame.f_code.co_name
        modname = modname.replace(".", "-")
        metric_name = 'db.{}.{}.{}'.format(engine.url.database, modname,
                                           funcname)

        @event.listens_for(session, 'after_transaction_create')
        def after_transaction_create(session, transaction):
            transaction_start_map[hash(transaction)] = time.time()

        @event.listens_for(session, 'after_transaction_end')
        def after_transaction_end(session, transaction):
            start_time = transaction_start_map.get(hash(transaction))
            if not start_time:
                return

            latency = int((time.time() - start_time) * 1000)
            statsd_client.timing(metric_name, latency)
            statsd_client.incr(metric_name)

    return session
def new_session(engine, versioned=True, explicit_begin=False):
    """Returns a session bound to the given engine."""
    session = Session(bind=engine, autoflush=True, autocommit=False)

    if versioned:
        configure_versioning(session)

        # Make statsd calls for transaction times
        transaction_start_map = {}
        frame, modname = find_first_app_frame_and_name(
            ignores=['sqlalchemy', 'inbox.models.session', 'nylas.logging',
                     'contextlib'])
        funcname = frame.f_code.co_name
        modname = modname.replace(".", "-")
        metric_name = 'db.{}.{}.{}'.format(engine.url.database, modname,
                                           funcname)

        @event.listens_for(session, 'after_begin')
        def after_begin(session, transaction, connection):
            if explicit_begin:
                connection.execute('BEGIN')
            # It's okay to key on the session object here, because each
            # session binds to only one engine/connection. If this changes
            # in the future such that a session may encompass multiple
            # engines, then we'll have to get more sophisticated.
            transaction_start_map[session] = time.time()

        @event.listens_for(session, 'after_commit')
        @event.listens_for(session, 'after_rollback')
        def end(session):
            start_time = transaction_start_map.get(session)
            if not start_time:
                return

            del transaction_start_map[session]

            t = time.time()
            latency = int((t - start_time) * 1000)
            statsd_client.timing(metric_name, latency)
            statsd_client.incr(metric_name)
            if latency > MAX_SANE_TRX_TIME_MS:
                log.warning('Long transaction',
                            latency=latency,
                            modname=modname,
                            funcname=funcname)

    return session
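# Example usage (assumed, not part of the original module): new_session is
# typically paired with an engine created elsewhere; the connection URL and
# the commit/close pattern below are illustrative only.
from sqlalchemy import create_engine


def _example_usage():
    engine = create_engine("mysql+pymysql://user:pass@localhost/inbox")  # hypothetical URL
    session = new_session(engine, versioned=False)
    try:
        # ... issue queries; commit/rollback triggers the timing listeners
        # registered above.
        session.commit()
    finally:
        session.close()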