def _flask_dispatch(f, *args, **keywords):
    """Time one Flask dispatch and report request/latency telemetry.

    Counts the request, times the wrapped dispatch callable *f*, and records
    both total and net latency (as returned by Timing.pop_timer).
    """
    # Setup runs BEFORE the try block: in the original, a failure in count()
    # or push_timer() would still reach the finally clause and pop a timer
    # that was never pushed, unbalancing the timer stack.
    telemetry.count('web.requests')
    Timing.push_timer()
    try:
        return f(*args, **keywords)
    finally:
        # Runs on success and on exception so the timer stack stays balanced.
        elapsed, net_elapsed = Timing.pop_timer()
        telemetry.record('web.response.latency', elapsed)
        telemetry.record('app.response.latency', net_elapsed)
def process_response(self, request, response):
    """Middleware hook: record latency and status-class telemetry.

    Returns the response unchanged.
    """
    # NOTE(review): the timer is popped even when is_active is False, i.e.
    # when process_request never pushed one — confirm Timing.pop_timer()
    # tolerates an unbalanced pop.
    elapsed, net_elapsed = Timing.pop_timer()
    if self.is_active:
        telemetry.record("web.response.latency", elapsed)
        telemetry.record("app.response.latency", net_elapsed)
        # Bucket the status code into its class (2xx, 4xx, ...).
        telemetry.count("web.status.%ixx" % floor(response.status_code / 100))
        context.pop_state(STATE_NAME)
        self.is_active = False
    else:
        # logger.warn is a deprecated alias; warning() is the supported
        # spelling on Python 3.
        logger.warning("process_response without request")
    return response
def process_response(self, request, response):
    """Middleware hook: record latency and status-class telemetry.

    Returns the response unchanged.
    """
    # NOTE(review): the timer is popped even when is_active is False, i.e.
    # when process_request never pushed one — confirm Timing.pop_timer()
    # tolerates an unbalanced pop.
    elapsed, net_elapsed = Timing.pop_timer()
    if self.is_active:
        telemetry.record('web.response.latency', elapsed)
        telemetry.record('app.response.latency', net_elapsed)
        # Bucket the status code into its class (2xx, 4xx, ...).
        telemetry.count('web.status.%ixx' % floor(response.status_code / 100))
        context.pop_state(STATE_NAME)
        self.is_active = False
    else:
        # logger.warn is a deprecated alias; warning() is the supported
        # spelling on Python 3.
        logger.warning('process_response without request')
    return response
def complex_wrapper(func, *args, **keywords):
    """Call *func*, optionally wrapped in count/latency instrumentation.

    When instrumentation is disabled for the current state, this is a plain
    pass-through call.
    """
    if not _should_be_instrumented(state, enable_if, disable_if):
        # Guard clause: skip all instrumentation work.
        return func(*args, **keywords)
    Timing.push_timer()
    context.push_state(state)
    try:
        return func(*args, **keywords)
    finally:
        # Teardown runs on success and on exception.
        elapsed, _ = Timing.pop_timer()
        count(metric + 'requests', reporter=reporter)
        record(metric + 'latency', elapsed, reporter=reporter)
        context.pop_state(state)
def decorator(*args, **keywords):
    """Call the wrapped gunicorn hook, reporting the worker as active.

    Returns whatever the wrapped callable returns.
    """
    response = f(*args, **keywords)
    # Increment the worker count once per minute since that is our rollup interval
    try:
        now = int(time.time())
        # Start of the current minute: report at most once per interval.
        threshold = int(60 * floor(now / 60))
        if not __globals.last_reported or __globals.last_reported < threshold:
            telemetry.count("gunicorn.active_workers", 1, reporter='gunicorn')
            __globals.last_reported = now
    except Exception:
        # The bound exception variable was unused; logger.exception already
        # records the active traceback. Telemetry must never break the hook.
        logger.exception("Instrumentation error while reporting gunicorn.active_workers")
    return response
def _worker_notify(func, *args, **keywords):
    """Call the wrapped gunicorn hook, reporting the worker as active.

    Returns whatever the wrapped callable returns.
    """
    response = func(*args, **keywords)
    # Increment the worker count once per minute since that is our rollup interval
    try:
        now = int(time.time())
        # Start of the current minute: report at most once per interval.
        threshold = int(60 * floor(now / 60))
        if not __globals.last_reported or __globals.last_reported < threshold:
            telemetry.count("gunicorn.active_workers", 1, reporter='gunicorn')
            __globals.last_reported = now
    except Exception:
        # The bound exception variable was unused; logger.exception already
        # records the active traceback. Telemetry must never break the hook.
        logger.exception(
            "Instrumentation error while reporting gunicorn.active_workers")
    return response
def decorator(*args, **keywords):
    """Flask dispatch wrapper: tag route/method and time the request."""
    # Imported lazily so the module does not require Flask at import time.
    from flask import request
    route = request.url_rule.rule if request.url_rule else None
    # Setup runs BEFORE the try block: in the original, a failure in the
    # import or the push_tag/count/push_timer calls would still reach the
    # finally clause and pop timers/tags that were never pushed.
    context.push_tag('web.route', route)
    context.push_tag('web.method', request.method)
    telemetry.count('web.requests')
    Timing.push_timer()
    try:
        return f(*args, **keywords)
    finally:
        elapsed, net_elapsed = Timing.pop_timer()
        telemetry.record('web.response.latency', elapsed)
        telemetry.record('app.response.latency', net_elapsed)
        # Pop both tags pushed above (route, then method).
        context.pop_tag()
        context.pop_tag()
def count(metric, incr=1):
    """Increment the count for the given metric by the given increment.

    Example:
        telemetry.count('requests')
        telemetry.count('bytesReceived', len(request.content))

    :param metric: the given metric name
    :param incr: the value by which it is incremented
    """
    return telemetry.count(metric, incr)
def decorator(*args, **keywords):
    """Instrument a urllib open call: count, time, and wrap the response."""
    # The type of the returned instance depends on the url, but is typically
    # urllib.addinfourl for the baked-in protocols.
    if context.has_state('external'):
        # Avoid double counting nested calls; all metrics are reported
        # relative to the outermost operation.
        return f(*args, **keywords)
    url = get_parameter(1, 'fullurl', *args, **keywords)
    full_url_getter = getattr(url, 'get_full_url', None)
    if full_url_getter is not None:
        url = full_url_getter()
    scheme = url.partition(':')[0] if ':' in url else 'unknown'
    Timing.push_timer()
    try:
        context.push_state('external')
        telemetry.count('external.{}.requests'.format(scheme))
        resp = f(*args, **keywords)
        if resp.getcode():  # Not meaningful for ftp etc
            telemetry.count(
                'external.{}.status.%ixx'.format(scheme) % floor(resp.getcode() / 100))
    except:
        telemetry.count('external.{}.errors'.format(scheme))
        raise
    finally:
        context.pop_state('external')
        elapsed, _ = Timing.pop_timer()
        telemetry.record('external.{}.response.latency'.format(scheme), elapsed)
    # Return a wrapped object so we can time subsequent read, readline etc calls
    return _response_wrapper(scheme, resp)
def _urllib_open_wrapper(func, *args, **keywords):
    """Wraps urllib.request.url_open"""
    if not _should_be_instrumented(
            state='external', enable_if='web', disable_if='model'):
        # Instrumentation disabled: plain pass-through call.
        return func(*args, **keywords)
    url = get_parameter(1, 'fullurl', *args, **keywords)
    full_url_getter = getattr(url, 'get_full_url', None)
    if full_url_getter is not None:
        url = full_url_getter()
    scheme = url.partition(':')[0] if ':' in url else 'unknown'
    Timing.push_timer()
    try:
        context.push_state('external')
        telemetry.count('external.{}.requests'.format(scheme))
        resp = func(*args, **keywords)
        if resp.getcode():  # Not meaningful for ftp etc
            telemetry.count(
                'external.{}.status.%ixx'.format(scheme) % floor(resp.getcode() / 100))
    except:
        telemetry.count('external.{}.errors'.format(scheme))
        raise
    finally:
        context.pop_state('external')
        elapsed, _ = Timing.pop_timer()
        telemetry.record('external.{}.response.latency'.format(scheme), elapsed)
    # Return a wrapped object so we can time subsequent read, readline etc calls
    return _response_wrapper(scheme, resp)
def decorator(*args, **keywords):
    """Time a web handler call, tagging route/method and counting errors.

    Returns the handler's response; re-raises any handler exception after
    counting it as web.errors.
    """
    try:
        # Positional convention of the wrapped call: args[1] is the HTTP
        # method, args[2] the matched route (may be falsy).
        route = args[2] if args[2] else None
        context.push_tag('web.route', route)
        context.push_tag('web.method', args[1])
        telemetry.count('web.requests')
        Timing.push_timer()
        # call the request function
        response = f(*args, **keywords)
        if response.status:
            # response.status starts with the status digit, e.g. '2' of '200'.
            telemetry.count('web.status.%sxx' % response.status[0:1])
        return response
    except Exception:
        telemetry.count('web.errors')
        # Bare raise re-raises the active exception with its original
        # traceback; the original "raise e" needlessly re-raised by value.
        raise
    finally:
        try:
            elapsed, net_elapsed = Timing.pop_timer()
            telemetry.record('web.response.latency', elapsed)
            telemetry.record('app.response.latency', net_elapsed)
            try:
                context.pop_tag()
                context.pop_tag()
            except:
                logger.exception('Problem popping contexts')
        except:
            logger.exception('Teardown handler failed')
            raise
def decorator(*args, **keywords):
    """Count, time, and classify an outbound HTTP call made through f."""
    telemetry.count('external.http.requests')
    Timing.push_timer()
    try:
        result = f(*args, **keywords)
        # Bucket the status code into its class (2xx, 4xx, ...).
        telemetry.count('external.http.status.%ixx' % floor(result.status_code / 100))
        return result
    except:
        telemetry.count('external.http.errors')
        raise
    finally:
        # Latency is recorded whether the call succeeded or failed.
        elapsed, _ = Timing.pop_timer()
        telemetry.record('external.http.response.latency', elapsed)
def decorator(*args, **keywords):
    """Count, time, and tag (url/method) an outbound HTTP call through f."""
    method = get_parameter(0, 'method', *args, **keywords)
    url = get_parameter(1, 'url', *args, **keywords)
    tags = [('external.url', url), ('external.method', method)]
    with context.add_all_tags(tags):
        telemetry.count('external.http.requests')
        Timing.push_timer()
        try:
            result = f(*args, **keywords)
            # Bucket the status code into its class (2xx, 4xx, ...).
            telemetry.count('external.http.status.%ixx' % floor(result.status_code / 100))
            return result
        except:
            telemetry.count('external.http.errors')
            raise
        finally:
            # Latency is recorded whether the call succeeded or failed.
            elapsed, _ = Timing.pop_timer()
            telemetry.record('external.http.response.latency', elapsed)
def _session_send_wrapper(func, *args, **keywords):
    """Instrument requests.Session.send-style calls with HTTP telemetry."""
    instrumented = _should_be_instrumented(
        state="external", enable_if="web", disable_if="model")
    if not instrumented:
        # Instrumentation disabled: plain pass-through call.
        return func(*args, **keywords)
    telemetry.count("external.http.requests")
    Timing.push_timer()
    try:
        context.push_state("external")
        resp = func(*args, **keywords)
        # Bucket the status code into its class (2xx, 4xx, ...).
        telemetry.count("external.http.status.%ixx" % floor(resp.status_code / 100))
        return resp
    except:
        telemetry.count("external.http.errors")
        raise
    finally:
        context.pop_state("external")
        elapsed, _ = Timing.pop_timer()
        telemetry.record("external.http.response.latency", elapsed)
def _session_send_wrapper(func, *args, **keywords):
    """Instrument requests.Session.send-style calls with HTTP telemetry."""
    instrumented = _should_be_instrumented(
        state='external', enable_if='web', disable_if='model')
    if not instrumented:
        # Instrumentation disabled: plain pass-through call.
        return func(*args, **keywords)
    telemetry.count('external.http.requests')
    Timing.push_timer()
    try:
        context.push_state('external')
        resp = func(*args, **keywords)
        # Bucket the status code into its class (2xx, 4xx, ...).
        telemetry.count('external.http.status.%ixx' % floor(resp.status_code / 100))
        return resp
    except:
        telemetry.count('external.http.errors')
        raise
    finally:
        context.pop_state('external')
        elapsed, _ = Timing.pop_timer()
        telemetry.record('external.http.response.latency', elapsed)
def decorator(*args, **keywords):
    """Time a request handler, counting requests, status classes, and errors.

    Returns the handler's response; re-raises any handler exception after
    counting it as web.errors.
    """
    try:
        telemetry.count('web.requests')
        Timing.push_timer()
        # call the request function
        response = f(*args, **keywords)
        if response.status:
            # response.status starts with the status digit, e.g. '2' of '200'.
            telemetry.count('web.status.%sxx' % response.status[0:1])
        return response
    except Exception:
        telemetry.count('web.errors')
        # Bare raise re-raises the active exception with its original
        # traceback; the original "raise e" needlessly re-raised by value.
        raise
    finally:
        try:
            elapsed, net_elapsed = Timing.pop_timer()
            telemetry.record('web.response.latency', elapsed)
            telemetry.record('app.response.latency', net_elapsed)
        except:
            logger.exception('Teardown handler failed')
            raise
def increment_wrapper(func, *args, **kwargs):
    """Emit the configured counter metric, then delegate to *func*."""
    count(metric=metric, incr=increment, reporter=reporter)
    return func(*args, **kwargs)
def process_exception(self, request, exception):
    """Middleware hook: count an unhandled exception as a web error."""
    logger.debug('process_exception')
    if not self.is_active:
        # Only count errors for requests that process_request saw.
        return
    telemetry.count('web.errors')
def _teardown_request(e=None):
    """Teardown hook: count a web error when an exception reached teardown."""
    if not e:
        return
    telemetry.count('web.errors')
def _after_request(response):
    """After-request hook: count the response's status class (e.g. 2xx).

    We need this since the response object isn't available in the main
    function wrapper below (flask_dispatch). Might not get called in the
    event of an application error. Returns the response unchanged.
    """
    code = response.status_code
    if code:
        telemetry.count('web.status.%ixx' % floor(code / 100))
    return response
def process_request(self, request):
    """Middleware hook: mark the request active and start instrumentation.

    Pushes the timer and state that process_response pops again; the
    statement order is significant, so it is preserved as-is.
    """
    self.is_active = True
    Timing.push_timer()
    context.push_state(STATE_NAME)
    telemetry.count('web.requests')
def process_request(self, request):
    """Middleware hook: mark the request active and start instrumentation.

    Pushes the timer and state that process_response pops again; the
    statement order is significant, so it is preserved as-is.
    """
    self.is_active = True
    Timing.push_timer()
    context.push_state(STATE_NAME)
    telemetry.count("web.requests")
def process_exception(self, request, exception):
    """Middleware hook: count an unhandled exception as a web error."""
    logger.debug("process_exception")
    if not self.is_active:
        # Only count errors for requests that process_request saw.
        return
    telemetry.count("web.errors")