def __init__(self, request):
    self._request = request
    self._META = request.META
    # only write log if beeline has been initialised
    if beeline.get_beeline():
        beeline.get_beeline().log(request.META)

def _beeline_wrapper(event, context):
    global COLD_START

    # don't blow up the world if the beeline has not been initialized
    if not beeline.get_beeline():
        return handler(event, context)

    root_span = None
    try:
        # Create request context
        request_context = {
            "app.function_name": getattr(context, 'function_name', ""),
            "app.function_version": getattr(context, 'function_version', ""),
            "app.request_id": getattr(context, 'aws_request_id', ""),
            "app.event": event,
            "meta.cold_start": COLD_START,
            "name": handler.__name__,
        }
        lr = LambdaRequest(event)
        root_span = beeline.propagate_and_start_trace(request_context, lr)

        # Actually run the handler
        resp = handler(event, context)

        if resp is not None:
            beeline.add_context_field('app.response', resp)

        return resp
    finally:
        # This remains false for the lifetime of the module
        COLD_START = False

        beeline.finish_trace(root_span)
        # we have to flush events before the lambda returns
        beeline.get_beeline().client.flush()

def inited_app(environ, start_response):
    if not with_flask_tracing.beeline_inited:
        beeline.init(
            writekey=os.environ["HONEYCOMB_KEY"],
            dataset="IFTTT webhooks",
            service_name="fructify",
            presend_hook=presend,
        )
        with_flask_tracing.beeline_inited = True
    try:
        return original_wsgi_app(environ, start_response)
    finally:
        # Always flush because vercel can suspend the process.
        beeline.get_beeline().client.flush()

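# --- Hedged wiring sketch (assumption, not from the original source) ---------
# inited_app above closes over `original_wsgi_app` and a `beeline_inited` flag
# stored on a decorator; one plausible shape of that decorator is below. The
# Flask import and the `app` name are illustrative only.
import flask


def with_flask_tracing(app):
    original_wsgi_app = app.wsgi_app

    def inited_app(environ, start_response):
        ...  # body as defined above, delegating to original_wsgi_app

    app.wsgi_app = inited_app  # every request now flows through the wrapper
    return app


with_flask_tracing.beeline_inited = False
app = with_flask_tracing(flask.Flask(__name__))
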
def _beeline_wrapper(event, context):
    global COLD_START

    # don't blow up the world if the beeline has not been initialized
    if not beeline.get_beeline():
        return handler(event, context)

    try:
        # assume we're going to get bad values sometimes in our headers
        trace_id, parent_id, trace_context = None, None, None
        try:
            trace_id, parent_id, trace_context = _get_trace_data(event)
        except Exception as e:
            beeline.internal.log('error attempting to extract trace context: %s',
                                 beeline.internal.stringify_exception(e))

        with beeline.tracer(name=handler.__name__, trace_id=trace_id, parent_id=parent_id):
            beeline.add_context({
                "app.function_name": getattr(context, 'function_name', ""),
                "app.function_version": getattr(context, 'function_version', ""),
                "app.request_id": getattr(context, 'aws_request_id', ""),
                "app.event": event,
                "meta.cold_start": COLD_START,
            })

            # if there is custom context attached from upstream, add that now
            if isinstance(trace_context, dict):
                for k, v in trace_context.items():
                    beeline.add_trace_field(k, v)

            resp = handler(event, context)

            if resp is not None:
                beeline.add_context_field('app.response', resp)

            return resp
    finally:
        # This remains false for the lifetime of the module
        COLD_START = False

        # we have to flush events before the lambda returns
        beeline.get_beeline().client.flush()

def create_http_event(self, request):
    # if beeline has not been initialised, just execute request
    if not beeline.get_beeline():
        return self.get_response(request)

    # Code to be executed for each request before
    # the view (and later middleware) are called.
    dr = DjangoRequest(request)
    request_context = self.get_context_from_request(request)

    root_span = beeline.propagate_and_start_trace(request_context, dr)

    response = self.get_response(request)

    # Code to be executed for each request/response after
    # the view is called.
    response_context = self.get_context_from_response(request, response)
    beeline.add_context(response_context)

    # Streaming responses return immediately, but iterate over
    # their `streaming_content` until it's empty; only close the
    # trace then, not now.
    def wrap_streaming_content(content):
        for chunk in content:
            yield chunk
        beeline.finish_trace(root_span)

    if response.streaming:
        response.streaming_content = wrap_streaming_content(response.streaming_content)
    else:
        beeline.finish_trace(root_span)

    return response

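# --- Hedged configuration sketch (assumption) --------------------------------
# Middleware providing create_http_event above is normally enabled in the
# Django settings module. The import path below matches the published beeline
# package; a project-local middleware class would be listed the same way.
MIDDLEWARE = [
    "beeline.middleware.django.HoneyMiddleware",
    # ... the project's remaining middleware entries
]
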
def process_view(self, request, view_func, view_args, view_kwargs):
    if beeline.get_beeline():
        try:
            beeline.add_context_field("django.view_func", view_func.__name__)
        except AttributeError:
            pass

def _beeline_wrapper(event, context): global COLD_START # don't blow up the world if the beeline has not been initialized if not beeline.get_beeline(): return handler(event, context) root_span = None try: # Create request context request_context = { "app.function_name": getattr(context, 'function_name', ""), "app.function_version": getattr(context, 'function_version', ""), "app.request_id": getattr(context, 'aws_request_id', ""), "meta.cold_start": COLD_START, "name": handler.__name__ } if record_input: request_context["app.event"] = event lr = LambdaRequest(event) root_span = beeline.propagate_and_start_trace(request_context, lr) # Actually run the handler resp = handler(event, context) if resp is not None and record_output: beeline.add_context_field('app.response', resp) return resp except Exception as e: beeline.add_context({ "app.exception_type": str(type(e)), "app.exception_string": beeline.internal.stringify_exception(e), "app.exception_stacktrace": traceback.format_exc(), }) raise e finally: # This remains false for the lifetime of the module COLD_START = False beeline.finish_trace(root_span) # we have to flush events before the lambda returns beeline.get_beeline().client.flush()
def lambda_handler(event, context):
    trace_context = None
    input = None
    parent_trace = None
    output = {}

    logging.debug(f"event: {json.dumps(event)}")

    init_beeline()

    # Attempt to get trace_context(s) from the input
    input = event.get("Input", None)
    if input:
        trace_context = input.get("trace_context", None)

    # Start trace if it isn't already, otherwise resume
    if trace_context:
        trace_id, parent_id, context = beeline.trace.unmarshal_trace_context(trace_context)
        logging.info(f"Resuming trace: {trace_id}")
        trace = beeline.start_trace(trace_id=trace_id, parent_span_id=parent_id, context=context)

        # add a field to test context propagation
        beeline.add_trace_field(event.get("Path", "UnknownPath").lower(), uuid.uuid4())
        beeline.add_context({"name": event.get("Path", "Missing Path Information")})
        beeline.add_context({"function_name": event.get("Path", "Missing Path Information")})
        random_sleep()
        beeline.finish_span(trace)
    else:
        trace = start_trace()
        beeline.add_trace_field("c3po", "r2d2")
        logging.info("Starting Trace")
        with beeline.tracer(name=event.get("Path", "Missing Path Information")):
            random_sleep()
            trace_context = beeline.get_beeline().tracer_impl.marshal_trace_context()

    # If this is the final step, close the parent trace
    if event.get("Path") == "Step4":
        # start_time is an ISO 8601 timestamp, e.g. 2019-03-26T20:14:13.192Z
        parent_trace_id, parent_parent_id, parent_context_data = beeline.trace.unmarshal_trace_context(trace_context)
        start_time = datetime.strptime(event.get("start_time"), "%Y-%m-%dT%H:%M:%S.%fZ")
        close_final_trace(parent_trace_id, parent_parent_id, parent_context_data, start_time)

    # Close only (send pending)
    beeline.close()

    # Return the trace_context to the SFN
    output["trace_context"] = trace_context
    return output

def test_traced_thread(self):
    self.assertIsNone(beeline.get_beeline())

    @beeline.traced_thread
    def my_sum(a, b):
        return a + b

    # this should not crash if the beeline isn't initialized
    # it should also accept arguments normally and return the function's value
    self.assertEqual(my_sum(1, 2), 3)

def test_tracer_context_manager(self):
    ''' ensure the tracer context manager doesn't break if the beeline is not initialized '''
    self.assertIsNone(beeline.get_beeline())

    def my_sum(a, b):
        with beeline.tracer(name="my_sum"):
            return a + b

    # this should not crash if the beeline isn't initialized
    # it should also accept arguments normally and return the function's value
    self.assertEqual(my_sum(1, 2), 3)

def request(_request, instance, args, kwargs): span = beeline.start_span(context={"meta.type": "http_client"}) b = beeline.get_beeline() if b and b.http_trace_propagation_hook is not None: new_headers = beeline.http_trace_propagation_hook() if new_headers: b.log( "requests lib - adding trace context to outbound request: %s", new_headers) instance.headers.update(new_headers) else: b.log("requests lib - no trace context found") try: resp = None # Required as Python treats the `or` keyword differently in string # interpolation vs. when assigning a variable. method = kwargs.get('method') or args[0] beeline.add_context({ "name": "requests_%s" % method, "request.method": method, "request.url": kwargs.get('url') or args[1], }) resp = _request(*args, **kwargs) return resp except Exception as e: beeline.add_context({ "request.error_type": str(type(e)), "request.error": beeline.internal.stringify_exception(e), }) raise finally: if resp is not None: content_type = resp.headers.get('content-type') if content_type: beeline.add_context_field("response.content_type", content_type) content_length = resp.headers.get('content-length') if content_length: beeline.add_context_field("response.content_length", content_length) if hasattr(resp, 'status_code'): beeline.add_context_field("response.status_code", resp.status_code) beeline.finish_span(span)
def request(_request, instance, args, kwargs): span = beeline.start_span(context={"meta.type": "http_client"}) b = beeline.get_beeline() if b: context = b.tracer_impl.marshal_trace_context() if context: b.log( "requests lib - adding trace context to outbound request: %s", context) instance.headers['X-Honeycomb-Trace'] = context else: b.log("requests lib - no trace context found") try: resp = None beeline.add_context({ "name": "requests_%s" % kwargs.get('method') or args[0], "request.method": kwargs.get('method') or args[0], "request.url": kwargs.get('url') or args[1], }) resp = _request(*args, **kwargs) return resp except Exception as e: beeline.add_context({ "request.error_type": str(type(e)), "request.error": beeline.internal.stringify_exception(e), }) raise finally: if resp: content_type = resp.headers.get('content-type') if content_type: beeline.add_context_field("response.content_type", content_type) content_length = resp.headers.get('content-length') if content_length: beeline.add_context_field("response.content_length", content_length) if hasattr(resp, 'status_code'): beeline.add_context_field("response.status_code", resp.status_code) beeline.finish_span(span)
def _urllibopen(_urlopen, instance, args, kwargs):
    # urlopen accepts either a string URL or a Request object as its first arg.
    # It's easier to process the info contained in the request and modify it
    # by converting the URL string into a Request
    if type(args[0]) != urllib.request.Request:
        args = (urllib.request.Request(args[0]), ) + tuple(args[1:])

    span = beeline.start_span(context={"meta.type": "http_client"})

    b = beeline.get_beeline()
    if b and b.http_trace_propagation_hook is not None:
        new_headers = beeline.http_trace_propagation_hook()
        if new_headers:
            # Merge the new headers into the existing headers for the outbound request
            b.log("urllib lib - adding trace context to outbound request: %s",
                  new_headers)
            args[0].headers.update(new_headers)

    try:
        resp = None
        beeline.add_context({
            "name": "urllib_%s" % args[0].get_method(),
            "request.method": args[0].get_method(),
            "request.uri": args[0].full_url,
        })

        resp = _urlopen(*args, **kwargs)
        return resp
    except Exception as e:
        beeline.add_context({
            "request.error_type": str(type(e)),
            "request.error": beeline.internal.stringify_exception(e),
        })
        raise
    finally:
        if resp:
            beeline.add_context_field("response.status_code", resp.status)
            content_type = resp.getheader('content-type')
            if content_type:
                beeline.add_context_field("response.content_type", content_type)
            content_length = resp.getheader('content-length')
            if content_length:
                beeline.add_context_field("response.content_length", content_length)
        beeline.finish_span(span)

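# --- Hedged configuration sketch (assumption) ---------------------------------
# The http_trace_propagation_hook consulted by the two patched clients above is
# typically supplied when the beeline is initialised. The W3C helper module and
# the keyword name below are based on the published beeline package; treat them
# as assumptions and adjust to the installed version.
import beeline
from beeline.propagation import w3c

beeline.init(
    writekey="YOUR_WRITEKEY",        # placeholder
    dataset="example-dataset",       # placeholder
    service_name="example-service",  # placeholder
    http_trace_propagation_hook=w3c.http_trace_propagation_hook,
)
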
def _urllibopen(_urlopen, instance, args, kwargs):
    # args is a tuple, so build a new one rather than assigning to args[0]
    if type(args[0]) != urllib.request.Request:
        args = (urllib.request.Request(args[0]), ) + tuple(args[1:])

    span = beeline.start_span(context={"meta.type": "http_client"})

    b = beeline.get_beeline()
    if b:
        context = b.tracer_impl.marshal_trace_context()
        if context:
            b.log("urllib lib - adding trace context to outbound request: %s",
                  context)
            args[0].headers['X-Honeycomb-Trace'] = context
        else:
            b.log("urllib lib - no trace context found")

    try:
        resp = None
        beeline.add_context({
            "name": "urllib_%s" % args[0].get_method(),
            "request.method": args[0].get_method(),
            "request.uri": args[0].full_url,
        })

        resp = _urlopen(*args, **kwargs)
        return resp
    except Exception as e:
        beeline.add_context({
            "request.error_type": str(type(e)),
            "request.error": beeline.internal.stringify_exception(e),
        })
        raise
    finally:
        if resp:
            beeline.add_context_field("response.status_code", resp.status)
            content_type = resp.getheader('content-type')
            if content_type:
                beeline.add_context_field("response.content_type", content_type)
            content_length = resp.getheader('content-length')
            if content_length:
                beeline.add_context_field("response.content_length", content_length)
        beeline.finish_span(span)

def create_http_event(self, request):
    # if beeline has not been initialised, just execute request
    if not beeline.get_beeline():
        return self.get_response(request)

    # Code to be executed for each request before
    # the view (and later middleware) are called.
    dr = DjangoRequest(request)
    request_context = self.get_context_from_request(request)

    root_span = beeline.propagate_and_start_trace(request_context, dr)

    response = self.get_response(request)

    # Code to be executed for each request/response after
    # the view is called.
    response_context = self.get_context_from_response(request, response)
    beeline.add_context(response_context)

    beeline.finish_trace(root_span)
    return response

def __call__(self, execute, sql, params, many, context):
    # if beeline has not been initialised, just execute query
    if not beeline.get_beeline():
        return execute(sql, params, many, context)

    vendor = context['connection'].vendor
    trace_name = "django_%s_query" % vendor

    with beeline.tracer(trace_name):
        beeline.add_context({
            "type": "db",
            "db.query": sql,
            "db.query_args": params,
        })
        beeline.add_rollup_field("db.call_count", 1)

        try:
            db_call_start = datetime.datetime.now()
            result = execute(sql, params, many, context)
            db_call_diff = datetime.datetime.now() - db_call_start
            duration = db_call_diff.total_seconds() * 1000
            beeline.add_context_field("db.duration", duration)
            beeline.add_rollup_field("db.total_duration", duration)
        except Exception as e:
            beeline.add_context_field("db.error", str(type(e)))
            beeline.add_context_field("db.error_detail",
                                      beeline.internal.stringify_exception(e))
            raise
        else:
            return result
        finally:
            if vendor in ('postgresql', 'mysql'):
                beeline.add_context({
                    "db.last_insert_id": context['cursor'].cursor.lastrowid,
                    "db.rows_affected": context['cursor'].cursor.rowcount,
                })

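# --- Hedged usage sketch (assumption) ------------------------------------------
# A callable with the __call__ signature above is meant to be installed through
# Django's execute_wrapper API so every query in the block runs through it.
# `HoneyDBWrapper` stands in for whatever class defines __call__ above, and
# `get_response(request)` is a hypothetical request-handling call.
from contextlib import ExitStack

from django.db import connections

db_wrapper = HoneyDBWrapper()
with ExitStack() as stack:
    for connection in connections.all():
        stack.enter_context(connection.execute_wrapper(db_wrapper))
    response = get_response(request)
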
def honeycomb_send(data):
    """ ships a list or a dict of data to honeycomb """
    beeline.init(writekey=HONEY_WRITEKEY,
                 dataset=DATASET_NAME,
                 debug=HONEY_DEBUG_ENABLED,
                 block_on_send=True)
    LOG.debug(f'sending to honeycomb: {data}')
    client = beeline.get_beeline().client
    if not isinstance(data, list):
        data = [data]
    for eventdata in data:
        ev = client.new_event()
        ev.add(eventdata)
        ev.send()
    client.flush()
    LOG.info(f'{len(data)} events shipped to honeycomb ..')

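# --- Hedged usage sketch (assumption) ------------------------------------------
# honeycomb_send above accepts either a single dict or a list of dicts; the
# field names here are illustrative only.
honeycomb_send({"job": "nightly-backup", "duration_ms": 5321, "status": "ok"})
honeycomb_send([
    {"job": "sync", "status": "ok"},
    {"job": "sync", "status": "retried"},
])
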
def send_event():
    bl = beeline.get_beeline()
    if bl:
        return bl.send_event()

def log(msg, *args, **kwargs):
    bl = beeline.get_beeline()
    if bl:
        bl.log(msg, *args, **kwargs)

def send_all():
    bl = beeline.get_beeline()
    if bl:
        return bl.send_all()

    '%(asctime)s [%(processName)s] %(levelname)s %(message)s'))
logger.addHandler(ch)

# when running inside docker, the default SIGINT signal handler is not installed,
# so the KeyboardInterrupt is not triggered. This should install it manually:
# https://stackoverflow.com/a/40785230
signal.signal(signal.SIGINT, signal.default_int_handler)

# application performance monitoring:
HONEYCOMP_APM_API_KEY = os.environ.get('HONEYCOMP_APM_API_KEY')
beeline_client = None
if HONEYCOMP_APM_API_KEY:
    beeline.init(writekey=HONEYCOMP_APM_API_KEY, dataset='OpenEO - workers', service_name='OpenEO')
    beeline_client = beeline.get_beeline().client


def _feed_monitoring_system():
    if not HONEYCOMP_APM_API_KEY:
        return

    # https://docs.python.org/3/library/resource.html
    rusage_parent = resource.getrusage(resource.RUSAGE_SELF)
    rusage_children = resource.getrusage(resource.RUSAGE_CHILDREN)
    metric_peak_memory = (rusage_parent.ru_maxrss + rusage_children.ru_maxrss) * resource.getpagesize()
    metric_cpu = psutil.cpu_percent()
    mem = psutil.virtual_memory()

def process_exception(self, request, exception):
    if beeline.get_beeline():
        beeline.add_context_field(
            "request.error_detail",
            beeline.internal.stringify_exception(exception))