def collect(self):
    """
    Collects all metrics attached to this metricset, and returns them as a
    single dict together with a timestamp in microsecond precision:

        {
            "samples": {"metric.name": {"value": some_float}, ...},
            "timestamp": unix epoch in microsecond precision
        }

    Returns None when there are no samples to report.
    """
    samples = {}
    # counters and gauges share the same serialization shape
    for source in (self._counters, self._gauges):
        if not source:
            continue
        for label, metric in compat.iteritems(source):
            samples[label] = {"value": metric.val}
    if not samples:
        return None
    return {"samples": samples, "timestamp": int(time.time() * 1000000)}
def collect(self):
    """
    Collects all metrics attached to this metricset, and returns it as a generator
    with one or more elements. More than one element is returned if labels are used.

    The format of the return value should be

        {
            "samples": {"metric.name": {"value": some_float}, ...},
            "timestamp": unix epoch in microsecond precision
        }
    """
    # hook for subclasses to update metrics right before collection
    self.before_collect()
    timestamp = int(time.time() * 1000000)
    # samples are grouped by label tuple: one document is emitted per label set
    samples = defaultdict(dict)
    if self._counters:
        # iterate over a copy of the dict to avoid threading issues, see #717
        for (name, labels), c in compat.iteritems(self._counters.copy()):
            if c is not noop_metric:
                val = c.val
                # zero-valued metrics that reset on collect are skipped to avoid noise
                if val or not c.reset_on_collect:
                    samples[labels].update({name: {"value": val}})
                if c.reset_on_collect:
                    c.reset()
    if self._gauges:
        for (name, labels), g in compat.iteritems(self._gauges.copy()):
            if g is not noop_metric:
                val = g.val
                if val or not g.reset_on_collect:
                    samples[labels].update({name: {"value": val}})
                if g.reset_on_collect:
                    g.reset()
    if self._timers:
        for (name, labels), t in compat.iteritems(self._timers.copy()):
            if t is not noop_metric:
                # timers expose a (total_duration, count) pair
                val, count = t.val
                if val or not t.reset_on_collect:
                    # durations are reported in microseconds
                    samples[labels].update({name + ".sum.us": {"value": int(val * 1000000)}})
                    samples[labels].update({name + ".count": {"value": count}})
                if t.reset_on_collect:
                    t.reset()
    if samples:
        for labels, sample in compat.iteritems(samples):
            result = {"samples": sample, "timestamp": timestamp}
            if labels:
                result["tags"] = {k: v for k, v in labels}
            # hook for subclasses to post-process each document before emission
            yield self.before_yield(result)
def test_metrics_reset_after_collect(elasticapm_client):
    """Breakdown counters/timers must be zeroed once collect() has been drained."""
    elasticapm_client.begin_transaction("request")
    with elasticapm.capture_span("test", span_type="db", span_subtype="mysql", duration=5):
        pass
    elasticapm_client.end_transaction("test", "OK", duration=15)
    breakdown = elasticapm_client._metrics.get_metricset("elasticapm.metrics.sets.breakdown.BreakdownMetricSet")
    # before collection, every metric should hold non-zero data
    for _, counter in compat.iteritems(breakdown._counters):
        assert counter.val != 0
    for _, timer in compat.iteritems(breakdown._timers):
        assert timer.val != (0, 0)
    # collect() is a generator; draining it triggers the reset
    list(breakdown.collect())
    for _, counter in compat.iteritems(breakdown._counters):
        assert counter.val == 0
    for _, timer in compat.iteritems(breakdown._timers):
        assert timer.val == (0, 0)
def varmap(func, var, context=None, name=None, **kwargs):
    """
    Executes ``func(key_name, value)`` on all values,
    recursively discovering dict and list scoped values.
    """
    if context is None:
        context = set()
    objid = id(var)
    if objid in context:
        # already visited: break reference cycles instead of recursing forever
        return func(name, "<...>", **kwargs)
    context.add(objid)
    if isinstance(var, dict):
        # iterate over a copy of the dictionary to avoid "dictionary changed size during iteration" issues
        mapped = dict((k, varmap(func, v, context, k, **kwargs)) for k, v in compat.iteritems(var.copy()))
        ret = func(name, mapped, **kwargs)
    elif isinstance(var, (list, tuple)):
        ret = func(name, [varmap(func, item, context, name, **kwargs) for item in var], **kwargs)
    else:
        ret = func(name, var, **kwargs)
    context.remove(objid)
    return ret
def add_context_lines_to_frames(client, event):
    # Group frames by source file before reading from disk. This should help
    # with utilizing the disk cache better.
    #
    # TODO: further optimize by only opening each file once and reading all needed source
    # TODO: blocks at once.
    per_file = defaultdict(list)

    def group_by_file(frame):
        # only frames carrying context_metadata should get source context lines
        if "context_metadata" in frame:
            per_file[frame["context_metadata"][0]].append(frame)

    _process_stack_frames(event, group_by_file)
    for filename, frames in compat.iteritems(per_file):
        for frame in frames:
            # context_metadata key has been set in elasticapm.utils.stacks.get_frame_info for
            # all frames for which we should gather source code context lines
            fname, lineno, context_lines, loader, module_name = frame.pop("context_metadata")
            pre_context, context_line, post_context = get_lines_from_file(
                fname, lineno, context_lines, loader, module_name
            )
            if context_line:
                frame["pre_context"] = pre_context
                frame["context_line"] = context_line
                frame["post_context"] = post_context
    return event
def update_config(self):
    """
    Poll the transport for a new remote configuration version and apply it.

    Returns the delay until the next poll, as reported by the transport.
    """
    if not self.transport:
        logger.warning("No transport set for config updates, skipping")
        return
    logger.debug("Checking for new config...")
    service_keys = {"name": self.service_name}
    if self.environment:
        service_keys["environment"] = self.environment
    keys = {"service": service_keys}
    new_version, new_config, next_run = self.transport.get_config(self.config_version, keys)
    if new_version and new_config:
        errors = self.update(new_version, **new_config)
        if errors:
            logger.error("Error applying new configuration: %s", repr(errors))
        else:
            applied = "; ".join(
                "%s=%s" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)
            )
            logger.info("Applied new remote configuration: %s", applied)
    elif new_version == self.config_version:
        logger.debug("Remote config unchanged")
    elif not new_config and self.changed:
        # the remote config was removed upstream: fall back to the local values
        logger.debug("Remote config disappeared, resetting to original")
        self.reset()
    return next_run
def get_url_dict(event: dict) -> dict:
    """
    Reconstruct the request URL from an API Gateway event.

    :param event: the Lambda invocation event (REST v1 or HTTP v2 payload format)
    :return: a dict with ``full``, ``protocol``, ``hostname``, ``pathname`` and
             optionally ``port`` / ``search`` keys
    """
    headers = event.get("headers", {})
    protocol = headers.get("X-Forwarded-Proto", headers.get("x-forwarded-proto", "https"))
    host = headers.get("Host", headers.get("host", ""))
    stage = "/" + (nested_key(event, "requestContext", "stage") or "")
    path = event.get("path", event.get("rawPath", "").split(stage)[-1])
    port = headers.get("X-Forwarded-Port", headers.get("x-forwarded-port"))
    query = ""
    if "rawQueryString" in event:
        query = event["rawQueryString"]
        # BUG FIX: rawQueryString does not include the leading "?", so the
        # reconstructed URL previously read ".../patha=1" instead of ".../path?a=1"
        if query:
            query = "?" + query
    elif event.get("queryStringParameters"):
        # BUG FIX: join multiple parameters with "&"; previously they were
        # concatenated with no separator, producing e.g. "?a=1b=2"
        query = "?" + "&".join(
            "{}={}".format(k, v) for k, v in compat.iteritems(event["queryStringParameters"])
        )
    url = protocol + "://" + host + stage + path + query
    url_dict = {
        "full": encoding.keyword_field(url),
        "protocol": protocol,
        "hostname": encoding.keyword_field(host),
        "pathname": encoding.keyword_field(stage + path),
    }
    if port:
        url_dict["port"] = port
    if query:
        url_dict["search"] = encoding.keyword_field(query)
    return url_dict
def update_config(agent):
    """
    Poll the agent's transport for a new remote configuration and apply it.

    Returns the delay until the next poll, as reported by the transport.
    """
    logger.debug("Checking for new config...")
    transport = agent._transport
    service_keys = {"name": agent.config.service_name}
    if agent.config.environment:
        service_keys["environment"] = agent.config.environment
    keys = {"service": service_keys}
    new_version, new_config, next_run = transport.get_config(agent.config.config_version, keys)
    if new_version and new_config:
        errors = agent.config.update(new_version, **new_config)
        if errors:
            logger.error("Error applying new configuration: %s", repr(errors))
        else:
            applied = "; ".join(
                "%s=%s" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)
            )
            logger.info("Applied new configuration: %s", applied)
    elif new_version == agent.config.config_version:
        logger.debug("Remote config unchanged")
    elif not new_config and agent.config.changed:
        # remote config was removed upstream: fall back to the local values
        logger.debug("Remote config disappeared, resetting to original")
        agent.config.reset()
    return next_run
def end(self, skip_frames=0, duration=None):
    """
    End this transaction, record its duration and update the transaction and
    breakdown metric sets.

    :param skip_frames: number of stack frames to skip (unused in this body)
    :param duration: explicit duration; when None it is computed from start_time
    """
    self.duration = duration if duration is not None else (_time_func() - self.start_time)
    if self._transaction_metrics:
        self._transaction_metrics.timer(
            "transaction.duration",
            reset_on_collect=True,
            **{"transaction.name": self.name, "transaction.type": self.transaction_type}
        ).update(self.duration)
    if self._breakdown:
        # flush per-(type, subtype) span self-time timers into the breakdown metricset
        for (span_type, span_subtype), timer in compat.iteritems(self._span_timers):
            labels = {
                "span.type": span_type,
                "transaction.name": self.name,
                "transaction.type": self.transaction_type,
            }
            if span_subtype:
                labels["span.subtype"] = span_subtype
            self._breakdown.timer("span.self_time", reset_on_collect=True, **labels).update(*timer.val)
        labels = {"transaction.name": self.name, "transaction.type": self.transaction_type}
        if self.is_sampled:
            self._breakdown.counter("transaction.breakdown.count", reset_on_collect=True, **labels).inc()
            # "app" self-time: transaction duration minus time spent in child spans
            self._breakdown.timer(
                "span.self_time",
                reset_on_collect=True,
                **{"span.type": "app", "transaction.name": self.name, "transaction.type": self.transaction_type}
            ).update(self.duration - self._child_durations.duration)
def transform(value, stack=None, context=None):
    """
    Recursively coerce ``value`` into JSON-serializable primitives, guarding
    against reference cycles via ``context`` (ids of objects in progress) and
    ``stack`` (the chain of parent containers).
    """
    # TODO: make this extendable
    if context is None:
        context = {}
    if stack is None:
        stack = []
    objid = id(value)
    if objid in context:
        # this object is already being transformed further up the call chain
        return "<...>"
    context[objid] = 1
    transform_rec = lambda o: transform(o, stack + [value], context)
    if any(value is s for s in stack):
        ret = "cycle"
    elif isinstance(value, (tuple, list, set, frozenset)):
        try:
            ret = type(value)(transform_rec(o) for o in value)
        except Exception:
            # We may be dealing with a namedtuple, whose constructor does not
            # accept a single iterable; fall back to a list subclass that
            # keeps the original type name
            class value_type(list):
                __name__ = type(value).__name__

            ret = value_type(transform_rec(o) for o in value)
    elif isinstance(value, uuid.UUID):
        try:
            ret = repr(value)
        except AttributeError:
            ret = None
    elif isinstance(value, dict):
        ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))
    elif isinstance(value, compat.text_type):
        ret = to_unicode(value)
    elif isinstance(value, compat.binary_type):
        ret = to_string(value)
    elif not isinstance(value, compat.class_types) and _has_elasticapm_metadata(value):
        # objects can provide their own serialization via __elasticapm__()
        ret = transform_rec(value.__elasticapm__())
    elif isinstance(value, bool):
        ret = bool(value)
    elif isinstance(value, float):
        ret = float(value)
    elif isinstance(value, int):
        ret = int(value)
    elif compat.PY2 and isinstance(value, long):  # noqa F821
        ret = long(value)  # noqa F821
    elif value is not None:
        try:
            ret = transform(repr(value))
        except Exception:
            # It's common case that a model's __unicode__ definition may try to query the database
            # which if it was not cleaned up correctly, would hit a transaction aborted exception
            ret = u"<BadRepr: %s>" % type(value)
    else:
        ret = None
    del context[objid]
    return ret
def __init__(self, app, config, client_cls=Client):
    """
    Build the APM client from all config entries prefixed with "elasticapm."
    (prefix stripped) and hand it to the base middleware together with ``app``.
    """
    prefix = "elasticapm."
    client_config = {}
    for key, val in compat.iteritems(config):
        if key.startswith(prefix):
            client_config[key[len(prefix):]] = val
    client = client_cls(**client_config)
    super(ElasticAPM, self).__init__(app, client)
def get_data_from_request(self, request, event_type):
    """
    Build the request-context dict for an event from a Django request object.

    :param request: the Django request
    :param event_type: event kind, matched against the capture_body setting
    """
    result = {
        "env": dict(get_environ(request.META)),
        "method": request.method,
        "socket": {"remote_address": request.META.get("REMOTE_ADDR"), "encrypted": request.is_secure()},
        "cookies": dict(request.COOKIES),
    }
    if self.config.capture_headers:
        request_headers = dict(get_headers(request.META))
        # header values must be strings; coerce numeric values
        for key, value in request_headers.items():
            if isinstance(value, (int, float)):
                request_headers[key] = str(value)
        result["headers"] = request_headers
    if request.method in constants.HTTP_WITH_BODY:
        content_type = request.META.get("CONTENT_TYPE")
        if content_type == "application/x-www-form-urlencoded":
            data = compat.multidict_to_dict(request.POST)
        elif content_type and content_type.startswith("multipart/form-data"):
            data = compat.multidict_to_dict(request.POST)
            if request.FILES:
                # only record file names, never file contents
                data["_files"] = {field: file.name for field, file in compat.iteritems(request.FILES)}
        else:
            try:
                data = request.body
            except Exception as e:
                self.logger.debug("Can't capture request body: %s", compat.text_type(e))
                data = "<unavailable>"
        capture_body = self.config.capture_body in ("all", event_type)
        # empty bodies are passed through even when capture is disabled
        result["body"] = data if (capture_body or not data) else "[REDACTED]"
    if hasattr(request, "get_raw_uri"):
        # added in Django 1.9
        url = request.get_raw_uri()
    else:
        try:
            # Requires host to be in ALLOWED_HOSTS, might throw a
            # DisallowedHost exception
            url = request.build_absolute_uri()
        except DisallowedHost:
            # We can't figure out the real URL, so we have to set it to
            # DisallowedHost
            result["url"] = {"full": "DisallowedHost"}
            url = None
    if url:
        result["url"] = get_url_dict(url)
    return result
def collect(self):
    """
    Collect metrics from all registered metric sets and queues them for sending

    :return:
    """
    logger.debug("Collecting metrics")
    # each metricset yields zero or more documents; queue every one of them
    for _, metricset in compat.iteritems(self._metricsets):
        for document in metricset.collect():
            self._queue_func(constants.METRICSET, document)
def get_headers(environ):
    """
    Yield (name, value) pairs for proper HTTP headers found in a WSGI environ,
    with names lower-cased and underscores replaced by dashes.
    """
    special = ("CONTENT_TYPE", "CONTENT_LENGTH")
    for key, value in compat.iteritems(environ):
        key = str(key)
        if key in special:
            yield key.replace("_", "-").lower(), value
        elif key.startswith("HTTP_") and key not in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
            # strip the "HTTP_" prefix added by the WSGI server
            yield key[5:].replace("_", "-").lower(), value
def collect(self):
    """
    Collect metrics from all registered metric sets

    :return:
    """
    logger.debug("Collecting metrics")
    for _, metricset in compat.iteritems(self._metricsets):
        document = metricset.collect()
        if not document:
            # metricsets without samples return a falsy value; skip them
            continue
        self._queue_func(constants.METRICSET, document)
def collect(self):
    """
    Collect metrics from all registered metric sets and queues them for sending

    :return:
    """
    # no metrics are gathered while the client is not recording
    if not self.client.config.is_recording:
        return
    logger.debug("Collecting metrics")
    for _, metricset in compat.iteritems(self._metricsets):
        for document in metricset.collect():
            self.client.queue(constants.METRICSET, document)
def get_headers(environ):
    """
    Yield (name, value) pairs for proper HTTP headers found in a WSGI environ,
    with names lower-cased and underscores replaced by dashes.
    """
    special = ('CONTENT_TYPE', 'CONTENT_LENGTH')
    for key, value in compat.iteritems(environ):
        key = str(key)
        if key in special:
            yield key.replace('_', '-').lower(), value
        elif key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
            # strip the 'HTTP_' prefix added by the WSGI server
            yield key[5:].replace('_', '-').lower(), value
def test_metrics_reset_after_collect(elasticapm_client):
    """Both breakdown and transaction metricsets must be zeroed after collect()."""
    elasticapm_client.begin_transaction("request")
    time.sleep(0.005)
    with elasticapm.capture_span("test", span_type="db", span_subtype="mysql"):
        time.sleep(0.005)
    time.sleep(0.005)
    elasticapm_client.end_transaction("test", "OK")
    breakdown = elasticapm_client._metrics.get_metricset("elasticapm.metrics.sets.breakdown.BreakdownMetricSet")
    transaction_metrics = elasticapm_client._metrics.get_metricset(
        "elasticapm.metrics.sets.transactions.TransactionsMetricSet"
    )
    for metricset in (breakdown, transaction_metrics):
        # before collection, every metric should hold non-zero data
        for _, counter in compat.iteritems(metricset._counters):
            assert counter.val != 0
        for _, timer in compat.iteritems(metricset._timers):
            assert timer.val != (0, 0)
        # collect() is a generator; draining it triggers the reset
        list(metricset.collect())
        for _, counter in compat.iteritems(metricset._counters):
            assert counter.val == 0
        for _, timer in compat.iteritems(metricset._timers):
            assert timer.val == (0, 0)
def get_data_from_request(self, request, capture_body=False):
    """
    Build the request-context dict for an event from a Django request object.

    :param request: the Django request
    :param capture_body: when False, non-empty bodies are replaced with '[REDACTED]'
    """
    result = {
        'env': dict(get_environ(request.META)),
        'headers': dict(get_headers(request.META)),
        'method': request.method,
        'socket': {'remote_address': request.META.get('REMOTE_ADDR'), 'encrypted': request.is_secure()},
        'cookies': dict(request.COOKIES),
    }
    if request.method in constants.HTTP_WITH_BODY:
        content_type = request.META.get('CONTENT_TYPE')
        if content_type == 'application/x-www-form-urlencoded':
            data = compat.multidict_to_dict(request.POST)
        elif content_type and content_type.startswith('multipart/form-data'):
            data = compat.multidict_to_dict(request.POST)
            if request.FILES:
                # only record file names, never file contents
                data['_files'] = {field: file.name for field, file in compat.iteritems(request.FILES)}
        else:
            try:
                data = request.body
            except Exception:
                data = '<unavailable>'
        # empty bodies are passed through even when capture is disabled
        result['body'] = data if (capture_body or not data) else '[REDACTED]'
    if hasattr(request, 'get_raw_uri'):
        # added in Django 1.9
        url = request.get_raw_uri()
    else:
        try:
            # Requires host to be in ALLOWED_HOSTS, might throw a
            # DisallowedHost exception
            url = request.build_absolute_uri()
        except DisallowedHost:
            # We can't figure out the real URL, so we have to set it to
            # DisallowedHost
            result['url'] = {'full': 'DisallowedHost'}
            url = None
    if url:
        result['url'] = get_url_dict(url)
    return result
def collect(self, start_timer=True):
    """
    Collect metrics from all registered metric sets

    :param start_timer: if True, restarts the collect timer after collection
    :return:
    """
    if start_timer:
        self._start_collect_timer()
    logger.debug("Collecting metrics")
    for _, metricset in compat.iteritems(self._metricsets):
        document = metricset.collect()
        if not document:
            # metricsets without samples return a falsy value; skip them
            continue
        self._queue_func("metricset", document)
def enforce_label_format(labels):
    """
    Enforces label format:
    * dots, double quotes or stars in keys are replaced by underscores
    * string values are limited to a length of 1024 characters
    * values can only be of a limited set of types

    :param labels: a dictionary of labels
    :return: a new dictionary with sanitized keys/values
    """
    return {
        LABEL_RE.sub("_", compat.text_type(key)): (
            value if isinstance(value, LABEL_TYPES) else keyword_field(compat.text_type(value))
        )
        for key, value in compat.iteritems(labels)
    }
def get_data_from_request(self, request, event_type):
    """
    Build the request-context dict for an event from a Django request object.

    :param request: the Django request
    :param event_type: event kind, matched against the capture_body setting
    """
    result = {
        "env": dict(get_environ(request.META)),
        "method": request.method,
        "socket": {"remote_address": request.META.get("REMOTE_ADDR")},
        "cookies": dict(request.COOKIES),
    }
    if self.config.capture_headers:
        request_headers = dict(get_headers(request.META))
        # header values must be strings; coerce numeric values
        for key, value in request_headers.items():
            if isinstance(value, (int, float)):
                request_headers[key] = str(value)
        result["headers"] = request_headers
    if request.method in constants.HTTP_WITH_BODY:
        capture_body = self.config.capture_body in ("all", event_type)
        if not capture_body:
            result["body"] = "[REDACTED]"
        else:
            content_type = request.META.get("CONTENT_TYPE")
            if content_type == "application/x-www-form-urlencoded":
                data = compat.multidict_to_dict(request.POST)
            elif content_type and content_type.startswith("multipart/form-data"):
                data = compat.multidict_to_dict(request.POST)
                if request.FILES:
                    # only record file names, never file contents
                    data["_files"] = {field: file.name for field, file in compat.iteritems(request.FILES)}
            else:
                try:
                    data = request.body
                except Exception as e:
                    self.logger.debug("Can't capture request body: %s", compat.text_type(e))
                    data = "<unavailable>"
            if data is not None:
                result["body"] = data
    url = get_raw_uri(request)
    result["url"] = get_url_dict(url)
    return result
def shorten(var, list_length=50, string_length=200, dict_length=50, **kwargs):
    """
    Shorten a given variable based on configurable maximum lengths, leaving
    breadcrumbs in the object to show that it was shortened.

    For strings, truncate the string to the max length, and append "..." so
    the user knows data was lost.

    For lists, truncate the list to the max length, and append two new strings
    to the list: "..." and "(<x> more elements)" where <x> is the number of
    elements removed.

    For dicts, truncate the dict to the max length (based on number of key/value
    pairs) and add a new (key, value) pair to the dict:
    ("...", "(<x> more elements)") where <x> is the number of key/value pairs
    removed.

    :param var: Variable to be shortened
    :param list_length: Max length (in items) of lists
    :param string_length: Max length (in characters) of strings
    :param dict_length: Max length (in key/value pairs) of dicts
    :return: Shortened variable
    """
    var = transform(var)
    if isinstance(var, compat.string_types) and len(var) > string_length:
        # reserve three characters for the "..." marker
        return var[: string_length - 3] + "..."
    if isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length:
        # TODO: we should write a real API for storing some metadata with vars when
        # we get around to doing ref storage
        return list(var)[:list_length] + ["...", "(%d more elements)" % (len(var) - list_length,)]
    if isinstance(var, dict) and len(var) > dict_length:
        trimmed = [(k, v) for (k, v) in itertools.islice(compat.iteritems(var), dict_length)]
        if "<truncated>" not in var:
            trimmed.append(("<truncated>", "(%d more elements)" % (len(var) - dict_length)))
        return dict(trimmed)
    return var
def update(self, config_dict=None, env_dict=None, inline_dict=None, initial=False):
    """
    Update all _ConfigValue fields of this config object from the given
    sources, in precedence order: environment > inline config > config dict.

    :param config_dict: dict of values keyed by each field's dict_key
    :param env_dict: environment mapping; defaults to os.environ
    :param inline_dict: dict of values keyed by field name
    :param initial: True on first load; triggers callbacks for defaulted fields
    """
    if config_dict is None:
        config_dict = {}
    if env_dict is None:
        env_dict = os.environ
    if inline_dict is None:
        inline_dict = {}
    # walk the class dict to find all declared config fields
    for field, config_value in compat.iteritems(self.__class__.__dict__):
        if not isinstance(config_value, _ConfigValue):
            continue
        new_value = self._NO_VALUE
        # first check environment
        if config_value.env_key and config_value.env_key in env_dict:
            new_value = env_dict[config_value.env_key]
        # check the inline config
        elif field in inline_dict:
            new_value = inline_dict[field]
        # finally, check config dictionary
        elif config_value.dict_key in config_dict:
            new_value = config_dict[config_value.dict_key]
        # only set if new_value changed. We'll fall back to the field default if not.
        if new_value is not self._NO_VALUE:
            try:
                setattr(self, field, new_value)
            except ConfigurationError as e:
                self._errors[e.field_name] = str(e)
        # handle initial callbacks
        if (
            initial
            and config_value.callbacks_on_default
            and getattr(self, field) is not None
            and getattr(self, field) == config_value.default
        ):
            self.callbacks_queue.append((config_value.dict_key, self._NO_VALUE, config_value.default))
        # if a field has not been provided by any config source, we have to check separately if it is required
        if config_value.required and getattr(self, field) is None:
            self._errors[config_value.dict_key] = "Configuration error: value for {} is required.".format(
                config_value.dict_key
            )
    self.call_pending_callbacks()
def start_span(
    self, operation_name=None, child_of=None, references=None, tags=None, start_time=None, ignore_active_span=False
):
    """
    OpenTracing bridge: start a span, mapping it to an Elastic APM transaction
    when none is active, or to a regular span otherwise.

    :param operation_name: name for the new span/transaction
    :param child_of: an OTSpanContext or OTSpan to use as parent
    :param references: OpenTracing references; only the first CHILD_OF is honored
    :param tags: dict of tags applied to the new span via set_tag
    :return: an OTSpan wrapping the created transaction or span
    """
    # resolve the parent context from child_of / references, in that order
    if isinstance(child_of, OTSpanContext):
        parent_context = child_of
    elif isinstance(child_of, OTSpan):
        parent_context = child_of.context
    elif references and references[0].type == ReferenceType.CHILD_OF:
        parent_context = references[0].referenced_context
    else:
        parent_context = None
    transaction = traces.execution_context.get_transaction()
    if not transaction:
        # no active transaction: the new "span" becomes a transaction
        trace_parent = parent_context.trace_parent if parent_context else None
        transaction = self._agent.begin_transaction("custom", trace_parent=trace_parent)
        transaction.name = operation_name
        span_context = OTSpanContext(trace_parent=transaction.trace_parent)
        ot_span = OTSpan(self, span_context, transaction)
    else:
        # to allow setting an explicit parent span, we check if the parent_context is set
        # and if it is a span. In all other cases, the parent is found implicitly through the
        # execution context.
        parent_span_id = (
            parent_context.span.elastic_apm_ref.id
            if parent_context and parent_context.span and not parent_context.span.is_transaction
            else None
        )
        span = transaction._begin_span(operation_name, None, parent_span_id=parent_span_id)
        trace_parent = parent_context.trace_parent if parent_context else transaction.trace_parent
        span_context = OTSpanContext(trace_parent=trace_parent.copy_from(span_id=span.id))
        ot_span = OTSpan(self, span_context, span)
    if tags:
        for k, v in compat.iteritems(tags):
            ot_span.set_tag(k, v)
    return ot_span
def varmap(func, var, context=None, name=None):
    """
    Executes ``func(key_name, value)`` on all values,
    recursively discovering dict and list scoped values.

    ``context`` tracks ids of objects currently being visited so that
    reference cycles are replaced with '<...>' instead of recursing forever.
    """
    if context is None:
        context = set()
    objid = id(var)
    if objid in context:
        return func(name, '<...>')
    context.add(objid)
    if isinstance(var, dict):
        # BUG FIX: iterate over a copy of the dict — ``func`` may mutate ``var``
        # (e.g. when sanitizing in place), which would raise
        # "dictionary changed size during iteration" on the live dict
        ret = dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var.copy()))
    elif isinstance(var, (list, tuple)):
        ret = func(name, [varmap(func, f, context, name) for f in var])
    else:
        ret = func(name, var)
    context.remove(objid)
    return ret
def end(self, skip_frames: int = 0, duration: Optional[float] = None):
    """
    End this transaction via the base class, then flush per-span self-time
    timers into the breakdown metricset (durations converted to microseconds).

    :param skip_frames: number of stack frames to skip, passed to super().end
    :param duration: explicit duration, passed to super().end
    """
    super().end(skip_frames, duration)
    if self._breakdown:
        for (span_type, span_subtype), timer in compat.iteritems(self._span_timers):
            labels = {
                "span.type": span_type,
                "transaction.name": self.name,
                "transaction.type": self.transaction_type,
            }
            if span_subtype:
                labels["span.subtype"] = span_subtype
            # timer.val is a (total_duration_seconds, count) pair
            val = timer.val
            self._breakdown.timer("span.self_time", reset_on_collect=True, unit="us", **labels).update(
                int(val[0] * 1000000), val[1]
            )
        if self.is_sampled:
            # "app" self-time: transaction duration minus time spent in child spans
            self._breakdown.timer(
                "span.self_time",
                reset_on_collect=True,
                unit="us",
                **{"span.type": "app", "transaction.name": self.name, "transaction.type": self.transaction_type},
            ).update(int((self.duration - self._child_durations.duration) * 1000000))
def _set_tracestate(self): elastic_value = ";".join([ "{}:{}".format(k, v) for k, v in compat.iteritems(self.tracestate_dict) ]) # No character validation needed, as we validate in `add_tracestate`. Just validate length. if len(elastic_value) > 256: logger.debug( "Modifications to TraceState would violate length limits, ignoring." ) raise TraceStateFormatException() elastic_state = "es={}".format(elastic_value) if not self.tracestate: return elastic_state else: # Remove es=<stuff> from the tracestate, and add the new es state to the end otherstate = re.sub(r"(?:,|^)es=([^,]*)", "", self.tracestate) otherstate = otherstate.lstrip(",") # No validation of `otherstate` required, since we're downstream. We only need to check `es=` # since we introduced it, and that validation has already been done at this point. if otherstate: return "{},{}".format(otherstate.rstrip(","), elastic_state) else: return elastic_state
def get_frame_info(
    frame,
    lineno,
    with_locals=True,
    library_frame_context_lines=None,
    in_app_frame_context_lines=None,
    include_paths_re=None,
    exclude_paths_re=None,
    locals_processor_func=None,
):
    """
    Build a serializable dict describing one stack frame: path, module,
    function, line number, library/in-app classification, optional source
    context lines and optional (processed) local variables.

    Returns None for frames marked with ``__traceback_hide__``.
    """
    # Support hidden frames
    f_locals = getattr(frame, "f_locals", {})
    if _getitem_from_frame(f_locals, "__traceback_hide__"):
        return None
    f_globals = getattr(frame, "f_globals", {})
    loader = f_globals.get("__loader__")
    module_name = f_globals.get("__name__")
    f_code = getattr(frame, "f_code", None)
    if f_code:
        abs_path = frame.f_code.co_filename
        function = frame.f_code.co_name
    else:
        abs_path = None
        function = None
    # Try to pull a relative file path
    # This changes /foo/site-packages/baz/bar.py into baz/bar.py
    try:
        base_filename = sys.modules[module_name.split(".", 1)[0]].__file__
        filename = abs_path.split(base_filename.rsplit(os.path.sep, 2)[0], 1)[-1].lstrip(os.path.sep)
    except Exception:
        filename = abs_path
    if not filename:
        # the relative-path computation can produce an empty string
        filename = abs_path
    frame_result = {
        "abs_path": abs_path,
        "filename": filename,
        "module": module_name,
        "function": function,
        "lineno": lineno,
        "library_frame": is_library_frame(abs_path, include_paths_re, exclude_paths_re),
    }
    # library frames and in-app frames can be configured with different context sizes
    context_lines = library_frame_context_lines if frame_result["library_frame"] else in_app_frame_context_lines
    if context_lines and lineno is not None and abs_path:
        pre_context, context_line, post_context = get_lines_from_file(
            abs_path, lineno, int(context_lines / 2), loader, module_name
        )
    else:
        pre_context, context_line, post_context = [], None, []
    if context_line:
        frame_result["pre_context"] = pre_context
        frame_result["context_line"] = context_line
        frame_result["post_context"] = post_context
    if with_locals:
        if f_locals is not None and not isinstance(f_locals, dict):
            # XXX: Genshi (and maybe others) have broken implementations of
            # f_locals that are not actually dictionaries
            try:
                f_locals = to_dict(f_locals)
            except Exception:
                f_locals = "<invalid local scope>"
        if locals_processor_func:
            # e.g. used to shorten/sanitize each local variable before serialization
            f_locals = {varname: locals_processor_func(var) for varname, var in compat.iteritems(f_locals)}
        frame_result["vars"] = transform(f_locals)
    return frame_result
def _build_msg_for_logging(
    self, event_type, date=None, context=None, custom=None, stack=None, handled=True, **kwargs
):
    """
    Captures, processes and serializes an event into a dict object
    """
    transaction = get_transaction()
    if transaction:
        transaction_context = deepcopy(transaction.context)
    else:
        transaction_context = {}
    event_data = {}
    if custom is None:
        custom = {}
    if date is not None:
        warnings.warn(
            "The date argument is no longer evaluated and will be removed in a future release", DeprecationWarning
        )
    # the timestamp is always taken at capture time, ignoring any passed date
    date = time.time()
    if stack is None:
        stack = self.config.auto_log_stacks
    # merge the explicit context on top of the transaction context
    if context:
        transaction_context.update(context)
        context = transaction_context
    else:
        context = transaction_context
    event_data["context"] = context
    if transaction and transaction.tags:
        context["tags"] = deepcopy(transaction.tags)
    # if '.' not in event_type:
    # Assume it's a builtin
    event_type = "elasticapm.events.%s" % event_type
    handler = self.get_handler(event_type)
    result = handler.capture(self, **kwargs)
    if self._filter_exception_type(result):
        # exception type is filtered out: drop the event entirely
        return
    # data (explicit) culprit takes over auto event detection
    culprit = result.pop("culprit", None)
    if custom.get("culprit"):
        culprit = custom.pop("culprit")
    for k, v in compat.iteritems(result):
        if k not in event_data:
            event_data[k] = v
    log = event_data.get("log", {})
    if stack and "stacktrace" not in log:
        # stack can be True (capture here) or an already-collected frame iterable
        if stack is True:
            frames = stacks.iter_stack_frames(skip=3)
        else:
            frames = stack
        frames = stacks.get_stack_info(
            frames,
            with_locals=self.config.collect_local_variables in ("errors", "all"),
            library_frame_context_lines=self.config.source_lines_error_library_frames,
            in_app_frame_context_lines=self.config.source_lines_error_app_frames,
            include_paths_re=self.include_paths_re,
            exclude_paths_re=self.exclude_paths_re,
            # each local variable is recursively shortened before serialization
            locals_processor_func=lambda local_var: varmap(
                lambda k, v: shorten(
                    v,
                    list_length=self.config.local_var_list_max_length,
                    string_length=self.config.local_var_max_length,
                ),
                local_var,
            ),
        )
        log["stacktrace"] = frames
    if "stacktrace" in log and not culprit:
        culprit = stacks.get_culprit(log["stacktrace"], self.config.include_paths, self.config.exclude_paths)
    if "level" in log and isinstance(log["level"], compat.integer_types):
        # numeric log levels are converted to their lowercase names
        log["level"] = logging.getLevelName(log["level"]).lower()
    if log:
        event_data["log"] = log
    if culprit:
        event_data["culprit"] = culprit
    if "custom" in context:
        context["custom"].update(custom)
    else:
        context["custom"] = custom
    # Make sure all data is coerced
    event_data = transform(event_data)
    if "exception" in event_data:
        event_data["exception"]["handled"] = bool(handled)
    # microsecond precision epoch timestamp
    event_data["timestamp"] = int(date * 1000000)
    transaction = get_transaction()
    if transaction:
        if transaction.trace_parent:
            event_data["trace_id"] = transaction.trace_parent.trace_id
        event_data["parent_id"] = transaction.id
        event_data["transaction_id"] = transaction.id
    return event_data