def check_cinema(movie):
    """Simulate a cinema lookup inside a traced 'CheckCinema' span.

    Sleeps a random 1-30 seconds to mimic work, randomly flags the span's
    'error' tag, and returns a placeholder details string.
    """
    with tracer.start_span('CheckCinema', child_of=get_current_span()) as span:
        with span_in_context(span):
            delay = random.randint(1, 30)
            time.sleep(delay)
            print("sleep : ", delay)
            cinema_details = "Cinema Details"
            # Roughly one in three spans is randomly marked as errored.
            error_flag = random.choice(['false', 'true', 'false'])
            span.set_tag('error', error_flag)
            span.log_kv({'event': 'CheckCinema', 'value': cinema_details})
            return cinema_details
def book_show(showtime_details):
    """Simulate booking a show inside a traced 'BookShow' span.

    Sleeps a random 1-30 seconds to mimic work, then hits a deliberate
    ``5 / 0`` fault so the tracing backend records a failed span.

    :param showtime_details: details string logged on the span
    :raises ZeroDivisionError: always, via the injected fault below
    """
    with tracer.start_span('BookShow', child_of=get_current_span()) as span:
        with span_in_context(span):
            num = random.randint(1, 30)
            time.sleep(num)
            ticket_details = "Ticket Details"
            random_flag = random.choice(['false', 'true', 'false'])
            # NOTE(review): deliberate fault injection for error-trace demos —
            # everything below this line is unreachable; confirm before removing.
            a = 5 / 0
            span.set_tag('error', random_flag)
            # Bug fix: event name was copy-pasted as 'CheckCinema'.
            span.log_kv({'event': 'BookShow', 'value': showtime_details})
            print(ticket_details)
def check_showtime(cinema_details):
    """Simulate a showtime lookup inside a traced 'CheckShowtime' span.

    Sleeps a random 1-30 seconds to mimic work, randomly flags the span's
    'error' tag, and returns a placeholder showtime details string.
    """
    with tracer.start_span('CheckShowtime', child_of=get_current_span()) as span:
        with span_in_context(span):
            num = random.randint(1, 30)
            time.sleep(num)
            showtime_details = "Showtime Details"
            random_flag = random.choice(['false', 'true', 'false'])
            span.set_tag('error', random_flag)
            # Bug fix: event name was copy-pasted as 'CheckCinema'.
            span.log_kv({'event': 'CheckShowtime', 'value': showtime_details})
            return showtime_details
def http_get(port, path, param, value):
    """Issue a traced GET to http://localhost:<port>/<path>?<param>=<value>.

    Tags the current span as an RPC client call, injects the trace context
    into the outgoing headers, and returns the response body text.
    """
    url = 'http://localhost:%s/%s' % (port, path)
    span = get_current_span()
    # Standard client-side RPC tags on the active span.
    for tag_key, tag_value in ((tags.HTTP_METHOD, 'GET'),
                               (tags.HTTP_URL, url),
                               (tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT)):
        span.set_tag(tag_key, tag_value)
    headers = {}
    tracer.inject(span, Format.HTTP_HEADERS, headers)
    response = requests.get(url, params={param: value}, headers=headers)
    assert response.status_code == 200
    return response.text
def get_user_settings(uuid):
    """Fetch a user's settings from the settings service, with tracing.

    Tags the current span as an RPC client call, injects the trace context
    into the request headers, and returns the parsed JSON response.
    """
    logger.info("getting user settings", extra={"uuid": uuid})
    settings_url = urljoin("http://settings:5000/settings/", "{}".format(uuid))
    span = get_current_span()
    # Annotate the active span with the outgoing request details.
    for tag_key, tag_value in ((tags.HTTP_METHOD, 'GET'),
                               (tags.HTTP_URL, settings_url),
                               (tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT),
                               ('uuid', uuid)):
        span.set_tag(tag_key, tag_value)
    headers = {}
    tracer.inject(span, Format.HTTP_HEADERS, headers)
    response = requests.get(settings_url, headers=headers)
    return response.json()
def before_client_send(target, context, method, kwargs, operation):
    """
    A hook to be executed before an RPC request is sent from the client side.

    Returns a Span object that can be used as a context manager around the
    actual RPC call implementation, or in case of async callback, it needs
    its `finish()` method to be called explicitly.

    Also loads the child span context into the service's context object;
    expects the context object to have a ``span`` attribute.

    :param target: RPC target object
    :param context: context object
    :param method: RPC method name
    :param kwargs: RPC method kwargs
    :param operation: operation name for the child span
    :return: child tracing span encapsulating this request
    """
    # Parent span: the request-context span (api service) takes precedence
    # over a span extracted from the RPC context (other services).
    req_context_span = get_current_span()
    rpc_span = rpc_context_span_extractor(context)
    parent_span = req_context_span or rpc_span
    log.debug(
        "req_context_span: {}, rpc_span: {} ... Parent span: {}".format(
            req_context_span, rpc_span, parent_span))
    span = utils.start_child_span(operation_name=operation, parent=parent_span)
    # Set the needed tags for the span
    span.set_tag(ext_tags.SPAN_KIND, ext_tags.SPAN_KIND_RPC_CLIENT)
    span.set_tag('rpc_method', method)
    span.set_tag('kwargs', str(kwargs))
    request_id = context.request_id
    if request_id:
        span.set_tag('request.id', request_id)
    try:
        carrier = {}
        opentracing.tracer.inject(span_context=span.context,
                                  format=Format.TEXT_MAP,
                                  carrier=carrier)
        log.debug(
            "context object to be used for injecting {}".format(context))
        # Simplified: plain dict copy replaces the six.iteritems() loop.
        context.span = dict(carrier)
    except opentracing.UnsupportedFormatException:
        # log.warning replaces the deprecated log.warn alias.
        log.warning("Error injecting opentracing span to context")
    return span
def get_tracebacks(es, tracer, start_date=None, end_date=None, num_matches=100):
    """
    Queries the database for L{Traceback} from a given date range.

    Both dates are inclusive. Date filtering is done on the
    'origin_timestamp' field of the Traceback. All filtering params are
    optional; any params that are None are ignored.

    Returns a list (instead of a generator) so we can be cached.

    Params:
    - start_date: must be a datetime.date
    - end_date: must be a datetime.date

    @rtype: list
    @postcondition: all(isinstance(v, Traceback) for v in return)
    @postcondition: len(return) <= num_matches
    """
    # Build the optional date-range clause; "||/d" rounds to day granularity.
    range_params = {}
    if start_date is not None:
        range_params['gte'] = "%s||/d" % start_date
    if end_date is not None:
        range_params['lte'] = "%s||/d" % end_date
    if range_params:
        body = {"query": {"range": {"origin_timestamp": range_params}}}
    else:
        body = {"query": {"match_all": {}}}

    root_span = get_current_span()
    with tracer.start_span('elasticsearch', child_of=root_span):
        try:
            raw_tracebacks = es.search(index=INDEX,
                                       doc_type=DOC_TYPE,
                                       body=body,
                                       sort='origin_timestamp:desc',
                                       size=num_matches)
        except elasticsearch.exceptions.NotFoundError:
            logger.warning('traceback index not found. has it been created?')
            return []
    return [
        generate_traceback_from_source(hit['_source'])
        for hit in raw_tracebacks['hits']['hits']
    ]
def insert_one(list_data, collection):
    """Insert each document of ``list_data`` into ``collection`` one by one,
    inside a traced 'insert_one' span.

    :param list_data: iterable of documents to insert
    :param collection: pymongo-style collection with an ``insert_one`` method
    :return: "sucess" on success, "error" on failure (strings kept as-is for
        backward compatibility with existing callers)
    """
    with tracer.start_span('insert_one', child_of=get_current_span()) as span:
        span.set_tag('mongodb', 'operation:insert_one')
        with span_in_context(span):
            try:
                for data in list_data:
                    collection.insert_one(data)
                # Bug fix: was len(data_list) — an undefined name that raised
                # NameError; log the number of inserted documents instead.
                span.log_kv({
                    'event': 'data inserted by lines',
                    'value': len(list_data)
                })
                span.set_tag('error', 'false')
                return "sucess"
            except Exception:  # narrowed from a bare except:
                span.set_tag('error', 'true')
                return "error"
def group_join_wrapper(group, timeout=None, raise_error=False):
    """Trace a gevent group join when a parent span is active; otherwise
    delegate directly to the original join implementation."""
    parent_ctx = get_current_span()
    if not parent_ctx:
        return _Group_join(group, timeout, raise_error)
    # NOTE(review): `self` must come from an enclosing scope — confirm this
    # wrapper is defined inside a method.
    span = start_child_span(
        operation_name='gevent:{}:join'.format(group.__class__.__name__),
        parent=parent_ctx,
        tags={
            'greenlets': self._get_greenlet_names(group.greenlets),
            'timeout': timeout,
            'raise_error': raise_error,
        },
    )
    with span:
        return _Group_join(group, timeout, raise_error)
def joinall_wrapper(greenlets, timeout=None, raise_error=False, count=None):
    """Trace a gevent joinall when a parent span is active; otherwise
    delegate directly to the original joinall implementation."""
    parent_ctx = get_current_span()
    if not parent_ctx:
        return _joinall(greenlets, timeout, raise_error, count)
    # NOTE(review): `self` must come from an enclosing scope — confirm this
    # wrapper is defined inside a method.
    span = start_child_span(
        operation_name='gevent:joinall',
        parent=parent_ctx,
        tags={
            'greenlets': self._get_greenlet_names(greenlets),
            'timeout': timeout,
            'raise_error': raise_error,
            'count': count,
        },
    )
    with span:
        return _joinall(greenlets, timeout, raise_error, count)
def get_matching_jira_issues(es, tracer, traceback_text, match_level):
    """
    Queries the database for any jira issues that include the traceback_text.

    We use matching_percentage to determine how much the traceback needs to
    match the given traceback_text before we return it.

    Returns a list (instead of a generator) so we can be cached.

    @type traceback_text: str
    @rtype: list
    @precondition: match_level in es_util.ALL_MATCH_LEVELS
    @postcondition: all(isinstance(v, JiraIssue) for v in return)
    """
    assert isinstance(traceback_text, str), (type(traceback_text), traceback_text)
    assert match_level in es_util.ALL_MATCH_LEVELS, (match_level, es_util.ALL_MATCH_LEVELS)

    tracer = tracer or opentracing.tracer
    body = es_util.generate_text_match_payload(
        traceback_text, ["description_filtered", "comments_filtered"], match_level)

    root_span = get_current_span()
    with tracer.start_span('elasticsearch', child_of=root_span):
        try:
            raw_es_response = es.search(index=INDEX,
                                        doc_type=DOC_TYPE,
                                        body=body,
                                        size=1000)
        except elasticsearch.exceptions.NotFoundError:
            logger.warning('jira index not found. has it been created?')
            return []
    return [
        generate_from_source(hit['_source'])
        for hit in raw_es_response['hits']['hits']
    ]
def __init__(self, run=None, *args, **kwargs):
    """Construct the greenlet, wrapping ``run`` so its execution is traced
    as a child of the span active at construction time.

    Falls back to a plain greenlet when there is no active parent span or
    no run callable.
    """
    parent_ctx = get_current_span()
    if not (parent_ctx and run):
        super(GreenletPatcher.TracedGreenlet, self).__init__(run, *args, **kwargs)
        return

    operation = 'gevent:{}'.format(run.__name__)

    @functools.wraps(run)
    def traced_run(*run_args, **run_kwargs):
        span = start_child_span(
            operation_name=operation,
            parent=parent_ctx,
        )
        with span, span_in_context(span):
            try:
                return run(*run_args, **run_kwargs)
            except Exception as error:
                # Record the failure on the span before re-raising.
                span.set_tag(tags.ERROR, True)
                span.log_kv({
                    'event': tags.ERROR,
                    'error.object': error,
                })
                raise

    super(GreenletPatcher.TracedGreenlet, self).__init__(traced_run, *args, **kwargs)
def execute_with_tracing(self, tracer):
    """Run inference inside an 'aiops_inference_execute' span so its latency
    is recorded by the tracer."""
    with tracer.start_span('aiops_inference_execute',
                           child_of=get_current_span()) as span, \
            span_in_context(span):
        return self.execute()
def execute_with_tracing(self, tracer):
    """Train anomaly-detection models inside an 'aiops_train_execute' span
    so the training latency is recorded by the tracer."""
    with tracer.start_span('aiops_train_execute',
                           child_of=get_current_span()) as span, \
            span_in_context(span):
        return self.execute()
def execute_with_tracing(self, tracer):
    """Retrain with fresh data and run batched predictions inside an
    'aiops_inference_execute' span so the latency is recorded."""
    with tracer.start_span('aiops_inference_execute',
                           child_of=get_current_span()) as span, \
            span_in_context(span):
        return self.execute()
def _extract_span():
    """Return the BAGGAGE_KEY baggage item from the current span, or the
    fallback string when no span is active."""
    current = get_current_span()
    return current.get_baggage_item(BAGGAGE_KEY) if current else "baggage not found"
def async_task():
    """Yield briefly, then verify the tracing span propagated into this task."""
    time.sleep(0.001)
    current = get_current_span()
    assert current is not None
def http_get(url):
    """GET ``url`` and return the response body as text."""
    # NOTE(review): the current span is fetched but never used — presumably a
    # placeholder for header injection or span assertion; confirm intent.
    span = get_current_span()
    r = requests.get(url)
    return r.text
def handler2(_):
    """Return the BAGGAGE_KEY baggage item from the current span, or the
    fallback string when no span is active."""
    current = get_current_span()
    return current.get_baggage_item(BAGGAGE_KEY) if current else "baggage not found"
def insert_one(list_data, collection):
    """Insert every document of ``list_data`` into ``collection`` one by one,
    inside a traced 'insert_one' span."""
    with tracer.start_span('insert_one', child_of=get_current_span()) as span:
        span.set_tag('mongodb', 'operation:insert_one')
        with span_in_context(span):
            for document in list_data:
                collection.insert_one(document)
def print_hello(hello_str):
    """Publish ``hello_str`` via the publish endpoint inside a 'printHello'
    span and log the event on the span."""
    with tracer.start_span('printHello', child_of=get_current_span()) as span, \
            span_in_context(span):
        http_get(8082, 'publish', 'helloStr', hello_str)
        span.log_kv({'event': 'println'})
def formatter(tracer, greeting, name):
    """Return the title-cased greeting string, built inside a traced
    'formatter' span that is a child of the currently active span."""
    with tracer.start_span('formatter', child_of=get_current_span()) as format_span:
        with span_in_context(format_span):
            return '{} {}!!'.format(greeting.title(), name.title())
def check(span_to_check):
    """Assert that the currently active span is exactly ``span_to_check``."""
    current = get_current_span()
    assert current == span_to_check
def print_hello(hello_str):
    """Print ``hello_str`` inside a 'println' span and log the event."""
    with tracer.start_span('println', child_of=get_current_span()) as span:
        print(hello_str)
        span.log_kv({'event': 'println'})
def format_string(hello_to):
    """Build the greeting string for ``hello_to`` inside a 'format' span,
    logging the formatted result on the span."""
    with tracer.start_span('format', child_of=get_current_span()) as span:
        hello_str = 'Hello, %s!' % hello_to
        span.log_kv({'event': 'string-format', 'value': hello_str})
        return hello_str
def get_tracebacks_for_day(
        ES,
        tracer,
        date_to_analyze: datetime.date,
        filter_text: str,
        hidden_traceback_ids: set,
) -> typing.List[TracebackPlusMetadata]:
    """
    Retrieves the Tracebacks for the given date_to_analyze date.

    If provided, only returns Tracebacks which match filter_text.

    Only returns Tracebacks whose ids aren't in hidden_traceback_ids.
    """
    tracer = tracer or opentracing.tracer
    root_span = get_current_span()

    # get all tracebacks
    with tracer.start_span('get all tracebacks', child_of=root_span) as span:
        with span_in_context(span):
            tracebacks = traceback_db.get_tracebacks(ES, tracer, date_to_analyze, date_to_analyze)
            logger.debug('found %s tracebacks', len(tracebacks))

    # filter out tracebacks the user has hidden. we use a namedlist to store each traceback + some
    # metadata we'll use when rendering the html page
    tb_meta = [
        TracebackPlusMetadata(traceback=t)
        for t in tracebacks
        if t.origin_papertrail_id not in hidden_traceback_ids
    ]

    # get a list of matching jira issues
    with tracer.start_span('for each traceback, get matching jira issues',
                           child_of=root_span) as span:
        with span_in_context(span):
            for tb in tb_meta:
                tb.jira_issues = jira_issue_db.get_matching_jira_issues(
                    ES, tracer, tb.traceback.traceback_text, es_util.EXACT_MATCH)
                matching_jira_keys = set(jira_issue.key for jira_issue in tb.jira_issues)
                similar_jira_issues = jira_issue_db.get_matching_jira_issues(
                    ES, tracer, tb.traceback.traceback_text, es_util.SIMILAR_MATCH)
                # "similar" issues that already matched exactly are excluded
                tb.similar_jira_issues = [
                    similar_jira_issue
                    for similar_jira_issue in similar_jira_issues
                    if similar_jira_issue.key not in matching_jira_keys
                ]

    # apply user's filters
    if filter_text == 'Has Ticket':
        tb_meta = [tb for tb in tb_meta if tb.jira_issues]
    elif filter_text == 'No Ticket':
        tb_meta = [tb for tb in tb_meta if not tb.jira_issues]
    elif filter_text == 'No Recent Ticket':
        # keep only tracebacks with no jira issue updated within the last two weeks
        tb_meta_without_recent_ticket = []
        for tb in tb_meta:
            has_recent_issues = False
            for issue in tb.jira_issues:
                if issue.updated > TWO_WEEKS_AGO:
                    has_recent_issues = True
                    break
            if not has_recent_issues:
                tb_meta_without_recent_ticket.append(tb)
        tb_meta = tb_meta_without_recent_ticket
    elif filter_text == 'Has Open Ticket':
        tb_meta = [
            tb for tb in tb_meta
            if [issue for issue in tb.jira_issues if issue.status != 'Closed']
        ]
    else:
        # any other filter_text is a no-op (list left unchanged)
        tb_meta = tb_meta

    # we take at most 100 tracebacks, due to performance issues of having more
    tb_meta = tb_meta[:100]

    # for each traceback, get all similar tracebacks
    with tracer.start_span('for each traceback, get similar tracebacks',
                           child_of=root_span) as span:
        with span_in_context(span):
            for tb in tb_meta:
                tb.similar_tracebacks = []
                tb.similar_tracebacks = traceback_db.get_matching_tracebacks(
                    ES, tracer, tb.traceback.traceback_text, es_util.EXACT_MATCH, 100)
    return tb_meta
def insert_bulk(list_data, collection):
    """Bulk-insert ``list_data`` into ``collection`` inside a traced
    'insert_bulk' span; return the number of inserted documents."""
    with tracer.start_span('insert_bulk', child_of=get_current_span()) as span:
        span.set_tag('mongodb', 'operation:insert_many')
        with span_in_context(span):
            result = collection.insert_many(list_data)
            return len(result.inserted_ids)
def nested(nested_span_to_check, span_to_check):
    # Run `check` against nested_span_to_check inside span1's context, then
    # verify the outer span is restored once the coroutine completes.
    # NOTE(review): `span1` and `run_coroutine_with_span` come from an
    # enclosing scope not visible in this chunk.
    yield run_coroutine_with_span(span1, check, nested_span_to_check)
    assert get_current_span() == span_to_check
def getForwardHeaders(request):
    """Build the set of headers to forward on downstream requests.

    Combines the trace context injected from the current span, the
    ``end-user`` header derived from the session, and a whitelist of
    propagation headers copied verbatim from the incoming request.
    """
    headers = {}

    # x-b3-*** headers are populated by injecting the opentracing span.
    span = get_current_span()
    carrier = {}
    tracer.inject(span_context=span.context,
                  format=Format.HTTP_HEADERS,
                  carrier=carrier)
    headers.update(carrier)

    # Non x-b3-*** headers are handled manually.
    if 'user' in session:
        headers['end-user'] = session['user']

    # Headers to propagate when present on the incoming request.
    # Keep this in sync with the headers in details and reviews.
    incoming_headers = [
        # Istio request id: used in access logs and for consistent trace/log
        # sampling decisions; all applications should propagate it.
        'x-request-id',
        # Lightstep tracing header (see
        # https://istio.io/latest/docs/tasks/observability/distributed-tracing/lightstep/).
        # Note: should probably move to B3 or W3C TRACE_CONTEXT eventually.
        'x-ot-span-context',
        # Datadog tracing headers.
        'x-datadog-trace-id',
        'x-datadog-parent-id',
        'x-datadog-sampling-priority',
        # W3C Trace Context (OpenCensusAgent / Stackdriver configurations).
        'traceparent',
        'tracestate',
        # Cloud trace context (OpenCensusAgent / Stackdriver configurations).
        'x-cloud-trace-context',
        # gRPC binary trace context (OpenCensusAgent / Stackdriver).
        'grpc-trace-bin',
        # b3 headers (x-b3-traceid / spanid / parentspanid / sampled / flags)
        # are intentionally absent: the tracer.inject above propagates them.
        # Application-specific headers to forward.
        'user-agent',
    ]

    # Copy over whichever whitelisted headers the incoming request carries.
    # Which subset matters depends on the tracing backend: Zipkin needs b3,
    # Lightstep needs x-ot-span-context, Datadog needs the x-datadog-* set;
    # OpenCensusAgent/Stackdriver accept any compatible combination.
    for header_name in incoming_headers:
        header_value = request.headers.get(header_name)
        if header_value is not None:
            headers[header_name] = header_value

    return headers
def check():
    """Assert that the active span is the `span` captured from the enclosing scope."""
    current = get_current_span()
    assert current == span
def emit(self, record):
    """Mirror a log record onto the current tracing span as a log_kv event,
    keyed by the record's level name. No-op when no span is active."""
    span = request_context.get_current_span()
    if span is None:
        return
    span.log_kv({logging.getLevelName(record.levelno): self.format(record)})