def test_remote_endpoint(self):
    """Server spans encode the remote peer as 'ca'; client spans as 'sa'."""
    encoder = get_encoder(Encoding.V1_THRIFT)
    peer = create_endpoint(
        service_name='test_server',
        host='127.0.0.1',
    )
    # The binary-annotation key depends only on the span kind.
    for span_kind, expected_key in ((Kind.SERVER, 'ca'), (Kind.CLIENT, 'sa')):
        annotations = thrift.binary_annotation_list_builder({}, None)
        encoder.encode_remote_endpoint(peer, span_kind, annotations)
        assert annotations == [
            thrift.create_binary_annotation(
                key=expected_key,
                value=thrift.SERVER_ADDR_VAL,
                annotation_type=thrift.zipkin_core.AnnotationType.BOOL,
                host=thrift.create_endpoint(0, 'test_server', '127.0.0.1', None),
            )
        ]
def add_sa_binary_annotation(
    self,
    port=0,
    service_name='unknown',
    host='127.0.0.1',
):
    """Add an 'sa' (server address) binary annotation to the current span.

    'sa' annotations record where a request is going when the destination
    does not support zipkin itself. Note that the span must have 'cs'/'cr'
    annotations for the 'sa' annotation to be meaningful.

    :param port: The port number of the destination
    :type port: int
    :param service_name: The name of the destination service
    :type service_name: str
    :param host: Host address of the destination
    :type host: str
    """
    # Only record when this trace is actually being emitted: either it is
    # sampled, or a firehose handler forces emission regardless of sampling.
    tracing_active = self.zipkin_attrs and (
        self.zipkin_attrs.is_sampled or self.firehose_handler is not None
    )
    if not tracing_active:
        return
    if 'client' not in self.include:
        # TODO: trying to set a sa binary annotation for a non-client span
        # should result in a logged error
        return
    destination = create_endpoint(
        port=port,
        service_name=service_name,
        host=host,
    )
    annotation = create_binary_annotation(
        key=zipkin_core.SERVER_ADDR,
        value=SERVER_ADDR_VAL,
        annotation_type=zipkin_core.AnnotationType.BOOL,
        host=destination,
    )
    # Before the logging context exists, annotations accumulate on the span
    # itself; afterwards they go straight onto the context.
    sink = (
        self.logging_context.sa_binary_annotations
        if self.logging_context
        else self.sa_binary_annotations
    )
    sink.append(annotation)
def encode_span(self, v2_span):
    """Encode a v2 span as a thrift-serialized v1 span.

    :param v2_span: span to encode
    :returns: thrift-encoded bytes for the span
    """
    v1_span = v2_span.build_v1_span()
    host = thrift.create_endpoint(
        v1_span.endpoint.port,
        v1_span.endpoint.service_name,
        v1_span.endpoint.ipv4,
        v1_span.endpoint.ipv6,
    )
    annotations = thrift.annotation_list_builder(v1_span.annotations, host)
    binary_annotations = thrift.binary_annotation_list_builder(
        v1_span.binary_annotations,
        host,
    )
    # The remote peer, when present, is carried as a sa/ca binary annotation.
    if v2_span.remote_endpoint:
        self.encode_remote_endpoint(
            v2_span.remote_endpoint,
            v2_span.kind,
            binary_annotations,
        )
    return thrift.span_to_bytes(
        thrift.create_span(
            v1_span.id,
            v1_span.parent_id,
            v1_span.trace_id,
            v1_span.name,
            annotations,
            binary_annotations,
            v1_span.timestamp,
            v1_span.duration,
        )
    )
def encode_remote_endpoint(self, remote_endpoint, kind, binary_annotations):
    """Append the remote endpoint as a 'sa'/'ca' binary annotation.

    CLIENT spans record the peer as SERVER_ADDR ('sa'); SERVER spans record
    it as CLIENT_ADDR ('ca').

    :param remote_endpoint: endpoint of the remote peer
    :param kind: span kind; must be Kind.CLIENT or Kind.SERVER
    :param binary_annotations: list of binary annotations, mutated in place
    :raises ValueError: if kind is neither CLIENT nor SERVER
    """
    # Resolve the key first and fail fast on unsupported kinds. Previously
    # an unexpected kind left `key` unbound and surfaced as an opaque
    # UnboundLocalError instead of a meaningful error.
    if kind == Kind.CLIENT:
        key = thrift.zipkin_core.SERVER_ADDR
    elif kind == Kind.SERVER:
        key = thrift.zipkin_core.CLIENT_ADDR
    else:
        raise ValueError(
            'Remote endpoint can only be encoded for CLIENT or SERVER '
            'spans, got {}'.format(kind)
        )
    thrift_remote_endpoint = thrift.create_endpoint(
        remote_endpoint.port,
        remote_endpoint.service_name,
        remote_endpoint.ipv4,
        remote_endpoint.ipv6,
    )
    binary_annotations.append(
        thrift.create_binary_annotation(
            key=key,
            value=thrift.SERVER_ADDR_VAL,
            annotation_type=thrift.zipkin_core.AnnotationType.BOOL,
            host=thrift_remote_endpoint,
        )
    )
def test__convert_from_thrift_binary_annotations_unicode(self):
    """Unicode annotation values survive conversion into span tags."""
    decoder = _V1ThriftDecoder()
    host = thrift.create_endpoint(8888, "test_service", "10.0.0.1", None)
    string_type = zipkin_core.AnnotationType.STRING
    # Mix explicit-unicode, ascii, and implicit-unicode values.
    pairs = [("key1", u"再见"), ("key2", "val2"), ("key3", "再见")]
    thrift_binary_annotations = [
        create_binary_annotation(key, value, string_type, host)
        for key, value in pairs
    ]
    (
        tags,
        local_endpoint,
        remote_endpoint,
    ) = decoder._convert_from_thrift_binary_annotations(thrift_binary_annotations)
    assert tags == {
        "key1": u"再见",
        "key2": "val2",
        "key3": "再见",
    }
    assert local_endpoint == Endpoint("test_service", "10.0.0.1", None, 8888)
def thrift_endpoint():
    """Build a thrift endpoint for the test service on 10.0.0.1:8888."""
    endpoint = thrift.create_endpoint(8888, 'test_service', '10.0.0.1', None)
    return endpoint
def check_v1_thrift(obj, zipkin_attrs, inner_span_id, ts):
    # Decode the binary thrift payload and compare both decoded spans against
    # hand-built expected thrift spans (root client span + inner span).
    inner_span, root_span = _decode_binary_thrift_objs(obj)
    endpoint = thrift.create_endpoint(
        port=8080,
        service_name='test_service_name',
        ipv4='10.0.0.0',
    )
    binary_annotations = thrift.binary_annotation_list_builder(
        {'some_key': 'some_value'},
        endpoint,
    )
    # The root span is expected to carry an 'sa' (server address) annotation
    # pointing at an IPv6 destination service.
    binary_annotations.append(thrift.create_binary_annotation(
        'sa',
        '\x01',
        zipkin_core.AnnotationType.BOOL,
        thrift.create_endpoint(
            port=8888,
            service_name='sa_service',
            ipv6='2001:0db8:85a3:0000:0000:8a2e:0370:7334',
        ),
    ))
    expected_root = thrift.create_span(
        span_id=zipkin_attrs.span_id,
        parent_span_id=zipkin_attrs.parent_span_id,
        trace_id=zipkin_attrs.trace_id,
        span_name='test_span_name',
        annotations=thrift.annotation_list_builder(
            OrderedDict([('cs', ts), ('cr', ts + 10)]),
            endpoint,
        ),
        binary_annotations=binary_annotations,
        timestamp_s=None,
        duration_s=None,
    )
    # py.test diffs of thrift Spans are pretty useless and hide many things
    # These prints would only appear on stdout if the test fails and help comparing
    # the 2 spans.
    print(root_span)
    print(expected_root)
    assert root_span == expected_root
    expected_inner = thrift.create_span(
        span_id=inner_span_id,
        parent_span_id=zipkin_attrs.span_id,
        trace_id=zipkin_attrs.trace_id,
        span_name='inner_span',
        annotations=thrift.annotation_list_builder(
            OrderedDict([('cs', ts), ('sr', ts), ('ss', ts + 5),
                         ('cr', ts + 5), ('ws', ts)]),
            endpoint,
        ),
        binary_annotations=[],
        timestamp_s=ts,
        duration_s=5,
    )
    # py.test diffs of thrift Spans are pretty useless and hide many things
    # These prints would only appear on stdout if the test fails and help comparing
    # the 2 spans.
    print(inner_span)
    print(expected_inner)
    assert inner_span == expected_inner
def start(self):
    """Enter the new span context. All annotations logged inside this
    context will be attributed to this span. All new spans generated
    inside this context will have this span as their parent.

    In the unsampled case, this context still generates new span IDs and
    pushes them onto the threadlocal stack, so downstream services calls
    made will pass the correct headers. However, the logging handler is
    never attached in the unsampled case, so the spans are never logged.
    """
    self.do_pop_attrs = False
    # If zipkin_attrs are passed in or this span is doing its own sampling,
    # it will need to actually log spans at __exit__.
    self.perform_logging = self.zipkin_attrs or self.sample_rate is not None
    if self.sample_rate is not None:
        # This clause allows for sampling this service independently
        # of the passed-in zipkin_attrs.
        if self.zipkin_attrs and not self.zipkin_attrs.is_sampled:
            # Re-roll the sampling decision but keep the incoming trace id.
            self.zipkin_attrs = create_attrs_for_span(
                sample_rate=self.sample_rate,
                trace_id=self.zipkin_attrs.trace_id,
            )
        elif not self.zipkin_attrs:
            self.zipkin_attrs = create_attrs_for_span(
                sample_rate=self.sample_rate,
            )
    if not self.zipkin_attrs:
        # This span is inside the context of an existing trace
        existing_zipkin_attrs = get_zipkin_attrs()
        if existing_zipkin_attrs:
            # Child span: new span id, parented to the current span.
            self.zipkin_attrs = ZipkinAttrs(
                trace_id=existing_zipkin_attrs.trace_id,
                span_id=generate_random_64bit_string(),
                parent_span_id=existing_zipkin_attrs.span_id,
                flags=existing_zipkin_attrs.flags,
                is_sampled=existing_zipkin_attrs.is_sampled,
            )
    # If zipkin_attrs are not set up by now, that means this span is not
    # configured to perform logging itself, and it's not in an existing
    # Zipkin trace. That means there's nothing else to do and it can exit
    # early.
    if not self.zipkin_attrs:
        return self
    push_zipkin_attrs(self.zipkin_attrs)
    self.do_pop_attrs = True
    self.start_timestamp = time.time()
    # Set up logging if this is the root span
    if self.perform_logging:
        # Don't set up any logging if we're not sampling
        if not self.zipkin_attrs.is_sampled:
            return self
        endpoint = create_endpoint(self.port, self.service_name)
        self.log_handler = ZipkinLoggerHandler(self.zipkin_attrs)
        self.logging_context = ZipkinLoggingContext(
            self.zipkin_attrs,
            endpoint,
            self.log_handler,
            self.span_name,
            self.transport_handler,
            self.binary_annotations,
            add_logging_annotation=self.add_logging_annotation,
        )
        self.logging_context.start()
        return self
    else:
        # In the sampled case, patch the ZipkinLoggerHandler.
        if self.zipkin_attrs.is_sampled:
            # Be defensive about logging setup. Since ZipkinAttrs are local to
            # the thread, multithreaded frameworks can get in strange states.
            # The logging is not going to be correct in these cases, so we set
            # a flag that turns off logging on __exit__.
            if len(zipkin_logger.handlers) > 0:
                # Put span ID on logging handler. Assume there's only a single
                # handler, since all logging should be set up in this package.
                self.log_handler = zipkin_logger.handlers[0]
                # Store the old parent_span_id, probably None, in case we have
                # nested zipkin_spans
                self.old_parent_span_id = self.log_handler.parent_span_id
                self.log_handler.parent_span_id = self.zipkin_attrs.span_id
    return self
def endpoint(self):
    """Return the endpoint, constructing and caching it on first access.

    Subsequent accesses reuse the instance stored on ``self._endpoint``.
    """
    cached = self._endpoint
    if not cached:
        cached = create_endpoint(**self.endpoint_info)
        self._endpoint = cached
    return cached
def thrift_endpoint():
    """Build the thrift endpoint used by these tests (10.0.0.1:8888)."""
    return thrift.create_endpoint(
        8888,
        "test_service",
        "10.0.0.1",
        None,
    )
def start(self):
    """Enter the new span context. All annotations logged inside this
    context will be attributed to this span. All new spans generated
    inside this context will have this span as their parent.

    In the unsampled case, this context still generates new span IDs and
    pushes them onto the threadlocal stack, so downstream services calls
    made will pass the correct headers. However, the logging handler is
    never attached in the unsampled case, so the spans are never logged.
    """
    self.do_pop_attrs = False
    # If zipkin_attrs are passed in or this span is doing its own sampling,
    # it will need to actually log spans at __exit__.
    self.perform_logging = bool(
        self.zipkin_attrs or
        self.sample_rate is not None or
        self.firehose_handler is not None
    )
    report_root_timestamp = False
    if self.sample_rate is not None:
        if self.zipkin_attrs and not self.zipkin_attrs.is_sampled:
            # Re-roll the sampling decision locally while keeping the
            # incoming trace id.
            report_root_timestamp = True
            self.zipkin_attrs = create_attrs_for_span(
                sample_rate=self.sample_rate,
                trace_id=self.zipkin_attrs.trace_id,
                use_128bit_trace_id=self.use_128bit_trace_id,
            )
        elif not self.zipkin_attrs:
            report_root_timestamp = True
            self.zipkin_attrs = create_attrs_for_span(
                sample_rate=self.sample_rate,
                use_128bit_trace_id=self.use_128bit_trace_id,
            )
    if not self.zipkin_attrs:
        # This span is inside the context of an existing trace
        existing_zipkin_attrs = self._context_stack.get()
        if existing_zipkin_attrs:
            # Child span: new span id, parented to the current span.
            self.zipkin_attrs = ZipkinAttrs(
                trace_id=existing_zipkin_attrs.trace_id,
                span_id=generate_random_64bit_string(),
                parent_span_id=existing_zipkin_attrs.span_id,
                flags=existing_zipkin_attrs.flags,
                is_sampled=existing_zipkin_attrs.is_sampled,
            )
        elif self.firehose_handler is not None:
            # If it has gotten here, the only thing that is causing a trace
            # is the firehose. So we force a trace with sample rate of 0
            report_root_timestamp = True
            self.zipkin_attrs = create_attrs_for_span(
                sample_rate=0.0,
                use_128bit_trace_id=self.use_128bit_trace_id,
            )
    # If zipkin_attrs are not set up by now, that means this span is not
    # configured to perform logging itself, and it's not in an existing
    # Zipkin trace. That means there's nothing else to do and it can exit
    # early.
    if not self.zipkin_attrs:
        return self
    self._context_stack.push(self.zipkin_attrs)
    self.do_pop_attrs = True
    self.start_timestamp = time.time()
    if self.perform_logging:
        # Don't set up any logging if we're not sampling
        if not self.zipkin_attrs.is_sampled and not self.firehose_handler:
            return self
        endpoint = create_endpoint(self.port, self.service_name, self.host)
        client_context = set(self.include) == {'client'}
        self.log_handler = ZipkinLoggerHandler(self.zipkin_attrs)
        self.logging_context = ZipkinLoggingContext(
            self.zipkin_attrs,
            endpoint,
            self.log_handler,
            self.span_name,
            self.transport_handler,
            report_root_timestamp or self.report_root_timestamp_override,
            binary_annotations=self.binary_annotations,
            add_logging_annotation=self.add_logging_annotation,
            client_context=client_context,
            max_span_batch_size=self.max_span_batch_size,
            firehose_handler=self.firehose_handler,
        )
        self.logging_context.start()
        self.logging_configured = True
        return self
    else:
        # Patch the ZipkinLoggerHandler.
        # Be defensive about logging setup. Since ZipkinAttrs are local to
        # the thread, multithreaded frameworks can get in strange states.
        # The logging is not going to be correct in these cases, so we set
        # a flag that turns off logging on __exit__.
        try:
            # Assume there's only a single handler, since all logging
            # should be set up in this package.
            log_handler = zipkin_logger.handlers[0]
        except IndexError:
            return self
        # Make sure it's not a NullHandler or something
        if not isinstance(log_handler, ZipkinLoggerHandler):
            return self
        # Put span ID on logging handler.
        self.log_handler = zipkin_logger.handlers[0]
        # Store the old parent_span_id, probably None, in case we have
        # nested zipkin_spans
        self.old_parent_span_id = self.log_handler.parent_span_id
        self.log_handler.parent_span_id = self.zipkin_attrs.span_id
        self.logging_configured = True
        return self
def check_v1_thrift(obj, zipkin_attrs, inner_span_id, ts):
    # Decode the binary thrift payload and compare all three decoded spans
    # (root client span, inner span, producer span) against hand-built
    # expected thrift spans.
    inner_span, producer_span, root_span = _decode_binary_thrift_objs(obj)
    endpoint = thrift.create_endpoint(
        port=8080,
        service_name="test_service_name",
        ipv4="10.0.0.0",
    )
    binary_annotations = thrift.binary_annotation_list_builder(
        {"some_key": "some_value"},
        endpoint,
    )
    # The root span is expected to carry an 'sa' (server address) annotation
    # pointing at an IPv6 destination service.
    binary_annotations.append(
        thrift.create_binary_annotation(
            "sa",
            "\x01",
            zipkin_core.AnnotationType.BOOL,
            thrift.create_endpoint(
                port=8888,
                service_name="sa_service",
                ipv6="2001:0db8:85a3:0000:0000:8a2e:0370:7334",
            ),
        )
    )
    expected_root = thrift.create_span(
        span_id=zipkin_attrs.span_id,
        parent_span_id=zipkin_attrs.parent_span_id,
        trace_id=zipkin_attrs.trace_id,
        span_name="test_span_name",
        annotations=thrift.annotation_list_builder(
            OrderedDict([("cs", ts), ("cr", ts + 10)]),
            endpoint,
        ),
        binary_annotations=binary_annotations,
        timestamp_s=None,
        duration_s=None,
    )
    # py.test diffs of thrift Spans are pretty useless and hide many things
    # These prints would only appear on stdout if the test fails and help comparing
    # the 2 spans.
    print(root_span)
    print(expected_root)
    assert root_span == expected_root
    expected_inner = thrift.create_span(
        span_id=inner_span_id,
        parent_span_id=zipkin_attrs.span_id,
        trace_id=zipkin_attrs.trace_id,
        span_name="inner_span",
        annotations=thrift.annotation_list_builder(
            OrderedDict([("ws", ts)]),
            endpoint,
        ),
        binary_annotations=[],
        timestamp_s=ts,
        duration_s=5,
    )
    # py.test diffs of thrift Spans are pretty useless and hide many things
    # These prints would only appear on stdout if the test fails and help comparing
    # the 2 spans.
    print(inner_span)
    print(expected_inner)
    assert inner_span == expected_inner
    expected_producer = thrift.create_span(
        span_id=inner_span_id,
        parent_span_id=zipkin_attrs.span_id,
        trace_id=zipkin_attrs.trace_id,
        span_name="producer_span",
        annotations=thrift.annotation_list_builder(
            OrderedDict([("ms", ts)]),
            endpoint,
        ),
        binary_annotations=[],
        timestamp_s=ts,
        duration_s=10,
    )
    # py.test diffs of thrift Spans are pretty useless and hide many things
    # These prints would only appear on stdout if the test fails and help comparing
    # the 2 spans.
    print(producer_span)
    print(expected_producer)
    assert producer_span == expected_producer
def add_span(
    self,
    span_id,
    parent_span_id,
    trace_id,
    span_name,
    annotations,
    binary_annotations,
    timestamp_s,
    duration_s,
    endpoint,
    sa_endpoint,
):
    """Encode one span to thrift and queue it for transport.

    If appending the encoded span would push the pending batch over either
    the byte-size limit (``max_payload_bytes``) or the span-count limit
    (``max_portion_size``), the current batch is flushed first.
    """
    host = thrift.create_endpoint(
        endpoint.port,
        endpoint.service_name,
        endpoint.ipv4,
        endpoint.ipv6,
    )
    thrift_annotations = thrift.annotation_list_builder(annotations, host)
    # Binary annotations can be set through debug messages or the
    # set_extra_binary_annotations registry setting.
    thrift_binary_annotations = thrift.binary_annotation_list_builder(
        binary_annotations,
        host,
    )
    if sa_endpoint is not None:
        # Record the destination as an 'sa' (server address) annotation.
        sa_host = thrift.create_endpoint(
            sa_endpoint.port,
            sa_endpoint.service_name,
            sa_endpoint.ipv4,
            sa_endpoint.ipv6,
        )
        thrift_binary_annotations.append(thrift.create_binary_annotation(
            key=thrift.zipkin_core.SERVER_ADDR,
            value=thrift.SERVER_ADDR_VAL,
            annotation_type=thrift.zipkin_core.AnnotationType.BOOL,
            host=sa_host,
        ))
    encoded_span = thrift.span_to_bytes(thrift.create_span(
        span_id,
        parent_span_id,
        trace_id,
        span_name,
        thrift_annotations,
        thrift_binary_annotations,
        timestamp_s,
        duration_s,
    ))
    # Flush the pending batch before appending if this span would exceed
    # either limit.
    over_size = (
        self.max_payload_bytes is not None
        and self.current_size + len(encoded_span) > self.max_payload_bytes
    )
    over_count = len(self.queue) >= self.max_portion_size
    if over_size or over_count:
        self.flush()
    self.queue.append(encoded_span)
    self.current_size += len(encoded_span)