Example No. 1
def test_add_sa_binary_annotation():
    zipkin_attrs = ZipkinAttrs(
        trace_id='0',
        span_id='1',
        parent_span_id=None,
        flags='0',
        is_sampled=True,
    )
    context = zipkin.zipkin_span(
        service_name='my_service',
        span_name='span_name',
        zipkin_attrs=zipkin_attrs,
        transport_handler=MockTransportHandler(),
        port=5,
    )

    with context:
        assert context.logging_context.sa_endpoint is None
        context.add_sa_binary_annotation(
            port=123,
            service_name='test_service',
            host='1.2.3.4',
        )
        expected_sa_endpoint = _encoding_helpers.create_endpoint(
            port=123,
            service_name='test_service',
            host='1.2.3.4',
        )
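        # The outer span performs its own logging (it was given zipkin_attrs
        # and a transport handler), so the 'sa' endpoint lands on its logging
        # context.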
        assert context.logging_context.sa_endpoint == \
            expected_sa_endpoint

        nested_context = zipkin.zipkin_span(
            service_name='my_service',
            span_name='nested_span',
        )
        with nested_context:
            nested_context.add_sa_binary_annotation(
                port=456,
                service_name='nested_service',
                host='5.6.7.8',
            )
            expected_nested_sa_endpoint = _encoding_helpers.create_endpoint(
                port=456,
                service_name='nested_service',
                host='5.6.7.8',
            )
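            # The nested span does not set up its own logging context, so the
            # 'sa' endpoint is stored on the span itself (see
            # add_sa_binary_annotation in Example No. 9).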
            assert nested_context.sa_endpoint == \
                expected_nested_sa_endpoint
Example No. 2
def test_batch_sender_add_span_not_sampled_with_firehose(
        add_span_mock, flush_mock, time_mock):
    attr = ZipkinAttrs(
        trace_id='0000000000000001',
        span_id='0000000000000002',
        parent_span_id=None,
        flags=None,
        is_sampled=False,
    )
    log_handler = logging_helper.ZipkinLoggerHandler(attr)
    transport_handler = mock.Mock()
    firehose_handler = mock.Mock()
    context = logging_helper.ZipkinLoggingContext(
        zipkin_attrs=attr,
        endpoint=_encoding_helpers.create_endpoint(80, 'test_server',
                                                   '127.0.0.1'),
        log_handler=log_handler,
        span_name='span_name',
        transport_handler=transport_handler,
        report_root_timestamp=False,
        firehose_handler=firehose_handler,
    )
    context.start_timestamp = 24
    context.response_status_code = 200

    context.binary_annotations_dict = {'k': 'v'}
    time_mock.return_value = 42

    context.log_spans()
    assert add_span_mock.call_count == 1
    assert flush_mock.call_count == 1
Example No. 3
def test_create_endpoint_defaults_service_name(gethostbyname):
    gethostbyname.return_value = '0.0.0.0'
    endpoint = _encoding_helpers.create_endpoint(port=8080)

    assert endpoint.service_name == 'unknown'
    assert endpoint.port == 8080
    assert endpoint.ipv4 == '0.0.0.0'
    assert endpoint.ipv6 is None
Example No. 4
def test_create_endpoint_correct_host_ip(gethostbyname):
    gethostbyname.return_value = '1.2.3.4'
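    # An explicitly passed host should take precedence over the resolved
    # address, so the mocked gethostbyname value must not appear in the
    # endpoint below.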
    endpoint = _encoding_helpers.create_endpoint(host='0.0.0.0')

    assert endpoint.service_name == 'unknown'
    assert endpoint.port == 0
    assert endpoint.ipv4 == '0.0.0.0'
    assert endpoint.ipv6 is None
Example No. 5
def test_malformed_host():
    endpoint = _encoding_helpers.create_endpoint(
        port=8080,
        service_name='foo',
        host='80',
    )
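    # '80' is neither a valid IPv4 nor a valid IPv6 address, so both address
    # fields are expected to stay unset.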
    assert endpoint.service_name == 'foo'
    assert endpoint.port == 8080
    assert endpoint.ipv4 is None
    assert endpoint.ipv6 is None
Example No. 6
def test_create_endpoint_ipv6():
    endpoint = _encoding_helpers.create_endpoint(
        port=8080,
        service_name='foo',
        host='2001:0db8:85a3:0000:0000:8a2e:0370:7334',
    )
    assert endpoint.service_name == 'foo'
    assert endpoint.port == 8080
    assert endpoint.ipv4 is None
    assert endpoint.ipv6 == '2001:0db8:85a3:0000:0000:8a2e:0370:7334'
Example No. 7
def test_create_endpoint_defaults_localhost(gethostbyname):
    gethostbyname.side_effect = socket.gaierror

    endpoint = _encoding_helpers.create_endpoint(
        port=8080,
        service_name='foo',
    )
    assert endpoint.service_name == 'foo'
    assert endpoint.port == 8080
    assert endpoint.ipv4 == '127.0.0.1'
    assert endpoint.ipv6 is None
Example No. 8
def context():
    attr = ZipkinAttrs(None, None, None, None, False)
    log_handler = logging_helper.ZipkinLoggerHandler(attr)
    return logging_helper.ZipkinLoggingContext(
        zipkin_attrs=attr,
        endpoint=_encoding_helpers.create_endpoint(80, 'test_server',
                                                   '127.0.0.1'),
        log_handler=log_handler,
        span_name='span_name',
        transport_handler=MockTransportHandler(),
        report_root_timestamp=False,
    )
Example No. 9
    def add_sa_binary_annotation(
        self,
        port=0,
        service_name='unknown',
        host='127.0.0.1',
    ):
        """Adds a 'sa' binary annotation to the current span.

        'sa' binary annotations are useful for situations where you need to
        log where a request is going but the destination doesn't support
        Zipkin.

        Note that the span must have 'cs'/'cr' annotations.

        :param port: The port number of the destination
        :type port: int
        :param service_name: The name of the destination service
        :type service_name: str
        :param host: Host address of the destination
        :type host: str
        """
        if not self.zipkin_attrs:
            return

        if 'client' not in self.include:
            # TODO: trying to set a sa binary annotation for a non-client span
            # should result in a logged error
            return

        sa_endpoint = create_endpoint(
            port=port,
            service_name=service_name,
            host=host,
        )
        if not self.logging_context:
            if self.sa_endpoint is not None:
                raise ValueError('SA annotation already set.')
            self.sa_endpoint = sa_endpoint
        else:
            if self.logging_context.sa_endpoint is not None:
                raise ValueError('SA annotation already set.')
            self.logging_context.sa_endpoint = sa_endpoint
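
A minimal usage sketch of add_sa_binary_annotation (illustrative only, not taken
from this page's examples): the service and host names are placeholders,
sample_rate is assumed to be a percentage, and the import is assumed to be the
same one the tests above rely on (from py_zipkin import zipkin).

def call_downstream(transport_handler):
    # Create a self-sampling span; the default 'include' is assumed to
    # contain 'client', as implied by Example No. 1.
    context = zipkin.zipkin_span(
        service_name='my_service',
        span_name='call_downstream',
        transport_handler=transport_handler,
        sample_rate=100.0,  # assumed to be a percentage
    )
    with context:
        # ... issue the outgoing request here ...
        # Record the destination even though it emits no Zipkin spans itself.
        context.add_sa_binary_annotation(
            port=443,
            service_name='downstream_service',
            host='10.0.0.1',
        )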
Example No. 10
def test_batch_sender_add_span_not_called_if_not_sampled(
        add_span_mock, flush_mock):
    attr = ZipkinAttrs(
        trace_id='0000000000000001',
        span_id='0000000000000002',
        parent_span_id=None,
        flags=None,
        is_sampled=False,
    )
    log_handler = logging_helper.ZipkinLoggerHandler(attr)
    transport_handler = mock.Mock()
    context = logging_helper.ZipkinLoggingContext(
        zipkin_attrs=attr,
        endpoint=_encoding_helpers.create_endpoint(80, 'test_server',
                                                   '127.0.0.1'),
        log_handler=log_handler,
        span_name='span_name',
        transport_handler=transport_handler,
        report_root_timestamp=False,
    )
    context.log_spans()
    assert add_span_mock.call_count == 0
    assert flush_mock.call_count == 0
Example No. 11
def test_adding_sa_binary_annotation_without_sampling():
    """Even if we're not sampling, we still want to add binary annotations
    since they're useful for firehose traces.
    """
    context = zipkin.zipkin_span(
        service_name='my_service',
        span_name='span_name',
        transport_handler=MockTransportHandler(),
        sample_rate=0.0,
    )
    with context:
        context.add_sa_binary_annotation(
            port=123,
            service_name='test_service',
            host='1.2.3.4',
        )
        expected_sa_endpoint = _encoding_helpers.create_endpoint(
            port=123,
            service_name='test_service',
            host='1.2.3.4',
        )

        assert context.sa_endpoint == expected_sa_endpoint
Example No. 12
def test_zipkin_logging_server_context_log_spans(add_span_mock, flush_mock,
                                                 time_mock, fake_endpoint):
    # This lengthy function tests that the logging context properly
    # logs both client and server spans, while attaching extra annotations
    # logged throughout the context of the trace.
    trace_id = '000000000000000f'
    parent_span_id = '0000000000000001'
    server_span_id = '0000000000000002'
    client_span_id = '0000000000000003'
    client_span_name = 'breadcrumbs'
    client_svc_name = 'svc'
    attr = ZipkinAttrs(
        trace_id=trace_id,
        span_id=server_span_id,
        parent_span_id=parent_span_id,
        flags=None,
        is_sampled=True,
    )
    handler = logging_helper.ZipkinLoggerHandler(attr)
    extra_server_annotations = {
        'parent_span_id': None,
        'annotations': {
            'foo': 1
        },
        'binary_annotations': {
            'what': 'whoa'
        },
    }
    extra_client_annotations = {
        'parent_span_id': client_span_id,
        'annotations': {
            'ann1': 1
        },
        'binary_annotations': {
            'bann1': 'aww'
        },
    }
    handler.extra_annotations = [
        extra_server_annotations,
        extra_client_annotations,
    ]
    handler.client_spans = [{
        'span_id': client_span_id,
        'parent_span_id': None,
        'span_name': client_span_name,
        'service_name': client_svc_name,
        'annotations': {
            'ann2': 2,
            'cs': 26,
            'cr': 30
        },
        'binary_annotations': {
            'bann2': 'yiss'
        },
    }]

    transport_handler = mock.Mock()

    context = logging_helper.ZipkinLoggingContext(
        zipkin_attrs=attr,
        endpoint=fake_endpoint,
        log_handler=handler,
        span_name='GET /foo',
        transport_handler=transport_handler,
        report_root_timestamp=True,
    )

    context.start_timestamp = 24
    context.response_status_code = 200

    context.binary_annotations_dict = {'k': 'v'}
    time_mock.return_value = 42

    expected_server_annotations = {'foo': 1, 'sr': 24, 'ss': 42}
    expected_server_bin_annotations = {'k': 'v', 'what': 'whoa'}

    expected_client_annotations = {'ann1': 1, 'ann2': 2, 'cs': 26, 'cr': 30}
    expected_client_bin_annotations = {'bann1': 'aww', 'bann2': 'yiss'}
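    # Sanity check on the values asserted below: the server span's duration
    # should be ss - sr = 42 - 24 = 18 with timestamp 24, and the client
    # span's duration should be cr - cs = 30 - 26 = 4 with timestamp 26.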

    context.log_spans()
    client_log_call, server_log_call = add_span_mock.call_args_list
    assert server_log_call[1] == {
        'span_id': server_span_id,
        'parent_span_id': parent_span_id,
        'trace_id': trace_id,
        'span_name': 'GET /foo',
        'annotations': expected_server_annotations,
        'binary_annotations': expected_server_bin_annotations,
        'duration_s': 18,
        'timestamp_s': 24,
        'endpoint': fake_endpoint,
        'sa_endpoint': None,
    }
    assert client_log_call[1] == {
        'span_id': client_span_id,
        'parent_span_id': server_span_id,
        'trace_id': trace_id,
        'span_name': client_span_name,
        'annotations': expected_client_annotations,
        'binary_annotations': expected_client_bin_annotations,
        'duration_s': 4,
        'timestamp_s': 26,
        'endpoint': _encoding_helpers.create_endpoint(80, 'svc', '127.0.0.1'),
        'sa_endpoint': None,
    }
    assert flush_mock.call_count == 1
Example No. 13
    def start(self):
        """Enter the new span context. All annotations logged inside this
        context will be attributed to this span. All new spans generated
        inside this context will have this span as their parent.

        In the unsampled case, this context still generates new span IDs and
        pushes them onto the threadlocal stack, so that calls made to
        downstream services carry the correct headers. However, the logging
        handler is never attached in the unsampled case, so the spans are
        never logged.
        """
        self.do_pop_attrs = False
        # If zipkin_attrs are passed in or this span is doing its own sampling,
        # it will need to actually log spans at __exit__.
        perform_logging = bool(self.zipkin_attrs
                               or self.sample_rate is not None
                               or self.firehose_handler is not None)
        report_root_timestamp = False

        if self.sample_rate is not None:
            if self.zipkin_attrs and not self.zipkin_attrs.is_sampled:
                report_root_timestamp = True
                self.zipkin_attrs = create_attrs_for_span(
                    sample_rate=self.sample_rate,
                    trace_id=self.zipkin_attrs.trace_id,
                    use_128bit_trace_id=self.use_128bit_trace_id,
                )
            elif not self.zipkin_attrs:
                report_root_timestamp = True
                self.zipkin_attrs = create_attrs_for_span(
                    sample_rate=self.sample_rate,
                    use_128bit_trace_id=self.use_128bit_trace_id,
                )

        if not self.zipkin_attrs:
            # This span is inside the context of an existing trace
            existing_zipkin_attrs = self._context_stack.get()
            if existing_zipkin_attrs:
                self.zipkin_attrs = ZipkinAttrs(
                    trace_id=existing_zipkin_attrs.trace_id,
                    span_id=generate_random_64bit_string(),
                    parent_span_id=existing_zipkin_attrs.span_id,
                    flags=existing_zipkin_attrs.flags,
                    is_sampled=existing_zipkin_attrs.is_sampled,
                )
            elif self.firehose_handler is not None:
                # If we've gotten here, the only thing causing a trace is the
                # firehose handler, so force a trace with a sample rate of 0.
                report_root_timestamp = True
                self.zipkin_attrs = create_attrs_for_span(
                    sample_rate=0.0,
                    use_128bit_trace_id=self.use_128bit_trace_id,
                )

        # If zipkin_attrs are not set up by now, that means this span is not
        # configured to perform logging itself, and it's not in an existing
        # Zipkin trace. That means there's nothing else to do and it can exit
        # early.
        if not self.zipkin_attrs:
            return self

        self._context_stack.push(self.zipkin_attrs)
        self.do_pop_attrs = True

        self.start_timestamp = time.time()

        if perform_logging:
            # Don't set up any logging if we're not sampling and there's no
            # firehose handler.
            if not self.zipkin_attrs.is_sampled and not self.firehose_handler:
                return self
            endpoint = create_endpoint(self.port, self.service_name, self.host)
            client_context = set(self.include) == {'client'}
            self.log_handler = ZipkinLoggerHandler(self.zipkin_attrs)
            self.logging_context = ZipkinLoggingContext(
                self.zipkin_attrs,
                endpoint,
                self.log_handler,
                self.span_name,
                self.transport_handler,
                report_root_timestamp or self.report_root_timestamp_override,
                binary_annotations=self.binary_annotations,
                add_logging_annotation=self.add_logging_annotation,
                client_context=client_context,
                max_span_batch_size=self.max_span_batch_size,
                firehose_handler=self.firehose_handler,
            )
            self.logging_context.start()
            self.logging_configured = True
            return self
        else:
            # Patch the ZipkinLoggerHandler.
            # Be defensive about logging setup. Since ZipkinAttrs are local to
            # the thread, multithreaded frameworks can get in strange states.
            # The logging is not going to be correct in these cases, so we set
            # a flag that turns off logging on __exit__.
            try:
                # Assume there's only a single handler, since all logging
                # should be set up in this package.
                log_handler = zipkin_logger.handlers[0]
            except IndexError:
                return self
            # Make sure it's not a NullHandler or something
            if not isinstance(log_handler, ZipkinLoggerHandler):
                return self
            # Put span ID on logging handler.
            self.log_handler = log_handler
            # Store the old parent_span_id, probably None, in case we have
            # nested zipkin_spans
            self.old_parent_span_id = self.log_handler.parent_span_id
            self.log_handler.parent_span_id = self.zipkin_attrs.span_id
            self.logging_configured = True
            return self
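
A rough usage sketch tying start() together with the tests above (illustrative
only; the names are placeholders and sample_rate is assumed to be a percentage):
the outer span performs its own sampling and sets up the logging context, while
the nested span finds the outer attrs on the context stack and becomes its
child, as the docstring describes.

def handle_request(transport_handler):
    with zipkin.zipkin_span(
        service_name='my_service',
        span_name='handle_request',
        transport_handler=transport_handler,
        sample_rate=100.0,  # the outer span does its own sampling and logging
    ):
        # The nested span needs no transport or sample rate: start() finds
        # the outer span's attrs on the context stack and parents itself to
        # them.
        with zipkin.zipkin_span(
            service_name='my_service',
            span_name='nested_work',
        ):
            pass  # work done here is attributed to the nested span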