def test_finish_set_span_duration():
    # A span whose duration was assigned manually must keep that value
    # after ``finish()`` instead of deriving one from timestamps.
    span = Span(tracer=None, name='test.span')
    span.duration = 1337.0
    span.finish()
    assert span.duration == 1337.0
def test_tags_not_string():
    # A tag value whose repr() raises must not break ``set_tag``.
    class Unreprable(object):
        def __repr__(self):
            1 / 0

    span = Span(tracer=None, name="test.span")
    span.set_tag("a", Unreprable())
def test_set_numpy_metric():
    # numpy integral metric values should be coerced to plain floats.
    try:
        import numpy as np
    except ImportError:
        raise SkipTest("numpy not installed")

    span = Span(tracer=None, name="test.span")
    span.set_metric("a", np.int64(1))
    eq_(span.get_metric("a"), 1)
    eq_(type(span.get_metric("a")), float)
def test_finish_called_multiple_times():
    # Only the first ``finish()`` call should record the span.
    tracer = DummyTracer()
    context = Context()
    span = Span(tracer, 'bar', context=context)
    context.add_span(span)

    span.finish()
    span.finish()
    assert tracer.spans_recorded == 1
def test_span_boolean_err():
    # A truthy ``error`` flag must serialize as the integer 1.
    span = Span(tracer=None, name="foo.bar", service="s", resource="r")
    span.error = True
    span.finish()

    data = span.to_dict()
    assert data
    eq_(data["error"], 1)
    eq_(type(data["error"]), int)
def test_traceback_with_error():
    # ``set_traceback`` inside an except block fills in error flag and tags.
    span = Span(None, "test.span")
    try:
        1 / 0
    except ZeroDivisionError:
        span.set_traceback()
    else:
        assert 0, "should have failed"

    assert span.error
    assert 'by zero' in span.get_tag(errors.ERROR_MSG)
    assert "ZeroDivisionError" in span.get_tag(errors.ERROR_TYPE)
def test_clone(self):
    # A cloned context copies propagation fields but starts with an
    # empty trace and no finished spans.
    ctx = Context()
    ctx.sampling_priority = 2

    # manually build a root-child trace
    root = Span(tracer=None, name='root')
    child = Span(tracer=None, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    child._parent = root
    ctx.add_span(root)
    ctx.add_span(child)

    cloned = ctx.clone()
    for attr in ('_parent_trace_id', '_parent_span_id', '_sampled',
                 '_sampling_priority', '_current_span'):
        eq_(getattr(cloned, attr), getattr(ctx, attr))
    eq_(cloned._trace, [])
    eq_(cloned._finished_spans, 0)
def test_log_unfinished_spans(self, log):
    # Finishing the root while children are pending should log the count
    # of unfinished spans plus one line per pending child.
    tracer = get_dummy_tracer()
    tracer.debug_logging = True
    ctx = Context()

    # manually build a root with two children
    root = Span(tracer=tracer, name='root')
    child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
    child_1._parent = root
    child_2._parent = root
    for span in (root, child_1, child_2):
        ctx.add_span(span)

    # close only the parent
    root.finish()
    ok_(ctx.is_finished() is False)

    # the last three log calls carry the count and one entry per child
    unfinished_count = log.call_args_list[-3][0][2]
    first_child_msg = log.call_args_list[-2][0][1]
    second_child_msg = log.call_args_list[-1][0][1]
    eq_(2, unfinished_count)
    ok_('name child_1' in first_child_msg)
    ok_('name child_2' in second_child_msg)
    ok_('duration 0.000000s' in first_child_msg)
    ok_('duration 0.000000s' in second_child_msg)
def test_set_invalid_metric():
    # Values that cannot be represented as finite numbers are dropped.
    span = Span(tracer=None, name="test.span")
    invalid = [None, {}, [], span, "quarante-douze", float("nan"), float("inf"), 1j]
    for index, value in enumerate(invalid):
        key = str(index)
        span.set_metric(key, value)
        eq_(span.get_metric(key), None)
def test_span_to_dict_sub():
    # Child spans serialize their ids, metadata, type, and a zero error flag.
    parent = Span(tracer=None, name="test.span", service="s", resource="r")
    span = Span(tracer=None, name="test.span", service="s", resource="r")
    span._parent = parent
    span.span_type = "foo"
    span.set_tag("a", "1")
    span.set_meta("b", "2")
    span.finish()

    data = span.to_dict()
    assert data
    eq_(data["span_id"], span.span_id)
    eq_(data["trace_id"], span.trace_id)
    eq_(data["parent_id"], span.parent_id)
    eq_(data["meta"], {"a": "1", "b": "2"})
    eq_(data["type"], "foo")
    eq_(data["error"], 0)
    eq_(type(data["error"]), int)
def test_tags():
    # All tag values are stored as strings in the ``meta`` section.
    span = Span(tracer=None, name="test.span")
    for key, value in (("a", "a"), ("b", 1), ("c", "1")):
        span.set_tag(key, value)

    data = span.to_dict()
    eq_(data["meta"], {"a": "a", "b": "1", "c": "1"})
def test_ctx_mgr():
    # Using a span as a context manager records duration and, on an
    # exception, the error flag plus message/type/stack tags.
    tracer = DummyTracer()
    span = Span(tracer, "bar")
    assert not span.duration
    assert not span.error

    boom = Exception("boo")
    try:
        with span:
            time.sleep(0.01)
            raise boom
    except Exception as caught:
        eq_(caught, boom)
        assert span.duration > 0, span.duration
        assert span.error
        eq_(span.get_tag(errors.ERROR_MSG), "boo")
        assert "Exception" in span.get_tag(errors.ERROR_TYPE)
        assert span.get_tag(errors.ERROR_STACK)
    else:
        assert 0, "should have failed"
def test_traceback_without_error():
    # Without an active exception only the stack tag is filled in.
    span = Span(None, "test.span")
    span.set_traceback()

    assert not span.error
    assert not span.get_tag(errors.ERROR_MSG)
    assert not span.get_tag(errors.ERROR_TYPE)
    assert "in test_traceback_without_error" in span.get_tag(errors.ERROR_STACK)
def test_log_unfinished_spans_when_ok(self, log):
    # When the whole trace is finished, the "unfinished spans" message
    # must never appear even with debug logging enabled.
    tracer = get_dummy_tracer()
    tracer.debug_logging = True
    ctx = Context()

    # manually build a root-child trace
    root = Span(tracer=tracer, name='root')
    child = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    child._parent = root
    ctx.add_span(root)
    ctx.add_span(child)

    # close the full trace
    child.finish()
    root.finish()

    # no log call mentioned unfinished spans
    for call, _ in log.call_args_list:
        ok_('the trace has %d unfinished spans' not in call[0])
def test_log_unfinished_spans_disabled(self, log):
    # With debug logging disabled, pending spans must not be reported.
    tracer = get_dummy_tracer()
    tracer.debug_logging = False
    ctx = Context()

    # manually build a root with two children
    root = Span(tracer=tracer, name='root')
    child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
    child_1._parent = root
    child_2._parent = root
    for span in (root, child_1, child_2):
        ctx.add_span(span)

    # close only the parent
    root.finish()
    ok_(ctx.is_finished() is False)

    # nothing about unfinished spans was logged
    for call, _ in log.call_args_list:
        ok_('the trace has %d unfinished spans' not in call[0])
def test_set_tag_env(self):
    # The env tag round-trips through set_tag/get_tag.
    span = Span(tracer=None, name="test.span")
    span.set_tag(ENV_KEY, "prod")
    assert span.get_tag(ENV_KEY) == "prod"
def test_numeric_tags(self):
    # Numbers within the 2**53 float-safe range become metrics; integers
    # beyond that range fall back to string entries in ``meta``.
    span = Span(tracer=None, name="test.span")
    as_metrics = {
        "negative": -1,
        "zero": 0,
        "positive": 1,
        "large_int": 2 ** 53,
        "large_negative_int": -(2 ** 53),
        "float": 12.3456789,
        "negative_float": -12.3456789,
        "large_float": 2.0 ** 53,
        "really_large_float": (2.0 ** 53) + 1,
    }
    for key, value in as_metrics.items():
        span.set_tag(key, value)
    # these two exceed the float-safe integer range
    span.set_tag("really_large_int", (2 ** 53) + 1)
    span.set_tag("really_large_negative_int", -((2 ** 53) + 1))

    data = span.to_dict()
    assert data["meta"] == {
        "really_large_int": str((2 ** 53) + 1),
        "really_large_negative_int": str(-((2 ** 53) + 1)),
    }
    assert data["metrics"] == as_metrics
def test_duration_int(self):
    # ``duration_ns`` is an int and ``duration`` a float, both derived
    # from the start/finish timestamps.
    span = Span(tracer=None, name="foo.bar", service="s", resource="r")
    span.finish()
    assert isinstance(span.duration_ns, int)
    assert isinstance(span.duration, float)

    # (start, finish_time, expected ns, expected seconds)
    cases = [
        (123, 123.2, 200000000, 0.2),
        (123.1, 123.2, 100000000, 0.1),
        (122, 123, 1000000000, 1),
    ]
    for start, finish, expected_ns, expected_s in cases:
        span = Span(tracer=None, name="foo.bar", service="s", resource="r", start=start)
        span.finish(finish_time=finish)
        assert span.duration_ns == expected_ns
        assert span.duration == expected_s
def test_duration_zero(self):
    # Finishing at the exact start time yields a zero duration.
    span = Span(tracer=None, name="foo.bar", service="s", resource="r", start=123)
    span.finish(finish_time=123)
    assert span.duration_ns == 0
    assert span.duration == 0
def test_numeric_tags_bad_value(self):
    # A non-numeric analytics sample rate must not create a metrics section.
    span = Span(tracer=None, name="test.span")
    span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, "Hello")

    data = span.to_dict()
    assert data
    assert "metrics" not in data
def test_set_valid_metrics():
    # ints, floats, big ints, and numeric strings are all accepted as metrics.
    span = Span(tracer=None, name="test.span")
    samples = {
        "a": 0,
        "b": -12,
        "c": 12.134,
        "d": 1231543543265475686787869123,
        "e": "12.34",
    }
    for key, value in samples.items():
        span.set_metric(key, value)

    # the numeric string is stored as a float
    expected = dict(samples, e=12.34)
    eq_(span.to_dict()["metrics"], expected)
def test_set_tag_measured_no_value():
    # Setting the measured key with no value marks the span as measured.
    span = Span(tracer=None, name="test.span")
    span.set_tag(SPAN_MEASURED_KEY)
    assert_is_measured(span)
def test_finished(self):
    # a Context is finished once every span added to it has been closed
    ctx = Context()
    span = Span(tracer=None, name="fake_span")
    ctx.add_span(span)
    ctx.close_span(span)
    # bug fix: the original test exercised close_span but never asserted
    # the finished state it claims to verify
    assert ctx.is_finished()
def test_current_span(self):
    # the most recently added span is the current active span
    context = Context()
    active = Span(tracer=None, name='fake_span')
    context.add_span(active)
    eq_(active, context.get_current_span())
def do_write(i):
    # write a single-span trace whose name encodes the worker index
    trace = [Span(None, str(i))]
    writer.write(trace)
def test_flush_connection_timeout(endpoint_test_timeout_server):
    # Flushing against a timing-out endpoint surfaces socket.timeout.
    agent_url = "http://%s:%s" % (_HOST, _TIMEOUT_PORT)
    writer = AgentWriter(agent_url=agent_url)
    with pytest.raises(socket.timeout):
        writer._encoder.put([Span(None, "foobar")])
        writer.flush_queue(raise_exc=True)
def test_span_ignored_exceptions():
    # An ignored exception type leaves the span unflagged...
    span = Span(None, None)
    span._ignore_exception(ValueError)

    with pytest.raises(ValueError):
        with span:
            raise ValueError()

    assert span.error == 0
    for tag in (errors.ERROR_MSG, errors.ERROR_TYPE, errors.ERROR_STACK):
        assert span.get_tag(tag) is None

    # ...but a non-ignored type still marks the span as errored
    span = Span(None, None)
    span._ignore_exception(ValueError)

    with pytest.raises(ValueError):
        with span:
            raise ValueError()
    with pytest.raises(RuntimeError):
        with span:
            raise RuntimeError()

    assert span.error == 1
    assert span.get_tag(errors.ERROR_MSG) is not None
    assert "RuntimeError" in span.get_tag(errors.ERROR_TYPE)
    assert span.get_tag(errors.ERROR_STACK) is not None
def test_set_tag_measured(value, assertion):
    # parametrized: each measured-key value maps to the supplied assertion
    span = Span(tracer=None, name="test.span")
    span.set_tag(SPAN_MEASURED_KEY, value)
    assertion(span)
def test_set_tag_measured_not_set():
    # spans start out unmeasured by default
    span = Span(tracer=None, name="test.span")
    assert_is_not_measured(span)
def test_current_root_span(self):
    # with a single span in the context, that span is also the root
    context = Context()
    root = Span(tracer=None, name="fake_span")
    context.add_span(root)
    assert root == context.get_current_root_span()
def test_span_unicode_set_tag():
    # unicode keys and values are accepted by both tag-setting APIs
    span = Span(None, None)
    span.set_tag("key", u"😌")
    span.set_tag("😐", u"😌")
    span._set_str_tag("key", u"😌")
    span._set_str_tag(u"😐", u"😌")
def test_numeric_tags_bad_value(self):
    # a non-numeric sample-rate value must not produce a metrics section
    span = Span(tracer=None, name='test.span')
    span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 'Hello')

    serialized = span.to_dict()
    assert serialized
    assert 'metrics' not in serialized
def test_keep_rate(self):
    # Verify that KEEP_SPANS_RATE_KEY on each written root span reports
    # the historical share of traces successfully written, as tracked by
    # the writer's simple-moving-average drop counter (_drop_sma).
    statsd = mock.Mock()
    writer_run_periodic = mock.Mock()
    writer_put = mock.Mock()
    writer_put.return_value = Response(status=200)
    writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
    # stub out background flushing and the actual HTTP put
    writer.run_periodic = writer_run_periodic
    writer._put = writer_put

    # 4 small traces of 5 spans each — these fit the payload limit
    traces = [
        [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
        for i in range(4)
    ]

    # 4 oversized traces (huge names, 1024 spans) — these get dropped
    traces_too_big = [
        [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
        for i in range(4)
    ]

    # 1. We write 4 traces successfully.
    for trace in traces:
        writer.write(trace)
    writer.flush_queue()

    payload = msgpack.unpackb(writer_put.call_args.args[0])
    # No previous drops.
    assert 0.0 == writer._drop_sma.get()
    # 4 traces written.
    assert 4 == len(payload)
    # 100% of traces kept (refers to the past).
    # No traces sent before now so 100% kept.
    for trace in payload:
        assert 1.0 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

    # 2. We fail to write 4 traces because of size limitation.
    for trace in traces_too_big:
        writer.write(trace)
    writer.flush_queue()

    # 50% of traces were dropped historically.
    # 4 successfully written before and 4 dropped now.
    assert 0.5 == writer._drop_sma.get()
    # put not called since no new traces are available.
    writer_put.assert_called_once()

    # 3. We write 2 traces successfully.
    for trace in traces[:2]:
        writer.write(trace)
    writer.flush_queue()

    payload = msgpack.unpackb(writer_put.call_args.args[0])
    # 40% of traces were dropped historically.
    assert 0.4 == writer._drop_sma.get()
    # 2 traces written.
    assert 2 == len(payload)
    # 50% of traces kept (refers to the past).
    # We had 4 successfully written and 4 dropped.
    for trace in payload:
        assert 0.5 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

    # 4. We write 1 trace successfully and fail to write 3.
    writer.write(traces[0])
    for trace in traces_too_big[:3]:
        writer.write(trace)
    writer.flush_queue()

    payload = msgpack.unpackb(writer_put.call_args.args[0])
    # 50% of traces were dropped historically.
    assert 0.5 == writer._drop_sma.get()
    # 1 trace written.
    assert 1 == len(payload)
    # 60% of traces kept (refers to the past).
    # We had 4 successfully written, then 4 dropped, then 2 written.
    for trace in payload:
        assert 0.6 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
def test_numeric_tags(self):
    # floats and safe-range ints become metrics; integers beyond the
    # 2**53 float-safe range are stored as strings in meta instead
    span = Span(tracer=None, name='test.span')
    numeric = {
        'negative': -1,
        'zero': 0,
        'positive': 1,
        'large_int': 2**53,
        'large_negative_int': -(2**53),
        'float': 12.3456789,
        'negative_float': -12.3456789,
        'large_float': 2.0**53,
        'really_large_float': (2.0**53) + 1,
    }
    for key, value in numeric.items():
        span.set_tag(key, value)
    span.set_tag('really_large_int', (2**53) + 1)
    span.set_tag('really_large_negative_int', -((2**53) + 1))

    serialized = span.to_dict()
    assert serialized['meta'] == {
        'really_large_int': str((2**53) + 1),
        'really_large_negative_int': str(-((2**53) + 1)),
    }
    assert serialized['metrics'] == numeric
def test_flush_connection_uds(endpoint_uds_server):
    # flushing over a unix domain socket endpoint should succeed
    url = "unix://%s" % endpoint_uds_server.server_address
    writer = AgentWriter(agent_url=url)
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)
def test_set_tag_env(self):
    # the env tag should round-trip through set_tag/get_tag
    span = Span(tracer=None, name='test.span')
    span.set_tag(ENV_KEY, 'prod')
    assert span.get_tag(ENV_KEY) == 'prod'
def _fill_ctx():
    # attach one span to the context held in the local-storage slot and
    # verify exactly one span ended up in its trace
    context = l_ctx.get()
    span = Span(tracer=None, name='fake_span')
    context.add_span(span)
    eq_(1, len(context._trace))
def test_set_valid_metrics(self):
    # numeric values (including numeric strings) are stored as metrics
    span = Span(tracer=None, name='test.span')
    samples = {
        'a': 0,
        'b': -12,
        'c': 12.134,
        'd': 1231543543265475686787869123,
        'e': '12.34',
    }
    for key, value in samples.items():
        span.set_metric(key, value)

    # the numeric string is coerced to a float
    expected = dict(samples, e=12.34)
    assert span.to_dict()['metrics'] == expected
def test_finish_no_tracer():
    # finishing a tracerless span must not raise
    span = Span(tracer=None, name="test.span")
    span.finish()
def drop(cls, span: Span):
    # mark the span so downstream processing knows it was eagerly dropped,
    # and force a USER_REJECT sampling priority on the active call context
    span.set_tag(cls.EAGERLY_DROP_TRACE_KEY, True)
    tracer.get_call_context().sampling_priority = USER_REJECT
def _translate_to_datadog(self, spans):
    """Convert OpenTelemetry spans into equivalent Datadog spans.

    :param spans: finished OpenTelemetry spans to translate
    :return: list of DatadogSpan objects carrying the same timing,
        status, attributes, and configured env/version/origin tags
    """
    datadog_spans = []

    for span in spans:
        trace_id, parent_id, span_id = _get_trace_ids(span)

        # datadog Span is initialized with a reference to the tracer which is
        # used to record the span when it is finished. We can ignore this
        # because we are not calling the finish method and explicitly set the
        # duration.
        tracer = None

        datadog_span = DatadogSpan(
            tracer,
            _get_span_name(span),
            service=self.service,
            resource=_get_resource(span),
            span_type=_get_span_type(span),
            trace_id=trace_id,
            span_id=span_id,
            parent_id=parent_id,
        )
        # copy timing straight from the source span instead of finishing
        datadog_span.start_ns = span.start_time
        datadog_span.duration_ns = span.end_time - span.start_time

        if span.status.canonical_code is not StatusCanonicalCode.OK:
            datadog_span.error = 1
            if span.status.description:
                exc_type, exc_val = _get_exc_info(span)
                # no mapping for error.stack since traceback not recorded
                datadog_span.set_tag("error.msg", exc_val)
                datadog_span.set_tag("error.type", exc_type)

        datadog_span.set_tags(span.attributes)

        # add configured env tag
        if self.env is not None:
            datadog_span.set_tag(ENV_KEY, self.env)

        # add configured application version tag to only root span
        if self.version is not None and parent_id == 0:
            datadog_span.set_tag(VERSION_KEY, self.version)

        # add configured global tags
        datadog_span.set_tags(self.tags)

        # add origin to root span
        origin = _get_origin(span)
        if origin and parent_id == 0:
            datadog_span.set_tag(DD_ORIGIN, origin)

        sampling_rate = _get_sampling_rate(span)
        if sampling_rate is not None:
            datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)

        # span events and span links are not supported
        datadog_spans.append(datadog_span)

    return datadog_spans
def test_sampling_rule_sample_rate_0():
    # a zero sample rate must reject every span
    rule = SamplingRule(sample_rate=0)
    iterations = int(1e4)
    sampled = sum(rule.sample(Span(name=str(i))) for i in range(iterations))
    assert sampled == 0
def _fill_ctx():
    # add one span to the context captured by closure
    fake = Span(tracer=None, name="fake_span")
    ctx.add_span(fake)
def test_span_to_dict_sub(self):
    # child spans serialize ids, meta, type, and a zero error flag
    parent = Span(tracer=None, name="test.span", service="s", resource="r")
    span = Span(tracer=None, name="test.span", service="s", resource="r")
    span._parent = parent
    span.span_type = "foo"
    span.set_tag("a", "1")
    span.set_meta("b", "2")
    span.finish()

    serialized = span.to_dict()
    assert serialized
    expected = {
        "span_id": span.span_id,
        "trace_id": span.trace_id,
        "parent_id": span.parent_id,
        "meta": {"a": "1", "b": "2"},
        "type": "foo",
        "error": 0,
    }
    for key, value in expected.items():
        assert serialized[key] == value
    assert type(serialized["error"]) == int
class Span(OpenTracingSpan):
    """Datadog implementation of :class:`opentracing.Span`.

    Wraps a Datadog span (``self._dd_span``) and delegates timing,
    tagging, and error recording to it, while exposing the OpenTracing
    span interface (baggage, log_kv, standard tag mapping).
    """

    def __init__(self, tracer, context, operation_name):
        if context is not None:
            # carry over the datadog context and baggage from the OT context
            context = SpanContext(ddcontext=context._dd_context, baggage=context.baggage)
        else:
            context = SpanContext()

        super(Span, self).__init__(tracer, context)

        # guards against finishing twice; baggage updates swap the context
        # under self._lock
        self.finished = False
        self._lock = threading.Lock()
        # use a datadog span
        self._dd_span = DatadogSpan(tracer._dd_tracer, operation_name, context=context._dd_context)

    def finish(self, finish_time=None):
        """Finish the span.

        This calls finish on the ddspan.

        :param finish_time: specify a custom finish time with a unix timestamp
            per time.time()
        :type timestamp: float
        """
        # idempotent: only the first call finishes the underlying ddspan
        if self.finished:
            return

        # finish the datadog span
        self._dd_span.finish(finish_time)
        self.finished = True

    def set_baggage_item(self, key, value):
        """Sets a baggage item in the span context of this span.

        Baggage is used to propagate state between spans.

        :param key: baggage item key
        :type key: str

        :param value: baggage item value
        :type value: a type that can be compat.stringify()'d

        :rtype: Span
        :return: itself for chaining calls
        """
        # baggage is copy-on-write: build a new context, then swap it in
        # under the lock
        new_ctx = self.context.with_baggage_item(key, value)
        with self._lock:
            self._context = new_ctx
        return self

    def get_baggage_item(self, key):
        """Gets a baggage item from the span context of this span.

        :param key: baggage item key
        :type key: str

        :rtype: str
        :return: the baggage value for the given key or ``None``.
        """
        return self.context.get_baggage_item(key)

    def set_operation_name(self, operation_name):
        """Set the operation name on the underlying datadog span."""
        self._dd_span.name = operation_name

    def log_kv(self, key_values, timestamp=None):
        """Add a log record to this span.

        Passes on relevant opentracing key values onto the datadog span.

        :param key_values: a dict of string keys and values of any type
        :type key_values: dict

        :param timestamp: a unix timestamp per time.time()
        :type timestamp: float

        :return: the span itself, for call chaining
        :rtype: Span
        """
        # match opentracing defined keys to datadog functionality
        # opentracing/specification/blob/1be630515dafd4d2a468d083300900f89f28e24d/semantic_conventions.md#log-fields-table
        for key, val in key_values.items():
            if key == 'event' and val == 'error':
                # TODO: not sure if it's actually necessary to set the error manually
                self._dd_span.error = 1
                self.set_tag('error', 1)
            elif key == 'error' or key == 'error.object':
                self.set_tag(errors.ERROR_TYPE, val)
            elif key == 'message':
                self.set_tag(errors.ERROR_MSG, val)
            elif key == 'stack':
                self.set_tag(errors.ERROR_STACK, val)
            else:
                # unrecognized OT log keys are intentionally dropped
                pass

        return self

    def set_tag(self, key, value):
        """Set a tag on the span.

        This sets the tag on the underlying datadog span; well-known
        OpenTracing tag keys are mapped to the corresponding datadog
        span fields instead of plain tags.
        """
        if key == Tags.SPAN_TYPE:
            self._dd_span.span_type = value
        elif key == Tags.SERVICE_NAME:
            self._dd_span.service = value
        elif key == Tags.RESOURCE_NAME or key == OTTags.DATABASE_STATEMENT:
            self._dd_span.resource = value
        elif key == OTTags.PEER_HOSTNAME:
            self._dd_span.set_tag(Tags.TARGET_HOST, value)
        elif key == OTTags.PEER_PORT:
            self._dd_span.set_tag(Tags.TARGET_PORT, value)
        elif key == Tags.SAMPLING_PRIORITY:
            self._dd_span.context.sampling_priority = value
        else:
            self._dd_span.set_tag(key, value)

    def _get_tag(self, key):
        """Gets a tag from the span.

        This method retrieves the tag from the underlying datadog span.
        """
        return self._dd_span.get_tag(key)

    def _get_metric(self, key):
        """Gets a metric from the span.

        This method retrieves the metric from the underlying datadog span.
        """
        return self._dd_span.get_metric(key)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # record exception details on the ddspan before finishing
        if exc_type:
            self._dd_span.set_exc_info(exc_type, exc_val, exc_tb)

        # note: self.finish() AND _dd_span.__exit__ will call _span.finish() but
        # it is idempotent
        self._dd_span.__exit__(exc_type, exc_val, exc_tb)
        self.finish()

    def _associate_dd_span(self, ddspan):
        """Associates a DD span with this span."""
        # adopt both the datadog span and its context
        self._dd_span = ddspan
        self.context._dd_context = ddspan.context

    @property
    def _dd_context(self):
        # expose the underlying datadog span's context
        return self._dd_span.context