def test_partial_flush_too_many(self):
    """
    When calling `Context.get`
        When partial flushing is enabled
            When we have more than the minimum number of spans needed to flush
                We return the finished spans
    """
    tracer = get_dummy_tracer()
    ctx = Context()

    # Build a trace: an unfinished root plus 5 finished children.
    root = Span(tracer=tracer, name='root')
    ctx.add_span(root)
    for idx in range(5):
        span = Span(
            tracer=tracer,
            name='child_{}'.format(idx),
            trace_id=root.trace_id,
            parent_id=root.span_id,
        )
        span._parent = root
        # NOTE(review): the sibling partial-flush test assigns `_finished`
        # instead of `finished` -- confirm which attribute this Span
        # version expects.
        span.finished = True
        ctx.add_span(span)
        ctx.close_span(span)

    # min_spans=1: the 5 finished children qualify for a partial flush.
    with self.override_partial_flush(ctx, enabled=True, min_spans=1):
        trace, sampled = ctx.get()

    self.assertIsNotNone(trace)
    self.assertIsNotNone(sampled)
    self.assertEqual(len(trace), 5)
    expected_names = set('child_{}'.format(i) for i in range(5))
    self.assertEqual(expected_names, set(span.name for span in trace))

    # Ensure we clear/reset internal stats as expected
    self.assertEqual(ctx._trace, [root])

    # Only the root remains, so a second flush has nothing to return.
    with self.override_partial_flush(ctx, enabled=True, min_spans=5):
        trace, sampled = ctx.get()

    self.assertIsNone(trace)
    self.assertIsNone(sampled)
def test_set_tag_manual_keep(self):
    """Tagging `manual.keep` forces USER_KEEP priority without touching meta."""
    ctx = Context()
    span = Span(tracer=None, name='root.span', service='s', resource='r', context=ctx)
    assert span.context == ctx

    # Before the tag is applied, the priority is not yet USER_KEEP.
    assert ctx.sampling_priority != priority.USER_KEEP
    assert span.context.sampling_priority != priority.USER_KEEP
    assert span.meta == {}

    # Applying the tag upgrades the priority; meta stays empty.
    span.set_tag('manual.keep')
    assert ctx.sampling_priority == priority.USER_KEEP
    assert span.context.sampling_priority == priority.USER_KEEP
    assert span.meta == {}

    # Reset the priority by hand...
    ctx.sampling_priority = priority.AUTO_REJECT
    assert ctx.sampling_priority == priority.AUTO_REJECT
    assert span.context.sampling_priority == priority.AUTO_REJECT
    assert span.meta == {}

    # ...then confirm a second manual.keep wins again.
    span.set_tag('manual.keep')
    assert ctx.sampling_priority == priority.USER_KEEP
    assert span.context.sampling_priority == priority.USER_KEEP
    assert span.meta == {}
def test_log_unfinished_spans(log, tracer_with_debug_logging):
    # when the root parent is finished, notify if there are spans still pending
    tracer = tracer_with_debug_logging
    ctx = Context()

    # manually create a root-child trace
    root = Span(tracer=tracer, name='root')
    children = [
        Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id),
        Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id),
    ]
    ctx.add_span(root)
    for child in children:
        child._parent = root
        ctx.add_span(child)

    # close only the parent
    root.finish()

    # The last three debug calls carry the pending-span count and one
    # formatted line per unfinished child.
    calls = log.call_args_list
    pending_count = calls[-3][0][2]
    first_child_line = calls[-2][0][1]
    second_child_line = calls[-1][0][1]

    assert pending_count == 2
    assert 'name child_1' in first_child_line
    assert 'name child_2' in second_child_line
    assert 'duration 0.000000s' in first_child_line
    assert 'duration 0.000000s' in second_child_line
def test_set_tag_manual_drop(self):
    """Tagging `manual.drop` forces USER_REJECT priority without touching meta."""
    ctx = Context()
    root = Span(tracer=None, name="root.span", service="s", resource="r", context=ctx)
    assert root.context == ctx

    # Priority starts out as something other than USER_REJECT.
    assert ctx.sampling_priority != priority.USER_REJECT
    assert root.context.sampling_priority != priority.USER_REJECT
    assert root.meta == {}

    # manual.drop forces USER_REJECT and leaves meta untouched.
    root.set_tag("manual.drop")
    for observed in (ctx.sampling_priority, root.context.sampling_priority):
        assert observed == priority.USER_REJECT
    assert root.meta == {}

    # Overwrite the priority by hand...
    ctx.sampling_priority = priority.AUTO_REJECT
    for observed in (ctx.sampling_priority, root.context.sampling_priority):
        assert observed == priority.AUTO_REJECT
    assert root.meta == {}

    # ...then verify manual.drop reasserts itself.
    root.set_tag("manual.drop")
    for observed in (ctx.sampling_priority, root.context.sampling_priority):
        assert observed == priority.USER_REJECT
    assert root.meta == {}
def test_partial_flush_too_few(self):
    """
    When calling `Context.get`
        When partial flushing is enabled
            When we do not have enough finished spans to flush
                We return no spans
    """
    tracer = get_dummy_tracer()
    ctx = Context()

    # Build a trace: an unfinished root plus 5 finished children.
    root = Span(tracer=tracer, name='root')
    ctx.add_span(root)
    for idx in range(5):
        span = Span(
            tracer=tracer,
            name='child_{}'.format(idx),
            trace_id=root.trace_id,
            parent_id=root.span_id,
        )
        span._parent = root
        span._finished = True
        ctx.add_span(span)
        ctx.close_span(span)

    # Test with having 1 too few spans for partial flush
    with self.override_partial_flush(ctx, enabled=True, min_spans=6):
        trace, sampled = ctx.get()

    self.assertIsNone(trace)
    self.assertIsNone(sampled)

    # Nothing was flushed: all 6 spans remain, 5 of them finished.
    self.assertEqual(len(ctx._trace), 6)
    self.assertEqual(ctx._finished_spans, 5)
    expected = set(['root'] + ['child_{}'.format(i) for i in range(5)])
    self.assertEqual(expected, set(span.name for span in ctx._trace))
def run(self):
    """Yield a benchmark loop that repeatedly injects a fixed Context."""
    # Translate the string-valued benchmark knobs into Context kwargs;
    # empty strings / falsy values mean "not set".
    sampling_priority = int(self.sampling_priority) if self.sampling_priority != "" else None
    dd_origin = self.dd_origin or None
    meta = json.loads(self.meta) if self.meta else None

    ctx = Context(
        trace_id=8336172473188639332,
        span_id=6804240797025004118,
        sampling_priority=sampling_priority,
        dd_origin=dd_origin,
        meta=meta,
    )

    def _(loops):
        for _ in range(loops):
            # Just pass in a new/empty dict, we don't care about the result
            http.HTTPPropagator.inject(ctx, {})

    yield _
def test_finished_empty(self):
    # an empty Context reports itself as not finished
    assert Context().is_finished() is False
def inject_new_context(self, *args, **kwargs):
    # Activate a fresh, fully-specified context on the app's pinned tracer.
    pin = Pin.get_from(self.app)
    fresh_ctx = Context(trace_id=99999, span_id=99999, sampling_priority=1)
    pin.tracer.context_provider.activate(fresh_ctx)
def setUp(self):
    super(CeleryDistributedTracingIntegrationTask, self).setUp()
    # Seed the app's tracer with a known distributed-tracing context.
    tracer = Pin.get_from(self.app).tracer
    seeded = Context(trace_id=12345, span_id=12345, sampling_priority=1)
    tracer.context_provider.activate(seeded)
import pytest from ddtrace.context import Context @pytest.mark.parametrize( "ctx1,ctx2", [ (Context(), Context()), (Context(trace_id=123), Context(trace_id=123)), ( Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2), Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2), ), ], ) def test_eq(ctx1, ctx2): assert ctx1 == ctx2 @pytest.mark.parametrize( "ctx1,ctx2", [ ( Context(trace_id=123,
def test_set_call_context(self):
    # a different Context is set for the current logical execution
    current_task = asyncio.Task.current_task()
    ctx = Context()
    helpers.set_call_context(current_task, ctx)
    eq_(ctx, self.tracer.get_call_context())
def test_current_span(self):
    # the most recently added span is reported as the active one
    ctx = Context()
    fake = Span(tracer=None, name='fake_span')
    ctx.add_span(fake)
    eq_(fake, ctx.get_current_span())
def test_context_sampled(self):
    # adding a sampled span marks the whole context as sampled
    ctx = Context()
    ctx.add_span(Span(tracer=None, name='fake_span'))
    ok_(ctx._sampled is True)
import pytest from ddtrace.context import Context from ddtrace.span import Span @pytest.mark.parametrize( "ctx1,ctx2", [ (Context(), Context()), (Context(trace_id=123), Context(trace_id=123)), ( Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2), Context(trace_id=123, span_id=321, dd_origin="synthetics", sampling_priority=2), ), ], ) def test_eq(ctx1, ctx2): assert ctx1 == ctx2 @pytest.mark.parametrize( "ctx1,ctx2", [ (Context(), Span(None, "")),
def test_current_root_span_none(self):
    # with no spans added there is no root span to return
    assert Context().get_current_root_span() is None
def test_finished(self):
    """A Context is finished once every span inside it has been closed."""
    ctx = Context()
    span = Span(tracer=None, name="fake_span")
    ctx.add_span(span)
    ctx.close_span(span)
    # BUG FIX: the test previously contained no assertion at all, so it
    # could never fail. Verify the context now reports itself finished
    # (the `is_finished()` API is the same one the empty-context test uses).
    assert ctx.is_finished() is True
def test_current_root_span(self):
    # the first span added becomes the current root span
    ctx = Context()
    first = Span(tracer=None, name='fake_span')
    ctx.add_span(first)
    assert first == ctx.get_current_root_span()
default = "416", type = str) parser.add_argument("--frame-image", dest="image", default=False, help="Save image of detected frame", type=bool) parser.add_argument("--frame-skip", dest="skip", default=1, help="Skip N number of detected frames", type=int) parser.add_argument("--post-url", dest="post_url", help="URL to POST JSON back to") parser.add_argument("--trace-id", dest="trace_id", help="Trace ID") parser.add_argument("--parent-id", dest="parent_id", help="Parent Trace ID") parser.add_argument("--sampling-priority", dest="sampling_priority", help="Trace Sampling Priority") return parser.parse_args() if __name__ == '__main__': args = arg_parse() has_clocks = False clock_frames = 0 if args.trace_id: context = Context(trace_id=int(args.trace_id), span_id=int(args.parent_id), sampling_priority=int(args.sampling_priority)) tracer.context_provider.activate(context) with tracer.trace("gpu.yolovideoinference", service="yolo-inference-process") as span: confidence = float(args.confidence) nms_thesh = float(args.nms_thresh) start = 0 CUDA = torch.cuda.is_available() num_classes = 80 bbox_attrs = 5 + num_classes print("Loading network.....")