def test_patch_unpatch(self):
    """Verify Cassandra patch() is idempotent and unpatch() removes tracing."""
    # Test patch idempotence
    patch()
    patch()

    tracer = get_dummy_tracer()
    Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)

    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)

    spans = tracer.writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)

    # no spans may be produced while unpatched
    spans = tracer.writer.pop()
    assert not spans, spans

    # Test patch again
    patch()
    Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)

    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)

    spans = tracer.writer.pop()
    assert spans, spans
def test_parse_response_json(self, log):
    """_parse_response_json must decode valid JSON and log a hint on bad payloads."""
    tracer = get_dummy_tracer()
    tracer.debug_logging = True
    # maps raw agent payload -> expected parsed value ('js') and, when parsing
    # is expected to fail, a substring that must appear in the last log call
    test_cases = {
        'OK': {'js': None, 'log': "please make sure trace-agent is up to date"},
        'OK\n': {'js': None, 'log': "please make sure trace-agent is up to date"},
        'error:unsupported-endpoint': {'js': None, 'log': "unable to load JSON 'error:unsupported-endpoint'"},
        42: {'js': None, 'log': "unable to load JSON '42'"},  # int as key to trigger TypeError
        '{}': {'js': {}},
        '[]': {'js': []},
        '{"rate_by_service": {"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}': {'js': {"rate_by_service": {"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}},
        ' [4,2,1] ': {'js': [4,2,1]}
    }
    for k,v in iteritems(test_cases):
        r = ResponseMock(k)
        js =_parse_response_json(r)
        eq_(v['js'], js)
        if 'log' in v:
            ok_(1<=len(log.call_args_list), "not enough elements in call_args_list: %s" % log.call_args_list)
            print(log.call_args_list)
            # inspect only the most recent log call
            l = log.call_args_list[-1][0][0]
            ok_(v['log'] in l, "unable to find %s in %s" % (v['log'], l))
def test_pre_v4():
    """The legacy (pre-v0.4) traced MySQL connection interface still works."""
    dummy_tracer = get_dummy_tracer()
    traced_mysql = get_traced_mysql_connection(dummy_tracer, service="my-mysql-server")
    connection = traced_mysql(**config.MYSQL_CONFIG)
    cur = connection.cursor()
    cur.execute("SELECT 1")
    first_row = cur.fetchone()
    assert first_row[0] == 1
def setUp(self):
    """Install a dummy tracer as the global tracer and patch gevent."""
    # use a dummy tracer
    dummy = get_dummy_tracer()
    self.tracer = dummy
    self._original_tracer = ddtrace.tracer
    ddtrace.tracer = dummy
    # trace gevent
    patch()
def test_set_sample_rate_by_service(self):
    """Each applied rate-by-service map must be mirrored exactly by the samplers."""
    cases = [
        {"service:,env:":1},
        {"service:,env:":1, "service:mcnulty,env:dev":0.33, "service:postgres,env:dev":0.7},
        {"service:,env:":1, "service:mcnulty,env:dev": 0.25, "service:postgres,env:dev": 0.5, "service:redis,env:prod": 0.75}
    ]
    tracer = get_dummy_tracer()
    tracer.configure(sampler=AllSampler(), priority_sampling=True)
    priority_sampler = tracer.priority_sampler
    for case in cases:
        priority_sampler.set_sample_rate_by_service(case)
        rates = {}
        for k,v in iteritems(priority_sampler._by_service_samplers):
            rates[k] = v.sample_rate
        assert case == rates, "%s != %s" % (case, rates)
    # It's important to also test in reverse mode for we want to make sure key deletion
    # works as well as key insertion (and doing this both ways ensures we trigger both cases)
    cases.reverse()
    for case in cases:
        priority_sampler.set_sample_rate_by_service(case)
        rates = {}
        for k,v in iteritems(priority_sampler._by_service_samplers):
            rates[k] = v.sample_rate
        assert case == rates, "%s != %s" % (case, rates)
def setUp(self):
    """Install a dummy tracer globally and create a fresh Bottle app."""
    # provide a dummy tracer
    dummy = get_dummy_tracer()
    self.tracer = dummy
    self._original_tracer = ddtrace.tracer
    ddtrace.tracer = dummy
    # provide a Bottle app
    self.app = bottle.Bottle()
def _traced_session(self):
    """Return a traced Cassandra session together with the dummy writer behind it."""
    dummy_tracer = get_dummy_tracer()
    # pin the global Cluster to test if they will conflict
    Pin(service=self.TEST_SERVICE, tracer=dummy_tracer).onto(Cluster)
    self.cluster = Cluster(port=CASSANDRA_CONFIG['port'])
    session = self.cluster.connect(self.TEST_KEYSPACE)
    return session, dummy_tracer.writer
def test_log_unfinished_spans(self, log):
    """Closing the root while children remain open must log each pending span."""
    # when the root parent is finished, notify if there are spans still pending
    tracer = get_dummy_tracer()
    tracer.debug_logging = True
    ctx = Context()
    # manually create a root-child trace
    root = Span(tracer=tracer, name='root')
    child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
    child_1._parent = root
    child_2._parent = root
    ctx.add_span(root)
    ctx.add_span(child_1)
    ctx.add_span(child_2)
    # close only the parent
    root.finish()
    ok_(ctx.is_finished() is False)
    # the last three log calls are expected to be: the unfinished-span count,
    # then one detail line per unfinished child
    unfinished_spans_log = log.call_args_list[-3][0][2]
    child_1_log = log.call_args_list[-2][0][1]
    child_2_log = log.call_args_list[-1][0][1]
    eq_(2, unfinished_spans_log)
    ok_('name child_1' in child_1_log)
    ok_('name child_2' in child_2_log)
    ok_('duration 0.000000s' in child_1_log)
    ok_('duration 0.000000s' in child_2_log)
def setUp(self):
    """Build the Falcon test app backed by a dummy tracer."""
    super(MiddlewareTestCase, self).setUp()
    # build a test app with a dummy tracer
    self._service = 'falcon'
    dummy = get_dummy_tracer()
    self.tracer = dummy
    self.api = get_app(tracer=dummy)
def setUp(self):
    """Create a tracer without workers, while spying the ``send()`` method."""
    # create a new API object to test the transport using synchronous calls
    self.tracer = get_dummy_tracer()
    host, port = 'localhost', 8126
    self.api_json = API(host, port, encoder=JSONEncoder())
    self.api_msgpack = API(host, port, encoder=MsgpackEncoder())
def setUp(self):
    """Give every test its own event loop and an async-aware dummy tracer."""
    # each test must have its own event loop
    self._main_loop = asyncio.get_event_loop()
    fresh_loop = asyncio.new_event_loop()
    self.loop = fresh_loop
    asyncio.set_event_loop(fresh_loop)
    # Tracer with AsyncContextProvider
    self.tracer = get_dummy_tracer()
    self.tracer.configure(context_provider=context_provider)
def get_client(self):
    """Return a flushed, pin-traced pylibmc client plus its dummy tracer."""
    address = "%s:%s" % (cfg["host"], cfg["port"])
    memcached = pylibmc.Client([address])
    memcached.flush_all()

    dummy_tracer = get_dummy_tracer()
    Pin.get_from(memcached).clone(tracer=dummy_tracer).onto(memcached)
    return memcached, dummy_tracer
def get_client(self):
    """Wrap a flushed pylibmc client in a TracedClient; return it with its tracer."""
    address = "%s:%s" % (cfg["host"], cfg["port"])
    backend = pylibmc.Client([address])
    backend.flush_all()

    dummy_tracer = get_dummy_tracer()
    traced = TracedClient(backend, tracer=dummy_tracer, service=self.TEST_SERVICE)
    return traced, dummy_tracer
def test_sqlite(self):
    """Trace sqlite queries (success and error) across multiple services."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # ensure we can trace multiple services without stomping
    services = ["db", "another"]
    for service in services:
        db = sqlite3.connect(":memory:")
        pin = Pin.get_from(db)
        assert pin
        eq_("db", pin.app_type)
        pin.clone(
            service=service,
            tracer=tracer).onto(db)

        # Ensure we can run a query and it's correctly traced
        q = "select * from sqlite_master"
        start = time.time()
        cursor = db.execute(q)
        rows = cursor.fetchall()
        end = time.time()
        assert not rows
        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.span_type, "sql")
        eq_(span.resource, q)
        eq_(span.service, service)
        # this variant does not record the raw query as a tag
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 0)
        assert start <= span.start <= end
        assert span.duration <= end - start

        # run a query with an error and ensure all is well
        q = "select * from some_non_existant_table"
        try:
            db.execute(q)
        except Exception:
            pass
        else:
            assert 0, "should have an error"
        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.resource, q)
        eq_(span.service, service)
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 1)
        eq_(span.span_type, "sql")
        assert span.get_tag(errors.ERROR_STACK)
        assert 'OperationalError' in span.get_tag(errors.ERROR_TYPE)
        assert 'no such table' in span.get_tag(errors.ERROR_MSG)
def test_backwards_compat():
    """The legacy connection_factory interface still yields working connections."""
    # a small test to ensure that if the previous interface is used
    # things still work
    dummy_tracer = get_dummy_tracer()
    legacy_factory = connection_factory(dummy_tracer, service="my_db_service")
    connection = sqlite3.connect(":memory:", factory=legacy_factory)
    query = "select * from sqlite_master"
    cursor = connection.execute(query)
    assert not cursor.fetchall()
    assert not dummy_tracer.writer.pop()
def test_inject(self):
    """Injected headers carry the active span's trace and parent ids."""
    dummy_tracer = get_dummy_tracer()
    with dummy_tracer.trace("global_root_span") as root:
        carrier = {}
        HTTPPropagator().inject(root.context, carrier)

        eq_(int(carrier[HTTP_HEADER_TRACE_ID]), root.trace_id)
        eq_(int(carrier[HTTP_HEADER_PARENT_ID]), root.span_id)
def test_sqlite(self):
    """Trace sqlite queries (success and error); this variant records sql.query meta."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # ensure we can trace multiple services without stomping
    services = ["db", "another"]
    for service in services:
        db = sqlite3.connect(":memory:")
        pin = Pin.get_from(db)
        assert pin
        eq_("db", pin.app_type)
        pin.clone(service=service, tracer=tracer).onto(db)

        # Ensure we can run a query and it's correctly traced
        q = "select * from sqlite_master"
        start = time.time()
        cursor = db.execute(q)
        rows = cursor.fetchall()
        end = time.time()
        assert not rows
        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.span_type, "sql")
        eq_(span.resource, q)
        eq_(span.service, service)
        # unlike the no-tag variant, the raw query is stored in meta
        eq_(span.meta["sql.query"], q)
        eq_(span.error, 0)
        assert start <= span.start <= end
        assert span.duration <= end - start

        # run a query with an error and ensure all is well
        q = "select * from some_non_existant_table"
        try:
            db.execute(q)
        except Exception:
            pass
        else:
            assert 0, "should have an error"
        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.resource, q)
        eq_(span.service, service)
        eq_(span.meta["sql.query"], q)
        eq_(span.error, 1)
        eq_(span.span_type, "sql")
        assert span.get_tag(errors.ERROR_STACK)
        assert 'OperationalError' in span.get_tag(errors.ERROR_TYPE)
        assert 'no such table' in span.get_tag(errors.ERROR_MSG)
def setUp(self, get_container_info):
    """Create a tracer without workers, while spying the ``send()`` method."""
    # Mock the container id we use for making requests
    get_container_info.return_value = CGroupInfo(container_id="test-container-id")

    # create a new API object to test the transport using synchronous calls
    self.tracer = get_dummy_tracer()
    host, port = "localhost", 8126
    self.api_json = API(host, port, encoder=JSONEncoder())
    self.api_msgpack = API(host, port, encoder=MsgpackEncoder())
def make_client_pool(self, hostname, mock_socket_values, serializer=None, **kwargs):
    """Build a PooledClient whose pool always hands out one traced mock client."""
    mock_client = pymemcache.client.base.Client(
        hostname, serializer=serializer, **kwargs
    )
    # trace the single underlying client via a dummy tracer
    tracer = get_dummy_tracer()
    Pin.override(mock_client, tracer=tracer)
    # replace the real socket with a scripted mock
    mock_client.sock = MockSocket(mock_socket_values)
    client = pymemcache.client.base.PooledClient(hostname, serializer=serializer)
    # every checkout from the pool yields the same traced mock
    client.client_pool = pymemcache.pool.ObjectPool(lambda: mock_client)
    # NOTE(review): the pooled `client` is built but the underlying mock is
    # returned — callers appear to assert on the traced mock, but confirm none
    # expect the PooledClient itself here.
    return mock_client
def test_less_than_v04():
    """Exercise the legacy (< v0.4) trace_mongoengine interface."""
    # interface from < v0.4
    from ddtrace.contrib.mongoengine import trace_mongoengine
    dummy_tracer = get_dummy_tracer()

    traced_connect = trace_mongoengine(dummy_tracer, service="my-mongo-db", patch=False)
    traced_connect(port=config.MONGO_CONFIG['port'])

    singer = Singer()
    singer.first_name = 'leonard'
    singer.last_name = 'cohen'
    singer.save()
def setUp(self):
    """Build a Pyramid test app (excview tween only) wired to a dummy tracer."""
    from tests.test_tracer import get_dummy_tracer
    self.tracer = get_dummy_tracer()
    ddtrace.tracer = self.tracer

    settings = {'pyramid.tweens': 'pyramid.tweens.excview_tween_factory\n'}
    configurator = Configurator(settings=settings)
    self.rend = configurator.testing_add_renderer('template.pt')
    self.app = webtest.TestApp(get_app(configurator))
def test_include_conflicts():
    """Test that includes do not create conflicts."""
    from ...test_tracer import get_dummy_tracer
    from ...util import override_global_tracer

    dummy_tracer = get_dummy_tracer()
    with override_global_tracer(dummy_tracer):
        settings = {'pyramid.includes': 'tests.contrib.pyramid.test_pyramid_autopatch'}
        test_app = webtest.TestApp(Configurator(settings=settings).make_wsgi_app())
        test_app.get('/', status=404)

        spans = dummy_tracer.writer.pop()
        assert spans
        eq_(len(spans), 1)
def test_parse_response_json(self, log):
    """_parse_response_json must decode valid JSON and log a hint on bad payloads."""
    tracer = get_dummy_tracer()
    tracer.debug_logging = True
    # maps raw agent payload -> expected parsed value ('js') and, on failure,
    # a substring expected in the most recent log call
    test_cases = {
        'OK': {
            'js': None,
            'log': "please make sure trace-agent is up to date"
        },
        'OK\n': {
            'js': None,
            'log': "please make sure trace-agent is up to date"
        },
        'error:unsupported-endpoint': {
            'js': None,
            'log': "unable to load JSON 'error:unsupported-endpoint'"
        },
        42: {
            'js': None,
            'log': "unable to load JSON '42'"
        },  # int as key to trigger TypeError
        '{}': {
            'js': {}
        },
        '[]': {
            'js': []
        },
        '{"rate_by_service": {"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}': {
            'js': {
                "rate_by_service": {
                    "service:,env:": 0.5,
                    "service:mcnulty,env:test": 0.9,
                    "service:postgres,env:test": 0.6
                }
            }
        },
        ' [4,2,1] ': {
            'js': [4, 2, 1]
        }
    }
    for k, v in iteritems(test_cases):
        r = ResponseMock(k)
        js = _parse_response_json(r)
        eq_(v['js'], js)
        if 'log' in v:
            ok_(
                1 <= len(log.call_args_list),
                "not enough elements in call_args_list: %s" % log.call_args_list)
            print(log.call_args_list)
            # inspect only the most recent log call
            l = log.call_args_list[-1][0][0]
            ok_(v['log'] in l, "unable to find %s in %s" % (v['log'], l))
def test_less_than_v04():
    """Exercise the legacy (< v0.4) trace_mongoengine interface (oteltrace)."""
    # interface from < v0.4
    from oteltrace.contrib.mongoengine import trace_mongoengine
    dummy_tracer = get_dummy_tracer()

    traced_connect = trace_mongoengine(dummy_tracer, service='my-mongo-db', patch=False)
    traced_connect(port=config.MONGO_CONFIG['port'])

    singer = Singer()
    singer.first_name = 'leonard'
    singer.last_name = 'cohen'
    singer.save()
def setUp(self):
    """Configure a Pyramid app traced by a dummy tracer via trace_pyramid."""
    from tests.test_tracer import get_dummy_tracer
    self.tracer = get_dummy_tracer()

    settings = {
        'datadog_trace_service': 'foobar',
        'datadog_tracer': self.tracer
    }
    configurator = Configurator(settings=settings)
    trace_pyramid(configurator)
    self.app = webtest.TestApp(get_app(configurator))
def test_service_info(self):
    """Connecting must report the sqlite service metadata exactly once."""
    dummy_tracer = get_dummy_tracer()
    previous_tracer = ddtrace.tracer
    ddtrace.tracer = dummy_tracer

    db = sqlite3.connect(":memory:")

    reported = dummy_tracer.writer.pop_services()
    eq_(len(reported), 1)
    eq_({'sqlite': {'app': 'sqlite', 'app_type': 'db'}}, reported)

    ddtrace.tracer = previous_tracer
def test_inject(self):
    """Injection must propagate ids, sampling priority and origin."""
    dummy_tracer = get_dummy_tracer()
    with dummy_tracer.trace("global_root_span") as root:
        root.context.sampling_priority = 2
        root.context._dd_origin = "synthetics"
        carrier = {}
        HTTPPropagator().inject(root.context, carrier)

        assert int(carrier[HTTP_HEADER_TRACE_ID]) == root.trace_id
        assert int(carrier[HTTP_HEADER_PARENT_ID]) == root.span_id
        assert int(carrier[HTTP_HEADER_SAMPLING_PRIORITY]) == root.context.sampling_priority
        assert carrier[HTTP_HEADER_ORIGIN] == root.context._dd_origin
def setUp(self):
    """Create a tracer without workers, while spying the ``send()`` method."""
    # create a new API object to test the transport using synchronous calls
    self.tracer = get_dummy_tracer()
    host, port = 'localhost', 8126
    self.api_json = API(host, port, encoder=JSONEncoder(), priority_sampling=True)
    self.api_msgpack = API(host, port, encoder=MsgpackEncoder(), priority_sampling=True)
def _get_conn_tracer(self):
    """Lazily open a traced MySQL connection; return (connection, tracer)."""
    # only build the connection once per test; the pin keeps the tracer
    if not self.conn:
        tracer = get_dummy_tracer()
        self.conn = mysql.connector.connect(**MYSQL_CONFIG)
        assert self.conn.is_connected()
        # Ensure that the default pin is there, with its default value
        pin = Pin.get_from(self.conn)
        assert pin
        assert pin.service == 'mysql'
        # Customize the service
        # we have to apply it on the existing one since new one won't inherit `app`
        pin.clone(service=self.TEST_SERVICE, tracer=tracer).onto(self.conn)
        return self.conn, tracer
def test_connect_factory(self):
    """Each connection can be pinned to its own service; no service meta is reported."""
    tracer = get_dummy_tracer()

    # trace two connections under two distinct service names
    services = ['db', 'another']
    for service in services:
        conn, _ = yield from self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
        yield from self.assert_conn_is_traced(tracer, conn, service)
        conn.close()

    # ensure we have the service types
    service_meta = tracer.writer.pop_services()
    expected = {}
    eq_(service_meta, expected)
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: tracer = get_dummy_tracer() writer = tracer.writer conn = mysql.connector.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone(service=self.TEST_SERVICE, tracer=tracer).onto(conn) assert conn.is_connected() cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() eq_(len(rows), 1) spans = writer.pop() eq_(len(spans), 2) span = spans[0] eq_(span.service, self.TEST_SERVICE) eq_(span.name, 'mysql.query') eq_(span.span_type, 'sql') eq_(span.error, 0) assert_dict_issuperset( span.meta, { 'out.host': u'127.0.0.1', 'out.port': u'3306', 'db.name': u'test', 'db.user': u'test', }) ok_(span.get_tag('sql.query') is None) eq_(spans[1].name, 'mysql.query.fetchall') finally: unpatch() # assert we finish unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def test_b3_inject(self):
    """B3 propagation must emit hex ids and a sampled flag of 1."""
    dummy_tracer = get_dummy_tracer()
    dummy_tracer.configure(http_propagator=B3HTTPPropagator)
    with dummy_tracer.trace("global_root_span") as root:
        carrier = {}
        set_http_propagator_factory(B3HTTPPropagator)
        HTTPPropagator().inject(root.context, carrier)

        # B3 header values are hex-encoded
        assert int(carrier[B3HTTPPropagator.TRACE_ID_KEY], 16) == root.trace_id
        assert int(carrier[B3HTTPPropagator.SPAN_ID_KEY], 16) == root.span_id
        assert int(carrier[B3HTTPPropagator.SAMPLED_KEY]) == 1
def test_service_info(self):
    """A sqlite connection must publish its service metadata once."""
    dummy_tracer = get_dummy_tracer()
    saved_tracer = ddtrace.tracer
    ddtrace.tracer = dummy_tracer

    db = sqlite3.connect(":memory:")

    reported = dummy_tracer.writer.pop_services()
    eq_(len(reported), 1)
    expected = {'sqlite': {'app': 'sqlite', 'app_type': 'db'}}
    eq_(expected, reported)

    ddtrace.tracer = saved_tracer
def test_inject(self):
    """Injected headers must include the context's sampling priority."""
    dummy_tracer = get_dummy_tracer()
    with dummy_tracer.trace("global_root_span") as root:
        root.context.sampling_priority = 2
        carrier = {}
        HTTPPropagator().inject(root.context, carrier)

        eq_(int(carrier[HTTP_HEADER_TRACE_ID]), root.trace_id)
        eq_(int(carrier[HTTP_HEADER_PARENT_ID]), root.span_id)
        eq_(int(carrier[HTTP_HEADER_SAMPLING_PRIORITY]), root.context.sampling_priority)
def _get_conn_tracer(self):
    """Lazily open a traced MySQL connection; return (connection, tracer)."""
    # only build the connection once per test; the pin keeps the tracer
    if not self.conn:
        tracer = get_dummy_tracer()
        self.conn = mysql.connector.connect(**MYSQL_CONFIG)
        assert self.conn.is_connected()
        # Ensure that the default pin is there, with its default value
        pin = Pin.get_from(self.conn)
        assert pin
        assert pin.service == 'mysql'
        # Customize the service
        # we have to apply it on the existing one since new one won't inherit `app`
        pin.clone(
            service=self.TEST_SERVICE, tracer=tracer).onto(self.conn)
        return self.conn, tracer
def test_parse_response_json(self, log):
    """Response.get_json must decode valid JSON and log a regex-matched hint otherwise."""
    tracer = get_dummy_tracer()
    tracer.debug_logging = True

    # maps raw agent payload -> expected parsed value ('js') and, on failure,
    # a regex the single emitted log message must match
    test_cases = {
        'OK': dict(
            js=None,
            log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
        ),
        'OK\n': dict(
            js=None,
            log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
        ),
        'error:unsupported-endpoint': dict(
            js=None,
            log='Unable to parse Datadog Agent JSON response: .*? \'error:unsupported-endpoint\'',
        ),
        42: dict(  # int as key to trigger TypeError
            js=None,
            log='Unable to parse Datadog Agent JSON response: .*? 42',
        ),
        '{}': dict(js={}),
        '[]': dict(js=[]),

        # Priority sampling "rate_by_service" response
        ('{"rate_by_service": '
         '{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'): dict(
            js=dict(
                rate_by_service={
                    'service:,env:': 0.5,
                    'service:mcnulty,env:test': 0.9,
                    'service:postgres,env:test': 0.6,
                },
            ),
        ),
        ' [4,2,1] ': dict(js=[4, 2, 1]),
    }

    for k, v in iteritems(test_cases):
        # each case must produce exactly one log call, so reset between cases
        log.reset_mock()

        r = Response.from_http_response(ResponseMock(k))
        js = r.get_json()
        assert v['js'] == js
        if 'log' in v:
            log.assert_called_once()
            # re-render the lazy %-style log call to a concrete message
            msg = log.call_args[0][0] % log.call_args[0][1:]
            assert re.match(v['log'], msg), msg
def test_connect_factory(self):
    """Each service gets its own pin; postgres service metadata is reported for both."""
    dummy_tracer = get_dummy_tracer()

    for svc in ('db', 'another'):
        conn, _ = self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=svc, tracer=dummy_tracer).onto(conn)
        self.assert_conn_is_traced(dummy_tracer, conn, svc)

    # ensure we have the service types
    expected = {
        'db': {'app': 'postgres', 'app_type': 'db'},
        'another': {'app': 'postgres', 'app_type': 'db'},
    }
    self.assertEquals(dummy_tracer.writer.pop_services(), expected)
def test_configuration_service_name(self):
    """Ensure that the integration can be configured."""
    with self.override_config('vertica', dict(service_name='test_svc_name')):
        patch()
        import vertica_python

        test_tracer = get_dummy_tracer()

        conn = vertica_python.connect(**VERTICA_CONFIG)
        cur = conn.cursor()
        Pin.override(cur, tracer=test_tracer)
        with conn:
            cur.execute("DROP TABLE IF EXISTS {}".format(TEST_TABLE))

    spans = test_tracer.writer.pop()
    assert len(spans) == 1
    assert spans[0].service == "test_svc_name"
def test_extract(self):
    """Extracted context must seed child spans with the propagated ids."""
    dummy_tracer = get_dummy_tracer()
    carrier = {
        HTTP_HEADER_TRACE_ID: '1234',
        HTTP_HEADER_PARENT_ID: '5678',
        HTTP_HEADER_SAMPLING_PRIORITY: '1',
    }
    ctx = HTTPPropagator().extract(carrier)
    dummy_tracer.context_provider.activate(ctx)

    with dummy_tracer.trace("local_root_span") as span:
        eq_(span.trace_id, 1234)
        eq_(span.parent_id, 5678)
def test_connect_factory(self):
    """Distinct pins per connection; both postgres services reported."""
    dummy_tracer = get_dummy_tracer()

    for svc in ("db", "another"):
        conn, _ = self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=svc, tracer=dummy_tracer).onto(conn)
        self.assert_conn_is_traced(dummy_tracer, conn, svc)

    # ensure we have the service types
    expected = {
        "db": {"app": "postgres", "app_type": "db"},
        "another": {"app": "postgres", "app_type": "db"},
    }
    eq_(dummy_tracer.writer.pop_services(), expected)
def make_ot_tracer(
    service_name="my_svc", config=None, scope_manager=None, context_provider=None
):
    """Build an OpenTracing tracer backed by a dummy datadog tracer."""
    ot_tracer = Tracer(
        service_name=service_name,
        config=config or {},
        scope_manager=scope_manager,
    )

    # similar to how we test the ddtracer, use a dummy tracer
    dummy = get_dummy_tracer()
    if context_provider:
        dummy.configure(context_provider=context_provider)

    # attach the dummy tracer to the opentracer
    ot_tracer._dd_tracer = dummy
    return ot_tracer
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: tracer = get_dummy_tracer() writer = tracer.writer conn = mysql.connector.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone( service=self.TEST_SERVICE, tracer=tracer).onto(conn) assert conn.is_connected() cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() eq_(len(rows), 1) spans = writer.pop() eq_(len(spans), 1) span = spans[0] eq_(span.service, self.TEST_SERVICE) eq_(span.name, 'mysql.query') eq_(span.span_type, 'sql') eq_(span.error, 0) assert_dict_issuperset(span.meta, { 'out.host': u'127.0.0.1', 'out.port': u'3306', 'db.name': u'test', 'db.user': u'test', }) ok_(span.get_tag('sql.query') is None) finally: unpatch() # assert we finish unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def test_extract(self):
    """Datadog headers must populate trace id, parent id and priority."""
    dummy_tracer = get_dummy_tracer()
    carrier = {
        "x-datadog-trace-id": "1234",
        "x-datadog-parent-id": "5678",
        "x-datadog-sampling-priority": "1",
    }
    ctx = HTTPPropagator().extract(carrier)
    dummy_tracer.context_provider.activate(ctx)

    with dummy_tracer.trace("local_root_span") as span:
        eq_(span.trace_id, 1234)
        eq_(span.parent_id, 5678)
        eq_(span.context.sampling_priority, 1)
def test_connect_factory(self):
    """Each aiopg connection can be pinned to its own service; both report postgres meta."""
    tracer = get_dummy_tracer()

    # trace two connections under two distinct service names
    services = ['db', 'another']
    for service in services:
        conn, _ = yield from self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
        yield from self.assert_conn_is_traced(tracer, conn, service)
        conn.close()

    # ensure we have the service types
    service_meta = tracer.writer.pop_services()
    expected = {
        'db': {'app': 'postgres', 'app_type': 'db'},
        'another': {'app': 'postgres', 'app_type': 'db'},
    }
    eq_(service_meta, expected)
def test_partial_flush_remaining(self):
    """
    When calling `Context.get`
        When partial flushing is enabled
            When we have some unfinished spans
                We keep the unfinished spans around
    """
    tracer = get_dummy_tracer()
    ctx = Context()

    # Create a root span with 10 children; finish only the first 5 children,
    # leaving the root and the other 5 open
    root = Span(tracer=tracer, name='root')
    ctx.add_span(root)
    for i in range(10):
        child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
        child._parent = root
        ctx.add_span(child)

        # Close the first 5 only
        if i < 5:
            child._finished = True
            ctx.close_span(child)

    with self.override_partial_flush(ctx, enabled=True, min_spans=5):
        trace, sampled = ctx.get()

    # Assert partially flushed spans
    # BUG FIX: was `assertTrue(len(trace), 5)` — assertTrue ignores its second
    # argument (it is treated as the failure message), so the check passed for
    # any non-empty trace; assert the exact length instead.
    self.assertEqual(len(trace), 5)
    self.assertIsNotNone(sampled)
    self.assertEqual(
        set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
        set([span.name for span in trace]))

    # Assert remaining unclosed spans
    self.assertEqual(len(ctx._trace), 6)
    self.assertEqual(ctx._finished_spans, 0)
    self.assertEqual(
        set([
            'root', 'child_5', 'child_6', 'child_7', 'child_8', 'child_9'
        ]),
        set([span.name for span in ctx._trace]),
    )
def test_downgrade_api(self):
    """Hitting a missing endpoint must downgrade the API to a stable encoder."""
    # make a call to a not existing endpoint, downgrades
    # the current API to a stable one
    dummy_tracer = get_dummy_tracer()
    dummy_tracer.trace('client.testing').finish()
    trace = dummy_tracer.writer.pop()

    # the encoder is right but we're targeting an API
    # endpoint that is not available
    api = API('localhost', 8126)
    api._traces = '/v0.0/traces'
    assert isinstance(api._encoder, MsgpackEncoder)

    # after the call, we downgrade to a working endpoint
    resp = api.send_traces([trace])
    assert resp
    assert resp.status == 200
    assert isinstance(api._encoder, JSONEncoder)
def test_distributred_tracing_disabled(self):
    """Incoming Datadog headers are ignored when distributed tracing is off."""
    self.tracer = get_dummy_tracer()
    self.api = get_app(tracer=self.tracer, distributed_tracing=False)
    inbound = {
        'x-datadog-trace-id': '100',
        'x-datadog-parent-id': '42',
    }
    response = self.simulate_get('/200', headers=inbound)
    assert response.status_code == 200
    assert response.content.decode('utf-8') == 'Success'

    all_traces = self.tracer.writer.pop_traces()
    assert len(all_traces) == 1
    assert len(all_traces[0]) == 1
    root = all_traces[0][0]
    # the propagated ids must NOT have been adopted
    assert root.parent_id != 42
    assert root.trace_id != 100
def test_distributred_tracing_disabled(self):
    """Incoming Datadog headers must be ignored when distributed tracing is off."""
    self.tracer = get_dummy_tracer()
    self.api = get_app(tracer=self.tracer, distributed_tracing=False)
    headers = {
        'x-datadog-trace-id': '100',
        'x-datadog-parent-id': '42',
    }
    out = self.simulate_get('/200', headers=headers)
    eq_(out.status_code, 200)
    eq_(out.content.decode('utf-8'), 'Success')

    traces = self.tracer.writer.pop_traces()
    eq_(len(traces), 1)
    eq_(len(traces[0]), 1)
    # BUG FIX: was `is not 42` / `is not 100` — `is not` compares object
    # identity, which for ints depends on CPython's small-int caching and is
    # not a reliable value comparison; use `!=` for value inequality.
    ok_(traces[0][0].parent_id != 42)
    ok_(traces[0][0].trace_id != 100)
def test_log_unfinished_spans_when_ok(self, log):
    """No unfinished-span warning may be emitted for a fully finished trace."""
    # if the unfinished spans logging is enabled but the trace is finished, don't log anything
    dummy_tracer = get_dummy_tracer()
    dummy_tracer.debug_logging = True
    ctx = Context()

    # manually create a root-child trace
    root = Span(tracer=dummy_tracer, name='root')
    child = Span(tracer=dummy_tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    child._parent = root
    ctx.add_span(root)
    ctx.add_span(child)

    # close the trace
    child.finish()
    root.finish()

    # the logger has never been invoked to print unfinished spans
    for call, _ in log.call_args_list:
        assert 'the trace has %d unfinished spans' not in call[0]
def test_downgrade_api(self):
    """Hitting a missing endpoint must downgrade the API to a stable encoder."""
    # make a call to a not existing endpoint, downgrades
    # the current API to a stable one
    dummy_tracer = get_dummy_tracer()
    dummy_tracer.trace('client.testing').finish()
    trace = dummy_tracer.writer.pop()

    # the encoder is right but we're targeting an API
    # endpoint that is not available
    api = API('localhost', 8126)
    api._traces = '/v0.0/traces'
    ok_(isinstance(api._encoder, MsgpackEncoder))

    # after the call, we downgrade to a working endpoint
    resp = api.send_traces([trace])
    ok_(resp)
    eq_(resp.status, 200)
    ok_(isinstance(api._encoder, JSONEncoder))
def test_distributred_tracing_disabled(self):
    """Incoming Datadog headers must not be adopted by the traced app."""
    self.tracer = get_dummy_tracer()
    # NOTE(review): despite the test name, `distributed_tracing=False` is not
    # passed here — confirm whether get_app() defaults to disabled, or the
    # flag is missing.
    self.api = get_app(tracer=self.tracer)
    headers = {
        'x-datadog-trace-id': '100',
        'x-datadog-parent-id': '42',
    }
    out = self.simulate_get('/200', headers=headers)
    eq_(out.status_code, 200)
    eq_(out.content.decode('utf-8'), 'Success')

    traces = self.tracer.writer.pop_traces()
    eq_(len(traces), 1)
    eq_(len(traces[0]), 1)
    # BUG FIX: was `is not 42` / `is not 100` — `is not` compares object
    # identity, which for ints depends on CPython's small-int caching and is
    # not a reliable value comparison; use `!=` for value inequality.
    ok_(traces[0][0].parent_id != 42)
    ok_(traces[0][0].trace_id != 100)
def test_WSGI_extract(self):
    """Ensure we support the WSGI formatted headers as well."""
    dummy_tracer = get_dummy_tracer()
    wsgi_environ = {
        "HTTP_X_DATADOG_TRACE_ID": "1234",
        "HTTP_X_DATADOG_PARENT_ID": "5678",
        "HTTP_X_DATADOG_SAMPLING_PRIORITY": "1",
    }
    ctx = HTTPPropagator().extract(wsgi_environ)
    dummy_tracer.context_provider.activate(ctx)

    with dummy_tracer.trace("local_root_span") as span:
        eq_(span.trace_id, 1234)
        eq_(span.parent_id, 5678)
        eq_(span.context.sampling_priority, 1)
def test_log_unfinished_spans_when_ok(self, log):
    """No unfinished-span warning may be emitted for a fully finished trace."""
    # if the unfinished spans logging is enabled but the trace is finished, don't log anything
    dummy_tracer = get_dummy_tracer()
    dummy_tracer.debug_logging = True
    ctx = Context()

    # manually create a root-child trace
    root = Span(tracer=dummy_tracer, name='root')
    child = Span(tracer=dummy_tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    child._parent = root
    ctx.add_span(root)
    ctx.add_span(child)

    # close the trace
    child.finish()
    root.finish()

    # the logger has never been invoked to print unfinished spans
    for call, _ in log.call_args_list:
        ok_('the trace has %d unfinished spans' not in call[0])
def test_deterministic_behavior(self):
    """
    Test that for a given trace ID, the result is always the same
    """
    tracer = get_dummy_tracer()
    writer = tracer.writer
    tracer.sampler = RateSampler(0.5)
    # seed so the generated trace ids are reproducible across runs
    random.seed(1234)
    for i in range(10):
        # NOTE(review): an int is passed as the span name — confirm intended
        span = tracer.trace(i)
        span.finish()
        samples = writer.pop()
        assert len(samples) <= 1, "there should be 0 or 1 spans"
        sampled = (1 == len(samples))
        # the sampling decision must be stable for a fixed trace_id
        for j in range(10):
            other_span = Span(tracer, i, trace_id=span.trace_id)
            assert sampled == tracer.sampler.sample(other_span), "sampling should give the same result for a given trace_id"
def test_deterministic_behavior(self):
    """
    Test that for a given trace ID, the result is always the same
    """
    tracer = get_dummy_tracer()
    writer = tracer.writer
    tracer.sampler = RateSampler(0.5)
    for i in range(10):
        # NOTE(review): an int is passed as the span name — confirm intended
        span = tracer.trace(i)
        span.finish()
        samples = writer.pop()
        assert len(samples) <= 1, 'there should be 0 or 1 spans'
        sampled = (1 == len(samples))
        # the sampling decision must be stable for a fixed trace_id
        for j in range(10):
            other_span = Span(tracer, i, trace_id=span.trace_id)
            assert (
                sampled == tracer.sampler.sample(other_span)
            ), 'sampling should give the same result for a given trace_id'
def test_log_unfinished_spans_disabled(self, log):
    """With debug logging off, unfinished spans are never reported."""
    # the trace finished status logging is disabled
    dummy_tracer = get_dummy_tracer()
    dummy_tracer.debug_logging = False
    ctx = Context()

    # manually create a root-child trace
    root = Span(tracer=dummy_tracer, name='root')
    first_child = Span(tracer=dummy_tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
    second_child = Span(tracer=dummy_tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
    first_child._parent = root
    second_child._parent = root
    ctx.add_span(root)
    ctx.add_span(first_child)
    ctx.add_span(second_child)

    # close only the parent
    root.finish()
    ok_(ctx.is_finished() is False)

    # the logger has never been invoked to print unfinished spans
    for call, _ in log.call_args_list:
        ok_('the trace has %d unfinished spans' not in call[0])
def test_patch_unpatch(self):
    """aiopg patch() is idempotent and unpatch() fully removes tracing."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Test patch idempotence
    patch()
    patch()

    service = 'fo'

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    # no spans may be produced while unpatched
    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def test_patch_unpatch(self):
    """pylibmc patch() is idempotent and unpatch() fully removes tracing."""
    tracer = get_dummy_tracer()
    writer = tracer.writer
    url = "%s:%s" % (cfg["host"], cfg["port"])

    # Test patch idempotence
    patch()
    patch()

    client = pylibmc.Client([url])
    Pin.get_from(client).clone(
        service=self.TEST_SERVICE,
        tracer=tracer).onto(client)

    client.set("a", 1)

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    client = pylibmc.Client([url])
    client.set("a", 1)

    # no spans may be produced while unpatched
    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    client = pylibmc.Client([url])
    # a fresh Pin is created here (instead of cloning the default one)
    Pin(service=self.TEST_SERVICE, tracer=tracer).onto(client)
    client.set("a", 1)

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)