def test_patch_unpatch(self):
    """Cassandra tracing: patch() must be idempotent, unpatch() must stop
    span production, and a later patch() must restore it."""
    # Patching twice must not double-instrument.
    patch()
    patch()

    tracer = get_dummy_tracer()
    Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)

    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)
    traces = tracer.writer.pop()
    assert traces, traces
    eq_(len(traces), 1)

    # Once unpatched, no spans may be produced.
    unpatch()
    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)
    traces = tracer.writer.pop()
    assert not traces, traces

    # Re-patching restores tracing.
    patch()
    Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)
    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)
    traces = tracer.writer.pop()
    assert traces, traces
def test_patch_unpatch(self):
    """redis tracing: patch() must be idempotent, unpatch() must stop span
    production, and a later patch() must restore it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Patching twice must not double-instrument.
    patch()
    patch()
    client = redis.Redis(port=REDIS_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    client.get("key")
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)

    # Once unpatched, no spans may be produced.
    unpatch()
    client = redis.Redis(port=REDIS_CONFIG['port'])
    client.get("key")
    traces = writer.pop()
    assert not traces, traces

    # Re-patching restores tracing.
    patch()
    client = redis.Redis(port=REDIS_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    client.get("key")
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)
def get_tracer_and_connect(self):
    """Pin mongoengine's connect function to a dummy tracer, connect, and
    return the tracer."""
    tracer = get_dummy_tracer()
    pin = Pin.get_from(mongoengine.connect)
    pin.clone(tracer=tracer).onto(mongoengine.connect)
    mongoengine.connect(port=MONGO_CONFIG['port'])

    return tracer
def test_patch_unpatch(self):
    """pymongo tracing: patch() must be idempotent, unpatch() must stop span
    production, and a later patch() must restore it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Patching twice must not double-instrument.
    patch()
    patch()
    client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    client["testdb"].drop_collection("whatever")
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)

    # Once unpatched, no spans may be produced.
    unpatch()
    client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    client["testdb"].drop_collection("whatever")
    traces = writer.pop()
    assert not traces, traces

    # Re-patching restores tracing.
    patch()
    client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    client["testdb"].drop_collection("whatever")
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)
def test_patch_unpatch(self):
    """mongoengine tracing: patch() must be idempotent, unpatch() must stop
    span production, and a later patch() must restore it."""
    tracer = get_dummy_tracer()

    # Patching twice must not double-instrument.
    patch()
    patch()
    client = mongoengine.connect(port=MONGO_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    Artist.drop_collection()
    traces = tracer.writer.pop()
    assert traces, traces
    eq_(len(traces), 1)

    # Drop the live connection first, then unpatch: no spans expected.
    mongoengine.connection.disconnect()
    unpatch()
    mongoengine.connect(port=MONGO_CONFIG['port'])
    Artist.drop_collection()
    traces = tracer.writer.pop()
    assert not traces, traces

    # Re-patching restores tracing.
    patch()
    client = mongoengine.connect(port=MONGO_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    Artist.drop_collection()
    traces = tracer.writer.pop()
    assert traces, traces
    eq_(len(traces), 1)
def test_pin_config(self):
    """A Pin carries a mutable per-pin configuration mapping."""
    target = self.Obj()
    Pin.override(target, service='metrics')

    attached = Pin.get_from(target)
    ok_(attached._config is not None)

    # The config dict must accept and retain arbitrary settings.
    attached._config['distributed_tracing'] = True
    ok_(attached._config['distributed_tracing'] is True)
def patch_conn(conn):
    """Wrap a pymysql connection so its queries are traced.

    Returns the traced proxy; the original connection is left untouched.
    """
    # Collect connection metadata (host, port, user, ...) as span tags.
    tags = {}
    for tag, attr in CONN_ATTR_BY_TAG.items():
        tags[tag] = getattr(conn, attr, '')

    pin = Pin(service="pymysql", app="pymysql", app_type=AppTypes.db, tags=tags)
    traced = TracedConnection(conn, pin=pin)
    pin.onto(traced)
    return traced
def get_client(self):
    """Return a fresh, flushed pylibmc client pinned to a dummy tracer."""
    address = "%s:%s" % (cfg["host"], cfg["port"])
    client = pylibmc.Client([address])
    # Start from a clean cache for every test.
    client.flush_all()

    tracer = get_dummy_tracer()
    Pin.get_from(client).clone(tracer=tracer).onto(client)

    return client, tracer
def test_pin(self):
    """Attaching a Pin to an instance hands back the very same object."""
    target = self.Obj()
    pin = Pin(service='metrics')
    pin.onto(target)

    retrieved = Pin.get_from(target)
    eq_(retrieved.service, pin.service)
    ok_(retrieved is pin)
def patch_conn(conn, *args, **kwargs):
    """Wrap a mysql-connector connection so its queries are traced.

    Connection metadata may arrive either as keyword arguments or
    positionally; the keyword form wins when both are possible.
    """
    tags = {}
    for tag, (kwarg, pos) in KWPOS_BY_TAG.items():
        if kwarg in kwargs:
            tags[tag] = kwargs[kwarg]
        elif len(args) > pos:
            tags[tag] = args[pos]
    tags[net.TARGET_PORT] = conn.port

    pin = Pin(service="mysql", app="mysql", app_type=AppTypes.db, tags=tags)
    traced = TracedConnection(conn, pin=pin)
    pin.onto(traced)
    return traced
def test_override_missing(self):
    """Overriding one instance must not leak a Pin onto other instances."""
    class A(object):
        pass

    first = A()
    ok_(Pin.get_from(first) is None)
    Pin.override(first, service='metrics')
    eq_(Pin.get_from(first).service, 'metrics')

    # A brand new instance stays unpinned.
    second = A()
    ok_(Pin.get_from(second) is None)
def test_copy(self):
    """clone() must copy mutable state while sharing the tracer reference."""
    original = Pin(service='metrics', app='flask', tags={'key': 'value'})
    cloned = original.clone(service='intake')

    # Overridden and inherited attribute values.
    eq_(original.service, 'metrics')
    eq_(cloned.service, 'intake')
    eq_(original.app, 'flask')
    eq_(cloned.app, 'flask')

    # Mutable state is copied, not shared...
    ok_(original.tags is not cloned.tags)
    ok_(original._config is not cloned._config)
    # ...while the tracer object itself is shared.
    ok_(original.tracer is cloned.tracer)
def traced_start_fetching_next_page(func, instance, args, kwargs):
    """Trace Cassandra result pagination: each fetched page gets its own span.

    `instance` is a driver ResponseFuture-like object; `func` is the wrapped
    `start_fetching_next_page`. Falls through untraced when there are no more
    pages or no enabled Pin is attached to the cluster.
    """
    has_more_pages = getattr(instance, 'has_more_pages', True)
    if not has_more_pages:
        return func(*args, **kwargs)
    session = getattr(instance, 'session', None)
    cluster = getattr(session, 'cluster', None)
    pin = Pin.get_from(cluster)
    if not pin or not pin.enabled():
        return func(*args, **kwargs)

    # In case the current span is not finished we make sure to finish it
    old_span = getattr(instance, CURRENT_SPAN, None)
    if old_span:
        log.debug('previous span was not finished before fetching next page')
        old_span.finish()

    query = getattr(instance, 'query', None)

    span = _start_span_and_set_tags(pin, query, session, cluster)

    # Track which page this span covers and stash it on the future so the
    # completion callbacks can close it.
    page_number = getattr(instance, PAGE_NUMBER, 1) + 1
    setattr(instance, PAGE_NUMBER, page_number)
    setattr(instance, CURRENT_SPAN, span)
    try:
        return func(*args, **kwargs)
    except:
        # Record the error on the span (and finish it via the context
        # manager) before re-raising to the caller.
        with span:
            span.set_exc_info(*sys.exc_info())
        raise
def _task_init(func, task, args, kwargs):
    """Run the wrapped Task.__init__, then instrument the task if its Pin
    allows tracing."""
    func(*args, **kwargs)

    pin = Pin.get_from(task)
    if not pin or not pin.enabled():
        return
    patch_task(task, pin=pin)
def test_connect_factory(self):
    """Each connection can carry its own service name, and service metadata
    must be reported for every one of them."""
    tracer = get_dummy_tracer()

    for service in ("db", "another"):
        conn, _ = self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
        self.assert_conn_is_traced(tracer, conn, service)

    # Both services must have been registered with their app metadata.
    expected = {
        "db": {"app": "postgres", "app_type": "db"},
        "another": {"app": "postgres", "app_type": "db"},
    }
    eq_(tracer.writer.pop_services(), expected)
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: tracer = get_dummy_tracer() writer = tracer.writer conn = mysql.connector.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone( service=self.TEST_SERVICE, tracer=tracer).onto(conn) assert conn.is_connected() cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() eq_(len(rows), 1) spans = writer.pop() eq_(len(spans), 1) span = spans[0] eq_(span.service, self.TEST_SERVICE) eq_(span.name, 'mysql.query') eq_(span.span_type, 'sql') eq_(span.error, 0) assert_dict_issuperset(span.meta, { 'out.host': u'127.0.0.1', 'out.port': u'3306', 'db.name': u'test', 'db.user': u'test', }) ok_(span.get_tag('sql.query') is None) finally: unpatch() # assert we finish unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def wrapper(wrapped, instance, args, kwargs):
    """Run the decorated function with the instance's Pin, falling back to
    the plain wrapped call when tracing is not enabled."""
    pin = Pin.get_from(instance)
    if pin and pin.enabled():
        # Tracing configured: hand off to our decorated function.
        return decorated(pin, wrapped, instance, args, kwargs)
    # No pin (or disabled): behave exactly like the original method.
    return wrapped(*args, **kwargs)
def test_connect_factory(self):
    """Trace two aiopg connections under distinct service names and verify
    the reported service metadata (generator-based coroutine test)."""
    tracer = get_dummy_tracer()

    services = ['db', 'another']
    for service in services:
        conn, _ = yield from self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
        yield from self.assert_conn_is_traced(tracer, conn, service)
        conn.close()

    # ensure we have the service types
    service_meta = tracer.writer.pop_services()
    expected = {
        'db': {'app': 'postgres', 'app_type': 'db'},
        'another': {'app': 'postgres', 'app_type': 'db'},
    }
    eq_(service_meta, expected)
def test_pin_does_not_override_global(self):
    """An instance Pin inherits the class Pin's config, but mutating it must
    never propagate back to the class-level Pin."""
    class A(object):
        pass

    Pin.override(A, service='metrics')
    global_pin = Pin.get_from(A)
    global_pin._config['distributed_tracing'] = True

    instance = A()
    instance_pin = Pin.get_from(instance)
    ok_(instance_pin is not None)
    # Inherited from the class-level pin...
    ok_(instance_pin._config['distributed_tracing'] is True)

    # ...but instance-level edits stay local.
    instance_pin._config['distributed_tracing'] = False
    ok_(global_pin._config['distributed_tracing'] is True)
    ok_(instance_pin._config['distributed_tracing'] is False)
def test_sqlite(self):
    """Trace sqlite queries under two different service names, covering both
    the success path and the error path (bad table name)."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # ensure we can trace multiple services without stomping
    services = ["db", "another"]
    for service in services:
        db = sqlite3.connect(":memory:")
        pin = Pin.get_from(db)
        assert pin
        eq_("db", pin.app_type)
        pin.clone(
            service=service, tracer=tracer).onto(db)

        # Ensure we can run a query and it's correctly traced
        q = "select * from sqlite_master"
        start = time.time()
        cursor = db.execute(q)
        rows = cursor.fetchall()
        end = time.time()
        assert not rows
        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.span_type, "sql")
        eq_(span.resource, q)
        eq_(span.service, service)
        # Raw SQL must not be duplicated into the tag.
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 0)
        # Span timing must fall within the measured window.
        assert start <= span.start <= end
        assert span.duration <= end - start

        # run a query with an error and ensure all is well
        q = "select * from some_non_existant_table"
        try:
            db.execute(q)
        except Exception:
            pass
        else:
            assert 0, "should have an error"
        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.resource, q)
        eq_(span.service, service)
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 1)
        eq_(span.span_type, "sql")
        # Error metadata must capture the sqlite OperationalError.
        assert span.get_tag(errors.ERROR_STACK)
        assert 'OperationalError' in span.get_tag(errors.ERROR_TYPE)
        assert 'no such table' in span.get_tag(errors.ERROR_MSG)
def aiobotocore_client(service, tracer):
    """Helper function that creates a new aiobotocore client so that
    it is closed at the end of the context manager.
    """
    session = aiobotocore.session.get_session()
    client = session.create_client(
        service,
        region_name='us-west-2',
        endpoint_url=LOCALSTACK_ENDPOINT_URL[service],
        aws_access_key_id='aws',
        aws_secret_access_key='aws',
        aws_session_token='aws',
    )
    # Route the client's spans to the provided tracer.
    Pin.override(client, tracer=tracer)
    try:
        yield client
    finally:
        client.close()
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: tracer = get_dummy_tracer() writer = tracer.writer conn = pymysql.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone(service=self.TEST_SERVICE, tracer=tracer).onto(conn) assert not conn._closed cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() eq_(len(rows), 1) spans = writer.pop() eq_(len(spans), 1) span = spans[0] eq_(span.service, self.TEST_SERVICE) eq_(span.name, 'pymysql.query') eq_(span.span_type, 'sql') eq_(span.error, 0) meta = {} meta.update(self.DB_INFO) assert_dict_issuperset(span.meta, meta) finally: unpatch() # assert we finish unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def traced_execute_async(func, instance, args, kwargs):
    """Trace Session.execute_async: open a span for the query and hook the
    returned ResponseFuture so the span is closed on completion or error.

    `instance` is the Cassandra Session; falls through untraced when no
    enabled Pin is attached to the session's cluster.
    """
    cluster = getattr(instance, 'cluster', None)
    pin = Pin.get_from(cluster)
    if not pin or not pin.enabled():
        return func(*args, **kwargs)

    query = kwargs.get("query") or args[0]

    span = _start_span_and_set_tags(pin, query, instance, cluster)

    try:
        result = func(*args, **kwargs)
        setattr(result, CURRENT_SPAN, span)
        setattr(result, PAGE_NUMBER, 1)
        # Wrap the future's completion hooks so the span is finished when the
        # final result/exception is set or the next page is fetched.
        setattr(
            result,
            '_set_final_result',
            wrapt.FunctionWrapper(
                result._set_final_result,
                traced_set_final_result
            )
        )
        setattr(
            result,
            '_set_final_exception',
            wrapt.FunctionWrapper(
                result._set_final_exception,
                traced_set_final_exception
            )
        )
        setattr(
            result,
            'start_fetching_next_page',
            wrapt.FunctionWrapper(
                result.start_fetching_next_page,
                traced_start_fetching_next_page
            )
        )
        # Since we cannot be sure that the previous methods were overwritten
        # before the call ended, we add callbacks that will be run
        # synchronously if the call already returned and we remove them right
        # after.
        result.add_callbacks(
            _close_span_on_success,
            _close_span_on_error,
            callback_args=(result,),
            errback_args=(result,)
        )
        result.clear_callbacks()
        return result
    except:
        # Record the error on the span (and finish it via the context
        # manager) before re-raising to the caller.
        with span:
            span.set_exc_info(*sys.exc_info())
        raise
def test_cant_pin_with_slots(self):
    """Pin.onto() cannot attach to objects that define __slots__; lookup
    must then return None rather than raise."""
    class Obj(object):
        __slots__ = ['value']

    target = Obj()
    target.value = 1

    Pin(service='metrics').onto(target)
    ok_(Pin.get_from(target) is None)
def test_meta_override(self):
    """Tags set on a cloned pin must surface as span metadata."""
    r, tracer = self.get_redis_and_tracer()
    pin = Pin.get_from(r)
    # The client must have been pinned by the fixture; failing loudly here
    # beats the original silent `if pin:` skip, which would let the test
    # run on without the tag and produce a confusing downstream failure.
    ok_(pin is not None)
    pin.clone(tags={'cheese': 'camembert'}).onto(r)

    r.get('cheese')
    spans = tracer.writer.pop()
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.TEST_SERVICE)
    # The custom tag must appear in the span metadata.
    ok_('cheese' in span.meta and span.meta['cheese'] == 'camembert')
def test_patch_unpatch(self):
    """aiopg tracing: patch() must be idempotent, unpatch() must stop span
    production, and a later patch() must restore it (coroutine test)."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Test patch idempotence
    patch()
    patch()

    service = 'fo'

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def test_patch_unpatch(self):
    """sqlite3 tracing: patch() must be idempotent, unpatch() must stop span
    production, and a later patch() must restore it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Patching twice must not double-instrument.
    patch()
    patch()
    db = sqlite3.connect(":memory:")
    pin = Pin.get_from(db)
    assert pin
    pin.clone(tracer=tracer).onto(db)
    db.cursor().execute("select 'blah'").fetchall()
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)

    # Once unpatched, no spans may be produced.
    unpatch()
    db = sqlite3.connect(":memory:")
    db.cursor().execute("select 'blah'").fetchall()
    traces = writer.pop()
    assert not traces, traces

    # Re-patching restores tracing.
    patch()
    db = sqlite3.connect(":memory:")
    pin = Pin.get_from(db)
    assert pin
    pin.clone(tracer=tracer).onto(db)
    db.cursor().execute("select 'blah'").fetchall()
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)
def test_patch_unpatch(self):
    """pylibmc tracing: patch() must be idempotent, unpatch() must stop span
    production, and a later patch() must restore it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer
    url = "%s:%s" % (cfg["host"], cfg["port"])

    # Test patch idempotence
    patch()
    patch()
    client = pylibmc.Client([url])
    Pin.get_from(client).clone(
        service=self.TEST_SERVICE, tracer=tracer).onto(client)
    client.set("a", 1)
    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()
    client = pylibmc.Client([url])
    client.set("a", 1)
    spans = writer.pop()
    assert not spans, spans

    # Test patch again. Clone the default pin installed by patch() — as the
    # first phase and the sibling integration tests do — instead of building
    # a fresh Pin, so default metadata (app/app_type) is preserved.
    patch()
    client = pylibmc.Client([url])
    Pin.get_from(client).clone(
        service=self.TEST_SERVICE, tracer=tracer).onto(client)
    client.set("a", 1)
    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def _get_conn_tracer(self):
    """Lazily open a MySQL connection pinned to a dummy tracer and cache it.

    NOTE(review): `tracer` is only bound inside the `if` branch, so the
    return is kept inside it too — on a second call with a cached connection
    the method would otherwise raise NameError. Presumably each test uses a
    fresh instance so this runs exactly once; confirm against callers.
    """
    if not self.conn:
        tracer = get_dummy_tracer()
        self.conn = self._connect_with_kwargs()
        self.conn.ping()
        # Ensure that the default pin is there, with its default value
        pin = Pin.get_from(self.conn)
        assert pin
        assert pin.service == 'mysql'
        # Customize the service
        # we have to apply it on the existing one since new one won't inherit `app`
        pin.clone(service=self.TEST_SERVICE, tracer=tracer).onto(self.conn)

        return self.conn, tracer
def test_patch_unpatch(self):
    """psycopg2 tracing: patch() must be idempotent, unpatch() must stop
    span production, and a later patch() must restore it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer
    service = "fo"

    # Patching twice must not double-instrument.
    patch()
    patch()
    conn = psycopg2.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    conn.cursor().execute("select 'blah'")
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)

    # Once unpatched, no spans may be produced.
    unpatch()
    conn = psycopg2.connect(**POSTGRES_CONFIG)
    conn.cursor().execute("select 'blah'")
    traces = writer.pop()
    assert not traces, traces

    # Re-patching restores tracing.
    patch()
    conn = psycopg2.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    conn.cursor().execute("select 'blah'")
    traces = writer.pop()
    assert traces, traces
    eq_(len(traces), 1)
def test_sqs_send_message_trace_injection_with_message_attributes(self):
    """SendMessage: with nine user attributes there is still room for the
    `_datadog` attribute (SQS allows ten per message), so trace context must
    be injected and must round-trip through the queue."""
    sqs = self.session.create_client("sqs", region_name="us-east-1", endpoint_url="http://localhost:4566")
    queue = sqs.create_queue(QueueName="test")
    Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sqs)
    message_attributes = {
        "one": {"DataType": "String", "StringValue": "one"},
        "two": {"DataType": "String", "StringValue": "two"},
        "three": {"DataType": "String", "StringValue": "three"},
        "four": {"DataType": "String", "StringValue": "four"},
        "five": {"DataType": "String", "StringValue": "five"},
        "six": {"DataType": "String", "StringValue": "six"},
        "seven": {"DataType": "String", "StringValue": "seven"},
        "eight": {"DataType": "String", "StringValue": "eight"},
        "nine": {"DataType": "String", "StringValue": "nine"},
    }
    sqs.send_message(QueueUrl=queue["QueueUrl"], MessageBody="world", MessageAttributes=message_attributes)
    spans = self.get_spans()
    assert spans
    span = spans[0]
    self.assertEqual(len(spans), 1)
    self.assertEqual(span.get_tag("aws.region"), "us-east-1")
    self.assertEqual(span.get_tag("aws.operation"), "SendMessage")
    assert_is_measured(span)
    assert_span_http_status_code(span, 200)
    self.assertEqual(span.service, "test-botocore-tracing.sqs")
    self.assertEqual(span.resource, "sqs.sendmessage")
    # The injected trace context is recorded as a span tag...
    trace_json = span.get_tag("params.MessageAttributes._datadog.StringValue")
    trace_data_injected = json.loads(trace_json)
    self.assertEqual(trace_data_injected[HTTP_HEADER_TRACE_ID], str(span.trace_id))
    self.assertEqual(trace_data_injected[HTTP_HEADER_PARENT_ID], str(span.span_id))
    # ...and must be recoverable from the received message itself.
    response = sqs.receive_message(QueueUrl=queue["QueueUrl"], MessageAttributeNames=["_datadog"])
    self.assertEqual(len(response["Messages"]), 1)
    trace_json_message = response["Messages"][0]["MessageAttributes"]["_datadog"]["StringValue"]
    trace_data_in_message = json.loads(trace_json_message)
    self.assertEqual(trace_data_in_message[HTTP_HEADER_TRACE_ID], str(span.trace_id))
    self.assertEqual(trace_data_in_message[HTTP_HEADER_PARENT_ID], str(span.span_id))
    sqs.delete_queue(QueueUrl=queue["QueueUrl"])
def _traced_session(self):
    """Connect to the test keyspace through a cluster pinned to a dummy
    tracer; return the session and the tracer's writer."""
    tracer = get_dummy_tracer()
    cluster = Cluster(port=CASSANDRA_CONFIG['port'])
    pin = Pin.get_from(cluster)
    pin.clone(tracer=tracer).onto(cluster)
    session = cluster.connect(self.TEST_KEYSPACE)
    return session, tracer.writer
def test_repr(self):
    """The configured service name must show up in the Pin's string form."""
    pin = Pin(service='metrics')
    representation = str(pin)
    assert 'metrics' in representation
def __getddpin__(self):
    """Datadog pin lookup hook: delegate to the Pin attached to the
    underlying protocol object."""
    return Pin.get_from(self._protocol)
def wrapper(wrapped, instance, args, kwargs):
    """Invoke `func` with the first Pin found on the wrapped object, the
    instance, or the current app; otherwise run the original call."""
    pin = Pin._find(wrapped, instance, get_current_app())
    if pin and pin.enabled():
        return func(pin, wrapped, instance, args, kwargs)
    return wrapped(*args, **kwargs)
def make_client(self, mock_socket_values, **kwargs):
    """Build a pymemcache client backed by a mock socket and a dummy tracer,
    caching it on the test instance."""
    tracer = DummyTracer()
    Pin.override(pymemcache, tracer=tracer)
    client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs)
    # Replace the real socket with a scripted mock.
    client.sock = MockSocket(list(mock_socket_values))
    self.client = client
    return self.client
def _get_conn_and_tracer(self):
    """Open an aiopg connection (coroutine), cache it on the instance, and
    pin it to the test tracer."""
    conn = self._conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(tracer=self.tracer).onto(conn)

    return conn, self.tracer
def get_tracer_and_client(self):
    """Return the test tracer and a pymongo client pinned to it, with the
    low-level TCP server spans disabled."""
    client = pymongo.MongoClient(port=MONGO_CONFIG["port"])
    Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client)
    # We do not wish to trace tcp spans here
    server_cls = pymongo.server.Server
    Pin.get_from(server_cls).remove_from(server_cls)
    return self.tracer, client
def test_sqs_send_message_batch_trace_injection_with_max_message_attributes(self):
    """SendMessageBatch: with ten user attributes (the SQS per-message
    maximum) there is no room for `_datadog`, so no trace context may be
    injected into the message."""
    sqs = self.session.create_client('sqs', region_name='us-east-1', endpoint_url='http://localhost:4566')
    queue = sqs.create_queue(QueueName='test')
    Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sqs)
    entries = [
        {
            'Id': '1',
            'MessageBody': 'ironmaiden',
            'MessageAttributes': {
                'one': {'DataType': 'String', 'StringValue': 'one'},
                'two': {'DataType': 'String', 'StringValue': 'two'},
                'three': {'DataType': 'String', 'StringValue': 'three'},
                'four': {'DataType': 'String', 'StringValue': 'four'},
                'five': {'DataType': 'String', 'StringValue': 'five'},
                'six': {'DataType': 'String', 'StringValue': 'six'},
                'seven': {'DataType': 'String', 'StringValue': 'seven'},
                'eight': {'DataType': 'String', 'StringValue': 'eight'},
                'nine': {'DataType': 'String', 'StringValue': 'nine'},
                'ten': {'DataType': 'String', 'StringValue': 'ten'},
            }
        }
    ]
    sqs.send_message_batch(QueueUrl=queue['QueueUrl'], Entries=entries)
    spans = self.get_spans()
    assert spans
    span = spans[0]
    self.assertEqual(len(spans), 1)
    self.assertEqual(span.get_tag('aws.region'), 'us-east-1')
    self.assertEqual(span.get_tag('aws.operation'), 'SendMessageBatch')
    assert_is_measured(span)
    assert_span_http_status_code(span, 200)
    self.assertEqual(span.service, 'test-botocore-tracing.sqs')
    self.assertEqual(span.resource, 'sqs.sendmessagebatch')
    response = sqs.receive_message(QueueUrl=queue['QueueUrl'], MessageAttributeNames=['_datadog'])
    self.assertEqual(len(response['Messages']), 1)
    # No `_datadog` attribute should have been attached to the message.
    trace_in_message = 'MessageAttributes' in response['Messages'][0]
    self.assertEqual(trace_in_message, False)
    sqs.delete_queue(QueueUrl=queue['QueueUrl'])
def get_client(self):
    """Same fixture as the default test case, but re-pinned under the
    test-specific service name."""
    client, tracer = TestPylibmcPatchDefault.get_client(self)
    pin = Pin.get_from(client)
    pin.clone(service=self.TEST_SERVICE).onto(client)
    return client, tracer
def setUp(self):
    """Patch grpc, pin both the server and client modules to the test
    tracer, and start the test server."""
    super(GrpcTestCase, self).setUp()
    patch()
    for target in (constants.GRPC_PIN_MODULE_SERVER, constants.GRPC_PIN_MODULE_CLIENT):
        Pin.override(target, tracer=self.tracer)
    self._start_server()
def _traced_session(self):
    """Pin the shared cluster to a dummy tracer and connect to the test
    keyspace; return the session and the tracer."""
    tracer = get_dummy_tracer()
    pin = Pin.get_from(self.cluster)
    pin.clone(tracer=tracer).onto(self.cluster)
    session = self.cluster.connect(self.TEST_KEYSPACE)
    return session, tracer
def cursor(self, *args, **kwargs):
    """Create a cursor, wrapping it in a TracedCursor when this connection
    carries a Pin; otherwise return the plain cursor."""
    cursor = self.__wrapped__.cursor(*args, **kwargs)
    pin = Pin.get_from(self)
    if pin:
        return TracedCursor(cursor, pin)
    return cursor
def __init__(self, conn, pin=None):
    """Proxy a DB-API connection and attach a Datadog pin to the proxy.

    :param conn: the DB-API connection object to wrap.
    :param pin: optional Pin; when omitted a default one is built from the
        connection's vendor name.
    """
    super(TracedConnection, self).__init__(conn)
    name = _get_vendor(conn)
    # Span name prefix, e.g. 'postgres.connection'.
    self._self_datadog_name = '{}.connection'.format(name)
    db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)
    # Attach the pin to the proxy (not the raw connection).
    db_pin.onto(self)
def test_sqs_send_message_batch_trace_injection_with_max_message_attributes(
        self):
    """SendMessageBatch: with ten user attributes (the SQS per-message
    maximum) there is no room for `_datadog`, so no trace context may be
    injected into the message."""
    sqs = self.session.create_client("sqs", region_name="us-east-1", endpoint_url="http://localhost:4566")
    queue = sqs.create_queue(QueueName="test")
    Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sqs)
    entries = [{
        "Id": "1",
        "MessageBody": "ironmaiden",
        "MessageAttributes": {
            "one": {"DataType": "String", "StringValue": "one"},
            "two": {"DataType": "String", "StringValue": "two"},
            "three": {"DataType": "String", "StringValue": "three"},
            "four": {"DataType": "String", "StringValue": "four"},
            "five": {"DataType": "String", "StringValue": "five"},
            "six": {"DataType": "String", "StringValue": "six"},
            "seven": {"DataType": "String", "StringValue": "seven"},
            "eight": {"DataType": "String", "StringValue": "eight"},
            "nine": {"DataType": "String", "StringValue": "nine"},
            "ten": {"DataType": "String", "StringValue": "ten"},
        },
    }]
    sqs.send_message_batch(QueueUrl=queue["QueueUrl"], Entries=entries)
    spans = self.get_spans()
    assert spans
    span = spans[0]
    self.assertEqual(len(spans), 1)
    self.assertEqual(span.get_tag("aws.region"), "us-east-1")
    self.assertEqual(span.get_tag("aws.operation"), "SendMessageBatch")
    assert_is_measured(span)
    assert_span_http_status_code(span, 200)
    self.assertEqual(span.service, "test-botocore-tracing.sqs")
    self.assertEqual(span.resource, "sqs.sendmessagebatch")
    response = sqs.receive_message(QueueUrl=queue["QueueUrl"], MessageAttributeNames=["_datadog"])
    self.assertEqual(len(response["Messages"]), 1)
    # No `_datadog` attribute should have been attached to the message.
    trace_in_message = "MessageAttributes" in response["Messages"][0]
    self.assertEqual(trace_in_message, False)
    sqs.delete_queue(QueueUrl=queue["QueueUrl"])
def test_sqs_send_message_trace_injection_with_message_attributes(self):
    """SendMessage: with nine user attributes there is still room for the
    `_datadog` attribute (SQS allows ten per message), so trace context must
    be injected and must round-trip through the queue."""
    sqs = self.session.create_client('sqs', region_name='us-east-1', endpoint_url='http://localhost:4566')
    queue = sqs.create_queue(QueueName='test')
    Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sqs)
    message_attributes = {
        'one': {'DataType': 'String', 'StringValue': 'one'},
        'two': {'DataType': 'String', 'StringValue': 'two'},
        'three': {'DataType': 'String', 'StringValue': 'three'},
        'four': {'DataType': 'String', 'StringValue': 'four'},
        'five': {'DataType': 'String', 'StringValue': 'five'},
        'six': {'DataType': 'String', 'StringValue': 'six'},
        'seven': {'DataType': 'String', 'StringValue': 'seven'},
        'eight': {'DataType': 'String', 'StringValue': 'eight'},
        'nine': {'DataType': 'String', 'StringValue': 'nine'}
    }
    sqs.send_message(QueueUrl=queue['QueueUrl'], MessageBody='world', MessageAttributes=message_attributes)
    spans = self.get_spans()
    assert spans
    span = spans[0]
    self.assertEqual(len(spans), 1)
    self.assertEqual(span.get_tag('aws.region'), 'us-east-1')
    self.assertEqual(span.get_tag('aws.operation'), 'SendMessage')
    assert_is_measured(span)
    assert_span_http_status_code(span, 200)
    self.assertEqual(span.service, 'test-botocore-tracing.sqs')
    self.assertEqual(span.resource, 'sqs.sendmessage')
    # The injected trace context is recorded as a span tag...
    trace_json = span.get_tag('params.MessageAttributes._datadog.StringValue')
    trace_data_injected = json.loads(trace_json)
    self.assertEqual(trace_data_injected[HTTP_HEADER_TRACE_ID], str(span.trace_id))
    self.assertEqual(trace_data_injected[HTTP_HEADER_PARENT_ID], str(span.span_id))
    # ...and must be recoverable from the received message itself.
    response = sqs.receive_message(QueueUrl=queue['QueueUrl'], MessageAttributeNames=['_datadog'])
    self.assertEqual(len(response['Messages']), 1)
    trace_json_message = response['Messages'][0]['MessageAttributes']['_datadog']['StringValue']
    trace_data_in_message = json.loads(trace_json_message)
    self.assertEqual(trace_data_in_message[HTTP_HEADER_TRACE_ID], str(span.trace_id))
    self.assertEqual(trace_data_in_message[HTTP_HEADER_PARENT_ID], str(span.span_id))
    sqs.delete_queue(QueueUrl=queue['QueueUrl'])
def test_firehose_no_records_arg(self):
    """Firehose PutRecordBatch: the record payloads must be scrubbed from
    the span metadata (no `params.Records` tag)."""
    firehose = self.session.create_client("firehose", region_name="us-west-2")
    Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(firehose)

    stream_name = "test-stream"
    account_id = "test-account"

    firehose.create_delivery_stream(
        DeliveryStreamName=stream_name,
        RedshiftDestinationConfiguration={
            "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(account_id),
            "ClusterJDBCURL": "jdbc:redshift://host.amazonaws.com:5439/database",
            "CopyCommand": {
                "DataTableName": "outputTable",
                "CopyOptions": "CSV DELIMITER ',' NULL '\\0'",
            },
            "Username": "******",
            "Password": "******",
            "S3Configuration": {
                "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(account_id),
                "BucketARN": "arn:aws:s3:::kinesis-test",
                "Prefix": "myFolder/",
                "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124},
                "CompressionFormat": "UNCOMPRESSED",
            },
        },
    )

    firehose.put_record_batch(
        DeliveryStreamName=stream_name,
        Records=[{"Data": "some data"}],
    )

    spans = self.get_spans()
    assert spans
    assert len(spans) == 2
    assert all(span.name == "firehose.command" for span in spans)

    delivery_stream_span, put_record_batch_span = spans
    assert delivery_stream_span.get_tag("aws.operation") == "CreateDeliveryStream"
    assert put_record_batch_span.get_tag("aws.operation") == "PutRecordBatch"
    # The record payload itself must not be captured as a tag.
    assert put_record_batch_span.get_tag("params.Records") is None
def get_tracer_and_client(self):
    """Return a dummy tracer and a pymongo client whose pin routes to it."""
    tracer = get_dummy_tracer()
    client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    pin = Pin.get_from(client)
    pin.clone(tracer=tracer).onto(client)
    return tracer, client
def test_same_tracer(self):
    """Ensure same tracer reference is used by the pin on pymemache and
    Clients.
    """
    client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT))
    client_tracer = Pin.get_from(client).tracer
    module_tracer = Pin.get_from(pymemcache).tracer
    self.assertEqual(client_tracer, module_tracer)
def test_pin_find(self):
    """Pin._find must return the pin of the first pinned argument, in order."""
    # Two pinned objects with distinct services...
    obj_a = self.Obj()
    Pin(service='service-a').onto(obj_a)
    obj_b = self.Obj()
    Pin(service='service-b').onto(obj_b)
    # ...and one carrying no pin at all.
    obj_c = self.Obj()

    # The first pinned argument wins (obj_b here).
    found = Pin._find(obj_c, obj_b, obj_a)
    assert found is not None
    assert found.service == 'service-b'

    # Order matters: obj_a first yields service-a.
    found = Pin._find(obj_a, obj_b, obj_c)
    assert found is not None
    assert found.service == 'service-a'

    # No pinned argument at all yields None.
    found = Pin._find(obj_c, obj_c, obj_c)
    assert found is None
def patch():
    """Patch botocore so every BaseClient._make_api_call is traced, and
    attach a default 'aws' service pin to the client class."""
    wrapt.wrap_function_wrapper('botocore.client', 'BaseClient._make_api_call', patched_api_call)
    Pin(service="aws", app="botocore", app_type="web").onto(botocore.client.BaseClient)
def test_cant_modify(self):
    """Pins are frozen after construction: attribute writes must raise."""
    frozen_pin = Pin(service='metrics')
    with pytest.raises(AttributeError):
        frozen_pin.service = 'intake'
def get_spans(self):
    """Pop and return all spans captured by the tracer pinned to the client."""
    tracer = Pin.get_from(self.client).tracer
    return tracer.writer.pop()
def test_none(self):
    """Pin.get_from must tolerate None and simply return None."""
    result = Pin.get_from(None)
    assert result is None
def _given_a_traced_connection(self, tracer):
    """Open an in-memory sqlite connection whose pin routes to `tracer`."""
    db = sqlite3.connect(':memory:')
    pin = Pin.get_from(db)
    pin.clone(tracer=tracer).onto(db)
    return db
}) ## Trace patch for MySQL #patch(mysql=True) ## Connecting MySQL mydb = mysql.connector.connect( host=db_config.db_host, user=db_config.db_username, passwd=db_config.db_password, database=db_config.db_name ) mycursor = mydb.cursor() ## Use a pin to specify metadata related to this connection Pin.override(mydb, service='kikeyama_mysql') ## Flask app = Flask(__name__) #traced_app = TraceMiddleware(app, tracer, service="kikeyama_service", distributed_tracing=False) traced_app = TraceMiddleware(app, tracer, service='kikeyama_service') # Enable distributed tracing ddtrace.config.flask['distributed_tracing_enabled'] = True @app.route('/') def api_entry(): start_time = time.time() app.logger.info('getting root endpoint') # return 'Entrypoint to the Application'
def setUp(self):
    """Create a Flask app wired to a dummy tracer, plus a test client."""
    self.tracer = get_dummy_tracer()
    app = flask.Flask(__name__)
    Pin.override(app, service='test-flask', tracer=self.tracer)
    self.app = app
    self.client = app.test_client()
def setUp(self):
    """Patch molten and route its spans to a dummy tracer under the test
    service name."""
    patch()
    self.tracer = get_dummy_tracer()
    Pin.override(molten, tracer=self.tracer, service=self.TEST_SERVICE)
def patch():
    """Patch `flask` module for tracing.

    Idempotent: a `_datadog_patch` flag guards against double patching.
    Wraps the Flask app/blueprint entry points, hook decorators, template
    rendering, and (optionally) blinker signals.
    """
    # Check to see if we have patched Flask yet or not
    if getattr(flask, '_datadog_patch', False):
        return
    setattr(flask, '_datadog_patch', True)

    # Attach service pin to `flask.app.Flask`
    Pin(
        service=config.flask['service_name'],
        app=config.flask['app'],
        app_type=config.flask['app_type'],
    ).onto(flask.Flask)

    # flask.app.Flask methods that have custom tracing (add metadata, wrap functions, etc)
    _w('flask', 'Flask.wsgi_app', traced_wsgi_app)
    _w('flask', 'Flask.dispatch_request', request_tracer('dispatch_request'))
    _w('flask', 'Flask.preprocess_request', request_tracer('preprocess_request'))
    _w('flask', 'Flask.add_url_rule', traced_add_url_rule)
    _w('flask', 'Flask.endpoint', traced_endpoint)
    _w('flask', 'Flask._register_error_handler', traced_register_error_handler)

    # flask.blueprints.Blueprint methods that have custom tracing (add metadata, wrap functions, etc)
    _w('flask', 'Blueprint.register', traced_blueprint_register)
    _w('flask', 'Blueprint.add_url_rule', traced_blueprint_add_url_rule)

    # flask.app.Flask traced hook decorators
    flask_hooks = [
        'before_request',
        'before_first_request',
        'after_request',
        'teardown_request',
        'teardown_appcontext',
    ]
    for hook in flask_hooks:
        _w('flask', 'Flask.{}'.format(hook), traced_flask_hook)
    _w('flask', 'after_this_request', traced_flask_hook)

    # flask.app.Flask traced methods
    flask_app_traces = [
        'process_response',
        'handle_exception',
        'handle_http_exception',
        'handle_user_exception',
        'try_trigger_before_first_request_functions',
        'do_teardown_request',
        'do_teardown_appcontext',
        'send_static_file',
    ]
    for name in flask_app_traces:
        _w('flask', 'Flask.{}'.format(name), simple_tracer('flask.{}'.format(name)))

    # flask static file helpers
    _w('flask', 'send_file', simple_tracer('flask.send_file'))

    # flask.json.jsonify
    _w('flask', 'jsonify', traced_jsonify)

    # flask.templating traced functions
    _w('flask.templating', '_render', traced_render)
    _w('flask', 'render_template', traced_render_template)
    _w('flask', 'render_template_string', traced_render_template_string)

    # flask.blueprints.Blueprint traced hook decorators
    bp_hooks = [
        'after_app_request',
        'after_request',
        'before_app_first_request',
        'before_app_request',
        'before_request',
        'teardown_request',
        'teardown_app_request',
    ]
    for hook in bp_hooks:
        _w('flask', 'Blueprint.{}'.format(hook), traced_flask_hook)

    # flask.signals signals
    if config.flask['trace_signals']:
        signals = [
            'template_rendered',
            'request_started',
            'request_finished',
            'request_tearing_down',
            'got_request_exception',
            'appcontext_tearing_down',
        ]
        # These were added in 0.11.0
        if flask_version >= (0, 11):
            signals.append('before_render_template')

        # These were added in 0.10.0
        if flask_version >= (0, 10):
            signals.append('appcontext_pushed')
            signals.append('appcontext_popped')
            signals.append('message_flashed')

        for signal in signals:
            module = 'flask'

            # v0.9 missed importing `appcontext_tearing_down` in `flask/__init__.py`
            # https://github.com/pallets/flask/blob/0.9/flask/__init__.py#L35-L37
            # https://github.com/pallets/flask/blob/0.9/flask/signals.py#L52
            # DEV: Version 0.9 doesn't have a patch version
            if flask_version <= (0, 9) and signal == 'appcontext_tearing_down':
                module = 'flask.signals'

            # DEV: Patch `receivers_for` instead of `connect` to ensure we don't mess with `disconnect`
            _w(module, '{}.receivers_for'.format(signal), traced_signal_receivers_for(signal))
def get_tracer_and_client(self):
    """Pin a fresh pymongo client to a dummy tracer under the test service."""
    tracer = get_dummy_tracer()
    mongo_client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    Pin(service=self.TEST_SERVICE, tracer=tracer).onto(mongo_client)
    return tracer, mongo_client