def test_patch_unpatch(self):
    """Verify redis patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Test patch idempotence
    patch()
    patch()

    r = redis.Redis(port=REDIS_CONFIG['port'])
    Pin.get_from(r).clone(tracer=tracer).onto(r)
    r.get("key")

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    r = redis.Redis(port=REDIS_CONFIG['port'])
    r.get("key")

    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    r = redis.Redis(port=REDIS_CONFIG['port'])
    Pin.get_from(r).clone(tracer=tracer).onto(r)
    r.get("key")

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def test_patch_unpatch(self):
    """Verify pymongo patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Test patch idempotence
    patch()
    patch()

    client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    client["testdb"].drop_collection("whatever")

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    client["testdb"].drop_collection("whatever")

    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
    Pin.get_from(client).clone(tracer=tracer).onto(client)
    client["testdb"].drop_collection("whatever")

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def test_patch_unpatch(self): tracer = get_dummy_tracer() # Test patch idempotence patch() patch() client = mongoengine.connect(port=MONGO_CONFIG['port']) Pin.get_from(client).clone(tracer=tracer).onto(client) Artist.drop_collection() spans = tracer.writer.pop() assert spans, spans eq_(len(spans), 1) # Test unpatch mongoengine.connection.disconnect() unpatch() mongoengine.connect(port=MONGO_CONFIG['port']) Artist.drop_collection() spans = tracer.writer.pop() assert not spans, spans # Test patch again patch() client = mongoengine.connect(port=MONGO_CONFIG['port']) Pin.get_from(client).clone(tracer=tracer).onto(client) Artist.drop_collection() spans = tracer.writer.pop() assert spans, spans eq_(len(spans), 1)
def get_tracer_and_connect(self):
    """Pin a dummy tracer onto ``mongoengine.connect``, open a connection, and return the tracer."""
    tracer = get_dummy_tracer()
    pin = Pin.get_from(mongoengine.connect)
    pin.clone(tracer=tracer).onto(mongoengine.connect)

    mongoengine.connect(port=MONGO_CONFIG['port'])
    return tracer
def test_patch_unpatch(self):
    """Verify cassandra patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    # Test patch idempotence
    patch()
    patch()

    tracer = get_dummy_tracer()
    Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)

    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)

    spans = tracer.writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)

    spans = tracer.writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster)
    session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE)
    session.execute(self.TEST_QUERY)

    spans = tracer.writer.pop()
    assert spans, spans
    # FIX: the sibling patch/unpatch tests in this suite also verify the exact
    # span count after re-patching; this assertion was missing here.
    eq_(len(spans), 1)
def get_client(self):
    """Return a flushed pylibmc client pinned to a dummy tracer, plus the tracer."""
    address = "%s:%s" % (cfg["host"], cfg["port"])
    client = pylibmc.Client([address])
    client.flush_all()

    tracer = get_dummy_tracer()
    Pin.get_from(client).clone(tracer=tracer).onto(client)

    return client, tracer
def test_override_missing(self):
    # ensure overriding an instance doesn't override the Class
    class A(object):
        pass

    instance = A()
    ok_(Pin.get_from(instance) is None)

    Pin.override(instance, service='metrics')
    eq_(Pin.get_from(instance).service, 'metrics')

    # a fresh instance of the same class must remain unpinned
    other = A()
    ok_(Pin.get_from(other) is None)
def test_override(self):
    # ensure Override works for an instance object
    class A(object):
        pass

    Pin(service='metrics', app='flask').onto(A)

    first = A()
    Pin.override(first, app='django')
    eq_(Pin.get_from(first).app, 'django')
    eq_(Pin.get_from(first).service, 'metrics')

    # a brand-new instance still sees the class-level pin, untouched
    second = A()
    eq_(Pin.get_from(second).app, 'flask')
    eq_(Pin.get_from(second).service, 'metrics')
def test_pin_config_is_a_copy(self):
    # ensure that when a `Pin` is cloned, the config is a copy
    obj = self.Obj()

    Pin.override(obj, service='metrics')
    first = Pin.get_from(obj)
    ok_(first._config is not None)
    first._config['distributed_tracing'] = True

    Pin.override(obj, service='intake')
    second = Pin.get_from(obj)
    ok_(second._config is not None)
    second._config['distributed_tracing'] = False

    # flipping one copy must not have affected the other
    ok_(first._config['distributed_tracing'] is True)
    ok_(second._config['distributed_tracing'] is False)
def test_connect_factory(self):
    """Each connection can be pinned to a distinct service without stomping the others."""
    tracer = get_dummy_tracer()

    services = ["db", "another"]
    for service in services:
        conn, _ = self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
        self.assert_conn_is_traced(tracer, conn, service)

    # ensure we have the service types
    service_meta = tracer.writer.pop_services()
    expected = {
        "db": {"app": "postgres", "app_type": "db"},
        "another": {"app": "postgres", "app_type": "db"},
    }
    eq_(service_meta, expected)
def _task_init(func, task, args, kwargs):
    """Run the wrapped task __init__, then instrument the task when its Pin is enabled."""
    func(*args, **kwargs)

    # Patch this task if our pin is enabled
    pin = Pin.get_from(task)
    if pin and pin.enabled():
        patch_task(task, pin=pin)
def traced_start_fetching_next_page(func, instance, args, kwargs):
    """Trace fetching of the next result page of a cassandra ResponseFuture.

    Opens a new span (finishing any leftover span from the previous page),
    bumps the page counter, and re-raises any error after tagging it on the span.
    """
    has_more_pages = getattr(instance, 'has_more_pages', True)
    if not has_more_pages:
        return func(*args, **kwargs)
    session = getattr(instance, 'session', None)
    cluster = getattr(session, 'cluster', None)
    pin = Pin.get_from(cluster)
    if not pin or not pin.enabled():
        return func(*args, **kwargs)

    # In case the current span is not finished we make sure to finish it
    old_span = getattr(instance, CURRENT_SPAN, None)
    if old_span:
        log.debug('previous span was not finished before fetching next page')
        old_span.finish()

    query = getattr(instance, 'query', None)

    span = _start_span_and_set_tags(pin, query, session, cluster)

    page_number = getattr(instance, PAGE_NUMBER, 1) + 1
    setattr(instance, PAGE_NUMBER, page_number)
    setattr(instance, CURRENT_SPAN, span)
    try:
        return func(*args, **kwargs)
    except Exception:
        # FIX: was a bare `except:`, which also intercepted BaseException
        # subclasses such as SystemExit/KeyboardInterrupt; only tag genuine
        # errors on the span before re-raising.
        with span:
            span.set_exc_info(*sys.exc_info())
        raise
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: tracer = get_dummy_tracer() writer = tracer.writer conn = mysql.connector.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone( service=self.TEST_SERVICE, tracer=tracer).onto(conn) assert conn.is_connected() cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() eq_(len(rows), 1) spans = writer.pop() eq_(len(spans), 1) span = spans[0] eq_(span.service, self.TEST_SERVICE) eq_(span.name, 'mysql.query') eq_(span.span_type, 'sql') eq_(span.error, 0) assert_dict_issuperset(span.meta, { 'out.host': u'127.0.0.1', 'out.port': u'3306', 'db.name': u'test', 'db.user': u'test', }) ok_(span.get_tag('sql.query') is None) finally: unpatch() # assert we finish unpatched conn = mysql.connector.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def test_connect_factory(self):
    """Each aiopg connection can be pinned to a distinct service without stomping the others."""
    tracer = get_dummy_tracer()

    services = ['db', 'another']
    for service in services:
        conn, _ = yield from self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
        yield from self.assert_conn_is_traced(tracer, conn, service)
        conn.close()

    # ensure we have the service types
    service_meta = tracer.writer.pop_services()
    expected = {
        'db': {'app': 'postgres', 'app_type': 'db'},
        'another': {'app': 'postgres', 'app_type': 'db'},
    }
    eq_(service_meta, expected)
def test_pin_config(self):
    # ensure `Pin` has a configuration object that can be modified
    target = self.Obj()
    Pin.override(target, service='metrics')

    pin = Pin.get_from(target)
    ok_(pin._config is not None)

    pin._config['distributed_tracing'] = True
    ok_(pin._config['distributed_tracing'] is True)
def wrapper(wrapped, instance, args, kwargs):
    """Dispatch to ``decorated`` only when the instance carries an enabled Pin."""
    pin = Pin.get_from(instance)
    if pin and pin.enabled():
        # Execute our decorated function
        return decorated(pin, wrapped, instance, args, kwargs)
    # Execute the original method if pin is not enabled
    return wrapped(*args, **kwargs)
def test_pin_does_not_override_global(self):
    # ensure that when a `Pin` is created from a class, the specific
    # instance doesn't override the global one
    class A(object):
        pass

    Pin.override(A, service='metrics')
    global_pin = Pin.get_from(A)
    global_pin._config['distributed_tracing'] = True

    instance = A()
    local_pin = Pin.get_from(instance)
    ok_(local_pin is not None)
    ok_(local_pin._config['distributed_tracing'] is True)

    # mutating the instance-level config must leave the class-level one intact
    local_pin._config['distributed_tracing'] = False
    ok_(global_pin._config['distributed_tracing'] is True)
    ok_(local_pin._config['distributed_tracing'] is False)
def test_pin(self):
    # ensure a Pin can be attached to an instance
    target = self.Obj()
    attached = Pin(service='metrics')
    attached.onto(target)

    fetched = Pin.get_from(target)
    eq_(fetched.service, attached.service)
    ok_(fetched is attached)
def test_sqlite(self):
    """Queries are traced per-service; errors are captured on the span."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # ensure we can trace multiple services without stomping
    services = ["db", "another"]
    for service in services:
        db = sqlite3.connect(":memory:")
        pin = Pin.get_from(db)
        assert pin
        eq_("db", pin.app_type)
        pin.clone(
            service=service,
            tracer=tracer).onto(db)

        # Ensure we can run a query and it's correctly traced
        q = "select * from sqlite_master"
        start = time.time()
        cursor = db.execute(q)
        rows = cursor.fetchall()
        end = time.time()
        assert not rows

        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.span_type, "sql")
        eq_(span.resource, q)
        eq_(span.service, service)
        # query text must not be captured as a tag by default
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 0)
        assert start <= span.start <= end
        assert span.duration <= end - start

        # run a query with an error and ensure all is well
        q = "select * from some_non_existant_table"
        try:
            db.execute(q)
        except Exception:
            pass
        else:
            assert 0, "should have an error"

        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.resource, q)
        eq_(span.service, service)
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 1)
        eq_(span.span_type, "sql")
        assert span.get_tag(errors.ERROR_STACK)
        assert 'OperationalError' in span.get_tag(errors.ERROR_TYPE)
        assert 'no such table' in span.get_tag(errors.ERROR_MSG)
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: tracer = get_dummy_tracer() writer = tracer.writer conn = pymysql.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone(service=self.TEST_SERVICE, tracer=tracer).onto(conn) assert not conn._closed cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() eq_(len(rows), 1) spans = writer.pop() eq_(len(spans), 1) span = spans[0] eq_(span.service, self.TEST_SERVICE) eq_(span.name, 'pymysql.query') eq_(span.span_type, 'sql') eq_(span.error, 0) meta = {} meta.update(self.DB_INFO) assert_dict_issuperset(span.meta, meta) finally: unpatch() # assert we finish unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def test_sqlite_ot(self):
    """Ensure sqlite works with the opentracer."""
    ot_tracer = init_tracer('sqlite_svc', self.tracer)

    # Ensure we can run a query and it's correctly traced
    q = 'select * from sqlite_master'
    with ot_tracer.start_active_span('sqlite_op'):
        db = sqlite3.connect(':memory:')
        pin = Pin.get_from(db)
        assert pin
        self.assertEqual('db', pin.app_type)
        pin.clone(tracer=self.tracer).onto(db)
        cursor = db.execute(q)
        rows = cursor.fetchall()
        assert not rows

    # OT parent span with a single sqlite child
    self.assert_structure(
        dict(name='sqlite_op', service='sqlite_svc'),
        (
            dict(name='sqlite.query', span_type='sql', resource=q, error=0),
        )
    )

    self.reset()

    # with fetch-method tracing enabled, fetchall gets its own span
    with self.override_config('dbapi2', dict(trace_fetch_methods=True)):
        with ot_tracer.start_active_span('sqlite_op'):
            db = sqlite3.connect(':memory:')
            pin = Pin.get_from(db)
            assert pin
            self.assertEqual('db', pin.app_type)
            pin.clone(tracer=self.tracer).onto(db)
            cursor = db.execute(q)
            rows = cursor.fetchall()
            assert not rows

        self.assert_structure(
            dict(name='sqlite_op', service='sqlite_svc'),
            (
                dict(name='sqlite.query', span_type='sql', resource=q, error=0),
                dict(name='sqlite.query.fetchall', span_type='sql', resource=q, error=0),
            ),
        )
def traced_execute_async(func, instance, args, kwargs):
    """Trace ``Session.execute_async``, instrumenting the returned ResponseFuture.

    Wraps the future's result/exception/paging hooks so the span is closed
    when the query completes; re-raises any error after tagging it on the span.
    """
    cluster = getattr(instance, 'cluster', None)
    pin = Pin.get_from(cluster)
    if not pin or not pin.enabled():
        return func(*args, **kwargs)

    query = kwargs.get("query") or args[0]

    span = _start_span_and_set_tags(pin, query, instance, cluster)

    try:
        result = func(*args, **kwargs)
        setattr(result, CURRENT_SPAN, span)
        setattr(result, PAGE_NUMBER, 1)
        setattr(
            result,
            '_set_final_result',
            wrapt.FunctionWrapper(
                result._set_final_result,
                traced_set_final_result
            )
        )
        setattr(
            result,
            '_set_final_exception',
            wrapt.FunctionWrapper(
                result._set_final_exception,
                traced_set_final_exception
            )
        )
        setattr(
            result,
            'start_fetching_next_page',
            wrapt.FunctionWrapper(
                result.start_fetching_next_page,
                traced_start_fetching_next_page
            )
        )

        # Since we cannot be sure that the previous methods were overwritten
        # before the call ended, we add callbacks that will be run
        # synchronously if the call already returned and we remove them right
        # after.
        result.add_callbacks(
            _close_span_on_success,
            _close_span_on_error,
            callback_args=(result,),
            errback_args=(result,)
        )
        result.clear_callbacks()
        return result
    except Exception:
        # FIX: was a bare `except:`, which also intercepted BaseException
        # subclasses such as SystemExit/KeyboardInterrupt; only tag genuine
        # errors on the span before re-raising.
        with span:
            span.set_exc_info(*sys.exc_info())
        raise
def test_pin():
    class A(object):
        pass

    target = A()
    attached = Pin(service="abc")
    attached.onto(target)

    # the exact same Pin object must come back from the instance
    fetched = Pin.get_from(target)
    assert attached.service == fetched.service
    assert attached is fetched
def _client_channel_interceptor(wrapped, instance, args, kwargs):
    """Wrap gRPC channel creation, injecting a tracing interceptor when the client Pin is enabled."""
    channel = wrapped(*args, **kwargs)

    pin = Pin.get_from(constants.GRPC_PIN_MODULE_CLIENT)
    if not pin or not pin.enabled():
        return channel

    (host, port) = _parse_target_from_arguments(args, kwargs)
    return grpc.intercept_channel(
        channel,
        create_client_interceptor(pin, host, port),
    )
async def test_meta_override(tracer, test_spans):
    """Tags cloned onto the client pin end up on the emitted span."""
    client = aredis.StrictRedis(port=REDIS_CONFIG["port"])
    pin = Pin.get_from(client)
    assert pin is not None
    pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(client)

    await client.get("cheese")

    test_spans.assert_trace_count(1)
    test_spans.assert_span_count(1)
    span = test_spans.spans[0]
    assert span.service == "redis"
    assert "cheese" in span.get_tags()
    assert span.get_tag("cheese") == "camembert"
def _unpatch_client():
    """Remove client-side gRPC instrumentation, if installed."""
    if not getattr(constants.GRPC_PIN_MODULE_CLIENT, "__datadog_patch", False):
        return
    setattr(constants.GRPC_PIN_MODULE_CLIENT, "__datadog_patch", False)

    pin = Pin.get_from(constants.GRPC_PIN_MODULE_CLIENT)
    if pin:
        pin.remove_from(constants.GRPC_PIN_MODULE_CLIENT)

    _u(grpc, "secure_channel")
    _u(grpc, "insecure_channel")
def test_cant_pin_with_slots(self):
    # ensure a Pin can't be attached if the __slots__ is defined
    class Obj(object):
        __slots__ = ['value']

    target = Obj()
    target.value = 1

    Pin(service='metrics').onto(target)
    ok_(Pin.get_from(target) is None)
def _client_channel_interceptor(wrapped, instance, args, kwargs):
    """Wrap gRPC channel creation, injecting a tracing interceptor when the channel Pin is enabled."""
    channel = wrapped(*args, **kwargs)

    pin = Pin.get_from(channel)
    if not pin or not pin.enabled():
        return channel

    (host, port) = utils._parse_target_from_args(args, kwargs)
    return grpc.intercept_channel(
        channel,
        create_client_interceptor(pin, host, port),
    )
def test_pin_does_not_override_global_with_new_instance(self):
    # ensure that when a `Pin` is created from a class, the specific
    # instance doesn't override the global one, even if only the
    # `onto()` API has been used
    class A(object):
        pass

    Pin(service='metrics').onto(A)
    global_pin = Pin.get_from(A)
    global_pin._config['distributed_tracing'] = True

    instance = A()
    local_pin = Pin.get_from(instance)
    ok_(local_pin is not None)
    ok_(local_pin._config['distributed_tracing'] is True)

    # mutating the instance-level config must leave the class-level one intact
    local_pin._config['distributed_tracing'] = False
    ok_(global_pin._config['distributed_tracing'] is True)
    ok_(local_pin._config['distributed_tracing'] is False)
def _get_conn_tracer(self):
    """Lazily open a pyodbc connection pinned to the test tracer; return (conn, tracer)."""
    if self.conn:
        return self.conn, self.tracer

    self.conn = pyodbc.connect(PYODBC_CONNECT_DSN)
    # Ensure that the default pin is there, with its default value
    default_pin = Pin.get_from(self.conn)
    assert default_pin
    # Re-pin by cloning the existing pin with our tracer; a brand-new Pin
    # would not inherit the default `app` attribute.
    default_pin.clone(tracer=self.tracer).onto(self.conn)
    return self.conn, self.tracer
def test_blueprint_register(self):
    """
    When we register a ``flask.Blueprint`` to a ``flask.Flask``
        When no ``Pin`` is attached to the ``Blueprint``
            We attach the pin from the ``flask.Flask`` app
        When a ``Pin`` is manually added to the ``Blueprint``
            We do not use the ``flask.Flask`` app ``Pin``
    """
    bp = flask.Blueprint("pinned", __name__)
    Pin(service="flask-bp", tracer=self.tracer).onto(bp)

    # DEV: This is more common than calling ``flask.Blueprint.register`` directly
    self.app.register_blueprint(bp)
    pin = Pin.get_from(bp)
    self.assertEqual(pin.service, "flask-bp")

    bp = flask.Blueprint("not-pinned", __name__)
    self.app.register_blueprint(bp)
    pin = Pin.get_from(bp)
    self.assertNotEqual(pin.service, "flask-bp")
def _trace_method(self, method, name, extra_tags, *args, **kwargs):
    """Invoke ``method``, wrapping it in a trace span when an enabled Pin is attached to self."""
    pin = Pin.get_from(self)
    if not pin or not pin.enabled():
        return method(*args, **kwargs)

    with pin.tracer.trace(name, service=pin.service) as span:
        span.set_tags(pin.tags)
        span.set_tags(extra_tags)
        return method(*args, **kwargs)
def test_sqlite(self):
    """Queries are traced per-service; errors are captured on the span."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # ensure we can trace multiple services without stomping
    services = ["db", "another"]
    for service in services:
        db = sqlite3.connect(":memory:")
        pin = Pin.get_from(db)
        assert pin
        eq_("db", pin.app_type)
        pin.clone(service=service, tracer=tracer).onto(db)

        # Ensure we can run a query and it's correctly traced
        q = "select * from sqlite_master"
        start = time.time()
        cursor = db.execute(q)
        rows = cursor.fetchall()
        end = time.time()
        assert not rows

        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.span_type, "sql")
        eq_(span.resource, q)
        eq_(span.service, service)
        # query text must not be captured as a tag by default
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 0)
        assert start <= span.start <= end
        assert span.duration <= end - start

        # run a query with an error and ensure all is well
        q = "select * from some_non_existant_table"
        try:
            db.execute(q)
        except Exception:
            pass
        else:
            assert 0, "should have an error"

        spans = writer.pop()
        assert spans
        eq_(len(spans), 1)
        span = spans[0]
        eq_(span.name, "sqlite.query")
        eq_(span.resource, q)
        eq_(span.service, service)
        ok_(span.get_tag("sql.query") is None)
        eq_(span.error, 1)
        eq_(span.span_type, "sql")
        assert span.get_tag(errors.ERROR_STACK)
        assert 'OperationalError' in span.get_tag(errors.ERROR_TYPE)
        assert 'no such table' in span.get_tag(errors.ERROR_MSG)
def test_override_parent_pin(self):
    """Test that the service set on `pymemcache` is used for Clients."""
    Pin.override(pymemcache, service='mysvc')
    client = self.make_client([b'STORED\r\n', b'VALUE key 0 5\r\nvalue\r\nEND\r\n'])
    client.set(b'key', b'value', noreply=False)

    parent_pin = Pin.get_from(pymemcache)
    spans = parent_pin.tracer.writer.pop()
    self.assertEqual(spans[0].service, 'mysvc')
def test_override_parent_pin(self):
    """Test that the service set on `pymemcache` is used for Clients."""
    Pin.override(pymemcache, service="mysvc")
    client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
    client.set(b"key", b"value", noreply=False)

    parent_pin = Pin.get_from(pymemcache)
    spans = parent_pin.tracer.pop()
    self.assertEqual(spans[0].service, "mysvc")
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: writer = self.tracer.writer conn = pymysql.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(conn) assert not conn._closed cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() eq_(len(rows), 1) spans = writer.pop() eq_(len(spans), 1) span = spans[0] eq_(span.service, self.TEST_SERVICE) eq_(span.name, 'pymysql.query') eq_(span.span_type, 'sql') eq_(span.error, 0) meta = {} meta.update(self.DB_INFO) assert_dict_issuperset(span.meta, meta) finally: unpatch() # assert we finish unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def test_patch_unpatch(self): unpatch() # assert we start unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch() try: conn = pymysql.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin pin.clone(tracer=self.tracer).onto(conn) assert not conn._closed cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() assert len(rows) == 1 spans = self.pop_spans() assert len(spans) == 1 span = spans[0] assert span.service == "pymysql" assert span.name == "pymysql.query" assert span.span_type == "sql" assert span.error == 0 assert span.get_metric("out.port") == MYSQL_CONFIG.get("port") meta = {} meta.update(self.DB_INFO) assert_dict_issuperset(span.meta, meta) finally: unpatch() # assert we finish unpatched conn = pymysql.connect(**MYSQL_CONFIG) assert not Pin.get_from(conn) conn.close() patch()
def test_meta_override(self):
    """Tags cloned onto the client pin end up on the emitted span."""
    client = self.r
    pin = Pin.get_from(client)
    if pin:
        pin.clone(tags={"cheese": "camembert"}).onto(client)

    client.get("cheese")

    spans = self.get_spans()
    assert len(spans) == 1
    span = spans[0]
    assert span.service == "redis"
    assert "cheese" in span.meta
    assert span.meta["cheese"] == "camembert"
def _get_conn_tracer(self):
    """Lazily open a DB connection pinned to the test tracer; return (conn, tracer)."""
    if self.conn:
        return self.conn, self.tracer

    self.conn = self._connect_with_kwargs()
    self.conn.ping()
    # Ensure that the default pin is there, with its default value
    default_pin = Pin.get_from(self.conn)
    assert default_pin
    # Re-pin by cloning the existing pin with our tracer; a brand-new Pin
    # would not inherit the default `app` attribute.
    default_pin.clone(tracer=self.tracer).onto(self.conn)
    return self.conn, self.tracer
def test_meta_override(self):
    """Tags cloned onto the client pin end up on the emitted span."""
    client, tracer = self.get_redis_and_tracer()
    pin = Pin.get_from(client)
    if pin:
        pin.clone(tags={'cheese': 'camembert'}).onto(client)

    client.get('cheese')

    spans = tracer.writer.pop()
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.TEST_SERVICE)
    ok_('cheese' in span.meta)
    ok_(span.meta['cheese'] == 'camembert')
def test_meta_override(self):
    """Tags cloned onto the client pin end up on the emitted span."""
    client = self.r
    pin = Pin.get_from(client)
    if pin:
        pin.clone(tags={'cheese': 'camembert'}).onto(client)

    client.get('cheese')

    spans = self.get_spans()
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.TEST_SERVICE)
    ok_('cheese' in span.meta)
    ok_(span.meta['cheese'] == 'camembert')
def test_meta_override(self):
    """Tags cloned onto the client pin end up on the emitted span."""
    client = self.r
    pin = Pin.get_from(client)
    if pin:
        pin.clone(tags={'cheese': 'camembert'}).onto(client)

    client.get('cheese')

    spans = self.get_spans()
    assert len(spans) == 1
    span = spans[0]
    assert span.service == self.TEST_SERVICE
    assert 'cheese' in span.meta
    assert span.meta['cheese'] == 'camembert'
def _get_conn_tracer(self):
    """Lazily open a pymysql connection pinned to the test tracer; return (conn, tracer)."""
    if self.conn:
        return self.conn, self.tracer

    self.conn = pymysql.connect(**MYSQL_CONFIG)
    assert not self.conn._closed
    # Ensure that the default pin is there, with its default value
    default_pin = Pin.get_from(self.conn)
    assert default_pin
    # Re-pin by cloning the existing pin with our tracer; a brand-new Pin
    # would not inherit the default `app` attribute.
    default_pin.clone(tracer=self.tracer).onto(self.conn)
    return self.conn, self.tracer
def test_patch_unpatch(self):
    """Verify aiopg patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Test patch idempotence
    patch()
    patch()

    service = 'fo'

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def test_sqlite(self):
    """Queries are traced per-service; errors are captured on the span."""
    # ensure we can trace multiple services without stomping
    services = ['db', 'another']
    for service in services:
        db = sqlite3.connect(':memory:')
        pin = Pin.get_from(db)
        assert pin
        pin.clone(service=service, tracer=self.tracer).onto(db)

        # Ensure we can run a query and it's correctly traced
        q = 'select * from sqlite_master'
        start = time.time()
        cursor = db.execute(q)
        self.assertIsInstance(cursor, TracedSQLiteCursor)
        rows = cursor.fetchall()
        end = time.time()
        assert not rows

        self.assert_structure(
            dict(name='sqlite.query', span_type='sql', resource=q, service=service, error=0),
        )
        root = self.get_root_span()
        assert_is_measured(root)
        # query text must not be captured as a tag by default
        self.assertIsNone(root.get_tag('sql.query'))
        assert start <= root.start <= end
        assert root.duration <= end - start
        self.reset()

        # run a query with an error and ensure all is well
        q = 'select * from some_non_existant_table'
        try:
            db.execute(q)
        except Exception:
            pass
        else:
            assert 0, 'should have an error'

        self.assert_structure(
            dict(name='sqlite.query', span_type='sql', resource=q, service=service, error=1),
        )
        root = self.get_root_span()
        assert_is_measured(root)
        self.assertIsNone(root.get_tag('sql.query'))
        self.assertIsNotNone(root.get_tag(errors.ERROR_STACK))
        self.assertIn('OperationalError', root.get_tag(errors.ERROR_TYPE))
        self.assertIn('no such table', root.get_tag(errors.ERROR_MSG))
        self.reset()
def test_patch_unpatch(self):
    """Verify sqlite3 patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Test patch idempotence
    patch()
    patch()

    db = sqlite3.connect(":memory:")
    pin = Pin.get_from(db)
    assert pin
    pin.clone(tracer=tracer).onto(db)
    db.cursor().execute("select 'blah'").fetchall()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    db = sqlite3.connect(":memory:")
    db.cursor().execute("select 'blah'").fetchall()

    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    db = sqlite3.connect(":memory:")
    pin = Pin.get_from(db)
    assert pin
    pin.clone(tracer=tracer).onto(db)
    db.cursor().execute("select 'blah'").fetchall()

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def test_patch_unpatch(self):
    """Verify aiopg patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    # Test patch idempotence
    patch()
    patch()

    service = 'fo'

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=self.tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = self.pop_spans()
    assert spans, spans
    assert len(spans) == 1

    # Test unpatch
    unpatch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = self.pop_spans()
    assert not spans, spans

    # Test patch again
    patch()

    conn = yield from aiopg.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=self.tracer).onto(conn)
    yield from (yield from conn.cursor()).execute('select \'blah\'')
    conn.close()

    spans = self.pop_spans()
    assert spans, spans
    assert len(spans) == 1
def patch_django(tracer):
    """Fixture: pin ``tracer`` onto django for the duration of a test, then restore the original."""
    # Patch Django and override tracer to be our test tracer
    pin = Pin.get_from(django)
    original_tracer = pin.tracer
    Pin.override(django, tracer=tracer)

    # Yield to our test
    yield

    # Reset the tracer pinned to Django and unpatch
    # DEV: unable to properly unpatch and reload django app with each test
    # unpatch()
    Pin.override(django, tracer=original_tracer)
def test_patch_unpatch(self):
    """Verify pylibmc patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer
    url = "%s:%s" % (cfg["host"], cfg["port"])

    # Test patch idempotence
    patch()
    patch()

    client = pylibmc.Client([url])
    Pin.get_from(client).clone(
        service=self.TEST_SERVICE,
        tracer=tracer).onto(client)

    client.set("a", 1)

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    client = pylibmc.Client([url])
    client.set("a", 1)

    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    client = pylibmc.Client([url])
    # FIX: re-pin by cloning the patched client's default Pin (as done in the
    # first section) instead of attaching a brand-new Pin, which would not
    # inherit the default pin's `app` attributes.
    Pin.get_from(client).clone(
        service=self.TEST_SERVICE,
        tracer=tracer).onto(client)
    client.set("a", 1)

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def test_connect_factory(self):
    """Each connection can be pinned to a distinct service without stomping the others."""
    tracer = get_dummy_tracer()

    services = ["db", "another"]
    for service in services:
        conn, _ = self._get_conn_and_tracer()
        Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
        self.assert_conn_is_traced(tracer, conn, service)

    # ensure we have the service types
    service_meta = tracer.writer.pop_services()
    expected = {
        "db": {
            "app": "postgres",
            "app_type": "db",
        },
        "another": {
            "app": "postgres",
            "app_type": "db",
        },
    }
    eq_(service_meta, expected)
def test_user_pin_override(self):
    """A user-supplied pin clone overrides the default service name on spans."""
    conn, tracer = self._get_conn_tracer()
    pin = Pin.get_from(conn)
    # NOTE(review): the clone uses self.tracer while spans are popped from the
    # `tracer` returned by the helper — presumably the same object; confirm in
    # `_get_conn_tracer`.
    pin.clone(service="pin-svc", tracer=self.tracer).onto(conn)
    cursor = conn.cursor()
    cursor.execute("SELECT 1")
    rows = cursor.fetchall()
    assert len(rows) == 1

    spans = tracer.writer.pop()
    assert len(spans) == 1
    span = spans[0]
    assert span.service == "pin-svc"
def _get_conn_tracer(self):
    """Open a connection pinned to a fresh dummy tracer; return (conn, tracer).

    NOTE(review): `tracer` is local to the `if` branch, so the return must live
    inside it; when `self.conn` is already set this method implicitly returns
    None — callers appear to rely on a fresh connection per test. Confirm.
    """
    if not self.conn:
        tracer = get_dummy_tracer()
        self.conn = self._connect_with_kwargs()
        self.conn.ping()
        # Ensure that the default pin is there, with its default value
        pin = Pin.get_from(self.conn)
        assert pin
        assert pin.service == 'mysql'
        # Customize the service
        # we have to apply it on the existing one since new one won't inherit `app`
        pin.clone(service=self.TEST_SERVICE, tracer=tracer).onto(self.conn)
        return self.conn, tracer
def test_patch_unpatch(self):
    """Verify psycopg2 patch() is idempotent, unpatch() removes tracing, and re-patching restores it."""
    tracer = get_dummy_tracer()
    writer = tracer.writer

    # Test patch idempotence
    patch()
    patch()

    service = "fo"

    conn = psycopg2.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    conn.cursor().execute("select 'blah'")

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)

    # Test unpatch
    unpatch()

    conn = psycopg2.connect(**POSTGRES_CONFIG)
    conn.cursor().execute("select 'blah'")

    spans = writer.pop()
    assert not spans, spans

    # Test patch again
    patch()

    conn = psycopg2.connect(**POSTGRES_CONFIG)
    Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn)
    conn.cursor().execute("select 'blah'")

    spans = writer.pop()
    assert spans, spans
    eq_(len(spans), 1)
def _trace_method(self, method, resource, extra_tags, *args, **kwargs):
    """Invoke ``method``, tracing it as a SQL span when an enabled Pin is attached to self."""
    pin = Pin.get_from(self)
    if not pin or not pin.enabled():
        return method(*args, **kwargs)

    with pin.tracer.trace(
        self._self_datadog_name, service=pin.service, resource=resource
    ) as span:
        span.span_type = sql.TYPE
        span.set_tags(pin.tags)
        span.set_tags(extra_tags)
        try:
            return method(*args, **kwargs)
        finally:
            # record the affected row count even when the call raises
            span.set_metric("db.rowcount", self.rowcount)