def test_cache_add_without_arguments(self):
    """A bad ``add()`` call must raise from Flask-Cache and still emit one error span."""
    # collect spans in-process instead of shipping them to an agent
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    # traced cache bound to a Flask application using the simple backend
    Cache = get_traced_cache(tracer, service=self.SERVICE)
    app = Flask(__name__)
    cache = Cache(app, config={"CACHE_TYPE": "simple"})
    # deliberately call add() without its required arguments
    with assert_raises(TypeError) as ex:
        cache.add()
    # the TypeError must originate in the cache API, not in the tracer
    ok_("add()" in ex.exception.args[0])
    ok_("argument" in ex.exception.args[0])
    # exactly one span, flagged as an error, must have been recorded
    spans = writer.pop()
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.SERVICE)
    eq_(span.resource, "add")
    eq_(span.name, "flask_cache.cmd")
    eq_(span.span_type, "cache")
    eq_(span.error, 1)
def test_tracer_wrap_factory_nested():
    """A configured ``wrap_executor`` must replace the default wrapper even
    when the wrapped call runs inside another active trace."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
        # custom executor: traces under its own name and records call arguments
        with tracer.trace('wrap.overwrite') as span:
            span.set_tag('args', args)
            span.set_tag('kwargs', kwargs)
            return fn(*args, **kwargs)

    @tracer.wrap()
    def wrapped_function(param, kw_param=None):
        eq_(42, param)
        eq_(42, kw_param)

    # install the custom wrap factory only after the function was decorated
    tracer.configure(wrap_executor=wrap_executor)

    # the custom tracing wrapper must be honoured inside the parent trace
    with tracer.trace('wrap.parent', service='webserver'):
        wrapped_function(42, kw_param=42)

    # parent span first, then the overwritten child inheriting the service
    eq_(writer.spans[0].name, 'wrap.parent')
    eq_(writer.spans[0].service, 'webserver')
    eq_(writer.spans[1].name, 'wrap.overwrite')
    eq_(writer.spans[1].service, 'webserver')
    eq_(writer.spans[1].get_tag('args'), '(42,)')
    eq_(writer.spans[1].get_tag('kwargs'), '{\'kw_param\': 42}')
def test_redis_cache_tracing_with_a_wrong_connection(self):
    """A refused redis connection must propagate to the caller and be
    recorded as a single error span carrying the net target metadata.

    Fix: removed a leftover debug ``print(ex.exception)`` statement.
    """
    # collect spans in-process instead of shipping them to an agent
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    # traced cache pointing at a redis port nothing is listening on
    Cache = get_traced_cache(tracer, service=self.SERVICE)
    app = Flask(__name__)
    config = {
        "CACHE_TYPE": "redis",
        "CACHE_REDIS_PORT": 2230,
        "CACHE_REDIS_HOST": "127.0.0.1",
    }
    cache = Cache(app, config=config)
    # the refused connection must surface as a redis ConnectionError
    with assert_raises(ConnectionError) as ex:
        cache.get(u"á_complex_operation")
    # ensure that the error is not caused by our tracer
    ok_("127.0.0.1:2230. Connection refused." in ex.exception.args[0])
    spans = writer.pop()
    # an error trace must be sent
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.SERVICE)
    eq_(span.resource, "get")
    eq_(span.name, "flask_cache.cmd")
    eq_(span.span_type, "cache")
    eq_(span.meta[CACHE_BACKEND], "redis")
    eq_(span.meta[net.TARGET_HOST], '127.0.0.1')
    eq_(span.meta[net.TARGET_PORT], '2230')
    eq_(span.error, 1)
def test_memcached_cache_tracing_with_a_wrong_connection(self):
    """A broken memcached backend still yields one span with net metadata."""
    # collect spans in-process instead of shipping them to an agent
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    # traced cache pointing at a memcached port nothing is listening on
    Cache = get_traced_cache(tracer, service=self.SERVICE)
    app = Flask(__name__)
    config = {
        "CACHE_TYPE": "memcached",
        "CACHE_MEMCACHED_SERVERS": ['localhost:2230'],
    }
    cache = Cache(app, config=config)
    # memcached clients may either raise or fail silently here; either way
    # the call itself is expected to go wrong, so swallow any exception
    try:
        cache.get(u"á_complex_operation")
    except Exception:
        pass
    # a single span must still have been produced by the tracer
    spans = writer.pop()
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.SERVICE)
    eq_(span.resource, "get")
    eq_(span.name, "flask_cache.cmd")
    eq_(span.span_type, "cache")
    eq_(span.meta[CACHE_BACKEND], "memcached")
    eq_(span.meta[net.TARGET_HOST], 'localhost')
    eq_(span.meta[net.TARGET_PORT], '2230')
def test_simple_cache_add(self):
    """``add()`` on the simple backend produces one well-formed span."""
    # collect spans in-process instead of shipping them to an agent
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    # traced cache bound to a Flask application using the simple backend
    Cache = get_traced_cache(tracer, service=self.SERVICE)
    app = Flask(__name__)
    cache = Cache(app, config={"CACHE_TYPE": "simple"})

    cache.add(u"á_complex_number", 50)

    spans = writer.pop()
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.SERVICE)
    eq_(span.resource, "add")
    eq_(span.name, "flask_cache.cmd")
    eq_(span.span_type, "cache")
    eq_(span.error, 0)
    # key and backend must be tagged on the span metadata
    expected_meta = {
        "flask_cache.key": u"á_complex_number",
        "flask_cache.backend": "simple",
    }
    assert_dict_issuperset(span.meta, expected_meta)
def test_simple_cache_set_many(self):
    """``set_many()`` is traced as one span that lists every key written."""
    # collect spans in-process instead of shipping them to an agent
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    # traced cache bound to a Flask application using the simple backend
    Cache = get_traced_cache(tracer, service=self.SERVICE)
    app = Flask(__name__)
    cache = Cache(app, config={"CACHE_TYPE": "simple"})

    cache.set_many({
        'first_complex_op': 10,
        'second_complex_op': 20,
    })

    spans = writer.pop()
    eq_(len(spans), 1)
    span = spans[0]
    eq_(span.service, self.SERVICE)
    eq_(span.resource, "set_many")
    eq_(span.name, "flask_cache.cmd")
    eq_(span.span_type, "cache")
    eq_(span.error, 0)
    eq_(span.meta["flask_cache.backend"], "simple")
    # both keys must be referenced in the recorded cache key tag
    ok_("first_complex_op" in span.meta["flask_cache.key"])
    ok_("second_complex_op" in span.meta["flask_cache.key"])
def test_tracer_wrap_class():
    """``tracer.wrap()`` must work on static, class and instance methods.

    Fix: the instance method previously named its first parameter ``cls``,
    which misleadingly suggested a classmethod; renamed to the conventional
    ``self`` (caller-invisible).
    """
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    class Foo(object):
        @staticmethod
        @tracer.wrap()
        def s():
            return 1

        @classmethod
        @tracer.wrap()
        def c(cls):
            return 2

        @tracer.wrap()
        def i(self):
            return 3

    f = Foo()
    eq_(f.s(), 1)
    eq_(f.c(), 2)
    eq_(f.i(), 3)

    # each call produced one span, named after the module-level function path
    spans = writer.pop()
    eq_(len(spans), 3)
    names = [s.name for s in spans]
    # FIXME[matt] include the class name here.
    eq_(sorted(names), sorted(["tests.test_tracer.%s" % n for n in ["s", "c", "i"]]))
def test_tracer_pid():
    """Only the root span of a trace carries the process-id tag."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    with tracer.trace("root") as root_span:
        with tracer.trace("child") as child_span:
            time.sleep(0.05)
    # Root span should contain the pid of the current process
    eq_(root_span.get_tag(system.PID), str(getpid()))
    # Child span should not contain a pid tag
    eq_(child_span.get_tag(system.PID), None)
def test_configure_keeps_api_hostname_and_port(self):
    """Reconfiguring unrelated options must not reset the API endpoint."""
    # use real tracer with real api
    tracer = Tracer()
    eq_('localhost', tracer.writer.api.hostname)
    eq_(8126, tracer.writer.api.port)
    # explicit endpoint change is honoured
    tracer.configure(hostname='127.0.0.1', port=8127)
    eq_('127.0.0.1', tracer.writer.api.hostname)
    eq_(8127, tracer.writer.api.port)
    # an unrelated configure() call keeps the previously-set endpoint
    tracer.configure(priority_sampling=True)
    eq_('127.0.0.1', tracer.writer.api.hostname)
    eq_(8127, tracer.writer.api.port)
def test_tracer_wrap_default_name():
    """With no explicit name, ``wrap()`` uses the function's module path."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    @tracer.wrap()
    def f():
        pass

    f()
    eq_(writer.spans[0].name, 'tests.test_tracer.f')
def test_tracer_wrap_exception():
    """An exception inside a wrapped function marks its span as errored."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    @tracer.wrap()
    def f():
        raise Exception('bim')

    # the exception propagates to the caller...
    assert_raises(Exception, f)
    # ...and exactly one span was produced, flagged as an error
    eq_(len(writer.spans), 1)
    eq_(writer.spans[0].error, 1)
def test_tracer():
    """End-to-end check of parent/child span wiring with nested traces."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    sleep = 0.05

    def _mix():
        with tracer.trace("cake.mix"):
            time.sleep(sleep)

    def _bake():
        with tracer.trace("cake.bake"):
            time.sleep(sleep)

    def _make_cake():
        with tracer.trace("cake.make") as span:
            span.service = "baker"
            span.resource = "cake"
            _mix()
            _bake()

    # nothing should be recorded before any tracing happens
    assert not writer.spans
    _make_cake()
    spans = writer.pop()
    assert spans, "%s" % spans
    eq_(len(spans), 3)
    spans_by_name = {s.name: s for s in spans}
    eq_(len(spans_by_name), 3)

    # the root span has ids of its own and no parent
    make = spans_by_name["cake.make"]
    assert make.span_id
    assert make.parent_id is None
    assert make.trace_id

    for other in ["cake.mix", "cake.bake"]:
        s = spans_by_name[other]
        eq_(s.parent_id, make.span_id)
        eq_(s.trace_id, make.trace_id)
        eq_(s.service, make.service)  # ensure it inherits the service
        eq_(s.resource, s.name)       # ensure when we don't set a resource, it's there.

    # a second run must allocate fresh trace ids
    _make_cake()
    spans = writer.pop()
    for s in spans:
        assert s.trace_id != make.trace_id
def test_tracer_wrap_multiple_calls():
    """Each invocation of a wrapped function must open a distinct span."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    @tracer.wrap()
    def f():
        pass

    f()
    f()

    spans = writer.pop()
    eq_(len(spans), 2)
    # the two spans must not share a span id
    assert spans[0].span_id != spans[1].span_id
def test_unserializable_span_with_finish():
    """Finishing a span holding an unserializable metric must not blow up."""
    try:
        import numpy as np
    except ImportError:
        raise SkipTest("numpy not installed")

    # a weird case where manually calling finish with an unserializable
    # span was causing a loop of serialization.
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    with tracer.trace("parent") as span:
        # circumvent the data checks by writing the metric dict directly
        span.metrics['as'] = np.int64(1)
        span.finish()
def test_tracer_vars():
    """``trace()`` honours explicit span attributes and sane defaults."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    # explicitly provided service/resource/type are applied verbatim
    span = tracer.trace("a", service="s", resource="r", span_type="t")
    eq_(span.service, "s")
    eq_(span.resource, "r")
    eq_(span.span_type, "t")
    span.finish()

    # defaults: no service/type, resource falls back to the span name
    span = tracer.trace("a")
    eq_(span.service, None)
    eq_(span.resource, "a")  # inherits
    eq_(span.span_type, None)
def setUp(self):
    """
    Create a tracer with running workers, while spying the ``_put()`` method to
    keep trace of triggered API calls.
    """
    # create a new tracer
    self.tracer = Tracer()
    # wrap the transport's _put() in a spying mock that still performs the call
    self.api = self.tracer.writer.api
    self.api._put = mock.Mock(self.api._put, wraps=self.api._put)
def test_tracer_wrap():
    """``wrap()`` applies name/service/resource/type and allows tagging."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    @tracer.wrap('decorated_function', service='s', resource='r', span_type='t')
    def f(tag_name, tag_value):
        # make sure we can still set tags from inside the wrapped body
        span = tracer.current_span()
        span.set_tag(tag_name, tag_value)

    f('a', 'b')

    spans = writer.pop()
    eq_(len(spans), 1)
    s = spans[0]
    eq_(s.name, 'decorated_function')
    eq_(s.service, 's')
    eq_(s.resource, 'r')
    eq_(s.span_type, 't')
    eq_(s.to_dict()['meta']['a'], 'b')
def test_tracer_wrap_span_nesting():
    """Nested wrapped/traced calls must chain parent ids correctly."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    @tracer.wrap('inner')
    def inner():
        pass

    @tracer.wrap('outer')
    def outer():
        with tracer.trace('mid'):
            inner()

    outer()

    spans = writer.pop()
    eq_(len(spans), 3)

    # sift through the list so we're not dependent on span ordering within
    # the writer
    by_name = {}
    for span in spans:
        by_name[span.name] = span
    assert set(by_name) == {'outer', 'mid', 'inner'}, 'unknown span found'

    outer_span = by_name['outer']
    mid_span = by_name['mid']
    inner_span = by_name['inner']
    # outer is the root; mid hangs off outer; inner hangs off mid
    eq_(outer_span.parent_id, None)
    eq_(mid_span.parent_id, outer_span.span_id)
    eq_(inner_span.parent_id, mid_span.span_id)
def test_tracer_global_tags():
    """``set_tags()`` applies to spans started afterwards, and accumulates."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    # no global tags set yet: spans carry none of them
    s1 = tracer.trace('brie')
    s1.finish()
    assert not s1.get_tag('env')
    assert not s1.get_tag('other')

    # a single global tag is stamped on subsequent spans
    tracer.set_tags({'env': 'prod'})
    s2 = tracer.trace('camembert')
    s2.finish()
    assert s2.get_tag('env') == 'prod'
    assert not s2.get_tag('other')

    # later calls can both overwrite and extend the global tag set
    tracer.set_tags({'env': 'staging', 'other': 'tag'})
    s3 = tracer.trace('gruyere')
    s3.finish()
    assert s3.get_tag('env') == 'staging'
    assert s3.get_tag('other') == 'tag'
def test_tracer_disabled_mem_leak():
    """A disabled tracer must still drop finished spans from its buffer."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    tracer.enabled = False

    s1 = tracer.trace("foo")
    s1.finish()
    # after finishing, no span should linger as "current"
    p1 = tracer.current_span()
    s2 = tracer.trace("bar")
    # the new span must not be chained to the finished one
    assert not s2._parent, s2._parent
    s2.finish()
    assert not p1, p1
def test_tracer_disabled():
    """Spans are recorded only while the tracer is enabled."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    # enabled: the trace reaches the writer
    tracer.enabled = True
    with tracer.trace("foo") as s:
        s.set_tag("a", "b")
    assert writer.pop()

    # disabled: nothing is written
    tracer.enabled = False
    with tracer.trace("foo") as s:
        s.set_tag("a", "b")
    assert not writer.pop()
from django.apps import apps from django.test import TestCase # project from ddtrace.tracer import Tracer from ddtrace.contrib.django.conf import settings from ddtrace.contrib.django.db import unpatch_db from ddtrace.contrib.django.cache import unpatch_cache from ddtrace.contrib.django.templates import unpatch_template from ddtrace.contrib.django.middleware import remove_exception_middleware, remove_trace_middleware # testing from ...test_tracer import DummyWriter # testing tracer tracer = Tracer() tracer.writer = DummyWriter() class DjangoTraceTestCase(TestCase): """ Base class that provides an internal tracer according to given Datadog settings. This class ensures that the tracer spans are properly reset after each run. The tracer is available in the ``self.tracer`` attribute. """ def setUp(self): # assign the default tracer self.tracer = settings.TRACER # empty the tracer spans from previous operations # such as database creation queries
def get_dummy_tracer():
    """Return a Tracer whose writer buffers spans in memory for assertions."""
    tracer = Tracer()
    tracer.writer = DummyWriter()
    return tracer
class TestWorkers(TestCase):
    """
    Ensures that the background workers interact correctly with the main
    thread. These are integration tests, so real agent calls are triggered.
    """

    def _decode(self, payload):
        """
        Decode a transport payload according to the encoder the API uses.
        """
        if isinstance(self.api._encoder, JSONEncoder):
            return json.loads(payload)
        elif isinstance(self.api._encoder, MsgpackEncoder):
            return msgpack.unpackb(payload, encoding='utf-8')

    def setUp(self):
        """
        Create a tracer with running workers, while spying the ``_put()``
        method to keep trace of triggered API calls.
        """
        # create a new tracer
        self.tracer = Tracer()
        # wrap _put() in a spying mock that still performs the real call
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

    def tearDown(self):
        """
        Stop running worker
        """
        self.tracer.writer._worker.stop()

    def _wait_thread_flush(self):
        """
        Helper that waits for the thread flush
        """
        self.tracer.writer._worker.stop()
        self.tracer.writer._worker.join()

    def _get_endpoint_payload(self, calls, endpoint):
        """
        Helper to retrieve the endpoint call from a concurrent
        trace or service call.
        """
        for call, _ in calls:
            if endpoint in call[0]:
                return call[0], self._decode(call[1])
        return None, None

    def test_worker_single_trace(self):
        # a single finished trace triggers exactly one send()
        tracer = self.tracer
        tracer.trace('client.testing').finish()

        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # the one call must target the traces endpoint with one one-span trace
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(len(payload[0]), 1)
        eq_(payload[0][0]['name'], 'client.testing')

    def test_worker_multiple_traces(self):
        # make a single send() if multiple traces are created before the flush interval
        tracer = self.tracer
        tracer.trace('client.testing').finish()
        tracer.trace('client.testing').finish()

        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # both traces travel in the same payload
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 2)
        eq_(len(payload[0]), 1)
        eq_(len(payload[1]), 1)
        eq_(payload[0][0]['name'], 'client.testing')
        eq_(payload[1][0]['name'], 'client.testing')

    def test_worker_single_trace_multiple_spans(self):
        # make a single send() if a single trace with multiple spans is created before the flush
        tracer = self.tracer
        parent = tracer.trace('client.testing')
        tracer.trace('client.testing').finish()
        parent.finish()

        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # one trace containing both spans
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(len(payload[0]), 2)
        eq_(payload[0][0]['name'], 'client.testing')
        eq_(payload[0][1]['name'], 'client.testing')

    def test_worker_single_service(self):
        # service must be sent correctly
        tracer = self.tracer
        tracer.set_service_info('client.service', 'django', 'web')
        tracer.trace('client.testing').finish()

        # expect a call for traces and one for services
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 2)
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/services')
        eq_(endpoint, '/v0.3/services')
        eq_(len(payload.keys()), 1)
        eq_(payload['client.service'], {'app': 'django', 'app_type': 'web'})

    def test_worker_service_called_multiple_times(self):
        # multiple registered services are batched into one services call
        tracer = self.tracer
        tracer.set_service_info('backend', 'django', 'web')
        tracer.set_service_info('database', 'postgres', 'db')
        tracer.trace('client.testing').finish()

        # expect a call for traces and one for services
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 2)
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/services')
        eq_(endpoint, '/v0.3/services')
        eq_(len(payload.keys()), 2)
        eq_(payload['backend'], {'app': 'django', 'app_type': 'web'})
        eq_(payload['database'], {'app': 'postgres', 'app_type': 'db'})

    def test_worker_http_error_logging(self):
        # Tests the logging http error logic
        tracer = self.tracer
        self.tracer.writer.api = FlawedAPI(Tracer.DEFAULT_HOSTNAME, Tracer.DEFAULT_PORT)
        tracer.trace('client.testing').finish()

        log = logging.getLogger("ddtrace.writer")
        log_handler = MockedLogHandler(level='DEBUG')
        log.addHandler(log_handler)

        # sleeping 1.01 secs to prevent writer from exiting before logging
        time.sleep(1.01)
        self._wait_thread_flush()
        assert tracer.writer._worker._last_error_ts < time.time()

        logged_errors = log_handler.messages['error']
        eq_(len(logged_errors), 1)
        ok_('failed_to_send traces to Agent: HTTP error status 400, reason Bad Request, message Content-Type:'
            in logged_errors[0])

    def test_worker_filter_request(self):
        self.tracer.configure(settings={FILTERS_KEY: [FilterRequestsOnUrl(r'http://example\.com/health')]})
        # re-install the spy: configure() rebuilt the writer/api
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

        span = self.tracer.trace('testing.filteredurl')
        span.set_tag(http.URL, 'http://example.com/health')
        span.finish()
        span = self.tracer.trace('testing.nonfilteredurl')
        span.set_tag(http.URL, 'http://example.com/api/resource')
        span.finish()
        self._wait_thread_flush()

        # Only the second trace should have been sent
        eq_(self.api._put.call_count, 1)
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(payload[0][0]['name'], 'testing.nonfilteredurl')
class TestWorkers(TestCase):
    """
    Ensures that the background workers interact correctly with the main
    thread. These are integration tests, so real agent calls are triggered.
    """

    def _decode(self, payload):
        """
        Decode a transport payload according to the encoder the API uses.
        """
        if isinstance(self.api._encoder, JSONEncoder):
            return json.loads(payload)
        elif isinstance(self.api._encoder, MsgpackEncoder):
            return msgpack.unpackb(payload, encoding='utf-8')

    def setUp(self):
        """
        Create a tracer with running workers, while spying the ``_put()``
        method to keep trace of triggered API calls.
        """
        # create a new tracer
        self.tracer = Tracer()
        # wrap _put() in a spying mock that still performs the real call
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

    def tearDown(self):
        """
        Stop running worker
        """
        self._wait_thread_flush()

    def _wait_thread_flush(self):
        """
        Helper that waits for the thread flush
        """
        self.tracer.writer.stop()
        self.tracer.writer.join(None)

    def _get_endpoint_payload(self, calls, endpoint):
        """
        Helper to retrieve the endpoint call from a concurrent
        trace or service call.
        """
        for call, _ in calls:
            if endpoint in call[0]:
                return call[0], self._decode(call[1])
        return None, None

    @skipUnless(
        os.environ.get('TEST_DATADOG_INTEGRATION_UDS', False),
        'You should have a running trace agent on a socket and set TEST_DATADOG_INTEGRATION_UDS=1 env variable'
    )
    def test_worker_single_trace_uds(self):
        self.tracer.configure(uds_path='/tmp/ddagent/trace.sock')
        # Write a first trace so we get a _worker
        self.tracer.trace('client.testing').finish()
        worker = self.tracer.writer
        worker._log_error_status = mock.Mock(
            worker._log_error_status,
            wraps=worker._log_error_status,
        )
        self.tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        # Check that no error was logged
        assert worker._log_error_status.call_count == 0

    def test_worker_single_trace_uds_wrong_socket_path(self):
        self.tracer.configure(uds_path='/tmp/ddagent/nosockethere')
        # Write a first trace so we get a _worker
        self.tracer.trace('client.testing').finish()
        worker = self.tracer.writer
        worker._log_error_status = mock.Mock(
            worker._log_error_status,
            wraps=worker._log_error_status,
        )
        self.tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        # the bad socket path must have produced exactly one logged error
        assert worker._log_error_status.call_count == 1

    def test_worker_single_trace(self):
        # a single finished trace triggers exactly one send()
        tracer = self.tracer
        tracer.trace('client.testing').finish()

        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # the one call must target the traces endpoint with one one-span trace
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 1
        assert len(payload[0]) == 1
        assert payload[0][0]['name'] == 'client.testing'

    # DEV: If we can make the writer flushing deterministic for the case of tests, then we can re-enable this
    @skip(
        'Writer flush intervals are impossible to time correctly to make this test not flaky'
    )
    def test_worker_multiple_traces(self):
        # make a single send() if multiple traces are created before the flush interval
        tracer = self.tracer
        tracer.trace('client.testing').finish()
        tracer.trace('client.testing').finish()

        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # both traces travel in the same payload
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 2
        assert len(payload[0]) == 1
        assert len(payload[1]) == 1
        assert payload[0][0]['name'] == 'client.testing'
        assert payload[1][0]['name'] == 'client.testing'

    def test_worker_single_trace_multiple_spans(self):
        # make a single send() if a single trace with multiple spans is created before the flush
        tracer = self.tracer
        parent = tracer.trace('client.testing')
        tracer.trace('client.testing').finish()
        parent.finish()

        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # one trace containing both spans
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 1
        assert len(payload[0]) == 2
        assert payload[0][0]['name'] == 'client.testing'
        assert payload[0][1]['name'] == 'client.testing'

    def test_worker_http_error_logging(self):
        # Tests the logging http error logic
        tracer = self.tracer
        self.tracer.writer.api = FlawedAPI(Tracer.DEFAULT_HOSTNAME, Tracer.DEFAULT_PORT)
        tracer.trace('client.testing').finish()

        log = logging.getLogger('ddtrace.internal.writer')
        log_handler = MockedLogHandler(level='DEBUG')
        log.addHandler(log_handler)

        self._wait_thread_flush()
        assert tracer.writer._last_error_ts < monotonic.monotonic()

        logged_errors = log_handler.messages['error']
        assert len(logged_errors) == 1
        assert 'Failed to send traces to Datadog Agent at http://localhost:8126: ' \
            'HTTP error status 400, reason Bad Request, message Content-Type:' \
            in logged_errors[0]

    def test_worker_filter_request(self):
        self.tracer.configure(settings={
            FILTERS_KEY: [FilterRequestsOnUrl(r'http://example\.com/health')]
        })
        # re-install the spy: configure() rebuilt the writer/api
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

        span = self.tracer.trace('testing.filteredurl')
        span.set_tag(http.URL, 'http://example.com/health')
        span.finish()
        span = self.tracer.trace('testing.nonfilteredurl')
        span.set_tag(http.URL, 'http://example.com/api/resource')
        span.finish()
        self._wait_thread_flush()

        # Only the second trace should have been sent
        assert self.api._put.call_count == 1
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 1
        assert payload[0][0]['name'] == 'testing.nonfilteredurl'