def test_simple_limit(self):
    """The ThroughputSampler should cap sampled traces at tps * BUFFER_DURATION.

    Uses patch_time() so the sampler's internal clock is fully controlled.
    """
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer
    with patch_time() as fake_time:
        tps = 5
        tracer.sampler = ThroughputSampler(tps)
        # The sampler's buffer starts empty, so the first burst is expected to
        # be sampled in full (10 traces <= tps * BUFFER_DURATION, presumably —
        # BUFFER_DURATION is defined on the sampler, not visible here).
        for _ in range(10):
            s = tracer.trace("whatever")
            s.finish()
        traces = writer.pop()

        got = len(traces)
        expected = 10

        assert got == expected, \
            "Wrong number of traces sampled, %s instead of %s" % (got, expected)

        # Wait enough to reset
        fake_time.sleep(tracer.sampler.BUFFER_DURATION + 1)

        # A burst larger than the budget: only tps * BUFFER_DURATION traces
        # should make it through the sampler.
        for _ in range(100):
            s = tracer.trace("whatever")
            s.finish()
        traces = writer.pop()

        got = len(traces)
        expected = tps * tracer.sampler.BUFFER_DURATION

        assert got == expected, \
            "Wrong number of traces sampled, %s instead of %s" % (got, expected)
def test_tracer_pid():
    """Only the root span of a trace should carry the process-id tag."""
    tracer = Tracer()
    tracer.writer = DummyWriter()

    with tracer.trace("root") as parent:
        with tracer.trace("child") as child:
            time.sleep(0.05)

    # The root span carries the pid of the current process.
    eq_(parent.get_tag(system.PID), str(getpid()))
    # Child spans must not duplicate the pid tag.
    eq_(child.get_tag(system.PID), None)
def test_manual_keep_then_drop():
    """A manual drop set after a manual keep must win for the whole trace."""
    tracer = Tracer()
    tracer.writer = DummyWriter()

    # Flip the decision while the spans are still open: keep on the child
    # first, then drop on the root.
    with tracer.trace("asdf") as parent:
        with tracer.trace("child") as leaf:
            leaf.set_tag(MANUAL_KEEP_KEY)
        parent.set_tag(MANUAL_DROP_KEY)

    finished = tracer.writer.pop()
    assert finished[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
def test_tracer_disabled_mem_leak():
    """A disabled tracer must still clear finished spans from its buffer."""
    tracer = Tracer()
    tracer.writer = DummyWriter()
    tracer.enabled = False

    first = tracer.trace("foo")
    first.finish()

    # After finishing, nothing should linger as the "current" span, and a
    # fresh span must not pick up a stale parent.
    leftover = tracer.current_span()
    second = tracer.trace("bar")
    assert not second._parent, second._parent
    second.finish()
    assert not leftover, leftover
def test_tracer_disabled():
    """Spans are delivered to the writer only while the tracer is enabled."""
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    # Enabled: the finished span reaches the writer.
    tracer.enabled = True
    with tracer.trace("foo") as span:
        span.set_tag("a", "b")
    assert writer.pop()

    # Disabled: nothing is written.
    tracer.enabled = False
    with tracer.trace("foo") as span:
        span.set_tag("a", "b")
    assert not writer.pop()
def test_get_report_hostname_default(get_hostname):
    """With report_hostname disabled, no span should get a hostname tag."""
    get_hostname.return_value = "test-hostname"
    tracer = Tracer()
    tracer.writer = DummyWriter()

    with override_global_config(dict(report_hostname=False)):
        with tracer.trace("span"):
            with tracer.trace("child"):
                pass

    finished = tracer.writer.pop()
    root_span = finished[0]
    child_span = finished[1]
    # Neither the root nor the child is tagged with the (mocked) hostname.
    assert root_span.get_tag(HOSTNAME_KEY) is None
    assert child_span.get_tag(HOSTNAME_KEY) is None
def test_tracer_wrap_factory_nested():
    # it should use a wrap_factory if defined even in nested tracing
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
        # Custom executor: ignores the requested span name/service and always
        # traces under 'wrap.overwrite', recording the call arguments as tags.
        with tracer.trace('wrap.overwrite') as span:
            span.set_tag('args', args)
            span.set_tag('kwargs', kwargs)
            return fn(*args, **kwargs)

    @tracer.wrap()
    def wrapped_function(param, kw_param=None):
        eq_(42, param)
        eq_(42, kw_param)

    # set the custom wrap factory after the wrapper has been called
    tracer.configure(wrap_executor=wrap_executor)

    # call the function expecting that the custom tracing wrapper is used
    with tracer.trace('wrap.parent', service='webserver'):
        wrapped_function(42, kw_param=42)

    # The executor's span is created inside 'wrap.parent' and therefore
    # inherits its 'webserver' service; args/kwargs are stringified tags.
    eq_(writer.spans[0].name, 'wrap.parent')
    eq_(writer.spans[0].service, 'webserver')

    eq_(writer.spans[1].name, 'wrap.overwrite')
    eq_(writer.spans[1].service, 'webserver')
    eq_(writer.spans[1].get_tag('args'), '(42,)')
    eq_(writer.spans[1].get_tag('kwargs'), '{\'kw_param\': 42}')
def test_long_run(self):
    """Long-running check that ThroughputSampler converges to its tps budget.

    Runs a matrix of (tps, traces-per-second, duration) combinations under a
    patched clock and allows a bounded start-up error.
    """
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    # Test a big matrix of combinaisons
    # Ensure to have total_time >> BUFFER_DURATION to reduce edge effects
    for tps in [10, 23, 15, 31]:
        for (traces_per_s, total_time) in [(80, 23), (75, 66), (1000, 77)]:
            with patch_time() as fake_time:
                # We do tons of operations in this test, do not let the time slowly shift
                fake_time.set_delta(0)
                tracer.sampler = ThroughputSampler(tps)

                # Simulate `total_time` seconds, `traces_per_s` traces each.
                for _ in range(total_time):
                    for _ in range(traces_per_s):
                        s = tracer.trace("whatever")
                        s.finish()
                    fake_time.sleep(1)

            traces = writer.pop()
            # The current sampler implementation can introduce an error of up to
            # `tps * BUFFER_DURATION` traces at initialization (since the sampler starts empty)
            got = len(traces)
            expected = tps * total_time
            error_delta = tps * tracer.sampler.BUFFER_DURATION

            assert abs(got - expected) <= error_delta, \
                "Wrong number of traces sampled, %s instead of %s (error_delta > %s)" % (got, expected, error_delta)
def test_manual_drop():
    """MANUAL_DROP on any span must set USER_REJECT priority on the trace."""
    tracer = Tracer()
    tracer.writer = DummyWriter()

    # Tag set on a root span.
    with tracer.trace("asdf") as span:
        span.set_tag(MANUAL_DROP_KEY)
    finished = tracer.writer.pop()
    assert finished[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT

    # Tag set on a child span.
    with tracer.trace("asdf"):
        with tracer.trace("child") as span:
            span.set_tag(MANUAL_DROP_KEY)
    finished = tracer.writer.pop()
    assert finished[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
def test_tracer_trace_across_fork():
    """
    When a trace is started in a parent process and a child process is spawned
        The trace should be continued in the child process
    """
    tracer = Tracer()
    tracer.writer = DummyWriter()

    def task(tracer, q):
        # Runs in the child process: give the inherited tracer its own
        # DummyWriter so the child's spans can be captured and reported
        # back through the queue.
        tracer.writer = DummyWriter()

        with tracer.trace("child"):
            pass

        spans = tracer.writer.pop()
        q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id) for s in spans])

    # Assert tracer in a new process correctly recreates the writer
    q = multiprocessing.Queue()
    with tracer.trace("parent") as parent:
        # Span is started while "parent" is still open, so the child process
        # should continue the same trace.
        p = multiprocessing.Process(target=task, args=(tracer, q))
        p.start()
        p.join()

    children = q.get()
    assert len(children) == 1
    (child, ) = children
    # The child's span belongs to the parent's trace and is parented to it.
    assert parent.trace_id == child["trace_id"]
    assert child["parent_id"] == parent.span_id
def test_sample_rate_deviation(self):
    """RateSampler should converge to each configured sample rate.

    BUG FIX: the body previously reassigned ``sample_rate = 0.5`` inside the
    ``for sample_rate in [...]`` loop, clobbering the loop variable so only a
    rate of 0.5 was ever exercised (and the metric assertion was hard-coded
    to 0.5). The reassignment is removed and the assertions now use the loop
    variable, so every rate in the matrix is actually tested.
    """
    writer = DummyWriter()
    for sample_rate in [0.1, 0.25, 0.5, 1]:
        tracer = Tracer()
        tracer.writer = writer
        tracer.sampler = RateSampler(sample_rate)

        # Fixed seed keeps the sampled count deterministic across runs.
        random.seed(1234)

        iterations = int(2e4)

        for i in range(iterations):
            span = tracer.trace(i)
            span.finish()

        samples = writer.pop()

        # We must have at least 1 sample, check that it has its sample rate properly assigned
        assert samples[0].get_metric(SAMPLE_RATE_METRIC_KEY) == sample_rate

        # Less than 1% deviation when "enough" iterations (arbitrary, just check if it converges)
        deviation = abs(len(samples) - (iterations * sample_rate)) / (iterations * sample_rate)
        assert deviation < 0.01, "Deviation too high %f with sample_rate %f" % (deviation, sample_rate)
class TestTornadoSettings(TornadoTestCase):
    """
    Ensure that Tornado web application properly configures the given tracer.
    """
    def get_app(self):
        # Override with a real tracer
        self.tracer = Tracer()
        # BUG FIX: the Tornado test harness consumes the return value of
        # get_app(); the previous override called super() but dropped its
        # return value, leaving the test case with a None application.
        return super(TestTornadoSettings, self).get_app()

    def get_settings(self):
        # update tracer settings
        return {
            "datadog_trace": {
                "default_service": "custom-tornado",
                "tags": {"env": "production", "debug": "false"},
                "enabled": False,
                "agent_hostname": "dd-agent.service.consul",
                "agent_port": 8126,
                "settings": {
                    "FILTERS": [
                        TestFilter(),
                    ],
                },
            },
        }

    def test_tracer_is_properly_configured(self):
        # the tracer must be properly configured
        assert self.tracer._tags.get("env") == "production"
        assert self.tracer._tags.get("debug") == "false"
        assert self.tracer.enabled is False
        assert self.tracer.agent_trace_url == "http://dd-agent.service.consul:8126"

        writer = DummyWriter()
        self.tracer.configure(enabled=True, writer=writer)
        # A span named "keep" passes the configured filter chain...
        with self.tracer.trace("keep"):
            pass
        spans = writer.pop()
        assert len(spans) == 1

        # ...while "drop" is filtered out (presumably by TestFilter, which is
        # defined elsewhere — confirm against its implementation).
        with self.tracer.trace("drop"):
            pass
        spans = writer.pop()
        assert len(spans) == 0
def test_tracer_vars():
    """trace() honors explicit service/resource/span_type and its defaults."""
    tracer = Tracer()
    tracer.writer = DummyWriter()

    # All keyword arguments supplied explicitly.
    span = tracer.trace("a", service="s", resource="r", span_type="t")
    eq_(span.service, "s")
    eq_(span.resource, "r")
    eq_(span.span_type, "t")
    span.finish()

    # Defaults: no service/type, resource falls back to the span name.
    span = tracer.trace("a")
    eq_(span.service, None)
    eq_(span.resource, "a")  # inherits
    eq_(span.span_type, None)
def test_tracer_global_tags():
    """set_tags() applies the current global tags to every new span."""
    tracer = Tracer()
    tracer.writer = DummyWriter()

    # No global tags yet: the span's meta stays empty.
    span_a = tracer.trace('brie')
    span_a.finish()
    assert not span_a.meta

    # A single global tag shows up on subsequent spans.
    tracer.set_tags({'env': 'prod'})
    span_b = tracer.trace('camembert')
    span_b.finish()
    assert span_b.meta == {'env': 'prod'}

    # Updating the global tags affects spans created afterwards.
    tracer.set_tags({'env': 'staging', 'other': 'tag'})
    span_c = tracer.trace('gruyere')
    span_c.finish()
    assert span_c.meta == {'env': 'staging', 'other': 'tag'}
def test_tracer_global_tags():
    # NOTE(review): a function with this exact name appears earlier in this
    # file; at import time this later definition shadows the earlier one, so
    # only one of them is collected — confirm whether both are meant to exist.
    writer = DummyWriter()
    tracer = Tracer()
    tracer.writer = writer

    # No global tags set yet: the span carries neither tag.
    s1 = tracer.trace('brie')
    s1.finish()
    assert not s1.get_tag('env')
    assert not s1.get_tag('other')

    # One global tag: present on new spans, the other still absent.
    tracer.set_tags({'env': 'prod'})
    s2 = tracer.trace('camembert')
    s2.finish()
    assert s2.get_tag('env') == 'prod'
    assert not s2.get_tag('other')

    # Updated global tags: both visible on spans created afterwards.
    tracer.set_tags({'env': 'staging', 'other': 'tag'})
    s3 = tracer.trace('gruyere')
    s3.finish()
    assert s3.get_tag('env') == 'staging'
    assert s3.get_tag('other') == 'tag'
def test_unserializable_span_with_finish():
    """Finishing a span holding an unserializable metric must not loop."""
    try:
        import numpy as np
    except ImportError:
        raise SkipTest("numpy not installed")

    # Regression: manually calling finish() on a span carrying a value the
    # encoder cannot serialize used to cause a serialization loop.
    tracer = Tracer()
    tracer.writer = DummyWriter()
    with tracer.trace("parent") as span:
        # circumvent the data checks
        span.metrics['as'] = np.int64(1)
        span.finish()
class TestWorkers(TestCase):
    """
    Ensures that a workers interacts correctly with the main thread. These are part
    of integration tests so real calls are triggered.
    """
    def _decode(self, payload):
        """
        Helper function that decodes data based on the given Encoder.
        """
        if isinstance(self.api._encoder, JSONEncoder):
            return json.loads(payload)
        elif isinstance(self.api._encoder, MsgpackEncoder):
            return msgpack.unpackb(payload, encoding='utf-8')

    def setUp(self):
        """
        Create a tracer with running workers, while spying the ``_put()`` method to
        keep trace of triggered API calls.
        """
        # create a new tracer
        self.tracer = Tracer()
        # spy the send() method
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

    def tearDown(self):
        """
        Stop running worker
        """
        self.tracer.writer._worker.stop()

    def _wait_thread_flush(self):
        """
        Helper that waits for the thread flush
        """
        self.tracer.writer._worker.stop()
        self.tracer.writer._worker.join()

    def _get_endpoint_payload(self, calls, endpoint):
        """
        Helper to retrieve the endpoint call from a concurrent
        trace or service call. Returns (None, None) when no call matched.
        """
        for call, _ in calls:
            if endpoint in call[0]:
                return call[0], self._decode(call[1])
        return None, None

    def test_worker_single_trace(self):
        # create a trace block and send it using the transport system
        tracer = self.tracer
        tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(len(payload[0]), 1)
        eq_(payload[0][0]['name'], 'client.testing')

    # DEV: If we can make the writer flushing deterministic for the case of tests, then we can re-enable this
    @skip(
        'Writer flush intervals are impossible to time correctly to make this test not flaky'
    )
    def test_worker_multiple_traces(self):
        # make a single send() if multiple traces are created before the flush interval
        tracer = self.tracer
        tracer.trace('client.testing').finish()
        tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 2)
        eq_(len(payload[0]), 1)
        eq_(len(payload[1]), 1)
        eq_(payload[0][0]['name'], 'client.testing')
        eq_(payload[1][0]['name'], 'client.testing')

    def test_worker_single_trace_multiple_spans(self):
        # make a single send() if a single trace with multiple spans is created before the flush
        tracer = self.tracer
        parent = tracer.trace('client.testing')
        child = tracer.trace('client.testing').finish()
        parent.finish()

        # one send is expected
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(len(payload[0]), 2)
        eq_(payload[0][0]['name'], 'client.testing')
        eq_(payload[0][1]['name'], 'client.testing')

    def test_worker_single_service(self):
        # service must be sent correctly
        tracer = self.tracer
        tracer.set_service_info('client.service', 'django', 'web')
        tracer.trace('client.testing').finish()

        # expect a call for traces and services
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 2)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.3/services')
        eq_(endpoint, '/v0.3/services')
        eq_(len(payload.keys()), 1)
        eq_(payload['client.service'], {'app': 'django', 'app_type': 'web'})

    def test_worker_service_called_multiple_times(self):
        # service must be sent correctly
        tracer = self.tracer
        tracer.set_service_info('backend', 'django', 'web')
        tracer.set_service_info('database', 'postgres', 'db')
        tracer.trace('client.testing').finish()

        # expect a call for traces and services
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 2)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.3/services')
        eq_(endpoint, '/v0.3/services')
        eq_(len(payload.keys()), 2)
        eq_(payload['backend'], {'app': 'django', 'app_type': 'web'})
        eq_(payload['database'], {'app': 'postgres', 'app_type': 'db'})

    def test_worker_http_error_logging(self):
        # Tests the logging http error logic
        tracer = self.tracer
        # FlawedAPI forces the agent call to fail with an HTTP 400.
        self.tracer.writer.api = FlawedAPI(Tracer.DEFAULT_HOSTNAME, Tracer.DEFAULT_PORT)
        tracer.trace('client.testing').finish()

        log = logging.getLogger("ddtrace.writer")
        log_handler = MockedLogHandler(level='DEBUG')
        log.addHandler(log_handler)

        # sleeping 1.01 secs to prevent writer from exiting before logging
        time.sleep(1.01)
        self._wait_thread_flush()
        assert tracer.writer._worker._last_error_ts < time.time()

        logged_errors = log_handler.messages['error']
        eq_(len(logged_errors), 1)
        ok_('failed_to_send traces to Agent: HTTP error status 400, reason Bad Request, message Content-Type:'
            in logged_errors[0])

    def test_worker_filter_request(self):
        # Traces whose http.url matches the filter must not be sent.
        self.tracer.configure(settings={
            FILTERS_KEY: [FilterRequestsOnUrl(r'http://example\.com/health')]
        })
        # spy the send() method
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

        span = self.tracer.trace('testing.filteredurl')
        span.set_tag(http.URL, 'http://example.com/health')
        span.finish()
        span = self.tracer.trace('testing.nonfilteredurl')
        span.set_tag(http.URL, 'http://example.com/api/resource')
        span.finish()
        self._wait_thread_flush()

        # Only the second trace should have been sent
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(payload[0][0]['name'], 'testing.nonfilteredurl')
class TestWorkers(TestCase):
    """
    Ensures that a workers interacts correctly with the main thread. These are part
    of integration tests so real calls are triggered.

    NOTE(review): this class duplicates another ``TestWorkers`` definition in
    this file (a later definition shadows an earlier one at import time) —
    confirm these are separate chunks of different revisions/files.
    """
    def _decode(self, payload):
        """
        Helper function that decodes data based on the given Encoder.
        """
        if isinstance(self.api._encoder, JSONEncoder):
            return json.loads(payload)
        elif isinstance(self.api._encoder, MsgpackEncoder):
            return msgpack.unpackb(payload, encoding='utf-8')

    def setUp(self):
        """
        Create a tracer with running workers, while spying the ``_put()`` method to
        keep trace of triggered API calls.
        """
        # create a new tracer
        self.tracer = Tracer()
        # spy the send() method
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

    def tearDown(self):
        """
        Stop running worker
        """
        self.tracer.writer._worker.stop()

    def _wait_thread_flush(self):
        """
        Helper that waits for the thread flush
        """
        self.tracer.writer._worker.stop()
        self.tracer.writer._worker.join()

    def _get_endpoint_payload(self, calls, endpoint):
        """
        Helper to retrieve the endpoint call from a concurrent
        trace or service call. Returns (None, None) when no call matched.
        """
        for call, _ in calls:
            if endpoint in call[0]:
                return call[0], self._decode(call[1])
        return None, None

    def test_worker_single_trace(self):
        # create a trace block and send it using the transport system
        tracer = self.tracer
        tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(len(payload[0]), 1)
        eq_(payload[0][0]['name'], 'client.testing')

    def test_worker_multiple_traces(self):
        # make a single send() if multiple traces are created before the flush interval
        tracer = self.tracer
        tracer.trace('client.testing').finish()
        tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 2)
        eq_(len(payload[0]), 1)
        eq_(len(payload[1]), 1)
        eq_(payload[0][0]['name'], 'client.testing')
        eq_(payload[1][0]['name'], 'client.testing')

    def test_worker_single_trace_multiple_spans(self):
        # make a single send() if a single trace with multiple spans is created before the flush
        tracer = self.tracer
        parent = tracer.trace('client.testing')
        child = tracer.trace('client.testing').finish()
        parent.finish()

        # one send is expected
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(len(payload[0]), 2)
        eq_(payload[0][0]['name'], 'client.testing')
        eq_(payload[0][1]['name'], 'client.testing')

    def test_worker_single_service(self):
        # service must be sent correctly
        tracer = self.tracer
        tracer.set_service_info('client.service', 'django', 'web')
        tracer.trace('client.testing').finish()

        # expect a call for traces and services
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 2)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/services')
        eq_(endpoint, '/v0.3/services')
        eq_(len(payload.keys()), 1)
        eq_(payload['client.service'], {'app': 'django', 'app_type': 'web'})

    def test_worker_service_called_multiple_times(self):
        # service must be sent correctly
        tracer = self.tracer
        tracer.set_service_info('backend', 'django', 'web')
        tracer.set_service_info('database', 'postgres', 'db')
        tracer.trace('client.testing').finish()

        # expect a call for traces and services
        self._wait_thread_flush()
        eq_(self.api._put.call_count, 2)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/services')
        eq_(endpoint, '/v0.3/services')
        eq_(len(payload.keys()), 2)
        eq_(payload['backend'], {'app': 'django', 'app_type': 'web'})
        eq_(payload['database'], {'app': 'postgres', 'app_type': 'db'})

    def test_worker_http_error_logging(self):
        # Tests the logging http error logic
        tracer = self.tracer
        # FlawedAPI forces the agent call to fail with an HTTP 400.
        self.tracer.writer.api = FlawedAPI(Tracer.DEFAULT_HOSTNAME, Tracer.DEFAULT_PORT)
        tracer.trace('client.testing').finish()

        log = logging.getLogger("ddtrace.writer")
        log_handler = MockedLogHandler(level='DEBUG')
        log.addHandler(log_handler)

        # sleeping 1.01 secs to prevent writer from exiting before logging
        time.sleep(1.01)
        self._wait_thread_flush()
        assert tracer.writer._worker._last_error_ts < time.time()

        logged_errors = log_handler.messages['error']
        eq_(len(logged_errors), 1)
        ok_('failed_to_send traces to Agent: HTTP error status 400, reason Bad Request, message Content-Type:'
            in logged_errors[0])

    def test_worker_filter_request(self):
        # Traces whose http.url matches the filter must not be sent.
        self.tracer.configure(settings={FILTERS_KEY: [FilterRequestsOnUrl(r'http://example\.com/health')]})
        # spy the send() method
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

        span = self.tracer.trace('testing.filteredurl')
        span.set_tag(http.URL, 'http://example.com/health')
        span.finish()
        span = self.tracer.trace('testing.nonfilteredurl')
        span.set_tag(http.URL, 'http://example.com/api/resource')
        span.finish()
        self._wait_thread_flush()

        # Only the second trace should have been sent
        eq_(self.api._put.call_count, 1)
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.3/traces')
        eq_(endpoint, '/v0.3/traces')
        eq_(len(payload), 1)
        eq_(payload[0][0]['name'], 'testing.nonfilteredurl')
class TestWorkers(TestCase):
    """
    Ensures that a workers interacts correctly with the main thread. These are part
    of integration tests so real calls are triggered.

    This variant targets the v0.4 agent API and the writer-level worker
    (``writer.stop()`` / ``writer.join()`` rather than ``writer._worker``).
    """
    def _decode(self, payload):
        """
        Helper function that decodes data based on the given Encoder.
        """
        if isinstance(self.api._encoder, JSONEncoder):
            return json.loads(payload)
        elif isinstance(self.api._encoder, MsgpackEncoder):
            return msgpack.unpackb(payload, encoding='utf-8')

    def setUp(self):
        """
        Create a tracer with running workers, while spying the ``_put()`` method to
        keep trace of triggered API calls.
        """
        # create a new tracer
        self.tracer = Tracer()
        # spy the send() method
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

    def tearDown(self):
        """
        Stop running worker
        """
        self._wait_thread_flush()

    def _wait_thread_flush(self):
        """
        Helper that waits for the thread flush
        """
        self.tracer.writer.stop()
        self.tracer.writer.join(None)

    def _get_endpoint_payload(self, calls, endpoint):
        """
        Helper to retrieve the endpoint call from a concurrent
        trace or service call. Returns (None, None) when no call matched.
        """
        for call, _ in calls:
            if endpoint in call[0]:
                return call[0], self._decode(call[1])
        return None, None

    @skipUnless(
        os.environ.get('TEST_DATADOG_INTEGRATION_UDS', False),
        'You should have a running trace agent on a socket and set TEST_DATADOG_INTEGRATION_UDS=1 env variable'
    )
    def test_worker_single_trace_uds(self):
        self.tracer.configure(uds_path='/tmp/ddagent/trace.sock')
        # Write a first trace so we get a _worker
        self.tracer.trace('client.testing').finish()
        worker = self.tracer.writer
        worker._log_error_status = mock.Mock(
            worker._log_error_status, wraps=worker._log_error_status,
        )
        self.tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        # Check that no error was logged
        assert worker._log_error_status.call_count == 0

    def test_worker_single_trace_uds_wrong_socket_path(self):
        self.tracer.configure(uds_path='/tmp/ddagent/nosockethere')
        # Write a first trace so we get a _worker
        self.tracer.trace('client.testing').finish()
        worker = self.tracer.writer
        worker._log_error_status = mock.Mock(
            worker._log_error_status, wraps=worker._log_error_status,
        )
        self.tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        # Exactly one error is expected, since the socket path does not exist
        assert worker._log_error_status.call_count == 1

    def test_worker_single_trace(self):
        # create a trace block and send it using the transport system
        tracer = self.tracer
        tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 1
        assert len(payload[0]) == 1
        assert payload[0][0]['name'] == 'client.testing'

    # DEV: If we can make the writer flushing deterministic for the case of tests, then we can re-enable this
    @skip(
        'Writer flush intervals are impossible to time correctly to make this test not flaky'
    )
    def test_worker_multiple_traces(self):
        # make a single send() if multiple traces are created before the flush interval
        tracer = self.tracer
        tracer.trace('client.testing').finish()
        tracer.trace('client.testing').finish()

        # one send is expected
        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 2
        assert len(payload[0]) == 1
        assert len(payload[1]) == 1
        assert payload[0][0]['name'] == 'client.testing'
        assert payload[1][0]['name'] == 'client.testing'

    def test_worker_single_trace_multiple_spans(self):
        # make a single send() if a single trace with multiple spans is created before the flush
        tracer = self.tracer
        parent = tracer.trace('client.testing')
        tracer.trace('client.testing').finish()
        parent.finish()

        # one send is expected
        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 1
        assert len(payload[0]) == 2
        assert payload[0][0]['name'] == 'client.testing'
        assert payload[0][1]['name'] == 'client.testing'

    def test_worker_http_error_logging(self):
        # Tests the logging http error logic
        tracer = self.tracer
        # FlawedAPI forces the agent call to fail with an HTTP 400.
        self.tracer.writer.api = FlawedAPI(Tracer.DEFAULT_HOSTNAME, Tracer.DEFAULT_PORT)
        tracer.trace('client.testing').finish()

        log = logging.getLogger('ddtrace.internal.writer')
        log_handler = MockedLogHandler(level='DEBUG')
        log.addHandler(log_handler)

        self._wait_thread_flush()
        assert tracer.writer._last_error_ts < time.time()

        logged_errors = log_handler.messages['error']
        assert len(logged_errors) == 1
        assert 'Failed to send traces to Datadog Agent at localhost:8126: ' \
            'HTTP error status 400, reason Bad Request, message Content-Type:' \
            in logged_errors[0]

    def test_worker_filter_request(self):
        # Traces whose http.url matches the filter must not be sent.
        self.tracer.configure(settings={
            FILTERS_KEY: [FilterRequestsOnUrl(r'http://example\.com/health')]
        })
        # spy the send() method
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

        span = self.tracer.trace('testing.filteredurl')
        span.set_tag(http.URL, 'http://example.com/health')
        span.finish()
        span = self.tracer.trace('testing.nonfilteredurl')
        span.set_tag(http.URL, 'http://example.com/api/resource')
        span.finish()
        self._wait_thread_flush()

        # Only the second trace should have been sent
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, '/v0.4/traces')
        assert endpoint == '/v0.4/traces'
        assert len(payload) == 1
        assert payload[0][0]['name'] == 'testing.nonfilteredurl'
class TestWorkers(TestCase):
    """
    Ensures that a workers interacts correctly with the main thread. These are part
    of integration tests so real calls are triggered.

    Newest variant: pytest markers, encoder-provided ``_decode``, msgpack byte
    keys in payload assertions, and an explicit ``flush_queue()`` on shutdown.
    """
    def setUp(self):
        """
        Create a tracer with running workers, while spying the ``_put()`` method to
        keep trace of triggered API calls.
        """
        # create a new tracer
        self.tracer = Tracer()
        # spy the send() method
        self.api = self.tracer.writer.api
        self.api._put = mock.Mock(self.api._put, wraps=self.api._put)

    def tearDown(self):
        """
        Stop running worker
        """
        self._wait_thread_flush()

    def _wait_thread_flush(self):
        """
        Helper that waits for the thread flush
        """
        # Drain pending traces before stopping so nothing is lost.
        self.tracer.writer.flush_queue()
        self.tracer.writer.stop()
        self.tracer.writer.join(None)

    def _get_endpoint_payload(self, calls, endpoint):
        """
        Helper to retrieve the endpoint call from a concurrent
        trace or service call. Returns (None, None) when no call matched.
        """
        for call, _ in calls:
            if endpoint in call[0]:
                # Decoding is delegated to the writer's encoder here.
                return call[0], self.api._encoder._decode(call[1])
        return None, None

    @pytest.mark.skipif(AGENT_VERSION == "v5" or AGENT_VERSION == "testagent", reason="Agent doesn't support UDS")
    def test_worker_single_trace_uds(self):
        self.tracer.configure(uds_path="/tmp/ddagent/trace.sock")
        # Write a first trace so we get a _worker
        self.tracer.trace("client.testing").finish()
        worker = self.tracer.writer
        worker._log_error_status = mock.Mock(
            worker._log_error_status, wraps=worker._log_error_status,
        )
        self.tracer.trace("client.testing").finish()

        # one send is expected
        self._wait_thread_flush()
        # Check that no error was logged
        assert worker._log_error_status.call_count == 0

    def test_worker_single_trace_uds_wrong_socket_path(self):
        self.tracer.configure(uds_path="/tmp/ddagent/nosockethere")
        # Write a first trace so we get a _worker
        self.tracer.trace("client.testing").finish()
        worker = self.tracer.writer
        worker._log_error_status = mock.Mock(
            worker._log_error_status, wraps=worker._log_error_status,
        )
        self.tracer.trace("client.testing").finish()

        # one send is expected
        self._wait_thread_flush()
        # Exactly one error is expected, since the socket path does not exist
        assert worker._log_error_status.call_count == 1

    def test_worker_single_trace(self):
        # create a trace block and send it using the transport system
        tracer = self.tracer
        tracer.trace("client.testing").finish()

        # one send is expected
        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, "/v0.4/traces")
        assert endpoint == "/v0.4/traces"
        assert len(payload) == 1
        assert len(payload[0]) == 1
        # msgpack decoding yields byte-string keys/values here.
        assert payload[0][0][b"name"] == b"client.testing"

    # DEV: If we can make the writer flushing deterministic for the case of tests, then we can re-enable this
    @skip(
        "Writer flush intervals are impossible to time correctly to make this test not flaky"
    )
    def test_worker_multiple_traces(self):
        # make a single send() if multiple traces are created before the flush interval
        tracer = self.tracer
        tracer.trace("client.testing").finish()
        tracer.trace("client.testing").finish()

        # one send is expected
        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, "/v0.4/traces")
        assert endpoint == "/v0.4/traces"
        assert len(payload) == 2
        assert len(payload[0]) == 1
        assert len(payload[1]) == 1
        assert payload[0][0][b"name"] == b"client.testing"
        assert payload[1][0][b"name"] == b"client.testing"

    def test_worker_single_trace_multiple_spans(self):
        # make a single send() if a single trace with multiple spans is created before the flush
        tracer = self.tracer
        parent = tracer.trace("client.testing")
        tracer.trace("client.testing").finish()
        parent.finish()

        # one send is expected
        self._wait_thread_flush()
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, "/v0.4/traces")
        assert endpoint == "/v0.4/traces"
        assert len(payload) == 1
        assert len(payload[0]) == 2
        assert payload[0][0][b"name"] == b"client.testing"
        assert payload[0][1][b"name"] == b"client.testing"

    @pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support json API.")
    def test_worker_http_error_logging(self):
        # Tests the logging http error logic
        tracer = self.tracer
        # FlawedAPI forces the agent call to fail with an HTTP 400.
        self.tracer.writer.api = FlawedAPI(Tracer.DEFAULT_HOSTNAME, Tracer.DEFAULT_PORT)
        tracer.trace("client.testing").finish()

        log = logging.getLogger("ddtrace.internal.writer")
        log_handler = MockedLogHandler(level="DEBUG")
        log.addHandler(log_handler)

        self._wait_thread_flush()
        assert tracer.writer._last_error_ts < monotonic()

        logged_errors = log_handler.messages["error"]
        assert len(logged_errors) == 1
        assert (
            "Failed to send traces to Datadog Agent at http://localhost:8126: "
            "HTTP error status 400, reason Bad Request, message Content-Type:"
            in logged_errors[0]
        )

    def test_worker_filter_request(self):
        # Traces whose http.url matches the filter must not be sent.
        self.tracer.configure(settings={
            FILTERS_KEY: [FilterRequestsOnUrl(r"http://example\.com/health")]
        })
        # spy the send() method
        span = self.tracer.trace("testing.filteredurl")
        span.set_tag(http.URL, "http://example.com/health")
        span.finish()
        span = self.tracer.trace("testing.nonfilteredurl")
        span.set_tag(http.URL, "http://example.com/api/resource")
        span.finish()
        self._wait_thread_flush()

        # Only the second trace should have been sent
        assert self.api._put.call_count == 1
        # check and retrieve the right call
        endpoint, payload = self._get_endpoint_payload(
            self.api._put.call_args_list, "/v0.4/traces")
        assert endpoint == "/v0.4/traces"
        assert len(payload) == 1
        assert payload[0][0][b"name"] == b"testing.nonfilteredurl"