Code Example #1
    def test_parse_response_json(self, log):
        tracer = get_dummy_tracer()
        tracer.debug_logging = True

        test_cases = {
            'OK':
            dict(
                js=None,
                log=
                'Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
            ),
            'OK\n':
            dict(
                js=None,
                log=
                'Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
            ),
            'error:unsupported-endpoint':
            dict(
                js=None,
                log=
                'Unable to parse Datadog Agent JSON response: .*? \'error:unsupported-endpoint\'',
            ),
            42:
            dict(  # int as key to trigger TypeError
                js=None,
                log='Unable to parse Datadog Agent JSON response: .*? 42',
            ),
            '{}':
            dict(js={}),
            '[]':
            dict(js=[]),

            # Priority sampling "rate_by_service" response
            ('{"rate_by_service": '
             '{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'):
            dict(js=dict(rate_by_service={
                'service:,env:': 0.5,
                'service:mcnulty,env:test': 0.9,
                'service:postgres,env:test': 0.6,
            }, ), ),
            ' [4,2,1] ':
            dict(js=[4, 2, 1]),
        }

        for k, v in iteritems(test_cases):
            log.reset_mock()

            r = Response.from_http_response(ResponseMock(k))
            js = r.get_json()
            assert v['js'] == js
            if 'log' in v:
                log.assert_called_once()
                msg = log.call_args[0][0] % log.call_args[0][1:]
                assert re.match(v['log'], msg), msg
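
The last two lines of the loop rebuild the formatted message from the mock's positional arguments, because the tracer logs lazily with a %-style format string followed by its arguments. A small illustration of that reconstruction (the mock call below is hypothetical, not taken from the test suite):

    from unittest import mock

    log = mock.Mock()
    # The logger is called lazily: a %-style format string followed by its arguments.
    log("Unable to parse Datadog Agent JSON response: %r", "OK")
    fmt, args = log.call_args[0][0], log.call_args[0][1:]
    msg = fmt % args  # same reconstruction as in the test above
    assert msg == "Unable to parse Datadog Agent JSON response: 'OK'"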
Code Example #2
    def test_parse_response_json(self, log):
        test_cases = {
            "OK":
            dict(
                js=None,
                log=
                "Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date",
            ),
            "OK\n":
            dict(
                js=None,
                log=
                "Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date",
            ),
            "error:unsupported-endpoint":
            dict(
                js=None,
                log=
                "Unable to parse Datadog Agent JSON response: 'error:unsupported-endpoint'",
            ),
            42:
            dict(  # int as key to trigger TypeError
                js=None,
                log="Unable to parse Datadog Agent JSON response: 42",
            ),
            "{}":
            dict(js={}),
            "[]":
            dict(js=[]),
            # Priority sampling "rate_by_service" response
            ('{"rate_by_service": '
             '{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'):
            dict(js=dict(rate_by_service={
                "service:,env:": 0.5,
                "service:mcnulty,env:test": 0.9,
                "service:postgres,env:test": 0.6,
            }, ), ),
            " [4,2,1] ":
            dict(js=[4, 2, 1]),
        }

        for k, v in iteritems(test_cases):
            log.reset_mock()

            r = Response.from_http_response(ResponseMock(k))
            js = r.get_json()
            assert v["js"] == js
            if "log" in v:
                log.assert_called_once()
                msg = log.call_args[0][0] % log.call_args[0][1:]
                assert re.match(v["log"], msg), msg
Code Example #3
    def _put(self, endpoint, data, count=0):
        # Send the encoded payload to the agent and wrap the raw httplib response.
        conn = httplib.HTTPConnection(self.hostname, self.port)
        conn.request('PUT', endpoint, data, self._headers)
        return Response.from_http_response(conn.getresponse())
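
A short usage sketch combining _put with the Response helpers from Examples #1 and #2; the api object, endpoint path, and payload are illustrative assumptions, not the agent's documented API:

    # Sketch only: `api` is assumed to be an instance of the class defining _put() above.
    payload = b'{"rate_by_service": {"service:,env:": 1.0}}'
    resp = api._put('/v0.4/traces', payload)   # endpoint path is illustrative
    rates = resp.get_json()                    # None when the body is not valid JSON
    if rates is not None:
        print(rates.get('rate_by_service'))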
Code Example #4
File: test_writer.py Project: jwthomp/dd-trace-py
    def test_keep_rate(self):
        statsd = mock.Mock()
        writer_run_periodic = mock.Mock()
        writer_put = mock.Mock()
        writer_put.return_value = Response(status=200)
        writer = AgentWriter(dogstatsd=statsd, report_metrics=False, hostname="asdf", port=1234)
        writer.run_periodic = writer_run_periodic
        writer._put = writer_put

        traces = [
            [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            for i in range(4)
        ]

        traces_too_big = [
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
            for i in range(4)
        ]

        # 1. We write 4 traces successfully.
        for trace in traces:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # No previous drops.
        assert 0.0 == writer._drop_sma.get()
        # 4 traces written.
        assert 4 == len(payload)
        # 100% of traces kept (refers to the past).
        # No traces sent before now so 100% kept.
        for trace in payload:
            assert 1.0 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 2. We fail to write 4 traces because of size limitation.
        for trace in traces_too_big:
            writer.write(trace)
        writer.flush_queue()

        # 50% of traces were dropped historically.
        # 4 successfully written before and 4 dropped now.
        assert 0.5 == writer._drop_sma.get()
        # _put is not called again: the oversized traces were dropped at write time,
        # so there was nothing left to flush.
        writer_put.assert_called_once()

        # 3. We write 2 traces successfully.
        for trace in traces[:2]:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 40% of traces were dropped historically.
        assert 0.4 == writer._drop_sma.get()
        # 2 traces written.
        assert 2 == len(payload)
        # 50% of traces kept (refers to the past).
        # We had 4 successfully written and 4 dropped.
        for trace in payload:
            assert 0.5 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 4. We write 1 trace successfully and fail to write 3.
        writer.write(traces[0])
        for trace in traces_too_big[:3]:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 50% of traces were dropped historically.
        assert 0.5 == writer._drop_sma.get()
        # 1 trace written.
        assert 1 == len(payload)
        # 60% of traces kept (refers to the past).
        # We had 4 successfully written, then 4 dropped, then 2 written.
        for trace in payload:
            assert 0.6 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
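
The asserted percentages follow from the running totals of written and dropped traces. A small sketch that reproduces the arithmetic described in the comments (it mirrors the expected values only, not the internals of writer._drop_sma):

    # (written, dropped) per flush, matching steps 1-4 above.
    steps = [(4, 0), (0, 4), (2, 0), (1, 3)]

    written = dropped = 0
    for new_written, new_dropped in steps:
        # The keep rate tagged on outgoing spans refers to history *before* this flush.
        keep_rate = written / (written + dropped) if (written + dropped) else 1.0
        written += new_written
        dropped += new_dropped
        drop_rate = dropped / (written + dropped)
        print(keep_rate, drop_rate)
    # Step 2 flushes nothing, so its keep rate is never tagged; all other printed
    # values (keep rates 1.0, 0.5, 0.6 and drop rates 0.0, 0.5, 0.4, 0.5) match the
    # assertions above.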