Example #1
    def test_metrics_multi(self):
        statsd = mock.Mock()
        writer = AgentWriter(dogstatsd=statsd,
                             report_metrics=True,
                             hostname="asdf",
                             port=1234)
        for i in range(10):
            writer.write([
                Span(tracer=None,
                     name="name",
                     trace_id=i,
                     span_id=j,
                     parent_id=j - 1 or None) for j in range(5)
            ])
        writer.flush_queue()
        statsd.increment.assert_has_calls([
            mock.call("datadog.tracer.http.requests"),
        ])
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10,
                          tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", 1, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call(
                    "datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )

        statsd.reset_mock()

        for i in range(10):
            writer.write([
                Span(tracer=None,
                     name="name",
                     trace_id=i,
                     span_id=j,
                     parent_id=j - 1 or None) for j in range(5)
            ])
        writer.stop()
        writer.join()

        statsd.increment.assert_has_calls([
            mock.call("datadog.tracer.http.requests"),
        ])
        statsd.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 10,
                          tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 50, tags=[]),
                mock.call("datadog.tracer.http.requests", 1, tags=[]),
                mock.call("datadog.tracer.http.errors", 1, tags=["type:err"]),
                mock.call(
                    "datadog.tracer.http.dropped.bytes", AnyInt(), tags=[]),
            ],
            any_order=True,
        )
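The assertions above (and in the test_flush_log examples further down) rely on matcher helpers such as AnyInt(), AnyStr(), and AnyFloat() that compare equal to any value of the corresponding type. They are utilities from the surrounding test suite, not part of mock; a minimal sketch of the pattern, with the implementation assumed from how the names are used, could look like this:

class AnyInt(object):
    # Sketch of a type matcher: equal to any int, so exact metric values
    # don't have to be pinned down in assert_has_calls().
    def __eq__(self, other):
        return isinstance(other, int)


class AnyFloat(object):
    def __eq__(self, other):
        return isinstance(other, float)


class AnyStr(object):
    def __eq__(self, other):
        return isinstance(other, str)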
Example #2
def test_flush_connection_reset(endpoint_test_reset_server):
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _RESET_PORT))
    if PY3:
        exc_types = (httplib.BadStatusLine, ConnectionResetError)
    else:
        exc_types = (httplib.BadStatusLine,)
    with pytest.raises(exc_types):
        writer._buffer.put(b"foobar")
        writer.flush_queue(raise_exc=True)
Example #3
def test_flush_connection_timeout_connect():
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, 2019))
    if PY3:
        exc_type = OSError
    else:
        exc_type = socket.error
    with pytest.raises(exc_type):
        writer._buffer.put(b"foobar")
        writer.flush_queue(raise_exc=True)
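The PY3 branching in the two tests above exists because Python 3 folded the legacy error types into OSError: socket.error and IOError are plain aliases, and ConnectionResetError is an OSError subclass, while Python 2 keeps them distinct. A quick check (Python 3 only):

# Python 3 only: the old names are aliases or subclasses of OSError, which is
# why a single OSError (Example #3) or adding ConnectionResetError
# (Example #2) is enough on PY3.
import socket

assert socket.error is OSError
assert IOError is OSError
assert issubclass(ConnectionResetError, OSError)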
Example #4
def test_writer_reuse_connections():
    # Ensure connection is not reused
    writer = AgentWriter(agent_url="http://localhost:9126",
                         reuse_connections=True)
    # Do an initial flush to get a connection
    writer.flush_queue()
    assert writer._conn is None
    writer.flush_queue()
    assert writer._conn is None
Example #5
def test_writer_reuse_connections_false():
    # Ensure connection is reused
    writer = AgentWriter(agent_url="http://localhost:9126",
                         reuse_connections=False)
    # Do an initial flush to get a connection
    writer.flush_queue()
    conn = writer._conn
    # And another to potentially have it reset
    writer.flush_queue()
    assert writer._conn is conn
Example #6
def test_flush_queue_raise():
    writer = AgentWriter(agent_url="http://dne:1234")

    # Should not raise
    writer.write([])
    writer.flush_queue(raise_exc=False)

    error = OSError if PY3 else IOError
    with pytest.raises(error):
        writer.write([])
        writer.flush_queue(raise_exc=True)
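The two halves of this test illustrate the raise_exc contract: background flushes are expected to swallow delivery failures (they are only logged), while callers that need to know about them opt in. A hedged usage sketch along the same lines:

# Usage sketch mirroring the test above; "dne" is an unreachable host.
writer = AgentWriter(agent_url="http://dne:1234")

writer.write([])
writer.flush_queue(raise_exc=False)  # failure is swallowed (logged internally)

try:
    writer.write([])
    writer.flush_queue(raise_exc=True)  # failure propagates to the caller
except (OSError, IOError) as exc:  # IOError covers Python 2
    print("agent unreachable: %s" % exc)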
Example #7
def test_flush_log(caplog):
    caplog.set_level(logging.INFO)

    writer = AgentWriter(agent.get_trace_url())

    with mock.patch("ddtrace.internal.writer.log") as log:
        writer.write([])
        writer.flush_queue(raise_exc=True)
        calls = [
            mock.call(logging.DEBUG, "sent %s in %.5fs", AnyStr(), AnyFloat())
        ]
        log.log.assert_has_calls(calls)
Example #8
def test_flush_log(caplog, encoding, monkeypatch):
    monkeypatch.setenv("DD_TRACE_API_VERSION", encoding)

    caplog.set_level(logging.INFO)

    writer = AgentWriter(agent.get_trace_url())

    with mock.patch("ddtrace.internal.writer.log") as log:
        writer.write([])
        writer.flush_queue(raise_exc=True)
        calls = [
            mock.call(
                logging.DEBUG,
                "sent %s in %.5fs to %s",
                AnyStr(),
                AnyFloat(),
                writer.agent_url,
            )
        ]
        log.log.assert_has_calls(calls)
Example #9
def test_flush_log(caplog, encoding, monkeypatch):
    monkeypatch.setenv("DD_TRACE_API_VERSION", encoding)

    caplog.set_level(logging.INFO)

    writer = AgentWriter(agent.get_trace_url())

    with mock.patch("ddtrace.internal.writer.log") as log:
        writer.write([])
        writer.flush_queue(raise_exc=True)
        # for latest agent, default to v0.3 since no priority sampler is set
        expected_encoding = "v0.3" if AGENT_VERSION == "v5" else (encoding
                                                                  or "v0.3")
        calls = [
            mock.call(
                logging.DEBUG,
                "sent %s in %.5fs to %s",
                AnyStr(),
                AnyFloat(),
                "{}/{}/traces".format(writer.agent_url, expected_encoding),
            )
        ]
        log.log.assert_has_calls(calls)
Example #10
def test_agent_url_path(endpoint_assert_path):
    # test without base path
    endpoint_assert_path("/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/" % (_HOST, _PORT))
    writer._buffer.put(b"foobar")
    writer.flush_queue(raise_exc=True)

    # test without base path nor trailing slash
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _PORT))
    writer._buffer.put(b"foobar")
    writer.flush_queue(raise_exc=True)

    # test with a base path
    endpoint_assert_path("/test/v0.")
    writer = AgentWriter(agent_url="http://%s:%s/test/" % (_HOST, _PORT))
    writer._buffer.put(b"foobar")
    writer.flush_queue(raise_exc=True)
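The three flushes check that the writer normalizes the configured agent_url (trailing slash or not, optional base path) before appending its versioned traces endpoint, which is why endpoint_assert_path only asserts a prefix such as "/v0." or "/test/v0.". A rough sketch of that normalization, assuming a v0.4-style path (the exact version string depends on the writer's encoder); this is not the ddtrace code:

def traces_endpoint(agent_url, api_version="v0.4"):
    # Strip a trailing slash so the joined path never contains "//".
    return "%s/%s/traces" % (agent_url.rstrip("/"), api_version)


assert traces_endpoint("http://localhost:8126") == "http://localhost:8126/v0.4/traces"
assert traces_endpoint("http://localhost:8126/") == "http://localhost:8126/v0.4/traces"
assert traces_endpoint("http://localhost:8126/test/") == "http://localhost:8126/test/v0.4/traces"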
Example #11
def test_flush_connection_uds(endpoint_uds_server):
    writer = AgentWriter(agent_url="unix://%s" % endpoint_uds_server.server_address)
    writer._buffer.put(b"foobar")
    writer.flush_queue(raise_exc=True)
Example #12
def test_flush_connection_timeout(endpoint_test_timeout_server):
    writer = AgentWriter(agent_url="http://%s:%s" % (_HOST, _TIMEOUT_PORT))
    with pytest.raises(socket.timeout):
        writer._buffer.put(b"foobar")
        writer.flush_queue(raise_exc=True)
Example #13
    def test_keep_rate(self):
        statsd = mock.Mock()
        writer_run_periodic = mock.Mock()
        writer_put = mock.Mock()
        writer_put.return_value = Response(status=200)
        writer = AgentWriter(agent_url="http://asdf:1234", dogstatsd=statsd, report_metrics=False)
        writer.run_periodic = writer_run_periodic
        writer._put = writer_put

        traces = [
            [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(5)]
            for i in range(4)
        ]

        traces_too_big = [
            [Span(tracer=None, name="a" * 5000, trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(2 ** 10)]
            for i in range(4)
        ]

        # 1. We write 4 traces successfully.
        for trace in traces:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # No previous drops.
        assert 0.0 == writer._drop_sma.get()
        # 4 traces written.
        assert 4 == len(payload)
        # 100% of traces kept (refers to the past).
        # No traces sent before now so 100% kept.
        for trace in payload:
            assert 1.0 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 2. We fail to write 4 traces because of size limitation.
        for trace in traces_too_big:
            writer.write(trace)
        writer.flush_queue()

        # 50% of traces were dropped historically.
        # 4 successfully written before and 4 dropped now.
        assert 0.5 == writer._drop_sma.get()
        # put not called since no new traces are available.
        writer_put.assert_called_once()

        # 3. We write 2 traces successfully.
        for trace in traces[:2]:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 40% of traces were dropped historically.
        assert 0.4 == writer._drop_sma.get()
        # 2 traces written.
        assert 2 == len(payload)
        # 50% of traces kept (refers to the past).
        # We had 4 successfully written and 4 dropped.
        for trace in payload:
            assert 0.5 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)

        # 4. We write 1 trace successfully and fail to write 3.
        writer.write(traces[0])
        for trace in traces_too_big[:3]:
            writer.write(trace)
        writer.flush_queue()

        payload = msgpack.unpackb(writer_put.call_args.args[0])
        # 50% of traces were dropped historically.
        assert 0.5 == writer._drop_sma.get()
        # 1 trace written.
        assert 1 == len(payload)
        # 60% of traces kept (refers to the past).
        # We had 4 successfully written, then 4 dropped, then 2 written.
        for trace in payload:
            assert 0.6 == trace[0]["metrics"].get(KEEP_SPANS_RATE_KEY, -1)
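The _drop_sma and KEEP_SPANS_RATE_KEY numbers above follow from the running ratio of dropped traces to traces offered to the writer: 0/4, 4/8, 4/10, and 7/14 for the drop rate, with the keep rate always reflecting the state before the current flush. A cumulative-ratio sketch reproduces the arithmetic (the real _drop_sma is a windowed moving average, so this only approximates its bookkeeping):

class DropRatio(object):
    # Cumulative dropped/offered ratio; a simplification of a windowed SMA.
    def __init__(self):
        self.dropped = 0
        self.offered = 0

    def add(self, dropped, offered):
        self.dropped += dropped
        self.offered += offered

    def get(self):
        return float(self.dropped) / self.offered if self.offered else 0.0


ratio = DropRatio()
ratio.add(0, 4)  # step 1: 4 traces written
assert ratio.get() == 0.0
ratio.add(4, 4)  # step 2: 4 oversized traces dropped
assert ratio.get() == 0.5
ratio.add(0, 2)  # step 3: 2 traces written
assert ratio.get() == 0.4
ratio.add(3, 4)  # step 4: 1 written, 3 dropped
assert ratio.get() == 0.5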
Example #14
def test_flush_connection_uds(endpoint_uds_server):
    writer = AgentWriter(agent_url="unix://%s" % endpoint_uds_server.server_address)
    writer._encoder.put([Span(None, "foobar")])
    writer.flush_queue(raise_exc=True)
Example #15
def collect(tracer):
    """Collect system and library information into a serializable dict."""

    # The tracer doesn't actually maintain a hostname/port; instead it stores
    # them on the (possibly None) writer, which in turn stores them on an API
    # object. Note that the tracer DOES have hostname and port attributes, but
    # it only sets them to the defaults and ignores them afterwards.
    if tracer.writer and isinstance(tracer.writer, LogWriter):
        agent_url = "AGENTLESS"
        agent_error = None
    else:
        if isinstance(tracer.writer, AgentWriter):
            writer = tracer.writer
        else:
            writer = AgentWriter()

        agent_url = writer.agent_url
        try:
            writer.write([])
            writer.flush_queue(raise_exc=True)
        except Exception as e:
            agent_error = "Agent not reachable at %s. Exception raised: %s" % (
                agent_url, str(e))
        else:
            agent_error = None

    is_venv = in_venv()

    packages_available = {
        p.project_name: p.version
        for p in pkg_resources.working_set
    }
    integration_configs = {}
    for module, enabled in ddtrace.monkey.PATCH_MODULES.items():
        # TODO: this check doesn't work in all cases... we need a mapping
        #       between the module and the library name.
        module_available = module in packages_available
        module_instrumented = module in ddtrace.monkey._PATCHED_MODULES
        module_imported = module in sys.modules

        if enabled:
            # Note that integration configs aren't added until the integration
            # module is imported. This typically occurs as a side-effect of
            # patch().
            # This also doesn't work in all cases since we don't always
            # name the configuration entry the same as the integration module
            # name :/
            config = ddtrace.config._config.get(module, "N/A")
        else:
            config = None

        if module_available:
            integration_configs[module] = dict(
                enabled=enabled,
                instrumented=module_instrumented,
                module_available=module_available,
                module_version=packages_available[module],
                module_imported=module_imported,
                config=config,
            )
        else:
            # Use N/A here to avoid the additional clutter of an entire
            # config dictionary for a module that isn't available.
            integration_configs[module] = "N/A"

    pip_version = packages_available.get("pip", "N/A")

    return dict(
        # Timestamp UTC ISO 8601
        date=datetime.datetime.utcnow().isoformat(),
        # eg. "Linux", "Darwin"
        os_name=platform.system(),
        # eg. 12.5.0
        os_version=platform.release(),
        is_64_bit=sys.maxsize > 2**32,
        architecture=platform.architecture()[0],
        vm=platform.python_implementation(),
        version=ddtrace.__version__,
        lang="python",
        lang_version=platform.python_version(),
        pip_version=pip_version,
        in_virtual_env=is_venv,
        agent_url=agent_url,
        agent_error=agent_error,
        env=ddtrace.config.env or "",
        is_global_tracer=tracer == ddtrace.tracer,
        enabled_env_setting=os.getenv("DATADOG_TRACE_ENABLED"),
        tracer_enabled=tracer.enabled,
        sampler_type=type(tracer.sampler).__name__
        if tracer.sampler else "N/A",
        priority_sampler_type=type(tracer.priority_sampler).__name__
        if tracer.priority_sampler else "N/A",
        service=ddtrace.config.service or "",
        debug=ddtrace.tracer.log.isEnabledFor(logging.DEBUG),
        enabled_cli="ddtrace" in os.getenv("PYTHONPATH", ""),
        analytics_enabled=ddtrace.config.analytics_enabled,
        log_injection_enabled=ddtrace.config.logs_injection,
        health_metrics_enabled=ddtrace.config.health_metrics_enabled,
        dd_version=ddtrace.config.version or "",
        priority_sampling_enabled=tracer.priority_sampler is not None,
        global_tags=os.getenv("DD_TAGS", ""),
        tracer_tags=tags_to_str(tracer.tags),
        integrations=integration_configs,
    )
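A hedged usage sketch for a diagnostic helper like this: dump the returned dict (for example as JSON) when debugging tracer/agent connectivity. The key names follow the return value above; default=str is only defensive, since some config values may not be JSON-serializable.

# Usage sketch; assumes collect() is importable from this module.
import json

import ddtrace

info = collect(ddtrace.tracer)
print(json.dumps(info, indent=2, default=str))

if info["agent_error"]:
    print("Tracer cannot reach the agent:", info["agent_error"])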