Example #1
 def test_asbool(self):
     # ensure the value is properly cast
     eq_(asbool("True"), True)
     eq_(asbool("true"), True)
     eq_(asbool("1"), True)
     eq_(asbool("False"), False)
     eq_(asbool("false"), False)
     eq_(asbool(None), False)
     eq_(asbool(""), False)
     eq_(asbool(True), True)
     eq_(asbool(False), False)
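
The test fixes the contract for asbool: the strings "True", "true", and "1" are truthy; "False", "false", None, and the empty string are falsy; and real booleans pass through. A minimal sketch that satisfies exactly these cases (not ddtrace's actual implementation) might be:

def asbool(value):
    # Booleans pass through unchanged.
    if isinstance(value, bool):
        return value
    # None and the empty string are falsy.
    if not value:
        return False
    # Case-insensitive match on the truthy string spellings.
    return value.strip().lower() in ("true", "1")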
Example #2
def update_patched_modules():
    modules_to_patch = os.environ.get("DATADOG_PATCH_MODULES")
    if not modules_to_patch:
        return

    modules = parse_tags_str(modules_to_patch)
    for module, should_patch in modules.items():
        EXTRA_PATCHED_MODULES[module] = asbool(should_patch)
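
EXTRA_PATCHED_MODULES is keyed by module name, with a boolean telling the patcher whether to instrument that module. A self-contained sketch of the flow, with a stand-in for parse_tags_str (assumption: it splits comma-separated "key:value" pairs, which matches how it is used here):

import os

def parse_tags_str(tags_str):
    # Stand-in for ddtrace.utils.formats.parse_tags_str (assumed behavior).
    return dict(pair.split(":", 1) for pair in tags_str.split(","))

os.environ["DATADOG_PATCH_MODULES"] = "django:true,flask:false"
modules = parse_tags_str(os.environ["DATADOG_PATCH_MODULES"])
print(modules)  # {'django': 'true', 'flask': 'false'}; asbool then maps the values to True/False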
Example #3
    def __attrs_post_init__(self):
        r = self._recorder = recorder.Recorder(
            max_events={
                # Allow to store up to 10 threads for 60 seconds at 100 Hz
                stack.StackSampleEvent:
                10 * 60 * 100,
                stack.StackExceptionSampleEvent:
                10 * 60 * 100,
                # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
                # = (60 seconds / 0.1 seconds)
                memory.MemorySampleEvent:
                int(60 / 0.1),
                # (default buffer size / interval) * export interval
                memalloc.MemoryAllocSampleEvent:
                int((memalloc.MemoryCollector._DEFAULT_MAX_EVENTS /
                     memalloc.MemoryCollector._DEFAULT_INTERVAL) * 60),
                # Do not limit the heap sample size as the number of events is relative to allocated memory anyway
                memalloc.MemoryHeapSampleEvent:
                None,
            },
            default_max_events=int(
                os.environ.get("DD_PROFILING_MAX_EVENTS",
                               recorder.Recorder._DEFAULT_MAX_EVENTS)),
        )

        if formats.asbool(os.environ.get("DD_PROFILING_MEMALLOC", "true")):
            mem_collector = memalloc.MemoryCollector(r)
        else:
            mem_collector = memory.MemoryCollector(r)

        self._collectors = [
            stack.StackCollector(r, tracer=self.tracer),
            mem_collector,
            threading.LockCollector(r, tracer=self.tracer),
        ]

        exporters = self._build_default_exporters(self.tracer, self.url,
                                                  self.tags, self.service,
                                                  self.env, self.version)

        if exporters:
            self._scheduler = scheduler.Scheduler(
                recorder=r,
                exporters=exporters,
                before_flush=self._collectors_snapshot)
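
For reference, the event budgets configured above work out to small, fixed numbers; this is just the arithmetic already in the snippet:

print(10 * 60 * 100)  # 60000: up to 10 threads sampled at 100 Hz for 60 seconds
print(int(60 / 0.1))  # 600: one memory sample every 0.1 s over a 60 s export interval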
Example #4
    def __attrs_post_init__(self):
        r = recorder.Recorder(
            max_events={
                # Allow to store up to 10 threads for 60 seconds at 100 Hz
                stack.StackSampleEvent:
                10 * 60 * 100,
                stack.StackExceptionSampleEvent:
                10 * 60 * 100,
                # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
                # = (60 seconds / 0.1 seconds)
                memory.MemorySampleEvent:
                int(60 / 0.1),
                # (default buffer size / interval) * export interval
                memalloc.MemoryAllocSampleEvent:
                int((64 / 0.5) * 60),
            },
            default_max_events=int(
                os.environ.get("DD_PROFILING_MAX_EVENTS",
                               recorder.Recorder._DEFAULT_MAX_EVENTS)),
        )

        if formats.asbool(os.environ.get("DD_PROFILING_MEMALLOC", "false")):
            mem_collector = memalloc.MemoryCollector(r)
        else:
            mem_collector = memory.MemoryCollector(r)

        self._collectors = [
            stack.StackCollector(r, tracer=self.tracer),
            mem_collector,
            exceptions.UncaughtExceptionCollector(r),
            threading.LockCollector(r),
        ]

        exporters = self._build_default_exporters(self.service, self.env,
                                                  self.version)

        if exporters:
            self._scheduler = scheduler.Scheduler(recorder=r,
                                                  exporters=exporters)
Example #5
    enabled = os.environ.get("DATADOG_TRACE_ENABLED")
    hostname = os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME")
    port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
    priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")

    opts = {}

    if enabled and enabled.lower() == "false":
        opts["enabled"] = False
        patch = False
    if hostname:
        opts["hostname"] = hostname
    if port:
        opts["port"] = int(port)
    if priority_sampling:
        opts["priority_sampling"] = asbool(priority_sampling)

    if opts:
        tracer.configure(**opts)

    if not hasattr(sys, 'argv'):
        sys.argv = ['']

    if patch:
        update_patched_modules()
        from ddtrace import patch_all; patch_all(**EXTRA_PATCHED_MODULES) # noqa

    debug = os.environ.get("DATADOG_TRACE_DEBUG")
    if debug and debug.lower() == "true":
        tracer.debug_logging = True
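
The block above only forwards settings that are actually present in the environment. A self-contained sketch of the same opts-building logic with a stand-in asbool (see Example #1), so it runs without ddtrace:

import os

def asbool(value):
    if isinstance(value, bool):
        return value
    return bool(value) and value.strip().lower() in ("true", "1")

os.environ["DATADOG_TRACE_AGENT_HOSTNAME"] = "agent.local"
os.environ["DATADOG_TRACE_AGENT_PORT"] = "8126"
os.environ["DATADOG_PRIORITY_SAMPLING"] = "true"

opts = {}
hostname = os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME")
port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")
if hostname:
    opts["hostname"] = hostname
if port:
    opts["port"] = int(port)
if priority_sampling:
    opts["priority_sampling"] = asbool(priority_sampling)
print(opts)  # {'hostname': 'agent.local', 'port': 8126, 'priority_sampling': True}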
Example #6
class _ProfilerInstance(service.Service):
    """A instance of the profiler.

    Each process must manage its own instance.

    """

    # User-supplied values
    url = attr.ib(default=None)
    service = attr.ib(factory=_get_service_name)
    tags = attr.ib(factory=dict)
    env = attr.ib(factory=lambda: os.environ.get("DD_ENV"))
    version = attr.ib(factory=lambda: os.environ.get("DD_VERSION"))
    tracer = attr.ib(default=ddtrace.tracer)
    api_key = attr.ib(factory=lambda: os.environ.get("DD_API_KEY"),
                      type=Optional[str])
    agentless = attr.ib(factory=lambda: formats.asbool(
        os.environ.get("DD_PROFILING_AGENTLESS", "False")),
                        type=bool)

    _recorder = attr.ib(init=False, default=None)
    _collectors = attr.ib(init=False, default=None)
    _scheduler = attr.ib(init=False, default=None)

    ENDPOINT_TEMPLATE = "https://intake.profile.{}"

    def _build_default_exporters(self):
        # type: (...) -> List[exporter.Exporter]
        _OUTPUT_PPROF = os.environ.get("DD_PROFILING_OUTPUT_PPROF")
        if _OUTPUT_PPROF:
            return [
                file.PprofFileExporter(_OUTPUT_PPROF),
            ]

        if self.url is not None:
            endpoint = self.url
        elif self.agentless:
            LOG.warning(
                "Agentless uploading is currently for internal usage only and not officially supported. "
                "You should not enable it unless somebody at Datadog instructed you to do so."
            )
            endpoint = self.ENDPOINT_TEMPLATE.format(
                os.environ.get("DD_SITE", "datadoghq.com"))
        else:
            if isinstance(self.tracer.writer, writer.AgentWriter):
                endpoint = self.tracer.writer.agent_url
            else:
                endpoint = agent.get_trace_url()

        if self.agentless:
            endpoint_path = "/v1/input"
        else:
            # Agent mode
            # path is relative because it is appended
            # to the agent base path.
            endpoint_path = "profiling/v1/input"

        return [
            http.PprofHTTPExporter(
                service=self.service,
                env=self.env,
                tags=self.tags,
                version=self.version,
                api_key=self.api_key,
                endpoint=endpoint,
                endpoint_path=endpoint_path,
            ),
        ]

    def __attrs_post_init__(self):
        r = self._recorder = recorder.Recorder(
            max_events={
                # Allow to store up to 10 threads for 60 seconds at 100 Hz
                stack.StackSampleEvent:
                10 * 60 * 100,
                stack.StackExceptionSampleEvent:
                10 * 60 * 100,
                # (default buffer size / interval) * export interval
                memalloc.MemoryAllocSampleEvent:
                int((memalloc.MemoryCollector._DEFAULT_MAX_EVENTS /
                     memalloc.MemoryCollector._DEFAULT_INTERVAL) * 60),
                # Do not limit the heap sample size as the number of events is relative to allocated memory anyway
                memalloc.MemoryHeapSampleEvent:
                None,
            },
            default_max_events=int(
                os.environ.get("DD_PROFILING_MAX_EVENTS",
                               recorder.Recorder._DEFAULT_MAX_EVENTS)),
        )

        self._collectors = [
            stack.StackCollector(r, tracer=self.tracer),
            memalloc.MemoryCollector(r),
            threading.LockCollector(r, tracer=self.tracer),
        ]

        exporters = self._build_default_exporters()

        if exporters:
            self._scheduler = scheduler.Scheduler(
                recorder=r,
                exporters=exporters,
                before_flush=self._collectors_snapshot)

    def _collectors_snapshot(self):
        for c in self._collectors:
            try:
                snapshot = c.snapshot()
                if snapshot:
                    for events in snapshot:
                        self._recorder.push_events(events)
            except Exception:
                LOG.error("Error while snapshoting collector %r",
                          c,
                          exc_info=True)

    def copy(self):
        return self.__class__(service=self.service,
                              env=self.env,
                              version=self.version,
                              tracer=self.tracer,
                              tags=self.tags)

    def _start_service(self):  # type: ignore[override]
        # type: (...) -> None
        """Start the profiler."""
        collectors = []
        for col in self._collectors:
            try:
                col.start()
            except collector.CollectorUnavailable:
                LOG.debug("Collector %r is unavailable, disabling", col)
            except Exception:
                LOG.error("Failed to start collector %r, disabling.",
                          col,
                          exc_info=True)
            else:
                collectors.append(col)
        self._collectors = collectors

        if self._scheduler is not None:
            self._scheduler.start()

    def _stop_service(  # type: ignore[override]
            self,
            flush=True  # type: bool
    ):
        # type: (...) -> None
        """Stop the profiler.

        :param flush: Flush a last profile.
        """
        if self._scheduler is not None:
            self._scheduler.stop()
            # Wait for the export to be over: export might need collectors (e.g., for snapshot) so we can't stop
            # collectors before the possibly running flush is finished.
            self._scheduler.join()
            if flush:
                # Do not stop the collectors before flushing, they might be needed (snapshot)
                self._scheduler.flush()

        for col in reversed(self._collectors):
            try:
                col.stop()
            except service.ServiceStatusError:
                # It's possible some collector failed to start; ignore failures to stop.
                pass

        for col in reversed(self._collectors):
            col.join()
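
In _build_default_exporters above, the agentless endpoint is just ENDPOINT_TEMPLATE filled in with DD_SITE. Reproduced standalone:

import os

ENDPOINT_TEMPLATE = "https://intake.profile.{}"
print(ENDPOINT_TEMPLATE.format(os.environ.get("DD_SITE", "datadoghq.com")))
# -> https://intake.profile.datadoghq.com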
Example #7
import mariadb

from ddtrace import Pin
from ddtrace import config
from ddtrace.contrib.dbapi import TracedConnection
from ddtrace.ext import db
from ddtrace.ext import net
from ddtrace.utils.formats import asbool
from ddtrace.utils.formats import get_env
from ddtrace.utils.wrappers import unwrap
from ddtrace.vendor import wrapt

config._add(
    "mariadb",
    dict(
        trace_fetch_methods=asbool(
            get_env("mariadb", "trace_fetch_methods", default=False)),
        _default_service="mariadb",
    ),
)


def patch():
    if getattr(mariadb, "_datadog_patch", False):
        return
    setattr(mariadb, "_datadog_patch", True)
    wrapt.wrap_function_wrapper("mariadb", "connect", _connect)


def unpatch():
    if getattr(mariadb, "_datadog_patch", False):
        setattr(mariadb, "_datadog_patch", False)
        # Remove the wrapper installed by patch(); `unwrap` is imported above.
        unwrap(mariadb, "connect")
Example #8
"""
Bootstrapping code that is run when using the `ddtrace-run` Python entrypoint
Add all monkey-patching that needs to run by default here
"""

import os
import imp
import sys
import logging

from ddtrace.utils.formats import asbool, get_env
from ddtrace.internal.logger import get_logger
from ddtrace import constants

logs_injection = asbool(get_env('logs', 'injection'))
DD_LOG_FORMAT = '%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s'.format(
    '[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] ' if logs_injection else ''
)

if logs_injection:
    # immediately patch logging if trace id injected
    from ddtrace import patch
    patch(logging=True)

debug = os.environ.get('DATADOG_TRACE_DEBUG')

# Set here a default logging format for basicConfig

# DEV: Once basicConfig is called here, future calls to it cannot be used to
# change the formatter since it applies the formatter to the root handler only
# upon initializing it the first time.
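
The two variants of DD_LOG_FORMAT that the conditional above can produce are plain %-style logging formats; nothing ddtrace-specific is needed to inspect them:

base = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s"
print(base.format(""))  # logs injection disabled
print(base.format("[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] "))  # enabled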
Example #9
"""

import logging

import wrapt

from ddtrace import Pin
from ddtrace.ext import AppTypes, sql
from ddtrace.settings import config
from ddtrace.utils.formats import asbool, get_env

log = logging.getLogger(__name__)

config._add(
    'dbapi2',
    dict(trace_fetch_methods=asbool(
        get_env('dbapi2', 'trace_fetch_methods', 'false')), ))


class TracedCursor(wrapt.ObjectProxy):
    """ TracedCursor wraps a psql cursor and traces it's queries. """
    def __init__(self, cursor, pin):
        super(TracedCursor, self).__init__(cursor)
        pin.onto(self)
        name = pin.app or 'sql'
        self._self_datadog_name = '{}.query'.format(name)
        self._self_last_execute_operation = None

    def _trace_method(self, method, name, resource, extra_tags, *args,
                      **kwargs):
        """
        Internal function to trace the call to the underlying cursor method
Example #10
log = get_logger(__name__)

config._add(
    "django",
    dict(
        _default_service="django",
        cache_service_name=get_env("django", "cache_service_name") or "django",
        database_service_name_prefix=get_env("django",
                                             "database_service_name_prefix",
                                             default=""),
        database_service_name=get_env("django",
                                      "database_service_name",
                                      default=""),
        distributed_tracing_enabled=True,
        instrument_middleware=asbool(
            get_env("django", "instrument_middleware", default=True)),
        instrument_databases=True,
        instrument_caches=True,
        analytics_enabled=None,  # None allows the value to be overridden by the global config
        analytics_sample_rate=None,
        trace_query_string=None,  # Default to global config
        include_user_name=True,
        use_handler_resource_format=asbool(
            get_env("django", "use_handler_resource_format", default=False)),
        use_legacy_resource_format=asbool(
            get_env("django", "use_legacy_resource_format", default=False)),
    ),
)

propagator = HTTPPropagator()
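
Note the two fallback idioms above: get_env("django", "cache_service_name") or "django" replaces any falsy result, including an empty string, while default="" only applies when the variable is missing. A sketch with a stand-in get_env (assumption: the real helper reads a DD_-prefixed variable built from its parts):

import os

def get_env(*parts, **kwargs):
    # Stand-in for ddtrace.utils.formats.get_env (assumed naming scheme).
    return os.environ.get("DD_" + "_".join(parts).upper(), kwargs.get("default"))

os.environ["DD_DJANGO_CACHE_SERVICE_NAME"] = ""
print(get_env("django", "cache_service_name") or "django")      # '' is falsy -> 'django'
print(get_env("django", "database_service_name", default=""))  # unset -> ''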
Example #11
from ddtrace.utils.formats import asbool  # noqa
from ddtrace.utils.formats import get_env
from ddtrace.utils.formats import parse_tags_str

if config.logs_injection:
    # immediately patch logging if trace id injected
    from ddtrace import patch

    patch(logging=True)

# DEV: Once basicConfig is called here, future calls to it cannot be used to
# change the formatter since it applies the formatter to the root handler only
# upon initializing it the first time.
# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550
# Debug mode from the tracer will do a basicConfig so only need to do this otherwise
call_basic_config = asbool(os.environ.get("DD_CALL_BASIC_CONFIG", "true"))
if not debug_mode and call_basic_config:
    if config.logs_injection:
        logging.basicConfig(format=DD_LOG_FORMAT)
    else:
        logging.basicConfig()

log = get_logger(__name__)

EXTRA_PATCHED_MODULES = {
    "bottle": True,
    "django": True,
    "falcon": True,
    "flask": True,
    "pylons": True,
    "pyramid": True,
    hostname = os.environ.get("DD_AGENT_HOST",
                              os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME"))
    port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
    priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")

    opts = {}

    if enabled and enabled.lower() == "false":
        opts["enabled"] = False
        patch = False
    if hostname:
        opts["hostname"] = hostname
    if port:
        opts["port"] = int(port)
    if priority_sampling:
        opts["priority_sampling"] = asbool(priority_sampling)

    opts["collect_metrics"] = asbool(get_env("runtime_metrics", "enabled"))

    if opts:
        tracer.configure(**opts)

    if patch:
        update_patched_modules()
        from ddtrace import patch_all

        patch_all(**EXTRA_PATCHED_MODULES)

    if "DATADOG_ENV" in os.environ:
        tracer.set_tags({constants.ENV_KEY: os.environ["DATADOG_ENV"]})
Example #13
"""
Bootstrapping code that is run when using the `ddtrace-run` Python entrypoint
Add all monkey-patching that needs to run by default here
"""

import os
import imp
import sys
import logging

from ddtrace.utils.formats import asbool, get_env

logs_injection = asbool(get_env('logs', 'injection'))
DD_LOG_FORMAT = '%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s'.format(
    '[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] '
    if logs_injection else '')

debug = os.environ.get("DATADOG_TRACE_DEBUG")

# Set here a default logging format for basicConfig

# DEV: Once basicConfig is called here, future calls to it cannot be used to
# change the formatter since it applies the formatter to the root handler only
# upon initializing it the first time.
# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550
if debug and debug.lower() == "true":
    logging.basicConfig(level=logging.DEBUG, format=DD_LOG_FORMAT)
else:
    logging.basicConfig(format=DD_LOG_FORMAT)

log = logging.getLogger(__name__)
Example #14
    enabled = os.environ.get("DATADOG_TRACE_ENABLED")
    hostname = os.environ.get("DD_AGENT_HOST", os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME"))
    port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
    priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")

    opts = {}

    if enabled and enabled.lower() == "false":
        opts["enabled"] = False
        patch = False
    if hostname:
        opts["hostname"] = hostname
    if port:
        opts["port"] = int(port)
    if priority_sampling:
        opts["priority_sampling"] = asbool(priority_sampling)

    opts["collect_metrics"] = asbool(os.environ.get(METRICS_ENABLED_ENV_VAR, True))

    key_intersection = set(_LIGHTSTEP_ENV_VARS.keys()) & set(os.environ.keys())
    if key_intersection:
        tracer.set_tags({_LIGHTSTEP_ENV_VARS[key]: os.environ[key] for key in key_intersection})

    if "DD_TRACE_GLOBAL_TAGS" in os.environ:
        add_global_tags(tracer)

    if opts:
        tracer.configure(**opts)

    if patch:
        update_patched_modules()
Example #15
from ddtrace.propagation.http import HTTPPropagator
from ddtrace.utils import get_argument_value
from ddtrace.utils.formats import asbool
from ddtrace.utils.formats import get_env
from ddtrace.utils.wrappers import unwrap as _u
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w


if typing.TYPE_CHECKING:
    from ddtrace import Span
    from ddtrace.vendor.wrapt import BoundFunctionWrapper

config._add(
    "httpx",
    {
        "distributed_tracing": asbool(get_env("httpx", "distributed_tracing", default=True)),
        "split_by_domain": asbool(get_env("httpx", "split_by_domain", default=False)),
    },
)


def _url_to_str(url):
    # type: (httpx.URL) -> str
    """
    Helper to convert the httpx.URL parts from bytes to a str
    """
    scheme, host, port, raw_path = url.raw
    url = scheme + b"://" + host
    if port is not None:
        url += b":" + ensure_binary(str(port))
    url += raw_path
Example #16
from .. import trace_utils
from .compat import get_resolver, user_is_authenticated
from . import utils, conf


log = get_logger(__name__)

config._add(
    "django",
    dict(
        _default_service="django",
        cache_service_name=get_env("django", "cache_service_name") or "django",
        database_service_name_prefix=get_env("django", "database_service_name_prefix", default=""),
        database_service_name=get_env("django", "database_service_name", default=""),
        distributed_tracing_enabled=True,
        instrument_middleware=asbool(get_env("django", "instrument_middleware", default=True)),
        instrument_databases=True,
        instrument_caches=True,
        analytics_enabled=None,  # None allows the value to be overridden by the global config
        analytics_sample_rate=None,
        trace_query_string=None,  # Default to global config
        include_user_name=True,
        use_handler_resource_format=get_env("django", "use_handler_resource_format", default=False),
    ),
)

propagator = HTTPPropagator()


def patch_conn(django, conn):
    def cursor(django, pin, func, instance, args, kwargs):
Example #17
def main():
    parser = argparse.ArgumentParser(
        description=USAGE,
        prog="ddtrace-run",
        usage="ddtrace-run <your usual python command>",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument("command",
                        nargs=argparse.REMAINDER,
                        type=str,
                        help="Command string to execute.")
    parser.add_argument("-d",
                        "--debug",
                        help="enable debug mode (disabled by default)",
                        action="store_true")
    parser.add_argument("-i",
                        "--info",
                        help="print library info useful for debugging",
                        action="store_true")
    parser.add_argument("-p",
                        "--profiling",
                        help="enable profiling (disabled by default)",
                        action="store_true")
    parser.add_argument("-v",
                        "--version",
                        action="version",
                        version="%(prog)s " + ddtrace.__version__)
    args = parser.parse_args()

    if args.profiling:
        os.environ["DD_PROFILING_ENABLED"] = "true"

    debug_mode = args.debug or asbool(get_env("trace", "debug", default=False))

    if debug_mode:
        logging.basicConfig(level=logging.DEBUG)
        os.environ["DD_TRACE_DEBUG"] = "true"

    if args.info:
        # Inline imports for performance.
        import pprint

        from ddtrace.internal.debug import collect

        pprint.pprint(collect(ddtrace.tracer))
        sys.exit(0)

    root_dir = os.path.dirname(ddtrace.__file__)
    log.debug("ddtrace root: %s", root_dir)

    bootstrap_dir = os.path.join(root_dir, "bootstrap")
    log.debug("ddtrace bootstrap: %s", bootstrap_dir)

    _add_bootstrap_to_pythonpath(bootstrap_dir)
    log.debug("PYTHONPATH: %s", os.environ["PYTHONPATH"])
    log.debug("sys.path: %s", sys.path)

    if not args.command:
        parser.print_help()
        sys.exit(1)

    # Find the executable path
    executable = spawn.find_executable(args.command[0])
    if not executable:
        print("ddtrace-run: failed to find executable '%s'.\n" %
              args.command[0])
        parser.print_usage()
        sys.exit(1)

    log.debug("program executable: %s", executable)

    if os.path.basename(executable) == "uwsgi":
        print((
            "ddtrace-run has known compatibility issues with uWSGI where the "
            "tracer is not started properly in uWSGI workers which can cause "
            "broken behavior. It is recommended you remove ddtrace-run and "
            "update your uWSGI configuration following "
            "https://ddtrace.readthedocs.io/en/stable/advanced_usage.html#uwsgi."
        ))

    try:
        # Raises OSError for permissions errors in Python 2
        #        PermissionError for Python 3
        os.execl(executable, executable, *args.command[1:])
    except (OSError, PermissionError):
        print(
            "ddtrace-run: executable '%s' does not have executable permissions.\n"
            % executable)
        parser.print_usage()
        sys.exit(1)

    sys.exit(0)
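
The tail of main() is the standard launcher pattern: resolve the target on PATH, then replace the current process with os.execl, which only returns on failure. A stripped-down sketch (using shutil.which, the modern equivalent of the deprecated distutils spawn.find_executable used above):

import os
import shutil

executable = shutil.which("echo")
if executable:
    # Replaces the current process image; nothing after this line runs on success.
    os.execl(executable, executable, "hello")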
Example #18
    enabled = os.environ.get("DATADOG_TRACE_ENABLED")
    hostname = os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME")
    port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
    priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")

    opts = {}

    if enabled and enabled.lower() == "false":
        opts["enabled"] = False
        patch = False
    if hostname:
        opts["hostname"] = hostname
    if port:
        opts["port"] = int(port)
    if priority_sampling:
        opts["priority_sampling"] = asbool(priority_sampling)

    if opts:
        tracer.configure(**opts)

    if patch:
        update_patched_modules()
        from ddtrace import patch_all
        patch_all(**EXTRA_PATCHED_MODULES)  # noqa

    debug = os.environ.get("DATADOG_TRACE_DEBUG")
    if debug and debug.lower() == "true":
        tracer.debug_logging = True

    if 'DATADOG_ENV' in os.environ:
        tracer.set_tags({"env": os.environ["DATADOG_ENV"]})
Example #19
        EXTRA_PATCHED_MODULES[module] = asbool(should_patch)


try:
    from ddtrace import tracer

    patch = True

    # Respect DATADOG_* environment variables in global tracer configuration
    # TODO: these variables are deprecated; use utils method and update our documentation
    # correct prefix should be DD_*
    enabled = os.environ.get("DATADOG_TRACE_ENABLED")
    hostname = os.environ.get("DD_AGENT_HOST", os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME"))
    port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
    priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")
    profiling = asbool(os.environ.get("DD_PROFILING_ENABLED", False))

    if profiling:
        import ddtrace.profiling.auto  # noqa: F401

    opts = {}

    if enabled and enabled.lower() == "false":
        opts["enabled"] = False
        patch = False
    if hostname:
        opts["hostname"] = hostname
    if port:
        opts["port"] = int(port)
    if priority_sampling:
        opts["priority_sampling"] = asbool(priority_sampling)
"""
Bootstrapping code that is run when using the `ddtrace-run` Python entrypoint
Add all monkey-patching that needs to run by default here
"""

import os
import imp
import sys
import logging

from ddtrace.utils.formats import asbool, get_env
from ddtrace.internal.logger import get_logger
from ddtrace import constants

logs_injection = asbool(get_env("logs", "injection"))
DD_LOG_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s".format(
    "[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] "
    if logs_injection else "")

if logs_injection:
    # immediately patch logging if trace id injected
    from ddtrace import patch

    patch(logging=True)

debug = os.environ.get("DATADOG_TRACE_DEBUG")

# Set here a default logging format for basicConfig

# DEV: Once basicConfig is called here, future calls to it cannot be used to
# change the formatter since it applies the formatter to the root handler only