def test_patch_before_import(self):
    from ddtrace import Pin, patch
    patch(celery=True)

    import celery
    app = celery.Celery()
    assert Pin.get_from(app) is not None
def test_patch_after_import(self):
    import celery

    from ddtrace import Pin, patch
    patch(celery=True)

    app = celery.Celery()
    assert Pin.get_from(app) is not None
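# The two tests above verify that patching works regardless of import order.
# A minimal usage sketch, assuming only ddtrace and celery are installed; the
# service name is illustrative:
from ddtrace import Pin, patch

patch(celery=True)

import celery

app = celery.Celery()
# The integration attaches a Pin to the app; override it to rename the
# service reported for this app's spans.
Pin.override(app, service="my-celery-app")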
def main():
    if os.environ.get('DDTRACE_EXTRA_PATCH') == 'true':
        # The ddtrace/Django integration only patches Django internals; it
        # doesn't patch other libraries.
        # Manually patching them very early here seems like a less intrusive
        # approach than running the whole app under `ddtrace-run`.
        import ddtrace
        ddtrace.patch(requests=True, botocore=True, redis=True)

        # Dirty monkey patch to prevent boto3 from creating many thread pools
        try:
            from boto3.s3 import transfer
        except ImportError:
            pass
        else:
            def create_transfer_manager(*args, **kwargs):
                return transfer.TransferManager(
                    *args, **kwargs, executor_cls=transfer.NonThreadedExecutor
                )
            transfer.create_transfer_manager = create_transfer_manager

    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "feedsubs.settings.dev")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
import grpc
from ddtrace import patch


def run():
    # If not patched yet, you can patch grpc specifically
    patch(grpc=True)

    # `HelloStub` and `HelloRequest` come from the protobuf-generated
    # modules for this service (not shown here).
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = HelloStub(channel)
        response = stub.SayHello(HelloRequest(name="test"))
        print(response)
def test_futures_double_instrumentation(self):
    # it should not double wrap the `ThreadPoolExecutor.submit` method if
    # `futures` is already instrumented
    from ddtrace import patch
    patch(futures=True)

    from concurrent.futures import ThreadPoolExecutor
    from wrapt import BoundFunctionWrapper

    fn_wrapper = getattr(ThreadPoolExecutor.submit, '__wrapped__', None)
    ok_(not isinstance(fn_wrapper, BoundFunctionWrapper))
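# The check above works because wrapt-based wrappers expose the original
# callable via `__wrapped__`. A minimal sketch of that idempotency guard,
# assuming only wrapt semantics:
from concurrent.futures import ThreadPoolExecutor

from ddtrace import patch

patch(futures=True)

# After patching, `submit` carries a `__wrapped__` attribute pointing at the
# original, unwrapped method; a second patch pass can detect this and skip.
original = getattr(ThreadPoolExecutor.submit, "__wrapped__", None)
print("instrumented:", original is not None)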
def test_regression_logging_in_context(tmpdir, logs_injection, debug_mode, patch_logging):
    """
    When logs injection is enabled and the logger is patched
        When a parent span closes before a child
            The application does not deadlock due to context lock acquisition
    """
    f = tmpdir.join("test.py")
    f.write(
        """
import ddtrace
ddtrace.patch(logging=%s)

s1 = ddtrace.tracer.trace("1")
s2 = ddtrace.tracer.trace("2")
s1.finish()
s2.finish()
""".lstrip()
        % str(patch_logging)
    )
    p = subprocess.Popen(
        [sys.executable, "test.py"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=str(tmpdir),
        env=dict(
            DD_TRACE_LOGS_INJECTION=str(logs_injection).lower(),
            DD_TRACE_DEBUG=str(debug_mode).lower(),
        ),
    )
    try:
        p.wait(timeout=2)
    except TypeError:
        # timeout argument added in Python 3.3
        p.wait()
    assert p.returncode == 0
def test_patch(self):
    """
    Patching `requests` before `gevent` monkeypatching.

    This is a regression test for
    https://github.com/DataDog/dd-trace-py/issues/506

    When using `ddtrace-run` along with `requests` and `gevent`, our patching
    causes `requests` and `urllib3` to get loaded before `gevent` has a
    chance to monkey patch. This causes `gevent` to show a warning and, under
    certain versions, a maximum recursion exception to be raised.
    """
    # Assert none of our modules have been imported yet
    # DEV: This regression test depends on being able to control the import
    #      order of these modules
    # DEV: This is not entirely necessary but is a nice safeguard
    self.assertNotIn('ddtrace', sys.modules)
    self.assertNotIn('gevent', sys.modules)
    self.assertNotIn('requests', sys.modules)
    self.assertNotIn('urllib3', sys.modules)

    try:
        # Import ddtrace and patch only `requests`
        # DEV: We do not need to patch `gevent` for the exception to occur
        from ddtrace import patch
        patch(requests=True)

        # Import gevent and monkey patch
        from gevent import monkey
        monkey.patch_all()

        # This is typically what will fail if `requests` (or `urllib3`)
        # gets loaded before running `monkey.patch_all()`
        # DEV: We are testing that no exception gets raised
        # DEV: We **MUST** use an HTTPS request, as that is what causes the issue
        import requests
        requests.get('https://httpbin.org/get')
    finally:
        # Ensure we always unpatch `requests` when we are done
        from ddtrace.contrib.requests import unpatch
        unpatch()
def inject_correlation_ids():
    """
    Override the formatter of LambdaLoggerHandler to inject Datadog trace and
    span ids for log correlation.

    For manual injection into custom log handlers, use
    `ddtrace.helpers.get_correlation_ids` to retrieve the correlation ids
    (trace_id, span_id).
    """
    # Override the log format of the AWS-provided LambdaLoggerHandler
    root_logger = logging.getLogger()
    for handler in root_logger.handlers:
        if handler.__class__.__name__ == 'LambdaLoggerHandler':
            handler.setFormatter(logging.Formatter(
                '[%(levelname)s]\t%(asctime)s.%(msecs)dZ\t%(aws_request_id)s\t'
                '[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s]\t%(message)s\n',
                '%Y-%m-%dT%H:%M:%S'
            ))

    # Patch `logging.Logger.makeRecord` to actually inject the correlation ids
    patch(logging=True)

    logger.debug('logs injection configured')
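# A usage sketch for the helper above, assuming it runs at module import time
# inside an AWS Lambda function; the handler name and return shape are
# illustrative:
import logging

inject_correlation_ids()  # call once at cold start, before handling events


def handler(event, context):
    # Every record emitted here now carries dd.trace_id / dd.span_id.
    logging.getLogger(__name__).info("processing event")
    return {"statusCode": 200}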
import os

from datadog import initialize, statsd

initialize(statsd_host=os.environ['DOGSTATSD_HOST_IP'], statsd_port=8125)
statsd.increment('flaskapp.times.started')

from flask import Flask

# trace stuff
from ddtrace import tracer, patch, Pin
from ddtrace.contrib.flask import TraceMiddleware

tracer.configure(
    hostname=os.environ['DD_AGENT_SERVICE_HOST'],
    port=os.environ['DD_AGENT_SERVICE_PORT'],
)
patch(sqlalchemy=True, logging=True)

app = Flask(__name__)

# patch traceware
traced_app = TraceMiddleware(app, tracer, service="my-flask-app", distributed_tracing=False)

# postgres stuff
POSTGRES = {
    'user': '******',
    'pw': 'flask',
    'db': 'docker',
    'host': os.environ['POSTGRES_SERVICE_HOST'],
import boto3
import requests
import scrapy
from ddtrace import tracer, patch

patch(botocore=True)


class MainSpider(scrapy.Spider):
    name = "main"

    ITEM_PIPELINES = {
        "tracing.pipelines.MainPipeline": 300,
    }

    def start_requests(self):
        urls = [
            "http://quotes.toscrape.com/page/1/",
            "http://quotes.toscrape.com/page/2/",
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        for quote in response.css("div.quote"):
            with tracer.start_span("producer.parse") as span:
                # yield one item per quote; a bare `return` here would stop
                # after the first quote
                yield {
                    "content": quote.css("span.text::text").get(),
                    "span": span,
                }
"""Oz""" from __future__ import absolute_import, division, print_function, with_statement, unicode_literals import collections import logging import os try: from ddtrace import patch except ImportError: logging.getLogger("oz").debug( "Skipped ddtrace because it is not available.", exc_info=True) else: patch(tornado=True) import tornado.web import tornado.options import tornado.util import tornado.log # On trigger execution, trigger listeners can return this to notify the # request handler to cancel execution of the next functions in the trigger # chain. break_trigger = object() # Mapping of action name -> callback _actions = {} # Mapping of uimodule name -> class _uimodules = {}
# sentry support
SENTRY_DSN = os.environ.get("SENTRY_DSN")
if SENTRY_DSN:
    INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
    RAVEN_CONFIG = {
        'dsn': SENTRY_DSN,
    }

# apm support
APM_ID = os.environ.get("APM_ID")
APM_TOKEN = os.environ.get("APM_TOKEN")
if APM_ID and APM_TOKEN:
    INSTALLED_APPS += ('ddtrace.contrib.django', )
    DATADOG_TRACE = {
        'TAGS': {
            'env': os.getenv('BKPAAS_ENVIRONMENT', 'dev'),
            'apm_id': APM_ID,
            'apm_token': APM_TOKEN,
        },
    }
    # requests for APIGateway/ESB
    # skip pymysql since the default Django database is already traced
    try:
        import requests  # noqa
        from ddtrace import patch
        patch(requests=True, pymysql=False)
    except Exception as e:
        print("patch failed for requests and pymysql: %s" % e)
import random

from flask import Response, request as flask_request
from ddtrace import tracer, patch, config
from ddtrace.contrib.flask import TraceMiddleware

from bootstrap import create_app, db
from models import Network, Sensor

sensors = []

# Tracer configuration
tracer.configure(hostname='agent')
tracer.set_tags({'env': 'workshop'})
patch(requests=True)

# enable distributed tracing for requests
# to send headers (globally)
config.requests['distributed_tracing'] = True

app = create_app()
traced_app = TraceMiddleware(app, tracer, service='sensors-api', distributed_tracing=True)


@app.route('/')
def hello():
    return Response({'Hello from Sensors': 'world'},
import os

import ddtrace
import django
import psycopg2
import redis
from django.core.wsgi import get_wsgi_application
from gevent import monkey
from whitenoise import WhiteNoise

from common.apm import tracer

ddtrace.patch_all()
ddtrace.patch(gevent=True)
ddtrace.Pin.override(django, tracer=tracer)
ddtrace.Pin.override(psycopg2, tracer=tracer)
ddtrace.Pin.override(redis, tracer=tracer)

monkey.patch_all()

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "turnout.settings")

application = get_wsgi_application()
application = WhiteNoise(application)
application.add_files("/app/static", prefix="static/")  # type: ignore
import os

import sqlalchemy
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.sql import select

from flask import Flask

# trace stuff
from ddtrace import tracer, patch, Pin
from ddtrace.contrib.flask import TraceMiddleware

tracer.configure(
    hostname=os.environ['DD_AGENT_SERVICE_HOST'],
    port=os.environ['DD_AGENT_SERVICE_PORT'],
)
patch(sqlalchemy=True)

app = Flask(__name__)

# patch traceware
traced_app = TraceMiddleware(app, tracer, service="my-flask-app", distributed_tracing=False)

# postgres stuff
POSTGRES = {
    'user': '******',
    'pw': 'flask',
    'db': 'docker',
    'host': os.environ['POSTGRES_SERVICE_HOST'],
# coding=utf-8
"""Define basic fixtures."""

# First patch httplib
try:
    from ddtrace import config, patch, tracer

    config.httplib["distributed_tracing"] = True
    patch(httplib=True)
except ImportError:
    tracer = None

import importlib
import json
import logging
import os
import re
import sys
import time
import warnings
from datetime import datetime

import pytest
from pytest_bdd import (
    given,
    parsers,
    scenarios,
    then,
    when,
)
import imp
import logging
import os
import sys

from ddtrace.utils.formats import asbool, get_env
from ddtrace.internal.logger import get_logger

logs_injection = asbool(get_env('logs', 'injection'))
DD_LOG_FORMAT = '%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s'.format(
    '[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] ' if logs_injection else ''
)

if logs_injection:
    # immediately patch logging if trace id injection is enabled
    from ddtrace import patch
    patch(logging=True)  # noqa

debug = os.environ.get("DATADOG_TRACE_DEBUG")

# Set a default logging format here for basicConfig
# DEV: Once basicConfig is called here, future calls to it cannot be used to
#      change the formatter since it applies the formatter to the root handler
#      only upon initializing it the first time.
# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550
if debug and debug.lower() == "true":
    logging.basicConfig(level=logging.DEBUG, format=DD_LOG_FORMAT)
else:
    logging.basicConfig(format=DD_LOG_FORMAT)

log = get_logger(__name__)
from flask import Flask
from ddtrace import tracer, patch

patch(sqlalchemy=True, sqlite3=True)

from models import Thought, db

# configure the tracer so that it reaches the Datadog Agent
# available in another container
tracer.configure(hostname='agent')


def create_app():
    """Create a Flask application"""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    initialize_database(app, db)
    return app


def initialize_database(app, db):
    """Drop and restore the database in a consistent state"""
    with app.app_context():
        db.drop_all()
        db.create_all()
        db.session.add(
            Thought(
                quote='My religion consists of a humble admiration of the illimitable superior spirit who reveals himself in the slight details we are able to perceive with our frail and feeble mind.',
    'version': 1,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'ddtrace': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
})

if __name__ == '__main__':
    patch(pymongo=True)
    client = pymongo.MongoClient()
    pin = Pin.override(client, service="mongo-master----------------s")
    db = client.test_database
    collection = db.test_collection
    collection.insert_one({"name": "Luca"})
import logging
import os
from random import random
from time import sleep

from datadog import initialize, statsd
from ddtrace import patch, tracer
from fastapi import FastAPI

from log_formatter import CustomJsonFormatter


class CustomLogger(logging.Logger):
    propagate = False


logger = CustomLogger("fastapi")
logHandler = logging.StreamHandler()
formatter = CustomJsonFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)

patch(fastapi=True, logging=True)
initialize(statsd_host=os.getenv("DATADOG_HOST"), statsd_port=8125, host_name="fastapi")
tracer.configure(hostname=os.getenv("DATADOG_HOST"), port=8126, enabled=True)

app = FastAPI()


@statsd.timed("fastapi.views.check.timer", tags=["function:do_check"])
def do_check():
    sleep(random())


@app.get("/check")
from ddtrace import tracer, patch

patch(sqlalchemy=True, redis=True, requests=True)

from flask import Flask

from models import Beer, Donut, db

# configure the tracer so that it reaches the Datadog Agent
# available in another container
tracer.configure(hostname='agent')


def create_app():
    """Create a Flask application"""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    initialize_database(app, db)
    return app


def initialize_database(app, db):
    """Drop and restore the database in a consistent state"""
    with app.app_context():
        db.drop_all()
        db.create_all()
        # create beers
        db.session.add(Beer('ipa', 10))
"""Oz""" from __future__ import absolute_import, division, print_function, with_statement, unicode_literals import collections import logging import os try: from ddtrace import patch except ImportError: logging.getLogger("oz").debug("Skipped ddtrace because it is not available.", exc_info=True) else: patch(tornado=True) import tornado.web import tornado.options import tornado.util import tornado.log # On trigger execution, trigger listeners can return this to notify the # request handler to cancel execution of the next functions in the trigger # chain. break_trigger = object() # Mapping of action name -> callback _actions = {} # Mapping of uimodule name -> class _uimodules = {}
import json

import requests
import requests.exceptions
import rollbar
from bs4 import BeautifulSoup
from datadog import statsd
from ddtrace import patch
from ddtrace import tracer
from time import sleep

from utility import nb_logging

patch(requests=True)

logger = nb_logging.setup_logger('NewsblurConnector')


class NewsblurConnector:

    def __init__(self, config, username, password):
        self.cookies = None
        self.config = config
        self.verify = config.get('VERIFY')
        self.nb_endpoint = config.get('NB_ENDPOINT')
        self.credentials = {'username': username, 'password': password}

    @statsd.timed('nb.NewsblurConnector.login')
    def login(self):
        """ log in and save cookies """
from __future__ import absolute_import

from os.path import abspath, dirname, join
import os, sys

from django.conf import settings
from celery import Celery
from ddtrace import patch

patch(celery=True)

PROJECT_ROOT = abspath(dirname(__file__))
PORTAL_ROOT = join(PROJECT_ROOT, "portal")

BUILTIN_FIXUPS = frozenset([
    'juloserver.julo.fixups_custom:fixup',
])
CELERY_LOADER = "juloserver.routing.celery_loader_custom:AppLoader"

sys.path.insert(0, PORTAL_ROOT)
sys.path.insert(1, join(PORTAL_ROOT, "authentication"))
sys.path.insert(2, join(PORTAL_ROOT, "core"))
sys.path.insert(3, join(PORTAL_ROOT, "configuration"))
sys.path.insert(4, join(PORTAL_ROOT, "object"))
sys.path.insert(5, join(PORTAL_ROOT, "process"))

celery_app = Celery('juloserver',
                    broker="amqp://*****:*****@localhost:5672//",
                    fixups=BUILTIN_FIXUPS,
                    loader=CELERY_LOADER)
import argparse

import jinja2
from trafaret_config import commandline

import aiohttp_jinja2
from aiohttp import web

from aiohttpdemo_polls.db import close_pg, init_pg
from aiohttpdemo_polls.middlewares import setup_middlewares
from aiohttpdemo_polls.routes import setup_routes
from aiohttpdemo_polls.utils import TRAFARET

# tracing code
from ddtrace import patch
from ddtrace.contrib.aiohttp import trace_app

patch(aiohttp=True)


def init(loop, argv):
    ap = argparse.ArgumentParser()
    commandline.standard_argparse_options(ap, default_config='./config/polls.yaml')
    #
    # define your command-line arguments here
    #
    options = ap.parse_args(argv)

    config = commandline.config_from_options(options, TRAFARET)

    # setup application and extensions
    app = web.Application(loop=loop)
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'ddtrace': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
})

# If not patched yet, you can patch mysql specifically
patch(mysql=True)

# This will report a span with the default settings
conn = mysql.connector.connect(
    user="******",
    password="******",
    host="localhost",
    port=3306,
    database="test",
)
cursor = conn.cursor()
cursor.execute("SHOW TABLES")

# Use a pin to specify metadata related to this connection
Pin.override(conn, service='mysql-users')
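# The pin set above can be read back, e.g. to verify which service tag spans
# from this connection will carry; a minimal sketch assuming the `conn`
# object from the snippet above:
from ddtrace import Pin

pin = Pin.get_from(conn)
assert pin is not None
assert pin.service == 'mysql-users'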
""" auto patch things. """ # manual test for monkey patching import logging import sys # project import ddtrace # allow logging logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) ddtrace.tracer.debug_logging = True # Patch nothing ddtrace.patch() # Patch all except Redis ddtrace.patch_all(redis=False) # Patch Redis ddtrace.patch(redis=True)
import requests

from flask import Flask, Response, jsonify
from flask import request as flask_request
from flask_caching import Cache

from ddtrace import tracer, patch
from ddtrace.contrib.flask import TraceMiddleware

from bootstrap import create_app
from models import Thought
from time import sleep

patch(redis=True)

app = create_app()
cache = Cache(config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_HOST': 'redis'})
cache.init_app(app)

traced_app = TraceMiddleware(app, tracer, service='thinker-microservice', distributed_tracing=True)

# Tracer configuration
tracer.configure(hostname='agent')


@tracer.wrap(name='think')
@cache.memoize(30)
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
try:
    import datadog_agent

    if datadog_agent.get_config('integration_tracing'):
        from ddtrace import patch

        # handle thread monitoring as an additional option
        # See: http://pypi.datadoghq.com/trace/docs/other_integrations.html#futures
        if datadog_agent.get_config('integration_tracing_futures'):
            patch(requests=True, futures=True)
        else:
            patch(requests=True)
except ImportError:
    # Tracing integrations are only available with Agent 6
    pass
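# Why the `futures` option above matters: patching `futures` propagates the
# active trace context into ThreadPoolExecutor workers. A minimal sketch,
# with illustrative span names:
from concurrent.futures import ThreadPoolExecutor

from ddtrace import patch, tracer

patch(futures=True)


def work():
    # Parents to "request" because the context was captured at submit().
    with tracer.trace("work"):
        return 42


with tracer.trace("request"):
    with ThreadPoolExecutor(max_workers=1) as executor:
        assert executor.submit(work).result() == 42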
from flask import Flask
from ddtrace import tracer, patch

patch(sqlalchemy=True, sqlite3=True, psycopg=True)

from models import Pump, db

# configure the tracer so that it reaches the Datadog Agent
# available in another container
tracer.configure(hostname='agent')

import os

DB_USERNAME = os.environ['POSTGRES_USER']
DB_PASSWORD = os.environ['POSTGRES_PASSWORD']


def create_app():
    """Create a Flask application"""
    app = Flask(__name__)
    # app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
    app.config['SQLALCHEMY_DATABASE_URI'] = (
        'postgresql://' + DB_USERNAME + ':' + DB_PASSWORD + '@db/' + DB_USERNAME
    )
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    initialize_database(app, db)
    return app


def initialize_database(app, db):
    """Drop and restore the database in a consistent state"""
    with app.app_context():
        db.drop_all()
from ddtrace import patch

patch(tornado=True)
patch(asyncio=True)

import asyncio  # noqa: E402
import functools  # noqa: E402
import logging  # noqa: E402
import signal  # noqa: E402
import traceback  # noqa: E402
import pprint  # noqa: E402

import tornado.netutil  # noqa: E402
import tornado.process  # noqa: E402
from datadog import initialize  # noqa: E402
from tornado.httpserver import HTTPServer  # noqa: E402
from tornado.web import Application  # noqa: E402


def main():
    try:
        sockets = tornado.netutil.bind_sockets(9000)
        # The Tornado AsyncIO integration needs to fork processes before an
        # asyncio event loop gets initiated in each process:
        # http://www.tornadoweb.org/en/stable/asyncio.html
        # https://stackoverflow.com/questions/42767635
        tornado.process.fork_processes(1)

        # Initialize Datadog
        initialize(statsd_host='localhost', statsd_port='8125')

        DD_SETTINGS = {
            'datadog_trace': {
import logging
import os

from ddtrace.utils.formats import asbool, get_env, parse_tags_str
from ddtrace.internal.logger import get_logger
from ddtrace import config, constants

DD_LOG_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s".format(
    "[dd.service=%(dd.service)s dd.env=%(dd.env)s dd.version=%(dd.version)s"
    " dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] "
    if config.logs_injection
    else ""
)

if config.logs_injection:
    # immediately patch logging if trace id injection is enabled
    from ddtrace import patch

    patch(logging=True)

debug = os.environ.get("DATADOG_TRACE_DEBUG")

# Set a default logging format here for basicConfig
# DEV: Once basicConfig is called here, future calls to it cannot be used to
#      change the formatter since it applies the formatter to the root handler
#      only upon initializing it the first time.
# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550
if debug and debug.lower() == "true":
    logging.basicConfig(level=logging.DEBUG, format=DD_LOG_FORMAT)
else:
    logging.basicConfig(format=DD_LOG_FORMAT)

log = get_logger(__name__)
#!/usr/bin/env python
# Run this with
#   PYTHONPATH=. DJANGO_SETTINGS_MODULE=testsite.settings testsite/tornado_main.py
# Serves by default at
#   http://localhost:8080/hello-tornado and
#   http://localhost:8080/hello-django

# from ddtrace import patch_all
# patch_all()
from ddtrace import tracer, patch

patch(tornado=True)
patch(django=True)

import logging

logging.basicConfig(level=logging.DEBUG)
tracer.debug_logging = True

from tornado.options import options, define, parse_command_line

import django.core.handlers.wsgi
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.wsgi
from django.conf import settings
import tornado.gen