Example no. 1
0
def _get_lambda_stats():
    """Return the module-level ThreadStats instance, creating it lazily.

    NOTE: the lazy initialization below is not thread-safe; the expectation
    is that _LambdaDecorator calls this first, before concurrent use.
    """
    global _lambda_stats
    if _lambda_stats is not None:
        return _lambda_stats
    stats = ThreadStats()
    stats.start(flush_in_greenlet=False, flush_in_thread=False)
    _lambda_stats = stats
    return _lambda_stats
Example no. 2
0
class Statsd(object):
    """Thin wrapper around datadog's ThreadStats that tags every metric
    with the current environment (``settings.ENV``)."""

    def __init__(self):
        self.__dog = ThreadStats()

    def start(self, **kwargs):
        """Start the underlying ThreadStats reporter; kwargs pass through."""
        self.__dog.start(**kwargs)

    def flush(self):
        """Force a flush of any buffered metrics."""
        self.__dog.flush()

    def increment_counter(self, name, value=1):
        """Increment counter *name* by *value*, tagged with the environment."""
        env_tags = [settings.ENV]
        return self.__dog.increment(name, value, tags=env_tags)

    def gauge(self, name, value):
        """Record gauge *name* at *value*, tagged with the environment."""
        env_tags = [settings.ENV]
        return self.__dog.gauge(name, value, tags=env_tags)

    def get_timed(self, name):
        """Return a ``timed`` helper for *name*, tagged with the environment."""
        env_tags = [settings.ENV]
        return self.__dog.timed(name, tags=env_tags)
Example no. 3
0
import os
import json
import time
import base64
import logging

import boto3
from datadog import api
from datadog.threadstats import ThreadStats
from datadog_lambda.tags import get_enhanced_metrics_tags, tag_dd_lambda_layer

# Namespace prefix for AWS Lambda "enhanced" metrics emitted by this module.
ENHANCED_METRICS_NAMESPACE_PREFIX = "aws.lambda.enhanced"

logger = logging.getLogger(__name__)

# Module-level stats aggregator, started once at import time so metrics can
# be buffered and flushed for the lifetime of the process.
lambda_stats = ThreadStats()
lambda_stats.start()


def lambda_metric(metric_name, value, timestamp=None, tags=None):
    """
    Submit a data point to Datadog distribution metrics.
    https://docs.datadoghq.com/graphing/metrics/distributions/

    When DD_FLUSH_TO_LOG is True, write metric to log, and
    wait for the Datadog Log Forwarder Lambda function to submit
    the metrics asynchronously.

    Otherwise, the metrics will be submitted to the Datadog API
    periodically and at the end of the function execution in a
    background thread.
Example no. 4
0
                        should_flush = False
                if should_flush:
                    _lambda_stats.flush(float("inf"))

    def __call__(self, *args, **kwargs):
        """Invoke the wrapped handler between _enter/_close bookkeeping.

        Always runs _LambdaDecorator._close() on the way out, even when the
        wrapped function raises. Emits a DeprecationWarning pointing callers
        at the relocated wrapper.
        """
        message = "datadog_lambda_wrapper() is relocated to https://git.io/fjy8o"
        warnings.warn(message, DeprecationWarning)
        _LambdaDecorator._enter()
        try:
            return self.func(*args, **kwargs)
        finally:
            _LambdaDecorator._close()


# Module-level stats instance, flushed synchronously (no greenlet and no
# background flush thread) — presumably because Lambda may freeze background
# threads between invocations; confirm against the decorator's flush logic.
_lambda_stats = ThreadStats()
_lambda_stats.start(flush_in_greenlet=False, flush_in_thread=False)
# Public alias kept so existing imports of datadog_lambda_wrapper keep working.
datadog_lambda_wrapper = _LambdaDecorator


def lambda_metric(*args, **kwargs):
    """ Alias to expose only distributions for lambda functions"""
    _lambda_stats.distribution(*args, **kwargs)


def _init_api_client():
    """ No-op GET to initialize the requests connection with DD's endpoints

    The goal here is to make the final flush faster:
    we keep alive the Requests session, this means that we can re-use the connection
    The consequence is that the HTTP Handshake, which can take hundreds of ms,
Example no. 5
0
    def __init__(self):
        """Point the local dogstatsd client at the extension's UDP endpoint."""
        initialize(statsd_host="127.0.0.1", statsd_port=8125)

    def distribution(self, metric_name, value, tags=None, timestamp=None):
        """Forward a distribution sample to the local dogstatsd client.

        :param metric_name: metric name to submit under.
        :param value: sample value.
        :param tags: optional list of tags; defaults to an empty list.
        :param timestamp: accepted for interface parity but ignored here —
            the underlying ``statsd.distribution`` call is not given it.

        Fix: the original used the mutable default ``tags=[]``, which is
        shared across calls; ``None`` with an explicit fallback avoids that.
        """
        statsd.distribution(metric_name, value, tags=[] if tags is None else tags)

    def flush(self, value):
        """No-op flush: samples go straight to dogstatsd, so there is
        nothing buffered to flush; *value* is accepted and ignored."""


# Choose the metrics backend once at import time: when the extension is in
# use (per should_use_extension — presumably detects the Datadog Lambda
# extension; verify upstream), route through the local dogstatsd wrapper,
# otherwise fall back to an in-process ThreadStats aggregator.
lambda_stats = None
if should_use_extension:
    lambda_stats = StatsDWrapper()
else:
    lambda_stats = ThreadStats()
    lambda_stats.start()


def lambda_metric(metric_name,
                  value,
                  timestamp=None,
                  tags=None,
                  force_async=False):
    """
    Submit a data point to Datadog distribution metrics.
    https://docs.datadoghq.com/graphing/metrics/distributions/

    When DD_FLUSH_TO_LOG is True, write metric to log, and
    wait for the Datadog Log Forwarder Lambda function to submit
    the metrics asynchronously.
Example no. 6
0
 def __init__(self):
     """Create the private (name-mangled) ThreadStats aggregator backing
     this wrapper; the rest of the class is not visible in this chunk."""
     self.__dog = ThreadStats()