def __init__(self, api_key, app_key, flush_interval=10, namespace="aplt"):

        datadog.initialize(api_key=api_key, app_key=app_key)
        self._client = ThreadStats()
        self._flush_interval = flush_interval
        self._host = get_hostname()
        self._namespace = namespace
Example #2
    def test_timed_decorator(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        @dog.timed("timed.test")
        def func(a, b, c=1, d=1):
            """docstring"""
            return (a, b, c, d)

        nt.assert_equal(func.__name__, "func")
        nt.assert_equal(func.__doc__, "docstring")

        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        nt.assert_equal(result, (1, 2, 1, 3))
        time.sleep(1)  # Argh. I hate this.
        dog.flush()
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 8)
        (_, _, _, _, avg, count, max_, min_) = metrics
        nt.assert_equal(avg["metric"], "timed.test.avg")
        nt.assert_equal(count["metric"], "timed.test.count")
        nt.assert_equal(max_["metric"], "timed.test.max")
        nt.assert_equal(min_["metric"], "timed.test.min")
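
Most of these tests swap the client's reporter for an in-memory reporter so flushed data can be asserted on without hitting the Datadog API. A minimal sketch of such a reporter, assuming ThreadStats calls flush_metrics/flush_events on it (hook names as in the datadog test utilities):

class MemoryReporter(object):
    """Collects flushed metrics and events in memory for assertions."""

    def __init__(self):
        self.metrics = []
        self.events = []

    def flush_metrics(self, metrics):
        # ThreadStats hands over the rolled-up metric series here.
        self.metrics += metrics

    def flush_events(self, events):
        self.events += events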
Example #3
 def __init__(self):
     app_name = settings.DATADOG_APP_NAME
     self.stats = ThreadStats()
     self.stats.start()
     self.error_metric = '{0}.errors'.format(app_name)
     self.timing_metric = '{0}.request_time'.format(app_name)
     self.event_tags = [app_name, 'exception']
Example #4
def configure_metrics(datadog_api_key=DATADOG_API_KEY,
                      datadog_app_key=DATADOG_APP_KEY,
                      engine_name=ENGINE_NAME,
                      os_type=OS_TYPE,
                      poly_work=os.getenv('POLY_WORK', 'local'),
                      source=os.getenv('HOSTNAME', "local"),
                      tags=None,
                      disabled=False) -> ThreadStats:
    """
    Initialize Datadog metric collectors when the datadog env keys are set
    :return: datadog.ThreadStats
    """
    if datadog_api_key or datadog_app_key:
        if tags is None:
            tags = [
                f'poly_work:{poly_work}',
                f'engine_name:{engine_name}',
                f'pod_name:{source}',
                f'os:{os_type}',
                'testing' if poly_work == 'local' else '',
            ]
        options = {
            'api_key': datadog_api_key,
            'app_key': datadog_app_key,
            'host_name': source,
        }

        initialize(**options)

    else:
        disabled = True

    metrics_collector = ThreadStats(namespace='polyswarm', constant_tags=tags)
    metrics_collector.start(disabled=disabled)
    return metrics_collector
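
A hedged usage sketch of configure_metrics above; the metric name and tag are invented for illustration:

collector = configure_metrics()
collector.increment('bounties.settled', tags=['stage:demo'])  # hypothetical metric
collector.flush()  # force a flush instead of waiting for the interval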
Example #5
    def __init__(self, *args, **kwargs):
        super().__init__(*args,
                         shard_count=3,
                         game=discord.Game(name="rp!help for help!"),
                         **kwargs)
        self.owner_id = 122739797646245899
        self.lounge_id = 166349353999532035
        self.uptime = datetime.datetime.utcnow()
        self.commands_used = Counter()
        self.server_commands = Counter()
        self.socket_stats = Counter()
        self.shutdowns = []
        self.lotteries = dict()

        self.logger = logging.getLogger('discord')  # Discord Logging
        self.logger.setLevel(logging.INFO)
        self.handler = logging.FileHandler(filename=os.path.join(
            'resources', 'discord.log'),
                                           encoding='utf-8',
                                           mode='w')
        self.handler.setFormatter(
            logging.Formatter(
                '%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
        self.logger.addHandler(self.handler)

        self.session = aiohttp.ClientSession(loop=self.loop)
        self.shutdowns.append(self.shutdown)

        with open("resources/auth", 'r') as af:
            self._auth = json.loads(af.read())

        self.db: db.Database = db.Database(self)
        self.di: data.DataInteraction = data.DataInteraction(self)
        self.default_udata = data.default_user
        self.default_servdata = data.default_server
        self.rnd = "1234567890abcdefghijklmnopqrstuvwxyz"

        icogs = [
            cogs.admin.Admin(self),
            cogs.team.Team(self),
            cogs.economy.Economy(self),
            cogs.inventory.Inventory(self),
            cogs.settings.Settings(self),
            cogs.misc.Misc(self),
            cogs.characters.Characters(self),
            cogs.pokemon.Pokemon(self),
            cogs.groups.Groups(self),
            cogs.user.User(self),
            cogs.salary.Salary(self)
        ]
        for cog in icogs:
            self.add_cog(cog)

        self.loop.create_task(self.start_serv())
        self.loop.create_task(self.db.connect())

        init_dd(self._auth[3], self._auth[4])
        self.stats = ThreadStats()
        self.stats.start()
Example #6
class DatadogStatsLogger(LoggingMixin):
    def __init__(self, datadog_conn_id='datadog_default'):
        super().__init__()
        conn = BaseHook.get_connection(datadog_conn_id)
        self.api_key = conn.extra_dejson.get('api_key', None)
        self.app_key = conn.extra_dejson.get('app_key', None)
        self.source_type_name = conn.extra_dejson.get('source_type_name', None)

        # If the host is populated, it will use that hostname instead
        # for all metric submissions
        self.host = conn.host

        if self.api_key is None:
            raise AirflowException('api_key must be specified in the '
                                   'Datadog connection details')

        self.log.info('Setting up api keys for Datadog')
        self.stats = None
        initialize(api_key=self.api_key, app_key=self.app_key)

    def incr(self, stat, count=1, rate=1, tags=None):
        self.log.info('datadog incr: {} {} {} {}'.format(stat, count, rate, tags))
        self.stats.increment(stat, value=count, sample_rate=rate,
                             tags=self._format_tags(tags))

    def decr(self, stat, count=1, rate=1, tags=None):
        self.log.info('datadog decr: {} {} {} {}'.format(stat, count, rate, tags))
        self.stats.decrement(stat, value=count, sample_rate=rate,
                             tags=self._format_tags(tags))

    def gauge(self, stat, value, rate=1, delta=False, tags=None):
        self.log.info('datadog gauge: {} {} {} {} {}'.format(stat, value, rate, delta, tags))
        if delta:
            self.log.warning('Deltas are unsupported in Datadog')
        self.stats.gauge(stat, value, sample_rate=rate,
                         tags=self._format_tags(tags))

    def timing(self, stat, delta, rate=1, tags=None):
        self.log.info('datadog timing: {} {} {}'.format(stat, delta, tags))
        if isinstance(delta, timedelta):
            delta = delta.total_seconds() * 1000.
        self.stats.timing(stat, delta, sample_rate=rate,
                          tags=self._format_tags(tags))

    @classmethod
    def _format_tags(cls, tags):
        if not tags:
            return None
        return ['{}:{}'.format(k, v) for k, v in tags.items()]

    def start(self):
        self.stats = ThreadStats(namespace='airflow')
        self.stats.start()
        register(self.stop)

    def stop(self):
        unregister(self.stop)
        self.stats.stop()
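
A hedged sketch of driving this logger; the connection id must exist in Airflow with api_key/app_key in its Extra JSON, and the metric names are made up:

from datetime import timedelta

stats_logger = DatadogStatsLogger()  # reads the 'datadog_default' connection
stats_logger.start()                 # creates the ThreadStats client, registers stop()
stats_logger.incr('task.success', tags={'dag_id': 'example_dag'})
stats_logger.timing('task.duration', timedelta(seconds=3),
                    tags={'dag_id': 'example_dag'})
stats_logger.stop()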
Example #7
    def __init__(self, api_key, app_key, hostname, flush_interval=10,
                 namespace="autopush"):

        datadog.initialize(api_key=api_key, app_key=app_key,
                           host_name=hostname)
        self._client = ThreadStats()
        self._flush_interval = flush_interval
        self._host = hostname
        self._namespace = namespace
Example #8
    def test_histogram_percentiles(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()
        # Sample every integer in [0, 100) many times, so the computed
        # percentiles should land close to their nominal values.
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                dog.histogram('percentiles', i, 1000.0)
        dog.flush(2000.0)
        metrics = reporter.metrics

        def assert_almost_equal(i, j, e=1):
            # Floating point math?
            assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

        nt.assert_equal(len(metrics), 8)
        p75, p85, p95, p99, _, _, _, _ = self.sort_metrics(metrics)
        nt.assert_equal(p75['metric'], 'percentiles.75percentile')
        nt.assert_equal(p75['points'][0][0], 1000.0)
        assert_almost_equal(p75['points'][0][1], 75, 8)
        assert_almost_equal(p85['points'][0][1], 85, 8)
        assert_almost_equal(p95['points'][0][1], 95, 8)
        assert_almost_equal(p99['points'][0][1], 99, 8)
Example #9
class DatadogMetrics(object):
    """DataDog Metric backend"""
    def __init__(self, api_key, app_key, hostname, flush_interval=10,
                 namespace="autopush"):

        datadog.initialize(api_key=api_key, app_key=app_key,
                           host_name=hostname)
        self._client = ThreadStats()
        self._flush_interval = flush_interval
        self._host = hostname
        self._namespace = namespace

    def _prefix_name(self, name):
        return "%s.%s" % (self._namespace, name)

    def start(self):
        self._client.start(flush_interval=self._flush_interval,
                           roll_up_interval=self._flush_interval)

    def increment(self, name, count=1, **kwargs):
        self._client.increment(self._prefix_name(name), count, host=self._host,
                               **kwargs)

    def gauge(self, name, count, **kwargs):
        self._client.gauge(self._prefix_name(name), count, host=self._host,
                           **kwargs)

    def timing(self, name, duration, **kwargs):
        self._client.timing(self._prefix_name(name), value=duration,
                            host=self._host, **kwargs)
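
A hedged usage sketch of the backend above; keys and hostname are placeholders:

metrics = DatadogMetrics(api_key='<api-key>', app_key='<app-key>',
                         hostname='push-node-1')
metrics.start()  # flushes every flush_interval (10s by default)
metrics.increment('notification.sent')  # submitted as autopush.notification.sent
metrics.timing('notification.delivery_time', duration=0.123)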
Example #10
class DatadogMiddleware(object):
    DD_TIMING_ATTRIBUTE = '_dd_start_time'

    def __init__(self):
        app_name = settings.DATADOG_APP_NAME
        self.stats = ThreadStats()
        self.stats.start()
        self.error_metric = '{0}.errors'.format(app_name)
        self.timing_metric = '{0}.request_time'.format(app_name)
        self.event_tags = [app_name, 'exception']

    def process_request(self, request):
        setattr(request, self.DD_TIMING_ATTRIBUTE, time.time())

    def process_response(self, request, response):
        """ Submit timing metrics from the current request """
        if not hasattr(request, self.DD_TIMING_ATTRIBUTE):
            return response

        # Calculate request time and submit to Datadog
        request_time = time.time() - getattr(request, self.DD_TIMING_ATTRIBUTE)
        tags = self._get_metric_tags(request)
        self.stats.histogram(self.timing_metric, request_time, tags=tags)

        return response

    def process_exception(self, request, exception):
        """ Captures Django view exceptions as Datadog events """
        if isinstance(exception, Http404):
            # Don't report 404 not found
            return

        # Get a formatted version of the traceback.
        exc = traceback.format_exc()

        # Make request.META json-serializable.
        szble = {}
        for k, v in request.META.items():
            # basestring and long are Python 2 built-ins (this is Py2-era code).
            if isinstance(v, (list, basestring, bool, int, float, long)):
                szble[k] = v
            else:
                szble[k] = str(v)

        title = 'Exception from {0}'.format(request.path)
        text = "Traceback:\n@@@\n{0}\n@@@\nMetadata:\n@@@\n{1}\n@@@" \
            .format(exc, json.dumps(szble, indent=2))

        # Submit the exception to Datadog
        self.stats.event(title,
                         text,
                         alert_type='error',
                         aggregation_key=request.path,
                         tags=self.event_tags)

        # Increment our errors metric
        tags = self._get_metric_tags(request)
        self.stats.increment(self.error_metric, tags=tags)

    def _get_metric_tags(self, request):
        return ['path:{0}'.format(request.path)]
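
This middleware uses the old-style process_request/process_response hooks, so under Django's pre-1.10 middleware protocol it would be registered roughly as below; the dotted path is hypothetical:

# settings.py (sketch; the module path is invented)
DATADOG_APP_NAME = 'myapp'
MIDDLEWARE_CLASSES = [
    # ...
    'myapp.middleware.DatadogMiddleware',
]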
Example #11
class DatadogMetrics(object):
    """DataDog Metric backend"""
    def __init__(self, api_key, app_key, flush_interval=10,
                 namespace="autopush"):

        datadog.initialize(api_key=api_key, app_key=app_key)
        self._client = ThreadStats()
        self._flush_interval = flush_interval
        self._host = get_hostname()
        self._namespace = namespace

    def _prefix_name(self, name):
        return "%s.%s" % (self._namespace, name)

    def start(self):
        self._client.start(flush_interval=self._flush_interval,
                           roll_up_interval=self._flush_interval)

    def increment(self, name, count=1, **kwargs):
        self._client.increment(self._prefix_name(name), count, host=self._host,
                               **kwargs)

    def gauge(self, name, count, **kwargs):
        self._client.gauge(self._prefix_name(name), count, host=self._host,
                           **kwargs)

    def timing(self, name, duration, **kwargs):
        self._client.timing(self._prefix_name(name), value=duration,
                            host=self._host, **kwargs)
Example #12
 def __init__(self,
              monitor_host,
              monitor_port,
              interval,
              datadog=True,
              elastic=False):
     self.host = monitor_host
     self.port = monitor_port
     self.interval = interval
     self.s = None
     self.datadog = datadog
     self.init_datadog()
     self.stats = ThreadStats()
     self.stats.start(flush_interval=interval, flush_in_thread=False)
     self.tags = ['server:{}'.format(os.uname()[1]), 'type:openvpn']
Example #13
 def test_disabled_mode(self):
     dog = ThreadStats()
     dog.start(disabled=True, flush_interval=1, roll_up_interval=1)
     reporter = dog.reporter = MemoryReporter()
     dog.gauge('testing', 1, timestamp=1000)
     dog.gauge('testing', 2, timestamp=1000)
     dog.flush(2000.0)
     assert not reporter.metrics
Example #14
 def __init__(self):
     app_name = settings.DATADOG_APP_NAME
     self.stats = ThreadStats()
     self.stats.start()
     self.error_metric = '{0}.errors'.format(app_name)
     self.timing_metric = '{0}.request_time'.format(app_name)
     self.event_tags = [app_name, 'exception']
Example #15
 def test_default_host_and_device(self):
     dog = ThreadStats()
     dog.start(roll_up_interval=1, flush_in_thread=False)
     reporter = dog.reporter = MemoryReporter()
     dog.gauge('my.gauge', 1, 100.0)
     dog.flush(1000)
     metric = reporter.metrics[0]
     assert not metric['device']
     assert not metric['host']
Example #16
 def test_custom_host_and_device(self):
     dog = ThreadStats()
     dog.start(roll_up_interval=1, flush_in_thread=False, device='dev')
     reporter = dog.reporter = MemoryReporter()
     dog.gauge('my.gauge', 1, 100.0, host='host')
     dog.flush(1000)
     metric = reporter.metrics[0]
     nt.assert_equal(metric['device'], 'dev')
     nt.assert_equal(metric['host'], 'host')
Example #17
    def __init__(self, api_key, app_key, flush_interval=10,
                 namespace="aplt"):

        datadog.initialize(api_key=api_key, app_key=app_key)
        self._client = ThreadStats()
        self._flush_interval = flush_interval
        self._host = get_hostname()
        self._namespace = namespace
Example #18
class DatadogMiddleware(object):
    DD_TIMING_ATTRIBUTE = '_dd_start_time'

    def __init__(self):
        app_name = settings.DATADOG_APP_NAME
        self.stats = ThreadStats()
        self.stats.start()
        self.error_metric = '{0}.errors'.format(app_name)
        self.timing_metric = '{0}.request_time'.format(app_name)
        self.event_tags = [app_name, 'exception']

    def process_request(self, request):
        setattr(request, self.DD_TIMING_ATTRIBUTE, time.time())

    def process_response(self, request, response):
        """ Submit timing metrics from the current request """
        if not hasattr(request, self.DD_TIMING_ATTRIBUTE):
            return response

        # Calculate request time and submit to Datadog
        request_time = time.time() - getattr(request, self.DD_TIMING_ATTRIBUTE)
        tags = self._get_metric_tags(request)
        self.stats.histogram(self.timing_metric, request_time, tags=tags)

        return response

    def process_exception(self, request, exception):
        """ Captures Django view exceptions as Datadog events """
        if isinstance(exception, Http404):
            # Don't report 404 not found
            return

        # Get a formatted version of the traceback.
        exc = traceback.format_exc()

        # Make request.META json-serializable.
        szble = {}
        for k, v in request.META.items():
            # basestring and long are Python 2 built-ins (this is Py2-era code).
            if isinstance(v, (list, basestring, bool, int, float, long)):
                szble[k] = v
            else:
                szble[k] = str(v)

        title = 'Exception from {0}'.format(request.path)
        text = "Traceback:\n@@@\n{0}\n@@@\nMetadata:\n@@@\n{1}\n@@@" \
            .format(exc, json.dumps(szble, indent=2))

        # Submit the exception to Datadog
        self.stats.event(title, text, alert_type='error', aggregation_key=request.path, tags=self.event_tags)

        # Increment our errors metric
        tags = self._get_metric_tags(request)
        self.stats.increment(self.error_metric, tags=tags)

    def _get_metric_tags(self, request):
        return ['path:{0}'.format(request.path)]
Example #19
    def test_histogram_percentiles(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()
        # Sample every integer in [0, 100) many times, so the computed
        # percentiles should land close to their nominal values.
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                dog.histogram("percentiles", i, 1000.0)
        dog.flush(2000.0)
        metrics = reporter.metrics

        def assert_almost_equal(i, j, e=1):
            # Floating point math?
            assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

        nt.assert_equal(len(metrics), 8)
        p75, p85, p95, p99, _, _, _, _ = self.sort_metrics(metrics)
        nt.assert_equal(p75["metric"], "percentiles.75percentile")
        nt.assert_equal(p75["points"][0][0], 1000.0)
        assert_almost_equal(p75["points"][0][1], 75, 8)
        assert_almost_equal(p85["points"][0][1], 85, 8)
        assert_almost_equal(p95["points"][0][1], 95, 8)
        assert_almost_equal(p99["points"][0][1], 99, 8)
Example #20
    def test_metric_namespace(self):
        """
        Namespace prefixes all metric names.
        """
        # Set up ThreadStats with a namespace
        dog = ThreadStats(namespace="foo")
        dog.start(roll_up_interval=1, flush_in_thread=False)
        dog.reporter = self.reporter

        # Send a few metrics
        dog.gauge("gauge", 20, timestamp=100.0)
        dog.increment("counter", timestamp=100.0)
        dog.flush(200.0)

        # Metric names are prefixed with the namespace
        self.assertMetric(count=2)
        self.assertMetric(name="foo.gauge", count=1)
        self.assertMetric(name="foo.counter", count=1)
Example #21
 def test_disabled_mode(self):
     dog = ThreadStats()
     reporter = dog.reporter = MemoryReporter()
     dog.start(disabled=True, flush_interval=1, roll_up_interval=1)
     dog.gauge("testing", 1, timestamp=1000)
     dog.gauge("testing", 2, timestamp=1000)
     dog.flush(2000.0)
     assert not reporter.metrics
Example #22
 def test_default_host_and_device(self):
     dog = ThreadStats()
     dog.start(roll_up_interval=1, flush_in_thread=False)
     reporter = dog.reporter = MemoryReporter()
     dog.gauge("my.gauge", 1, 100.0)
     dog.flush(1000)
     metric = reporter.metrics[0]
     assert not metric["device"]
     assert not metric["host"]
Example #23
 def test_custom_host_and_device(self):
     dog = ThreadStats()
     dog.start(roll_up_interval=1, flush_in_thread=False, device="dev")
     reporter = dog.reporter = MemoryReporter()
     dog.gauge("my.gauge", 1, 100.0, host="host")
     dog.flush(1000)
     metric = reporter.metrics[0]
     nt.assert_equal(metric["device"], "dev")
     nt.assert_equal(metric["host"], "host")
Example #24
 def test_custom_host_and_device(self):
     dog = ThreadStats()
     dog.start(roll_up_interval=1, flush_in_thread=False, device='dev')
     reporter = dog.reporter = MemoryReporter()
     dog.gauge('my.gauge', 1, 100.0, host='host')
     dog.flush(1000)
     metric = reporter.metrics[0]
     nt.assert_equal(metric['device'], 'dev')
     nt.assert_equal(metric['host'], 'host')
Example #25
    def __init__(self):
        # prepare the statsd client
        options = {
            'api_key': settings.DATADOG_API_KEY,
        }
        initialize(**options)

        # start the statsd thread
        disabled = not settings.DATADOG_API_KEY
        self.statsd = ThreadStats()
        self.statsd.start(flush_interval=1, roll_up_interval=1, disabled=disabled)
        logger.debug('statsd thread initialized, disabled: %s', disabled)
Example #26
    def test_timed_decorator(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        @dog.timed('timed.test')
        def func(a, b, c=1, d=1):
            """docstring"""
            return (a, b, c, d)

        assert func.__name__ == 'func'
        assert func.__doc__ == 'docstring'

        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        assert result == (1, 2, 1, 3)
        time.sleep(1)  # Argh. I hate this.
        dog.flush()
        metrics = self.sort_metrics(reporter.metrics)
        assert len(metrics) == 8
        (_, _, _, _, avg, count, max_, min_) = metrics
        assert avg['metric'] == 'timed.test.avg'
        assert count['metric'] == 'timed.test.count'
        assert max_['metric'] == 'timed.test.max'
        assert min_['metric'] == 'timed.test.min'
Example #27
    def test_timed_decorator(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        @dog.timed('timed.test')
        def func(a, b, c=1, d=1):
            """docstring"""
            return (a, b, c, d)

        nt.assert_equal(func.__name__, 'func')
        nt.assert_equal(func.__doc__, 'docstring')

        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        nt.assert_equal(result, (1, 2, 1, 3))
        time.sleep(1)  # Argh. I hate this.
        dog.flush()
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 8)
        (_, _, _, _, avg, count, max_, min_) = metrics
        nt.assert_equal(avg['metric'], 'timed.test.avg')
        nt.assert_equal(count['metric'], 'timed.test.count')
        nt.assert_equal(max_['metric'], 'timed.test.max')
        nt.assert_equal(min_['metric'], 'timed.test.min')
Example #28
class DatadogMetricsBackend(MetricsBackend):
    def __init__(self, prefix=None, **kwargs):
        # TODO(dcramer): it'd be nice if the initialize call wasn't a global
        initialize(**kwargs)
        self._stats = ThreadStats()
        self._stats.start()
        super(DatadogMetricsBackend, self).__init__(prefix=prefix)

    def __del__(self):
        self._stats.stop()

    def incr(self, key, amount=1, sample_rate=1):
        self._stats.increment(self._get_key(key), amount, sample_rate=sample_rate)

    def timing(self, key, value, sample_rate=1):
        self._stats.timing(self._get_key(key), value, sample_rate=sample_rate)
Example #29
class DatadogMetricsBackend(MetricsBackend):
    def __init__(self, prefix=None, **kwargs):
        self._stats = ThreadStats()
        self._stats.start()
        # TODO(dcramer): it'd be nice if the initialize call wasn't a global
        initialize(**kwargs)
        super(DatadogMetricsBackend, self).__init__(prefix=prefix)

    def __del__(self):
        self._stats.stop()

    def incr(self, key, amount=1, sample_rate=1):
        self._stats.increment(self._get_key(key),
                              amount,
                              sample_rate=sample_rate)

    def timing(self, key, value, sample_rate=1):
        self._stats.timing(self._get_key(key), value, sample_rate=sample_rate)
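
A hedged usage sketch; the backend forwards its keyword arguments straight to initialize(), so credentials are passed at construction (values below are placeholders):

backend = DatadogMetricsBackend(prefix='myservice',
                                api_key='<api-key>',
                                app_key='<app-key>')
backend.incr('jobs.processed')        # prefixed via _get_key(), e.g. myservice.jobs.processed
backend.timing('jobs.duration', 0.42)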
Example #30
    def test_tags_from_environment_env_service_version(self):
        test_tags = set(['env:staging', 'service:food', 'version:1.2.3'])
        with EnvVars(env_vars={
                "DD_ENV": "staging",
                "DD_VERSION": "1.2.3",
                "DD_SERVICE": "food",
        }):
            dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event1_text = "Event 1 text"
        dog.event(event1_title, event1_text)

        # Flush and test
        dog.flush()
        [event1] = reporter.events
        assert event1['title'] == event1_title
        assert event1['text'] == event1_text
        assert set(event1['tags']) == test_tags
Example #31
class DatadogAdapter(BaseAdapter):
    """
    DatadogAdapter sends the given `Receipt` values to a local
    Datadog agent via dogstatsd.
    """
    METRIC_PREFIX = 'shop.{}'.format(slugify(settings.REGISTER_NAME))

    def __init__(self):
        # prepare the statsd client
        options = {
            'api_key': settings.DATADOG_API_KEY,
        }
        initialize(**options)

        # start the statsd thread
        disabled = not settings.DATADOG_API_KEY
        self.statsd = ThreadStats()
        self.statsd.start(flush_interval=1, roll_up_interval=1, disabled=disabled)
        logger.debug('statsd thread initialized, disabled: %s', disabled)

    def push(self, receipt):
        """
        Sends data to a local Datadog agent. The `Receipt` products
        are properly tagged using a stringify function so that
        they can be easily aggregated through Datadog backend.
        """
        try:
            # count the receipt
            timestamp = receipt.date.timestamp()
            count_metric = '{prefix}.receipt.count'.format(prefix=self.METRIC_PREFIX)
            self.statsd.increment(count_metric, timestamp=timestamp)

            for item in receipt.sell_set.all():
                # generate tags and metrics name
                tags = ['product:{}'.format(slugify(item.product.name))]
                items_count = '{prefix}.receipt.items.count'.format(prefix=self.METRIC_PREFIX)
                receipt_amount = '{prefix}.receipt.amount'.format(prefix=self.METRIC_PREFIX)

                # compute item metrics
                quantity = item.quantity
                total = float((item.price * item.quantity).amount)

                # send data
                self.statsd.increment(items_count, timestamp=timestamp, value=quantity, tags=tags)
                self.statsd.increment(receipt_amount, timestamp=timestamp, value=total, tags=tags)

            logger.debug('pushed metrics for %d sold items', receipt.sell_set.count())
        except Exception:
            raise AdapterPushFailed
Example #32
    def send(metric_name: str, data_value: float, **kwargs):
        tags = ['metric_submission:threadstats']
        if kwargs:
            for key, value in kwargs.items():
                if 'tag' in key:
                    tags.append('{0}:{1}'.format(key[3:], value))

        options = {
            'api_key': '52ef848539fe3e746a1dc5d189c91315',
            'app_key': '76b1154922c2beea61fa4aefbda3d639373e4a12'
        }

        initialize(**options)

        stats = ThreadStats()
        stats.start()
        stats.gauge(metric_name, value=data_value, tags=tags)
Example #33
    def send_metric(metric_name: str, data_value: float, **kwargs):
        tags = ['metric_submission:threadstats']
        if kwargs:
            for key, value in kwargs.items():
                if 'tag' in key:
                    tags.append('{0}:{1}'.format(key[3:], value))

        api_key = config.Get().datadog_config()['api_key']
        app_key = config.Get().datadog_config()['app_key']
        options = {'api_key': api_key,
                   'app_key': app_key}

        initialize(**options)

        stats = ThreadStats()
        stats.start()
        try:
            stats.gauge(metric_name,
                        value=data_value,
                        tags=tags)
            return True
        except Exception as e:
            print(e)
            return False
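
Both helpers turn keyword arguments whose names contain 'tag' into Datadog tags via key[3:], which drops the leading 'tag' from the name. A hedged call sketch with invented names:

# 'tagregion' contains 'tag', so key[3:] yields 'region' -> tag 'region:us-east-1'
send_metric('queue.depth', 42.0, tagregion='us-east-1', tagenv='prod')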
Example #34
    def test_metric_type(self):
        """
        Checks the submitted metric's metric type.
        """
        # Set up ThreadStats with a namespace
        dog = ThreadStats(namespace="foo")
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = self.reporter

        # Send a few metrics
        dog.gauge("gauge", 20, timestamp=100.0)
        dog.increment("counter", timestamp=100.0)
        dog.histogram('histogram.1', 20, 100.0)
        dog.flush(200.0)

        (first, second, p75, p85, p95, p99, avg, cnt, max_,
         min_) = self.sort_metrics(reporter.metrics)

        # Assert Metric type
        nt.assert_equal(first['type'], 'rate')
        nt.assert_equal(second['type'], 'gauge')
        nt.assert_equal(p75['type'], 'gauge')
        nt.assert_equal(p85['type'], 'gauge')
        nt.assert_equal(p95['type'], 'gauge')
        nt.assert_equal(p99['type'], 'gauge')
        nt.assert_equal(avg['type'], 'gauge')
        nt.assert_equal(cnt['type'], 'rate')
        nt.assert_equal(max_['type'], 'gauge')
        nt.assert_equal(min_['type'], 'gauge')
Example #35
    def test_host(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, host='test')
        dog.gauge('gauge', 15, timestamp=100.0, host='test')

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, host='test')
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['host'], None)
        nt.assert_equal(c1['tags'], None)
        nt.assert_equal(c1['points'][0][1], 2)
        nt.assert_equal(c2['host'], 'test')
        nt.assert_equal(c2['tags'], None)
        nt.assert_equal(c2['points'][0][1], 1)
        nt.assert_equal(c3['host'], 'test')
        nt.assert_equal(c3['tags'], ['tag'])
        nt.assert_equal(c3['points'][0][1], 2)

        for g in [g1, g2, g3]:
            nt.assert_equal(g['metric'], 'gauge')
        nt.assert_equal(g1['host'], None)
        nt.assert_equal(g1['points'][0][1], 10)
        nt.assert_equal(g2['host'], '')
        nt.assert_equal(g2['points'][0][1], 12)
        nt.assert_equal(g3['host'], 'test')
        nt.assert_equal(g3['points'][0][1], 15)

        # Ensure histograms work as well.
        @dog.timed('timed', host='test')
        def test():
            pass
        test()
        dog.histogram('timed', 20, timestamp=300.0, host='test')
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric['host'] == 'test'
Example #36
 def test_stop(self):
     dog = ThreadStats()
     dog.start(flush_interval=1, roll_up_interval=1)
     dog.reporter = MemoryReporter()
     for i in range(10):
         dog.gauge('metric', i)
     time.sleep(2)
     flush_count = dog.flush_count
     assert flush_count
     dog.stop()
     for i in range(10):
         dog.gauge('metric', i)
     time.sleep(2)
     for i in range(10):
         dog.gauge('metric', i)
     time.sleep(2)
     assert dog.flush_count in [flush_count, flush_count + 1]
Example #37
    def test_tags_from_environment_and_constant(self):
        test_tags = ['country:china', 'age:45', 'blue']
        constant_tags = ['country:canada', 'red']
        with preserve_environment_variable('DATADOG_TAGS'):
            os.environ['DATADOG_TAGS'] = ','.join(test_tags)
            dog = ThreadStats(constant_tags=constant_tags)
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 2 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event1['tags'], constant_tags + test_tags)
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['tags'], constant_tags + test_tags)

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title,
                  event1_text,
                  priority=event1_priority,
                  date_happened=event1_date_happened,
                  tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'],
                        [event1_tag] + constant_tags + test_tags)
Example #38
    def test_event_constant_tags(self):
        constant_tag = 'type:constant'
        dog = ThreadStats(constant_tags=[constant_tag])
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 2 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event1['tags'], [constant_tag])
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['tags'], [constant_tag])

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title, event1_text, priority=event1_priority,
                  date_happened=event1_date_happened, tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'], [event1_tag, constant_tag])
Example #39
    def test_event(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 2 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1["title"], event1_title)
        nt.assert_equal(event1["text"], event1_text)
        nt.assert_equal(event2["title"], event2_title)
        nt.assert_equal(event2["text"], event2_text)

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(
            event1_title, event1_text, priority=event1_priority, date_happened=event1_date_happened, tags=[event1_tag]
        )

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event["title"], event1_title)
        nt.assert_equal(event["text"], event1_text)
        nt.assert_equal(event["priority"], event1_priority)
        nt.assert_equal(event["date_happened"], event1_date_happened)
        nt.assert_equal(event["tags"], [event1_tag])
Example #40
    def test_histogram(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add some histogram metrics.
        dog.histogram("histogram.1", 20, 100.0)
        dog.histogram("histogram.1", 30, 105.0)
        dog.histogram("histogram.1", 40, 106.0)
        dog.histogram("histogram.1", 50, 106.0)

        dog.histogram("histogram.1", 30, 110.0)
        dog.histogram("histogram.1", 50, 115.0)
        dog.histogram("histogram.1", 40, 116.0)

        dog.histogram("histogram.2", 40, 100.0)

        dog.histogram("histogram.3", 50, 134.0)

        # Flush and ensure they roll up properly.
        dog.flush(120.0)
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 24)

        # Test histograms elsewhere.
        (
            h1751,
            h1851,
            h1951,
            h1991,
            h1avg1,
            h1cnt1,
            h1max1,
            h1min1,
            _,
            _,
            _,
            _,
            h2avg1,
            h2cnt1,
            h2max1,
            h2min1,
            h1752,
            _,
            _,
            h1992,
            h1avg2,
            h1cnt2,
            h1max2,
            h1min2,
        ) = metrics

        nt.assert_equal(h1avg1["metric"], "histogram.1.avg")
        nt.assert_equal(h1avg1["points"][0][0], 100.0)
        nt.assert_equal(h1avg1["points"][0][1], 35)
        nt.assert_equal(h1cnt1["metric"], "histogram.1.count")
        nt.assert_equal(h1cnt1["points"][0][0], 100.0)
        nt.assert_equal(h1cnt1["points"][0][1], 4)
        nt.assert_equal(h1min1["metric"], "histogram.1.min")
        nt.assert_equal(h1min1["points"][0][1], 20)
        nt.assert_equal(h1max1["metric"], "histogram.1.max")
        nt.assert_equal(h1max1["points"][0][1], 50)
        nt.assert_equal(h1751["metric"], "histogram.1.75percentile")
        nt.assert_equal(h1751["points"][0][1], 40)
        nt.assert_equal(h1991["metric"], "histogram.1.99percentile")
        nt.assert_equal(h1991["points"][0][1], 50)

        nt.assert_equal(h1avg2["metric"], "histogram.1.avg")
        nt.assert_equal(h1avg2["points"][0][0], 110.0)
        nt.assert_equal(h1avg2["points"][0][1], 40)
        nt.assert_equal(h1cnt2["metric"], "histogram.1.count")
        nt.assert_equal(h1cnt2["points"][0][0], 110.0)
        nt.assert_equal(h1cnt2["points"][0][1], 3)
        nt.assert_equal(h1752["metric"], "histogram.1.75percentile")
        nt.assert_equal(h1752["points"][0][0], 110.0)
        nt.assert_equal(h1752["points"][0][1], 40.0)
        nt.assert_equal(h1992["metric"], "histogram.1.99percentile")
        nt.assert_equal(h1992["points"][0][0], 110.0)
        nt.assert_equal(h1992["points"][0][1], 50.0)

        nt.assert_equal(h2avg1["metric"], "histogram.2.avg")
        nt.assert_equal(h2avg1["points"][0][0], 100.0)
        nt.assert_equal(h2avg1["points"][0][1], 40)
        nt.assert_equal(h2cnt1["metric"], "histogram.2.count")
        nt.assert_equal(h2cnt1["points"][0][0], 100.0)
        nt.assert_equal(h2cnt1["points"][0][1], 1)

        # Flush again ensure they're gone.
        dog.reporter.metrics = []
        dog.flush(140.0)
        nt.assert_equal(len(dog.reporter.metrics), 8)
        dog.reporter.metrics = []
        dog.flush(200.0)
        nt.assert_equal(len(dog.reporter.metrics), 0)
Example #41
 def __init__(self, prefix=None, **kwargs):
     self._stats = ThreadStats()
     self._stats.start()
     # TODO(dcramer): it'd be nice if the initialize call wasn't a global
     initialize(**kwargs)
     super(DatadogMetricsBackend, self).__init__(prefix=prefix)
Example #42
    def test_counter(self):
        # Create some fake metrics.
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        dog.increment("test.counter.1", timestamp=1000.0)
        dog.increment("test.counter.1", value=2, timestamp=1005.0)
        dog.increment("test.counter.2", timestamp=1015.0)
        dog.increment("test.counter.3", timestamp=1025.0)
        dog.flush(1021.0)

        # Assert they've been properly flushed.
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 2)
        (first, second) = metrics
        nt.assert_equal(first["metric"], "test.counter.1")
        nt.assert_equal(first["points"][0][0], 1000.0)
        nt.assert_equal(first["points"][0][1], 3)
        nt.assert_equal(second["metric"], "test.counter.2")

        # Test decrement
        dog.increment("test.counter.1", value=10, timestamp=1000.0)
        dog.decrement("test.counter.1", value=2, timestamp=1005.0)
        reporter.metrics = []
        dog.flush(1021.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 1)
        first, = metrics
        nt.assert_equal(first["metric"], "test.counter.1")
        nt.assert_equal(first["points"][0][0], 1000.0)
        nt.assert_equal(first["points"][0][1], 8)

        # Flush again and make sure we're progressing.
        reporter.metrics = []
        dog.flush(1030.0)
        nt.assert_equal(len(reporter.metrics), 1)

        # Finally, make sure we've flushed all metrics.
        reporter.metrics = []
        dog.flush(1050.0)
        nt.assert_equal(len(reporter.metrics), 0)
Example #43
    def test_gauge(self):
        # Create some fake metrics.
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        dog.gauge("test.gauge.1", 20, 100.0)
        dog.gauge("test.gauge.1", 22, 105.0)
        dog.gauge("test.gauge.2", 30, 115.0)
        dog.gauge("test.gauge.3", 30, 125.0)
        dog.flush(120.0)

        # Assert they've been properly flushed.
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 2)

        (first, second) = metrics
        nt.assert_equal(first["metric"], "test.gauge.1")
        nt.assert_equal(first["points"][0][0], 100.0)
        nt.assert_equal(first["points"][0][1], 22)
        nt.assert_equal(second["metric"], "test.gauge.2")

        # Flush again and make sure we're progressing.
        reporter.metrics = []
        dog.flush(130.0)
        nt.assert_equal(len(reporter.metrics), 1)

        # Finally, make sure we've flushed all metrics.
        reporter.metrics = []
        dog.flush(150.0)
        nt.assert_equal(len(reporter.metrics), 0)
Example #44
def main():

    start = time.time()

    parser = argparse.ArgumentParser()
    parser.add_argument("--artifacts-dir", required=True)
    parser.add_argument("--sha1-signing-cert", required=True)
    parser.add_argument("--sha384-signing-cert", required=True)
    parser.add_argument("--task-definition",
                        required=True,
                        type=argparse.FileType('r'))
    parser.add_argument("--filename-template",
                        default=DEFAULT_FILENAME_TEMPLATE)
    parser.add_argument("--no-freshclam",
                        action="store_true",
                        default=False,
                        help="Do not refresh ClamAV DB")
    parser.add_argument("-q",
                        "--quiet",
                        dest="log_level",
                        action="store_const",
                        const=logging.WARNING,
                        default=logging.DEBUG)
    args = parser.parse_args()

    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
    log.setLevel(args.log_level)
    task = json.load(args.task_definition)
    # TODO: verify task["extra"]["funsize"]["partials"] with jsonschema

    signing_certs = {
        'sha1': open(args.sha1_signing_cert, 'rb').read(),
        'sha384': open(args.sha384_signing_cert, 'rb').read(),
    }

    assert (get_keysize(signing_certs['sha1']) == 2048)
    assert (get_keysize(signing_certs['sha384']) == 4096)

    # Intended for local testing.
    dd_api_key = os.environ.get('DATADOG_API_KEY')
    # Intended for Taskcluster.
    if not dd_api_key and os.environ.get('DATADOG_API_SECRET'):
        dd_api_key = get_secret(
            os.environ.get('DATADOG_API_SECRET')).get('key')

    # Create this even when not sending metrics, so the context manager
    # statements work.
    ddstats = ThreadStats(namespace='releng.releases.partials')

    if dd_api_key:
        dd_options = {
            'api_key': dd_api_key,
        }
        log.info("Starting metric collection")
        initialize(**dd_options)
        ddstats.start(flush_interval=1)
    else:
        log.info("No metric collection")

    if args.no_freshclam:
        log.info("Skipping freshclam")
    else:
        log.info("Refreshing clamav db...")
        try:
            redo.retry(lambda: sh.freshclam(
                "--stdout", "--verbose", _timeout=300, _err_to_out=True))
            log.info("Done.")
        except sh.ErrorReturnCode:
            log.warning("Freshclam failed, skipping DB update")

    manifest = []
    for e in task["extra"]["funsize"]["partials"]:
        for mar in (e["from_mar"], e["to_mar"]):
            verify_allowed_url(mar)

        work_env = WorkEnv()
        # TODO: run setup once
        work_env.setup()
        complete_mars = {}
        use_old_format = False
        for mar_type, f in (("from", e["from_mar"]), ("to", e["to_mar"])):
            dest = os.path.join(work_env.workdir, "{}.mar".format(mar_type))
            unpack_dir = os.path.join(work_env.workdir, mar_type)
            with ddstats.timer('mar.download.time'):
                download(f, dest)
            if not os.getenv("MOZ_DISABLE_MAR_CERT_VERIFICATION"):
                verify_signature(dest, signing_certs)
            complete_mars["%s_size" % mar_type] = os.path.getsize(dest)
            complete_mars["%s_hash" % mar_type] = get_hash(dest)
            with ddstats.timer('mar.unpack.time'):
                unpack(work_env, dest, unpack_dir)
            if mar_type == 'from':
                version = get_option(unpack_dir,
                                     filename="application.ini",
                                     section="App",
                                     option="Version")
                major = int(version.split(".")[0])
                # The updater for versions less than 56.0 requires BZ2
                # compressed MAR files
                if major < 56:
                    use_old_format = True
                    log.info("Forcing BZ2 compression for %s", f)
            log.info("AV-scanning %s ...", unpack_dir)
            metric_tags = [
                "platform:{}".format(e['platform']),
            ]
            with ddstats.timer('mar.clamscan.time', tags=metric_tags):
                sh.clamscan("-r", unpack_dir, _timeout=600, _err_to_out=True)
            log.info("Done.")

        path = os.path.join(work_env.workdir, "to")
        from_path = os.path.join(work_env.workdir, "from")
        mar_data = {
            "ACCEPTED_MAR_CHANNEL_IDS":
            get_option(path,
                       filename="update-settings.ini",
                       section="Settings",
                       option="ACCEPTED_MAR_CHANNEL_IDS"),
            "version":
            get_option(path,
                       filename="application.ini",
                       section="App",
                       option="Version"),
            "to_buildid":
            get_option(path,
                       filename="application.ini",
                       section="App",
                       option="BuildID"),
            "from_buildid":
            get_option(from_path,
                       filename="application.ini",
                       section="App",
                       option="BuildID"),
            "appName":
            get_option(from_path,
                       filename="application.ini",
                       section="App",
                       option="Name"),
            # Use Gecko repo and rev from platform.ini, not application.ini
            "repo":
            get_option(path,
                       filename="platform.ini",
                       section="Build",
                       option="SourceRepository"),
            "revision":
            get_option(path,
                       filename="platform.ini",
                       section="Build",
                       option="SourceStamp"),
            "from_mar":
            e["from_mar"],
            "to_mar":
            e["to_mar"],
            "platform":
            e["platform"],
            "locale":
            e["locale"],
        }
        # Override ACCEPTED_MAR_CHANNEL_IDS if needed
        if "ACCEPTED_MAR_CHANNEL_IDS" in os.environ:
            mar_data["ACCEPTED_MAR_CHANNEL_IDS"] = os.environ[
                "ACCEPTED_MAR_CHANNEL_IDS"]
        for field in ("update_number", "previousVersion",
                      "previousBuildNumber", "toVersion", "toBuildNumber"):
            if field in e:
                mar_data[field] = e[field]
        mar_data.update(complete_mars)
        # if branch not set explicitly use repo-name
        mar_data["branch"] = e.get("branch",
                                   mar_data["repo"].rstrip("/").split("/")[-1])
        if 'dest_mar' in e:
            mar_name = e['dest_mar']
        else:
            # default to formatted name if not specified
            mar_name = args.filename_template.format(**mar_data)
        mar_data["mar"] = mar_name
        dest_mar = os.path.join(work_env.workdir, mar_name)
        # TODO: download these once
        work_env.download_buildsystem_bits(repo=mar_data["repo"],
                                           revision=mar_data["revision"])

        metric_tags = [
            "branch:{}".format(mar_data['branch']),
            "platform:{}".format(mar_data['platform']),
            # If required. Shouldn't add much useful info, but increases
            # cardinality of metrics substantially, so avoided.
            # "locale:{}".format(mar_data['locale']),
        ]

        with ddstats.timer('generate_partial.time', tags=metric_tags):
            generate_partial(work_env, from_path, path, dest_mar,
                             mar_data["ACCEPTED_MAR_CHANNEL_IDS"],
                             mar_data["version"], use_old_format)

        mar_data["size"] = os.path.getsize(dest_mar)
        metric_tags.append("unit:bytes")
        # Allows us to find out how many releases there were between the two,
        # making buckets of the file sizes easier.
        metric_tags.append("update_number:{}".format(
            mar_data.get('update_number', 0)))
        ddstats.gauge('partial_mar_size', mar_data['size'], tags=metric_tags)

        mar_data["hash"] = get_hash(dest_mar)

        shutil.copy(dest_mar, args.artifacts_dir)
        work_env.cleanup()
        manifest.append(mar_data)

    manifest_file = os.path.join(args.artifacts_dir, "manifest.json")
    with open(manifest_file, "w") as fp:
        json.dump(manifest, fp, indent=2, sort_keys=True)

    # Warning: Assumption that one partials task will always be for one branch.
    metric_tags = [
        "branch:{}".format(mar_data['branch']),
    ]

    ddstats.timing('task_duration',
                   time.time() - start,
                   start,
                   tags=metric_tags)
    # Wait for all the metrics to flush. If the program ends before
    # they've been sent, they'll be dropped.
    # Should be more than the flush_interval for the ThreadStats object
    time.sleep(10)
Example #45
    def check(self):
        logging.info('check info')
        try:
            yaml_file = os.environ.get('DATADOG_CONF',
                                       '%s/aws_redshift_status.yaml' % config.get_confd_path())
            # Python 2 file(); on Python 3 use open() and yaml.safe_load().
            yaml_data = yaml.load(file(yaml_file))
            init_config = yaml_data['init_config']
            interval = init_config.get('min_collection_interval', 300)

            stats = ThreadStats()
            stats.start(flush_interval=10, roll_up_interval=1, device=None,
                        flush_in_thread=False, flush_in_greenlet=False, disabled=False)

            start = time.time()
            for instance in yaml_data['instances']:
                logging.debug('instance name is %s' % instance['name'])

                name, cluster_name, cluster_address, cluster_port, db_name, user_name, user_password, \
                    aws_access_key_id, aws_secret_access_key, aws_region, query, \
                    tags = self._load_conf(instance)

                if cluster_address is None and cluster_port is None:
                    redshift = boto.redshift.connect_to_region(aws_region,
                                                               aws_access_key_id=aws_access_key_id,
                                                               aws_secret_access_key=aws_secret_access_key)
                    clusters = redshift.describe_clusters(cluster_name)
                    if len(clusters) == 0:
                        raise Exception('Cluster is empty')

                    cluster = clusters['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
                    endpoint = cluster['Endpoint']
                    cluster_address = endpoint['Address']
                    cluster_port = endpoint['Port']

                conn = None
                try:
                    connect_timeout = init_config.get('connect_timeout', 5)
                    conn = psycopg2.connect(
                        host=cluster_address,
                        port=cluster_port,
                        database=db_name,
                        user=user_name,
                        password=user_password,
                        connect_timeout=connect_timeout,
                    )

                    today = datetime.datetime.utcnow()
                    starttime = (today - datetime.timedelta(seconds=interval)).strftime('%Y-%m-%d %H:%M:%S.%f')
                    endtime = today.strftime('%Y-%m-%d %H:%M:%S.%f')

                    results = self._db_query(conn, QUERY_TABLE_COUNT)
                    stats.gauge('aws.redshift_status.table_count', results[0][0], tags=tags)
                    logging.debug('aws.redshift_status.table_count is %s' % results[0][0])

                    results = self._db_query(conn, QUERY_NODE)
                    for row in results:
                        gauge_tags = tags[:]
                        gauge_tags.append('node:%s' % row[0])
                        stats.gauge('aws_redshift_status.node_slice', row[1], tags=gauge_tags)
                        logging.debug('aws_redshift_status.node_slice is %s' % row[1])

                    results = self._db_query(conn, QUERY_TABLE_RECORD)
                    for row in results:
                        gauge_tags = tags[:]
                        gauge_tags.append('table:%s' % row[0])
                        stats.gauge('aws_redshift_status.table_records', row[1], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_records is %s' % row[1])

                    results = self._db_query(conn, QUERY_TABLE_STATUS)
                    for row in results:
                        gauge_tags = tags[:]
                        gauge_tags.append('table:%s' % row[0])
                        stats.gauge('aws_redshift_status.table_status.size', row[1], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_status.size is %s' % row[1])
                        stats.gauge('aws_redshift_status.table_status.tbl_rows', row[2], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_status.tbl_rows is %s' % row[2])
                        stats.gauge('aws_redshift_status.table_status.skew_rows', row[3], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_status.skew_rows is %s' % row[3])

                    for q in ['select', 'insert', 'update', 'delete', 'analyze']:
                        results = self._db_query(conn, QUERY_LOG_TYPE % (starttime, endtime, '%s %%' % q))
                        for row in results:
                            stats.gauge('aws_redshift_status.query.%s' % q, row[0], tags=tags)
                            logging.debug('aws_redshift_status.query.%s is %s' % (q, row[0]))

                    running_time = time.time() - start
                    stats.gauge('aws_redshift_status.response_time', running_time, tags=tags)
                    logging.debug('aws_redshift_status.response_time is %s' % running_time)
                finally:
                    if conn:
                        conn.close()

            stats.flush()
            stop = stats.stop()
            logging.debug('Stopping is %s' % stop)
        except Exception:
            logging.warning(sys.exc_info())
Example #46
import logging

import requests
import sh

import redo
from scriptworker.utils import retry_async
from mardor.reader import MarReader
from mardor.signing import get_keysize

from datadog import initialize, ThreadStats


log = logging.getLogger(__name__)

# Create this even when not sending metrics, so the context manager
# statements work.
ddstats = ThreadStats(namespace='releng.releases.partials')
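
# A minimal sketch of such a context manager statement (the metric name and
# the wrapped call below are illustrative, not taken from this script):
#
#     with ddstats.timer('download.time'):
#         download_mar(url, dest)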


ALLOWED_URL_PREFIXES = [
    "http://download.cdn.mozilla.net/pub/mozilla.org/firefox/nightly/",
    "http://download.cdn.mozilla.net/pub/firefox/nightly/",
    "https://mozilla-nightly-updates.s3.amazonaws.com",
    "https://queue.taskcluster.net/",
    "http://ftp.mozilla.org/",
    "http://download.mozilla.org/",
    "https://archive.mozilla.org/",
    "http://archive.mozilla.org/",
    "https://queue.taskcluster.net/v1/task/",
]
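
# Presumably used to validate download URLs before fetching, along the lines
# of this hypothetical check:
#
#     if not any(url.startswith(prefix) for prefix in ALLOWED_URL_PREFIXES):
#         raise ValueError("URL not in allowed prefix list: %s" % url)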

DEFAULT_FILENAME_TEMPLATE = "{appName}-{branch}-{version}-{platform}-" \
Example #47
    def test_constant_tags(self):
        """
        Constant tags are attached to all metrics.
        """
        dog = ThreadStats(constant_tags=["type:constant"])
        dog.start(roll_up_interval=1, flush_in_thread=False)
        dog.reporter = self.reporter

        # Post the same metric with different tags.
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", 'db'])
        dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, tags=["env:production", 'db'])
        dog.increment("counter", timestamp=100.0, tags=["env:staging"])

        dog.flush(200.0)

        # Assertions on all metrics
        self.assertMetric(count=6)

        # Assertions on gauges
        self.assertMetric(name='gauge', value=10, tags=["type:constant"], count=1)
        self.assertMetric(name="gauge", value=15, tags=["env:production", "db", "type:constant"], count=1)  # noqa
        self.assertMetric(name="gauge", value=20, tags=["env:staging", "type:constant"], count=1)

        # Assertions on counters
        self.assertMetric(name="counter", value=1, tags=["type:constant"], count=1)
        self.assertMetric(name="counter", value=1, tags=["env:production", "db", "type:constant"], count=1)  # noqa
        self.assertMetric(name="counter", value=1, tags=["env:staging", "type:constant"], count=1)

        # Ensure histograms work as well.
        @dog.timed('timed', tags=['version:1'])
        def do_nothing():
            """
            A function that does nothing but gets timed.
            """
            pass

        with patch("datadog.threadstats.base.time", return_value=300):
            do_nothing()

        dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])

        self.reporter.metrics = []
        dog.flush(400.0)

        # Histograms, and related metric types, produce 8 different metrics
        self.assertMetric(tags=["version:1", "type:constant"], count=8)
        self.assertMetric(tags=["db", "version:2", "type:constant"], count=8)
Example #48
import gzip
import json
from StringIO import StringIO
from base64 import b64decode

import boto3

from datadog import initialize, ThreadStats

# retrieve datadog options from KMS
KMS_ENCRYPTED_KEYS = "<KMS_ENCRYPTED_KEYS>"  # Enter the base-64 encoded, encrypted Datadog token (CiphertextBlob)
kms = boto3.client('kms')
datadog_keys = kms.decrypt(CiphertextBlob=b64decode(KMS_ENCRYPTED_KEYS))['Plaintext']
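# (The decrypted Plaintext is a JSON blob with the api_key/app_key options; on
# Python 3 it arrives as bytes, which json.loads accepts from 3.6 onwards.)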
initialize(**json.loads(datadog_keys))

stats = ThreadStats()
stats.start(flush_in_thread=False)

print 'Lambda function initialized, ready to send metrics'


def _process_rds_enhanced_monitoring_message(base_tags, ts, message):
    engine = message["engine"]
    instance_id = message["instanceID"]

    tags = [
        'engine:%s' % engine,
        'dbinstanceidentifier:%s' % instance_id,
    ] + base_tags

    # metrics generation
Example #49
import numpy as np

from datadog import initialize, ThreadStats


def read_properties(file):
    properties = {}
    for line in file:
        parts = line.split('=')
        if len(parts) > 1:
            properties[parts[0].strip()] = parts[1].strip()
    return properties


dd_props = read_properties(open('datadog.ini', 'r'))

options = {'api_key': dd_props['api_key'], 'app_key': dd_props['app_key']}

initialize(**options)

num = 100000
refdist = {'apple': 0.35, 'orange': 0.35, 'grape': 0.25, 'durian': 0.05}

stats = ThreadStats()
stats.start()
for _ in range(num):
    sampled_fruit = np.random.choice(list(refdist.keys()),
                                     p=list(refdist.values()))
    stats.increment('fruit.picked', 1, tags=['fruit:' + sampled_fruit])

# Flush any points still buffered before the script exits; otherwise the last
# roll-up interval may be dropped.
stats.flush()
Example #50
    def test_host(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, host='test')
        dog.gauge('gauge', 15, timestamp=100.0, host='test')

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, host='test')
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['host'], None)
        nt.assert_equal(c1['tags'], None)
        nt.assert_equal(c1['points'][0][1], 0.2)
        nt.assert_equal(c2['host'], 'test')
        nt.assert_equal(c2['tags'], None)
        nt.assert_equal(c2['points'][0][1], 0.1)
        nt.assert_equal(c3['host'], 'test')
        nt.assert_equal(c3['tags'], ['tag'])
        nt.assert_equal(c3['points'][0][1], 0.2)

        for g in [g1, g2, g3]:
            nt.assert_equal(g['metric'], 'gauge')
        nt.assert_equal(g1['host'], None)
        nt.assert_equal(g1['points'][0][1], 10)
        nt.assert_equal(g2['host'], '')
        nt.assert_equal(g2['points'][0][1], 12)
        nt.assert_equal(g3['host'], 'test')
        nt.assert_equal(g3['points'][0][1], 15)

        # Ensure histograms work as well.
        @dog.timed('timed', host='test')
        def test():
            pass

        test()
        dog.histogram('timed', 20, timestamp=300.0, host='test')
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric['host'] == 'test'
Example #51
    def test_tags(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
        dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
        dog.increment('counter', timestamp=100.0, tags=['env:staging'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['tags'], None)
        nt.assert_equal(c1['points'][0][1], 1)
        nt.assert_equal(c2['tags'], ['env:production', 'db'])
        nt.assert_equal(c2['points'][0][1], 1)
        nt.assert_equal(c3['tags'], ['env:staging'])
        nt.assert_equal(c3['points'][0][1], 1)

        for g in [g1, g2, g3]:
            nt.assert_equal(g['metric'], 'gauge')
        nt.assert_equal(g1['tags'], None)
        nt.assert_equal(g1['points'][0][1], 10)
        nt.assert_equal(g2['tags'], ['env:production', 'db'])
        nt.assert_equal(g2['points'][0][1], 15)
        nt.assert_equal(g3['tags'], ['env:staging'])
        nt.assert_equal(g3['points'][0][1], 20)
Example #52
    def test_tags_from_environment_and_constant(self):
        test_tags = ['country:china', 'age:45', 'blue']
        constant_tags = ['country:canada', 'red']
        with preserve_environment_variable('DATADOG_TAGS'):
            os.environ['DATADOG_TAGS'] = ','.join(test_tags)
            dog = ThreadStats(constant_tags=constant_tags)
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event1['tags'], constant_tags + test_tags)
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['tags'], constant_tags + test_tags)

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title, event1_text, priority=event1_priority,
                  date_happened=event1_date_happened, tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'], [event1_tag] + constant_tags + test_tags)
Example #53
    def stats(self):
        instance = ThreadStats()
        instance.start()
        return instance
Example #54
    def test_stop(self):
        dog = ThreadStats()
        dog.start(flush_interval=1, roll_up_interval=1)
        for i in range(10):
            dog.gauge("metric", i)
        time.sleep(2)
        flush_count = dog.flush_count
        assert flush_count
        dog.stop()
        for i in range(10):
            dog.gauge("metric", i)
        time.sleep(2)
        for i in range(10):
            dog.gauge("metric", i)
        time.sleep(2)
        assert dog.flush_count in [flush_count, flush_count + 1]
Example #55
class Bot(commands.AutoShardedBot):
    def __init__(self, *args, **kwargs):
        super().__init__(*args,
                         shard_count=3,
                         game=discord.Game(name="rp!help for help!"),
                         **kwargs)
        self.owner_id = 122739797646245899
        self.lounge_id = 166349353999532035
        self.uptime = datetime.datetime.utcnow()
        self.commands_used = Counter()
        self.server_commands = Counter()
        self.socket_stats = Counter()
        self.shutdowns = []
        self.lotteries = dict()

        self.logger = logging.getLogger('discord')  # Discord Logging
        self.logger.setLevel(logging.INFO)
        self.handler = logging.FileHandler(filename=os.path.join(
            'resources', 'discord.log'),
                                           encoding='utf-8',
                                           mode='w')
        self.handler.setFormatter(
            logging.Formatter(
                '%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
        self.logger.addHandler(self.handler)

        self.session = aiohttp.ClientSession(loop=self.loop)
        self.shutdowns.append(self.shutdown)

        with open("resources/auth", 'r') as af:
            self._auth = json.loads(af.read())

        self.db: db.Database = db.Database(self)
        self.di: data.DataInteraction = data.DataInteraction(self)
        self.default_udata = data.default_user
        self.default_servdata = data.default_server
        self.rnd = "1234567890abcdefghijklmnopqrstuvwxyz"

        icogs = [
            cogs.admin.Admin(self),
            cogs.team.Team(self),
            cogs.economy.Economy(self),
            cogs.inventory.Inventory(self),
            cogs.settings.Settings(self),
            cogs.misc.Misc(self),
            cogs.characters.Characters(self),
            cogs.pokemon.Pokemon(self),
            cogs.groups.Groups(self),
            cogs.user.User(self),
            cogs.salary.Salary(self)
        ]
        for cog in icogs:
            self.add_cog(cog)

        self.loop.create_task(self.start_serv())
        self.loop.create_task(self.db.connect())

        init_dd(self._auth[3], self._auth[4])
        self.stats = ThreadStats()
        self.stats.start()

    async def on_ready(self):
        print('Logged in as')
        print(self.user.name)
        print(self.user.id)
        print('------')
        await self.update_stats()

    async def update_stats(self):
        url = "https://bots.discord.pw/api/bots/{}/stats".format(self.user.id)
        payload = json.dumps(dict(server_count=len(self.guilds))).encode()
        headers = {
            'authorization': self._auth[1],
            "Content-Type": "application/json"
        }

        async with self.session.post(url, data=payload,
                                     headers=headers) as response:
            await response.read()

        url = "https://discordbots.org/api/bots/{}/stats".format(self.user.id)
        payload = json.dumps(dict(server_count=len(self.guilds))).encode()
        headers = {
            'authorization': self._auth[2],
            "Content-Type": "application/json"
        }

        async with self.session.post(url, data=payload,
                                     headers=headers) as response:
            await response.read()

        self.loop.call_later(
            14400, lambda: asyncio.ensure_future(self.update_stats()))

    async def on_message(self, message):
        if message.author.bot:
            return

        await self.process_commands(message)

    async def on_command(self, ctx):
        self.stats.increment("RPGBot.commands",
                             tags=["RPGBot:commands"],
                             host="scw-8112e8")
        self.stats.increment(
            f"RPGBot.commands.{str(ctx.command).replace(' ', '.')}",
            tags=["RPGBot:commands"],
            host="scw-8112e8")
        self.commands_used[ctx.command] += 1
        if isinstance(ctx.author, discord.Member):
            self.server_commands[ctx.guild.id] += 1
            if not (self.server_commands[ctx.guild.id] % 50):
                await ctx.send(
                    "This bot costs $130/yr to run. If you like the utilities it provides,"
                    " consider buying me a coffee <https://ko-fi.com/henrys>"
                    " or subscribe as a Patron <https://www.patreon.com/henry232323>"
                )

            add = choice([0, 0, 0, 0, 0, 1, 1, 2, 3])
            fpn = ctx.command.full_parent_name
            if fpn:
                values = {
                    "character": 2,
                    "inventory": 1,
                    "economy": 1,
                    "pokemon": 2,
                    "guild": 2,
                    "team": 1,
                }
                add += values.get(fpn, 0)

            if add:
                await asyncio.sleep(3)
                r = await self.di.add_exp(ctx.author, add)
                if r is not None:
                    await ctx.send(f"{ctx.author.mention} is now level {r}!")

    async def on_command_error(self, ctx, exception):
        self.stats.increment("RPGBot.errors",
                             tags=["RPGBot:errors"],
                             host="scw-8112e8")
        logging.info(
            f"Exception in {ctx.command} {ctx.guild}:{ctx.channel} {exception}"
        )
        await ctx.send(f"`{exception}`")

    async def on_guild_join(self, guild):
        if sum(1 for m in guild.members if m.bot) / guild.member_count >= 3 / 4:
            try:
                await guild.channels[0].send(
                    "This server has too many bots! I'm just going to leave if thats alright"
                )
            finally:
                await guild.leave()
        else:
            self.stats.increment("RPGBot.guilds",
                                 tags=["RPGBot:guilds"],
                                 host="scw-8112e8")

    async def on_guild_leave(self, guild):
        self.stats.increment("RPGBot.guilds",
                             -1,
                             tags=["RPGBot:guilds"],
                             host="scw-8112e8")

    async def on_socket_response(self, msg):
        self.socket_stats[msg.get('t')] += 1

    async def get_bot_uptime(self):
        """Get time between now and when the bot went up"""
        now = datetime.datetime.utcnow()
        delta = now - self.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)

        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'

        return fmt.format(d=days, h=hours, m=minutes, s=seconds)

    def randsample(self):
        return "".join(sample(self.rnd, 6))

    @staticmethod
    def get_exp(level):
        return int(0.1 * level**2 + 5 * level + 4)
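        # e.g. get_exp(1) == 9, get_exp(10) == 64, get_exp(50) == 504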

    @staticmethod
    def get_ram():
        """Get the bot's RAM usage info."""
        mem = psutil.virtual_memory()
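        # 0x40_000_000 bytes == 2**30 == 1 GiB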
        return f"{mem.used / 0x40_000_000:.2f}/{mem.total / 0x40_000_000:.2f}GB ({mem.percent}%)"

    @staticmethod
    def format_table(lines, separate_head=True):
        """Prints a formatted table given a 2 dimensional array"""
        # Count the column width
        widths = []
        for line in lines:
            for i, size in enumerate([len(x) for x in line]):
                while i >= len(widths):
                    widths.append(0)
                if size > widths[i]:
                    widths[i] = size

        # Generate the format string to pad the columns
        print_string = ""
        for i, width in enumerate(widths):
            print_string += "{" + str(i) + ":" + str(width) + "} | "
        if not len(print_string):
            return
        print_string = print_string[:-3]

        # Print the actual data
        fin = []
        for i, line in enumerate(lines):
            fin.append(print_string.format(*line))
            if i == 0 and separate_head:
                fin.append("-" * (sum(widths) + 3 * (len(widths) - 1)))

        return "\n".join(fin)

    async def shutdown(self):
        self.session.close()

    async def start_serv(self):
        self.webapp = Kyoukai(__name__)

        @self.webapp.route("/servers/<int:snowflake>/", methods=["GET"])
        async def getservinfo(ctx: HTTPRequestContext, snowflake: int):
            try:
                snowflake = int(snowflake)
                req = f"""SELECT info FROM servdata WHERE UUID = {snowflake};"""
                async with self.db._conn.acquire() as connection:
                    response = await connection.fetchval(req)
                return Response(response if response else json.dumps(
                    self.default_servdata, indent=4),
                                status=200)
            except Exception:
                return HTTPException(
                    "Invalid snowflake!",
                    Response("Failed to fetch info!", status=400))

        @self.webapp.route("/users/<int:snowflake>/", methods=["GET"])
        async def getuserinfo(ctx: HTTPRequestContext, snowflake: int):
            try:
                snowflake = int(snowflake)
                req = f"""SELECT info FROM userdata WHERE UUID = {snowflake};"""
                async with self.db._conn.acquire() as connection:
                    response = await connection.fetchval(req)
                return Response(response if response else json.dumps(
                    self.default_udata, indent=4),
                                status=200)
            except Exception:
                return HTTPException(
                    "Invalid snowflake!",
                    Response("Failed to fetch info!", status=400))

        await self.webapp.start('0.0.0.0', 1441)
Example #56
    def test_tags(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
        dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
        dog.increment("counter", timestamp=100.0, tags=["env:staging"])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        (nt.assert_equal(c["metric"], "counter") for c in [c1, c2, c3])
        nt.assert_equal(c1["tags"], None)
        nt.assert_equal(c1["points"][0][1], 1)
        nt.assert_equal(c2["tags"], ["env:production", "db"])
        nt.assert_equal(c2["points"][0][1], 1)
        nt.assert_equal(c3["tags"], ["env:staging"])
        nt.assert_equal(c3["points"][0][1], 1)

        (nt.assert_equal(c["metric"], "gauge") for c in [g1, g2, g3])
        nt.assert_equal(g1["tags"], None)
        nt.assert_equal(g1["points"][0][1], 10)
        nt.assert_equal(g2["tags"], ["env:production", "db"])
        nt.assert_equal(g2["points"][0][1], 15)
        nt.assert_equal(g3["tags"], ["env:staging"])
        nt.assert_equal(g3["points"][0][1], 20)
Example #57
    def stats(self):
        instance = ThreadStats()
        instance.start()
        return instance
Example #58
    def test_constant_tags(self):
        dog = ThreadStats(constant_tags=["type:constant"])
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
        dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
        dog.increment("counter", timestamp=100.0, tags=["env:staging"])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        (nt.assert_equal(c["metric"], "counter") for c in [c1, c2, c3])
        nt.assert_equal(c1["tags"], ["env:production", "db", "type:constant"])
        nt.assert_equal(c1["points"][0][1], 1)
        nt.assert_equal(c2["tags"], ["env:staging", "type:constant"])
        nt.assert_equal(c2["points"][0][1], 1)
        nt.assert_equal(c3["tags"], ["type:constant"])
        nt.assert_equal(c3["points"][0][1], 1)

        (nt.assert_equal(c["metric"], "gauge") for c in [g1, g2, g3])
        nt.assert_equal(g1["tags"], ["env:production", "db", "type:constant"])
        nt.assert_equal(g1["points"][0][1], 15)
        nt.assert_equal(g2["tags"], ["env:staging", "type:constant"])
        nt.assert_equal(g2["points"][0][1], 20)
        nt.assert_equal(g3["tags"], ["type:constant"])
        nt.assert_equal(g3["points"][0][1], 10)

        # Ensure histograms work as well.
        @dog.timed("timed", tags=["version:1"])
        def test():
            pass

        test()
        dog.histogram("timed", 20, timestamp=300.0, tags=["db", "version:2"])
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric["tags"]  # this is enough
Example #59
title = "Arcserve Brazil application is running"
text = 'Another happy customer!'
tags = ['version:1', 'application:web']

api.Event.create(title=title, text=text, tags=tags)

# Use Statsd, a Python client for DogStatsd
from datadog import statsd

# Increment a counter.
statsd.increment('page.views')

# Or ThreadStats, an alternative tool to collect and flush metrics, using the Datadog REST API
from datadog import ThreadStats

stats = ThreadStats()
stats.start()
stats.increment('page.views')
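
# Note: statsd emits over UDP to a locally running DogStatsD agent, while
# ThreadStats buffers in-process and submits through the HTTP API; short-lived
# scripts should call stats.flush() before exiting so buffered points are sent.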


def load_initial():
    server = 'localhost'
    database = 'acmedb'
    username = '******'
    password = '******'
    cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +
                          server + ';DATABASE=' + database + ';UID=' +
                          username + ';PWD=' + password)
    cursor = cnxn.cursor()
    cursor.execute("SELECT DISTINCT company from archchk_tbl")
    #	row=cursor.fetchone()
Example #60
    def test_host(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge("gauge", 12, timestamp=100.0, host="")  # unset the host
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, host="test")
        dog.gauge("gauge", 15, timestamp=100.0, host="test")

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, host="test")
        dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])
        dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        (nt.assert_equal(c["metric"], "counter") for c in [c1, c2, c3])
        nt.assert_equal(c1["host"], None)
        nt.assert_equal(c1["tags"], None)
        nt.assert_equal(c1["points"][0][1], 2)
        nt.assert_equal(c2["host"], "test")
        nt.assert_equal(c2["tags"], None)
        nt.assert_equal(c2["points"][0][1], 1)
        nt.assert_equal(c3["host"], "test")
        nt.assert_equal(c3["tags"], ["tag"])
        nt.assert_equal(c3["points"][0][1], 2)

        (nt.assert_equal(g["metric"], "gauge") for g in [g1, g2, g3])
        nt.assert_equal(g1["host"], None)
        nt.assert_equal(g1["points"][0][1], 10)
        nt.assert_equal(g2["host"], "")
        nt.assert_equal(g2["points"][0][1], 12)
        nt.assert_equal(g3["host"], "test")
        nt.assert_equal(g3["points"][0][1], 15)

        # Ensure histograms work as well.
        @dog.timed("timed", host="test")
        def test():
            pass

        test()
        dog.histogram("timed", 20, timestamp=300.0, host="test")
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric["host"] == "test"