def test_tags(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        assert_equal(c['metric'], 'counter')
    assert_equal(c1['tags'], None)
    assert_equal(c1['points'][0][1], 0.1)
    assert_equal(c2['tags'], ['env:production', 'db'])
    assert_equal(c2['points'][0][1], 0.1)
    assert_equal(c3['tags'], ['env:staging'])
    assert_equal(c3['points'][0][1], 0.1)

    for g in [g1, g2, g3]:
        assert_equal(g['metric'], 'gauge')
    assert_equal(g1['tags'], None)
    assert_equal(g1['points'][0][1], 10)
    assert_equal(g2['tags'], ['env:production', 'db'])
    assert_equal(g2['points'][0][1], 15)
    assert_equal(g3['tags'], ['env:staging'])
    assert_equal(g3['points'][0][1], 20)
def test_histogram_percentiles(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Sample all numbers between 1-100 many times. This
    # means our percentiles should be relatively close to themselves.
    percentiles = list(range(100))
    random.shuffle(percentiles)  # in place
    for i in percentiles:
        for j in range(20):
            dog.histogram('percentiles', i, 1000.0)
    dog.flush(2000.0)
    metrics = reporter.metrics

    def assert_almost_equal(i, j, e=1):
        # Floating point math?
        assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

    nt.assert_equal(len(metrics), 8)
    p75, p85, p95, p99, _, _, _, _ = self.sort_metrics(metrics)
    nt.assert_equal(p75['metric'], 'percentiles.75percentile')
    nt.assert_equal(p75['points'][0][0], 1000.0)
    assert_almost_equal(p75['points'][0][1], 75, 8)
    assert_almost_equal(p85['points'][0][1], 85, 8)
    assert_almost_equal(p95['points'][0][1], 95, 8)
    assert_almost_equal(p99['points'][0][1], 99, 8)
def configure_metrics(datadog_api_key=DATADOG_API_KEY,
                      datadog_app_key=DATADOG_APP_KEY,
                      engine_name=ENGINE_NAME,
                      os_type=OS_TYPE,
                      poly_work=os.getenv('POLY_WORK', 'local'),
                      source=os.getenv('HOSTNAME', 'local'),
                      tags=None,
                      disabled=False) -> ThreadStats:
    """
    Initialize Datadog metric collectors when the datadog env keys are set

    :return: datadog.ThreadStats
    """
    if datadog_api_key or datadog_app_key:
        if tags is None:
            tags = [
                f'poly_work:{poly_work}',
                f'engine_name:{engine_name}',
                f'pod_name:{source}',
                f'os:{os_type}',
                'testing' if poly_work == 'local' else '',
            ]
        options = {
            'api_key': datadog_api_key,
            'app_key': datadog_app_key,
            'host_name': source,
        }
        initialize(**options)
    else:
        disabled = True

    metrics_collector = ThreadStats(namespace='polyswarm', constant_tags=tags)
    metrics_collector.start(disabled=disabled)
    return metrics_collector
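# A minimal usage sketch, assuming the module-level defaults referenced above
# (DATADOG_API_KEY, DATADOG_APP_KEY, ENGINE_NAME, OS_TYPE) are defined; the
# metric names and the extra tag are hypothetical. configure_metrics() returns
# an already-started ThreadStats, so metrics can be submitted right away and
# are flushed by the background thread unless `disabled` ends up True.
collector = configure_metrics(tags=['deployment:example'])
collector.increment('requests.handled')
collector.gauge('queue.depth', 12)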
def test_gauge(self):
    # Create some fake metrics.
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    dog.gauge('test.gauge.1', 20, 100.0)
    dog.gauge('test.gauge.1', 22, 105.0)
    dog.gauge('test.gauge.2', 30, 115.0)
    dog.gauge('test.gauge.3', 30, 125.0)
    dog.flush(120.0)

    # Assert they've been properly flushed.
    metrics = self.sort_metrics(reporter.metrics)
    assert len(metrics) == 2

    (first, second) = metrics
    assert first['metric'] == 'test.gauge.1'
    assert first['points'][0][0] == 100.0
    assert first['points'][0][1] == 22
    assert second['metric'] == 'test.gauge.2'

    # Flush again and make sure we're progressing.
    reporter.metrics = []
    dog.flush(130.0)
    assert len(reporter.metrics) == 1

    # Finally, make sure we've flushed all metrics.
    reporter.metrics = []
    dog.flush(150.0)
    assert len(reporter.metrics) == 0
def test_timed_decorator(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=1, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    @dog.timed('timed.test')
    def func(a, b, c=1, d=1):
        """docstring"""
        return (a, b, c, d)

    nt.assert_equal(func.__name__, 'func')
    nt.assert_equal(func.__doc__, 'docstring')

    result = func(1, 2, d=3)
    # Assert it handles args and kwargs correctly.
    nt.assert_equal(result, (1, 2, 1, 3))

    time.sleep(1)  # Argh. I hate this.
    dog.flush()
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 8)
    (_, _, _, _, avg, count, max_, min_) = metrics
    nt.assert_equal(avg['metric'], 'timed.test.avg')
    nt.assert_equal(count['metric'], 'timed.test.count')
    nt.assert_equal(max_['metric'], 'timed.test.max')
    nt.assert_equal(min_['metric'], 'timed.test.min')
class DatadogMiddleware(object):
    DD_TIMING_ATTRIBUTE = '_dd_start_time'

    def __init__(self):
        app_name = settings.DATADOG_APP_NAME
        self.stats = ThreadStats()
        self.stats.start()
        self.error_metric = '{0}.errors'.format(app_name)
        self.timing_metric = '{0}.request_time'.format(app_name)
        self.event_tags = [app_name, 'exception']

    def process_request(self, request):
        setattr(request, self.DD_TIMING_ATTRIBUTE, time.time())

    def process_response(self, request, response):
        """ Submit timing metrics from the current request """
        if not hasattr(request, self.DD_TIMING_ATTRIBUTE):
            return response

        # Calculate request time and submit to Datadog
        request_time = time.time() - getattr(request, self.DD_TIMING_ATTRIBUTE)
        tags = self._get_metric_tags(request)
        self.stats.histogram(self.timing_metric, request_time, tags=tags)

        return response

    def process_exception(self, request, exception):
        """ Captures Django view exceptions as Datadog events """
        if isinstance(exception, Http404):
            # Don't report 404 not found
            return

        # Get a formatted version of the traceback.
        exc = traceback.format_exc()

        # Make request.META json-serializable.
        szble = {}
        for k, v in request.META.items():
            if isinstance(v, (list, basestring, bool, int, float, long)):
                szble[k] = v
            else:
                szble[k] = str(v)

        title = 'Exception from {0}'.format(request.path)
        text = "Traceback:\n@@@\n{0}\n@@@\nMetadata:\n@@@\n{1}\n@@@" \
            .format(exc, json.dumps(szble, indent=2))

        # Submit the exception to Datadog
        self.stats.event(title, text, alert_type='error',
                         aggregation_key=request.path, tags=self.event_tags)

        # Increment our errors metric
        tags = self._get_metric_tags(request)
        self.stats.increment(self.error_metric, tags=tags)

    def _get_metric_tags(self, request):
        return ['path:{0}'.format(request.path)]
def test_timed_decorator(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=1, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    @dog.timed('timed.test')
    def func(a, b, c=1, d=1):
        """docstring"""
        return (a, b, c, d)

    assert func.__name__ == 'func'
    assert func.__doc__ == 'docstring'

    result = func(1, 2, d=3)
    # Assert it handles args and kwargs correctly.
    assert result == (1, 2, 1, 3)

    time.sleep(1)  # Argh. I hate this.
    dog.flush()
    metrics = self.sort_metrics(reporter.metrics)
    assert len(metrics) == 8
    (_, _, _, _, avg, count, max_, min_) = metrics
    assert avg['metric'] == 'timed.test.avg'
    assert count['metric'] == 'timed.test.count'
    assert max_['metric'] == 'timed.test.max'
    assert min_['metric'] == 'timed.test.min'
def test_distribution(self):
    # Create some fake metrics.
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    dog.distribution('test.dist.1', 20, 100.0)
    dog.distribution('test.dist.1', 22, 105.0)
    dog.distribution('test.dist.2', 30, 115.0)
    dog.distribution('test.dist.3', 30, 125.0)
    dog.flush(120.0)

    # Assert they've been properly flushed.
    dists = self.sort_metrics(reporter.distributions)
    nt.assert_equal(len(dists), 2)

    (first, second) = dists
    nt.assert_equal(first['metric'], 'test.dist.1')
    nt.assert_equal(first['points'][0][0], 100.0)
    nt.assert_equal(first['points'][0][1], [20, 22])
    nt.assert_equal(second['metric'], 'test.dist.2')

    # Flush again and make sure we're progressing.
    reporter.distributions = []
    dog.flush(130.0)
    nt.assert_equal(len(reporter.distributions), 1)

    # Finally, make sure we've flushed all metrics.
    reporter.distributions = []
    dog.flush(150.0)
    nt.assert_equal(len(reporter.distributions), 0)
def test_histogram_percentiles(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Sample all numbers between 1-100 many times. This
    # means our percentiles should be relatively close to themselves.
    percentiles = list(range(100))
    random.shuffle(percentiles)  # in place
    for i in percentiles:
        for j in range(20):
            dog.histogram("percentiles", i, 1000.0)
    dog.flush(2000.0)
    metrics = reporter.metrics

    def assert_almost_equal(i, j, e=1):
        # Floating point math?
        assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

    nt.assert_equal(len(metrics), 8)
    p75, p85, p95, p99, _, _, _, _ = self.sort_metrics(metrics)
    nt.assert_equal(p75["metric"], "percentiles.75percentile")
    nt.assert_equal(p75["points"][0][0], 1000.0)
    assert_almost_equal(p75["points"][0][1], 75, 8)
    assert_almost_equal(p85["points"][0][1], 85, 8)
    assert_almost_equal(p95["points"][0][1], 95, 8)
    assert_almost_equal(p99["points"][0][1], 99, 8)
def test_gauge(self):
    # Create some fake metrics.
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    dog.gauge("test.gauge.1", 20, 100.0)
    dog.gauge("test.gauge.1", 22, 105.0)
    dog.gauge("test.gauge.2", 30, 115.0)
    dog.gauge("test.gauge.3", 30, 125.0)
    dog.flush(120.0)

    # Assert they've been properly flushed.
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 2)

    (first, second) = metrics
    nt.assert_equal(first["metric"], "test.gauge.1")
    nt.assert_equal(first["points"][0][0], 100.0)
    nt.assert_equal(first["points"][0][1], 22)
    nt.assert_equal(second["metric"], "test.gauge.2")

    # Flush again and make sure we're progressing.
    reporter.metrics = []
    dog.flush(130.0)
    nt.assert_equal(len(reporter.metrics), 1)

    # Finally, make sure we've flushed all metrics.
    reporter.metrics = []
    dog.flush(150.0)
    nt.assert_equal(len(reporter.metrics), 0)
class DatadogMetrics(object):
    """DataDog Metric backend"""

    def __init__(self, api_key, app_key, flush_interval=10,
                 namespace="aplt"):
        datadog.initialize(api_key=api_key, app_key=app_key)
        self._client = ThreadStats()
        self._flush_interval = flush_interval
        self._host = get_hostname()
        self._namespace = namespace

    def _prefix_name(self, name):
        return "%s.%s" % (self._namespace, name)

    def start(self):
        self._client.start(flush_interval=self._flush_interval,
                           roll_up_interval=self._flush_interval)

    def increment(self, name, count=1, **kwargs):
        self._client.increment(self._prefix_name(name), count,
                               host=self._host, **kwargs)

    def timing(self, name, duration, **kwargs):
        self._client.timing(self._prefix_name(name), value=duration,
                            host=self._host, **kwargs)
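# A brief wiring sketch for the backend above (the api/app keys and metric
# names are placeholders, not values from the original code). start() launches
# ThreadStats' flush thread with the configured interval, after which
# increment()/timing() calls are rolled up and shipped automatically.
metrics = DatadogMetrics(api_key='<api_key>', app_key='<app_key>', namespace='aplt')
metrics.start()
metrics.increment('notification.sent')
metrics.timing('notification.latency', 0.25)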
def test_metric_type(self):
    """
    Checks the submitted metric's metric type.
    """
    # Set up ThreadStats with a namespace
    dog = ThreadStats(namespace="foo")
    dog.start(roll_up_interval=1, flush_in_thread=False)
    reporter = dog.reporter = self.reporter

    # Send a few metrics
    dog.gauge("gauge", 20, timestamp=100.0)
    dog.increment("counter", timestamp=100.0)
    dog.histogram('histogram.1', 20, 100.0)
    dog.flush(200.0)

    (first, second, p75, p85, p95, p99, avg, cnt, max_, min_) = \
        self.sort_metrics(reporter.metrics)

    # Assert Metric type
    nt.assert_equal(first['type'], 'rate')
    nt.assert_equal(second['type'], 'gauge')
    nt.assert_equal(p75['type'], 'gauge')
    nt.assert_equal(p85['type'], 'gauge')
    nt.assert_equal(p95['type'], 'gauge')
    nt.assert_equal(p99['type'], 'gauge')
    nt.assert_equal(avg['type'], 'gauge')
    nt.assert_equal(cnt['type'], 'rate')
    nt.assert_equal(max_['type'], 'gauge')
    nt.assert_equal(min_['type'], 'gauge')
def test_tags(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
    dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
    dog.increment("counter", timestamp=100.0, tags=["env:staging"])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        nt.assert_equal(c["metric"], "counter")
    nt.assert_equal(c1["tags"], None)
    nt.assert_equal(c1["points"][0][1], 1)
    nt.assert_equal(c2["tags"], ["env:production", "db"])
    nt.assert_equal(c2["points"][0][1], 1)
    nt.assert_equal(c3["tags"], ["env:staging"])
    nt.assert_equal(c3["points"][0][1], 1)

    for g in [g1, g2, g3]:
        nt.assert_equal(g["metric"], "gauge")
    nt.assert_equal(g1["tags"], None)
    nt.assert_equal(g1["points"][0][1], 10)
    nt.assert_equal(g2["tags"], ["env:production", "db"])
    nt.assert_equal(g2["points"][0][1], 15)
    nt.assert_equal(g3["tags"], ["env:staging"])
    nt.assert_equal(g3["points"][0][1], 20)
def test_timed_decorator(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=1, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    @dog.timed("timed.test")
    def func(a, b, c=1, d=1):
        """docstring"""
        return (a, b, c, d)

    nt.assert_equal(func.__name__, "func")
    nt.assert_equal(func.__doc__, "docstring")

    result = func(1, 2, d=3)
    # Assert it handles args and kwargs correctly.
    nt.assert_equal(result, (1, 2, 1, 3))

    time.sleep(1)  # Argh. I hate this.
    dog.flush()
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 8)
    (_, _, _, _, avg, count, max_, min_) = metrics
    nt.assert_equal(avg["metric"], "timed.test.avg")
    nt.assert_equal(count["metric"], "timed.test.count")
    nt.assert_equal(max_["metric"], "timed.test.max")
    nt.assert_equal(min_["metric"], "timed.test.min")
def test_tags(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['tags'], None)
    nt.assert_equal(c1['points'][0][1], 1)
    nt.assert_equal(c2['tags'], ['env:production', 'db'])
    nt.assert_equal(c2['points'][0][1], 1)
    nt.assert_equal(c3['tags'], ['env:staging'])
    nt.assert_equal(c3['points'][0][1], 1)

    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['tags'], None)
    nt.assert_equal(g1['points'][0][1], 10)
    nt.assert_equal(g2['tags'], ['env:production', 'db'])
    nt.assert_equal(g2['points'][0][1], 15)
    nt.assert_equal(g3['tags'], ['env:staging'])
    nt.assert_equal(g3['points'][0][1], 20)
def test_disabled_mode(self):
    dog = ThreadStats()
    reporter = dog.reporter = MemoryReporter()
    dog.start(disabled=True, flush_interval=1, roll_up_interval=1)
    dog.gauge("testing", 1, timestamp=1000)
    dog.gauge("testing", 2, timestamp=1000)
    dog.flush(2000.0)
    assert not reporter.metrics
def test_disabled_mode(self):
    dog = ThreadStats()
    dog.start(disabled=True, flush_interval=1, roll_up_interval=1)
    reporter = dog.reporter = MemoryReporter()
    dog.gauge('testing', 1, timestamp=1000)
    dog.gauge('testing', 2, timestamp=1000)
    dog.flush(2000.0)
    assert not reporter.metrics
class DatadogStatsLogger(LoggingMixin):
    def __init__(self, datadog_conn_id='datadog_default'):
        super().__init__()
        conn = BaseHook.get_connection(datadog_conn_id)
        self.api_key = conn.extra_dejson.get('api_key', None)
        self.app_key = conn.extra_dejson.get('app_key', None)
        self.source_type_name = conn.extra_dejson.get('source_type_name', None)
        # If the host is populated, it will use that hostname instead
        # for all metric submissions
        self.host = conn.host

        if self.api_key is None:
            raise AirflowException('api_key must be specified in the '
                                   'Datadog connection details')

        self.log.info('Setting up api keys for Datadog')
        self.stats = None
        initialize(api_key=self.api_key, app_key=self.app_key)

    def incr(self, stat, count=1, rate=1, tags=None):
        self.log.info('datadog incr: {} {} {} {}'.format(stat, count, rate, tags))
        self.stats.increment(stat, value=count, sample_rate=rate,
                             tags=self._format_tags(tags))

    def decr(self, stat, count=1, rate=1, tags=None):
        self.log.info('datadog decr: {} {} {} {}'.format(stat, count, rate, tags))
        self.stats.decrement(stat, value=count, sample_rate=rate,
                             tags=self._format_tags(tags))

    def gauge(self, stat, value, rate=1, delta=False, tags=None):
        self.log.info('datadog gauge: {} {} {} {} {}'.format(
            stat, value, rate, delta, tags))
        if delta:
            self.log.warning('Deltas are unsupported in Datadog')
        self.stats.gauge(stat, value, sample_rate=rate,
                         tags=self._format_tags(tags))

    def timing(self, stat, delta, rate=1, tags=None):
        self.log.info('datadog timing: {} {} {}'.format(stat, delta, tags))
        if isinstance(delta, timedelta):
            delta = delta.total_seconds() * 1000.
        self.stats.timing(stat, delta, sample_rate=rate,
                          tags=self._format_tags(tags))

    @classmethod
    def _format_tags(cls, tags):
        if not tags:
            return None
        return ['{}:{}'.format(k, v) for k, v in tags.items()]

    def start(self):
        self.stats = ThreadStats(namespace='airflow')
        self.stats.start()
        register(self.stop)

    def stop(self):
        unregister(self.stop)
        self.stats.stop()
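# Usage sketch: this assumes a 'datadog_default' Airflow connection whose
# extras carry api_key/app_key, and a tags dict chosen here for illustration.
# Note that start() is what creates and starts the ThreadStats client (and
# registers stop() through the imported register hook), so it must be called
# before incr()/gauge()/timing().
stats_logger = DatadogStatsLogger()
stats_logger.start()
stats_logger.incr('task_instance.started', tags={'dag_id': 'example_dag'})
stats_logger.gauge('executor.open_slots', 4)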
def test_custom_host_and_device(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=1, flush_in_thread=False, device='dev')
    reporter = dog.reporter = MemoryReporter()
    dog.gauge('my.gauge', 1, 100.0, host='host')
    dog.flush(1000)
    metric = reporter.metrics[0]
    nt.assert_equal(metric['device'], 'dev')
    nt.assert_equal(metric['host'], 'host')
def test_host(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.gauge('gauge', 15, timestamp=100.0, host='test')

    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    assert len(metrics) == 6

    [c1, c2, c3, g1, g2, g3] = metrics
    assert c1['metric'] == 'counter'
    assert c2['metric'] == 'counter'
    assert c3['metric'] == 'counter'
    assert c1['host'] is None
    assert c1['tags'] is None
    assert c1['points'][0][1] == 0.2
    assert c2['host'] == 'test'
    assert c2['tags'] is None
    assert c2['points'][0][1] == 0.1
    assert c3['host'] == 'test'
    assert c3['tags'] == ['tag']
    assert c3['points'][0][1] == 0.2

    assert g1['metric'] == 'gauge'
    assert g2['metric'] == 'gauge'
    assert g3['metric'] == 'gauge'
    assert g1['host'] is None
    assert g1['points'][0][1] == 10
    assert g2['host'] == ''
    assert g2['points'][0][1] == 12
    assert g3['host'] == 'test'
    assert g3['points'][0][1] == 15

    # Ensure histograms work as well.
    @dog.timed('timed', host='test')
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, host='test')
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['host'] == 'test'
def test_default_host_and_device(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=1, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    dog.gauge('my.gauge', 1, 100.0)
    dog.flush(1000)
    metric = reporter.metrics[0]
    assert not metric['device']
    assert not metric['host']
def test_default_host_and_device(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=1, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()
    dog.gauge("my.gauge", 1, 100.0)
    dog.flush(1000)
    metric = reporter.metrics[0]
    assert not metric["device"]
    assert not metric["host"]
def test_custom_host_and_device(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=1, flush_in_thread=False, device="dev")
    reporter = dog.reporter = MemoryReporter()
    dog.gauge("my.gauge", 1, 100.0, host="host")
    dog.flush(1000)
    metric = reporter.metrics[0]
    nt.assert_equal(metric["device"], "dev")
    nt.assert_equal(metric["host"], "host")
def test_init(self):
    # Test compress_payload setting
    t = ThreadStats(compress_payload=True)
    t.start()
    assert t.reporter.compress_payload is True
    t.stop()
    # Default value
    t = ThreadStats()
    t.start()
    assert t.reporter.compress_payload is False
    t.stop()
def test_constant_tags(self):
    """
    Constant tags are attached to all metrics.
    """
    dog = ThreadStats(constant_tags=["type:constant"])
    dog.start(roll_up_interval=1, flush_in_thread=False)
    dog.reporter = self.reporter

    # Post the same metric with different tags.
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", 'db'])
    dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, tags=["env:production", 'db'])
    dog.increment("counter", timestamp=100.0, tags=["env:staging"])

    dog.flush(200.0)

    # Assertions on all metrics
    self.assertMetric(count=6)

    # Assertions on gauges
    self.assertMetric(name='gauge', value=10, tags=["type:constant"], count=1)
    self.assertMetric(name="gauge", value=15,
                      tags=["env:production", "db", "type:constant"], count=1)
    self.assertMetric(name="gauge", value=20,
                      tags=["env:staging", "type:constant"], count=1)

    # Assertions on counters
    self.assertMetric(name="counter", value=1, tags=["type:constant"], count=1)
    self.assertMetric(name="counter", value=1,
                      tags=["env:production", "db", "type:constant"], count=1)
    self.assertMetric(name="counter", value=1,
                      tags=["env:staging", "type:constant"], count=1)

    # Ensure histograms work as well.
    @dog.timed('timed', tags=['version:1'])
    def do_nothing():
        """
        A function that does nothing, but being timed.
        """
        pass

    with patch("datadog.threadstats.base.time", return_value=300):
        do_nothing()

    dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])
    self.reporter.metrics = []
    dog.flush(400.0)

    # Histograms, and related metric types, produce 8 different metrics
    self.assertMetric(tags=["version:1", "type:constant"], count=8)
    self.assertMetric(tags=["db", "version:2", "type:constant"], count=8)
def test_host(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.gauge('gauge', 15, timestamp=100.0, host='test')

    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['host'], None)
    nt.assert_equal(c1['tags'], None)
    nt.assert_equal(c1['points'][0][1], 0.2)
    nt.assert_equal(c2['host'], 'test')
    nt.assert_equal(c2['tags'], None)
    nt.assert_equal(c2['points'][0][1], 0.1)
    nt.assert_equal(c3['host'], 'test')
    nt.assert_equal(c3['tags'], ['tag'])
    nt.assert_equal(c3['points'][0][1], 0.2)

    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['host'], None)
    nt.assert_equal(g1['points'][0][1], 10)
    nt.assert_equal(g2['host'], '')
    nt.assert_equal(g2['points'][0][1], 12)
    nt.assert_equal(g3['host'], 'test')
    nt.assert_equal(g3['points'][0][1], 15)

    # Ensure histograms work as well.
    @dog.timed('timed', host='test')
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, host='test')
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['host'] == 'test'
def test_host(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge("gauge", 12, timestamp=100.0, host="")  # unset the host
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, host="test")
    dog.gauge("gauge", 15, timestamp=100.0, host="test")

    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, host="test")
    dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])
    dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        nt.assert_equal(c["metric"], "counter")
    nt.assert_equal(c1["host"], None)
    nt.assert_equal(c1["tags"], None)
    nt.assert_equal(c1["points"][0][1], 2)
    nt.assert_equal(c2["host"], "test")
    nt.assert_equal(c2["tags"], None)
    nt.assert_equal(c2["points"][0][1], 1)
    nt.assert_equal(c3["host"], "test")
    nt.assert_equal(c3["tags"], ["tag"])
    nt.assert_equal(c3["points"][0][1], 2)

    for g in [g1, g2, g3]:
        nt.assert_equal(g["metric"], "gauge")
    nt.assert_equal(g1["host"], None)
    nt.assert_equal(g1["points"][0][1], 10)
    nt.assert_equal(g2["host"], "")
    nt.assert_equal(g2["points"][0][1], 12)
    nt.assert_equal(g3["host"], "test")
    nt.assert_equal(g3["points"][0][1], 15)

    # Ensure histograms work as well.
    @dog.timed("timed", host="test")
    def test():
        pass
    test()
    dog.histogram("timed", 20, timestamp=300.0, host="test")
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric["host"] == "test"
def test_host(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, host='test')
    dog.gauge('gauge', 15, timestamp=100.0, host='test')

    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, host='test')
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
    dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['host'], None)
    nt.assert_equal(c1['tags'], None)
    nt.assert_equal(c1['points'][0][1], 2)
    nt.assert_equal(c2['host'], 'test')
    nt.assert_equal(c2['tags'], None)
    nt.assert_equal(c2['points'][0][1], 1)
    nt.assert_equal(c3['host'], 'test')
    nt.assert_equal(c3['tags'], ['tag'])
    nt.assert_equal(c3['points'][0][1], 2)

    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['host'], None)
    nt.assert_equal(g1['points'][0][1], 10)
    nt.assert_equal(g2['host'], '')
    nt.assert_equal(g2['points'][0][1], 12)
    nt.assert_equal(g3['host'], 'test')
    nt.assert_equal(g3['points'][0][1], 15)

    # Ensure histograms work as well.
    @dog.timed('timed', host='test')
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, host='test')
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['host'] == 'test'
def test_constant_tags(self):
    dog = ThreadStats(constant_tags=['type:constant'])
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        nt.assert_equal(c['metric'], 'counter')
    nt.assert_equal(c1['tags'], ['env:production', 'db', 'type:constant'])
    nt.assert_equal(c1['points'][0][1], 1)
    nt.assert_equal(c2['tags'], ['env:staging', 'type:constant'])
    nt.assert_equal(c2['points'][0][1], 1)
    nt.assert_equal(c3['tags'], ['type:constant'])
    nt.assert_equal(c3['points'][0][1], 1)

    for g in [g1, g2, g3]:
        nt.assert_equal(g['metric'], 'gauge')
    nt.assert_equal(g1['tags'], ['env:production', 'db', 'type:constant'])
    nt.assert_equal(g1['points'][0][1], 15)
    nt.assert_equal(g2['tags'], ['env:staging', 'type:constant'])
    nt.assert_equal(g2['points'][0][1], 20)
    nt.assert_equal(g3['tags'], ['type:constant'])
    nt.assert_equal(g3['points'][0][1], 10)

    # Ensure histograms work as well.
    @dog.timed('timed', tags=['version:1'])
    def test():
        pass
    test()
    dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric['tags']  # this is enough
def test_tags_from_environment_and_constant(self):
    test_tags = ['country:china', 'age:45', 'blue']
    constant_tags = ['country:canada', 'red']
    with preserve_environment_variable('DATADOG_TAGS'):
        os.environ['DATADOG_TAGS'] = ','.join(test_tags)
        dog = ThreadStats(constant_tags=constant_tags)
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event1['tags'], constant_tags + test_tags)
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['tags'], constant_tags + test_tags)

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title, event1_text, priority=event1_priority,
                  date_happened=event1_date_happened, tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'], [event1_tag] + constant_tags + test_tags)
        dog.start(flush_interval=1, roll_up_interval=1)
class DatadogAdapter(BaseAdapter):
    """
    DatadogAdapter sends the given `Receipt` values to a local
    Datadog agent via dogstatsd.
    """
    METRIC_PREFIX = 'shop.{}'.format(slugify(settings.REGISTER_NAME))

    def __init__(self):
        # prepare the statsd client
        options = {
            'api_key': settings.DATADOG_API_KEY,
        }
        initialize(**options)

        # start the statsd thread
        disabled = not settings.DATADOG_API_KEY
        self.statsd = ThreadStats()
        self.statsd.start(flush_interval=1, roll_up_interval=1, disabled=disabled)
        logger.debug('statsd thread initialized, disabled: %s', disabled)

    def push(self, receipt):
        """
        Sends data to a local Datadog agent. The `Receipt` products are
        properly tagged using a stringify function so that they can be
        easily aggregated through Datadog backend.
        """
        try:
            # count the receipt
            timestamp = receipt.date.timestamp()
            count_metric = '{prefix}.receipt.count'.format(prefix=self.METRIC_PREFIX)
            self.statsd.increment(count_metric, timestamp=timestamp)

            for item in receipt.sell_set.all():
                # generate tags and metrics name
                tags = ['product:{}'.format(slugify(item.product.name))]
                items_count = '{prefix}.receipt.items.count'.format(prefix=self.METRIC_PREFIX)
                receipt_amount = '{prefix}.receipt.amount'.format(prefix=self.METRIC_PREFIX)

                # compute item metrics
                quantity = item.quantity
                total = float((item.price * item.quantity).amount)

                # send data
                self.statsd.increment(items_count, timestamp=timestamp,
                                      value=quantity, tags=tags)
                self.statsd.increment(receipt_amount, timestamp=timestamp,
                                      value=total, tags=tags)

            logger.debug('pushed metrics for %d sold items', receipt.sell_set.count())
        except Exception:
            raise AdapterPushFailed
def test_stop(self):
    dog = ThreadStats()
    dog.start(flush_interval=1, roll_up_interval=1)
    for i in range(10):
        dog.gauge('metric', i)
    time.sleep(2)
    flush_count = dog.flush_count
    assert flush_count

    dog.stop()
    for i in range(10):
        dog.gauge('metric', i)
    time.sleep(2)
    for i in range(10):
        dog.gauge('metric', i)
    time.sleep(2)
    assert dog.flush_count in [flush_count, flush_count + 1]
class DatadogMetricsBackend(MetricsBackend):
    def __init__(self, prefix=None, **kwargs):
        # TODO(dcramer): it'd be nice if the initialize call wasn't a global
        initialize(**kwargs)
        self._stats = ThreadStats()
        self._stats.start()
        super(DatadogMetricsBackend, self).__init__(prefix=prefix)

    def __del__(self):
        self._stats.stop()

    def incr(self, key, amount=1, sample_rate=1):
        self._stats.increment(self._get_key(key), amount,
                              sample_rate=sample_rate)

    def timing(self, key, value, sample_rate=1):
        self._stats.timing(self._get_key(key), value,
                           sample_rate=sample_rate)
def test_stop(self):
    dog = ThreadStats()
    dog.start(flush_interval=1, roll_up_interval=1)
    for i in range(10):
        dog.gauge("metric", i)
    time.sleep(2)
    flush_count = dog.flush_count
    assert flush_count

    dog.stop()
    for i in range(10):
        dog.gauge("metric", i)
    time.sleep(2)
    for i in range(10):
        dog.gauge("metric", i)
    time.sleep(2)
    assert dog.flush_count in [flush_count, flush_count + 1]
def test_constant_tags(self):
    dog = ThreadStats(constant_tags=["type:constant"])
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge("gauge", 10, timestamp=100.0)
    dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
    dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

    dog.increment("counter", timestamp=100.0)
    dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
    dog.increment("counter", timestamp=100.0, tags=["env:staging"])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 6)

    [c1, c2, c3, g1, g2, g3] = metrics
    for c in [c1, c2, c3]:
        nt.assert_equal(c["metric"], "counter")
    nt.assert_equal(c1["tags"], ["env:production", "db", "type:constant"])
    nt.assert_equal(c1["points"][0][1], 1)
    nt.assert_equal(c2["tags"], ["env:staging", "type:constant"])
    nt.assert_equal(c2["points"][0][1], 1)
    nt.assert_equal(c3["tags"], ["type:constant"])
    nt.assert_equal(c3["points"][0][1], 1)

    for g in [g1, g2, g3]:
        nt.assert_equal(g["metric"], "gauge")
    nt.assert_equal(g1["tags"], ["env:production", "db", "type:constant"])
    nt.assert_equal(g1["points"][0][1], 15)
    nt.assert_equal(g2["tags"], ["env:staging", "type:constant"])
    nt.assert_equal(g2["points"][0][1], 20)
    nt.assert_equal(g3["tags"], ["type:constant"])
    nt.assert_equal(g3["points"][0][1], 10)

    # Ensure histograms work as well.
    @dog.timed("timed", tags=["version:1"])
    def test():
        pass
    test()
    dog.histogram("timed", 20, timestamp=300.0, tags=["db", "version:2"])
    reporter.metrics = []
    dog.flush(400)
    for metric in reporter.metrics:
        assert metric["tags"]  # this is enough
def send(metric_name: str, data_value: float, **kwargs):
    tags = ['metric_submission:threadstats']
    if kwargs:
        for key, value in kwargs.items():
            if 'tag' in key:
                tags.append('{0}:{1}'.format(key[3:], value))

    options = {
        'api_key': '52ef848539fe3e746a1dc5d189c91315',
        'app_key': '76b1154922c2beea61fa4aefbda3d639373e4a12'
    }
    initialize(**options)

    stats = ThreadStats()
    stats.start()
    stats.gauge(metric_name, value=data_value, tags=tags)
def test_metric_namespace(self):
    """
    Namespace prefixes all metric names.
    """
    # Set up ThreadStats with a namespace
    dog = ThreadStats(namespace="foo")
    dog.start(roll_up_interval=1, flush_in_thread=False)
    dog.reporter = self.reporter

    # Send a few metrics
    dog.gauge("gauge", 20, timestamp=100.0)
    dog.increment("counter", timestamp=100.0)
    dog.flush(200.0)

    # Metric names are prefixed with the namespace
    self.assertMetric(count=2)
    self.assertMetric(name="foo.gauge", count=1)
    self.assertMetric(name="foo.counter", count=1)
class DatadogMetricsBackend(MetricsBackend):
    def __init__(self, prefix=None, **kwargs):
        self._stats = ThreadStats()
        self._stats.start()
        # TODO(dcramer): it'd be nice if the initialize call wasn't a global
        initialize(**kwargs)
        super(DatadogMetricsBackend, self).__init__(prefix=prefix)

    def __del__(self):
        self._stats.stop()

    def incr(self, key, amount=1, sample_rate=1):
        self._stats.increment(self._get_key(key), amount,
                              sample_rate=sample_rate)

    def timing(self, key, value, sample_rate=1):
        self._stats.timing(self._get_key(key), value,
                           sample_rate=sample_rate)
def test_event_constant_tags(self):
    constant_tag = 'type:constant'
    dog = ThreadStats(constant_tags=[constant_tag])
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Add two events
    event1_title = "Event 1 title"
    event2_title = "Event 1 title"
    event1_text = "Event 1 text"
    event2_text = "Event 2 text"
    dog.event(event1_title, event1_text)
    dog.event(event2_title, event2_text)

    # Flush and test
    dog.flush()
    event1, event2 = reporter.events
    nt.assert_equal(event1['title'], event1_title)
    nt.assert_equal(event1['text'], event1_text)
    nt.assert_equal(event1['tags'], [constant_tag])
    nt.assert_equal(event2['title'], event2_title)
    nt.assert_equal(event2['text'], event2_text)
    nt.assert_equal(event2['tags'], [constant_tag])

    # Test more parameters
    reporter.events = []
    event1_priority = "low"
    event1_date_happened = 1375296969
    event1_tag = "Event 2 tag"
    dog.event(event1_title, event1_text, priority=event1_priority,
              date_happened=event1_date_happened, tags=[event1_tag])

    # Flush and test
    dog.flush()
    event, = reporter.events
    nt.assert_equal(event['title'], event1_title)
    nt.assert_equal(event['text'], event1_text)
    nt.assert_equal(event['priority'], event1_priority)
    nt.assert_equal(event['date_happened'], event1_date_happened)
    nt.assert_equal(event['tags'], [event1_tag, constant_tag])
def test_counter(self):
    # Create some fake metrics.
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    dog.increment("test.counter.1", timestamp=1000.0)
    dog.increment("test.counter.1", value=2, timestamp=1005.0)
    dog.increment("test.counter.2", timestamp=1015.0)
    dog.increment("test.counter.3", timestamp=1025.0)
    dog.flush(1021.0)

    # Assert they've been properly flushed.
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 2)
    (first, second) = metrics
    nt.assert_equal(first["metric"], "test.counter.1")
    nt.assert_equal(first["points"][0][0], 1000.0)
    nt.assert_equal(first["points"][0][1], 3)
    nt.assert_equal(second["metric"], "test.counter.2")

    # Test decrement
    dog.increment("test.counter.1", value=10, timestamp=1000.0)
    dog.decrement("test.counter.1", value=2, timestamp=1005.0)
    reporter.metrics = []
    dog.flush(1021.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 1)
    first, = metrics
    nt.assert_equal(first["metric"], "test.counter.1")
    nt.assert_equal(first["points"][0][0], 1000.0)
    nt.assert_equal(first["points"][0][1], 8)
    nt.assert_equal(second["metric"], "test.counter.2")

    # Flush again and make sure we're progressing.
    reporter.metrics = []
    dog.flush(1030.0)
    nt.assert_equal(len(reporter.metrics), 1)

    # Finally, make sure we've flushed all metrics.
    reporter.metrics = []
    dog.flush(1050.0)
    nt.assert_equal(len(reporter.metrics), 0)
def test_counter(self):
    # Create some fake metrics.
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    dog.increment('test.counter.1', timestamp=1000.0)
    dog.increment('test.counter.1', value=2, timestamp=1005.0)
    dog.increment('test.counter.2', timestamp=1015.0)
    dog.increment('test.counter.3', timestamp=1025.0)
    dog.flush(1021.0)

    # Assert they've been properly flushed.
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 2)
    (first, second) = metrics
    nt.assert_equal(first['metric'], 'test.counter.1')
    nt.assert_equal(first['points'][0][0], 1000.0)
    nt.assert_equal(first['points'][0][1], 0.3)
    nt.assert_equal(second['metric'], 'test.counter.2')

    # Test decrement
    dog.increment('test.counter.1', value=10, timestamp=1000.0)
    dog.decrement('test.counter.1', value=2, timestamp=1005.0)
    reporter.metrics = []
    dog.flush(1021.0)

    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 1)
    first, = metrics
    nt.assert_equal(first['metric'], 'test.counter.1')
    nt.assert_equal(first['points'][0][0], 1000.0)
    nt.assert_equal(first['points'][0][1], 0.8)
    nt.assert_equal(second['metric'], 'test.counter.2')

    # Flush again and make sure we're progressing.
    reporter.metrics = []
    dog.flush(1030.0)
    nt.assert_equal(len(reporter.metrics), 1)

    # Finally, make sure we've flushed all metrics.
    reporter.metrics = []
    dog.flush(1050.0)
    nt.assert_equal(len(reporter.metrics), 0)
def test_event_constant_tags(self):
    constant_tag = "type:constant"
    dog = ThreadStats(constant_tags=[constant_tag])
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Add two events
    event1_title = "Event 1 title"
    event2_title = "Event 1 title"
    event1_text = "Event 1 text"
    event2_text = "Event 2 text"
    dog.event(event1_title, event1_text)
    dog.event(event2_title, event2_text)

    # Flush and test
    dog.flush()
    event1, event2 = reporter.events
    nt.assert_equal(event1["title"], event1_title)
    nt.assert_equal(event1["text"], event1_text)
    nt.assert_equal(event1["tags"], [constant_tag])
    nt.assert_equal(event2["title"], event2_title)
    nt.assert_equal(event2["text"], event2_text)
    nt.assert_equal(event2["tags"], [constant_tag])

    # Test more parameters
    reporter.events = []
    event1_priority = "low"
    event1_date_happened = 1375296969
    event1_tag = "Event 2 tag"
    dog.event(
        event1_title,
        event1_text,
        priority=event1_priority,
        date_happened=event1_date_happened,
        tags=[event1_tag]
    )

    # Flush and test
    dog.flush()
    event, = reporter.events
    nt.assert_equal(event["title"], event1_title)
    nt.assert_equal(event["text"], event1_text)
    nt.assert_equal(event["priority"], event1_priority)
    nt.assert_equal(event["date_happened"], event1_date_happened)
    nt.assert_equal(event["tags"], [event1_tag, constant_tag])
def test_tags(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Post the same metric with different tags.
    dog.gauge('gauge', 10, timestamp=100.0)
    dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
    dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

    dog.increment('counter', timestamp=100.0)
    dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
    dog.increment('counter', timestamp=100.0, tags=['env:staging'])

    dog.flush(200.0)

    metrics = self.sort_metrics(reporter.metrics)
    assert len(metrics) == 6

    [c1, c2, c3, g1, g2, g3] = metrics
    assert c1['metric'] == 'counter'
    assert c2['metric'] == 'counter'
    assert c3['metric'] == 'counter'
    assert c1['tags'] is None
    assert c1['points'][0][1] == 0.1
    assert c2['tags'] == ['env:production', 'db']
    assert c2['points'][0][1] == 0.1
    assert c3['tags'] == ['env:staging']
    assert c3['points'][0][1] == 0.1

    assert g1['metric'] == 'gauge'
    assert g2['metric'] == 'gauge'
    assert g3['metric'] == 'gauge'
    assert g1['tags'] is None
    assert g1['points'][0][1] == 10
    assert g2['tags'] == ['env:production', 'db']
    assert g2['points'][0][1] == 15
    assert g3['tags'] == ['env:staging']
    assert g3['points'][0][1] == 20
def test_event(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Add two events
    event1_title = "Event 1 title"
    event2_title = "Event 1 title"
    event1_text = "Event 1 text"
    event2_text = "Event 2 text"
    dog.event(event1_title, event1_text)
    dog.event(event2_title, event2_text)

    # Flush and test
    dog.flush()
    event1, event2 = reporter.events
    assert event1['title'] == event1_title
    assert event1['text'] == event1_text
    assert event2['title'] == event2_title
    assert event2['text'] == event2_text

    # Test more parameters
    reporter.events = []
    event1_priority = "low"
    event1_date_happened = 1375296969
    event1_tag = "Event 2 tag"
    dog.event(event1_title, event1_text, priority=event1_priority,
              date_happened=event1_date_happened, tags=[event1_tag])

    # Flush and test
    dog.flush()
    event, = reporter.events
    assert event['title'] == event1_title
    assert event['text'] == event1_text
    assert event['priority'] == event1_priority
    assert event['date_happened'] == event1_date_happened
    assert event['tags'] == [event1_tag]
def test_tags_from_environment_env_service_version(self):
    test_tags = set(['env:staging', 'service:food', 'version:1.2.3'])
    with EnvVars(env_vars={
        "DD_ENV": "staging",
        "DD_VERSION": "1.2.3",
        "DD_SERVICE": "food",
    }):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event1_text = "Event 1 text"
        dog.event(event1_title, event1_text)

        # Flush and test
        dog.flush()
        [event1] = reporter.events
        assert event1['title'] == event1_title
        assert event1['text'] == event1_text
        assert set(event1['tags']) == test_tags
def test_event(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Add two events
    event1_title = "Event 1 title"
    event2_title = "Event 1 title"
    event1_text = "Event 1 text"
    event2_text = "Event 2 text"
    dog.event(event1_title, event1_text)
    dog.event(event2_title, event2_text)

    # Flush and test
    dog.flush()
    event1, event2 = reporter.events
    nt.assert_equal(event1['title'], event1_title)
    nt.assert_equal(event1['text'], event1_text)
    nt.assert_equal(event2['title'], event2_title)
    nt.assert_equal(event2['text'], event2_text)

    # Test more parameters
    reporter.events = []
    event1_priority = "low"
    event1_date_happened = 1375296969
    event1_tag = "Event 2 tag"
    dog.event(event1_title, event1_text, priority=event1_priority,
              date_happened=event1_date_happened, tags=[event1_tag])

    # Flush and test
    dog.flush()
    event, = reporter.events
    nt.assert_equal(event['title'], event1_title)
    nt.assert_equal(event['text'], event1_text)
    nt.assert_equal(event['priority'], event1_priority)
    nt.assert_equal(event['date_happened'], event1_date_happened)
    nt.assert_equal(event['tags'], [event1_tag])
def send_metric(metric_name: str, data_value: float, **kwargs):
    tags = ['metric_submission:threadstats']
    if kwargs:
        for key, value in kwargs.items():
            if 'tag' in key:
                tags.append('{0}:{1}'.format(key[3:], value))

    api_key = config.Get().datadog_config()['api_key']
    app_key = config.Get().datadog_config()['app_key']
    options = {'api_key': api_key, 'app_key': app_key}
    initialize(**options)

    stats = ThreadStats()
    stats.start()
    try:
        stats.gauge(metric_name, value=data_value, tags=tags)
        return True
    except Exception as e:
        print(e)
        return False
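# Hypothetical call illustrating the tag convention above: any keyword whose
# name contains 'tag' is converted to a Datadog tag by dropping its first
# three characters, so tagenv='prod' becomes the tag 'env:prod'. The metric
# name and values here are made up for illustration.
send_metric('worker.queue_depth', 42.0, tagenv='prod', tagregion='us-east-1')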
def stats(self):
    instance = ThreadStats()
    instance.start()
    return instance
def test_histogram(self):
    dog = ThreadStats()
    dog.start(roll_up_interval=10, flush_in_thread=False)
    reporter = dog.reporter = MemoryReporter()

    # Add some histogram metrics.
    dog.histogram("histogram.1", 20, 100.0)
    dog.histogram("histogram.1", 30, 105.0)
    dog.histogram("histogram.1", 40, 106.0)
    dog.histogram("histogram.1", 50, 106.0)
    dog.histogram("histogram.1", 30, 110.0)
    dog.histogram("histogram.1", 50, 115.0)
    dog.histogram("histogram.1", 40, 116.0)
    dog.histogram("histogram.2", 40, 100.0)
    dog.histogram("histogram.3", 50, 134.0)

    # Flush and ensure they roll up properly.
    dog.flush(120.0)
    metrics = self.sort_metrics(reporter.metrics)
    nt.assert_equal(len(metrics), 24)

    # Test histograms elsewhere.
    (
        h1751, h1851, h1951, h1991,
        h1avg1, h1cnt1, h1max1, h1min1,
        _, _, _, _,
        h2avg1, h2cnt1, h2max1, h2min1,
        h1752, _, _, h1992,
        h1avg2, h1cnt2, h1max2, h1min2,
    ) = metrics

    nt.assert_equal(h1avg1["metric"], "histogram.1.avg")
    nt.assert_equal(h1avg1["points"][0][0], 100.0)
    nt.assert_equal(h1avg1["points"][0][1], 35)
    nt.assert_equal(h1cnt1["metric"], "histogram.1.count")
    nt.assert_equal(h1cnt1["points"][0][0], 100.0)
    nt.assert_equal(h1cnt1["points"][0][1], 4)
    nt.assert_equal(h1min1["metric"], "histogram.1.min")
    nt.assert_equal(h1min1["points"][0][1], 20)
    nt.assert_equal(h1max1["metric"], "histogram.1.max")
    nt.assert_equal(h1max1["points"][0][1], 50)
    nt.assert_equal(h1751["metric"], "histogram.1.75percentile")
    nt.assert_equal(h1751["points"][0][1], 40)
    nt.assert_equal(h1991["metric"], "histogram.1.99percentile")
    nt.assert_equal(h1991["points"][0][1], 50)

    nt.assert_equal(h1avg2["metric"], "histogram.1.avg")
    nt.assert_equal(h1avg2["points"][0][0], 110.0)
    nt.assert_equal(h1avg2["points"][0][1], 40)
    nt.assert_equal(h1cnt2["metric"], "histogram.1.count")
    nt.assert_equal(h1cnt2["points"][0][0], 110.0)
    nt.assert_equal(h1cnt2["points"][0][1], 3)
    nt.assert_equal(h1752["metric"], "histogram.1.75percentile")
    nt.assert_equal(h1752["points"][0][0], 110.0)
    nt.assert_equal(h1752["points"][0][1], 40.0)
    nt.assert_equal(h1992["metric"], "histogram.1.99percentile")
    nt.assert_equal(h1992["points"][0][0], 110.0)
    nt.assert_equal(h1992["points"][0][1], 50.0)

    nt.assert_equal(h2avg1["metric"], "histogram.2.avg")
    nt.assert_equal(h2avg1["points"][0][0], 100.0)
    nt.assert_equal(h2avg1["points"][0][1], 40)
    nt.assert_equal(h2cnt1["metric"], "histogram.2.count")
    nt.assert_equal(h2cnt1["points"][0][0], 100.0)
    nt.assert_equal(h2cnt1["points"][0][1], 1)

    # Flush again ensure they're gone.
    dog.reporter.metrics = []
    dog.flush(140.0)
    nt.assert_equal(len(dog.reporter.metrics), 8)
    dog.reporter.metrics = []
    dog.flush(200.0)
    nt.assert_equal(len(dog.reporter.metrics), 0)
import json
from StringIO import StringIO
from base64 import b64decode

import boto3
from datadog import initialize, ThreadStats

# retrieve datadog options from KMS
# Enter the base-64 encoded, encrypted Datadog token (CiphertextBlob)
KMS_ENCRYPTED_KEYS = "<KMS_ENCRYPTED_KEYS>"
kms = boto3.client('kms')
datadog_keys = kms.decrypt(CiphertextBlob=b64decode(KMS_ENCRYPTED_KEYS))['Plaintext']
initialize(**json.loads(datadog_keys))

stats = ThreadStats()
stats.start(flush_in_thread=False)
print 'Lambda function initialized, ready to send metrics'


def _process_rds_enhanced_monitoring_message(base_tags, ts, message):
    engine = message["engine"]
    instance_id = message["instanceID"]
    tags = [
        'engine:%s' % engine,
        'dbinstanceidentifier:%s' % instance_id,
    ] + base_tags

    # metrics generation
def check(self):
    logging.info('check info')
    try:
        yaml_file = os.environ.get('DATADOG_CONF',
                                   '%s/aws_redshift_status.yaml' % config.get_confd_path())
        yaml_data = yaml.load(file(yaml_file))
        init_config = yaml_data['init_config']
        interval = init_config.get('min_collection_interval', 300)

        stats = ThreadStats()
        stats.start(flush_interval=10, roll_up_interval=1, device=None,
                    flush_in_thread=False, flush_in_greenlet=False, disabled=False)

        start = time.time()
        for instance in yaml_data['instances']:
            logging.debug('instance name is %s' % instance['name'])

            name, cluster_name, cluster_address, cluster_port, db_name, user_name, user_password, \
                aws_access_key_id, aws_secret_access_key, aws_region, query, \
                tags = self._load_conf(instance)

            if cluster_address is None and cluster_port is None:
                redshift = boto.redshift.connect_to_region(
                    aws_region,
                    aws_access_key_id=aws_access_key_id,
                    aws_secret_access_key=aws_secret_access_key)
                clusters = redshift.describe_clusters(cluster_name)
                if len(clusters) == 0:
                    raise Exception('Cluster is empty')
                cluster = clusters['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
                endpoint = cluster['Endpoint']
                cluster_address = endpoint['Address']
                cluster_port = endpoint['Port']

            conn = None
            try:
                connect_timeout = init_config.get('connect_timeout', 5)
                conn = psycopg2.connect(
                    host=cluster_address,
                    port=cluster_port,
                    database=db_name,
                    user=user_name,
                    password=user_password,
                    connect_timeout=connect_timeout,
                )

                today = datetime.datetime.utcnow()
                starttime = (today - datetime.timedelta(seconds=interval)).strftime('%Y-%m-%d %H:%M:%S.%f')
                endtime = today.strftime('%Y-%m-%d %H:%M:%S.%f')

                results = self._db_query(conn, QUERY_TABLE_COUNT)
                stats.gauge('aws.redshift_status.table_count', results[0][0], tags=tags)
                logging.debug('aws.redshift_status.table_count is %s' % results[0][0])

                results = self._db_query(conn, QUERY_NODE)
                for row in results:
                    gauge_tags = tags[:]
                    gauge_tags.append('node:%s' % row[0])
                    stats.gauge('aws_redshift_status.node_slice', row[1], tags=gauge_tags)
                    logging.debug('aws_redshift_status.node_slice is %s' % row[1])

                results = self._db_query(conn, QUERY_TABLE_RECORD)
                for row in results:
                    gauge_tags = tags[:]
                    gauge_tags.append('table:%s' % row[0])
                    stats.gauge('aws_redshift_status.table_records', row[1], tags=gauge_tags)
                    logging.debug('aws_redshift_status.table_records is %s' % row[1])

                results = self._db_query(conn, QUERY_TABLE_STATUS)
                for row in results:
                    gauge_tags = tags[:]
                    gauge_tags.append('table:%s' % row[0])
                    stats.gauge('aws_redshift_status.table_status.size', row[1], tags=gauge_tags)
                    logging.debug('aws_redshift_status.table_status.size is %s' % row[1])
                    stats.gauge('aws_redshift_status.table_status.tbl_rows', row[2], tags=gauge_tags)
                    logging.debug('aws_redshift_status.table_status.tbl_rows is %s' % row[2])
                    stats.gauge('aws_redshift_status.table_status.skew_rows', row[3], tags=gauge_tags)
                    logging.debug('aws_redshift_status.table_status.skew_rows is %s' % row[3])

                for q in ['select', 'insert', 'update', 'delete', 'analyze']:
                    results = self._db_query(conn, QUERY_LOG_TYPE % (starttime, endtime, '%s %%' % q))
                    for row in results:
                        stats.gauge('aws_redshift_status.query.%s' % q, row[0], tags=tags)
                        logging.debug('aws_redshift_status.query.%s is %s' % (q, row[0]))

                running_time = time.time() - start
                stats.gauge('aws_redshift_status.response_time', running_time, tags=tags)
                logging.debug('aws_redshift_status.response_time is %s' % running_time)
            finally:
                if conn:
                    conn.close()

        stats.flush()
        stop = stats.stop()
        logging.debug('Stopping is %s' % stop)
    except Exception:
        logging.warning(sys.exc_info())