Example #1
    def test_counter(self):
        stats = MetricsAggregator('myhost')

        # Track some counters.
        stats.submit_packets('my.first.counter:1|c')
        stats.submit_packets('my.first.counter:5|c')
        stats.submit_packets('my.second.counter:1|c')
        stats.submit_packets('my.third.counter:3|c')

        # Ensure they roll up nicely.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3

        first, second, third = metrics
        nt.assert_equals(first['metric'], 'my.first.counter')
        nt.assert_equals(first['points'][0][1], 6)
        nt.assert_equals(first['host'], 'myhost')

        nt.assert_equals(second['metric'], 'my.second.counter')
        nt.assert_equals(second['points'][0][1], 1)

        nt.assert_equals(third['metric'], 'my.third.counter')
        nt.assert_equals(third['points'][0][1], 3)

        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        first, second, third = metrics
        nt.assert_equals(first['metric'], 'my.first.counter')
        nt.assert_equals(first['points'][0][1], 0)
        nt.assert_equals(second['metric'], 'my.second.counter')
        nt.assert_equals(second['points'][0][1], 0)
        nt.assert_equals(third['metric'], 'my.third.counter')
        nt.assert_equals(third['points'][0][1], 0)
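
The packets fed to submit_packets in these tests follow the dogstatsd wire format: metric.name:value|type, optionally followed by |@sample_rate and |#tag1,tag2 sections. As a rough illustration only (not the agent's actual parser, and ignoring the multi-value "monokey" packets shown later), a splitter for that format might look like this; split_packet is a hypothetical helper:

    # Illustrative sketch only -- not the agent's parser. Splits a single
    # dogstatsd-style packet into (name, value, type, sample_rate, tags).
    def split_packet(packet):
        name, rest = packet.split(':', 1)
        fields = rest.split('|')
        value, metric_type = fields[0], fields[1]
        sample_rate, tags = 1.0, None
        for extra in fields[2:]:
            if extra.startswith('@'):
                sample_rate = float(extra[1:])
            elif extra.startswith('#'):
                tags = tuple(extra[1:].split(','))
        return name, value, metric_type, sample_rate, tags

    assert split_packet('my.first.counter:5|c') == \
        ('my.first.counter', '5', 'c', 1.0, None)
    assert split_packet('sampled.counter:1|c|@0.5') == \
        ('sampled.counter', '1', 'c', 0.5, None)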
Example #2
    def test_monokey_batching_withtags_with_sampling(self):
        # The min is not enabled by default
        stats = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
        )
        stats.submit_packets('test_metric:1.5|c|#tag1:one,tag2:two:2.3|g|#tag3:three:3|g:42|h|#tag1:12,tag42:42|@0.22')

        stats_ref = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
        )
        packets = [
            'test_metric:1.5|c|#tag1:one,tag2:two',
            'test_metric:2.3|g|#tag3:three',
            'test_metric:3|g',
            'test_metric:42|h|#tag1:12,tag42:42|@0.22'
        ]
        stats_ref.submit_packets("\n".join(packets))

        metrics = self.sort_metrics(stats.flush())
        metrics_ref = self.sort_metrics(stats_ref.flush())

        self.assertTrue(len(metrics) == len(metrics_ref) == 9, (metrics, metrics_ref))
        for i in range(len(metrics)):
            nt.assert_equal(metrics[i]['points'][0][1], metrics_ref[i]['points'][0][1])
            nt.assert_equal(metrics[i]['tags'], metrics_ref[i]['tags'])
Example #3
    def test_gauge(self):
        stats = MetricsAggregator("myhost")

        # Track some gauges.
        stats.submit_packets("my.first.gauge:1|g")
        stats.submit_packets("my.first.gauge:5|g")
        stats.submit_packets("my.second.gauge:1.5|g")

        # Ensure that gauges roll up correctly.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        nt.assert_equals(first["metric"], "my.first.gauge")
        nt.assert_equals(first["points"][0][1], 5)
        nt.assert_equals(first["host"], "myhost")

        nt.assert_equals(second["metric"], "my.second.gauge")
        nt.assert_equals(second["points"][0][1], 1.5)

        # Ensure that old gauges get dropped due to old timestamps
        stats.gauge("my.first.gauge", 5)
        stats.gauge("my.first.gauge", 1, timestamp=1000000000)
        stats.gauge("my.second.gauge", 20, timestamp=1000000000)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]

        nt.assert_equals(first["metric"], "my.first.gauge")
        nt.assert_equals(first["points"][0][1], 5)
        nt.assert_equals(first["host"], "myhost")
Example #4
    def test_gauge(self):
        stats = MetricsAggregator('myhost')

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')

        # Ensure that gauges roll up correctly.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        nt.assert_equals(first['metric'], 'my.first.gauge')
        nt.assert_equals(first['points'][0][1], 5)
        nt.assert_equals(first['host'], 'myhost')

        nt.assert_equals(second['metric'], 'my.second.gauge')
        nt.assert_equals(second['points'][0][1], 1.5)

        # Ensure that old gauges get dropped due to old timestamps
        stats.gauge('my.first.gauge', 5)
        stats.gauge('my.first.gauge', 1, timestamp=1000000000)
        stats.gauge('my.second.gauge', 20, timestamp=1000000000)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]

        nt.assert_equals(first['metric'], 'my.first.gauge')
        nt.assert_equals(first['points'][0][1], 5)
        nt.assert_equals(first['host'], 'myhost')
Example #5
    def test_histogram(self):
        stats = MetricsAggregator('myhost')

        # Sample all numbers between 1-100 many times. This
        # means our percentiles should be relatively close to themselves.
        percentiles = range(100)
        random.shuffle(percentiles) # in place
        for i in percentiles:
            for j in xrange(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        metrics = self.sort_metrics(stats.flush())

        nt.assert_equal(len(metrics), 5)
        p95, pavg, pcount, pmax, pmed = self.sort_metrics(metrics)
        nt.assert_equal(p95['metric'], 'my.p.95percentile')
        self.assert_almost_equal(p95['points'][0][1], 95, 10)
        self.assert_almost_equal(pmax['points'][0][1], 99, 1)
        self.assert_almost_equal(pmed['points'][0][1], 50, 2)
        self.assert_almost_equal(pavg['points'][0][1], 50, 2)
        self.assert_almost_equal(pcount['points'][0][1], 4000, 0) # 100 * 20 * 2
        nt.assert_equals(p95['host'], 'myhost')

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
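
The five metrics checked above are the default histogram aggregates: max, median, average, count and the 95th percentile of the samples received during the interval. A self-contained sketch of how such aggregates can be derived from raw samples follows; histogram_aggregates is an illustrative helper, and the agent's own percentile interpolation may differ slightly:

    # Illustrative sketch only: histogram aggregates from raw samples.
    def histogram_aggregates(samples):
        ordered = sorted(samples)
        n = len(ordered)
        return {
            'max': ordered[-1],
            'median': ordered[int(round(n * 0.5)) - 1],
            'avg': sum(ordered) / float(n),
            'count': n,
            '95percentile': ordered[int(round(n * 0.95)) - 1],
        }

    samples = [i for i in range(100) for _ in range(20)]  # 0..99, 20 times each
    aggs = histogram_aggregates(samples)
    assert aggs['max'] == 99 and aggs['count'] == 2000
    assert abs(aggs['95percentile'] - 95) <= 1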
Example #6
    def test_histogram(self):
        # The min is not enabled by default
        stats = MetricsAggregator(
            'myhost',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
        )

        # Sample all numbers between 1-100 many times. This
        # means our percentiles should be relatively close to themselves.
        percentiles = range(100)
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in xrange(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        metrics = self.sort_metrics(stats.flush())

        nt.assert_equal(len(metrics), 6)
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)
        nt.assert_equal(p95[0], 'my.p.95percentile')
        self.assert_almost_equal(p95[2], 95, 10)
        self.assert_almost_equal(pmax[2], 99, 1)
        self.assert_almost_equal(pmed[2], 50, 2)
        self.assert_almost_equal(pavg[2], 50, 2)
        self.assert_almost_equal(pmin[2], 1, 1)
        self.assert_almost_equal(pcount[2], 4000, 0)  # 100 * 20 * 2
        nt.assert_equals(p95[3]['hostname'], 'myhost')

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
Example #7
    def test_dogstatsd_aggregation_perf(self):
        ma = MetricsAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    # metrics
                    ma.submit_packets('counter.%s:%s|c' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h' % (j, i))
                    ma.submit_packets('set.%s:%s|s' % (j, 1.0))

                    # tagged metrics
                    ma.submit_packets('counter.%s:%s|c|#tag1,tag2' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|#tag1,tag2' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|#tag1,tag2' % (j, i))
                    ma.submit_packets('set.%s:%s|s|#tag1,tag2' % (j, i))

                    # sampled metrics
                    ma.submit_packets('counter.%s:%s|c|@0.5' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|@0.5' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|@0.5' % (j, i))
                    ma.submit_packets('set.%s:%s|s|@0.5' % (j, i))

            ma.flush()
Example #8
    def test_monokey_batching_notags(self):
        # The min is not enabled by default
        stats = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
        )
        stats.submit_packets('test_hist:0.3|ms:2.5|ms|@0.5:3|ms')

        stats_ref = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
        )
        packets = [
            'test_hist:0.3|ms',
            'test_hist:2.5|ms|@0.5',
            'test_hist:3|ms'
        ]
        stats_ref.submit_packets("\n".join(packets))

        metrics = stats.flush()
        metrics_ref = stats_ref.flush()

        self.assertTrue(len(metrics) == len(metrics_ref) == 6, (metrics, metrics_ref))

        for i in range(len(metrics)):
            nt.assert_equal(metrics[i]['points'][0][1], metrics_ref[i]['points'][0][1])
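
The "monokey" packets in these batching tests pack several value segments under a single metric name, e.g. test_hist:0.3|ms:2.5|ms|@0.5:3|ms, and the assertions verify that this aggregates exactly like the equivalent separate packets. A simplified sketch of that expansion for the untagged case is below (explode_monokey is a hypothetical helper; tag values containing ':' as in the *_withtags tests would need more careful splitting):

    # Illustrative sketch only: expand an untagged 'monokey' packet into the
    # equivalent list of single-value packets.
    def explode_monokey(packet):
        name, rest = packet.split(':', 1)
        return ['%s:%s' % (name, segment) for segment in rest.split(':')]

    assert explode_monokey('test_hist:0.3|ms:2.5|ms|@0.5:3|ms') == [
        'test_hist:0.3|ms',
        'test_hist:2.5|ms|@0.5',
        'test_hist:3|ms',
    ]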
Example #9
    def test_recent_point_threshold(self):
        threshold = 100
        # The min is not enabled by default
        stats = MetricsAggregator(
            'myhost',
            recent_point_threshold=threshold,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
        )
        timestamp_beyond_threshold = time.time() - threshold*2
        timestamp_within_threshold = time.time() - threshold/2

        # Ensure that old gauges get dropped due to old timestamps
        stats.submit_metric('my.first.gauge', 5, 'g')
        stats.submit_metric('my.first.gauge', 1, 'g', timestamp=timestamp_beyond_threshold)
        stats.submit_metric('my.second.gauge', 20, 'g', timestamp=timestamp_beyond_threshold)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]
        nt.assert_equals(first['metric'], 'my.first.gauge')
        nt.assert_equals(first['points'][0][1], 5)
        nt.assert_equals(first['host'], 'myhost')

        # Ensure that old gauges get dropped due to old timestamps
        stats.submit_metric('my.1.gauge', 5, 'g')
        stats.submit_metric('my.1.gauge', 1, 'g', timestamp=timestamp_within_threshold)
        stats.submit_metric('my.2.counter', 20, 'c', timestamp=timestamp_within_threshold)
        stats.submit_metric('my.3.set', 20, 's', timestamp=timestamp_within_threshold)
        stats.submit_metric('my.4.histogram', 20, 'h', timestamp=timestamp_within_threshold)

        flush_timestamp = time.time()
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 9)

        first, second, third, h1, h2, h3, h4, h5, h6 = metrics
        nt.assert_equals(first['metric'], 'my.1.gauge')
        nt.assert_equals(first['points'][0][1], 1)
        nt.assert_equals(first['host'], 'myhost')
        self.assert_almost_equal(first['points'][0][0], timestamp_within_threshold, 0.1)

        nt.assert_equals(second['metric'], 'my.2.counter')
        nt.assert_equals(second['points'][0][1], 20)
        self.assert_almost_equal(second['points'][0][0], flush_timestamp, 0.1)

        nt.assert_equals(third['metric'], 'my.3.set')
        nt.assert_equals(third['points'][0][1], 1)
        self.assert_almost_equal(third['points'][0][0], flush_timestamp, 0.1)

        nt.assert_equals(h1['metric'], 'my.4.histogram.95percentile')
        nt.assert_equals(h1['points'][0][1], 20)
        self.assert_almost_equal(h1['points'][0][0], flush_timestamp, 0.1)
        nt.assert_equal(h1['points'][0][0], h2['points'][0][0])
        nt.assert_equal(h1['points'][0][0], h3['points'][0][0])
        nt.assert_equal(h1['points'][0][0], h4['points'][0][0])
        nt.assert_equal(h1['points'][0][0], h5['points'][0][0])
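
The recency check exercised above is simple: a point submitted with an explicit timestamp older than recent_point_threshold seconds is discarded, while points within the threshold keep their submitted timestamp. A minimal sketch of that rule (is_too_old is an illustrative helper, not part of the aggregator API):

    import time

    # Illustrative sketch only: the recency rule the test relies on.
    def is_too_old(timestamp, threshold, now=None):
        now = time.time() if now is None else now
        return (now - timestamp) > threshold

    now = time.time()
    assert is_too_old(now - 200, threshold=100, now=now)      # beyond threshold
    assert not is_too_old(now - 50, threshold=100, now=now)   # within threshold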
Example #10
    def test_checksd_aggregation_perf(self):
        ma = MetricsAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                # Counters
                for j in xrange(self.METRIC_COUNT):
                    ma.increment('counter.%s' % j, i)
                    ma.gauge('gauge.%s' % j, i)
                    ma.histogram('histogram.%s' % j, i)
                    ma.set('set.%s' % j, float(i))
            ma.flush()
Example #11
    def test_metrics_expiry(self):
        # Ensure metrics eventually expire and stop submitting.
        ag_interval = 1
        expiry = ag_interval * 4 + 2
        # The min is not enabled by default
        stats = MetricsAggregator(
            'myhost',
            interval=ag_interval,
            expiry_seconds=expiry,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES+['min']
        )
        stats.submit_packets('test.counter:123|c')
        stats.submit_packets('test.gauge:55|g')
        stats.submit_packets('test.set:44|s')
        stats.submit_packets('test.histogram:11|h')

        # Ensure points keep submitting
        time.sleep(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 9)
        nt.assert_equal(metrics[0]['metric'], 'test.counter')
        nt.assert_equal(metrics[0]['points'][0][1], 123)
        time.sleep(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 1)
        nt.assert_equal(metrics[0]['metric'], 'test.counter')
        nt.assert_equal(metrics[0]['points'][0][1], 0)

        time.sleep(ag_interval)
        time.sleep(0.5)
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 1)
        nt.assert_equal(metrics[0]['metric'], 'test.counter')
        nt.assert_equal(metrics[0]['points'][0][1], 0)

        # Now sleep for longer than the expiry window and ensure
        # no points are submitted
        time.sleep(ag_interval)
        time.sleep(2)
        m = stats.flush()
        assert not m, str(m)

        # If we submit again, we're all good.
        stats.submit_packets('test.counter:123|c')
        stats.submit_packets('test.gauge:55|g')
        stats.submit_packets('test.set:44|s')
        stats.submit_packets('test.histogram:11|h')

        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 9)
        nt.assert_equal(metrics[0]['metric'], 'test.counter')
        nt.assert_equal(metrics[0]['points'][0][1], 123)
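
The expiry behaviour being tested boils down to: a context that has received no new samples for longer than expiry_seconds stops being flushed (counters keep flushing zeros until they expire too). A hedged sketch of that check, where is_expired is an illustrative helper rather than the aggregator's API:

    # Illustrative sketch only: a context whose last sample is older than
    # expiry_seconds is dropped from subsequent flushes.
    def is_expired(last_sample_time, expiry_seconds, now):
        return (now - last_sample_time) > expiry_seconds

    expiry = 1 * 4 + 2                      # matches the test's expiry window
    assert is_expired(last_sample_time=0, expiry_seconds=expiry, now=10)
    assert not is_expired(last_sample_time=8, expiry_seconds=expiry, now=10)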
Example #12
    def test_recent_point_threshold(self):
        threshold = 100
        stats = MetricsAggregator("myhost", recent_point_threshold=threshold)
        timestamp_beyond_threshold = time.time() - threshold * 2
        timestamp_within_threshold = time.time() - threshold / 2

        # Ensure that old gauges get dropped due to old timestamps
        stats.submit_metric("my.first.gauge", 5, "g")
        stats.submit_metric("my.first.gauge", 1, "g", timestamp=timestamp_beyond_threshold)
        stats.submit_metric("my.second.gauge", 20, "g", timestamp=timestamp_beyond_threshold)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]
        nt.assert_equals(first["metric"], "my.first.gauge")
        nt.assert_equals(first["points"][0][1], 5)
        nt.assert_equals(first["host"], "myhost")

        # Ensure that old gauges get dropped due to old timestamps
        stats.submit_metric("my.1.gauge", 5, "g")
        stats.submit_metric("my.1.gauge", 1, "g", timestamp=timestamp_within_threshold)
        stats.submit_metric("my.2.counter", 20, "c", timestamp=timestamp_within_threshold)
        stats.submit_metric("my.3.set", 20, "s", timestamp=timestamp_within_threshold)
        stats.submit_metric("my.4.histogram", 20, "h", timestamp=timestamp_within_threshold)

        flush_timestamp = time.time()
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 8)

        first, second, third, h1, h2, h3, h4, h5 = metrics
        nt.assert_equals(first["metric"], "my.1.gauge")
        nt.assert_equals(first["points"][0][1], 1)
        nt.assert_equals(first["host"], "myhost")
        self.assert_almost_equal(first["points"][0][0], timestamp_within_threshold, 0.1)

        nt.assert_equals(second["metric"], "my.2.counter")
        nt.assert_equals(second["points"][0][1], 20)
        self.assert_almost_equal(second["points"][0][0], flush_timestamp, 0.1)

        nt.assert_equals(third["metric"], "my.3.set")
        nt.assert_equals(third["points"][0][1], 1)
        self.assert_almost_equal(third["points"][0][0], flush_timestamp, 0.1)

        nt.assert_equals(h1["metric"], "my.4.histogram.95percentile")
        nt.assert_equals(h1["points"][0][1], 20)
        self.assert_almost_equal(h1["points"][0][0], flush_timestamp, 0.1)
        nt.assert_equal(h1["points"][0][0], h2["points"][0][0])
        nt.assert_equal(h1["points"][0][0], h3["points"][0][0])
        nt.assert_equal(h1["points"][0][0], h4["points"][0][0])
        nt.assert_equal(h1["points"][0][0], h5["points"][0][0])
Example #13
    def test_magic_tags(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.gauge.a:1|c|#host:test-a')
        stats.submit_packets('my.gauge.b:4|c|#tag1,tag2,host:test-b')
        stats.submit_packets('my.gauge.b:8|c|#host:test-b,tag2,tag1')
        stats.submit_packets('my.gauge.c:10|c|#tag3')
        stats.submit_packets('my.gauge.c:16|c|#device:floppy,tag3')

        metrics = self.sort_metrics(stats.flush())

        nt.assert_equal(len(metrics), 4)
        first, second, third, fourth = metrics

        nt.assert_equal(first['metric'], 'my.gauge.a')
        nt.assert_equal(first['tags'], None)
        nt.assert_equal(first['points'][0][1], 1)
        nt.assert_equal(first['host'], 'test-a')

        nt.assert_equal(second['metric'], 'my.gauge.b')
        nt.assert_equal(second['tags'], ('tag1', 'tag2'))
        nt.assert_equal(second['points'][0][1], 12)
        nt.assert_equal(second['host'], 'test-b')

        nt.assert_equal(third['metric'], 'my.gauge.c')
        nt.assert_equal(third['tags'], ('tag3', ))
        nt.assert_equal(third['points'][0][1], 10)
        nt.assert_equal(third['device_name'], None)

        nt.assert_equal(fourth['metric'], 'my.gauge.c')
        nt.assert_equal(fourth['tags'], ('tag3', ))
        nt.assert_equal(fourth['points'][0][1], 16)
        nt.assert_equal(fourth['device_name'], 'floppy')
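
The "magic" tags exercised above are host: and device:, which are removed from the tag list and used to override the metric's host and device_name fields. A rough sketch of that extraction follows; extract_magic_tags is an illustrative helper, not the aggregator's API, and the fallback to the aggregator's hostname is an assumption:

    # Illustrative sketch only: pull host:/device: out of the tag list and
    # return (plain_tags, host, device_name).
    def extract_magic_tags(tags, default_host):
        host, device_name, plain = default_host, None, []
        for tag in tags:
            if tag.startswith('host:'):
                host = tag[len('host:'):]
            elif tag.startswith('device:'):
                device_name = tag[len('device:'):]
            else:
                plain.append(tag)
        return tuple(sorted(plain)) or None, host, device_name

    assert extract_magic_tags(['host:test-a'], 'myhost') == (None, 'test-a', None)
    assert extract_magic_tags(['device:floppy', 'tag3'], 'myhost') == \
        (('tag3',), 'myhost', 'floppy')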
Example #14
    def test_tags(self):
        stats = MetricsAggregator("myhost")
        stats.submit_packets("gauge:1|c")
        stats.submit_packets("gauge:2|c|@1")
        stats.submit_packets("gauge:4|c|#tag1,tag2")
        stats.submit_packets("gauge:8|c|#tag2,tag1")  # Should be the same as above
        stats.submit_packets("gauge:16|c|#tag3,tag4")

        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 3
        first, second, third = metrics

        nt.assert_equal(first["metric"], "gauge")
        nt.assert_equal(first["tags"], None)
        nt.assert_equal(first["points"][0][1], 3)
        nt.assert_equal(first["host"], "myhost")

        nt.assert_equal(second["metric"], "gauge")
        nt.assert_equal(second["tags"], ("tag1", "tag2"))
        nt.assert_equal(second["points"][0][1], 12)
        nt.assert_equal(second["host"], "myhost")

        nt.assert_equal(third["metric"], "gauge")
        nt.assert_equal(third["tags"], ("tag3", "tag4"))
        nt.assert_equal(third["points"][0][1], 16)
        nt.assert_equal(third["host"], "myhost")
Example #15
    def test_rate(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.rate:10|_dd-r')
        # Sleep 1 second so the time interval > 0
        time.sleep(1)
        stats.submit_packets('my.rate:40|_dd-r')

        # Check that the rate is calculated correctly
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equals(m['metric'], 'my.rate')
        nt.assert_equals(m['points'][0][1], 30)

        # Assert that no more rates are given
        assert not stats.flush()
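
The _dd-r rate type reports the value delta between two consecutive samples divided by the elapsed time, which is why sleeping one second between 10 and 40 yields 30. A hedged arithmetic sketch (rate here is an illustrative helper; as test_rate_errors below shows, a negative delta or a zero interval produces no point at all):

    # Illustrative sketch only: per-second rate from two samples.
    def rate(first_value, second_value, elapsed_seconds):
        delta = second_value - first_value
        if delta < 0 or elapsed_seconds <= 0:
            return None               # dropped -- no rate point is flushed
        return delta / float(elapsed_seconds)

    assert rate(10, 40, 1.0) == 30.0
    assert rate(10, 9, 1.0) is None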
Example #16
    def test_custom_aggregate(self):
        configstr = 'median, max'
        stats = MetricsAggregator(
            'myhost',
            histogram_aggregates=get_histogram_aggregates(configstr)
        )

        self.assertEquals(
            sorted(stats.metric_config[Histogram]['aggregates']),
            ['max', 'median'],
            stats.metric_config[Histogram]
        )

        for i in xrange(20):
            stats.submit_packets('myhistogram:{0}|h'.format(i))

        metrics = stats.flush()

        self.assertEquals(len(metrics), 3, metrics)

        value_by_type = {}
        for k in metrics:
            value_by_type[k['metric'][len('myhistogram')+1:]] = k['points'][0][1]

        self.assertEquals(value_by_type['median'], 9, value_by_type)
        self.assertEquals(value_by_type['max'], 19, value_by_type)
        self.assertEquals(value_by_type['95percentile'], 18, value_by_type)
Example #17
    def test_tags(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('gauge:1|c')
        stats.submit_packets('gauge:2|c|@1')
        stats.submit_packets('gauge:4|c|#tag1,tag2')
        stats.submit_packets('gauge:8|c|#tag2,tag1') # Should be the same as above
        stats.submit_packets('gauge:16|c|#tag3,tag4')

        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 3
        first, second, third = metrics

        nt.assert_equal(first['metric'], 'gauge')
        nt.assert_equal(first['tags'], None)
        nt.assert_equal(first['points'][0][1], 3)
        nt.assert_equal(first['host'], 'myhost')

        nt.assert_equal(second['metric'], 'gauge')
        nt.assert_equal(second['tags'], ('tag1', 'tag2'))
        nt.assert_equal(second['points'][0][1], 12)
        nt.assert_equal(second['host'], 'myhost')

        nt.assert_equal(third['metric'], 'gauge')
        nt.assert_equal(third['tags'], ('tag3', 'tag4'))
        nt.assert_equal(third['points'][0][1], 16)
        nt.assert_equal(third['host'], 'myhost')
Example #18
    def test_custom_single_percentile(self):
        configstr = '0.40'
        stats = MetricsAggregator(
            'myhost',
            histogram_percentiles=get_histogram_percentiles(configstr)
        )

        self.assertEquals(
            stats.metric_config[Histogram]['percentiles'],
            [0.40],
            stats.metric_config[Histogram]
        )

        for i in xrange(20):
            stats.submit_packets('myhistogram:{0}|h'.format(i))

        metrics = stats.flush()

        self.assertEquals(len(metrics), 5, metrics)

        value_by_type = {}
        for k in metrics:
            value_by_type[k[0][len('myhistogram')+1:]] = k[2]

        self.assertEquals(value_by_type['40percentile'], 7, value_by_type)
Example #19
    def test_custom_multiple_percentile(self):
        configstr = '0.4, 0.65, 0.999'
        stats = MetricsAggregator(
            'myhost',
            histogram_percentiles=get_histogram_percentiles(configstr)
        )

        self.assertEquals(
            stats.metric_config[Histogram]['percentiles'],
            [0.4, 0.65, 0.99],
            stats.metric_config[Histogram]
        )

        for i in xrange(20):
            stats.submit_packets('myhistogram:{0}|h'.format(i))

        metrics = stats.flush()

        self.assertEquals(len(metrics), 7, metrics)

        value_by_type = {}
        for k in metrics:
            value_by_type[k['metric'][len('myhistogram')+1:]] = k['points'][0][1]

        self.assertEquals(value_by_type['40percentile'], 7, value_by_type)
        self.assertEquals(value_by_type['65percentile'], 12, value_by_type)
        self.assertEquals(value_by_type['99percentile'], 19, value_by_type)
Example #20
    def test_metrics_expiry(self):
        # Ensure metrics eventually expire and stop submitting.
        ag_interval = 1
        expiry = ag_interval * 4 + 2
        stats = MetricsAggregator("myhost", interval=ag_interval, expiry_seconds=expiry)
        stats.submit_packets("test.counter:123|c")
        stats.submit_packets("test.gauge:55|g")
        stats.submit_packets("test.set:44|s")
        stats.submit_packets("test.histogram:11|h")

        # Ensure points keep submitting
        time.sleep(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 8)
        nt.assert_equal(metrics[0]["metric"], "test.counter")
        nt.assert_equal(metrics[0]["points"][0][1], 123)
        time.sleep(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 1)
        nt.assert_equal(metrics[0]["metric"], "test.counter")
        nt.assert_equal(metrics[0]["points"][0][1], 0)

        time.sleep(ag_interval)
        time.sleep(0.5)
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 1)
        nt.assert_equal(metrics[0]["metric"], "test.counter")
        nt.assert_equal(metrics[0]["points"][0][1], 0)

        # Now sleep for longer than the expiry window and ensure
        # no points are submitted
        time.sleep(ag_interval)
        time.sleep(2)
        m = stats.flush()
        assert not m, str(m)

        # If we submit again, we're all good.
        stats.submit_packets("test.counter:123|c")
        stats.submit_packets("test.gauge:55|g")
        stats.submit_packets("test.set:44|s")
        stats.submit_packets("test.histogram:11|h")

        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(len(metrics), 8)
        nt.assert_equal(metrics[0]["metric"], "test.counter")
        nt.assert_equal(metrics[0]["points"][0][1], 123)
Example #21
    def test_rate_errors(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.rate:10|_dd-r')
        # Sleep 1 second so the time interval > 0 (timestamp is converted to an int)
        time.sleep(1)
        stats.submit_packets('my.rate:9|_dd-r')

        # Since the difference < 0 we shouldn't get a value
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 0)

        stats.submit_packets('my.rate:10|_dd-r')
        # Trying to have the times be the same
        stats.submit_packets('my.rate:40|_dd-r')

        metrics = stats.flush()
        nt.assert_equal(len(metrics), 0)
Example #22
    def test_sets(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.set:10|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')

        # Assert that it's treated normally.
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'my.set'
        assert m['points'][0][1] == 3

        # Assert there are no more sets
        assert not stats.flush()
Example #23
    def test_string_sets(self):
        stats = MetricsAggregator("myhost")
        stats.submit_packets("my.set:string|s")
        stats.submit_packets("my.set:sets|s")
        stats.submit_packets("my.set:sets|s")
        stats.submit_packets("my.set:test|s")
        stats.submit_packets("my.set:test|s")
        stats.submit_packets("my.set:test|s")

        # Assert that it's treated normally.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m["metric"], "my.set")
        nt.assert_equal(m["points"][0][1], 3)

        # Assert there are no more sets
        assert not stats.flush()
Example #24
    def test_scientific_notation(self):
        stats = MetricsAggregator('myhost', interval=10)

        stats.submit_packets('test.scinot:9.512901e-05|g')
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 1
        ts, val = metrics[0].get('points')[0]
        nt.assert_almost_equal(val, 9.512901e-05)
Example #25
    def test_sets(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.set:10|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')

        # Assert that it's treated normally.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m[0], 'my.set')
        nt.assert_equal(m[2], 3)

        # Assert there are no more sets
        assert not stats.flush()
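
A set metric flushes the number of distinct values seen during the interval, which is why six submissions covering only the values 10, 20 and 30 flush as 3 (the string-valued sets in the next example behave the same way). In sketch form:

    # Illustrative sketch only: a set flushes the count of distinct values.
    seen = set()
    for value in ['10', '20', '20', '30', '30', '30']:
        seen.add(value)
    assert len(seen) == 3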
Example #26
    def test_string_sets(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.set:string|s')
        stats.submit_packets('my.set:sets|s')
        stats.submit_packets('my.set:sets|s')
        stats.submit_packets('my.set:test|s')
        stats.submit_packets('my.set:test|s')
        stats.submit_packets('my.set:test|s')

        # Assert that it's treated normally.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m['metric'], 'my.set')
        nt.assert_equal(m['points'][0][1], 3)

        # Assert there are no more sets
        assert not stats.flush()
Example #27
    def test_sampled_counter(self):

        # Submit a sampled counter.
        stats = MetricsAggregator('myhost')
        stats.submit_packets('sampled.counter:1|c|@0.5')
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.counter'
        nt.assert_equal(m['points'][0][1], 2)
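
A sample rate on a counter means "only this fraction of the increments was actually sent", so the aggregator scales the received value back up by 1 / sample_rate; that is why one packet of value 1 at @0.5 flushes as 2. In sketch form, with scaled_counter_value as an illustrative helper:

    # Illustrative sketch only: counters are scaled up by 1 / sample_rate.
    def scaled_counter_value(value, sample_rate):
        return value / float(sample_rate)

    assert scaled_counter_value(1, 0.5) == 2    # matches the test above
    assert scaled_counter_value(3, 0.25) == 12  # hypothetical extra case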
Example #28
    def test_monokey_batching_notags(self):
        stats = MetricsAggregator('host')
        stats.submit_packets('test_hist:0.3|ms:2.5|ms|@0.5:3|ms')

        stats_ref = MetricsAggregator('host')
        packets = [
            'test_hist:0.3|ms', 'test_hist:2.5|ms|@0.5', 'test_hist:3|ms'
        ]
        stats_ref.submit_packets("\n".join(packets))

        metrics = stats.flush()
        metrics_ref = stats_ref.flush()

        self.assertTrue(
            len(metrics) == len(metrics_ref) == 5, (metrics, metrics_ref))

        for i in range(len(metrics)):
            nt.assert_equal(metrics[i]['points'][0][1],
                            metrics_ref[i]['points'][0][1])
Example #29
    def test_sampled_counter(self):

        # Submit a sampled counter.
        stats = MetricsAggregator("myhost")
        stats.submit_packets("sampled.counter:1|c|@0.5")
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m["metric"] == "sampled.counter"
        nt.assert_equal(m["points"][0][1], 2)
Example #30
    def test_sampled_counter(self):

        # Submit a sampled counter.
        stats = MetricsAggregator('myhost')
        stats.submit_packets('sampled.counter:1|c|@0.5')
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.counter'
        nt.assert_equal(m['points'][0][1], 2)
Example #31
    def test_monokey_batching_notags(self):
        stats = MetricsAggregator('host')
        stats.submit_packets('test_hist:0.3|ms:2.5|ms|@0.5:3|ms')

        stats_ref = MetricsAggregator('host')
        packets = [
                'test_hist:0.3|ms',
                'test_hist:2.5|ms|@0.5',
                'test_hist:3|ms'
        ]
        stats_ref.submit_packets("\n".join(packets))

        metrics = stats.flush()
        metrics_ref = stats_ref.flush()

        self.assertTrue(len(metrics) == len(metrics_ref) == 5, (metrics, metrics_ref))

        for i in range(len(metrics)):
            nt.assert_equal(metrics[i]['points'][0][1], metrics_ref[i]['points'][0][1])
Example #32
    def test_ignore_distribution(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.dist:5.0|d')
        stats.submit_packets('my.gauge:1|g')

        # Assert that it's treated normally, and that the distribution is ignored
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m['metric'], 'my.gauge')
        nt.assert_equal(m['points'][0][1], 1)
Example #33
    def test_diagnostic_stats(self):
        stats = MetricsAggregator('myhost')
        for i in xrange(10):
            stats.submit_packets('metric:10|c')
        stats.send_packet_count('datadog.dogstatsd.packet.count')
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2
        first, second = metrics

        assert first['metric'] == 'datadog.dogstatsd.packet.count'
        assert first['points'][0][1] == 10
Example #34
    def test_monokey_batching_withtags(self):
        stats = MetricsAggregator('host')
        stats.submit_packets('test_gauge:1.5|g|#tag1:one,tag2:two:2.3|g|#tag3:three:3|g')

        stats_ref = MetricsAggregator('host')
        packets = [
            'test_gauge:1.5|g|#tag1:one,tag2:two',
            'test_gauge:2.3|g|#tag3:three',
            'test_gauge:3|g'
        ]
        stats_ref.submit_packets("\n".join(packets))

        metrics = self.sort_metrics(stats.flush())
        metrics_ref = self.sort_metrics(stats_ref.flush())

        self.assertTrue(len(metrics) == len(metrics_ref) == 3, (metrics, metrics_ref))

        for i in range(len(metrics)):
            nt.assert_equal(metrics[i]['points'][0][1], metrics_ref[i]['points'][0][1])
            nt.assert_equal(metrics[i]['tags'], metrics_ref[i]['tags'])
Example #35
    def test_monokey_batching_withtags(self):
        stats = MetricsAggregator('host')
        stats.submit_packets('test_gauge:1.5|g|#tag1:one,tag2:two:2.3|g|#tag3:three:3|g')

        stats_ref = MetricsAggregator('host')
        packets = [
            'test_gauge:1.5|g|#tag1:one,tag2:two',
            'test_gauge:2.3|g|#tag3:three',
            'test_gauge:3|g'
        ]
        stats_ref.submit_packets("\n".join(packets))

        metrics = self.sort_metrics(stats.flush())
        metrics_ref = self.sort_metrics(stats_ref.flush())

        self.assertTrue(len(metrics) == len(metrics_ref) == 3, (metrics, metrics_ref))

        for i in range(len(metrics)):
            nt.assert_equal(metrics[i]['points'][0][1], metrics_ref[i]['points'][0][1])
            nt.assert_equal(metrics[i]['tags'], metrics_ref[i]['tags'])
Example #36
    def test_diagnostic_stats(self):
        stats = MetricsAggregator('myhost')
        for i in xrange(10):
            stats.submit_packets('metric:10|c')
        stats.send_packet_count('datadog.dogstatsd.packet.count')
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equals(2, len(metrics))
        first, second = metrics

        nt.assert_equal(first['metric'], 'datadog.dogstatsd.packet.count')
        nt.assert_equal(first['points'][0][1], 10)
Example #37
    def test_string_sets(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.set:string|s')
        stats.submit_packets('my.set:sets|s')
        stats.submit_packets('my.set:sets|s')
        stats.submit_packets('my.set:test|s')
        stats.submit_packets('my.set:test|s')
        stats.submit_packets('my.set:test|s')

        # Assert that it's treated normally.
        metrics = stats.flush()
        assert len(metrics) == 2
        m = metrics[0]
        assert m['metric'] == 'my.set'
        assert m['points'][0][1] == 3

        # Assert there are no more sets
        metrics = stats.flush()
        assert len(metrics) == 1
        assert metrics[0]['metric'] == 'datadog.agent.running'
Example #38
    def test_diagnostic_stats(self):
        stats = MetricsAggregator('myhost')
        for i in xrange(10):
            stats.submit_packets('metric:10|c')
        stats.send_packet_count('datadog.dogstatsd.packet.count')
        metrics = self.sort_metrics(stats.flush())
        nt.assert_equals(2, len(metrics))
        first, second = metrics

        nt.assert_equal(first['metric'], 'datadog.dogstatsd.packet.count')
        nt.assert_equal(first['points'][0][1], 10)
Example #39
    def test_gauge_sample_rate(self):
        stats = MetricsAggregator('myhost')

        # Submit a sampled gauge metric.
        stats.submit_packets('sampled.gauge:10|g|@0.1')

        # Assert that it's treated normally.
        metrics = stats.flush()
        assert len(metrics) == 2
        m = metrics[0]
        assert m['metric'] == 'sampled.gauge'
        assert m['points'][0][1] == 10
Example #40
    def test_formatter(self):
        stats = MetricsAggregator('myhost', interval=10,
                                  formatter=get_formatter({'dogstatsd': {'metric_namespace': 'datadog'}}))
        stats.submit_packets('gauge:16|c|#tag3,tag4')
        metrics = self.sort_metrics(stats.flush()[:-1])
        assert (len(metrics) == 1)
        assert (metrics[0]['metric'] == 'datadog.gauge')

        stats = MetricsAggregator('myhost', interval=10,
                                  formatter=get_formatter({'dogstatsd': {'metric_namespace': 'datadoge'}}))
        stats.submit_packets('gauge:16|c|#tag3,tag4')
        metrics = self.sort_metrics(stats.flush()[:-1])
        assert (len(metrics) == 1)
        assert (metrics[0]['metric'] == 'datadoge.gauge')

        stats = MetricsAggregator('myhost', interval=10,
                                  formatter=get_formatter({'dogstatsd': {'metric_namespace': None}}))
        stats.submit_packets('gauge:16|c|#tag3,tag4')
        metrics = self.sort_metrics(stats.flush()[:-1])
        assert (len(metrics) == 1)
        assert (metrics[0]['metric'] == 'gauge')
Example #41
    def test_ignore_distribution(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.dist:5.0|d')
        stats.submit_packets('my.other.dist:5.0|dk')
        stats.submit_packets('my.gauge:1|g')

        # Assert that it's treated normally, and that the distribution is ignored
        metrics = stats.flush()
        assert len(metrics) == 2
        m = metrics[0]
        assert m['metric'] == 'my.gauge'
        assert m['points'][0][1] == 1
Example #42
    def test_sampled_histogram(self):
        # Submit a sampled histogram.
        stats = MetricsAggregator('myhost')
        stats.submit_packets('sampled.hist:5|h|@0.5')

        # Assert we scale up properly.
        metrics = self.sort_metrics(stats.flush())
        p95, pavg, pcount, pmax, pmed = self.sort_metrics(metrics)

        nt.assert_equal(pcount['points'][0][1], 2)
        for p in [p95, pavg, pmed, pmax]:
            nt.assert_equal(p['points'][0][1], 5)
Example #43
    def test_gauge_sample_rate(self):
        stats = MetricsAggregator('myhost')

        # Submit a sampled gauge metric.
        stats.submit_packets('sampled.gauge:10|g|@0.1')

        # Assert that it's treated normally.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m['metric'], 'sampled.gauge')
        nt.assert_equal(m['points'][0][1], 10)
Example #44
    def test_monokey_batching_withtags(self):
        stats = MetricsAggregator('host')
        stats.submit_packets(
            'test_gauge:1.5|g|#tag1:one,tag2:two:2.3|g|#tag3:three:3|g')

        stats_ref = MetricsAggregator('host')
        packets = [
            'test_gauge:1.5|g|#tag1:one,tag2:two',
            'test_gauge:2.3|g|#tag3:three', 'test_gauge:3|g'
        ]
        stats_ref.submit_packets('\n'.join(packets))

        metrics = self.sort_metrics(stats.flush()[:-1])
        metrics_ref = self.sort_metrics(stats_ref.flush()[:-1])

        assert len(metrics) == 3
        assert len(metrics) == len(metrics_ref)

        for i in range(len(metrics)):
            assert metrics[i]['points'][0][1] == metrics_ref[i]['points'][0][1]
            assert metrics[i]['tags'] == metrics_ref[i]['tags']
Example #45
    def test_formatter(self):
        stats = MetricsAggregator('myhost', interval=10,
            formatter = get_formatter({"statsd_metric_namespace": "datadog"}))
        stats.submit_packets('gauge:16|c|#tag3,tag4')
        metrics = self.sort_metrics(stats.flush())
        self.assertTrue(len(metrics) == 1)
        self.assertTrue(metrics[0]['metric'] == "datadog.gauge")

        stats = MetricsAggregator('myhost', interval=10,
            formatter = get_formatter({"statsd_metric_namespace": "datadoge."}))
        stats.submit_packets('gauge:16|c|#tag3,tag4')
        metrics = self.sort_metrics(stats.flush())
        self.assertTrue(len(metrics) == 1)
        self.assertTrue(metrics[0]['metric'] == "datadoge.gauge")

        stats = MetricsAggregator('myhost', interval=10,
        formatter = get_formatter({"statsd_metric_namespace": None}))
        stats.submit_packets('gauge:16|c|#tag3,tag4')
        metrics = self.sort_metrics(stats.flush())
        self.assertTrue(len(metrics) == 1)
        self.assertTrue(metrics[0]['metric'] == "gauge")
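
In both formatter variants the namespace is simply prepended to the flushed metric name, and a trailing dot in the configured value does not produce a double dot, as the 'datadoge.' case shows. A hedged sketch of that prefixing (namespaced is an illustrative helper, not the formatter's real signature):

    # Illustrative sketch only: prepend the configured namespace, if any.
    def namespaced(name, namespace=None):
        if not namespace:
            return name
        return '%s.%s' % (namespace.rstrip('.'), name)

    assert namespaced('gauge', 'datadog') == 'datadog.gauge'
    assert namespaced('gauge', 'datadoge.') == 'datadoge.gauge'
    assert namespaced('gauge', None) == 'gauge'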
Example #46
    def test_running_beacon(self):
        stats = MetricsAggregator('myhost', interval=10)

        metrics = stats.flush()
        assert (len(metrics) == 1)
        assert metrics[0]['metric'] == 'datadog.agent.running'
        assert metrics[0]['host'] == 'myhost'

        stats.submit_packets('int:1|c')
        metrics = stats.flush()
        assert (len(metrics) == 2)
        assert metrics[-1]['metric'] == 'datadog.agent.running'
        assert metrics[-1]['host'] == 'myhost'

        # Namespace shouldn't affect
        stats = MetricsAggregator('myhost', interval=10,
                                  formatter=get_formatter({'dogstatsd': {'metric_namespace': None}}))
        metrics = stats.flush()
        assert (len(metrics) == 1)
        assert metrics[0]['metric'] == 'datadog.agent.running'
        assert metrics[0]['host'] == 'myhost'
Example #47
    def test_batch_submission(self):
        # Submit a sampled histogram.
        stats = MetricsAggregator('myhost')
        metrics = ['counter:1|c', 'counter:1|c', 'gauge:1|g']
        packet = "\n".join(metrics)
        stats.submit_packets(packet)

        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(2, len(metrics))
        counter, gauge = metrics
        assert counter['points'][0][1] == 2
        assert gauge['points'][0][1] == 1
Example #48
    def test_histogram_normalization(self):
        stats = MetricsAggregator('myhost', interval=10)
        for i in range(5):
            stats.submit_packets('h1:1|h')
        for i in range(20):
            stats.submit_packets('h2:1|h')

        metrics = self.sort_metrics(stats.flush())
        _, _, h1count, _, _, \
        _, _, h2count, _, _ = metrics

        nt.assert_equal(h1count['points'][0][1], 0.5)
        nt.assert_equal(h2count['points'][0][1], 2)
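
The normalization being checked here is division by the aggregation interval: with interval=10, five h1 samples flush a .count of 5 / 10 = 0.5 and twenty h2 samples flush 20 / 10 = 2. The counter normalization examples further down follow the same rule. As plain arithmetic:

    # Illustrative sketch only: per-second normalization over the interval.
    interval = 10
    assert 5 / float(interval) == 0.5    # h1: 5 samples over a 10 s interval
    assert 20 / float(interval) == 2.0   # h2: 20 samples over a 10 s interval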
Example #49
    def test_monokey_batching_notags(self):
        # The min is not enabled by default
        stats = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        stats.submit_packets('test_hist:0.3|ms:2.5|ms|@0.5:3|ms')

        stats_ref = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        packets = [
            'test_hist:0.3|ms', 'test_hist:2.5|ms|@0.5', 'test_hist:3|ms'
        ]
        stats_ref.submit_packets('\n'.join(packets))

        metrics = stats.flush()[:-1]
        metrics_ref = stats_ref.flush()[:-1]

        assert len(metrics) == len(metrics_ref) == 6

        for i in range(len(metrics)):
            assert metrics[i]['points'][0][1] == metrics_ref[i]['points'][0][1]
Example #50
    def test_monokey_batching_notags(self):
        # The min is not enabled by default
        stats = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        stats.submit_packets('test_hist:0.3|ms:2.5|ms|@0.5:3|ms')

        stats_ref = MetricsAggregator(
            'host',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        packets = [
            'test_hist:0.3|ms', 'test_hist:2.5|ms|@0.5', 'test_hist:3|ms'
        ]
        stats_ref.submit_packets("\n".join(packets))

        metrics = stats.flush()
        metrics_ref = stats_ref.flush()

        self.assertTrue(
            len(metrics) == len(metrics_ref) == 6, (metrics, metrics_ref))

        for i in range(len(metrics)):
            nt.assert_equal(metrics[i][2], metrics_ref[i][2])
Example #51
    def test_sampled_histogram(self):
        # Submit a sampled histogram.
        # The min is not enabled by default
        stats = MetricsAggregator(
            'myhost',
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        stats.submit_packets('sampled.hist:5|h|@0.5')

        # Assert we scale up properly.
        metrics = self.sort_metrics(stats.flush())
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)

        nt.assert_equal(pcount['points'][0][1], 2)
        for p in [p95, pavg, pmed, pmax, pmin]:
            nt.assert_equal(p['points'][0][1], 5)
Example #52
    def test_histogram_normalization(self):
        # The min is not enabled by default
        stats = MetricsAggregator(
            'myhost',
            interval=10,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        for i in range(5):
            stats.submit_packets('h1:1|h')
        for i in range(20):
            stats.submit_packets('h2:1|h')

        metrics = self.sort_metrics(stats.flush())
        _, _, h1count, _, _, _, _, _, h2count, _, _, _ = metrics

        nt.assert_equal(h1count['points'][0][1], 0.5)
        nt.assert_equal(h2count['points'][0][1], 2)
Example #53
    def test_custom_single_percentile(self):
        stats = MetricsAggregator('myhost', histogram_percentiles=[0.4])

        assert stats.metric_config[Histogram]['percentiles'] == [0.40]

        for i in xrange(20):
            stats.submit_packets('myhistogram:{0}|h'.format(i))

        metrics = stats.flush()

        assert len(metrics) == 5

        value_by_type = {}
        for k in metrics:
            value_by_type[k['metric'][len('myhistogram') +
                                      1:]] = k['points'][0][1]

        assert value_by_type['40percentile'] == 7
Example #54
    def test_histogram_counter(self):
        # Test whether histogram.count == increment
        # same deal with a sample rate
        cnt = 100000
        for run in [1, 2]:
            stats = MetricsAggregator('myhost')
            for i in xrange(cnt):
                if run == 2:
                    stats.submit_packets('test.counter:1|c|@0.5')
                    stats.submit_packets('test.hist:1|ms|@0.5')
                else:
                    stats.submit_packets('test.counter:1|c')
                    stats.submit_packets('test.hist:1|ms')
            metrics = self.sort_metrics(stats.flush())
            assert len(metrics) > 0

            nt.assert_equal([m['points'][0][1] for m in metrics if m['metric'] == 'test.counter'], [cnt * run])
            nt.assert_equal([m['points'][0][1] for m in metrics if m['metric'] == 'test.hist.count'], [cnt * run])
Example #55
    def test_packet_string_endings(self):
        stats = MetricsAggregator('myhost')

        stats.submit_packets('line_ending.generic:500|c')
        stats.submit_packets('line_ending.unix:400|c\n')
        stats.submit_packets('line_ending.windows:300|c\r\n')

        metrics = self.sort_metrics(stats.flush()[:-1])
        assert len(metrics) == 3

        first, second, third = metrics
        assert first['metric'] == 'line_ending.generic'
        assert first['points'][0][1] == 500

        assert second['metric'] == 'line_ending.unix'
        assert second['points'][0][1] == 400

        assert third['metric'] == 'line_ending.windows'
        assert third['points'][0][1] == 300
Example #56
    def test_packet_string_endings(self):
        stats = MetricsAggregator('myhost')

        stats.submit_packets('line_ending.generic:500|c')
        stats.submit_packets('line_ending.unix:400|c\n')
        stats.submit_packets('line_ending.windows:300|c\r\n')

        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 3

        first, second, third = metrics
        nt.assert_equals(first['metric'], 'line_ending.generic')
        nt.assert_equals(first['points'][0][1], 500)

        nt.assert_equals(second['metric'], 'line_ending.unix')
        nt.assert_equals(second['points'][0][1], 400)

        nt.assert_equals(third['metric'], 'line_ending.windows')
        nt.assert_equals(third['points'][0][1], 300)
Example #57
    def test_custom_single_percentile(self):
        configstr = '0.40'
        stats = MetricsAggregator(
            'myhost',
            histogram_percentiles=get_histogram_percentiles(configstr))

        self.assertEquals(stats.metric_config[Histogram]['percentiles'],
                          [0.40], stats.metric_config[Histogram])

        for i in xrange(20):
            stats.submit_packets('myhistogram:{0}|h'.format(i))

        metrics = stats.flush()

        self.assertEquals(len(metrics), 5, metrics)

        value_by_type = {}
        for k in metrics:
            value_by_type[k[0][len('myhistogram') + 1:]] = k[2]

        self.assertEquals(value_by_type['40percentile'], 7, value_by_type)
Example #58
    def test_counter_normalization(self):
        stats = MetricsAggregator('myhost', interval=10)

        # Assert counters are normalized.
        stats.submit_packets('int:1|c')
        stats.submit_packets('int:4|c')
        stats.submit_packets('int:15|c')

        stats.submit_packets('float:5|c')

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        floatc, intc = metrics

        nt.assert_equal(floatc['metric'], 'float')
        nt.assert_equal(floatc['points'][0][1], 0.5)
        nt.assert_equal(floatc['host'], 'myhost')

        nt.assert_equal(intc['metric'], 'int')
        nt.assert_equal(intc['points'][0][1], 2)
        nt.assert_equal(intc['host'], 'myhost')
Example #59
    def test_counter_normalization(self):
        stats = MetricsAggregator('myhost', interval=10)

        # Assert counters are normalized.
        stats.submit_packets('int:1|c')
        stats.submit_packets('int:4|c')
        stats.submit_packets('int:15|c')

        stats.submit_packets('float:5|c')

        metrics = self.sort_metrics(stats.flush()[:-1])
        assert len(metrics) == 2

        floatc, intc = metrics

        assert floatc['metric'] == 'float'
        assert floatc['points'][0][1] == 0.5
        assert floatc['host'] == 'myhost'

        assert intc['metric'] == 'int'
        assert intc['points'][0][1] == 2
        assert intc['host'] == 'myhost'
Example #60
    def test_custom_aggregate(self):
        configstr = 'median, max'
        stats = MetricsAggregator(
            'myhost', histogram_aggregates=get_histogram_aggregates(configstr))

        self.assertEquals(sorted(stats.metric_config[Histogram]['aggregates']),
                          ['max', 'median'], stats.metric_config[Histogram])

        for i in xrange(20):
            stats.submit_packets('myhistogram:{0}|h'.format(i))

        metrics = stats.flush()

        self.assertEquals(len(metrics), 3, metrics)

        value_by_type = {}
        for k in metrics:
            value_by_type[k['metric'][len('myhistogram') +
                                      1:]] = k['points'][0][1]

        self.assertEquals(value_by_type['median'], 9, value_by_type)
        self.assertEquals(value_by_type['max'], 19, value_by_type)
        self.assertEquals(value_by_type['95percentile'], 18, value_by_type)