Esempio n. 1
0
    def test_sets_flush_during_bucket(self):
        # Flushing mid-bucket must report only the completed bucket's set,
        # leaving the in-progress bucket for the next flush.
        interval = self.interval
        aggregator = MetricsBucketAggregator('myhost', interval=interval)
        self.wait_for_bucket_boundary(interval)

        # Six packets, three distinct values -> cardinality 3 in bucket one.
        for value in (10, 20, 20, 30, 30, 30):
            aggregator.submit_packets('my.set:%d|s' % value)
        self.sleep_for_interval_length(interval)
        aggregator.submit_packets('my.set:40|s')

        # First flush: only the completed bucket rolls up.
        flushed = aggregator.flush()
        assert len(flushed) == 1
        metric = flushed[0]
        assert metric['metric'] == 'my.set'
        assert metric['points'][0][1] == 3

        # Second flush: the later bucket held a single unique member.
        self.sleep_for_interval_length(interval)
        flushed = aggregator.flush()
        metric = flushed[0]
        assert metric['metric'] == 'my.set'
        assert metric['points'][0][1] == 1

        # Nothing further should remain after the set is flushed.
        assert not aggregator.flush()
    def test_sdstatsd_aggregation_perf(self):
        # Performance smoke test: hammer the aggregator with plain, tagged
        # and sampled packets, flushing once per outer iteration. No
        # assertions; passing means nothing raised.
        aggregator = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for value in xrange(self.LOOPS_PER_FLUSH):
                for name in xrange(self.METRIC_COUNT):

                    # plain metrics (the set uses a fixed member, 1.0)
                    aggregator.submit_packets('counter.%s:%s|c' % (name, value))
                    aggregator.submit_packets('gauge.%s:%s|g' % (name, value))
                    aggregator.submit_packets('histogram.%s:%s|h' % (name, value))
                    aggregator.submit_packets('set.%s:%s|s' % (name, 1.0))

                    # tagged metrics
                    aggregator.submit_packets('counter.%s:%s|c|#tag1,tag2' % (name, value))
                    aggregator.submit_packets('gauge.%s:%s|g|#tag1,tag2' % (name, value))
                    aggregator.submit_packets('histogram.%s:%s|h|#tag1,tag2' % (name, value))
                    aggregator.submit_packets('set.%s:%s|s|#tag1,tag2' % (name, value))

                    # sampled metrics
                    aggregator.submit_packets('counter.%s:%s|c|@0.5' % (name, value))
                    aggregator.submit_packets('gauge.%s:%s|g|@0.5' % (name, value))
                    aggregator.submit_packets('histogram.%s:%s|h|@0.5' % (name, value))
                    aggregator.submit_packets('set.%s:%s|s|@0.5' % (name, value))

            aggregator.flush()
Esempio n. 3
0
    def test_gauge_buckets(self):
        # Gauges submitted in two consecutive time buckets flush as two
        # separate data points (one per bucket).
        interval = self.interval
        aggregator = MetricsBucketAggregator('myhost', interval=interval)
        self.wait_for_bucket_boundary(interval)

        # First bucket: the later sample wins for my.first.gauge.
        aggregator.submit_packets('my.first.gauge:1|g')
        aggregator.submit_packets('my.first.gauge:5|g')
        aggregator.submit_packets('my.second.gauge:1.5|g')
        self.sleep_for_interval_length(interval)
        # Second bucket: a new value for my.second.gauge only.
        aggregator.submit_packets('my.second.gauge:9.5|g')

        # Ensure that gauges roll up correctly.
        self.sleep_for_interval_length(interval)
        flushed = self.sort_metrics(aggregator.flush())
        assert len(flushed) == 3

        first, second, second_b = flushed

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second_b['metric'] == 'my.second.gauge'
        assert second_b['points'][0][1] == 9.5

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        # A further flush after another interval must come back empty.
        self.sleep_for_interval_length(interval)
        assert len(self.sort_metrics(aggregator.flush())) == 0
Esempio n. 4
0
    def test_sets_buckets(self):
        # A set spanning two buckets flushes one point per bucket.
        aggregator = MetricsBucketAggregator('myhost', interval=self.interval)
        # Bucket one: six packets, three distinct members.
        for value in (10, 20, 20, 30, 30, 30):
            aggregator.submit_packets('my.set:%d|s' % value)
        self.sleep_for_interval_length()
        # Bucket two: a single member.
        aggregator.submit_packets('my.set:40|s')

        # Assert that it's treated normally.
        self.sleep_for_interval_length()
        flushed = aggregator.flush()
        assert len(flushed) == 2

        bucket_one, bucket_two = flushed
        assert bucket_one['metric'] == 'my.set'
        assert bucket_one['points'][0][1] == 3

        assert bucket_two['metric'] == 'my.set'
        assert bucket_two['points'][0][1] == 1

        # Sets are cleared once flushed.
        assert not aggregator.flush()
Esempio n. 5
0
    def test_gauge_flush_during_bucket(self):
        """Flush in the middle of a bucket: only completed buckets report.

        The first flush happens while the second bucket is still open, so it
        must contain only the first bucket's gauges; the second flush (after
        that bucket closes) must report the remaining gauge.
        """
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # First bucket: the later sample wins for my.first.gauge.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')
        self.sleep_for_interval_length(ag_interval)
        # Second (still open) bucket.
        stats.submit_packets('my.second.gauge:9.5|g')

        # Mid-bucket flush: only the closed bucket's gauges roll up.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        # BUG FIX: the original re-asserted the stale `second` object from the
        # previous flush (a tautological no-op), leaving this flush's content
        # unverified. Check the newly flushed metric instead; its value is the
        # second bucket's gauge (9.5), as in test_gauge_buckets.
        second_b = metrics[0]
        assert second_b['metric'] == 'my.second.gauge'
        assert second_b['points'][0][1] == 9.5
Esempio n. 6
0
    def test_dogstatsd_aggregation_perf(self):
        # Performance smoke test: submit every packet flavour (plain, tagged,
        # sampled) for each metric type and flush repeatedly. No assertions;
        # passing means nothing raised.
        agg = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for val in xrange(self.LOOPS_PER_FLUSH):
                for idx in xrange(self.METRIC_COUNT):
                    # One pass per packet variant: plain, tagged, sampled.
                    for suffix in ('', '|#tag1,tag2', '|@0.5'):
                        # Plain set packets use a fixed member (1.0).
                        set_val = 1.0 if suffix == '' else val
                        agg.submit_packets('counter.%s:%s|c%s' % (idx, val, suffix))
                        agg.submit_packets('gauge.%s:%s|g%s' % (idx, val, suffix))
                        agg.submit_packets('histogram.%s:%s|h%s' % (idx, val, suffix))
                        agg.submit_packets('set.%s:%s|s%s' % (idx, set_val, suffix))

            agg.flush()
Esempio n. 7
0
    def test_histogram_flush_during_bucket(self):
        """Flushing mid-bucket reports only completed histogram buckets.

        Bucket one samples 0-99, bucket two samples 0-49; the first flush
        (taken while bucket two is still open) must only contain bucket one.
        """
        ag_interval = 1
        # The 'min' aggregate is not enabled by default.
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])

        # Sample all numbers between 1-100 many times. This
        # means our percentiles should be relatively close to themselves.
        self.wait_for_bucket_boundary(ag_interval)
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                for type_ in ['h', 'ms']:
                    stats.submit_packets('my.p:%s|%s' % (i, type_))

        # Make sure that we'll wait for the _next_ bucket boundary.
        time.sleep(self.BUCKET_BOUNDARY_TOLERANCE)
        self.wait_for_bucket_boundary(ag_interval)
        percentiles = list(range(50))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                for type_ in ['h', 'ms']:
                    stats.submit_packets('my.p:%s|%s' % (i, type_))

        # Mid-bucket flush: only the first (closed) bucket is reported.
        # FIX: the original sorted the already-sorted list a second time
        # before unpacking; one sort suffices.
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 6
        p95, pavg, pcount, pmax, pmed, pmin = metrics
        assert p95['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95['points'][0][1], 95, 10)
        self.assert_almost_equal(pmax['points'][0][1], 99, 1)
        self.assert_almost_equal(pmed['points'][0][1], 50, 2)
        self.assert_almost_equal(pavg['points'][0][1], 50, 2)
        self.assert_almost_equal(pmin['points'][0][1], 1, 1)
        assert pcount['points'][0][1] == 4000  # 100 * 20 * 2
        assert p95['host'] == 'myhost'

        # Second flush picks up the second bucket's (0-49) samples.
        # NOTE(review): other tests pass ag_interval here — confirm the
        # default interval of sleep_for_interval_length matches 1s.
        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 6
        p95_b, pavg_b, pcount_b, pmax_b, pmed_b, pmin_b = metrics
        assert p95_b['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95_b['points'][0][1], 47, 10)
        self.assert_almost_equal(pmax_b['points'][0][1], 49, 1)
        self.assert_almost_equal(pmed_b['points'][0][1], 25, 2)
        self.assert_almost_equal(pavg_b['points'][0][1], 25, 2)
        self.assert_almost_equal(pmin_b['points'][0][1], 1, 1)
        # FIX: comment corrected — second bucket is 50 values, not 100.
        assert pcount_b['points'][0][1] == 2000  # 50 * 20 * 2

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
Esempio n. 8
0
    def test_counter_buckets(self):
        # Counters submitted across two bucket intervals must flush as two
        # separate points per metric, each stamped on a bucket boundary.
        ag_interval = 5
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some counters.
        # Values are multiples of ag_interval, so the interval-normalized
        # rates come out as whole numbers (1, 1, 3, then 5) below.
        stats.submit_packets("my.first.counter:%s|c" % (1 * ag_interval))
        stats.submit_packets("my.second.counter:%s|c" % (1 * ag_interval))
        stats.submit_packets("my.third.counter:%s|c" % (3 * ag_interval))
        time.sleep(ag_interval)
        stats.submit_packets("my.first.counter:%s|c" % (5 * ag_interval))

        # Want to get 2 different entries for my.first.counter in one set of metrics,
        #  so wait for the time bucket interval to pass
        self.sleep_for_interval_length(ag_interval)
        # Ensure they roll up nicely.
        metrics = self.sort_metrics(stats.flush())
        # 3 metric names x 2 buckets = 6 points.
        assert len(metrics) == 6

        first, first_b, second, second_b, third, third_b = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 1
        assert first['host'] == 'myhost'

        assert first_b['metric'] == 'my.first.counter'
        assert first_b['points'][0][1] == 5
        # The two points must be exactly one bucket apart...
        assert (first_b['points'][0][0] - first['points'][0][0]) == ag_interval

        # ...and both timestamps aligned to bucket boundaries.
        assert first['points'][0][0] % ag_interval == 0
        assert first_b['points'][0][0] % ag_interval == 0

        # Counters with no samples in the second bucket report zero there.
        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 1
        assert second_b['metric'] == 'my.second.counter'
        assert second_b['points'][0][1] == 0

        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 3
        assert third_b['metric'] == 'my.third.counter'
        assert third_b['points'][0][1] == 0

        # An immediate re-flush (current bucket still open) yields nothing.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 0

        self.sleep_for_interval_length(ag_interval)
        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 0
        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 0
        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 0
Esempio n. 9
0
    def test_tags(self):
        # Tag sets compare order-insensitively: untagged, {tag1,tag2} and
        # {tag3,tag4} each aggregate into their own context.
        aggregator = MetricsBucketAggregator('myhost', interval=self.interval)
        aggregator.submit_packets('gauge:1|c')
        aggregator.submit_packets('gauge:2|c|@1')
        aggregator.submit_packets('gauge:4|c|#tag1,tag2')
        # Same tags in the opposite order: same context as the line above.
        aggregator.submit_packets('gauge:8|c|#tag2,tag1')
        aggregator.submit_packets('gauge:16|c|#tag3,tag4')

        self.sleep_for_interval_length()
        flushed = self.sort_metrics(aggregator.flush())

        assert len(flushed) == 3
        untagged, tagged_a, tagged_b = flushed

        assert untagged['metric'] == 'gauge'
        assert untagged['tags'] is None
        assert untagged['points'][0][1] == 3
        assert untagged['host'] == 'myhost'

        assert tagged_a['metric'] == 'gauge'
        assert tagged_a['tags'] == ('tag1', 'tag2')
        assert tagged_a['points'][0][1] == 12
        assert tagged_a['host'] == 'myhost'

        assert tagged_b['metric'] == 'gauge'
        assert tagged_b['tags'] == ('tag3', 'tag4')
        assert tagged_b['points'][0][1] == 16
        assert tagged_b['host'] == 'myhost'
Esempio n. 10
0
 def test_sampled_counter(self):
     # A counter sampled at rate 0.5 is scaled by 1/rate on flush: a single
     # '1' packet reports as 2.
     # Submit a sampled counter.
     stats = MetricsBucketAggregator('myhost', interval=self.interval)
     stats.submit_packets('sampled.counter:1|c|@0.5')
     self.sleep_for_interval_length()
     metrics = stats.flush()
     assert len(metrics) == 1
     m = metrics[0]
     assert m['metric'] == 'sampled.counter'
     assert m['points'][0][1] == 2
Esempio n. 11
0
    def test_scientific_notation(self):
        # Values written in scientific notation must parse to the right float.
        bucket_interval = 10
        aggregator = MetricsBucketAggregator('myhost', interval=bucket_interval)

        aggregator.submit_packets('test.scinot:9.512901e-05|g')
        self.sleep_for_interval_length(bucket_interval)

        flushed = self.sort_metrics(aggregator.flush())
        assert len(flushed) == 1
        ts, val = flushed[0].get('points')[0]
        self.assert_almost_equal(val, 9.512901e-05)
    def test_sdstatsd_ascii_events(self):
        # Performance smoke test: submit many ASCII event packets and flush
        # repeatedly. No assertions; passing means nothing raised. The event
        # titles/bodies are arbitrary filler text.
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(self.create_event_packet(
                        'asldkfj fdsaljfas dflksjafs fasdfkjaldsfkjasldf',
                        """alkdjfa slfalskdfjas lkfdjaoisudhfalsdkjbfaksdhfbasjdk fa;sf ljda fsafksadfh alsdjfhaskjdfgahls d;fjasdlkfh9823udjs dlfhaspdf98as ufdaksjhfaisdhufalskdjfhas df"""
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'kdjfsofuousodifu982309rijdfsljsd  dfsdf sdf',
                        """dflskjdfs8d9fsdfjs sldfjka ;dlfjapfoia jsdflakjsdfp 0adsfuolwejf wflsdjf lsdkjf0saoiufja dlfjasd of;lasdjf ;askdjf asodfhas lkmfbashudf asd,fasdfna s,dfjas lcjx vjaskdlfjals dfkjasdflk jasldfkj asldkfjas ldfkasjdf a"""
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'asdf askdjf asldfkjsad lfkajsdlfksajd fasdfsdfdf',
                        """skdfjsld flskdjf alksdjfpasdofuapo sdfjalksdjf ;as.kjdf ;ljLKJL :KJL:KJ l;kdsjf ;lkj :Lkj FLDKFJ LSKFDJ ;LDFJ SLKDJF KSDLjf: Lfjldkj fLKDSJf lSKDjf ls;kdjf s;lkfjs L:KAJ :LFKJDL:DKjf L:SKjf;lKDJfl;SKJDf :LKSDj;lsdfj fsdljfsd ofisunafoialjsflmsdifjas;dlkfaj sdfkasjd flaksjdfnpmsao;difjkas dfnlaksdfa;sodljfas lfdjasdflmajsdlfknaf98wouanepr9qo3ud fadspuf oaisdufpoasid fj askdjn LKJH LKJHFL KJDHSF DSFLHSL JKDFHLSK DJFHLS KJDFHS"""
                    ))

            ma.flush()
Esempio n. 13
0
    def test_batch_submission(self):
        # Several newline-delimited packets in one datagram are all processed.
        aggregator = MetricsBucketAggregator('myhost', interval=self.interval)
        batch = ['counter:1|c', 'counter:1|c', 'gauge:1|g']
        aggregator.submit_packets("\n".join(batch))

        self.sleep_for_interval_length()
        flushed = self.sort_metrics(aggregator.flush())
        assert len(flushed) == 2
        counter, gauge = flushed
        # The two counter packets roll up; the gauge stands alone.
        assert counter['points'][0][1] == 2
        assert gauge['points'][0][1] == 1
Esempio n. 14
0
    def test_diagnostic_stats(self):
        """The packet-count diagnostic is flushed alongside user metrics."""
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        for i in range(10):
            stats.submit_packets('metric:10|c')
        stats.send_packet_count('datadog.dogstatsd.packet.count')

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2
        first, second = metrics

        # 10 packets were submitted, so the diagnostic reads 10.
        assert first['metric'] == 'datadog.dogstatsd.packet.count'
        assert first['points'][0][1] == 10
        # BUG FIX: `second` was unpacked but never checked; verify it is the
        # user-submitted counter.
        assert second['metric'] == 'metric'
    def test_sdstatsd_utf8_events(self):
        # Performance smoke test: submit many UTF-8 (Greek, Georgian, French)
        # event packets and flush repeatedly. No assertions; passing means no
        # encoding/parsing error was raised.
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(self.create_event_packet(
                        'Τη γλώσσα μου έδωσαν ελληνική',
                        """τὸ σπίτι φτωχικὸ στὶς ἀμμουδιὲς τοῦ Ὁμήρου. Μονάχη ἔγνοια ἡ γλῶσσα μου στὶς ἀμμουδιὲς τοῦ Ὁμήρου. ἀπὸ τὸ Ἄξιον ἐστί τοῦ Ὀδυσσέα Ἐλύτη"""
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'ვეპხის ტყაოსანი შოთა რუსთაველი',
                        """ღმერთსი შემვედრე, ნუთუ კვლა დამხსნას სოფლისა შრომასა, ცეცხლს, წყალსა და მიწასა, ჰაერთა თანა მრომასა; მომცნეს ფრთენი და აღვფრინდე, მივჰხვდე მას ჩემსა ნდომასა, დღისით და ღამით ვჰხედვიდე მზისა ელვათა კრთომაასა.
                        """
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'Traité sur la tolérance',
                        """Ose supposer qu'un Ministre éclairé & magnanime, un Prélat humain & sage, un Prince qui sait que son intérêt consiste dans le grand nombre de ses Sujets, & sa gloire dans leur bonheur, daigne jetter les yeux sur cet Ecrit informe & défectueux; il y supplée par ses propres lumieres; il se dit à lui-même: Que risquerai-je à voir la terre cultivée & ornée par plus de mains laborieuses, les tributs augmentés, l'Etat plus florissant?"""
                    ))

            ma.flush()
Esempio n. 16
0
    def test_string_sets(self):
        # Sets accept string members; cardinality counts distinct strings.
        aggregator = MetricsBucketAggregator('myhost', interval=self.interval)
        for member in ('string', 'sets', 'sets', 'test', 'test', 'test'):
            aggregator.submit_packets('my.set:%s|s' % member)

        # Assert that it's treated normally: 3 distinct members.
        self.sleep_for_interval_length()
        flushed = aggregator.flush()

        assert len(flushed) == 1
        metric = flushed[0]
        assert metric['metric'] == 'my.set'
        assert metric['points'][0][1] == 3

        # Flushing again — immediately and after another interval — yields
        # nothing: sets are cleared on flush.
        assert not aggregator.flush()
        self.sleep_for_interval_length()
        assert not aggregator.flush()
Esempio n. 17
0
    def test_empty_counter(self):
        # A fresh aggregator flushes nothing, and submitted data only becomes
        # visible once its bucket interval has elapsed.
        interval = self.interval
        aggregator = MetricsBucketAggregator('myhost', interval=interval)

        self.sleep_for_interval_length(interval)
        # Nothing submitted yet -> empty flush.
        assert len(self.sort_metrics(aggregator.flush())) == 0

        aggregator.submit_packets('my.first.counter:%s|c' % (1 * interval))
        # Flushing before the bucket length has been exceeded returns nothing.
        assert len(self.sort_metrics(aggregator.flush())) == 0

        # After the bucket closes the counter appears, normalized to 1.
        self.sleep_for_interval_length(interval)
        flushed = self.sort_metrics(aggregator.flush())
        assert len(flushed) == 1
        assert flushed[0]['metric'] == 'my.first.counter'
        assert flushed[0]['points'][0][1] == 1
Esempio n. 18
0
    def test_gauge(self):
        # Gauges keep the last value submitted within a bucket, and samples
        # carrying stale explicit timestamps are dropped.
        ag_interval = 2
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some counters.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')

        # Ensure that gauges roll up correctly.
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        # Last write wins: 5 overwrote 1.
        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        # Ensure that old gauges get dropped due to old timestamps
        # (1000000000 is Sep 2001, far in the past relative to now).
        stats.submit_metric('my.first.gauge', 5, 'g')
        stats.submit_metric('my.first.gauge', 1, 'g', timestamp=1000000000)
        stats.submit_metric('my.second.gauge', 20, 'g', timestamp=1000000000)

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        # Only the fresh my.first.gauge sample survives the flush.
        assert len(metrics) == 1

        first = metrics[0]

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'
Esempio n. 19
0
    def test_gauge_sample_rate(self):
        # Sample rates do not scale gauges: the value is reported as-is.
        interval = self.interval
        aggregator = MetricsBucketAggregator('myhost', interval=interval)

        # Submit a sampled gauge metric.
        aggregator.submit_packets('sampled.gauge:10|g|@0.1')

        # Assert that it's treated normally.
        self.sleep_for_interval_length(interval)
        flushed = aggregator.flush()
        assert len(flushed) == 1
        metric = flushed[0]
        assert metric['metric'] == 'sampled.gauge'
        assert metric['points'][0][1] == 10
Esempio n. 20
0
    def test_dogstatsd_utf8_events(self):
        # Performance smoke test: submit many UTF-8 (Greek, Georgian, French)
        # event packets and flush repeatedly. No assertions; passing means no
        # encoding/parsing error was raised.
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(
                        self.create_event_packet(
                            'Τη γλώσσα μου έδωσαν ελληνική',
                            """τὸ σπίτι φτωχικὸ στὶς ἀμμουδιὲς τοῦ Ὁμήρου. Μονάχη ἔγνοια ἡ γλῶσσα μου στὶς ἀμμουδιὲς τοῦ Ὁμήρου. ἀπὸ τὸ Ἄξιον ἐστί τοῦ Ὀδυσσέα Ἐλύτη"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'ვეპხის ტყაოსანი შოთა რუსთაველი',
                            """ღმერთსი შემვედრე, ნუთუ კვლა დამხსნას სოფლისა შრომასა, ცეცხლს, წყალსა და მიწასა, ჰაერთა თანა მრომასა; მომცნეს ფრთენი და აღვფრინდე, მივჰხვდე მას ჩემსა ნდომასა, დღისით და ღამით ვჰხედვიდე მზისა ელვათა კრთომაასა.
                        """))
                    ma.submit_packets(
                        self.create_event_packet(
                            'Traité sur la tolérance',
                            """Ose supposer qu'un Ministre éclairé & magnanime, un Prélat humain & sage, un Prince qui sait que son intérêt consiste dans le grand nombre de ses Sujets, & sa gloire dans leur bonheur, daigne jetter les yeux sur cet Ecrit informe & défectueux; il y supplée par ses propres lumieres; il se dit à lui-même: Que risquerai-je à voir la terre cultivée & ornée par plus de mains laborieuses, les tributs augmentés, l'Etat plus florissant?"""
                        ))

            ma.flush()
Esempio n. 21
0
    def test_dogstatsd_ascii_events(self):
        # Performance smoke test: submit many ASCII event packets and flush
        # repeatedly. No assertions; passing means nothing raised. The event
        # titles/bodies are arbitrary filler text.
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(
                        self.create_event_packet(
                            'asldkfj fdsaljfas dflksjafs fasdfkjaldsfkjasldf',
                            """alkdjfa slfalskdfjas lkfdjaoisudhfalsdkjbfaksdhfbasjdk fa;sf ljda fsafksadfh alsdjfhaskjdfgahls d;fjasdlkfh9823udjs dlfhaspdf98as ufdaksjhfaisdhufalskdjfhas df"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'kdjfsofuousodifu982309rijdfsljsd  dfsdf sdf',
                            """dflskjdfs8d9fsdfjs sldfjka ;dlfjapfoia jsdflakjsdfp 0adsfuolwejf wflsdjf lsdkjf0saoiufja dlfjasd of;lasdjf ;askdjf asodfhas lkmfbashudf asd,fasdfna s,dfjas lcjx vjaskdlfjals dfkjasdflk jasldfkj asldkfjas ldfkasjdf a"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'asdf askdjf asldfkjsad lfkajsdlfksajd fasdfsdfdf',
                            """skdfjsld flskdjf alksdjfpasdofuapo sdfjalksdjf ;as.kjdf ;ljLKJL :KJL:KJ l;kdsjf ;lkj :Lkj FLDKFJ LSKFDJ ;LDFJ SLKDJF KSDLjf: Lfjldkj fLKDSJf lSKDjf ls;kdjf s;lkfjs L:KAJ :LFKJDL:DKjf L:SKjf;lKDJfl;SKJDf :LKSDj;lsdfj fsdljfsd ofisunafoialjsflmsdifjas;dlkfaj sdfkasjd flaksjdfnpmsao;difjkas dfnlaksdfa;sodljfas lfdjasdflmajsdlfknaf98wouanepr9qo3ud fadspuf oaisdufpoasid fj askdjn LKJH LKJHFL KJDHSF DSFLHSL JKDFHLSK DJFHLS KJDFHS"""
                        ))

            ma.flush()
    def test_histogram(self):
        """Histograms report min/avg/median/max/count/95percentile on flush."""
        ag_interval = self.interval
        # The 'min' aggregate is not enabled by default.
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        self.wait_for_bucket_boundary(ag_interval)

        # Sample all numbers between 1-100 many times. This
        # means our percentiles should be relatively close to themselves.
        # BUG FIX: random.shuffle() needs a mutable sequence; a bare range()
        # object is not shuffleable on Python 3, so materialize it first
        # (matching test_histogram_flush_during_bucket). xrange -> range for
        # the same compatibility reason.
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        self.sleep_for_interval_length(ag_interval)
        # FIX: the original sorted the already-sorted list a second time
        # before unpacking; one sort suffices.
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 6
        p95, pavg, pcount, pmax, pmed, pmin = metrics
        assert p95['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95['points'][0][1], 95, 10)
        self.assert_almost_equal(pmax['points'][0][1], 99, 1)
        self.assert_almost_equal(pmed['points'][0][1], 50, 2)
        self.assert_almost_equal(pavg['points'][0][1], 50, 2)
        self.assert_almost_equal(pmin['points'][0][1], 1, 1)
        assert pcount['points'][0][1] == 4000  # 100 * 20 * 2
        assert p95['host'] == 'myhost'

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
Esempio n. 23
0
    def test_counter(self):
        # Counters roll up within a bucket, then report zero on the next
        # flush when no new samples arrive.
        bucket_interval = 1.0
        aggregator = MetricsBucketAggregator('myhost', interval=bucket_interval)

        # Track some counters.
        aggregator.submit_packets('my.first.counter:1|c')
        aggregator.submit_packets('my.first.counter:5|c')
        aggregator.submit_packets('my.second.counter:1|c')
        aggregator.submit_packets('my.third.counter:3|c')

        # Ensure they roll up nicely.
        self.sleep_for_interval_length(bucket_interval)
        flushed = self.sort_metrics(aggregator.flush())
        assert len(flushed) == 3

        first, second, third = flushed
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 6
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 1

        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 3

        # With no new samples, every counter resets to zero.
        self.sleep_for_interval_length(bucket_interval)
        flushed = self.sort_metrics(aggregator.flush())
        first, second, third = flushed
        expected_names = ('my.first.counter', 'my.second.counter',
                          'my.third.counter')
        for metric, name in zip((first, second, third), expected_names):
            assert metric['metric'] == name
            assert metric['points'][0][1] == 0
Esempio n. 24
0
    def test_sampled_histogram(self):
        # A histogram sampled at 0.5 doubles its count; the value aggregates
        # themselves are unchanged.
        # The 'min' aggregate is not enabled by default.
        aggregator = MetricsBucketAggregator(
            'myhost',
            interval=self.interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        aggregator.submit_packets('sampled.hist:5|h|@0.5')

        # Assert we scale up properly.
        self.sleep_for_interval_length()
        flushed = self.sort_metrics(aggregator.flush())
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(flushed)

        # One packet at rate 0.5 counts as two samples of the same value.
        assert pcount['points'][0][1] == 2
        for aggregate in (p95, pavg, pmed, pmax, pmin):
            assert aggregate['points'][0][1] == 5
Esempio n. 25
0
    def test_histogram_normalization(self):
        # Histogram .count is normalized by the flush interval:
        # 5 samples over 10s -> 0.5/s, 20 samples over 10s -> 2/s.
        bucket_interval = 10
        # The 'min' aggregate is not enabled by default.
        aggregator = MetricsBucketAggregator(
            'myhost',
            interval=bucket_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        for _ in range(5):
            aggregator.submit_packets('h1:1|h')
        for _ in range(20):
            aggregator.submit_packets('h2:1|h')

        self.sleep_for_interval_length(bucket_interval)
        flushed = self.sort_metrics(aggregator.flush())
        # Six aggregates per histogram; .count is the third of each group.
        _, _, h1count, _, _, _, _, _, h2count, _, _, _ = flushed

        assert h1count['points'][0][1] == 0.5
        assert h2count['points'][0][1] == 2
Esempio n. 26
0
    def test_histogram_counter(self):
        """histogram.count must equal the number of increments.

        Run 1 uses unsampled packets; run 2 uses a 0.5 sample rate, whose
        counts are scaled by 1/rate, so both totals must equal cnt * run.
        """
        ag_interval = self.interval
        cnt = 100000
        for run in [1, 2]:
            stats = MetricsBucketAggregator('myhost', interval=ag_interval)
            for i in range(cnt):
                if run == 2:
                    stats.submit_packets('test.counter:1|c|@0.5')
                    stats.submit_packets('test.hist:1|ms|@0.5')
                else:
                    stats.submit_packets('test.counter:1|c')
                    stats.submit_packets('test.hist:1|ms')
            self.sleep_for_interval_length(ag_interval)
            metrics = self.sort_metrics(stats.flush())
            assert len(metrics) > 0

            # Depending on timing, the samples may land in more than one
            # bucket, so sum every point flushed for each metric name.
            # (Idiom fix: the original built an intermediate list and
            # accumulated in a manual loop; sum() over a generator does the
            # same without the temporary.)
            counter_count = sum(m['points'][0][1] for m in metrics
                                if m['metric'] == 'test.counter')
            hist_count = sum(m['points'][0][1] for m in metrics
                             if m['metric'] == 'test.hist.count')

            assert counter_count == (cnt * run)
            assert hist_count == (cnt * run)
Esempio n. 27
0
    def test_counter_normalization(self):
        # Counter values are divided by the bucket interval on flush.
        bucket_interval = 10
        aggregator = MetricsBucketAggregator('myhost', interval=bucket_interval)

        # 1 + 4 + 15 = 20 over a 10s interval -> 2 per second.
        aggregator.submit_packets('int:1|c')
        aggregator.submit_packets('int:4|c')
        aggregator.submit_packets('int:15|c')

        # 5 over 10s -> 0.5 per second.
        aggregator.submit_packets('float:5|c')

        self.sleep_for_interval_length(bucket_interval)
        flushed = self.sort_metrics(aggregator.flush())
        assert len(flushed) == 2

        floatc, intc = flushed

        assert floatc['metric'] == 'float'
        assert floatc['points'][0][1] == 0.5
        assert floatc['host'] == 'myhost'

        assert intc['metric'] == 'int'
        assert intc['points'][0][1] == 2
        assert intc['host'] == 'myhost'
Esempio n. 28
0
    def test_recent_point_threshold(self):
        """Points older than ``recent_point_threshold`` are dropped, while
        points within the threshold keep their own timestamp: they land in
        the bucket their timestamp falls into, and are flushed alongside the
        current bucket's data.
        """
        ag_interval = 1
        threshold = 100
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            recent_point_threshold=threshold,
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        # Well outside the acceptance window: twice the threshold in the past.
        timestamp_beyond_threshold = time.time() - threshold * 2

        # Ensure that old gauges get dropped due to old timestamps
        stats.submit_metric('my.first.gauge', 5, 'g')
        stats.submit_metric('my.first.gauge',
                            1,
                            'g',
                            timestamp=timestamp_beyond_threshold)
        stats.submit_metric('my.second.gauge',
                            20,
                            'g',
                            timestamp=timestamp_beyond_threshold)

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        # Only the un-timestamped point survives; both stale points were dropped.
        assert len(metrics) == 1

        first = metrics[0]
        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        # Inside the acceptance window: half the threshold in the past.
        timestamp_within_threshold = time.time() - threshold / 2
        # Expected bucket start for that timestamp (floored to the interval).
        bucket_for_timestamp_within_threshold = timestamp_within_threshold - (
            timestamp_within_threshold % ag_interval)
        stats.submit_metric('my.1.gauge', 5, 'g')
        stats.submit_metric('my.1.gauge',
                            1,
                            'g',
                            timestamp=timestamp_within_threshold)
        stats.submit_metric('my.2.counter',
                            20,
                            'c',
                            timestamp=timestamp_within_threshold)
        stats.submit_metric('my.3.set',
                            20,
                            's',
                            timestamp=timestamp_within_threshold)
        stats.submit_metric('my.4.histogram',
                            20,
                            'h',
                            timestamp=timestamp_within_threshold)

        self.sleep_for_interval_length(ag_interval)
        flush_timestamp = time.time()
        # The bucket timestamp is the beginning of the bucket that ended before we flushed
        bucket_timestamp = flush_timestamp - (flush_timestamp %
                                              ag_interval) - ag_interval
        metrics = self.sort_metrics(stats.flush())
        # 11 series: my.1.gauge in 2 buckets, my.2.counter in 2 buckets,
        # my.3.set in 1 bucket, and 6 histogram aggregates for my.4.histogram
        # (the defaults plus 'min' plus the percentile).
        assert len(metrics) == 11

        first, first_b, second, second_b, third, h1, h2, h3, h4, h5, h6 = metrics
        # Backdated gauge point is reported in its own (older) bucket...
        assert first['metric'] == 'my.1.gauge'
        assert first['points'][0][1] == 1
        assert first['host'] == 'myhost'
        self.assert_almost_equal(first['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)
        # ...while the un-timestamped point lands in the just-closed bucket.
        assert first_b['metric'] == 'my.1.gauge'
        assert first_b['points'][0][1] == 5
        self.assert_almost_equal(first_b['points'][0][0], bucket_timestamp,
                                 0.1)

        # Backdated counter: value in the older bucket, zero in the current one.
        assert second['metric'] == 'my.2.counter'
        assert second['points'][0][1] == 20
        self.assert_almost_equal(second['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)
        assert second_b['metric'] == 'my.2.counter'
        assert second_b['points'][0][1] == 0
        self.assert_almost_equal(second_b['points'][0][0], bucket_timestamp,
                                 0.1)

        # Set: one unique value -> cardinality 1, in the backdated bucket.
        assert third['metric'] == 'my.3.set'
        assert third['points'][0][1] == 1
        self.assert_almost_equal(third['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)

        # All histogram aggregates share the backdated bucket's timestamp.
        assert h1['metric'] == 'my.4.histogram.95percentile'
        assert h1['points'][0][1] == 20
        self.assert_almost_equal(h1['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)
        assert h1['points'][0][0] == h2['points'][0][0]
        assert h1['points'][0][0] == h3['points'][0][0]
        assert h1['points'][0][0] == h4['points'][0][0]
        assert h1['points'][0][0] == h5['points'][0][0]
# Esempio n. 29
# 0
    def test_counter_flush_during_bucket(self):
        """Flushing mid-bucket reports only completed buckets: the in-progress
        bucket's data comes out on the next flush, and counters then keep
        reporting zero on subsequent flushes (until they would expire).
        """
        ag_interval = 5
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        # Start just past a bucket boundary so the sleep below reliably puts
        # the later packet in the next bucket.
        self.wait_for_bucket_boundary(ag_interval)
        time.sleep(0.5)

        # Track some counters.
        # Values are multiples of the interval so the normalized per-second
        # rates come out as small integers (1, 1, 3 below).
        stats.submit_packets("my.first.counter:%s|c" % (1 * ag_interval))
        stats.submit_packets("my.second.counter:%s|c" % (1 * ag_interval))
        stats.submit_packets("my.third.counter:%s|c" % (3 * ag_interval))
        time.sleep(ag_interval)
        stats.submit_packets("my.first.counter:%s|c" % (5 * ag_interval))

        # Want to get the data from the 2 buckets in 2 different calls, so don't wait for
        #  the bucket interval to pass
        metrics = self.sort_metrics(stats.flush())

        # First flush: only the first (completed) bucket is reported.
        assert len(metrics) == 3
        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 1
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 1

        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 3

        # Now wait for the bucket interval to pass, and get the other points
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 3

        # Second flush: the second bucket, where only my.first.counter got data.
        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 0

        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 0

        self.sleep_for_interval_length(ag_interval)
        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3
        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 0
        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 0
        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 0

        self.sleep_for_interval_length(ag_interval)
        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3
        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 0
        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 0
        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 0
# Esempio n. 30
# 0
    def test_metrics_expiry(self):
        # Ensure metrics eventually expire and stop submitting.
        ag_interval = self.interval
        expiry = ag_interval * 5 + 2
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            expiry_seconds=expiry,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        stats.submit_packets('test.counter:123|c')
        stats.submit_packets('test.gauge:55|g')
        stats.submit_packets('test.set:44|s')
        stats.submit_packets('test.histogram:11|h')
        submit_time = time.time()
        submit_bucket_timestamp = submit_time - (submit_time % ag_interval)

        # Ensure points keep submitting
        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 9
        assert metrics[0]['metric'] == 'test.counter'
        assert metrics[0]['points'][0][1] == 123
        assert metrics[0]['points'][0][0] == submit_bucket_timestamp

        # flush without waiting - should get nothing
        metrics = self.sort_metrics(stats.flush())
        assert not metrics, str(metrics)

        # Don't submit anything
        submit_time = time.time()
        bucket_timestamp = submit_time - (submit_time % ag_interval)

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1
        assert metrics[0]['metric'] == 'test.counter'
        assert metrics[0]['points'][0][1] == 0
        assert metrics[0]['points'][0][0] == bucket_timestamp

        stats.submit_packets('test.gauge:5|g')
        self.sleep_for_interval_length()
        time.sleep(0.3)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2
        assert metrics[0]['metric'] == 'test.counter'
        assert metrics[0]['points'][0][1] == 0
        assert metrics[1]['metric'] == 'test.gauge'
        assert metrics[1]['points'][0][1] == 5

        # flush without waiting - should get nothing
        metrics = self.sort_metrics(stats.flush())
        assert not metrics, str(metrics)

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 1
        assert metrics[0]['metric'] == 'test.counter'
        assert metrics[0]['points'][0][1] == 0

        # Now sleep for longer than the expiry window and ensure
        # no points are submitted
        self.sleep_for_interval_length()
        time.sleep(2)
        m = stats.flush()
        assert not m, str(m)

        # If we submit again, we're all good.
        stats.submit_packets('test.counter:123|c')
        stats.submit_packets('test.gauge:55|g')
        stats.submit_packets('test.set:44|s')
        stats.submit_packets('test.histogram:11|h')
        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 9
        assert metrics[0]['metric'] == 'test.counter'
        assert metrics[0]['points'][0][1] == 123