Example #1
    def test_counter(self):
        stats = MetricsAggregator('myhost')

        # Track some counters.
        stats.submit_packets('my.first.counter:1|c')
        stats.submit_packets('my.first.counter:5|c')
        stats.submit_packets('my.second.counter:1|c')
        stats.submit_packets('my.third.counter:3|c')

        # Ensure they roll up nicely.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3

        first, second, third = metrics
        nt.assert_equals(first['metric'], 'my.first.counter')
        nt.assert_equals(first['points'][0][1], 6)
        nt.assert_equals(first['host'], 'myhost')

        nt.assert_equals(second['metric'], 'my.second.counter')
        nt.assert_equals(second['points'][0][1], 1)

        nt.assert_equals(third['metric'], 'my.third.counter')
        nt.assert_equals(third['points'][0][1], 3)

        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        first, second, third = metrics
        nt.assert_equals(first['metric'], 'my.first.counter')
        nt.assert_equals(first['points'][0][1], 0)
        nt.assert_equals(second['metric'], 'my.second.counter')
        nt.assert_equals(second['points'][0][1], 0)
        nt.assert_equals(third['metric'], 'my.third.counter')
        nt.assert_equals(third['points'][0][1], 0)
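
A quick orientation note: the packets submitted throughout these examples use a statsd-style text format that appears to be name:value|type, with optional |@sample_rate and |#tag1,tag2 fields. The breakdown in this small sketch is inferred from the tests themselves, not from a formal specification.

# Illustrative only: split a few of the packet shapes used in these examples.
packets = [
    'my.first.counter:1|c',     # counter increment
    'sampled.gauge:10|g|@0.1',  # gauge with a sample rate
    'gauge:4|c|#tag1,tag2',     # counter with tags/dimensions
]
for packet in packets:
    print(packet.split('|'))
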
Example #2
    def test_event_tags(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('_e{6,4}:title1|text')
        stats.submit_packets('_e{6,4}:title2|text|#t1')
        stats.submit_packets('_e{6,4}:title3|text|#t1,t2:v2,t3,t4')
        stats.submit_packets('_e{6,4}:title4|text|k:key|p:normal|#t1,t2')

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        try:
            first['dimensions']
        except Exception:
            assert True
        else:
            assert False, "event['dimensions'] shouldn't be defined when no tags are specified in the packet"
        nt.assert_equal(first['title'], 'title1')
        nt.assert_equal(first['text'], 'text')

        nt.assert_equal(second['title'], 'title2')
        nt.assert_equal(second['text'], 'text')
        nt.assert_equal(second['dimensions'], sorted(['t1']))

        nt.assert_equal(third['title'], 'title3')
        nt.assert_equal(third['text'], 'text')
        nt.assert_equal(third['dimensions'], sorted(['t1', 't2:v2', 't3', 't4']))

        nt.assert_equal(fourth['title'], 'title4')
        nt.assert_equal(fourth['text'], 'text')
        nt.assert_equal(fourth['aggregation_key'], 'key')
        nt.assert_equal(fourth['priority'], 'normal')
        nt.assert_equal(fourth['dimensions'], sorted(['t1', 't2']))
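
The _e{m,n} header in these event packets appears to encode the title length m and the text length n ('title1' is 6 characters, 'text' is 4). A hedged reconstruction of one packet under that reading:

# Hypothetical reconstruction of an event packet header from its title and text.
title, text = 'title1', 'text'
print('_e{%d,%d}:%s|%s' % (len(title), len(text), title, text))  # _e{6,4}:title1|text
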
Example #3
    def test_histogram(self):
        stats = MetricsAggregator('myhost')

        # Sample every number in 0-99 many times, so the flushed
        # percentiles should land close to their expected values.
        percentiles = range(100)
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in xrange(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        metrics = self.sort_metrics(stats.flush())

        def assert_almost_equal(i, j, e=1):
            # Floating point math?
            assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

        nt.assert_equal(len(metrics), 5)
        p95, pavg, pcount, pmax, pmed = self.sort_metrics(metrics)
        nt.assert_equal(p95['metric'], 'my.p.95percentile')
        assert_almost_equal(p95['points'][0][1], 95, 10)
        assert_almost_equal(pmax['points'][0][1], 99, 1)
        assert_almost_equal(pmed['points'][0][1], 50, 2)
        assert_almost_equal(pavg['points'][0][1], 50, 2)
        assert_almost_equal(pcount['points'][0][1], 4000, 0)  # 100 * 20 * 2
        nt.assert_equals(p95['host'], 'myhost')

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
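
For orientation, the five series unpacked above are the 95th percentile, average, count, max, and median of the submitted samples. The sketch below approximates the asserted values with plain Python; it only illustrates the assertions and is not the aggregator's own percentile algorithm.

# Approximate the expected values from the same sample distribution.
samples = sorted(list(range(100)) * 20 * 2)  # mirrors the 100 * 20 * 2 submissions
p95 = samples[int(0.95 * len(samples)) - 1]  # simple nearest-rank 95th percentile
mean = sum(samples) / float(len(samples))
median = samples[len(samples) // 2]
print(p95, mean, len(samples), max(samples), median)  # about 94, 49.5, 4000, 99, 50
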
Example #4
    def test_tags(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('gauge:1|c')
        stats.submit_packets('gauge:2|c|@1')
        stats.submit_packets('gauge:4|c|#tag1,tag2')
        stats.submit_packets('gauge:8|c|#tag2,tag1')  # Should be the same as above
        stats.submit_packets('gauge:16|c|#tag3,tag4')

        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 3
        first, second, third = metrics

        nt.assert_equal(first['metric'], 'gauge')
        nt.assert_equal(first['dimensions'], None)
        nt.assert_equal(first['points'][0][1], 3)
        nt.assert_equal(first['host'], 'myhost')

        nt.assert_equal(second['metric'], 'gauge')
        nt.assert_equal(second['dimensions'], ('tag1', 'tag2'))
        nt.assert_equal(second['points'][0][1], 12)
        nt.assert_equal(second['host'], 'myhost')

        nt.assert_equal(third['metric'], 'gauge')
        nt.assert_equal(third['dimensions'], ('tag3', 'tag4'))
        nt.assert_equal(third['points'][0][1], 16)
        nt.assert_equal(third['host'], 'myhost')
Example #5
    def test_scientific_notation(self):
        stats = MetricsAggregator('myhost', interval=10)

        stats.submit_packets('test.scinot:9.512901e-05|g')
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 1
        ts, val = metrics[0].get('points')[0]
        nt.assert_almost_equal(val, 9.512901e-05)
Example #6
    def test_scientific_notation(self):
        stats = MetricsAggregator('myhost', interval=10)

        stats.submit_packets('test.scinot:9.512901e-05|g')
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 1
        ts, val = metrics[0].get('points')[0]
        nt.assert_almost_equal(val, 9.512901e-05)
Example #7
    def test_sampled_counter(self):

        # Submit a sampled counter.
        stats = MetricsAggregator('myhost')
        stats.submit_packets('sampled.counter:1|c|@0.5')
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.counter'
        nt.assert_equal(m['points'][0][1], 2)
Example #8
    def test_sampled_counter(self):

        # Submit a sampled counter.
        stats = MetricsAggregator('myhost')
        stats.submit_packets('sampled.counter:1|c|@0.5')
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.counter'
        nt.assert_equal(m['points'][0][1], 2)
Example #9
    def test_counter(self):
        stats = MetricsAggregator('myhost')

        # Track some counters.
        stats.submit_packets('my.first.counter:1|c')
        stats.submit_packets('my.first.counter:5|c')
        stats.submit_packets('my.second.counter:1|c')
        stats.submit_packets('my.third.counter:3|c')

        # Ensure they roll up nicely.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3

        first, second, third = metrics
        nt.assert_equals(first['metric'], 'my.first.counter')
        nt.assert_equals(first['points'][0][1], 6)
        nt.assert_equals(first['host'], 'myhost')

        nt.assert_equals(second['metric'], 'my.second.counter')
        nt.assert_equals(second['points'][0][1], 1)

        nt.assert_equals(third['metric'], 'my.third.counter')
        nt.assert_equals(third['points'][0][1], 3)

        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        first, second, third = metrics
        nt.assert_equals(first['metric'], 'my.first.counter')
        nt.assert_equals(first['points'][0][1], 0)
        nt.assert_equals(second['metric'], 'my.second.counter')
        nt.assert_equals(second['points'][0][1], 0)
        nt.assert_equals(third['metric'], 'my.third.counter')
        nt.assert_equals(third['points'][0][1], 0)
Example #10
class TestAggregator(unittest.TestCase):

    def setUp(self):
        self.aggr = MetricsAggregator('test-aggr')

    def test_dupe_tags(self):
        self.aggr.increment('test-counter', 1, dimensions={'a': 'avalue', 'b': 'bvalue'})
        self.aggr.increment(
            'test-counter', 1, dimensions={'a': 'avalue', 'b': 'bvalue', 'b': 'bvalue'})
        self.assertEquals(len(self.aggr.metrics), 1, self.aggr.metrics)
        metric = self.aggr.metrics.values()[0]
        self.assertEquals(metric.value, 2)
Example #11
    def test_gauge_sample_rate(self):
        stats = MetricsAggregator('myhost')

        # Submit a sampled gauge metric.
        stats.submit_packets('sampled.gauge:10|g|@0.1')

        # Assert that it's treated normally.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m['metric'], 'sampled.gauge')
        nt.assert_equal(m['points'][0][1], 10)
Example #12
    def test_sampled_histogram(self):
        # Submit a sampled histogram.
        stats = MetricsAggregator('myhost')
        stats.submit_packets('sampled.hist:5|h|@0.5')

        # Assert we scale up properly.
        metrics = self.sort_metrics(stats.flush())
        p95, pavg, pcount, pmax, pmed = self.sort_metrics(metrics)

        nt.assert_equal(pcount['points'][0][1], 2)
        for p in [p95, pavg, pmed, pmax]:
            nt.assert_equal(p['points'][0][1], 5)
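
The count of 2 asserted above is consistent with sample-rate scaling: one packet submitted at rate 0.5 stands in for 1 / 0.5 = 2 observations. A one-line sketch under that assumption:

# Hypothetical scaling: observed packets divided by the sample rate.
sample_rate = 0.5
print(1 / sample_rate)  # 2.0, the flushed histogram count asserted above
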
Example #13
    def test_batch_submission(self):
        # Submit several metrics in one newline-separated packet.
        stats = MetricsAggregator('myhost')
        metrics = ['counter:1|c', 'counter:1|c', 'gauge:1|g']
        packet = "\n".join(metrics)
        stats.submit_packets(packet)

        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(2, len(metrics))
        counter, gauge = metrics
        assert counter['points'][0][1] == 2
        assert gauge['points'][0][1] == 1
Example #14
    def test_sampled_histogram(self):
        # Submit a sampled histogram.
        stats = MetricsAggregator('myhost')
        stats.submit_packets('sampled.hist:5|h|@0.5')

        # Assert we scale up properly.
        metrics = self.sort_metrics(stats.flush())
        p95, pavg, pcount, pmax, pmed = self.sort_metrics(metrics)

        nt.assert_equal(pcount['points'][0][1], 2)
        for p in [p95, pavg, pmed, pmax]:
            nt.assert_equal(p['points'][0][1], 5)
Example #15
    def test_gauge_sample_rate(self):
        stats = MetricsAggregator('myhost')

        # Submit a sampled gauge metric.
        stats.submit_packets('sampled.gauge:10|g|@0.1')

        # Assert that it's treated normally.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m['metric'], 'sampled.gauge')
        nt.assert_equal(m['points'][0][1], 10)
Example #16
    def test_batch_submission(self):
        # Submit several metrics in one newline-separated packet.
        stats = MetricsAggregator('myhost')
        metrics = [
            'counter:1|c',
            'counter:1|c',
            'gauge:1|g'
        ]
        packet = "\n".join(metrics)
        stats.submit_packets(packet)

        metrics = self.sort_metrics(stats.flush())
        nt.assert_equal(2, len(metrics))
        counter, gauge = metrics
        assert counter['points'][0][1] == 2
        assert gauge['points'][0][1] == 1
Example #17
    def __init__(self, name, init_config, agent_config, instances=None):
        """Initialize a new check.

        :param name: The name of the check
        :param init_config: The config for initializing the check
        :param agent_config: The global configuration for the agent
        :param instances: A list of configuration objects for each instance.
        """
        self.name = name
        self.init_config = init_config
        self.agent_config = agent_config
        self.hostname = monagent.common.util.get_hostname(agent_config)
        self.log = logging.getLogger('%s.%s' % (__name__, name))

        self.aggregator = MetricsAggregator(self.hostname,
                                            recent_point_threshold=agent_config.get('recent_point_threshold',
                                                                                    None))

        self.events = []
        self.instances = instances or []
        self.warnings = []
        self.library_versions = None

        api_config = self.agent_config['Api']
        self.keystone = Keystone(api_config)
Example #18
    def test_event_text(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('_e{2,0}:t1|')
        stats.submit_packets('_e{2,12}:t2|text|content')
        stats.submit_packets('_e{2,23}:t3|First line\\nSecond line')  # \n is a newline
        stats.submit_packets(u'_e{2,19}:t4|♬ †øU †øU ¥ºu T0µ ♪')  # utf-8 compliant

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        nt.assert_equal(first['text'], '')
        nt.assert_equal(second['text'], 'text|content')
        nt.assert_equal(third['text'], 'First line\nSecond line')
        nt.assert_equal(fourth['text'], u'♬ †øU †øU ¥ºu T0µ ♪')
Example #19
    def test_event_title(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('_e{0,4}:|text')
        stats.submit_packets(u'_e{9,4}:2intitulé|text')
        stats.submit_packets('_e{14,4}:3title content|text')
        stats.submit_packets('_e{14,4}:4title|content|text')
        stats.submit_packets('_e{13,4}:5title\\ntitle|text')  # \n stays escaped

        events = self.sort_events(stats.flush_events())

        assert len(events) == 5
        first, second, third, fourth, fifth = events

        nt.assert_equal(first['title'], '')
        nt.assert_equal(second['title'], u'2intitulé')
        nt.assert_equal(third['title'], '3title content')
        nt.assert_equal(fourth['title'], '4title|content')
        nt.assert_equal(fifth['title'], '5title\\ntitle')
Example #20
    def test_sets(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.set:10|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')

        # Assert that the set counts unique values.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m['metric'], 'my.set')
        nt.assert_equal(m['points'][0][1], 3)

        # Assert there are no more sets
        assert not stats.flush()
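
The flushed value of 3 matches the number of distinct values submitted, which suggests the 's' type counts unique values per flush interval. A stand-in using a plain Python set:

# Illustration only: count the distinct submitted values.
submitted = [10, 20, 20, 30, 30, 30]
print(len(set(submitted)))  # 3, matching the point asserted above
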
Example #21
    def setUp(self):
        aggregator = MetricsAggregator("test_host")
        self.server = Server(aggregator, "localhost", STATSD_PORT)
        self.reporter = DummyReporter(aggregator)

        self.t1 = threading.Thread(target=self.server.start)
        self.t1.start()

        confd_path = os.path.realpath(os.path.join(os.path.abspath(__file__), "..", "jmx_yamls"))
        JMXFetch.init(confd_path, {'dogstatsd_port': STATSD_PORT}, get_logging_config(), 15)
Example #22
    def test_metrics_expiry(self):
        # Ensure metrics eventually expire and stop submitting.
        stats = MetricsAggregator('myhost', expiry_seconds=1)
        stats.submit_packets('test.counter:123|c')

        # Ensure points keep submitting
        assert stats.flush()
        assert stats.flush()
        time.sleep(0.5)
        assert stats.flush()

        # Now sleep for longer than the expiry window and ensure
        # no points are submitted
        time.sleep(2)
        m = stats.flush()
        assert not m, str(m)

        # If we submit again, we're all good.
        stats.submit_packets('test.counter:123|c')
        assert stats.flush()
Example #23
    def test_histogram_counter(self):
        # Test that histogram.count matches the equivalent counter
        # increments, both with and without a sample rate.
        cnt = 100000
        for run in [1, 2]:
            stats = MetricsAggregator('myhost')
            for i in xrange(cnt):
                if run == 2:
                    stats.submit_packets('test.counter:1|c|@0.5')
                    stats.submit_packets('test.hist:1|ms|@0.5')
                else:
                    stats.submit_packets('test.counter:1|c')
                    stats.submit_packets('test.hist:1|ms')
            metrics = self.sort_metrics(stats.flush())
            assert len(metrics) > 0

            nt.assert_equal([m['points'][0][1]
                             for m in metrics if m['metric'] == 'test.counter'], [cnt * run])
            nt.assert_equal([m['points'][0][1]
                             for m in metrics if m['metric'] == 'test.hist.count'], [cnt * run])
Example #24
class TestAggregator(unittest.TestCase):
    def setUp(self):
        self.aggr = MetricsAggregator('test-aggr')

    def test_dupe_tags(self):
        self.aggr.increment('test-counter',
                            1,
                            dimensions={
                                'a': 'avalue',
                                'b': 'bvalue'
                            })
        self.aggr.increment('test-counter',
                            1,
                            dimensions={
                                'a': 'avalue',
                                'b': 'bvalue',
                                'b': 'bvalue'
                            })
        self.assertEqual(len(self.aggr.metrics), 1, self.aggr.metrics)
        metric = self.aggr.metrics.values()[0]
        self.assertEqual(metric.value, 2)
Example #25
    def test_histogram(self):
        stats = MetricsAggregator('myhost')

        # Sample every number in 0-99 many times, so the flushed
        # percentiles should land close to their expected values.
        percentiles = range(100)
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in xrange(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        metrics = self.sort_metrics(stats.flush())

        def assert_almost_equal(i, j, e=1):
            # Floating point math?
            assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

        nt.assert_equal(len(metrics), 5)
        p95, pavg, pcount, pmax, pmed = self.sort_metrics(metrics)
        nt.assert_equal(p95['metric'], 'my.p.95percentile')
        assert_almost_equal(p95['points'][0][1], 95, 10)
        assert_almost_equal(pmax['points'][0][1], 99, 1)
        assert_almost_equal(pmed['points'][0][1], 50, 2)
        assert_almost_equal(pavg['points'][0][1], 50, 2)
        assert_almost_equal(pcount['points'][0][1], 4000, 0)  # 100 * 20 * 2
        nt.assert_equals(p95['host'], 'myhost')

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
Example #26
    def test_sets(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.set:10|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')

        # Assert that the set counts unique values.
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equal(m['metric'], 'my.set')
        nt.assert_equal(m['points'][0][1], 3)

        # Assert there are no more sets
        assert not stats.flush()
Example #27
    def test_histogram_normalization(self):
        stats = MetricsAggregator('myhost', interval=10)
        for i in range(5):
            stats.submit_packets('h1:1|h')
        for i in range(20):
            stats.submit_packets('h2:1|h')

        metrics = self.sort_metrics(stats.flush())
        _, _, h1count, _, _, _, _, h2count, _, _ = metrics

        nt.assert_equal(h1count['points'][0][1], 0.5)
        nt.assert_equal(h2count['points'][0][1], 2)
Example #28
    def test_rate(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.rate:10|_dd-r')
        # Sleep 1 second so the time interval > 0
        time.sleep(1)
        stats.submit_packets('my.rate:40|_dd-r')

        # Check that the rate is calculated correctly
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equals(m['metric'], 'my.rate')
        nt.assert_equals(m['points'][0][1], 30)

        # Assert that no more rates are given
        assert not stats.flush()
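
The asserted value of 30 is consistent with rate = value delta / elapsed time between the two submissions, which is roughly one second here. A rough sketch under that assumption:

# Approximate the rate the test expects.
delta_value = 40 - 10
elapsed_seconds = 1.0  # approximate; the test sleeps one second between packets
print(delta_value / elapsed_seconds)  # 30.0
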
Example #29
    def __init__(self, name, init_config, agent_config, instances=None):
        """Initialize a new check.

        :param name: The name of the check
        :param init_config: The config for initializing the check
        :param agent_config: The global configuration for the agent
        :param instances: A list of configuration objects for each instance.
        """
        self.name = name
        self.init_config = init_config
        self.agent_config = agent_config
        self.hostname = monagent.common.util.get_hostname(agent_config)
        self.log = logging.getLogger('%s.%s' % (__name__, name))

        self.aggregator = MetricsAggregator(
            self.hostname,
            recent_point_threshold=agent_config.get('recent_point_threshold',
                                                    None))

        self.events = []
        self.instances = instances or []
        self.warnings = []
        self.library_versions = None
Example #30
    def test_metrics_expiry(self):
        # Ensure metrics eventually expire and stop submitting.
        stats = MetricsAggregator('myhost', expiry_seconds=1)
        stats.submit_packets('test.counter:123|c')

        # Ensure points keep submitting
        assert stats.flush()
        assert stats.flush()
        time.sleep(0.5)
        assert stats.flush()

        # Now sleep for longer than the expiry window and ensure
        # no points are submitted
        time.sleep(2)
        m = stats.flush()
        assert not m, str(m)

        # If we submit again, we're all good.
        stats.submit_packets('test.counter:123|c')
        assert stats.flush()
Example #31
    def test_bad_packets_throw_errors(self):
        packets = [
            'missing.value.and.type',
            'missing.type:2',
            'missing.value|c',
            '2|c',
            'unknown.type:2|z',
            'string.value:abc|c',
            'string.sample.rate:0|c|@abc',
            # Bad event-like packets
            '_ev{1,2}:bad_header',
            '_e{1,}:invalid|headers',
            '_e:missing|size|headers',
            '_e:{1,1}:t|t|t:bad_meta|h',
        ]

        stats = MetricsAggregator('myhost')
        for packet in packets:
            try:
                stats.submit_packets(packet)
            except Exception:
                assert True
            else:
                assert False, 'invalid : %s' % packet
Example #32
    def test_bad_packets_throw_errors(self):
        packets = [
            'missing.value.and.type',
            'missing.type:2',
            'missing.value|c',
            '2|c',
            'unknown.type:2|z',
            'string.value:abc|c',
            'string.sample.rate:0|c|@abc',
            # Bad event-like packets
            '_ev{1,2}:bad_header',
            '_e{1,}:invalid|headers',
            '_e:missing|size|headers',
            '_e:{1,1}:t|t|t:bad_meta|h',
        ]

        stats = MetricsAggregator('myhost')
        for packet in packets:
            try:
                stats.submit_packets(packet)
            except Exception:
                assert True
            else:
                assert False, 'invalid : %s' % packet
Example #33
    def test_counter_normalization(self):
        stats = MetricsAggregator('myhost', interval=10)

        # Assert counters are normalized.
        stats.submit_packets('int:1|c')
        stats.submit_packets('int:4|c')
        stats.submit_packets('int:15|c')

        stats.submit_packets('float:5|c')

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        floatc, intc = metrics

        nt.assert_equal(floatc['metric'], 'float')
        nt.assert_equal(floatc['points'][0][1], 0.5)
        nt.assert_equal(floatc['host'], 'myhost')

        nt.assert_equal(intc['metric'], 'int')
        nt.assert_equal(intc['points'][0][1], 2)
        nt.assert_equal(intc['host'], 'myhost')
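
The expected points follow if counters are normalized to a per-second rate over the 10-second flush interval; the same arithmetic matches the 0.5 and 2 histogram counts in the normalization example further above. A sketch under that interpretation:

# Per-second normalization over the flush interval (inferred from the assertions).
interval = 10.0
print((1 + 4 + 15) / interval)  # 2.0 -> the 'int' counter
print(5 / interval)             # 0.5 -> the 'float' counter
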
Example #34
    def test_histogram_normalization(self):
        stats = MetricsAggregator('myhost', interval=10)
        for i in range(5):
            stats.submit_packets('h1:1|h')
        for i in range(20):
            stats.submit_packets('h2:1|h')

        metrics = self.sort_metrics(stats.flush())
        _, _, h1count, _, _, _, _, h2count, _, _ = metrics

        nt.assert_equal(h1count['points'][0][1], 0.5)
        nt.assert_equal(h2count['points'][0][1], 2)
Example #35
    def test_rate(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('my.rate:10|_dd-r')
        # Sleep 1 second so the time interval > 0
        time.sleep(1)
        stats.submit_packets('my.rate:40|_dd-r')

        # Check that the rate is calculated correctly
        metrics = stats.flush()
        nt.assert_equal(len(metrics), 1)
        m = metrics[0]
        nt.assert_equals(m['metric'], 'my.rate')
        nt.assert_equals(m['points'][0][1], 30)

        # Assert that no more rates are given
        assert not stats.flush()
Example #36
    def test_counter_normalization(self):
        stats = MetricsAggregator('myhost', interval=10)

        # Assert counters are normalized.
        stats.submit_packets('int:1|c')
        stats.submit_packets('int:4|c')
        stats.submit_packets('int:15|c')

        stats.submit_packets('float:5|c')

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        floatc, intc = metrics

        nt.assert_equal(floatc['metric'], 'float')
        nt.assert_equal(floatc['points'][0][1], 0.5)
        nt.assert_equal(floatc['host'], 'myhost')

        nt.assert_equal(intc['metric'], 'int')
        nt.assert_equal(intc['points'][0][1], 2)
        nt.assert_equal(intc['host'], 'myhost')
Example #37
class AgentCheck(object):
    def __init__(self, name, init_config, agent_config, instances=None):
        """Initialize a new check.

        :param name: The name of the check
        :param init_config: The config for initializing the check
        :param agent_config: The global configuration for the agent
        :param instances: A list of configuration objects for each instance.
        """
        self.name = name
        self.init_config = init_config
        self.agent_config = agent_config
        self.hostname = monagent.common.util.get_hostname(agent_config)
        self.log = logging.getLogger('%s.%s' % (__name__, name))

        self.aggregator = MetricsAggregator(
            self.hostname,
            recent_point_threshold=agent_config.get('recent_point_threshold',
                                                    None))

        self.events = []
        self.instances = instances or []
        self.warnings = []
        self.library_versions = None

    def instance_count(self):
        """Return the number of instances that are configured for this check.
        """
        return len(self.instances)

    def gauge(self,
              metric,
              value,
              dimensions=None,
              delegated_tenant=None,
              hostname=None,
              device_name=None,
              timestamp=None):
        """Record the value of a gauge, with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value of the gauge
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        :param timestamp: (optional) The timestamp for this metric value
        """
        self.aggregator.gauge(metric, value, self._set_dimensions(dimensions),
                              delegated_tenant, hostname, device_name,
                              timestamp)

    def increment(self,
                  metric,
                  value=1,
                  dimensions=None,
                  delegated_tenant=None,
                  hostname=None,
                  device_name=None):
        """Increment a counter with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value to increment by
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.increment(metric, value,
                                  self._set_dimensions(dimensions),
                                  delegated_tenant, hostname, device_name)

    def decrement(self,
                  metric,
                  value=-1,
                  dimensions=None,
                  delegated_tenant=None,
                  hostname=None,
                  device_name=None):
        """Decrement a counter with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value to decrement by
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.decrement(metric, value,
                                  self._set_dimensions(dimensions),
                                  delegated_tenant, hostname, device_name)

    def rate(self,
             metric,
             value,
             dimensions=None,
             delegated_tenant=None,
             hostname=None,
             device_name=None):
        """Submit a point for a metric that will be calculated as a rate on flush.

        Values will persist across calls to `check` if there are not enough
        points to generate a rate on flush.

        :param metric: The name of the metric
        :param value: The value of the rate
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.rate(metric, value, self._set_dimensions(dimensions),
                             delegated_tenant, hostname, device_name)

    def histogram(self,
                  metric,
                  value,
                  dimensions=None,
                  delegated_tenant=None,
                  hostname=None,
                  device_name=None):
        """Sample a histogram value, with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value to sample for the histogram
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.histogram(metric, value,
                                  self._set_dimensions(dimensions),
                                  delegated_tenant, hostname, device_name)

    def set(self,
            metric,
            value,
            dimensions=None,
            delegated_tenant=None,
            hostname=None,
            device_name=None):
        """Sample a set value, with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value for the set
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.set(metric, value, self._set_dimensions(dimensions),
                            delegated_tenant, hostname, device_name)

    def _set_dimensions(self, dimensions):
        new_dimensions = {
            'component': 'monasca-agent',
            'service': 'monitoring'
        }
        if dimensions is not None:
            new_dimensions.update(dimensions.copy())
        return new_dimensions

    def event(self, event):
        """Save an event.

        :param event: The event payload as a dictionary. Has the following
        structure:

            {
                "timestamp": int, the epoch timestamp for the event,
                "event_type": string, the event type name,
                "api_key": string, the api key of the account to associate the event with,
                "msg_title": string, the title of the event,
                "msg_text": string, the text body of the event,
                "alert_type": (optional) string, one of ('error', 'warning', 'success', 'info').
                    Defaults to 'info'.
                "source_type_name": (optional) string, the source type name,
                "host": (optional) string, the name of the host,
                "dimensions": (optional) a dictionary of dimensions to associate with this event
            }
        """
        if event.get('api_key') is None:
            event['api_key'] = self.agent_config['api_key']
        self.events.append(event)

    def has_events(self):
        """Check whether the check has saved any events

        @return whether or not the check has saved any events
        @rtype boolean
        """
        return len(self.events) > 0

    def get_metrics(self, prettyprint=False):
        """Get all metrics, including the ones that are tagged.

        @return the list of samples
        @rtype list of Measurement objects from monagent.common.metrics
        """
        if prettyprint:
            metrics = self.aggregator.flush()
            for metric in metrics:
                print(" Timestamp:  {}".format(metric.timestamp))
                print(" Name:       {}".format(metric.name))
                print(" Value:      {}".format(metric.value))
                if (metric.delegated_tenant):
                    print(" Delegtd ID: {}".format(metric.delegated_tenant))

                print(" Dimensions: ", end='')
                line = 0
                for name in metric.dimensions:
                    if line != 0:
                        print(" " * 13, end='')
                    print("{0}={1}".format(name, metric.dimensions[name]))
                    line += 1
                print("-" * 24)
        return self.aggregator.flush()

    def get_events(self):
        """Return a list of the events saved by the check, if any

        @return the list of events saved by this check
        @rtype list of event dictionaries
        """
        events = self.events
        self.events = []
        return events

    def has_warnings(self):
        """Check whether the instance run created any warnings.
        """
        return len(self.warnings) > 0

    def warning(self, warning_message):
        """Add a warning message that will be printed in the info page

        :param warning_message: String. Warning message to be displayed
        """
        self.warnings.append(warning_message)

    def get_library_info(self):
        if self.library_versions is not None:
            return self.library_versions
        try:
            self.library_versions = self.get_library_versions()
        except NotImplementedError:
            pass

    def get_library_versions(self):
        """Return a string showing which versions of the needed libraries are in use.
        """
        raise NotImplementedError

    def get_warnings(self):
        """Return the list of warning messages to be displayed in the info page.
        """
        warnings = self.warnings
        self.warnings = []
        return warnings

    def run(self):
        """Run all instances.
        """
        instance_statuses = []
        for i, instance in enumerate(self.instances):
            try:
                self.check(instance)
                if self.has_warnings():
                    instance_status = monagent.common.check_status.InstanceStatus(
                        i,
                        monagent.common.check_status.STATUS_WARNING,
                        warnings=self.get_warnings())
                else:
                    instance_status = monagent.common.check_status.InstanceStatus(
                        i, monagent.common.check_status.STATUS_OK)
            except Exception as e:
                self.log.exception("Check '%s' instance #%s failed" %
                                   (self.name, i))
                instance_status = monagent.common.check_status.InstanceStatus(
                    i,
                    monagent.common.check_status.STATUS_ERROR,
                    error=e,
                    tb=traceback.format_exc())
            instance_statuses.append(instance_status)
        return instance_statuses

    def check(self, instance):
        """Overridden by the check class. This will be called to run the check.

        :param instance: A dict with the instance information. This will vary
        depending on your config structure.
        """
        raise NotImplementedError()

    @staticmethod
    def stop():
        """To be executed when the agent is being stopped to clean up resources.
        """
        pass

    @classmethod
    def from_yaml(cls,
                  path_to_yaml=None,
                  agentConfig=None,
                  yaml_text=None,
                  check_name=None):
        """A method used for testing your check without running the agent.
        """
        if hasattr(yaml, 'CLoader'):
            Loader = yaml.CLoader
        else:
            Loader = yaml.Loader

        if path_to_yaml:
            check_name = os.path.basename(path_to_yaml).split('.')[0]
            try:
                f = open(path_to_yaml)
            except IOError:
                raise Exception('Unable to open yaml config: %s' %
                                path_to_yaml)
            yaml_text = f.read()
            f.close()

        config = yaml.load(yaml_text, Loader=Loader)
        check = cls(check_name,
                    config.get('init_config') or {}, agentConfig or {})

        return check, config.get('instances', [])

    @staticmethod
    def normalize(metric, prefix=None):
        """Turn a metric into a well-formed metric name of the form prefix.b.c

        :param metric: The metric name to normalize
        :param prefix: A prefix to add to the normalized name, default None
        """
        name = re.sub(r"[,\+\*\-/()\[\]{}]", "_", metric)
        # Eliminate multiple _
        name = re.sub(r"__+", "_", name)
        # Don't start/end with _
        name = re.sub(r"^_", "", name)
        name = re.sub(r"_$", "", name)
        # Drop ._ and _.
        name = re.sub(r"\._", ".", name)
        name = re.sub(r"_\.", ".", name)

        if prefix is not None:
            return prefix + "." + name
        else:
            return name

    @staticmethod
    def read_config(instance, key, message=None, cast=None, optional=False):
        val = instance.get(key)
        if val is None:
            if optional is False:
                message = message or 'Must provide `%s` value in instance config' % key
                raise Exception(message)
            else:
                return val

        if cast is None:
            return val
        else:
            return cast(val)
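
Since check() is meant to be overridden by subclasses, a minimal custom check might look like the sketch below. The class name, metric name, and instance key are made up for illustration; only the AgentCheck API shown above is assumed.

class ExampleCheck(AgentCheck):
    def check(self, instance):
        # Record a hypothetical gauge; _set_dimensions() adds the default
        # 'component' and 'service' dimensions defined above.
        self.gauge('example.thing.size', instance.get('size', 0),
                   dimensions={'example_dim': 'example_value'})
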
Example #38
    def test_dogstatsd_aggregation_perf(self):
        ma = MetricsAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    # metrics
                    ma.submit_packets('counter.%s:%s|c' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h' % (j, i))
                    ma.submit_packets('set.%s:%s|s' % (j, 1.0))

                    # tagged metrics
                    ma.submit_packets('counter.%s:%s|c|#tag1,tag2' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|#tag1,tag2' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|#tag1,tag2' % (j, i))
                    ma.submit_packets('set.%s:%s|s|#tag1,tag2' % (j, i))

                    # sampled metrics
                    ma.submit_packets('counter.%s:%s|c|@0.5' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|@0.5' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|@0.5' % (j, i))
                    ma.submit_packets('set.%s:%s|s|@0.5' % (j, i))

            ma.flush()
Example #39
class AgentCheck(object):

    def __init__(self, name, init_config, agent_config, instances=None):
        """Initialize a new check.

        :param name: The name of the check
        :param init_config: The config for initializing the check
        :param agent_config: The global configuration for the agent
        :param instances: A list of configuration objects for each instance.
        """
        self.name = name
        self.init_config = init_config
        self.agent_config = agent_config
        self.hostname = monagent.common.util.get_hostname(agent_config)
        self.log = logging.getLogger('%s.%s' % (__name__, name))

        self.aggregator = MetricsAggregator(self.hostname,
                                            recent_point_threshold=agent_config.get('recent_point_threshold',
                                                                                    None))

        self.events = []
        self.instances = instances or []
        self.warnings = []
        self.library_versions = None

    def instance_count(self):
        """Return the number of instances that are configured for this check.
        """
        return len(self.instances)

    def gauge(self, metric, value, dimensions=None, delegated_tenant=None,
              hostname=None, device_name=None, timestamp=None):
        """Record the value of a gauge, with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value of the gauge
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        :param timestamp: (optional) The timestamp for this metric value
        """
        self.aggregator.gauge(metric,
                              value,
                              self._set_dimensions(dimensions),
                              delegated_tenant,
                              hostname,
                              device_name,
                              timestamp)

    def increment(self, metric, value=1, dimensions=None, delegated_tenant=None, hostname=None, device_name=None):
        """Increment a counter with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value to increment by
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.increment(metric,
                                  value,
                                  self._set_dimensions(dimensions),
                                  delegated_tenant,
                                  hostname,
                                  device_name)

    def decrement(self, metric, value=-1, dimensions=None, delegated_tenant=None, hostname=None, device_name=None):
        """Decrement a counter with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value to decrement by
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.decrement(metric,
                                  value,
                                  self._set_dimensions(dimensions),
                                  delegated_tenant,
                                  hostname,
                                  device_name)

    def rate(self, metric, value, dimensions=None, delegated_tenant=None, hostname=None, device_name=None):
        """Submit a point for a metric that will be calculated as a rate on flush.

        Values will persist across calls to `check` if there are not enough
        points to generate a rate on flush.

        :param metric: The name of the metric
        :param value: The value of the rate
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.rate(metric,
                             value,
                             self._set_dimensions(dimensions),
                             delegated_tenant,
                             hostname,
                             device_name)

    def histogram(self, metric, value, dimensions=None, delegated_tenant=None, hostname=None, device_name=None):
        """Sample a histogram value, with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value to sample for the histogram
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.histogram(metric,
                                  value,
                                  self._set_dimensions(dimensions),
                                  delegated_tenant,
                                  hostname,
                                  device_name)

    def set(self, metric, value, dimensions=None, delegated_tenant=None, hostname=None, device_name=None):
        """Sample a set value, with optional dimensions, hostname and device name.

        :param metric: The name of the metric
        :param value: The value for the set
        :param dimensions: (optional) A dictionary of dimensions for this metric
        :param delegated_tenant: (optional) Submit metrics on behalf of this tenant ID.
        :param hostname: (optional) A hostname for this metric. Defaults to the current hostname.
        :param device_name: (optional) The device name for this metric
        """
        self.aggregator.set(metric,
                            value,
                            self._set_dimensions(dimensions),
                            delegated_tenant,
                            hostname,
                            device_name)

    def _set_dimensions(self, dimensions):
        new_dimensions = {'component': 'monasca-agent', 'service': 'monitoring'}
        if dimensions is not None:
            new_dimensions.update(dimensions.copy())
        return new_dimensions

    def event(self, event):
        """Save an event.

        :param event: The event payload as a dictionary. Has the following
        structure:

            {
                "timestamp": int, the epoch timestamp for the event,
                "event_type": string, the event type name,
                "api_key": string, the api key of the account to associate the event with,
                "msg_title": string, the title of the event,
                "msg_text": string, the text body of the event,
                "alert_type": (optional) string, one of ('error', 'warning', 'success', 'info').
                    Defaults to 'info'.
                "source_type_name": (optional) string, the source type name,
                "host": (optional) string, the name of the host,
                "dimensions": (optional) a dictionary of dimensions to associate with this event
            }
        """
        if event.get('api_key') is None:
            event['api_key'] = self.agent_config['api_key']
        self.events.append(event)

    def has_events(self):
        """Check whether the check has saved any events

        @return whether or not the check has saved any events
        @rtype boolean
        """
        return len(self.events) > 0

    def get_metrics(self, prettyprint=False):
        """Get all metrics, including the ones that are tagged.

        @return the list of samples
        @rtype list of Measurement objects from monagent.common.metrics
        """
        if prettyprint:
            metrics = self.aggregator.flush()
            for metric in metrics:
                print(" Timestamp:  {}".format(metric.timestamp))
                print(" Name:       {}".format(metric.name))
                print(" Value:      {}".format(metric.value))
                if (metric.delegated_tenant):
                    print(" Delegtd ID: {}".format(metric.delegated_tenant))

                print(" Dimensions: ", end='')
                line = 0
                for name in metric.dimensions:
                    if line != 0:
                        print(" " * 13, end='')
                    print("{0}={1}".format(name, metric.dimensions[name]))
                    line += 1
                print("-" * 24)
        return self.aggregator.flush()

    def get_events(self):
        """Return a list of the events saved by the check, if any

        @return the list of events saved by this check
        @rtype list of event dictionaries
        """
        events = self.events
        self.events = []
        return events

    def has_warnings(self):
        """Check whether the instance run created any warnings.
        """
        return len(self.warnings) > 0

    def warning(self, warning_message):
        """Add a warning message that will be printed in the info page

        :param warning_message: String. Warning message to be displayed
        """
        self.warnings.append(warning_message)

    def get_library_info(self):
        if self.library_versions is not None:
            return self.library_versions
        try:
            self.library_versions = self.get_library_versions()
        except NotImplementedError:
            pass

    def get_library_versions(self):
        """Return a string showing which versions of the needed libraries are in use.
        """
        raise NotImplementedError

    def get_warnings(self):
        """Return the list of warning messages to be displayed in the info page.
        """
        warnings = self.warnings
        self.warnings = []
        return warnings

    def run(self):
        """Run all instances.
        """
        instance_statuses = []
        for i, instance in enumerate(self.instances):
            try:
                self.check(instance)
                if self.has_warnings():
                    instance_status = monagent.common.check_status.InstanceStatus(i,
                                                                                  monagent.common.check_status.STATUS_WARNING,
                                                                                  warnings=self.get_warnings())
                else:
                    instance_status = monagent.common.check_status.InstanceStatus(i,
                                                                                  monagent.common.check_status.STATUS_OK)
            except Exception as e:
                self.log.exception("Check '%s' instance #%s failed" % (self.name, i))
                instance_status = monagent.common.check_status.InstanceStatus(i,
                                                                              monagent.common.check_status.STATUS_ERROR,
                                                                              error=e,
                                                                              tb=traceback.format_exc())
            instance_statuses.append(instance_status)
        return instance_statuses

    def check(self, instance):
        """Overridden by the check class. This will be called to run the check.

        :param instance: A dict with the instance information. This will vary
        depending on your config structure.
        """
        raise NotImplementedError()

    @staticmethod
    def stop():
        """To be executed when the agent is being stopped to clean up resources.
        """
        pass

    @classmethod
    def from_yaml(cls, path_to_yaml=None, agentConfig=None, yaml_text=None, check_name=None):
        """A method used for testing your check without running the agent.
        """
        if hasattr(yaml, 'CLoader'):
            Loader = yaml.CLoader
        else:
            Loader = yaml.Loader

        if path_to_yaml:
            check_name = os.path.basename(path_to_yaml).split('.')[0]
            try:
                f = open(path_to_yaml)
            except IOError:
                raise Exception('Unable to open yaml config: %s' % path_to_yaml)
            yaml_text = f.read()
            f.close()

        config = yaml.load(yaml_text, Loader=Loader)
        check = cls(check_name, config.get('init_config') or {}, agentConfig or {})

        return check, config.get('instances', [])

    @staticmethod
    def normalize(metric, prefix=None):
        """Turn a metric into a well-formed metric name of the form prefix.b.c

        :param metric: The metric name to normalize
        :param prefix: A prefix to add to the normalized name, default None
        """
        name = re.sub(r"[,\+\*\-/()\[\]{}]", "_", metric)
        # Eliminate multiple _
        name = re.sub(r"__+", "_", name)
        # Don't start/end with _
        name = re.sub(r"^_", "", name)
        name = re.sub(r"_$", "", name)
        # Drop ._ and _.
        name = re.sub(r"\._", ".", name)
        name = re.sub(r"_\.", ".", name)

        if prefix is not None:
            return prefix + "." + name
        else:
            return name

    @staticmethod
    def read_config(instance, key, message=None, cast=None, optional=False):
        val = instance.get(key)
        if val is None:
            if optional is False:
                message = message or 'Must provide `%s` value in instance config' % key
                raise Exception(message)
            else:
                return val

        if cast is None:
            return val
        else:
            return cast(val)
Example #40
    def test_gauge(self):
        stats = MetricsAggregator('myhost')

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')

        # Ensure that gauges roll up correctly.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        nt.assert_equals(first['metric'], 'my.first.gauge')
        nt.assert_equals(first['points'][0][1], 5)
        nt.assert_equals(first['host'], 'myhost')

        nt.assert_equals(second['metric'], 'my.second.gauge')
        nt.assert_equals(second['points'][0][1], 1.5)

        # Ensure that old gauges get dropped due to old timestamps
        stats.gauge('my.first.gauge', 5)
        stats.gauge('my.first.gauge', 1, timestamp=1000000000)
        stats.gauge('my.second.gauge', 20, timestamp=1000000000)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]

        nt.assert_equals(first['metric'], 'my.first.gauge')
        nt.assert_equals(first['points'][0][1], 5)
        nt.assert_equals(first['host'], 'myhost')
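
Two behaviours are exercised here: a gauge keeps only the last value submitted within a flush interval (1 then 5 flushes as 5), and points stamped far in the past (timestamp=1000000000) are assumed to be discarded as stale, which is why only my.first.gauge, with its fresh value of 5, survives the second flush.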
예제 #41
0
    def test_event_tags(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('_e{6,4}:title1|text')
        stats.submit_packets('_e{6,4}:title2|text|#t1')
        stats.submit_packets('_e{6,4}:title3|text|#t1,t2:v2,t3,t4')
        stats.submit_packets('_e{6,4}:title4|text|k:key|p:normal|#t1,t2')

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        try:
            first['dimensions']
        except KeyError:
            pass
        else:
            assert False, "event['dimensions'] shouldn't be set when no tags are given in the packet"
        nt.assert_equal(first['title'], 'title1')
        nt.assert_equal(first['text'], 'text')

        nt.assert_equal(second['title'], 'title2')
        nt.assert_equal(second['text'], 'text')
        nt.assert_equal(second['dimensions'], sorted(['t1']))

        nt.assert_equal(third['title'], 'title3')
        nt.assert_equal(third['text'], 'text')
        nt.assert_equal(third['dimensions'],
                        sorted(['t1', 't2:v2', 't3', 't4']))

        nt.assert_equal(fourth['title'], 'title4')
        nt.assert_equal(fourth['text'], 'text')
        nt.assert_equal(fourth['aggregation_key'], 'key')
        nt.assert_equal(fourth['priority'], 'normal')
        nt.assert_equal(fourth['dimensions'], sorted(['t1', 't2']))
예제 #42
0
    def setUp(self):
        self.aggr = MetricsAggregator('test-aggr')
예제 #43
0
    def test_histogram_counter(self):
        # Test that the histogram's .count sub-metric matches the plain counter total,
        # both with and without a sample rate.
        cnt = 100000
        for run in [1, 2]:
            stats = MetricsAggregator('myhost')
            for i in xrange(cnt):
                if run == 2:
                    stats.submit_packets('test.counter:1|c|@0.5')
                    stats.submit_packets('test.hist:1|ms|@0.5')
                else:
                    stats.submit_packets('test.counter:1|c')
                    stats.submit_packets('test.hist:1|ms')
            metrics = self.sort_metrics(stats.flush())
            assert len(metrics) > 0

            nt.assert_equal([
                m['points'][0][1]
                for m in metrics if m['metric'] == 'test.counter'
            ], [cnt * run])
            nt.assert_equal([
                m['points'][0][1]
                for m in metrics if m['metric'] == 'test.hist.count'
            ], [cnt * run])
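
The two runs end up with identical totals because the test assumes the aggregator scales each sampled packet by the inverse of its sample rate: with '@0.5' every packet is worth two, so cnt packets yield cnt * 2. A quick arithmetic sketch of that assumption:

packets_sent = 100000              # cnt in the test above
sample_rate = 0.5                  # the '@0.5' suffix
value_per_packet = 1 / sample_rate  # each sampled packet counts as 2
assert packets_sent * value_per_packet == packets_sent * 2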
예제 #44
0
    def setUp(self):
        self.aggr = MetricsAggregator('test-aggr')
예제 #45
0
    def test_gauge(self):
        stats = MetricsAggregator('myhost')

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')

        # Ensure that gauges roll up correctly.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        nt.assert_equals(first['metric'], 'my.first.gauge')
        nt.assert_equals(first['points'][0][1], 5)
        nt.assert_equals(first['host'], 'myhost')

        nt.assert_equals(second['metric'], 'my.second.gauge')
        nt.assert_equals(second['points'][0][1], 1.5)

        # Ensure that old gauges get dropped due to old timestamps
        stats.gauge('my.first.gauge', 5)
        stats.gauge('my.first.gauge', 1, timestamp=1000000000)
        stats.gauge('my.second.gauge', 20, timestamp=1000000000)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]

        nt.assert_equals(first['metric'], 'my.first.gauge')
        nt.assert_equals(first['points'][0][1], 5)
        nt.assert_equals(first['host'], 'myhost')
예제 #46
0
    def test_event_text(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('_e{2,0}:t1|')
        stats.submit_packets('_e{2,12}:t2|text|content')
        stats.submit_packets(
            '_e{2,23}:t3|First line\\nSecond line')  # escaped \n becomes a real newline in the text
        stats.submit_packets(
            u'_e{2,19}:t4|♬ †øU †øU ¥ºu T0µ ♪')  # UTF-8 text is accepted

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        nt.assert_equal(first['text'], '')
        nt.assert_equal(second['text'], 'text|content')
        nt.assert_equal(third['text'], 'First line\nSecond line')
        nt.assert_equal(fourth['text'], u'♬ †øU †øU ¥ºu T0µ ♪')
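
The '_e{m,n}:' header carries the lengths of the title and the text that follow, which is why the hand-written packets above spell out pairs such as {2,12}. A small helper that builds such a packet the same way (the function name is illustrative, not part of the aggregator API):

def make_event_packet(title, text):
    # the header encodes the title and text lengths, e.g. '_e{2,12}:t2|text|content'
    return u'_e{%d,%d}:%s|%s' % (len(title), len(text), title, text)

assert make_event_packet('t2', 'text|content') == u'_e{2,12}:t2|text|content'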
예제 #47
0
    def test_checksd_aggregation_perf(self):
        ma = MetricsAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                # Submit counters, gauges, histograms and sets for each metric name
                for j in xrange(self.METRIC_COUNT):
                    ma.increment('counter.%s' % j, i)
                    ma.gauge('gauge.%s' % j, i)
                    ma.histogram('histogram.%s' % j, i)
                    ma.set('set.%s' % j, float(i))
            ma.flush()
예제 #48
0
    def test_tags(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('gauge:1|c')
        stats.submit_packets('gauge:2|c|@1')
        stats.submit_packets('gauge:4|c|#tag1,tag2')
        stats.submit_packets(
            'gauge:8|c|#tag2,tag1')  # Should be the same as above
        stats.submit_packets('gauge:16|c|#tag3,tag4')

        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 3
        first, second, third = metrics

        nt.assert_equal(first['metric'], 'gauge')
        nt.assert_equal(first['dimensions'], None)
        nt.assert_equal(first['points'][0][1], 3)
        nt.assert_equal(first['host'], 'myhost')

        nt.assert_equal(second['metric'], 'gauge')
        nt.assert_equal(second['dimensions'], ('tag1', 'tag2'))
        nt.assert_equal(second['points'][0][1], 12)
        nt.assert_equal(second['host'], 'myhost')

        nt.assert_equal(third['metric'], 'gauge')
        nt.assert_equal(third['dimensions'], ('tag3', 'tag4'))
        nt.assert_equal(third['points'][0][1], 16)
        nt.assert_equal(third['host'], 'myhost')
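
The second and fourth packets collapse into one series even though their tags are written in a different order, which implies the aggregator keys each context on a sorted tag tuple. A minimal sketch of that canonicalization (context_key is illustrative, not the aggregator's actual code):

def context_key(metric, hostname, tags):
    # '#tag1,tag2' and '#tag2,tag1' both map to ('gauge', 'myhost', ('tag1', 'tag2'))
    return (metric, hostname, tuple(sorted(tags)) if tags else None)

assert context_key('gauge', 'myhost', ['tag1', 'tag2']) == \
    context_key('gauge', 'myhost', ['tag2', 'tag1'])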
예제 #49
0
    def test_event_title(self):
        stats = MetricsAggregator('myhost')
        stats.submit_packets('_e{0,4}:|text')
        stats.submit_packets(u'_e{9,4}:2intitulé|text')
        stats.submit_packets('_e{14,4}:3title content|text')
        stats.submit_packets('_e{14,4}:4title|content|text')
        stats.submit_packets(
            '_e{13,4}:5title\\ntitle|text')  # \n stays escaped

        events = self.sort_events(stats.flush_events())

        assert len(events) == 5
        first, second, third, fourth, fifth = events

        nt.assert_equal(first['title'], '')
        nt.assert_equal(second['title'], u'2intitulé')
        nt.assert_equal(third['title'], '3title content')
        nt.assert_equal(fourth['title'], '4title|content')
        nt.assert_equal(fifth['title'], '5title\\ntitle')
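
Note the contrast with the test_event_text example (예제 #46): the escaped \n is turned into a real newline inside the event text, but is left escaped inside the title, which is exactly what the final assertion here checks.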