Example #1
    def test_tags(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
        dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
        dog.increment('counter', timestamp=100.0, tags=['env:staging'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['tags'], None)
        nt.assert_equal(c1['points'][0][1], 1)
        nt.assert_equal(c2['tags'], ['env:production', 'db'])
        nt.assert_equal(c2['points'][0][1], 1)
        nt.assert_equal(c3['tags'], ['env:staging'])
        nt.assert_equal(c3['points'][0][1], 1)

        for c in [g1, g2, g3]:
            nt.assert_equal(c['metric'], 'gauge')
        nt.assert_equal(g1['tags'], None)
        nt.assert_equal(g1['points'][0][1], 10)
        nt.assert_equal(g2['tags'], ['env:production', 'db'])
        nt.assert_equal(g2['points'][0][1], 15)
        nt.assert_equal(g3['tags'], ['env:staging'])
        nt.assert_equal(g3['points'][0][1], 20)
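The snippets on this page rely on a MemoryReporter test double and a sort_metrics helper that are defined elsewhere in datadogpy's test suite and are not shown here. Below is a minimal sketch of what they might look like, assuming the reporter only needs to buffer whatever ThreadStats passes to flush_metrics/flush_events/flush_distributions; the real implementations may differ.

# Hypothetical stand-ins for helpers used throughout these examples; the
# actual versions live in datadogpy's test utilities and may differ.
class MemoryReporter(object):
    """Buffers flushed payloads in memory instead of sending them to an API."""

    def __init__(self):
        self.metrics = []
        self.events = []
        self.distributions = []

    def flush_metrics(self, metrics):
        self.metrics += metrics

    def flush_events(self, events):
        self.events += events

    def flush_distributions(self, distributions):
        self.distributions += distributions


def sort_metrics(metrics):
    """Sort flushed metrics by name, tags, host and timestamp for stable unpacking."""
    def key(metric):
        return (metric['metric'],
                metric.get('tags') or [],
                metric.get('host') or '',
                metric['points'][0][0])
    return sorted(metrics, key=key)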
Example #2
    def test_histogram_percentiles(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()
        # Sample every number from 0 to 99 many times, so the Nth
        # percentile should land close to N.
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                dog.histogram("percentiles", i, 1000.0)
        dog.flush(2000.0)
        metrics = reporter.metrics

        def assert_almost_equal(i, j, e=1):
            # Floating point math?
            assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

        nt.assert_equal(len(metrics), 8)
        p75, p85, p95, p99, _, _, _, _ = self.sort_metrics(metrics)
        nt.assert_equal(p75["metric"], "percentiles.75percentile")
        nt.assert_equal(p75["points"][0][0], 1000.0)
        assert_almost_equal(p75["points"][0][1], 75, 8)
        assert_almost_equal(p85["points"][0][1], 85, 8)
        assert_almost_equal(p95["points"][0][1], 95, 8)
        assert_almost_equal(p99["points"][0][1], 99, 8)
Example #3
    def test_timed_decorator(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        @dog.timed('timed.test')
        def func(a, b, c=1, d=1):
            """docstring"""
            return (a, b, c, d)

        nt.assert_equal(func.__name__, 'func')
        nt.assert_equal(func.__doc__, 'docstring')

        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        nt.assert_equal(result, (1, 2, 1, 3))
        time.sleep(1)  # Argh. I hate this.
        dog.flush()
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 8)
        (_, _, _, _, avg, count, max_, min_) = metrics
        nt.assert_equal(avg['metric'], 'timed.test.avg')
        nt.assert_equal(count['metric'], 'timed.test.count')
        nt.assert_equal(max_['metric'], 'timed.test.max')
        nt.assert_equal(min_['metric'], 'timed.test.min')
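Outside of tests, the same timed decorator is normally used with the background flush thread enabled rather than manual flush() calls. A minimal usage sketch, assuming an API key is configured through datadog.initialize; the key and metric names below are placeholders.

from datadog import initialize, ThreadStats

initialize(api_key="<YOUR_API_KEY>")  # placeholder key, not a real credential

stats = ThreadStats()
stats.start(flush_interval=10)  # flush_in_thread defaults to True

@stats.timed("myapp.request.duration", tags=["service:myapp"])  # hypothetical metric name
def handle_request():
    # Application work being timed; avg/count/max/min/percentile series
    # are emitted under the "myapp.request.duration" prefix on flush.
    pass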
Example #4
    def test_timed_decorator(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        @dog.timed("timed.test")
        def func(a, b, c=1, d=1):
            """docstring"""
            return (a, b, c, d)

        nt.assert_equal(func.__name__, "func")
        nt.assert_equal(func.__doc__, "docstring")

        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        nt.assert_equal(result, (1, 2, 1, 3))
        time.sleep(1)  # Argh. I hate this.
        dog.flush()
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 8)
        (_, _, _, _, avg, count, max_, min_) = metrics
        nt.assert_equal(avg["metric"], "timed.test.avg")
        nt.assert_equal(count["metric"], "timed.test.count")
        nt.assert_equal(max_["metric"], "timed.test.max")
        nt.assert_equal(min_["metric"], "timed.test.min")
Example #5
    def test_timed_decorator(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        @dog.timed('timed.test')
        def func(a, b, c=1, d=1):
            """docstring"""
            return (a, b, c, d)

        assert func.__name__ == 'func'
        assert func.__doc__ == 'docstring'

        result = func(1, 2, d=3)
        # Assert it handles args and kwargs correctly.
        assert result == (1, 2, 1, 3)
        time.sleep(1)  # Argh. I hate this.
        dog.flush()
        metrics = self.sort_metrics(reporter.metrics)
        assert len(metrics) == 8
        (_, _, _, _, avg, count, max_, min_) = metrics
        assert avg['metric'] == 'timed.test.avg'
        assert count['metric'] == 'timed.test.count'
        assert max_['metric'] == 'timed.test.max'
        assert min_['metric'] == 'timed.test.min'
Example #6
    def test_tags(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
        dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
        dog.increment('counter', timestamp=100.0, tags=['env:staging'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            assert_equal(c['metric'], 'counter')
        assert_equal(c1['tags'], None)
        assert_equal(c1['points'][0][1], 0.1)
        assert_equal(c2['tags'], ['env:production', 'db'])
        assert_equal(c2['points'][0][1], 0.1)
        assert_equal(c3['tags'], ['env:staging'])
        assert_equal(c3['points'][0][1], 0.1)

        for c in [g1, g2, g3]:
            assert_equal(c['metric'], 'gauge')
        assert_equal(g1['tags'], None)
        assert_equal(g1['points'][0][1], 10)
        assert_equal(g2['tags'], ['env:production', 'db'])
        assert_equal(g2['points'][0][1], 15)
        assert_equal(g3['tags'], ['env:staging'])
        assert_equal(g3['points'][0][1], 20)
Example #7
    def test_tags(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
        dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
        dog.increment("counter", timestamp=100.0, tags=["env:staging"])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        (nt.assert_equal(c["metric"], "counter") for c in [c1, c2, c3])
        nt.assert_equal(c1["tags"], None)
        nt.assert_equal(c1["points"][0][1], 1)
        nt.assert_equal(c2["tags"], ["env:production", "db"])
        nt.assert_equal(c2["points"][0][1], 1)
        nt.assert_equal(c3["tags"], ["env:staging"])
        nt.assert_equal(c3["points"][0][1], 1)

        (nt.assert_equal(c["metric"], "gauge") for c in [g1, g2, g3])
        nt.assert_equal(g1["tags"], None)
        nt.assert_equal(g1["points"][0][1], 10)
        nt.assert_equal(g2["tags"], ["env:production", "db"])
        nt.assert_equal(g2["points"][0][1], 15)
        nt.assert_equal(g3["tags"], ["env:staging"])
        nt.assert_equal(g3["points"][0][1], 20)
Example #8
    def test_histogram_percentiles(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()
        # Sample every number from 0 to 99 many times, so the Nth
        # percentile should land close to N.
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                dog.histogram('percentiles', i, 1000.0)
        dog.flush(2000.0)
        metrics = reporter.metrics

        def assert_almost_equal(i, j, e=1):
            # Floating point math?
            assert abs(i - j) <= e, "%s %s %s" % (i, j, e)

        nt.assert_equal(len(metrics), 8)
        p75, p85, p95, p99, _, _, _, _ = self.sort_metrics(metrics)
        nt.assert_equal(p75['metric'], 'percentiles.75percentile')
        nt.assert_equal(p75['points'][0][0], 1000.0)
        assert_almost_equal(p75['points'][0][1], 75, 8)
        assert_almost_equal(p85['points'][0][1], 85, 8)
        assert_almost_equal(p95['points'][0][1], 95, 8)
        assert_almost_equal(p99['points'][0][1], 99, 8)
Example #9
    def test_metric_type(self):
        """
        Checks the submitted metric's metric type.
        """
        # Set up ThreadStats with a namespace
        dog = ThreadStats(namespace="foo")
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = self.reporter

        # Send a few metrics
        dog.gauge("gauge", 20, timestamp=100.0)
        dog.increment("counter", timestamp=100.0)
        dog.histogram('histogram.1', 20, 100.0)
        dog.flush(200.0)

        (first, second, p75, p85, p95, p99, avg, cnt, max_,
         min_) = self.sort_metrics(reporter.metrics)

        # Assert Metric type
        nt.assert_equal(first['type'], 'rate')
        nt.assert_equal(second['type'], 'gauge')
        nt.assert_equal(p75['type'], 'gauge')
        nt.assert_equal(p85['type'], 'gauge')
        nt.assert_equal(p95['type'], 'gauge')
        nt.assert_equal(p99['type'], 'gauge')
        nt.assert_equal(avg['type'], 'gauge')
        nt.assert_equal(cnt['type'], 'rate')
        nt.assert_equal(max_['type'], 'gauge')
        nt.assert_equal(min_['type'], 'gauge')
Example #10
    def test_disabled_mode(self):
        dog = ThreadStats()
        dog.start(disabled=True, flush_interval=1, roll_up_interval=1)
        reporter = dog.reporter = MemoryReporter()
        dog.gauge('testing', 1, timestamp=1000)
        dog.gauge('testing', 2, timestamp=1000)
        dog.flush(2000.0)
        assert not reporter.metrics
Example #11
    def test_disabled_mode(self):
        dog = ThreadStats()
        reporter = dog.reporter = MemoryReporter()
        dog.start(disabled=True, flush_interval=1, roll_up_interval=1)
        dog.gauge("testing", 1, timestamp=1000)
        dog.gauge("testing", 2, timestamp=1000)
        dog.flush(2000.0)
        assert not reporter.metrics
Example #12
    def test_default_host_and_device(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()
        dog.gauge("my.gauge", 1, 100.0)
        dog.flush(1000)
        metric = reporter.metrics[0]
        assert not metric["device"]
        assert not metric["host"]
Example #13
    def test_custom_host_and_device(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False, device="dev")
        reporter = dog.reporter = MemoryReporter()
        dog.gauge("my.gauge", 1, 100.0, host="host")
        dog.flush(1000)
        metric = reporter.metrics[0]
        nt.assert_equal(metric["device"], "dev")
        nt.assert_equal(metric["host"], "host")
Example #14
    def test_host(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, host='test')
        dog.gauge('gauge', 15, timestamp=100.0, host='test')

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, host='test')
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        assert len(metrics) == 6

        [c1, c2, c3, g1, g2, g3] = metrics
        assert c1['metric'] == 'counter'
        assert c2['metric'] == 'counter'
        assert c3['metric'] == 'counter'
        assert c1['host'] is None
        assert c1['tags'] is None
        assert c1['points'][0][1] == 0.2
        assert c2['host'] == 'test'
        assert c2['tags'] is None
        assert c2['points'][0][1] == 0.1
        assert c3['host'] == 'test'
        assert c3['tags'] == ['tag']
        assert c3['points'][0][1] == 0.2

        assert g1['metric'] == 'gauge'
        assert g2['metric'] == 'gauge'
        assert g3['metric'] == 'gauge'
        assert g1['host'] is None
        assert g1['points'][0][1] == 10
        assert g2['host'] == ''
        assert g2['points'][0][1] == 12
        assert g3['host'] == 'test'
        assert g3['points'][0][1] == 15

        # Ensure histograms work as well.
        @dog.timed('timed', host='test')
        def test():
            pass

        test()
        dog.histogram('timed', 20, timestamp=300.0, host='test')
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric['host'] == 'test'
Example #15
    def test_custom_host_and_device(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False, device='dev')
        reporter = dog.reporter = MemoryReporter()
        dog.gauge('my.gauge', 1, 100.0, host='host')
        dog.flush(1000)
        metric = reporter.metrics[0]
        nt.assert_equal(metric['device'], 'dev')
        nt.assert_equal(metric['host'], 'host')
Example #16
    def test_default_host_and_device(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()
        dog.gauge('my.gauge', 1, 100.0)
        dog.flush(1000)
        metric = reporter.metrics[0]
        assert not metric['device']
        assert not metric['host']
Example #17
    def test_custom_host_and_device(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=1, flush_in_thread=False, device='dev')
        reporter = dog.reporter = MemoryReporter()
        dog.gauge('my.gauge', 1, 100.0, host='host')
        dog.flush(1000)
        metric = reporter.metrics[0]
        nt.assert_equal(metric['device'], 'dev')
        nt.assert_equal(metric['host'], 'host')
Example #18
    def test_constant_tags(self):
        """
        Constant tags are attached to all metrics.
        """
        dog = ThreadStats(constant_tags=["type:constant"])
        dog.start(roll_up_interval=1, flush_in_thread=False)
        dog.reporter = self.reporter

        # Post the same metric with different tags.
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", 'db'])
        dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, tags=["env:production", 'db'])
        dog.increment("counter", timestamp=100.0, tags=["env:staging"])

        dog.flush(200.0)

        # Assertions on all metrics
        self.assertMetric(count=6)

        # Assertions on gauges
        self.assertMetric(name='gauge', value=10, tags=["type:constant"], count=1)
        self.assertMetric(name="gauge", value=15,
                          tags=["env:production", "db", "type:constant"], count=1)  # noqa
        self.assertMetric(name="gauge", value=20, tags=["env:staging", "type:constant"], count=1)

        # Assertions on counters
        self.assertMetric(name="counter", value=1, tags=["type:constant"], count=1)
        self.assertMetric(name="counter", value=1,
                          tags=["env:production", "db", "type:constant"], count=1)  # noqa
        self.assertMetric(name="counter", value=1, tags=["env:staging", "type:constant"], count=1)

        # Ensure histograms work as well.
        @dog.timed('timed', tags=['version:1'])
        def do_nothing():
            """
            A function that does nothing, but being timed.
            """
            pass

        with patch("datadog.threadstats.base.time", return_value=300):
            do_nothing()

        dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])

        self.reporter.metrics = []
        dog.flush(400.0)

        # Histograms, and related metric types, produce 8 different metrics
        self.assertMetric(tags=["version:1", "type:constant"], count=8)
        self.assertMetric(tags=["db", "version:2", "type:constant"], count=8)
Example #19
    def test_host(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, host='test')
        dog.gauge('gauge', 15, timestamp=100.0, host='test')

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, host='test')
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['host'], None)
        nt.assert_equal(c1['tags'], None)
        nt.assert_equal(c1['points'][0][1], 0.2)
        nt.assert_equal(c2['host'], 'test')
        nt.assert_equal(c2['tags'], None)
        nt.assert_equal(c2['points'][0][1], 0.1)
        nt.assert_equal(c3['host'], 'test')
        nt.assert_equal(c3['tags'], ['tag'])
        nt.assert_equal(c3['points'][0][1], 0.2)

        for g in [g1, g2, g3]:
            nt.assert_equal(g['metric'], 'gauge')
        nt.assert_equal(g1['host'], None)
        nt.assert_equal(g1['points'][0][1], 10)
        nt.assert_equal(g2['host'], '')
        nt.assert_equal(g2['points'][0][1], 12)
        nt.assert_equal(g3['host'], 'test')
        nt.assert_equal(g3['points'][0][1], 15)

        # Ensure histograms work as well.
        @dog.timed('timed', host='test')
        def test():
            pass

        test()
        dog.histogram('timed', 20, timestamp=300.0, host='test')
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric['host'] == 'test'
Example #20
    def test_host(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge("gauge", 12, timestamp=100.0, host="")  # unset the host
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, host="test")
        dog.gauge("gauge", 15, timestamp=100.0, host="test")

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, host="test")
        dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])
        dog.increment("counter", timestamp=100.0, host="test", tags=["tag"])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        (nt.assert_equal(c["metric"], "counter") for c in [c1, c2, c3])
        nt.assert_equal(c1["host"], None)
        nt.assert_equal(c1["tags"], None)
        nt.assert_equal(c1["points"][0][1], 2)
        nt.assert_equal(c2["host"], "test")
        nt.assert_equal(c2["tags"], None)
        nt.assert_equal(c2["points"][0][1], 1)
        nt.assert_equal(c3["host"], "test")
        nt.assert_equal(c3["tags"], ["tag"])
        nt.assert_equal(c3["points"][0][1], 2)

        (nt.assert_equal(g["metric"], "gauge") for g in [g1, g2, g3])
        nt.assert_equal(g1["host"], None)
        nt.assert_equal(g1["points"][0][1], 10)
        nt.assert_equal(g2["host"], "")
        nt.assert_equal(g2["points"][0][1], 12)
        nt.assert_equal(g3["host"], "test")
        nt.assert_equal(g3["points"][0][1], 15)

        # Ensure histograms work as well.
        @dog.timed("timed", host="test")
        def test():
            pass

        test()
        dog.histogram("timed", 20, timestamp=300.0, host="test")
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric["host"] == "test"
Example #21
    def test_host(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 12, timestamp=100.0, host='')  # unset the host
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, host='test')
        dog.gauge('gauge', 15, timestamp=100.0, host='test')

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, host='test')
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])
        dog.increment('counter', timestamp=100.0, host='test', tags=['tag'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['host'], None)
        nt.assert_equal(c1['tags'], None)
        nt.assert_equal(c1['points'][0][1], 2)
        nt.assert_equal(c2['host'], 'test')
        nt.assert_equal(c2['tags'], None)
        nt.assert_equal(c2['points'][0][1], 1)
        nt.assert_equal(c3['host'], 'test')
        nt.assert_equal(c3['tags'], ['tag'])
        nt.assert_equal(c3['points'][0][1], 2)

        for g in [g1, g2, g3]:
            nt.assert_equal(g['metric'], 'gauge')
        nt.assert_equal(g1['host'], None)
        nt.assert_equal(g1['points'][0][1], 10)
        nt.assert_equal(g2['host'], '')
        nt.assert_equal(g2['points'][0][1], 12)
        nt.assert_equal(g3['host'], 'test')
        nt.assert_equal(g3['points'][0][1], 15)

        # Ensure histograms work as well.
        @dog.timed('timed', host='test')
        def test():
            pass
        test()
        dog.histogram('timed', 20, timestamp=300.0, host='test')
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric['host'] == 'test'
Example #22
    def test_constant_tags(self):
        """
        Constant tags are attached to all metrics.
        """
        dog = ThreadStats(constant_tags=["type:constant"])
        dog.start(roll_up_interval=1, flush_in_thread=False)
        dog.reporter = self.reporter

        # Post the same metric with different tags.
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", 'db'])
        dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, tags=["env:production", 'db'])
        dog.increment("counter", timestamp=100.0, tags=["env:staging"])

        dog.flush(200.0)

        # Assertions on all metrics
        self.assertMetric(count=6)

        # Assertions on gauges
        self.assertMetric(name='gauge', value=10, tags=["type:constant"], count=1)
        self.assertMetric(name="gauge", value=15, tags=["env:production", "db", "type:constant"], count=1)  # noqa
        self.assertMetric(name="gauge", value=20, tags=["env:staging", "type:constant"], count=1)

        # Assertions on counters
        self.assertMetric(name="counter", value=1, tags=["type:constant"], count=1)
        self.assertMetric(name="counter", value=1, tags=["env:production", "db", "type:constant"], count=1)  # noqa
        self.assertMetric(name="counter", value=1, tags=["env:staging", "type:constant"], count=1)

        # Ensure histograms work as well.
        @dog.timed('timed', tags=['version:1'])
        def do_nothing():
            """
            A function that does nothing, but being timed.
            """
            pass

        with patch("datadog.threadstats.base.time", return_value=300):
            do_nothing()

        dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])

        self.reporter.metrics = []
        dog.flush(400.0)

        # Histograms, and related metric types, produce 8 different metrics
        self.assertMetric(tags=["version:1", "type:constant"], count=8)
        self.assertMetric(tags=["db", "version:2", "type:constant"], count=8)
Example #23
    def test_tags_from_environment_and_constant(self):
        test_tags = ['country:china', 'age:45', 'blue']
        constant_tags = ['country:canada', 'red']
        with preserve_environment_variable('DATADOG_TAGS'):
            os.environ['DATADOG_TAGS'] = ','.join(test_tags)
            dog = ThreadStats(constant_tags=constant_tags)
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event1['tags'], constant_tags + test_tags)
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['tags'], constant_tags + test_tags)

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title,
                  event1_text,
                  priority=event1_priority,
                  date_happened=event1_date_happened,
                  tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'],
                        [event1_tag] + constant_tags + test_tags)
        dog.start(flush_interval=1, roll_up_interval=1)
Example #24
    def test_constant_tags(self):
        dog = ThreadStats(constant_tags=['type:constant'])
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
        dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter',
                      timestamp=100.0,
                      tags=['env:production', 'db'])
        dog.increment('counter', timestamp=100.0, tags=['env:staging'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['tags'], ['env:production', 'db', 'type:constant'])
        nt.assert_equal(c1['points'][0][1], 1)
        nt.assert_equal(c2['tags'], ['env:staging', 'type:constant'])
        nt.assert_equal(c2['points'][0][1], 1)
        nt.assert_equal(c3['tags'], ['type:constant'])
        nt.assert_equal(c3['points'][0][1], 1)

        for c in [g1, g2, g3]:
            nt.assert_equal(c['metric'], 'gauge')
        nt.assert_equal(g1['tags'], ['env:production', 'db', 'type:constant'])
        nt.assert_equal(g1['points'][0][1], 15)
        nt.assert_equal(g2['tags'], ['env:staging', 'type:constant'])
        nt.assert_equal(g2['points'][0][1], 20)
        nt.assert_equal(g3['tags'], ['type:constant'])
        nt.assert_equal(g3['points'][0][1], 10)

        # Ensure histograms work as well.
        @dog.timed('timed', tags=['version:1'])
        def test():
            pass

        test()
        dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric['tags']  # this is enough
Example #25
    def test_constant_tags(self):
        dog = ThreadStats(constant_tags=["type:constant"])
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge("gauge", 10, timestamp=100.0)
        dog.gauge("gauge", 15, timestamp=100.0, tags=["env:production", "db"])
        dog.gauge("gauge", 20, timestamp=100.0, tags=["env:staging"])

        dog.increment("counter", timestamp=100.0)
        dog.increment("counter", timestamp=100.0, tags=["env:production", "db"])
        dog.increment("counter", timestamp=100.0, tags=["env:staging"])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        (nt.assert_equal(c["metric"], "counter") for c in [c1, c2, c3])
        nt.assert_equal(c1["tags"], ["env:production", "db", "type:constant"])
        nt.assert_equal(c1["points"][0][1], 1)
        nt.assert_equal(c2["tags"], ["env:staging", "type:constant"])
        nt.assert_equal(c2["points"][0][1], 1)
        nt.assert_equal(c3["tags"], ["type:constant"])
        nt.assert_equal(c3["points"][0][1], 1)

        (nt.assert_equal(c["metric"], "gauge") for c in [g1, g2, g3])
        nt.assert_equal(g1["tags"], ["env:production", "db", "type:constant"])
        nt.assert_equal(g1["points"][0][1], 15)
        nt.assert_equal(g2["tags"], ["env:staging", "type:constant"])
        nt.assert_equal(g2["points"][0][1], 20)
        nt.assert_equal(g3["tags"], ["type:constant"])
        nt.assert_equal(g3["points"][0][1], 10)

        # Ensure histograms work as well.
        @dog.timed("timed", tags=["version:1"])
        def test():
            pass

        test()
        dog.histogram("timed", 20, timestamp=300.0, tags=["db", "version:2"])
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric["tags"]  # this is enough
Example #26
    def test_constant_tags(self):
        dog = ThreadStats(constant_tags=['type:constant'])
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
        dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter', timestamp=100.0, tags=['env:production', 'db'])
        dog.increment('counter', timestamp=100.0, tags=['env:staging'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 6)

        [c1, c2, c3, g1, g2, g3] = metrics
        for c in [c1, c2, c3]:
            nt.assert_equal(c['metric'], 'counter')
        nt.assert_equal(c1['tags'], ['env:production', 'db', 'type:constant'])
        nt.assert_equal(c1['points'][0][1], 1)
        nt.assert_equal(c2['tags'], ['env:staging', 'type:constant'])
        nt.assert_equal(c2['points'][0][1], 1)
        nt.assert_equal(c3['tags'], ['type:constant'])
        nt.assert_equal(c3['points'][0][1], 1)

        for c in [g1, g2, g3]:
            nt.assert_equal(c['metric'], 'gauge')
        nt.assert_equal(g1['tags'], ['env:production', 'db', 'type:constant'])
        nt.assert_equal(g1['points'][0][1], 15)
        nt.assert_equal(g2['tags'], ['env:staging', 'type:constant'])
        nt.assert_equal(g2['points'][0][1], 20)
        nt.assert_equal(g3['tags'], ['type:constant'])
        nt.assert_equal(g3['points'][0][1], 10)

        # Ensure histograms work as well.
        @dog.timed('timed', tags=['version:1'])
        def test():
            pass
        test()
        dog.histogram('timed', 20, timestamp=300.0, tags=['db', 'version:2'])
        reporter.metrics = []
        dog.flush(400)
        for metric in reporter.metrics:
            assert metric['tags']  # this is enough
Example #27
    def test_tags_from_environment_and_constant(self):
        test_tags = ['country:china', 'age:45', 'blue']
        constant_tags = ['country:canada', 'red']
        with preserve_environment_variable('DATADOG_TAGS'):
            os.environ['DATADOG_TAGS'] = ','.join(test_tags)
            dog = ThreadStats(constant_tags=constant_tags)
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event1['tags'], constant_tags + test_tags)
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['tags'], constant_tags + test_tags)

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title, event1_text, priority=event1_priority,
                  date_happened=event1_date_happened, tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'], [event1_tag] + constant_tags + test_tags)
        dog.start(flush_interval=1, roll_up_interval=1)
Example #28
    def test_metric_namespace(self):
        """
        Namespace prefixes all metric names.
        """
        # Set up ThreadStats with a namespace
        dog = ThreadStats(namespace="foo")
        dog.start(roll_up_interval=1, flush_in_thread=False)
        dog.reporter = self.reporter

        # Send a few metrics
        dog.gauge("gauge", 20, timestamp=100.0)
        dog.increment("counter", timestamp=100.0)
        dog.flush(200.0)

        # Metric names are prefixed with the namespace
        self.assertMetric(count=2)
        self.assertMetric(name="foo.gauge", count=1)
        self.assertMetric(name="foo.counter", count=1)
Example #29
    def test_metric_namespace(self):
        """
        Namespace prefixes all metric names.
        """
        # Set up ThreadStats with a namespace
        dog = ThreadStats(namespace="foo")
        dog.start(roll_up_interval=1, flush_in_thread=False)
        dog.reporter = self.reporter

        # Send a few metrics
        dog.gauge("gauge", 20, timestamp=100.0)
        dog.increment("counter", timestamp=100.0)
        dog.flush(200.0)

        # Metric names are prefixed with the namespace
        self.assertMetric(count=2)
        self.assertMetric(name="foo.gauge", count=1)
        self.assertMetric(name="foo.counter", count=1)
Example #30
    def test_event_constant_tags(self):
        constant_tag = 'type:constant'
        dog = ThreadStats(constant_tags=[constant_tag])
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event1['tags'], [constant_tag])
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['text'], event2_text)
        nt.assert_equal(event2['tags'], [constant_tag])

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title,
                  event1_text,
                  priority=event1_priority,
                  date_happened=event1_date_happened,
                  tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'], [event1_tag, constant_tag])
Example #31
    def test_event_constant_tags(self):
        constant_tag = "type:constant"
        dog = ThreadStats(constant_tags=[constant_tag])
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1["title"], event1_title)
        nt.assert_equal(event1["text"], event1_text)
        nt.assert_equal(event1["tags"], [constant_tag])
        nt.assert_equal(event2["title"], event2_title)
        nt.assert_equal(event2["text"], event2_text)
        nt.assert_equal(event2["text"], event2_text)
        nt.assert_equal(event2["tags"], [constant_tag])

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(
            event1_title, event1_text, priority=event1_priority, date_happened=event1_date_happened, tags=[event1_tag]
        )

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event["title"], event1_title)
        nt.assert_equal(event["text"], event1_text)
        nt.assert_equal(event["priority"], event1_priority)
        nt.assert_equal(event["date_happened"], event1_date_happened)
        nt.assert_equal(event["tags"], [event1_tag, constant_tag])
Example #32
    def test_tags(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Post the same metric with different tags.
        dog.gauge('gauge', 10, timestamp=100.0)
        dog.gauge('gauge', 15, timestamp=100.0, tags=['env:production', 'db'])
        dog.gauge('gauge', 20, timestamp=100.0, tags=['env:staging'])

        dog.increment('counter', timestamp=100.0)
        dog.increment('counter',
                      timestamp=100.0,
                      tags=['env:production', 'db'])
        dog.increment('counter', timestamp=100.0, tags=['env:staging'])

        dog.flush(200.0)

        metrics = self.sort_metrics(reporter.metrics)
        assert len(metrics) == 6

        [c1, c2, c3, g1, g2, g3] = metrics
        assert c1['metric'] == 'counter'
        assert c2['metric'] == 'counter'
        assert c3['metric'] == 'counter'
        assert c1['tags'] is None
        assert c1['points'][0][1] == 0.1
        assert c2['tags'] == ['env:production', 'db']
        assert c2['points'][0][1] == 0.1
        assert c3['tags'] == ['env:staging']
        assert c3['points'][0][1] == 0.1

        assert g1['metric'] == 'gauge'
        assert g2['metric'] == 'gauge'
        assert g3['metric'] == 'gauge'
        assert g1['tags'] is None
        assert g1['points'][0][1] == 10
        assert g2['tags'] == ['env:production', 'db']
        assert g2['points'][0][1] == 15
        assert g3['tags'] == ['env:staging']
        assert g3['points'][0][1] == 20
Example #33
    def test_gauge(self):
        # Create some fake metrics.
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        dog.gauge("test.gauge.1", 20, 100.0)
        dog.gauge("test.gauge.1", 22, 105.0)
        dog.gauge("test.gauge.2", 30, 115.0)
        dog.gauge("test.gauge.3", 30, 125.0)
        dog.flush(120.0)

        # Assert they've been properly flushed.
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 2)

        (first, second) = metrics
        nt.assert_equal(first["metric"], "test.gauge.1")
        nt.assert_equal(first["points"][0][0], 100.0)
        nt.assert_equal(first["points"][0][1], 22)
        nt.assert_equal(second["metric"], "test.gauge.2")

        # Flush again and make sure we're progressing.
        reporter.metrics = []
        dog.flush(130.0)
        nt.assert_equal(len(reporter.metrics), 1)

        # Finally, make sure we've flushed all metrics.
        reporter.metrics = []
        dog.flush(150.0)
        nt.assert_equal(len(reporter.metrics), 0)
Example #34
    def test_event(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        assert event1['title'] == event1_title
        assert event1['text'] == event1_text
        assert event2['title'] == event2_title
        assert event2['text'] == event2_text

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title,
                  event1_text,
                  priority=event1_priority,
                  date_happened=event1_date_happened,
                  tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        assert event['title'] == event1_title
        assert event['text'] == event1_text
        assert event['priority'] == event1_priority
        assert event['date_happened'] == event1_date_happened
        assert event['tags'] == [event1_tag]
Example #35
    def test_distribution(self):
        # Create some fake metrics.
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        dog.distribution('test.dist.1', 20, 100.0)
        dog.distribution('test.dist.1', 22, 105.0)
        dog.distribution('test.dist.2', 30, 115.0)
        dog.distribution('test.dist.3', 30, 125.0)
        dog.flush(120.0)

        # Assert they've been properly flushed.
        dists = self.sort_metrics(reporter.distributions)
        nt.assert_equal(len(dists), 2)

        (first, second) = dists
        nt.assert_equal(first['metric'], 'test.dist.1')
        nt.assert_equal(first['points'][0][0], 100.0)
        nt.assert_equal(first['points'][0][1], [20, 22])
        nt.assert_equal(second['metric'], 'test.dist.2')

        # Flush again and make sure we're progressing.
        reporter.distributions = []
        dog.flush(130.0)
        nt.assert_equal(len(reporter.distributions), 1)

        # Finally, make sure we've flushed all metrics.
        reporter.distributions = []
        dog.flush(150.0)
        nt.assert_equal(len(reporter.distributions), 0)
Example #36
    def test_gauge(self):
        # Create some fake metrics.
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        dog.gauge('test.gauge.1', 20, 100.0)
        dog.gauge('test.gauge.1', 22, 105.0)
        dog.gauge('test.gauge.2', 30, 115.0)
        dog.gauge('test.gauge.3', 30, 125.0)
        dog.flush(120.0)

        # Assert they've been properly flushed.
        metrics = self.sort_metrics(reporter.metrics)
        assert len(metrics) == 2

        (first, second) = metrics
        assert first['metric'] == 'test.gauge.1'
        assert first['points'][0][0] == 100.0
        assert first['points'][0][1] == 22
        assert second['metric'] == 'test.gauge.2'

        # Flush again and make sure we're progressing.
        reporter.metrics = []
        dog.flush(130.0)
        assert len(reporter.metrics) == 1

        # Finally, make sure we've flushed all metrics.
        reporter.metrics = []
        dog.flush(150.0)
        assert len(reporter.metrics) == 0
Example #37
    def test_tags_from_environment_env_service_version(self):
        test_tags = set(['env:staging', 'service:food', 'version:1.2.3'])
        with EnvVars(env_vars={
                "DD_ENV": "staging",
                "DD_VERSION": "1.2.3",
                "DD_SERVICE": "food",
        }):
            dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event1_text = "Event 1 text"
        dog.event(event1_title, event1_text)

        # Flush and test
        dog.flush()
        [event1] = reporter.events
        assert event1['title'] == event1_title
        assert event1['text'] == event1_text
        assert set(event1['tags']) == test_tags
Example #38
    def test_event(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add two events
        event1_title = "Event 1 title"
        event2_title = "Event 1 title"
        event1_text = "Event 1 text"
        event2_text = "Event 2 text"
        dog.event(event1_title, event1_text)
        dog.event(event2_title, event2_text)

        # Flush and test
        dog.flush()
        event1, event2 = reporter.events
        nt.assert_equal(event1['title'], event1_title)
        nt.assert_equal(event1['text'], event1_text)
        nt.assert_equal(event2['title'], event2_title)
        nt.assert_equal(event2['text'], event2_text)

        # Test more parameters
        reporter.events = []
        event1_priority = "low"
        event1_date_happened = 1375296969
        event1_tag = "Event 2 tag"
        dog.event(event1_title, event1_text, priority=event1_priority,
                  date_happened=event1_date_happened, tags=[event1_tag])

        # Flush and test
        dog.flush()
        event, = reporter.events
        nt.assert_equal(event['title'], event1_title)
        nt.assert_equal(event['text'], event1_text)
        nt.assert_equal(event['priority'], event1_priority)
        nt.assert_equal(event['date_happened'], event1_date_happened)
        nt.assert_equal(event['tags'], [event1_tag])
Example #39
    def test_counter(self):
        # Create some fake metrics.
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        dog.increment('test.counter.1', timestamp=1000.0)
        dog.increment('test.counter.1', value=2, timestamp=1005.0)
        dog.increment('test.counter.2', timestamp=1015.0)
        dog.increment('test.counter.3', timestamp=1025.0)
        dog.flush(1021.0)

        # Assert they've been properly flushed.
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 2)
        (first, second) = metrics
        nt.assert_equal(first['metric'], 'test.counter.1')
        nt.assert_equal(first['points'][0][0], 1000.0)
        nt.assert_equal(first['points'][0][1], 0.3)
        nt.assert_equal(second['metric'], 'test.counter.2')

        # Test decrement
        dog.increment('test.counter.1', value=10, timestamp=1000.0)
        dog.decrement('test.counter.1', value=2, timestamp=1005.0)
        reporter.metrics = []
        dog.flush(1021.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 1)
        first, = metrics
        nt.assert_equal(first['metric'], 'test.counter.1')
        nt.assert_equal(first['points'][0][0], 1000.0)
        nt.assert_equal(first['points'][0][1], 0.8)
        nt.assert_equal(second['metric'], 'test.counter.2')

        # Flush again and make sure we're progressing.
        reporter.metrics = []
        dog.flush(1030.0)
        nt.assert_equal(len(reporter.metrics), 1)

        # Finally, make sure we've flushed all metrics.
        reporter.metrics = []
        dog.flush(1050.0)
        nt.assert_equal(len(reporter.metrics), 0)
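The 0.3 and 0.8 values asserted above differ from the 3 and 8 in the next example because this older ThreadStats version appears to flush counters as per-second rates: the rolled-up count is divided by roll_up_interval. A quick sanity check of that reading; the interpretation is inferred from the asserted values, not taken from the library code.

roll_up_interval = 10

# First flush: one increment of 1 and one of 2 land in the same 10s bucket.
first_flush = (1 + 2) / float(roll_up_interval)
# Second flush: increment by 10, then decrement by 2 in the same bucket.
second_flush = (10 - 2) / float(roll_up_interval)

assert first_flush == 0.3
assert second_flush == 0.8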
Example #40
    def test_counter(self):
        # Create some fake metrics.
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        dog.increment("test.counter.1", timestamp=1000.0)
        dog.increment("test.counter.1", value=2, timestamp=1005.0)
        dog.increment("test.counter.2", timestamp=1015.0)
        dog.increment("test.counter.3", timestamp=1025.0)
        dog.flush(1021.0)

        # Assert they've been properly flushed.
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 2)
        (first, second) = metrics
        nt.assert_equal(first["metric"], "test.counter.1")
        nt.assert_equal(first["points"][0][0], 1000.0)
        nt.assert_equal(first["points"][0][1], 3)
        nt.assert_equal(second["metric"], "test.counter.2")

        # Test decrement
        dog.increment("test.counter.1", value=10, timestamp=1000.0)
        dog.decrement("test.counter.1", value=2, timestamp=1005.0)
        reporter.metrics = []
        dog.flush(1021.0)

        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 1)
        first, = metrics
        nt.assert_equal(first["metric"], "test.counter.1")
        nt.assert_equal(first["points"][0][0], 1000.0)
        nt.assert_equal(first["points"][0][1], 8)
        nt.assert_equal(second["metric"], "test.counter.2")

        # Flush again and make sure we're progressing.
        reporter.metrics = []
        dog.flush(1030.0)
        nt.assert_equal(len(reporter.metrics), 1)

        # Finally, make sure we've flushed all metrics.
        reporter.metrics = []
        dog.flush(1050.0)
        nt.assert_equal(len(reporter.metrics), 0)
Example #41
    def test_histogram(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add some histogram metrics.
        dog.histogram('histogram.1', 20, 100.0)
        dog.histogram('histogram.1', 30, 105.0)
        dog.histogram('histogram.1', 40, 106.0)
        dog.histogram('histogram.1', 50, 106.0)

        dog.histogram('histogram.1', 30, 110.0)
        dog.histogram('histogram.1', 50, 115.0)
        dog.histogram('histogram.1', 40, 116.0)

        dog.histogram('histogram.2', 40, 100.0)

        dog.histogram('histogram.3', 50, 134.0)

        # Flush and ensure they roll up properly.
        dog.flush(120.0)
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 24)

        # Test histograms elsewhere.
        (h1751, h1851, h1951, h1991, h1avg1, h1cnt1, h1max1, h1min1, _, _, _,
         _, h2avg1, h2cnt1, h2max1, h2min1, h1752, _, _, h1992, h1avg2, h1cnt2,
         h1max2, h1min2) = metrics

        nt.assert_equal(h1avg1['metric'], 'histogram.1.avg')
        nt.assert_equal(h1avg1['points'][0][0], 100.0)
        nt.assert_equal(h1avg1['points'][0][1], 35)
        nt.assert_equal(h1cnt1['metric'], 'histogram.1.count')
        nt.assert_equal(h1cnt1['points'][0][0], 100.0)
        nt.assert_equal(h1cnt1['points'][0][1], 0.4)
        nt.assert_equal(h1min1['metric'], 'histogram.1.min')
        nt.assert_equal(h1min1['points'][0][1], 20)
        nt.assert_equal(h1max1['metric'], 'histogram.1.max')
        nt.assert_equal(h1max1['points'][0][1], 50)
        nt.assert_equal(h1751['metric'], 'histogram.1.75percentile')
        nt.assert_equal(h1751['points'][0][1], 40)
        nt.assert_equal(h1991['metric'], 'histogram.1.99percentile')
        nt.assert_equal(h1991['points'][0][1], 50)

        nt.assert_equal(h1avg2['metric'], 'histogram.1.avg')
        nt.assert_equal(h1avg2['points'][0][0], 110.0)
        nt.assert_equal(h1avg2['points'][0][1], 40)
        nt.assert_equal(h1cnt2['metric'], 'histogram.1.count')
        nt.assert_equal(h1cnt2['points'][0][0], 110.0)
        nt.assert_equal(h1cnt2['points'][0][1], 0.3)
        nt.assert_equal(h1752['metric'], 'histogram.1.75percentile')
        nt.assert_equal(h1752['points'][0][0], 110.0)
        nt.assert_equal(h1752['points'][0][1], 40.0)
        nt.assert_equal(h1992['metric'], 'histogram.1.99percentile')
        nt.assert_equal(h1992['points'][0][0], 110.0)
        nt.assert_equal(h1992['points'][0][1], 50.0)

        nt.assert_equal(h2avg1['metric'], 'histogram.2.avg')
        nt.assert_equal(h2avg1['points'][0][0], 100.0)
        nt.assert_equal(h2avg1['points'][0][1], 40)
        nt.assert_equal(h2cnt1['metric'], 'histogram.2.count')
        nt.assert_equal(h2cnt1['points'][0][0], 100.0)
        nt.assert_equal(h2cnt1['points'][0][1], 0.1)

        # Flush again ensure they're gone.
        dog.reporter.metrics = []
        dog.flush(140.0)
        nt.assert_equal(len(dog.reporter.metrics), 8)
        dog.reporter.metrics = []
        dog.flush(200.0)
        nt.assert_equal(len(dog.reporter.metrics), 0)
Example #42
    def check(self):
        logging.info('check info')
        try:
            yaml_file = os.environ.get('DATADOG_CONF',
                                       '%s/aws_redshift_status.yaml' % config.get_confd_path())
            with open(yaml_file) as f:
                yaml_data = yaml.safe_load(f)
            init_config = yaml_data['init_config']
            interval = init_config.get('min_collection_interval', 300)

            stats = ThreadStats()
            stats.start(flush_interval=10, roll_up_interval=1, device=None,
                        flush_in_thread=False, flush_in_greenlet=False, disabled=False)

            start = time.time()
            for instance in yaml_data['instances']:
                logging.debug('instance name is %s' % instance['name'])

                name, cluster_name, cluster_address, cluster_port, db_name, user_name, user_password, \
                    aws_access_key_id, aws_secret_access_key, aws_region, query, \
                    tags = self._load_conf(instance)

                if cluster_address is None and cluster_port is None:
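                    # No endpoint configured for this instance: resolve it via the Redshift API.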
                    redshift = boto.redshift.connect_to_region(aws_region,
                                                               aws_access_key_id=aws_access_key_id,
                                                               aws_secret_access_key=aws_secret_access_key)
                    clusters = redshift.describe_clusters(cluster_name)
                    if len(clusters) == 0:
                        raise Exception('Cluster is empty')

                    cluster = clusters['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
                    endpoint = cluster['Endpoint']
                    cluster_address = endpoint['Address']
                    cluster_port = endpoint['Port']

                conn = None
                try:
                    connect_timeout = init_config.get('connect_timeout', 5)
                    conn = psycopg2.connect(
                        host=cluster_address,
                        port=cluster_port,
                        database=db_name,
                        user=user_name,
                        password=user_password,
                        connect_timeout=connect_timeout,
                    )

                    today = datetime.datetime.utcnow()
                    starttime = (today - datetime.timedelta(seconds=interval)).strftime('%Y-%m-%d %H:%M:%S.%f')
                    endtime = today.strftime('%Y-%m-%d %H:%M:%S.%f')
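                    # starttime/endtime bound the query-log window (the last 'interval' seconds) used below.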

                    results = self._db_query(conn, QUERY_TABLE_COUNT)
                    stats.gauge('aws.redshift_status.table_count', results[0][0], tags=tags)
                    logging.debug('aws.redshift_status.table_count is %s' % results[0][0])

                    results = self._db_query(conn, QUERY_NODE)
                    for row in results:
                        gauge_tags = tags[:]
                        gauge_tags.append('node:%s' % row[0])
                        stats.gauge('aws_redshift_status.node_slice', row[1], tags=gauge_tags)
                        logging.debug('aws_redshift_status.node_slice is %s' % row[1])

                    results = self._db_query(conn, QUERY_TABLE_RECORD)
                    for row in results:
                        gauge_tags = tags[:]
                        gauge_tags.append('table:%s' % row[0])
                        stats.gauge('aws_redshift_status.table_records', row[1], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_records is %s' % row[1])

                    results = self._db_query(conn, QUERY_TABLE_STATUS)
                    for row in results:
                        gauge_tags = tags[:]
                        gauge_tags.append('table:%s' % row[0])
                        stats.gauge('aws_redshift_status.table_status.size', row[1], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_status.size is %s' % row[1])
                        stats.gauge('aws_redshift_status.table_status.tbl_rows', row[2], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_status.tbl_rows is %s' % row[2])
                        stats.gauge('aws_redshift_status.table_status.skew_rows', row[3], tags=gauge_tags)
                        logging.debug('aws_redshift_status.table_status.skew_rows is %s' % row[3])

                    for q in [ 'select', 'insert', 'update', 'delete', 'analyze' ]:
                        results = self._db_query(conn, QUERY_LOG_TYPE % (starttime, endtime, '%s %%' % q))
                        for row in results:
                            stats.gauge('aws_redshift_status.query.%s' % q, row[0], tags=tags)
                            logging.debug('aws_redshift_status.query.%s is %s' % (q, row[0]))

                        running_time = time.time() - start
                        stats.gauge('aws_redshift_status.response_time', running_time, tags=tags)
                        logging.debug('aws_redshift_status.response_time is %s' % running_time)
                finally:
                    if conn:
                        conn.close()

            stats.flush()
            stop = stats.stop()
            logging.debug('Stopping is %s' % stop)
        except Exception:
            logging.warning(sys.exc_info())
Example #43
class OpenvpnMonitor():
    def __init__(self,
                 monitor_host,
                 monitor_port,
                 interval,
                 datadog=True,
                 elastic=False):
        self.host = monitor_host
        self.port = monitor_port
        self.interval = interval
        self.s = None
        self.datadog = datadog
        self.init_datadog()
        self.stats = ThreadStats()
        self.stats.start(flush_interval=interval, flush_in_thread=False)
        self.tags = ['server:{}'.format(os.uname()[1]), 'type:openvpn']

    def connect(self):
        try:
            self.s = socket.create_connection((self.host, self.port), 2)
        except OSError:
            print('Unable to connect')
            sys.exit(1)

    def init_datadog(self):
        options = {
            'api_key': os.getenv('DD_API_KEY'),
            'app_key': os.getenv('DD_APP_KEY')
        }
        initialize(**options)
        logging.basicConfig(level=logging.DEBUG)

    def flush_datadog(self):
        self.stats.flush()

    def disconnect(self):
        self.s.send('quit\n'.encode('ascii'))
        self.s.shutdown(socket.SHUT_RDWR)
        self.s.close()

    def get_loadstats(self):
        self.s.send('load-stats\n'.encode('ascii'))
        return self.get_data()

    def get_status(self):
        self.s.send('status 2\n'.encode('ascii'))
        return self.get_data()

    def get_version(self):
        self.s.send('version\n'.encode('ascii'))
        return self.get_data()

    def get_data(self):
        socket_list = [self.s]
        read_sockets, write_sockets, error_sockets = select.select(
            socket_list, [], [])
        for sock in read_sockets:
            data = sock.recv(65565)
        return data.decode('utf8')

    def parse_version(self, version, datadog=True, elastic=False):
        """OpenVPN Version: OpenVPN 2.4.3 x86_64-redhat-linux-gnu [Fedora EPEL patched] [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on Jun 21 2017
OpenVPN Version: OpenVPN 2.3.14 x86_64-alpine-linux-musl [SSL (OpenSSL)] [LZO] [EPOLL] [MH] [IPv6] built on Dec 18 2016"""
        ver = version.split(" ")
        tags = ["version:{}_{}".format(ver[2], ver[3])]
        self.tags += tags

    def parse_loadstats(self, loadstats, datadog=True, elastic=False):
        pattern = re.compile(
            r"SUCCESS:.*nclients=(?P<nclients>\d*),bytesin=(?P<bytesin>\d*),bytesout=(?P<bytesout>\d*).*"
        )
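        # Only the last matching line is kept; load-stats normally returns a single SUCCESS line.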
        for line in loadstats.splitlines():
            o_stats = pattern.match(line)
        if o_stats:
            if self.datadog:
                self.stats.gauge('openvpn.nclients',
                                 int(o_stats.group('nclients')),
                                 tags=self.tags)
                self.stats.gauge('openvpn.bytesin',
                                 int(o_stats.group('bytesin')),
                                 tags=self.tags)
                self.stats.gauge('openvpn.bytesout',
                                 int(o_stats.group('bytesout')),
                                 tags=self.tags)

    def parse_status(self, status):
        """HEADER,CLIENT_LIST,Common Name,Real Address,Virtual Address,Bytes Received,Bytes Sent,Connected Since,Connected Since (time_t),Username
           HEADER,CLIENT_LIST,Common Name,Real Address,Virtual Address,Virtual IPv6 Address,Bytes Received,Bytes Sent,Connected Since,Connected Since (time_t),Username,Client ID,Peer ID
CLIENT_LIST,globbi,192.168.1.112:56513,10.8.0.18,,2735402,5955826,Sun Oct  1 20:15:18 2017,1506888918,jakobant,36,1"""
        COMMONNAME = 1
        REAL_ADDR = 2
        VIRT_ADDR = 3
        BYTESIN = 5  # 4
        BYTESOUT = 6  # 5
        USERNAME = 9  # 8
        CONN_SINCET = 8  # 7
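        # The indexes above assume the wider 'status 2' row layout; shorter rows
        # (older status format) are remapped inside the loop below.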
        for line in status.splitlines():
            if line.startswith('CLIENT_LIST'):
                o_stats = line.split(',')
                if len(o_stats) < 10:
                    BYTESIN = 4  # 4
                    BYTESOUT = 5  # 5
                    USERNAME = 8  # 8
                    CONN_SINCET = 7  # 7
                if self.datadog:
                    tags = [
                        'commonname:{}'.format(
                            o_stats[COMMONNAME]), 'real_addr:{}'.format(
                                o_stats[REAL_ADDR].split(":")[0]),
                        'virt_addr:{}'.format(o_stats[VIRT_ADDR]),
                        'username:{}'.format(o_stats[USERNAME])
                    ] + self.tags
                    connected_time = int(time.time()) - int(
                        o_stats[CONN_SINCET])
                    self.stats.gauge('openvpn.client.bytesin',
                                     int(o_stats[BYTESIN]),
                                     tags=tags)
                    self.stats.gauge('openvpn.client.bytesout',
                                     int(o_stats[BYTESOUT]),
                                     tags=tags)
                    self.stats.gauge('openvpn.client.conntime',
                                     connected_time,
                                     tags=tags)

    def tail_log(self, logfile):
        """Fri Sep 29 21:29:59 2017 192.168.1.112:62493 TLS: Username/Password authentication succeeded for username 'jakobant'
Fri Sep 29 21:31:57 2017 192.168.1.112:62787 VERIFY OK: depth=1, C=IS, ST=Rkv, L=Reykjavik, O=Heima, OU=Ops, CN=Heima CA, name=EasyRSA, [email protected]
Fri Sep 29 21:31:57 2017 192.168.1.112:62787 VERIFY OK: depth=0, C=IS, ST=Rkv, L=Reykjavik, O=Heima, OU=Ops, CN=globbi, name=EasyRSA, [email protected]
AUTH-PAM: BACKGROUND: user 'jakobant' failed to authenticate: Authentication failure"""
        login = re.compile(r".*authentication succeeded.*")
        failed_login = re.compile(
            r".*(failed to authenticate|Incorrect password|was not found).*")
        for line in Pygtail(logfile):
            match = login.match(line)
            if match:
                print(line)
                self.stats.event('Login success',
                                 line,
                                 alert_type='success',
                                 tags=self.tags)
            match = failed_login.match(line)
            if match:
                print(line)
                self.stats.event('Authentication failure',
                                 line,
                                 alert_type='error',
                                 tags=self.tags)
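
A minimal driver sketch for the class above, assuming the OpenVPN management interface listens on 127.0.0.1:7505, a 10-second interval, and DD_API_KEY/DD_APP_KEY in the environment; the host, port and single polling pass are assumptions, not part of the original example:

# Hypothetical usage of the OpenvpnMonitor class defined above.
monitor = OpenvpnMonitor('127.0.0.1', 7505, interval=10)
monitor.connect()
monitor.parse_version(monitor.get_version())
monitor.parse_loadstats(monitor.get_loadstats())
monitor.parse_status(monitor.get_status())
monitor.disconnect()
monitor.flush_datadog()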
Example #44
    def test_histogram(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add some histogram metrics.
        dog.histogram("histogram.1", 20, 100.0)
        dog.histogram("histogram.1", 30, 105.0)
        dog.histogram("histogram.1", 40, 106.0)
        dog.histogram("histogram.1", 50, 106.0)

        dog.histogram("histogram.1", 30, 110.0)
        dog.histogram("histogram.1", 50, 115.0)
        dog.histogram("histogram.1", 40, 116.0)

        dog.histogram("histogram.2", 40, 100.0)

        dog.histogram("histogram.3", 50, 134.0)

        # Flush and ensure they roll up properly.
        dog.flush(120.0)
        metrics = self.sort_metrics(reporter.metrics)
        nt.assert_equal(len(metrics), 24)

        # Test histograms elsewhere.
        (
            h1751,
            h1851,
            h1951,
            h1991,
            h1avg1,
            h1cnt1,
            h1max1,
            h1min1,
            _,
            _,
            _,
            _,
            h2avg1,
            h2cnt1,
            h2max1,
            h2min1,
            h1752,
            _,
            _,
            h1992,
            h1avg2,
            h1cnt2,
            h1max2,
            h1min2,
        ) = metrics

        nt.assert_equal(h1avg1["metric"], "histogram.1.avg")
        nt.assert_equal(h1avg1["points"][0][0], 100.0)
        nt.assert_equal(h1avg1["points"][0][1], 35)
        nt.assert_equal(h1cnt1["metric"], "histogram.1.count")
        nt.assert_equal(h1cnt1["points"][0][0], 100.0)
        nt.assert_equal(h1cnt1["points"][0][1], 4)
        nt.assert_equal(h1min1["metric"], "histogram.1.min")
        nt.assert_equal(h1min1["points"][0][1], 20)
        nt.assert_equal(h1max1["metric"], "histogram.1.max")
        nt.assert_equal(h1max1["points"][0][1], 50)
        nt.assert_equal(h1751["metric"], "histogram.1.75percentile")
        nt.assert_equal(h1751["points"][0][1], 40)
        nt.assert_equal(h1991["metric"], "histogram.1.99percentile")
        nt.assert_equal(h1991["points"][0][1], 50)

        nt.assert_equal(h1avg2["metric"], "histogram.1.avg")
        nt.assert_equal(h1avg2["points"][0][0], 110.0)
        nt.assert_equal(h1avg2["points"][0][1], 40)
        nt.assert_equal(h1cnt2["metric"], "histogram.1.count")
        nt.assert_equal(h1cnt2["points"][0][0], 110.0)
        nt.assert_equal(h1cnt2["points"][0][1], 3)
        nt.assert_equal(h1752["metric"], "histogram.1.75percentile")
        nt.assert_equal(h1752["points"][0][0], 110.0)
        nt.assert_equal(h1752["points"][0][1], 40.0)
        nt.assert_equal(h1992["metric"], "histogram.1.99percentile")
        nt.assert_equal(h1992["points"][0][0], 110.0)
        nt.assert_equal(h1992["points"][0][1], 50.0)

        nt.assert_equal(h2avg1["metric"], "histogram.2.avg")
        nt.assert_equal(h2avg1["points"][0][0], 100.0)
        nt.assert_equal(h2avg1["points"][0][1], 40)
        nt.assert_equal(h2cnt1["metric"], "histogram.2.count")
        nt.assert_equal(h2cnt1["points"][0][0], 100.0)
        nt.assert_equal(h2cnt1["points"][0][1], 1)

        # Flush again to ensure they're gone.
        dog.reporter.metrics = []
        dog.flush(140.0)
        nt.assert_equal(len(dog.reporter.metrics), 8)
        dog.reporter.metrics = []
        dog.flush(200.0)
        nt.assert_equal(len(dog.reporter.metrics), 0)
Example #45
    def test_histogram(self):
        dog = ThreadStats()
        dog.start(roll_up_interval=10, flush_in_thread=False)
        reporter = dog.reporter = MemoryReporter()

        # Add some histogram metrics.
        dog.histogram('histogram.1', 20, 100.0)
        dog.histogram('histogram.1', 30, 105.0)
        dog.histogram('histogram.1', 40, 106.0)
        dog.histogram('histogram.1', 50, 106.0)

        dog.histogram('histogram.1', 30, 110.0)
        dog.histogram('histogram.1', 50, 115.0)
        dog.histogram('histogram.1', 40, 116.0)

        dog.histogram('histogram.2', 40, 100.0)

        dog.histogram('histogram.3', 50, 134.0)

        # Flush and ensure they roll up properly.
        dog.flush(120.0)
        metrics = self.sort_metrics(reporter.metrics)
        assert len(metrics) == 24

        # Test histograms elsewhere.
        (h1751, h1851, h1951, h1991, h1avg1, h1cnt1, h1max1, h1min1, _, _, _,
         _, h2avg1, h2cnt1, h2max1, h2min1, h1752, _, _, h1992, h1avg2, h1cnt2,
         h1max2, h1min2) = metrics

        assert h1avg1['metric'] == 'histogram.1.avg'
        assert h1avg1['points'][0][0] == 100.0
        assert h1avg1['points'][0][1] == 35
        assert h1cnt1['metric'] == 'histogram.1.count'
        assert h1cnt1['points'][0][0] == 100.0
        assert h1cnt1['points'][0][1] == 0.4
        assert h1min1['metric'] == 'histogram.1.min'
        assert h1min1['points'][0][1] == 20
        assert h1max1['metric'] == 'histogram.1.max'
        assert h1max1['points'][0][1] == 50
        assert h1751['metric'] == 'histogram.1.75percentile'
        assert h1751['points'][0][1] == 40
        assert h1991['metric'] == 'histogram.1.99percentile'
        assert h1991['points'][0][1] == 50

        assert h1avg2['metric'] == 'histogram.1.avg'
        assert h1avg2['points'][0][0] == 110.0
        assert h1avg2['points'][0][1] == 40
        assert h1cnt2['metric'] == 'histogram.1.count'
        assert h1cnt2['points'][0][0] == 110.0
        assert h1cnt2['points'][0][1] == 0.3
        assert h1752['metric'] == 'histogram.1.75percentile'
        assert h1752['points'][0][0] == 110.0
        assert h1752['points'][0][1] == 40.0
        assert h1992['metric'] == 'histogram.1.99percentile'
        assert h1992['points'][0][0] == 110.0
        assert h1992['points'][0][1] == 50.0

        assert h2avg1['metric'] == 'histogram.2.avg'
        assert h2avg1['points'][0][0] == 100.0
        assert h2avg1['points'][0][1] == 40
        assert h2cnt1['metric'] == 'histogram.2.count'
        assert h2cnt1['points'][0][0] == 100.0
        assert h2cnt1['points'][0][1] == 0.1

        # Flush again to ensure they're gone.
        dog.reporter.metrics = []
        dog.flush(140.0)
        assert len(dog.reporter.metrics) == 8
        dog.reporter.metrics = []
        dog.flush(200.0)
        assert len(dog.reporter.metrics) == 0
Example #46
def ingest_trip_updates():
    stats = ThreadStats()
    stats.start()
    counter = 0

    trip_feed = gtfs_realtime_pb2.FeedMessage()
    trip_response = requests.get('https://cdn.mbta.com/realtime/TripUpdates.pb')
    trip_feed.ParseFromString(trip_response.content)
    trip_feed_ts = trip_feed.header.timestamp
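    # The feed header timestamp serves as 'now' when computing arrival offsets below.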
    for entity in trip_feed.entity:
        if entity.HasField('trip_update'):
            trip_update = entity.trip_update
            if trip_update.trip.route_id not in enabled_routes:
                continue
            route_name = trip_update.trip.route_id
            if trip_update.trip.route_id in route_names:
                route_name = route_names[trip_update.trip.route_id]
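            # The last stop_time_update entry identifies the trip's destination.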
            last_stop_id = trip_update.stop_time_update[len(trip_update.stop_time_update) - 1].stop_id
            destination = stop_names[last_stop_id]
            trip_id = trip_update.trip.trip_id
            vehicle = trip_update.vehicle.label

            for stop in trip_update.stop_time_update:
                stop_name = stop_names[stop.stop_id]

                if stop.departure.time > 0:
                    if stop.arrival.time > 0:
                        # mid-route stop, use arrival time
                        arrival_ts = stop.arrival.time
                    else:
                        # first stop, use departure time
                        arrival_ts = stop.departure.time
                else:
                    # last stop, ignore
                    continue

                arrives_in = arrival_ts - trip_feed_ts
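                # Arrivals more than two minutes out are tagged as still catchable.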
                catchable_tag = 'catchable:false'
                if arrives_in > 120:
                    catchable_tag = 'catchable:true'

                tags = [
                    'trip_id:{}'.format(trip_id),
                    'stop:{}'.format(stop_name),
                    'destination:{}'.format(destination),
                    'vehicle:{}'.format(vehicle),
                    'route:{}'.format(route_name),
                    catchable_tag,
                ]
                if route_name.startswith('Green'):
                    tags.append('route:green')
                stats.gauge('mbta.trip.arrival_secs', arrives_in, tags=tags)
                stats.gauge('mbta.trip.arrival_min', arrives_in / 60, tags=tags)
                counter += 1
                if counter % 50 == 0:
                    print("Flushing trip updates {}...".format(counter))
                    stats.flush()
                    print("Done")

    print("Flushing trip updates {}...".format(counter))
    stats.flush()
    print("Done")
Example #47
def ingest_currentmetrics():
    stats = ThreadStats()
    stats.start()
    counter = 0

    mbta_perf_api_key = os.environ.get('MBTA_PERF_API_KEY')

    routes = ['red', 'orange', 'blue', 'green-B', 'green-C', 'green-D', 'green-E']
    for route in routes:
        currentmetrics_url = 'http://realtime.mbta.com/developer/api/v2.1/currentmetrics?api_key={api_key}&format=json&route={route}'.format(
            route = route,
            api_key = mbta_perf_api_key,
        )
        currentmetrics_response = requests.get(currentmetrics_url)
        print("Response code from perf API for {route}: {status}, size: {size}".format(
            route = route,
            status = currentmetrics_response.status_code,
            size = len(currentmetrics_response.text)
        ))
        if currentmetrics_response.status_code != 200:
            print("Error loading perf metrics: {error}".format(
                error = currentmetrics_response.text
            ))
        currentmetrics = json.loads(currentmetrics_response.content)

        # in the absence of data, assume good service, which means 100% of customers under all thresholds
        metrics = {
            'threshold_id_01.metric_result_last_hour': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:Headway',
                    'threshold_type:wait_time_headway_based',
                ],
            },
            'threshold_id_01.metric_result_current_day': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:Headway',
                    'threshold_type:wait_time_headway_based',
                ],
            },
            'threshold_id_02.metric_result_last_hour': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:Big Gap',
                    'threshold_type:wait_time_headway_based',
                ],
            },
            'threshold_id_02.metric_result_current_day': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:Big Gap',
                    'threshold_type:wait_time_headway_based',
                ],
            },
            'threshold_id_03.metric_result_last_hour': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:2X Headway',
                    'threshold_type:wait_time_headway_based',
                ],
            },
            'threshold_id_03.metric_result_current_day': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:2X Headway',
                    'threshold_type:wait_time_headway_based',
                ],
            },
            'threshold_id_04.metric_result_last_hour': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:delayed < 3 min.',
                    'threshold_type:travel_time',
                ],
            },
            'threshold_id_04.metric_result_current_day': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:delayed < 3 min.',
                    'threshold_type:travel_time',
                ],
            },
            'threshold_id_05.metric_result_last_hour': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:delayed < 6 min.',
                    'threshold_type:travel_time',
                ],
            },
            'threshold_id_05.metric_result_current_day': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:delayed < 6 min.',
                    'threshold_type:travel_time',
                ],
            },
            'threshold_id_06.metric_result_last_hour': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:delayed 10 min.',
                    'threshold_type:travel_time',
                ],
            },
            'threshold_id_06.metric_result_current_day': {
                'value': 1,
                'tags': [
                    'route:{}'.format(route),
                    'threshold_name:delayed 10 min.',
                    'threshold_type:travel_time',
                ],
            },
        }
        if route.startswith('green'):
            for key in metrics:
                metrics[key]['tags'].append('route:green')

        if 'current_metrics' in currentmetrics:
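            # Overwrite the optimistic defaults with the values the API actually returned.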
            for threshold in currentmetrics['current_metrics']:
                metric_last_hour = '{}.metric_result_last_hour'.format(threshold['threshold_id'])
                metric_current_day = '{}.metric_result_current_day'.format(threshold['threshold_id'])
                metrics[metric_last_hour]['value'] = threshold['metric_result_last_hour']
                metrics[metric_current_day]['value'] = threshold['metric_result_current_day']

        for metric_name, values in metrics.items():
            stats.gauge('mbta.perf.{}'.format(metric_name), values['value'], tags=values['tags'])
            counter += 1
            if counter % 50 == 0:
                print("Flushing currentmetrics {}...".format(counter))
                stats.flush()
                print("Done")

    print("Flushing currentmetrics {}...".format(counter))
    stats.flush()
    print("Done")